column          type    min length  max length
hip_filename    string  5           84
hip_content     string  79          9.69M
cuda_filename   string  4           83
cuda_content    string  19          9.69M
488dfd6b81f1b6eb5a331dd21f471a271826a1f8.hip
// !!! This is a file automatically generated by hipify!!! /** * Copyright (c) 2020 Xiaomi Corporation (authors: Fangjun Kuang) * * See LICENSE for clarification regarding multiple authors */ #include <cstdlib> #include "k2/csrc/array_ops.h" #include "k2/csrc/benchmark/benchmark.h" #include "k2/csrc/test_utils.h" namespace k2 { template <typename T> static BenchmarkStat BenchmarkExclusiveSum(int32_t dim, DeviceType device_type) { ContextPtr context; if (device_type == kCpu) { context = GetCpuContext(); } else { K2_CHECK_EQ(device_type, kCuda); context = GetCudaContext(); } int32_t num_iter = ::min(500, 1000000 / dim); Array1<T> src = RandUniformArray1<T>(context, dim, -1000, 1000, GetSeed()); BenchmarkStat stat; stat.op_name = "ExclusiveSum"; stat.num_iter = num_iter; stat.problem_size = dim; stat.dtype_name = TraitsOf(DtypeOf<T>::dtype).Name(); stat.device_type = device_type; // there are overloads of ExclusiveSum, so we use an explicit conversion here. stat.eplased_per_iter = BenchmarkOp(num_iter, context, (Array1<T>(*)(const Array1<T> &))(&ExclusiveSum<T>), src); stat.eplased_per_iter *= 1e6; // from seconds to microseconds return stat; } static BenchmarkStat BenchmarkRowSplitsToRowIds(int32_t dim, DeviceType device_type) { ContextPtr context; if (device_type == kCpu) { context = GetCpuContext(); } else { K2_CHECK_EQ(device_type, kCuda); context = GetCudaContext(); } int32_t num_iter = ::min(500, 1000000 / dim); Array1<int32_t> sizes = RandUniformArray1<int32_t>(context, dim, 0, 1000, GetSeed()); Array1<int32_t> row_splits = ExclusiveSum(sizes); Array1<int32_t> row_ids(context, row_splits.Back()); BenchmarkStat stat; stat.op_name = "RowSplitsToRowIds_" + std::to_string(row_ids.Dim()); stat.num_iter = num_iter; stat.problem_size = dim; stat.dtype_name = TraitsOf(DtypeOf<int32_t>::dtype).Name(); stat.device_type = device_type; // there are overloads of RowSplitsToRowIds, // so we use an explicit conversion here. stat.eplased_per_iter = BenchmarkOp(num_iter, context, (void (*)(const Array1<int32_t> &, Array1<int32_t> *))( &RowSplitsToRowIds), row_splits, &row_ids); stat.eplased_per_iter *= 1e6; // from seconds to microseconds return stat; } static BenchmarkStat BenchmarkRowIdsToRowSplits(int32_t dim, DeviceType device_type) { ContextPtr context; if (device_type == kCpu) { context = GetCpuContext(); } else { K2_CHECK_EQ(device_type, kCuda); context = GetCudaContext(); } int32_t num_iter = ::min(500, 1000000 / dim); Array1<int32_t> sizes = RandUniformArray1<int32_t>(context, dim, 0, 1000, GetSeed()); Array1<int32_t> row_splits = ExclusiveSum(sizes); Array1<int32_t> row_ids(context, row_splits.Back()); RowSplitsToRowIds(row_splits, &row_ids); BenchmarkStat stat; stat.op_name = "RowIdsToRowSplits"; stat.num_iter = num_iter; stat.problem_size = dim; stat.dtype_name = TraitsOf(DtypeOf<int32_t>::dtype).Name(); stat.device_type = device_type; // there are overloads of RowIdsToRowSplits, // so we use an explicit conversion here. 
stat.eplased_per_iter = BenchmarkOp(num_iter, context, (void (*)(const Array1<int32_t> &, Array1<int32_t> *))( &RowIdsToRowSplits), row_ids, &row_splits); stat.eplased_per_iter *= 1e6; // from seconds to microseconds return stat; } template <typename T> static BenchmarkStat BenchmarkAppend(int32_t num_array, DeviceType device_type) { ContextPtr context; if (device_type == kCpu) { context = GetCpuContext(); } else { K2_CHECK_EQ(device_type, kCuda); context = GetCudaContext(); } std::vector<Array1<T>> arrays_vec(num_array); std::vector<const Array1<T> *> arrays(num_array); int32_t total_size = 0, max_size = 0; // notice `j != num_array - 1` below, we may push a very long array // after the loop for (int32_t j = 0; j != num_array - 1; ++j) { int32_t curr_array_size = RandInt(0, 10000); std::vector<T> data(curr_array_size); std::iota(data.begin(), data.end(), total_size); total_size += curr_array_size; arrays_vec[j] = Array1<T>(context, data); arrays[j] = &arrays_vec[j]; if (curr_array_size > max_size) max_size = curr_array_size; } { // below we may generate an array with very large size depend on the value // of RandInt(0,1) int32_t average_size = total_size / num_array; int32_t curr_array_size = RandInt(0, 1) == 0 ? RandInt(0, 10000) : average_size * 10; std::vector<T> data(curr_array_size); std::iota(data.begin(), data.end(), total_size); total_size += curr_array_size; arrays_vec[num_array - 1] = Array1<T>(context, data); arrays[num_array - 1] = &arrays_vec[num_array - 1]; if (curr_array_size > max_size) max_size = curr_array_size; } bool is_balanced = (max_size < 2 * (total_size / num_array) + 512); const Array1<T> **src = arrays.data(); BenchmarkStat stat; stat.op_name = "Append_" + std::to_string(num_array) + "_" + std::to_string(total_size) + "_" + std::to_string(total_size / num_array) + "_" + std::to_string(max_size) + "_" + std::to_string(is_balanced); int32_t num_iter = 20; stat.num_iter = num_iter; stat.problem_size = num_array; stat.dtype_name = TraitsOf(DtypeOf<T>::dtype).Name(); stat.device_type = device_type; stat.eplased_per_iter = BenchmarkOp( num_iter, context, (Array1<T>(*)(ContextPtr, int32_t, const Array1<T> **))(&Append<T>), context, num_array, src); stat.eplased_per_iter *= 1e6; // from seconds to microseconds return stat; } static BenchmarkStat BenchmarkSpliceRowSplits(int32_t num_array, DeviceType device_type) { ContextPtr context; if (device_type == kCpu) { context = GetCpuContext(); } else { K2_CHECK_EQ(device_type, kCuda); context = GetCudaContext(); } std::vector<Array1<int32_t>> arrays_vec(num_array); std::vector<const Array1<int32_t> *> arrays(num_array); int32_t total_size = 0, max_size = 0, total_num_elem = 0; // notice `j != num_array - 1` below, we may push a very long array // after the loop for (int32_t j = 0; j != num_array - 1; ++j) { int32_t num_elements = RandInt(0, 10000); total_num_elem += num_elements; RaggedShape shape = RandomRaggedShape(false, 2, 2, num_elements, num_elements); Array1<int32_t> row_splits = shape.RowSplits(1).To(context); int32_t array_size = row_splits.Dim(); total_size += array_size; arrays_vec[j] = row_splits; arrays[j] = &arrays_vec[j]; if (array_size > max_size) max_size = array_size; } { // below we may generate an array with very large size depend on the value // of RandInt(0,1) int32_t average_size = total_num_elem / num_array; int32_t num_elements = RandInt(0, 1) == 0 ? 
RandInt(0, 10000) : average_size * 10; RaggedShape shape = RandomRaggedShape(false, 2, 2, num_elements, num_elements); Array1<int32_t> row_splits = shape.RowSplits(1).To(context); int32_t array_size = row_splits.Dim(); total_size += array_size; arrays_vec[num_array - 1] = row_splits; arrays[num_array - 1] = &arrays_vec[num_array - 1]; if (array_size > max_size) max_size = array_size; } bool is_balanced = (max_size < 2 * (total_size / num_array) + 512); const Array1<int32_t> **src = arrays.data(); BenchmarkStat stat; stat.op_name = "SpliceRowSplits_" + std::to_string(num_array) + "_" + std::to_string(total_size) + "_" + std::to_string(total_size / num_array) + "_" + std::to_string(max_size) + "_" + std::to_string(is_balanced); int32_t num_iter = 20; stat.num_iter = num_iter; stat.problem_size = num_array; stat.device_type = device_type; stat.eplased_per_iter = BenchmarkOp( num_iter, context, (Array1<int32_t>(*)(int32_t, const Array1<int32_t> **))(&SpliceRowSplits), num_array, src); stat.eplased_per_iter *= 1e6; // from seconds to microseconds return stat; } static BenchmarkStat BenchmarkSizesToMergeMap(int32_t num_src, DeviceType device_type) { ContextPtr context; if (device_type == kCpu) { context = GetCpuContext(); } else { K2_CHECK_EQ(device_type, kCuda); context = GetCudaContext(); } std::vector<int32_t> sizes(num_src); int32_t tot_size = 0; for (int32_t n = 0; n != num_src; ++n) { int32_t cur_size = RandInt(0, 1000); sizes[n] = cur_size; tot_size += cur_size; } BenchmarkStat stat; stat.op_name = "SizesToMergeMap_" + std::to_string(num_src) + "_" + std::to_string(tot_size) + "_" + std::to_string(tot_size / num_src); int32_t num_iter = 20; stat.num_iter = num_iter; stat.problem_size = num_src; stat.device_type = device_type; stat.eplased_per_iter = BenchmarkOp( num_iter, context, (Array1<uint32_t>(*)(ContextPtr, const std::vector<int32_t> &))( &SizesToMergeMap), context, sizes); stat.eplased_per_iter *= 1e6; // from seconds to microseconds return stat; } template <typename T> static void RegisterBenchmarkExclusiveSum(DeviceType device_type) { std::vector<int32_t> problems_sizes = {100, 500, 1000, 2000, 5000, 10000, 100000}; for (auto s : problems_sizes) { std::string name = GenerateBenchmarkName<T>("ExclusiveSum", device_type) + "_" + std::to_string(s); RegisterBenchmark(name, [s, device_type]() -> BenchmarkStat { return BenchmarkExclusiveSum<T>(s, device_type); }); } } static void RegisterBenchmarkRowSplitsToRowIds(DeviceType device_type) { std::vector<int32_t> problems_sizes = {100, 500, 1000, 2000, 5000, 10000, 100000}; for (auto s : problems_sizes) { std::string name = GenerateBenchmarkName<int32_t>("RowSplitsToRowIds", device_type) + "_" + std::to_string(s); RegisterBenchmark(name, [s, device_type]() -> BenchmarkStat { return BenchmarkRowSplitsToRowIds(s, device_type); }); } } static void RegisterBenchmarkRowIdsToRowSplits(DeviceType device_type) { std::vector<int32_t> problems_sizes = {100, 500, 1000, 2000, 5000, 10000, 100000}; for (auto s : problems_sizes) { std::string name = GenerateBenchmarkName<int32_t>("RowIdsToRowSplits", device_type) + "_" + std::to_string(s); RegisterBenchmark(name, [s, device_type]() -> BenchmarkStat { return BenchmarkRowIdsToRowSplits(s, device_type); }); } } template <typename T> static void RegisterBenchmarkAppend(DeviceType device_type) { // problem_sizes here is the number of arrays to append std::vector<int32_t> problems_sizes = {10, 50, 100, 200, 500, 1000, 2000, 5000, 10000}; for (auto s : problems_sizes) { std::string name = 
GenerateBenchmarkName<T>("Append", device_type) + "_" + std::to_string(s); RegisterBenchmark(name, [s, device_type]() -> BenchmarkStat { return BenchmarkAppend<T>(s, device_type); }); } } static void RegisterBenchmarkSpliceRowSplits(DeviceType device_type) { // problem_sizes here is the number of arrays that we feed into // `SpliceRowSplits` std::vector<int32_t> problems_sizes = {10, 50, 100, 200, 500, 1000, 2000, 5000, 10000}; for (auto s : problems_sizes) { std::string name = GenerateBenchmarkName<int32_t>("SpliceRowSplits", device_type) + "_" + std::to_string(s); RegisterBenchmark(name, [s, device_type]() -> BenchmarkStat { return BenchmarkSpliceRowSplits(s, device_type); }); } } static void RegisterBenchmarkSizesToMergeMap(DeviceType device_type) { // problem_sizes here is the `sizes.size()` in // SizesToMergeMap(ContextPtr c, const std::vector<int32_t> sizes). std::vector<int32_t> problems_sizes = {3, 5, 10, 20, 50, 100, 200, 500, 1000, 2000, 5000, 10000}; for (auto s : problems_sizes) { std::string name = GenerateBenchmarkName<int32_t>("SizesToMergeMap", device_type) + "_" + std::to_string(s); RegisterBenchmark(name, [s, device_type]() -> BenchmarkStat { return BenchmarkSizesToMergeMap(s, device_type); }); } } static void RunArrayOpsBenchmark() { PrintEnvironmentInfo(); RegisterBenchmarkExclusiveSum<int32_t>(kCpu); RegisterBenchmarkExclusiveSum<int32_t>(kCuda); RegisterBenchmarkRowSplitsToRowIds(kCpu); RegisterBenchmarkRowSplitsToRowIds(kCuda); RegisterBenchmarkRowIdsToRowSplits(kCpu); RegisterBenchmarkRowIdsToRowSplits(kCuda); RegisterBenchmarkAppend<int32_t>(kCuda); RegisterBenchmarkSpliceRowSplits(kCuda); RegisterBenchmarkSizesToMergeMap(kCuda); // Users can set a regular expression via environment // variable `K2_BENCHMARK_FILTER` such that only benchmarks // with name matching the pattern are candidates to run. const char *filter = std::getenv("K2_BENCHMARK_FILTER"); if (filter != nullptr) FilterRegisteredBenchmarks(filter); std::vector<BenchmarkRun> results = RunBechmarks(); std::cout << BenchmarkRun::GetFieldsName() << "\n"; for (const auto &r : results) { std::cout << r << "\n"; } } } // namespace k2 int main() { k2::RunArrayOpsBenchmark(); return 0; }
488dfd6b81f1b6eb5a331dd21f471a271826a1f8.cu
/** * Copyright (c) 2020 Xiaomi Corporation (authors: Fangjun Kuang) * * See LICENSE for clarification regarding multiple authors */ #include <cstdlib> #include "k2/csrc/array_ops.h" #include "k2/csrc/benchmark/benchmark.h" #include "k2/csrc/test_utils.h" namespace k2 { template <typename T> static BenchmarkStat BenchmarkExclusiveSum(int32_t dim, DeviceType device_type) { ContextPtr context; if (device_type == kCpu) { context = GetCpuContext(); } else { K2_CHECK_EQ(device_type, kCuda); context = GetCudaContext(); } int32_t num_iter = std::min(500, 1000000 / dim); Array1<T> src = RandUniformArray1<T>(context, dim, -1000, 1000, GetSeed()); BenchmarkStat stat; stat.op_name = "ExclusiveSum"; stat.num_iter = num_iter; stat.problem_size = dim; stat.dtype_name = TraitsOf(DtypeOf<T>::dtype).Name(); stat.device_type = device_type; // there are overloads of ExclusiveSum, so we use an explicit conversion here. stat.eplased_per_iter = BenchmarkOp(num_iter, context, (Array1<T>(*)(const Array1<T> &))(&ExclusiveSum<T>), src); stat.eplased_per_iter *= 1e6; // from seconds to microseconds return stat; } static BenchmarkStat BenchmarkRowSplitsToRowIds(int32_t dim, DeviceType device_type) { ContextPtr context; if (device_type == kCpu) { context = GetCpuContext(); } else { K2_CHECK_EQ(device_type, kCuda); context = GetCudaContext(); } int32_t num_iter = std::min(500, 1000000 / dim); Array1<int32_t> sizes = RandUniformArray1<int32_t>(context, dim, 0, 1000, GetSeed()); Array1<int32_t> row_splits = ExclusiveSum(sizes); Array1<int32_t> row_ids(context, row_splits.Back()); BenchmarkStat stat; stat.op_name = "RowSplitsToRowIds_" + std::to_string(row_ids.Dim()); stat.num_iter = num_iter; stat.problem_size = dim; stat.dtype_name = TraitsOf(DtypeOf<int32_t>::dtype).Name(); stat.device_type = device_type; // there are overloads of RowSplitsToRowIds, // so we use an explicit conversion here. stat.eplased_per_iter = BenchmarkOp(num_iter, context, (void (*)(const Array1<int32_t> &, Array1<int32_t> *))( &RowSplitsToRowIds), row_splits, &row_ids); stat.eplased_per_iter *= 1e6; // from seconds to microseconds return stat; } static BenchmarkStat BenchmarkRowIdsToRowSplits(int32_t dim, DeviceType device_type) { ContextPtr context; if (device_type == kCpu) { context = GetCpuContext(); } else { K2_CHECK_EQ(device_type, kCuda); context = GetCudaContext(); } int32_t num_iter = std::min(500, 1000000 / dim); Array1<int32_t> sizes = RandUniformArray1<int32_t>(context, dim, 0, 1000, GetSeed()); Array1<int32_t> row_splits = ExclusiveSum(sizes); Array1<int32_t> row_ids(context, row_splits.Back()); RowSplitsToRowIds(row_splits, &row_ids); BenchmarkStat stat; stat.op_name = "RowIdsToRowSplits"; stat.num_iter = num_iter; stat.problem_size = dim; stat.dtype_name = TraitsOf(DtypeOf<int32_t>::dtype).Name(); stat.device_type = device_type; // there are overloads of RowIdsToRowSplits, // so we use an explicit conversion here. 
stat.eplased_per_iter = BenchmarkOp(num_iter, context, (void (*)(const Array1<int32_t> &, Array1<int32_t> *))( &RowIdsToRowSplits), row_ids, &row_splits); stat.eplased_per_iter *= 1e6; // from seconds to microseconds return stat; } template <typename T> static BenchmarkStat BenchmarkAppend(int32_t num_array, DeviceType device_type) { ContextPtr context; if (device_type == kCpu) { context = GetCpuContext(); } else { K2_CHECK_EQ(device_type, kCuda); context = GetCudaContext(); } std::vector<Array1<T>> arrays_vec(num_array); std::vector<const Array1<T> *> arrays(num_array); int32_t total_size = 0, max_size = 0; // notice `j != num_array - 1` below, we may push a very long array // after the loop for (int32_t j = 0; j != num_array - 1; ++j) { int32_t curr_array_size = RandInt(0, 10000); std::vector<T> data(curr_array_size); std::iota(data.begin(), data.end(), total_size); total_size += curr_array_size; arrays_vec[j] = Array1<T>(context, data); arrays[j] = &arrays_vec[j]; if (curr_array_size > max_size) max_size = curr_array_size; } { // below we may generate an array with very large size depend on the value // of RandInt(0,1) int32_t average_size = total_size / num_array; int32_t curr_array_size = RandInt(0, 1) == 0 ? RandInt(0, 10000) : average_size * 10; std::vector<T> data(curr_array_size); std::iota(data.begin(), data.end(), total_size); total_size += curr_array_size; arrays_vec[num_array - 1] = Array1<T>(context, data); arrays[num_array - 1] = &arrays_vec[num_array - 1]; if (curr_array_size > max_size) max_size = curr_array_size; } bool is_balanced = (max_size < 2 * (total_size / num_array) + 512); const Array1<T> **src = arrays.data(); BenchmarkStat stat; stat.op_name = "Append_" + std::to_string(num_array) + "_" + std::to_string(total_size) + "_" + std::to_string(total_size / num_array) + "_" + std::to_string(max_size) + "_" + std::to_string(is_balanced); int32_t num_iter = 20; stat.num_iter = num_iter; stat.problem_size = num_array; stat.dtype_name = TraitsOf(DtypeOf<T>::dtype).Name(); stat.device_type = device_type; stat.eplased_per_iter = BenchmarkOp( num_iter, context, (Array1<T>(*)(ContextPtr, int32_t, const Array1<T> **))(&Append<T>), context, num_array, src); stat.eplased_per_iter *= 1e6; // from seconds to microseconds return stat; } static BenchmarkStat BenchmarkSpliceRowSplits(int32_t num_array, DeviceType device_type) { ContextPtr context; if (device_type == kCpu) { context = GetCpuContext(); } else { K2_CHECK_EQ(device_type, kCuda); context = GetCudaContext(); } std::vector<Array1<int32_t>> arrays_vec(num_array); std::vector<const Array1<int32_t> *> arrays(num_array); int32_t total_size = 0, max_size = 0, total_num_elem = 0; // notice `j != num_array - 1` below, we may push a very long array // after the loop for (int32_t j = 0; j != num_array - 1; ++j) { int32_t num_elements = RandInt(0, 10000); total_num_elem += num_elements; RaggedShape shape = RandomRaggedShape(false, 2, 2, num_elements, num_elements); Array1<int32_t> row_splits = shape.RowSplits(1).To(context); int32_t array_size = row_splits.Dim(); total_size += array_size; arrays_vec[j] = row_splits; arrays[j] = &arrays_vec[j]; if (array_size > max_size) max_size = array_size; } { // below we may generate an array with very large size depend on the value // of RandInt(0,1) int32_t average_size = total_num_elem / num_array; int32_t num_elements = RandInt(0, 1) == 0 ? 
RandInt(0, 10000) : average_size * 10; RaggedShape shape = RandomRaggedShape(false, 2, 2, num_elements, num_elements); Array1<int32_t> row_splits = shape.RowSplits(1).To(context); int32_t array_size = row_splits.Dim(); total_size += array_size; arrays_vec[num_array - 1] = row_splits; arrays[num_array - 1] = &arrays_vec[num_array - 1]; if (array_size > max_size) max_size = array_size; } bool is_balanced = (max_size < 2 * (total_size / num_array) + 512); const Array1<int32_t> **src = arrays.data(); BenchmarkStat stat; stat.op_name = "SpliceRowSplits_" + std::to_string(num_array) + "_" + std::to_string(total_size) + "_" + std::to_string(total_size / num_array) + "_" + std::to_string(max_size) + "_" + std::to_string(is_balanced); int32_t num_iter = 20; stat.num_iter = num_iter; stat.problem_size = num_array; stat.device_type = device_type; stat.eplased_per_iter = BenchmarkOp( num_iter, context, (Array1<int32_t>(*)(int32_t, const Array1<int32_t> **))(&SpliceRowSplits), num_array, src); stat.eplased_per_iter *= 1e6; // from seconds to microseconds return stat; } static BenchmarkStat BenchmarkSizesToMergeMap(int32_t num_src, DeviceType device_type) { ContextPtr context; if (device_type == kCpu) { context = GetCpuContext(); } else { K2_CHECK_EQ(device_type, kCuda); context = GetCudaContext(); } std::vector<int32_t> sizes(num_src); int32_t tot_size = 0; for (int32_t n = 0; n != num_src; ++n) { int32_t cur_size = RandInt(0, 1000); sizes[n] = cur_size; tot_size += cur_size; } BenchmarkStat stat; stat.op_name = "SizesToMergeMap_" + std::to_string(num_src) + "_" + std::to_string(tot_size) + "_" + std::to_string(tot_size / num_src); int32_t num_iter = 20; stat.num_iter = num_iter; stat.problem_size = num_src; stat.device_type = device_type; stat.eplased_per_iter = BenchmarkOp( num_iter, context, (Array1<uint32_t>(*)(ContextPtr, const std::vector<int32_t> &))( &SizesToMergeMap), context, sizes); stat.eplased_per_iter *= 1e6; // from seconds to microseconds return stat; } template <typename T> static void RegisterBenchmarkExclusiveSum(DeviceType device_type) { std::vector<int32_t> problems_sizes = {100, 500, 1000, 2000, 5000, 10000, 100000}; for (auto s : problems_sizes) { std::string name = GenerateBenchmarkName<T>("ExclusiveSum", device_type) + "_" + std::to_string(s); RegisterBenchmark(name, [s, device_type]() -> BenchmarkStat { return BenchmarkExclusiveSum<T>(s, device_type); }); } } static void RegisterBenchmarkRowSplitsToRowIds(DeviceType device_type) { std::vector<int32_t> problems_sizes = {100, 500, 1000, 2000, 5000, 10000, 100000}; for (auto s : problems_sizes) { std::string name = GenerateBenchmarkName<int32_t>("RowSplitsToRowIds", device_type) + "_" + std::to_string(s); RegisterBenchmark(name, [s, device_type]() -> BenchmarkStat { return BenchmarkRowSplitsToRowIds(s, device_type); }); } } static void RegisterBenchmarkRowIdsToRowSplits(DeviceType device_type) { std::vector<int32_t> problems_sizes = {100, 500, 1000, 2000, 5000, 10000, 100000}; for (auto s : problems_sizes) { std::string name = GenerateBenchmarkName<int32_t>("RowIdsToRowSplits", device_type) + "_" + std::to_string(s); RegisterBenchmark(name, [s, device_type]() -> BenchmarkStat { return BenchmarkRowIdsToRowSplits(s, device_type); }); } } template <typename T> static void RegisterBenchmarkAppend(DeviceType device_type) { // problem_sizes here is the number of arrays to append std::vector<int32_t> problems_sizes = {10, 50, 100, 200, 500, 1000, 2000, 5000, 10000}; for (auto s : problems_sizes) { std::string name = 
GenerateBenchmarkName<T>("Append", device_type) + "_" + std::to_string(s); RegisterBenchmark(name, [s, device_type]() -> BenchmarkStat { return BenchmarkAppend<T>(s, device_type); }); } } static void RegisterBenchmarkSpliceRowSplits(DeviceType device_type) { // problem_sizes here is the number of arrays that we feed into // `SpliceRowSplits` std::vector<int32_t> problems_sizes = {10, 50, 100, 200, 500, 1000, 2000, 5000, 10000}; for (auto s : problems_sizes) { std::string name = GenerateBenchmarkName<int32_t>("SpliceRowSplits", device_type) + "_" + std::to_string(s); RegisterBenchmark(name, [s, device_type]() -> BenchmarkStat { return BenchmarkSpliceRowSplits(s, device_type); }); } } static void RegisterBenchmarkSizesToMergeMap(DeviceType device_type) { // problem_sizes here is the `sizes.size()` in // SizesToMergeMap(ContextPtr c, const std::vector<int32_t> sizes). std::vector<int32_t> problems_sizes = {3, 5, 10, 20, 50, 100, 200, 500, 1000, 2000, 5000, 10000}; for (auto s : problems_sizes) { std::string name = GenerateBenchmarkName<int32_t>("SizesToMergeMap", device_type) + "_" + std::to_string(s); RegisterBenchmark(name, [s, device_type]() -> BenchmarkStat { return BenchmarkSizesToMergeMap(s, device_type); }); } } static void RunArrayOpsBenchmark() { PrintEnvironmentInfo(); RegisterBenchmarkExclusiveSum<int32_t>(kCpu); RegisterBenchmarkExclusiveSum<int32_t>(kCuda); RegisterBenchmarkRowSplitsToRowIds(kCpu); RegisterBenchmarkRowSplitsToRowIds(kCuda); RegisterBenchmarkRowIdsToRowSplits(kCpu); RegisterBenchmarkRowIdsToRowSplits(kCuda); RegisterBenchmarkAppend<int32_t>(kCuda); RegisterBenchmarkSpliceRowSplits(kCuda); RegisterBenchmarkSizesToMergeMap(kCuda); // Users can set a regular expression via environment // variable `K2_BENCHMARK_FILTER` such that only benchmarks // with name matching the pattern are candidates to run. const char *filter = std::getenv("K2_BENCHMARK_FILTER"); if (filter != nullptr) FilterRegisteredBenchmarks(filter); std::vector<BenchmarkRun> results = RunBechmarks(); std::cout << BenchmarkRun::GetFieldsName() << "\n"; for (const auto &r : results) { std::cout << r << "\n"; } } } // namespace k2 int main() { k2::RunArrayOpsBenchmark(); return 0; }
40c396d57b9981003aa7fad928dedd6983592187.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> /* * Initialize array values on the host. */ void init(int *a, int N) { int i; for (i = 0; i < N; ++i) { a[i] = i; } } /* * Double elements in parallel on the GPU. */ __global__ void doubleElements(int *a, int N) { int i; i = blockIdx.x * blockDim.x + threadIdx.x; if (i < N) { a[i] *= 2; } } /* * Check all elements have been doubled on the host. */ bool checkElementsAreDoubled(int *a, int N) { int i; for (i = 0; i < N; ++i) { if (a[i] != i*2) return false; } return true; } int main() { int N = 100; int *a; size_t size = N * sizeof(int); /* * Refactor this memory allocation to provide a pointer * `a` that can be used on both the host and the device. */ // a = (int *)malloc(size); hipMallocManaged(&a, size); init(a, N); size_t threads_per_block = 10; size_t number_of_blocks = 10; /* * This launch will not work until the pointer `a` is also * available to the device. */ hipLaunchKernelGGL(( doubleElements), dim3(number_of_blocks), dim3(threads_per_block), 0, 0, a, N); hipDeviceSynchronize(); bool areDoubled = checkElementsAreDoubled(a, N); printf("All elements were doubled? %s\n", areDoubled ? "TRUE" : "FALSE"); /* * Refactor to free memory that has been allocated to be * accessed by both the host and the device. */ hipFree(a); }
40c396d57b9981003aa7fad928dedd6983592187.cu
#include <stdio.h> /* * Initialize array values on the host. */ void init(int *a, int N) { int i; for (i = 0; i < N; ++i) { a[i] = i; } } /* * Double elements in parallel on the GPU. */ __global__ void doubleElements(int *a, int N) { int i; i = blockIdx.x * blockDim.x + threadIdx.x; if (i < N) { a[i] *= 2; } } /* * Check all elements have been doubled on the host. */ bool checkElementsAreDoubled(int *a, int N) { int i; for (i = 0; i < N; ++i) { if (a[i] != i*2) return false; } return true; } int main() { int N = 100; int *a; size_t size = N * sizeof(int); /* * Refactor this memory allocation to provide a pointer * `a` that can be used on both the host and the device. */ // a = (int *)malloc(size); cudaMallocManaged(&a, size); init(a, N); size_t threads_per_block = 10; size_t number_of_blocks = 10; /* * This launch will not work until the pointer `a` is also * available to the device. */ doubleElements<<<number_of_blocks, threads_per_block>>>(a, N); cudaDeviceSynchronize(); bool areDoubled = checkElementsAreDoubled(a, N); printf("All elements were doubled? %s\n", areDoubled ? "TRUE" : "FALSE"); /* * Refactor to free memory that has been allocated to be * accessed by both the host and the device. */ cudaFree(a); }
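This pair shows the managed-memory pattern and the hipify rewrites it triggers: `cudaMallocManaged` becomes `hipMallocManaged`, the `<<<blocks, threads>>>` launch becomes `hipLaunchKernelGGL(...)`, and `cudaDeviceSynchronize`/`cudaFree` become their `hip` counterparts. A minimal CUDA-side sketch of the same pattern, with the HIP forms noted in comments; the `scale` kernel is a hypothetical stand-in for `doubleElements`:

```cpp
#include <cstdio>
#include <cuda_runtime.h>

// Hypothetical kernel standing in for doubleElements above.
__global__ void scale(float *a, int n, float s) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) a[i] *= s;
}

int main() {
  const int n = 1 << 10;
  float *a = nullptr;

  cudaMallocManaged(&a, n * sizeof(float));  // hipify: hipMallocManaged(&a, ...)
  for (int i = 0; i < n; ++i) a[i] = 1.0f;

  // hipify: hipLaunchKernelGGL(scale, dim3((n+255)/256), dim3(256), 0, 0, a, n, 2.0f)
  scale<<<(n + 255) / 256, 256>>>(a, n, 2.0f);
  cudaDeviceSynchronize();                   // hipify: hipDeviceSynchronize()

  std::printf("a[0] = %f\n", a[0]);
  cudaFree(a);                               // hipify: hipFree(a)
  return 0;
}
```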
0f002f0af5d34957308986409705404ede313a4b.hip
// !!! This is a file automatically generated by hipify!!! #include "CUAPI.h" #include "CUFLU.h" #ifdef GPU extern real (*d_Flu_Array_F_In )[FLU_NIN ][ CUBE(FLU_NXT) ]; extern real (*d_Flu_Array_F_Out)[FLU_NOUT][ CUBE(PS2) ]; extern real (*d_Flux_Array)[9][NFLUX_TOTAL][ SQR(PS2) ]; #ifdef UNSPLIT_GRAVITY extern double (*d_Corner_Array_F)[3]; #endif #ifdef DUAL_ENERGY extern char (*d_DE_Array_F_Out)[ CUBE(PS2) ]; #endif extern real *d_dt_Array_T; extern real (*d_Flu_Array_T)[NCOMP_FLUID][ CUBE(PS1) ]; // global memory arrays in different models #if ( MODEL == HYDRO ) #if ( FLU_SCHEME == MHM || FLU_SCHEME == MHM_RP || FLU_SCHEME == CTU ) extern real (*d_PriVar) [NCOMP_TOTAL][ CUBE(FLU_NXT) ]; extern real (*d_Slope_PPM)[3][NCOMP_TOTAL][ CUBE(N_SLOPE_PPM) ]; extern real (*d_FC_Var) [6][NCOMP_TOTAL][ CUBE(N_FC_VAR) ]; extern real (*d_FC_Flux) [3][NCOMP_TOTAL][ CUBE(N_FC_FLUX) ]; #endif // #if ( FLU_SCHEME == MHM || FLU_SCHEME == MHM_RP || FLU_SCHEME == CTU ) #elif ( MODEL == MHD ) #warning : WAIT MHD !!! #elif ( MODEL != ELBDM ) #warning : DO YOU WANT TO ADD SOMETHING HERE FOR THE NEW MODEL ?? #endif // MODEL extern hipStream_t *Stream; //------------------------------------------------------------------------------------------------------- // Function : CUAPI_MemFree_Fluid // Description : Free the GPU and CPU memory previously allocated by CUAPI_MemAllocate_Fluid() // // Parameter : GPU_NStream : Number of CUDA streams for the asynchronous memory copy //------------------------------------------------------------------------------------------------------- void CUAPI_MemFree_Fluid( const int GPU_NStream ) { // free the device memory (used by all models) if ( d_Flu_Array_F_In != NULL ) { CUDA_CHECK_ERROR( hipFree( d_Flu_Array_F_In ) ); d_Flu_Array_F_In = NULL; } if ( d_Flu_Array_F_Out != NULL ) { CUDA_CHECK_ERROR( hipFree( d_Flu_Array_F_Out ) ); d_Flu_Array_F_Out = NULL; } if ( d_Flux_Array != NULL ) { CUDA_CHECK_ERROR( hipFree( d_Flux_Array ) ); d_Flux_Array = NULL; } # ifdef UNSPLIT_GRAVITY if ( d_Corner_Array_F != NULL ) { CUDA_CHECK_ERROR( hipFree( d_Corner_Array_F ) ); d_Corner_Array_F = NULL; } # endif # ifdef DUAL_ENERGY if ( d_DE_Array_F_Out != NULL ) { CUDA_CHECK_ERROR( hipFree( d_DE_Array_F_Out ) ); d_DE_Array_F_Out = NULL; } # endif if ( d_dt_Array_T != NULL ) { CUDA_CHECK_ERROR( hipFree( d_dt_Array_T ) ); d_dt_Array_T = NULL; } if ( d_Flu_Array_T != NULL ) { CUDA_CHECK_ERROR( hipFree( d_Flu_Array_T ) ); d_Flu_Array_T = NULL; } // free the device memory (used by different models) # if ( MODEL == HYDRO ) # if ( FLU_SCHEME == MHM || FLU_SCHEME == MHM_RP || FLU_SCHEME == CTU ) if ( d_PriVar != NULL ) { CUDA_CHECK_ERROR( hipFree( d_PriVar ) ); d_PriVar = NULL; } if ( d_Slope_PPM != NULL ) { CUDA_CHECK_ERROR( hipFree( d_Slope_PPM ) ); d_Slope_PPM = NULL; } if ( d_FC_Var != NULL ) { CUDA_CHECK_ERROR( hipFree( d_FC_Var ) ); d_FC_Var = NULL; } if ( d_FC_Flux != NULL ) { CUDA_CHECK_ERROR( hipFree( d_FC_Flux ) ); d_FC_Flux = NULL; } # endif // #if ( FLU_SCHEME == MHM || FLU_SCHEME == MHM_RP || FLU_SCHEME == CTU ) # elif ( MODEL == MHD ) # warning : WAIT MHD !!! # elif ( MODEL != ELBDM ) # warning : DO YOU WANT TO ADD SOMETHING HERE FOR THE NEW MODEL ?? 
# endif // MODEL // free the host memory allocated by CUDA for (int t=0; t<2; t++) { if ( h_Flu_Array_F_In [t] != NULL ) { CUDA_CHECK_ERROR( hipHostFree( h_Flu_Array_F_In [t] ) ); h_Flu_Array_F_In [t] = NULL; } if ( h_Flu_Array_F_Out[t] != NULL ) { CUDA_CHECK_ERROR( hipHostFree( h_Flu_Array_F_Out[t] ) ); h_Flu_Array_F_Out[t] = NULL; } if ( h_Flux_Array [t] != NULL ) { CUDA_CHECK_ERROR( hipHostFree( h_Flux_Array [t] ) ); h_Flux_Array [t] = NULL; } # ifdef UNSPLIT_GRAVITY if ( h_Corner_Array_F [t] != NULL ) { CUDA_CHECK_ERROR( hipHostFree( h_Corner_Array_F [t] ) ); h_Corner_Array_F [t] = NULL; } # endif # ifdef DUAL_ENERGY if ( h_DE_Array_F_Out [t] != NULL ) { CUDA_CHECK_ERROR( hipHostFree( h_DE_Array_F_Out [t] ) ); h_DE_Array_F_Out [t] = NULL; } # endif if ( h_dt_Array_T [t] != NULL ) { CUDA_CHECK_ERROR( hipHostFree( h_dt_Array_T [t] ) ); h_dt_Array_T [t] = NULL; } if ( h_Flu_Array_T [t] != NULL ) { CUDA_CHECK_ERROR( hipHostFree( h_Flu_Array_T [t] ) ); h_Flu_Array_T [t] = NULL; } } // for (int t=0; t<2; t++) // destroy streams if ( Stream != NULL ) { for (int s=0; s<GPU_NStream; s++) CUDA_CHECK_ERROR( hipStreamDestroy( Stream[s] ) ); delete [] Stream; Stream = NULL; } } // FUNCTION : CUAPI_MemFree_Fluid #endif // #ifdef GPU
0f002f0af5d34957308986409705404ede313a4b.cu
#include "CUAPI.h" #include "CUFLU.h" #ifdef GPU extern real (*d_Flu_Array_F_In )[FLU_NIN ][ CUBE(FLU_NXT) ]; extern real (*d_Flu_Array_F_Out)[FLU_NOUT][ CUBE(PS2) ]; extern real (*d_Flux_Array)[9][NFLUX_TOTAL][ SQR(PS2) ]; #ifdef UNSPLIT_GRAVITY extern double (*d_Corner_Array_F)[3]; #endif #ifdef DUAL_ENERGY extern char (*d_DE_Array_F_Out)[ CUBE(PS2) ]; #endif extern real *d_dt_Array_T; extern real (*d_Flu_Array_T)[NCOMP_FLUID][ CUBE(PS1) ]; // global memory arrays in different models #if ( MODEL == HYDRO ) #if ( FLU_SCHEME == MHM || FLU_SCHEME == MHM_RP || FLU_SCHEME == CTU ) extern real (*d_PriVar) [NCOMP_TOTAL][ CUBE(FLU_NXT) ]; extern real (*d_Slope_PPM)[3][NCOMP_TOTAL][ CUBE(N_SLOPE_PPM) ]; extern real (*d_FC_Var) [6][NCOMP_TOTAL][ CUBE(N_FC_VAR) ]; extern real (*d_FC_Flux) [3][NCOMP_TOTAL][ CUBE(N_FC_FLUX) ]; #endif // #if ( FLU_SCHEME == MHM || FLU_SCHEME == MHM_RP || FLU_SCHEME == CTU ) #elif ( MODEL == MHD ) #warning : WAIT MHD !!! #elif ( MODEL != ELBDM ) #warning : DO YOU WANT TO ADD SOMETHING HERE FOR THE NEW MODEL ?? #endif // MODEL extern cudaStream_t *Stream; //------------------------------------------------------------------------------------------------------- // Function : CUAPI_MemFree_Fluid // Description : Free the GPU and CPU memory previously allocated by CUAPI_MemAllocate_Fluid() // // Parameter : GPU_NStream : Number of CUDA streams for the asynchronous memory copy //------------------------------------------------------------------------------------------------------- void CUAPI_MemFree_Fluid( const int GPU_NStream ) { // free the device memory (used by all models) if ( d_Flu_Array_F_In != NULL ) { CUDA_CHECK_ERROR( cudaFree( d_Flu_Array_F_In ) ); d_Flu_Array_F_In = NULL; } if ( d_Flu_Array_F_Out != NULL ) { CUDA_CHECK_ERROR( cudaFree( d_Flu_Array_F_Out ) ); d_Flu_Array_F_Out = NULL; } if ( d_Flux_Array != NULL ) { CUDA_CHECK_ERROR( cudaFree( d_Flux_Array ) ); d_Flux_Array = NULL; } # ifdef UNSPLIT_GRAVITY if ( d_Corner_Array_F != NULL ) { CUDA_CHECK_ERROR( cudaFree( d_Corner_Array_F ) ); d_Corner_Array_F = NULL; } # endif # ifdef DUAL_ENERGY if ( d_DE_Array_F_Out != NULL ) { CUDA_CHECK_ERROR( cudaFree( d_DE_Array_F_Out ) ); d_DE_Array_F_Out = NULL; } # endif if ( d_dt_Array_T != NULL ) { CUDA_CHECK_ERROR( cudaFree( d_dt_Array_T ) ); d_dt_Array_T = NULL; } if ( d_Flu_Array_T != NULL ) { CUDA_CHECK_ERROR( cudaFree( d_Flu_Array_T ) ); d_Flu_Array_T = NULL; } // free the device memory (used by different models) # if ( MODEL == HYDRO ) # if ( FLU_SCHEME == MHM || FLU_SCHEME == MHM_RP || FLU_SCHEME == CTU ) if ( d_PriVar != NULL ) { CUDA_CHECK_ERROR( cudaFree( d_PriVar ) ); d_PriVar = NULL; } if ( d_Slope_PPM != NULL ) { CUDA_CHECK_ERROR( cudaFree( d_Slope_PPM ) ); d_Slope_PPM = NULL; } if ( d_FC_Var != NULL ) { CUDA_CHECK_ERROR( cudaFree( d_FC_Var ) ); d_FC_Var = NULL; } if ( d_FC_Flux != NULL ) { CUDA_CHECK_ERROR( cudaFree( d_FC_Flux ) ); d_FC_Flux = NULL; } # endif // #if ( FLU_SCHEME == MHM || FLU_SCHEME == MHM_RP || FLU_SCHEME == CTU ) # elif ( MODEL == MHD ) # warning : WAIT MHD !!! # elif ( MODEL != ELBDM ) # warning : DO YOU WANT TO ADD SOMETHING HERE FOR THE NEW MODEL ?? 
# endif // MODEL // free the host memory allocated by CUDA for (int t=0; t<2; t++) { if ( h_Flu_Array_F_In [t] != NULL ) { CUDA_CHECK_ERROR( cudaFreeHost( h_Flu_Array_F_In [t] ) ); h_Flu_Array_F_In [t] = NULL; } if ( h_Flu_Array_F_Out[t] != NULL ) { CUDA_CHECK_ERROR( cudaFreeHost( h_Flu_Array_F_Out[t] ) ); h_Flu_Array_F_Out[t] = NULL; } if ( h_Flux_Array [t] != NULL ) { CUDA_CHECK_ERROR( cudaFreeHost( h_Flux_Array [t] ) ); h_Flux_Array [t] = NULL; } # ifdef UNSPLIT_GRAVITY if ( h_Corner_Array_F [t] != NULL ) { CUDA_CHECK_ERROR( cudaFreeHost( h_Corner_Array_F [t] ) ); h_Corner_Array_F [t] = NULL; } # endif # ifdef DUAL_ENERGY if ( h_DE_Array_F_Out [t] != NULL ) { CUDA_CHECK_ERROR( cudaFreeHost( h_DE_Array_F_Out [t] ) ); h_DE_Array_F_Out [t] = NULL; } # endif if ( h_dt_Array_T [t] != NULL ) { CUDA_CHECK_ERROR( cudaFreeHost( h_dt_Array_T [t] ) ); h_dt_Array_T [t] = NULL; } if ( h_Flu_Array_T [t] != NULL ) { CUDA_CHECK_ERROR( cudaFreeHost( h_Flu_Array_T [t] ) ); h_Flu_Array_T [t] = NULL; } } // for (int t=0; t<2; t++) // destroy streams if ( Stream != NULL ) { for (int s=0; s<GPU_NStream; s++) CUDA_CHECK_ERROR( cudaStreamDestroy( Stream[s] ) ); delete [] Stream; Stream = NULL; } } // FUNCTION : CUAPI_MemFree_Fluid #endif // #ifdef GPU
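The cleanup routine in this pair repeats one guard pattern throughout: test the pointer, free it with the matching API (`cudaFree`/`hipFree` for device memory, `cudaFreeHost`/`hipHostFree` for pinned host memory, `cudaStreamDestroy`/`hipStreamDestroy` for streams), then null it so a repeated call is harmless. A reduced sketch of that pattern with hypothetical buffer names, not the GAMER data structures:

```cpp
#include <cuda_runtime.h>

// Free-and-null pattern from CUAPI_MemFree_Fluid above, reduced to one device
// buffer, one pinned host buffer, and an array of streams.
static float *d_buf = nullptr;          // cudaMalloc     -> cudaFree      (hipFree)
static float *h_buf = nullptr;          // cudaMallocHost -> cudaFreeHost  (hipHostFree)
static cudaStream_t *streams = nullptr; // cudaStreamCreate -> cudaStreamDestroy (hipStreamDestroy)

void FreeAll(int n_streams) {
  if (d_buf != nullptr) { cudaFree(d_buf); d_buf = nullptr; }
  if (h_buf != nullptr) { cudaFreeHost(h_buf); h_buf = nullptr; }
  if (streams != nullptr) {
    for (int s = 0; s < n_streams; ++s) cudaStreamDestroy(streams[s]);
    delete[] streams;
    streams = nullptr;
  }
}

int main() {
  const int n_streams = 2;
  cudaMalloc(&d_buf, 1024 * sizeof(float));
  cudaMallocHost(&h_buf, 1024 * sizeof(float));
  streams = new cudaStream_t[n_streams];
  for (int s = 0; s < n_streams; ++s) cudaStreamCreate(&streams[s]);
  FreeAll(n_streams);  // safe to call again: every pointer is now nullptr
  return 0;
}
```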
36a64abab94775db0028267cc7bac50d4e1d9c4b.hip
// !!! This is a file automatically generated by hipify!!! /** general test, here could be anything **/ #include <minlin/minlin.h> #include <minlin/modules/threx/threx.h> #include <iostream> #include <stdio.h> /* printf in cuda */ #include <stdlib.h> /* atoi, strtol */ #include <limits> /* max value of double/float */ #include <omp.h> #include <hip/hip_runtime.h> #include <hip/hip_runtime_api.h> #include <device_launch_parameters.h> #include <hip/device_functions.h> #include <typeinfo> using namespace minlin::threx; MINLIN_INIT void fun_with_all(minlin::detail::all_type add_in){ std::cout << "it works!" << std::endl; } int main ( int argc, char *argv[] ) { /* what the hell is "all"? */ std::cout << "type of all: "; std::cout << typeid(all).name(); std::cout << std::endl; fun_with_all(all); }
36a64abab94775db0028267cc7bac50d4e1d9c4b.cu
/** general test, here could be anything **/ #include <minlin/minlin.h> #include <minlin/modules/threx/threx.h> #include <iostream> #include <stdio.h> /* printf in cuda */ #include <stdlib.h> /* atoi, strtol */ #include <limits> /* max value of double/float */ #include <omp.h> #include <cuda.h> #include <cuda_runtime_api.h> #include <device_launch_parameters.h> #include <device_functions.h> #include <typeinfo> using namespace minlin::threx; MINLIN_INIT void fun_with_all(minlin::detail::all_type add_in){ std::cout << "it works!" << std::endl; } int main ( int argc, char *argv[] ) { /* what the hell is "all"? */ std::cout << "type of all: "; std::cout << typeid(all).name(); std::cout << std::endl; fun_with_all(all); }
35d9b8e1335562b263ad6598607de60843cda3c8.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "deflate.h" #include <string.h> #include <stdio.h> /* =========================================================================== * Set match_start to the longest match starting at the given string and * return its length. Matches shorter or equal to prev_length are discarded, * in which case the result is equal to prev_length and match_start is * garbage. * IN assertions: cur_match is the head of the hash chain for the current * string (strstart) and its distance is <= MAX_DIST, and prev_length >= 1 * OUT assertion: the match length is not greater than s->lookahead. */ /* #ifndef ASMV */ /* Disabled */ /* For 80x86 and 680x0, an optimized version will be provided in match.asm or * match.S. The code will be functionally equivalent. */ __global__ void longest_match_ (deflate_state * s, IPos cur_match, int *cuda_return, Bytef *window , Posf *prev) { unsigned chain_length = s->max_chain_length; /* max hash chain length */ register Bytef *scan = window + s->strstart; /* current string */ register Bytef *match; /* matched string */ register int len; /* length of current match */ int best_len = s->prev_length; /* best match length so far */ int nice_match = s->nice_match; /* stop if match long enough */ IPos limit = s->strstart > (IPos) MAX_DIST (s) ? s->strstart - (IPos) MAX_DIST (s) : Z_NULL; /* Stop when cur_match becomes <= limit. To simplify the code, * we prevent matches with the string of window index 0. */ //Posf *prev = s->prev; uInt wmask = s->w_mask; #ifdef UNALIGNED_OK /* Compare two bytes at a time. Note: this is not always beneficial. * Try with and without -DUNALIGNED_OK to check. */ register Bytef *strend = s->window + s->strstart + MAX_MATCH - 1; register ush scan_start = *(ushf *) scan; register ush scan_end = *(ushf *) (scan + best_len - 1); #else register Bytef *strend = s->window + s->strstart + MAX_MATCH; register Byte scan_end1 = scan[best_len - 1]; register Byte scan_end = scan[best_len]; #endif /* The code is optimized for HASH_BITS >= 8 and MAX_MATCH-2 multiple of 16. * It is easy to get rid of this optimization if necessary. */ /*Assert (s->hash_bits >= 8 && MAX_MATCH == 258, "Code too clever");*/ /* Do not waste too much time if we already have a good match: */ if (s->prev_length >= s->good_match) { chain_length >>= 2; } /* Do not look for matches beyond the end of the input. This is necessary * to make deflate deterministic. */ if ((uInt) nice_match > s->lookahead) nice_match = s->lookahead; /* Assert ((ulg) s->strstart <= s->window_size - MIN_LOOKAHEAD, "need lookahead"); */ do { /* Assert (cur_match < s->strstart, "no future"); */ match = s->window + cur_match; /* Skip to next match if the match length cannot increase * or if the match length is less than 2. Note that the checks below * for insufficient lookahead only occur occasionally for performance * reasons. Therefore uninitialized memory will be accessed, and * conditional jumps will be made that depend on those values. * However the length of the match is limited to the lookahead, so * the output of deflate is not affected by the uninitialized values. */ #if (defined(UNALIGNED_OK) && MAX_MATCH == 258) /* This code assumes sizeof(unsigned short) == 2. Do not use * UNALIGNED_OK if your compiler uses a different size. 
*/ if (*(ushf *) (match + best_len - 1) != scan_end || *(ushf *) match != scan_start) continue; /* It is not necessary to compare scan[2] and match[2] since they are * always equal when the other bytes match, given that the hash keys * are equal and that HASH_BITS >= 8. Compare 2 bytes at a time at * strstart+3, +5, ... up to strstart+257. We check for insufficient * lookahead only every 4th comparison; the 128th check will be made * at strstart+257. If MAX_MATCH-2 is not a multiple of 8, it is * necessary to put more guard bytes at the end of the window, or * to check more often for insufficient lookahead. */ /* Assert (scan[2] == match[2], "scan[2]?"); */ scan++, match++; do { } while (*(ushf *) (scan += 2) == *(ushf *) (match += 2) && *(ushf *) (scan += 2) == *(ushf *) (match += 2) && *(ushf *) (scan += 2) == *(ushf *) (match += 2) && *(ushf *) (scan += 2) == *(ushf *) (match += 2) && scan < strend); /* The funny "do {}" generates better code on most compilers */ /* Here, scan <= window+strstart+257 */ /* Assert (scan <= s->window + (unsigned) (s->window_size - 1), "wild scan"); */ if (*scan == *match) scan++; len = (MAX_MATCH - 1) - (int) (strend - scan); scan = strend - (MAX_MATCH - 1); #else /* UNALIGNED_OK */ if (match[best_len] != scan_end || match[best_len - 1] != scan_end1 || *match != *scan || *++match != scan[1]) continue; /* The check at best_len-1 can be removed because it will be made * again later. (This heuristic is not always a win.) * It is not necessary to compare scan[2] and match[2] since they * are always equal when the other bytes match, given that * the hash keys are equal and that HASH_BITS >= 8. */ scan += 2, match++; /* Assert (*scan == *match, "match[2]?"); */ /* We check for insufficient lookahead only every 8th comparison; * the 256th check will be made at strstart+258. */ do { } while (*++scan == *++match && *++scan == *++match && *++scan == *++match && *++scan == *++match && *++scan == *++match && *++scan == *++match && *++scan == *++match && *++scan == *++match && scan < strend); /* Assert (scan <= s->window + (unsigned) (s->window_size - 1), "wild scan");*/ len = MAX_MATCH - (int) (strend - scan); scan = strend - MAX_MATCH; #endif /* UNALIGNED_OK */ if (len > best_len) { s->match_start = cur_match; best_len = len; if (len >= nice_match) break; #ifdef UNALIGNED_OK scan_end = *(ushf *) (scan + best_len - 1); #else scan_end1 = scan[best_len - 1]; scan_end = scan[best_len]; #endif } } while ((cur_match = prev[cur_match & wmask]) > limit && --chain_length != 0); if ((uInt) best_len <= s->lookahead) *cuda_return = (uInt) best_len; else *cuda_return = s->lookahead; } int longest_match_cuda(deflate_state * s, IPos cur_match) { deflate_state *state; Bytef *window; Posf *prev; int *cuda_return; hipMalloc(&state, sizeof(deflate_state)); hipMalloc(&prev, s->w_size*sizeof(Posf)); hipMalloc(&window, s->w_size*2*sizeof(Bytef)); hipMalloc(&cuda_return, sizeof(int)); hipMemcpy(prev, s->prev, s->w_size*sizeof(Posf), hipMemcpyHostToDevice); hipMemcpy(state, s, sizeof(deflate_state), hipMemcpyHostToDevice); hipMemcpy(window, s->window,s->w_size*2*sizeof(Bytef) , hipMemcpyHostToDevice); hipLaunchKernelGGL(( longest_match_), dim3(1),dim3(1), 0, 0, state, cur_match, cuda_return, window, prev); int host_return[1]; hipMemcpy (host_return, cuda_return, sizeof(int), hipMemcpyDeviceToHost); hipMemcpy (s, state, sizeof(deflate_state), hipMemcpyDeviceToHost); hipFree(state); hipFree(prev); hipFree(window); return host_return[0]; }
35d9b8e1335562b263ad6598607de60843cda3c8.cu
#include "deflate.h" #include <string.h> #include <stdio.h> /* =========================================================================== * Set match_start to the longest match starting at the given string and * return its length. Matches shorter or equal to prev_length are discarded, * in which case the result is equal to prev_length and match_start is * garbage. * IN assertions: cur_match is the head of the hash chain for the current * string (strstart) and its distance is <= MAX_DIST, and prev_length >= 1 * OUT assertion: the match length is not greater than s->lookahead. */ /* #ifndef ASMV */ /* Disabled */ /* For 80x86 and 680x0, an optimized version will be provided in match.asm or * match.S. The code will be functionally equivalent. */ __global__ void longest_match_ (deflate_state * s, IPos cur_match, int *cuda_return, Bytef *window , Posf *prev) { unsigned chain_length = s->max_chain_length; /* max hash chain length */ register Bytef *scan = window + s->strstart; /* current string */ register Bytef *match; /* matched string */ register int len; /* length of current match */ int best_len = s->prev_length; /* best match length so far */ int nice_match = s->nice_match; /* stop if match long enough */ IPos limit = s->strstart > (IPos) MAX_DIST (s) ? s->strstart - (IPos) MAX_DIST (s) : Z_NULL; /* Stop when cur_match becomes <= limit. To simplify the code, * we prevent matches with the string of window index 0. */ //Posf *prev = s->prev; uInt wmask = s->w_mask; #ifdef UNALIGNED_OK /* Compare two bytes at a time. Note: this is not always beneficial. * Try with and without -DUNALIGNED_OK to check. */ register Bytef *strend = s->window + s->strstart + MAX_MATCH - 1; register ush scan_start = *(ushf *) scan; register ush scan_end = *(ushf *) (scan + best_len - 1); #else register Bytef *strend = s->window + s->strstart + MAX_MATCH; register Byte scan_end1 = scan[best_len - 1]; register Byte scan_end = scan[best_len]; #endif /* The code is optimized for HASH_BITS >= 8 and MAX_MATCH-2 multiple of 16. * It is easy to get rid of this optimization if necessary. */ /*Assert (s->hash_bits >= 8 && MAX_MATCH == 258, "Code too clever");*/ /* Do not waste too much time if we already have a good match: */ if (s->prev_length >= s->good_match) { chain_length >>= 2; } /* Do not look for matches beyond the end of the input. This is necessary * to make deflate deterministic. */ if ((uInt) nice_match > s->lookahead) nice_match = s->lookahead; /* Assert ((ulg) s->strstart <= s->window_size - MIN_LOOKAHEAD, "need lookahead"); */ do { /* Assert (cur_match < s->strstart, "no future"); */ match = s->window + cur_match; /* Skip to next match if the match length cannot increase * or if the match length is less than 2. Note that the checks below * for insufficient lookahead only occur occasionally for performance * reasons. Therefore uninitialized memory will be accessed, and * conditional jumps will be made that depend on those values. * However the length of the match is limited to the lookahead, so * the output of deflate is not affected by the uninitialized values. */ #if (defined(UNALIGNED_OK) && MAX_MATCH == 258) /* This code assumes sizeof(unsigned short) == 2. Do not use * UNALIGNED_OK if your compiler uses a different size. */ if (*(ushf *) (match + best_len - 1) != scan_end || *(ushf *) match != scan_start) continue; /* It is not necessary to compare scan[2] and match[2] since they are * always equal when the other bytes match, given that the hash keys * are equal and that HASH_BITS >= 8. 
Compare 2 bytes at a time at * strstart+3, +5, ... up to strstart+257. We check for insufficient * lookahead only every 4th comparison; the 128th check will be made * at strstart+257. If MAX_MATCH-2 is not a multiple of 8, it is * necessary to put more guard bytes at the end of the window, or * to check more often for insufficient lookahead. */ /* Assert (scan[2] == match[2], "scan[2]?"); */ scan++, match++; do { } while (*(ushf *) (scan += 2) == *(ushf *) (match += 2) && *(ushf *) (scan += 2) == *(ushf *) (match += 2) && *(ushf *) (scan += 2) == *(ushf *) (match += 2) && *(ushf *) (scan += 2) == *(ushf *) (match += 2) && scan < strend); /* The funny "do {}" generates better code on most compilers */ /* Here, scan <= window+strstart+257 */ /* Assert (scan <= s->window + (unsigned) (s->window_size - 1), "wild scan"); */ if (*scan == *match) scan++; len = (MAX_MATCH - 1) - (int) (strend - scan); scan = strend - (MAX_MATCH - 1); #else /* UNALIGNED_OK */ if (match[best_len] != scan_end || match[best_len - 1] != scan_end1 || *match != *scan || *++match != scan[1]) continue; /* The check at best_len-1 can be removed because it will be made * again later. (This heuristic is not always a win.) * It is not necessary to compare scan[2] and match[2] since they * are always equal when the other bytes match, given that * the hash keys are equal and that HASH_BITS >= 8. */ scan += 2, match++; /* Assert (*scan == *match, "match[2]?"); */ /* We check for insufficient lookahead only every 8th comparison; * the 256th check will be made at strstart+258. */ do { } while (*++scan == *++match && *++scan == *++match && *++scan == *++match && *++scan == *++match && *++scan == *++match && *++scan == *++match && *++scan == *++match && *++scan == *++match && scan < strend); /* Assert (scan <= s->window + (unsigned) (s->window_size - 1), "wild scan");*/ len = MAX_MATCH - (int) (strend - scan); scan = strend - MAX_MATCH; #endif /* UNALIGNED_OK */ if (len > best_len) { s->match_start = cur_match; best_len = len; if (len >= nice_match) break; #ifdef UNALIGNED_OK scan_end = *(ushf *) (scan + best_len - 1); #else scan_end1 = scan[best_len - 1]; scan_end = scan[best_len]; #endif } } while ((cur_match = prev[cur_match & wmask]) > limit && --chain_length != 0); if ((uInt) best_len <= s->lookahead) *cuda_return = (uInt) best_len; else *cuda_return = s->lookahead; } int longest_match_cuda(deflate_state * s, IPos cur_match) { deflate_state *state; Bytef *window; Posf *prev; int *cuda_return; cudaMalloc(&state, sizeof(deflate_state)); cudaMalloc(&prev, s->w_size*sizeof(Posf)); cudaMalloc(&window, s->w_size*2*sizeof(Bytef)); cudaMalloc(&cuda_return, sizeof(int)); cudaMemcpy(prev, s->prev, s->w_size*sizeof(Posf), cudaMemcpyHostToDevice); cudaMemcpy(state, s, sizeof(deflate_state), cudaMemcpyHostToDevice); cudaMemcpy(window, s->window,s->w_size*2*sizeof(Bytef) , cudaMemcpyHostToDevice); longest_match_<<<1,1>>>(state, cur_match, cuda_return, window, prev); int host_return[1]; cudaMemcpy (host_return, cuda_return, sizeof(int), cudaMemcpyDeviceToHost); cudaMemcpy (s, state, sizeof(deflate_state), cudaMemcpyDeviceToHost); cudaFree(state); cudaFree(prev); cudaFree(window); return host_return[0]; }
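The host wrapper `longest_match_cuda` in this pair follows the usual offload shape: allocate device buffers, copy the inputs host-to-device, launch a `<<<1,1>>>` kernel, copy the result back, and free. A self-contained sketch of that shape, with the HIP calls hipify produces noted in comments; the `find_max` kernel is a hypothetical placeholder, not part of the deflate code:

```cpp
#include <cstdio>
#include <cuda_runtime.h>

// Placeholder single-thread kernel: computes the maximum of an int array.
__global__ void find_max(const int *data, int n, int *result) {
  int best = data[0];
  for (int i = 1; i < n; ++i)
    if (data[i] > best) best = data[i];
  *result = best;
}

int main() {
  const int n = 8;
  int h_data[n] = {3, 1, 4, 1, 5, 9, 2, 6};
  int *d_data = nullptr, *d_result = nullptr;

  cudaMalloc(&d_data, n * sizeof(int));                 // -> hipMalloc
  cudaMalloc(&d_result, sizeof(int));
  cudaMemcpy(d_data, h_data, n * sizeof(int),
             cudaMemcpyHostToDevice);                   // -> hipMemcpy(..., hipMemcpyHostToDevice)

  // -> hipLaunchKernelGGL(find_max, dim3(1), dim3(1), 0, 0, d_data, n, d_result)
  find_max<<<1, 1>>>(d_data, n, d_result);

  int h_result = 0;
  cudaMemcpy(&h_result, d_result, sizeof(int),
             cudaMemcpyDeviceToHost);                   // -> hipMemcpyDeviceToHost
  std::printf("max = %d\n", h_result);

  cudaFree(d_data);                                     // -> hipFree
  cudaFree(d_result);
  return 0;
}
```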
2dbcfdd7ca95301de43a9ec116bfc858d6db818d.hip
// !!! This is a file automatically generated by hipify!!! #include <stdlib.h> #include <stdio.h> #include <getopt.h> #include <string> #include <vector> #include <time.h> #include "saxpy.h" #include "common.h" double timeKernelAvg = 0.0; double timeCopyH2DAvg = 0.0; double timeCopyD2HAvg = 0.0; double totalTimeAvg = 0.0; // return GB/s double toBW(long bytes, float sec) { return static_cast<double>(bytes) / (1024. * 1024. * 1024.) / sec; } void saxpyCpu(long N, float alpha, float* x, float* y, float* result) { for (long index=0; index<N; index++) result[index] = alpha * x[index] + y[index]; } bool check_saxpy(long N, float* a, float* b) { printf("%s\n", __func__); std::vector<long> diffs; for (long index=0; index<N; index++) { if (a[index] != b[index]) diffs.push_back(index); } if (diffs.size() > 0) { MYDEBUG("%s done\n", __func__); for (unsigned int i=0; i<diffs.size(); i++) { int idx = diffs[i]; MYDEBUG("[%16d] %10.3f != %10.3f (%e)\n", idx, a[idx], b[idx], a[idx] - b[idx]); } MYDEBUG(" failed #: %zu\n", diffs.size()); return false; } else { return true; } } void usage(const char* progname) { printf("Usage: %s [options]\n", progname); printf("Program Options:\n"); printf(" -n --arraysize <INT> Number of elements in arrays\n"); printf(" -p --partitions <INT> Number of partitions for the array\n"); printf(" -i --iterations <INT> Number of iterations for statistics\n"); printf(" -? --help This message\n"); } int main(int argc, char** argv) { long total_elems = 512 * 1024 * 1024; int partitions = 1; int iterations = 1; // parse commandline options //////////////////////////////////////////// int opt; int cs; static struct option long_options[] = { {"arraysize", 1, 0, 'n'}, {"partitions", 1, 0, 'p'}, {"iterations", 1, 0, 'i'}, {"help", 0, 0, '?'}, {0 ,0, 0, 0} }; while ((opt = getopt_long(argc, argv, "c:n:p:i:", long_options, NULL)) != EOF) { switch (opt) { case 'n': total_elems = atol(optarg); break; case 'p': partitions = atoi(optarg); break; case 'i': iterations = atoi(optarg); break; case 'c': cs = atoi(optarg); break; default: usage(argv[0]); return 1; } } // end parsing of commandline options ////////////////////////////////////// const float alpha = 2.0f; float* xarray = NULL; float* yarray = NULL; float* resultarray = NULL; // // TODO: allocate host-side memory // if (cs == 1) { xarray = (float*) malloc (total_elems*sizeof(float)); yarray = (float*) malloc (total_elems*sizeof(float)); resultarray = (float*) calloc (total_elems, sizeof(float)); } if (cs == 2) { hipMallocManaged(&xarray, total_elems*sizeof(float)); hipMallocManaged(&yarray, total_elems*sizeof(float)); hipMallocManaged(&resultarray, total_elems*sizeof(float)); } if (cs == 3) { hipHostMalloc(&xarray, total_elems*sizeof(float)); hipHostMalloc(&yarray, total_elems*sizeof(float)); hipHostMalloc(&resultarray, total_elems*sizeof(float)); } // // TODO: initialize input arrays // srand(time(NULL)); for (long i=0; i<total_elems; i++) { xarray[i] = rand() / 100; yarray[i] = rand() / 100; } printCudaInfo(); for (int i=0; i<iterations; i++) { saxpyCuda(total_elems, alpha, xarray, yarray, resultarray, partitions); } totalTimeAvg /= iterations; timeKernelAvg /= iterations; timeCopyH2DAvg /= iterations; timeCopyD2HAvg /= iterations; const long totalBytes = sizeof(float) * 3 * total_elems; printf("Number of bytes of one array: %ld\n", sizeof(float) * total_elems); printf("Overall time : %8.3lf ms [%8.3lf GB/s ]\n", 1000.f * totalTimeAvg, toBW(totalBytes, totalTimeAvg)); printf("GPU Kernel : %8.3lf ms [%8.3lf Ops/s]\n", 1000.f * 
timeKernelAvg, toBW(totalBytes/3, timeKernelAvg)); printf("Copy CPU->GPU: %8.3lf ms [%8.3lf GB/s ]\n", 1000.f * timeCopyH2DAvg, toBW(totalBytes/3*2, timeCopyH2DAvg)); printf("Copy CPU<-GPU: %8.3lf ms [%8.3lf GB/s ]\n", 1000.f * timeCopyD2HAvg, toBW(totalBytes/3, timeCopyD2HAvg)); if (resultarray != NULL) { float* resultrefer = new float[total_elems](); saxpyCpu(total_elems, alpha, xarray, yarray, resultrefer); if (check_saxpy(total_elems, resultarray, resultrefer)) { printf("Test succeeded\n"); } else { printf("Test failed\n"); } } // // TODO: deallocate host-side memory // if (cs == 1) { delete [] xarray; delete [] yarray; delete [] resultarray; } if (cs == 2) { hipFree(xarray); hipFree(yarray); hipFree(resultarray); } return 0; }
2dbcfdd7ca95301de43a9ec116bfc858d6db818d.cu
#include <stdlib.h>
#include <stdio.h>
#include <getopt.h>
#include <string>
#include <vector>
#include <time.h>

#include "saxpy.h"
#include "common.h"

double timeKernelAvg = 0.0;
double timeCopyH2DAvg = 0.0;
double timeCopyD2HAvg = 0.0;
double totalTimeAvg = 0.0;

// return GB/s
double toBW(long bytes, float sec) {
    return static_cast<double>(bytes) / (1024. * 1024. * 1024.) / sec;
}

void saxpyCpu(long N, float alpha, float* x, float* y, float* result) {
    for (long index = 0; index < N; index++)
        result[index] = alpha * x[index] + y[index];
}

bool check_saxpy(long N, float* a, float* b) {
    printf("%s\n", __func__);
    std::vector<long> diffs;
    for (long index = 0; index < N; index++) {
        if (a[index] != b[index])
            diffs.push_back(index);
    }
    if (diffs.size() > 0) {
        MYDEBUG("%s done\n", __func__);
        for (unsigned int i = 0; i < diffs.size(); i++) {
            int idx = diffs[i];
            MYDEBUG("[%16d] %10.3f != %10.3f (%e)\n", idx, a[idx], b[idx], a[idx] - b[idx]);
        }
        MYDEBUG(" failed #: %zu\n", diffs.size());
        return false;
    } else {
        return true;
    }
}

void usage(const char* progname) {
    printf("Usage: %s [options]\n", progname);
    printf("Program Options:\n");
    printf("  -n  --arraysize <INT>   Number of elements in arrays\n");
    printf("  -p  --partitions <INT>  Number of partitions for the array\n");
    printf("  -i  --iterations <INT>  Number of iterations for statistics\n");
    printf("  -c  <INT>               Allocation mode: 1=malloc, 2=cudaMallocManaged, 3=cudaMallocHost\n");
    printf("  -?  --help              This message\n");
}

int main(int argc, char** argv) {

    long total_elems = 512 * 1024 * 1024;
    int partitions = 1;
    int iterations = 1;

    // parse commandline options ////////////////////////////////////////////
    int opt;
    int cs = 1;  // allocation mode (see usage); defaults to plain malloc
    static struct option long_options[] = {
        {"arraysize",  1, 0, 'n'},
        {"partitions", 1, 0, 'p'},
        {"iterations", 1, 0, 'i'},
        {"help",       0, 0, '?'},
        {0, 0, 0, 0}
    };

    while ((opt = getopt_long(argc, argv, "c:n:p:i:", long_options, NULL)) != EOF) {
        switch (opt) {
        case 'n':
            total_elems = atol(optarg);
            break;
        case 'p':
            partitions = atoi(optarg);
            break;
        case 'i':
            iterations = atoi(optarg);
            break;
        case 'c':
            cs = atoi(optarg);
            break;
        default:
            usage(argv[0]);
            return 1;
        }
    }
    // end parsing of commandline options //////////////////////////////////////

    const float alpha = 2.0f;
    float* xarray = NULL;
    float* yarray = NULL;
    float* resultarray = NULL;

    //
    // allocate host-side memory
    //
    if (cs == 1) {
        xarray = (float*) malloc(total_elems * sizeof(float));
        yarray = (float*) malloc(total_elems * sizeof(float));
        resultarray = (float*) calloc(total_elems, sizeof(float));
    }
    if (cs == 2) {
        cudaMallocManaged(&xarray, total_elems * sizeof(float));
        cudaMallocManaged(&yarray, total_elems * sizeof(float));
        cudaMallocManaged(&resultarray, total_elems * sizeof(float));
    }
    if (cs == 3) {
        cudaMallocHost(&xarray, total_elems * sizeof(float));
        cudaMallocHost(&yarray, total_elems * sizeof(float));
        cudaMallocHost(&resultarray, total_elems * sizeof(float));
    }

    //
    // initialize input arrays
    //
    srand(time(NULL));
    for (long i = 0; i < total_elems; i++) {
        xarray[i] = rand() / 100;
        yarray[i] = rand() / 100;
    }

    printCudaInfo();

    for (int i = 0; i < iterations; i++) {
        saxpyCuda(total_elems, alpha, xarray, yarray, resultarray, partitions);
    }
    totalTimeAvg   /= iterations;
    timeKernelAvg  /= iterations;
    timeCopyH2DAvg /= iterations;
    timeCopyD2HAvg /= iterations;

    const long totalBytes = sizeof(float) * 3 * total_elems;
    printf("Number of bytes of one array: %ld\n", sizeof(float) * total_elems);
    printf("Overall time : %8.3lf ms [%8.3lf GB/s ]\n", 1000.f * totalTimeAvg,   toBW(totalBytes,     totalTimeAvg));
    printf("GPU Kernel   : %8.3lf ms [%8.3lf Ops/s]\n", 1000.f * timeKernelAvg,  toBW(totalBytes / 3, timeKernelAvg));
    printf("Copy CPU->GPU: %8.3lf ms [%8.3lf GB/s ]\n", 1000.f * timeCopyH2DAvg, toBW(totalBytes / 3 * 2, timeCopyH2DAvg));
    printf("Copy CPU<-GPU: %8.3lf ms [%8.3lf GB/s ]\n", 1000.f * timeCopyD2HAvg, toBW(totalBytes / 3, timeCopyD2HAvg));

    if (resultarray != NULL) {
        float* resultrefer = new float[total_elems]();
        saxpyCpu(total_elems, alpha, xarray, yarray, resultrefer);
        if (check_saxpy(total_elems, resultarray, resultrefer)) {
            printf("Test succeeded\n");
        } else {
            printf("Test failed\n");
        }
        delete [] resultrefer;
    }

    //
    // deallocate host-side memory (matching the allocator used above)
    //
    if (cs == 1) {
        free(xarray);
        free(yarray);
        free(resultarray);
    }
    if (cs == 2) {
        cudaFree(xarray);
        cudaFree(yarray);
        cudaFree(resultarray);
    }
    if (cs == 3) {
        cudaFreeHost(xarray);
        cudaFreeHost(yarray);
        cudaFreeHost(resultarray);
    }

    return 0;
}
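Note that saxpy.h and common.h are not included in this dump, so saxpyCuda, printCudaInfo, and MYDEBUG are only used above, not defined. The sketch below is a minimal illustration of what a partitioned saxpyCuda could look like; the kernel name, the chunking scheme, and the omission of the timing accumulation are assumptions, not the repository's actual implementation.

// Hypothetical sketch only -- not the saxpy.h implementation shipped with this benchmark.
#include <cuda_runtime.h>

__global__ void saxpy_kernel(long N, float alpha, const float* x, const float* y, float* result) {
    long i = (long)blockIdx.x * blockDim.x + threadIdx.x;
    if (i < N)
        result[i] = alpha * x[i] + y[i];
}

void saxpyCuda(long total_elems, float alpha, float* xarray, float* yarray,
               float* resultarray, int partitions) {
    long chunk = (total_elems + partitions - 1) / partitions;
    float *dx, *dy, *dr;
    cudaMalloc(&dx, chunk * sizeof(float));
    cudaMalloc(&dy, chunk * sizeof(float));
    cudaMalloc(&dr, chunk * sizeof(float));

    for (int p = 0; p < partitions; p++) {
        long offset = (long)p * chunk;
        long n = (total_elems - offset < chunk) ? (total_elems - offset) : chunk;
        if (n <= 0) break;

        // One partition: H2D copies, kernel launch, D2H copy. The real saxpy.h
        // presumably also accumulates timeCopyH2DAvg / timeKernelAvg /
        // timeCopyD2HAvg / totalTimeAvg around these calls; timing is omitted here.
        cudaMemcpy(dx, xarray + offset, n * sizeof(float), cudaMemcpyHostToDevice);
        cudaMemcpy(dy, yarray + offset, n * sizeof(float), cudaMemcpyHostToDevice);

        int threads = 256;
        int blocks = (int)((n + threads - 1) / threads);
        saxpy_kernel<<<blocks, threads>>>(n, alpha, dx, dy, dr);

        cudaMemcpy(resultarray + offset, dr, n * sizeof(float), cudaMemcpyDeviceToHost);
    }
    cudaDeviceSynchronize();

    cudaFree(dx);
    cudaFree(dy);
    cudaFree(dr);
}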
f0cdf3ba5236faf6d7cf9aea95c22ee6f3db722a.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #ifndef THC_GENERIC_FILE #define THC_GENERIC_FILE "THHUNN/generic/SpatialDepthwiseConvolution.hip" #else void THNN_(SpatialDepthwiseConvolution_updateOutput)( THCState *state, THCTensor *input, THCTensor *output, THCTensor *weight, THCTensor *bias, int kW, int kH, int dW, int dH, int padW, int padH, int dilationW, int dilationH) { #if defined(THC_REAL_IS_BFLOAT16) && !defined(__HIP_PLATFORM_HCC__) TORCH_CHECK(false, "SpatialDepthwiseConvolution_updateOutput not suppported with BFloat16"); #else THCUNN_assertSameGPU(state, 3, input, output, weight); // Only handle 4D Input Tensors for now THAssert(!input->is_empty() && THCTensor_(nDimensionLegacyNoScalars)(state, input) == 4); THAssert(!weight->is_empty() && THCTensor_(nDimensionLegacyNoScalars)(state, weight) == 4); // We assume that the input and weight Tensors are shaped properly by // the caller, so we verify that here to some extent // Weight Tensor is shape (output_channels, 1, kH, kW) THAssert(weight->size(1) == 1); // Input Tensor is shape (N, input_channels, H, W) // We verify that the # of output_channels is a multiple of input_channels THAssert(weight->size(0) % input->size(1) == 0); // Bias has same # of channels as output if (bias) { THAssert(THTensor_sizeLegacyNoScalars(bias, 0) == weight->size(0)); } input = THCTensor_(newContiguous)(state, input); weight = THCTensor_(newContiguous)(state, weight); bias = bias ? THCTensor_(newContiguous)(state, bias) : bias; // Following the behavior of other THCUNN functions, we shape the output // Tensor ourselves int batchSize = input->size(0); int height = input->size(2); int width = input->size(3); int outputHeight = (height + 2 * padH - (dilationH * (kH - 1) + 1)) / dH + 1; int outputWidth = (width + 2 * padW - (dilationW * (kW - 1) + 1)) / dW + 1; int outputChannels = weight->size(0); THCTensor_(resize4d)(state, output, batchSize, outputChannels, outputHeight, outputWidth); // Create THCDeviceTensor // Kernel currently relies upon all the Tensors to be contiguous, but we made // them contiguous above THCDeviceTensor<scalar_t, 4> dInput = toDeviceTensor<scalar_t, 4>(state, input); THCDeviceTensor<scalar_t, 4> dWeight = toDeviceTensor<scalar_t, 4>(state, weight); THCDeviceTensor<scalar_t, 4> dOutput = toDeviceTensor<scalar_t, 4>(state, output); THCDeviceTensor<scalar_t, 1> dBias; if (bias) { dBias = toDeviceTensor<scalar_t, 1>(state, bias); } int inputChannels = input->size(1); int depthwiseMultiplier = outputChannels / inputChannels; // One thread per output value int64_t n = THCTensor_(nElement)(state, output); int blocks = GET_BLOCKS(n); dim3 grid(blocks); dim3 block(CUDA_NUM_THREADS); if (kW == 3 && kH == 3) { hipLaunchKernelGGL(( spatialDepthwiseConvolutionUpdateOutput<scalar_t, accreal, unsigned int, 3>), dim3(grid), dim3(block), 0, c10::hip::getCurrentHIPStreamMasqueradingAsCUDA(), dInput, dOutput, dWeight, dBias, bias != NULL, n, outputChannels, depthwiseMultiplier, width, height, outputWidth, outputHeight, kW, kH, dW, dH, padW, padH, dilationW, dilationH); } else if (kW == 1 && kH == 1) { hipLaunchKernelGGL(( spatialDepthwiseConvolutionUpdateOutput<scalar_t, accreal, unsigned int, 1>), dim3(grid), dim3(block), 0, c10::hip::getCurrentHIPStreamMasqueradingAsCUDA(), dInput, dOutput, dWeight, dBias, bias != NULL, n, outputChannels, depthwiseMultiplier, width, height, outputWidth, outputHeight, kW, kH, dW, dH, padW, padH, dilationW, dilationH); } else { hipLaunchKernelGGL(( 
spatialDepthwiseConvolutionUpdateOutput<scalar_t, accreal, unsigned int, 0>), dim3(grid), dim3(block), 0, c10::hip::getCurrentHIPStreamMasqueradingAsCUDA(), dInput, dOutput, dWeight, dBias, bias != NULL, n, outputChannels, depthwiseMultiplier, width, height, outputWidth, outputHeight, kW, kH, dW, dH, padW, padH, dilationW, dilationH); } THCudaCheck(hipGetLastError()); THCTensor_(free)(state, input); THCTensor_(free)(state, weight); if (bias) THCTensor_(free)(state, bias); #endif // THC_REAL_IS_BFLOAT16 && !__HIP_PLATFORM_HCC__ } void THNN_(SpatialDepthwiseConvolution_updateGradInput)( THCState *state, THCTensor *input, THCTensor *gradOutput, THCTensor *gradInput, THCTensor *weight, int kW, int kH, int dW, int dH, int padW, int padH, int dilationW, int dilationH) { #if defined(THC_REAL_IS_BFLOAT16) && !defined(__HIP_PLATFORM_HCC__) TORCH_CHECK(false, "SpatialDepthwiseConvolution_updateGradInput not suppported with BFloat16"); #else THCUNN_assertSameGPU(state, 3, gradOutput, gradInput, weight); // Only handle 4D Input Tensors for now THAssert(!input->is_empty() && THCTensor_(nDimensionLegacyNoScalars)(state, input) == 4); THAssert(!weight->is_empty() && THCTensor_(nDimensionLegacyNoScalars)(state, weight) == 4); THAssert(!gradOutput->is_empty() && THCTensor_(nDimensionLegacyNoScalars)(state, gradOutput) == 4); // Minimal shape checking, as above // Same # of elements in batch THAssert(input->size(0) == gradOutput->size(0)); // Same # of filters as outputChannels THAssert(weight->size(0) == gradOutput->size(1)); weight = THCTensor_(newContiguous)(state, weight); gradOutput = THCTensor_(newContiguous)(state, gradOutput); // Resize GradInput THCTensor_(resizeAs)(state, gradInput, input); int inputChannels = input->size(1); int height = input->size(2); int width = input->size(3); int outputChannels = gradOutput->size(1); int outputHeight = gradOutput->size(2); int outputWidth = gradOutput->size(3); int depthwiseMultiplier = outputChannels / inputChannels; THCDeviceTensor<scalar_t, 4> dGradOutput = toDeviceTensor<scalar_t, 4>(state, gradOutput); THCDeviceTensor<scalar_t, 4> dGradInput = toDeviceTensor<scalar_t, 4>(state, gradInput); THCDeviceTensor<scalar_t, 4> dWeight = toDeviceTensor<scalar_t, 4>(state, weight); // Kernel currently relies upon all the Tensors to be contiguous THAssert(dGradOutput.isContiguous()); THAssert(dGradInput.isContiguous()); THAssert(dWeight.isContiguous()); // One thread per gradInput value int64_t n = THCTensor_(nElement)(state, gradInput); int blocks = GET_BLOCKS(n); dim3 grid(blocks); dim3 block(CUDA_NUM_THREADS); if (kW == 3 && kH == 3) if (dW == 1 && dH == 1){ hipLaunchKernelGGL(( spatialDepthwiseConvolutionUpdateGradInput<scalar_t, accreal, unsigned int, 3, 1>), dim3(grid), dim3(block), 0, c10::hip::getCurrentHIPStreamMasqueradingAsCUDA(), dGradOutput, dGradInput, dWeight, n, inputChannels, depthwiseMultiplier, outputChannels, width, height, outputWidth, outputHeight, kW, kH, dW, dH, padW, padH, dilationW, dilationH); } else if (dW == 2 && dH == 2) { hipLaunchKernelGGL(( spatialDepthwiseConvolutionUpdateGradInput<scalar_t, accreal, unsigned int, 3, 2>), dim3(grid), dim3(block), 0, c10::hip::getCurrentHIPStreamMasqueradingAsCUDA(), dGradOutput, dGradInput, dWeight, n, inputChannels, depthwiseMultiplier, outputChannels, width, height, outputWidth, outputHeight, kW, kH, dW, dH, padW, padH, dilationW, dilationH); } else { hipLaunchKernelGGL(( spatialDepthwiseConvolutionUpdateGradInput<scalar_t, accreal, unsigned int, 3, 0>), dim3(grid), dim3(block), 0, 
c10::hip::getCurrentHIPStreamMasqueradingAsCUDA(), dGradOutput, dGradInput, dWeight, n, inputChannels, depthwiseMultiplier, outputChannels, width, height, outputWidth, outputHeight, kW, kH, dW, dH, padW, padH, dilationW, dilationH); } else if (kW == 1 && kH == 1) if (dW == 1 && dH == 1){ hipLaunchKernelGGL(( spatialDepthwiseConvolutionUpdateGradInput<scalar_t, accreal, unsigned int, 1, 1>), dim3(grid), dim3(block), 0, c10::hip::getCurrentHIPStreamMasqueradingAsCUDA(), dGradOutput, dGradInput, dWeight, n, inputChannels, depthwiseMultiplier, outputChannels, width, height, outputWidth, outputHeight, kW, kH, dW, dH, padW, padH, dilationW, dilationH); } else if (dW == 2 && dH == 2) { hipLaunchKernelGGL(( spatialDepthwiseConvolutionUpdateGradInput<scalar_t, accreal, unsigned int, 1, 2>), dim3(grid), dim3(block), 0, c10::hip::getCurrentHIPStreamMasqueradingAsCUDA(), dGradOutput, dGradInput, dWeight, n, inputChannels, depthwiseMultiplier, outputChannels, width, height, outputWidth, outputHeight, kW, kH, dW, dH, padW, padH, dilationW, dilationH); } else { hipLaunchKernelGGL(( spatialDepthwiseConvolutionUpdateGradInput<scalar_t, accreal, unsigned int, 1, 0>), dim3(grid), dim3(block), 0, c10::hip::getCurrentHIPStreamMasqueradingAsCUDA(), dGradOutput, dGradInput, dWeight, n, inputChannels, depthwiseMultiplier, outputChannels, width, height, outputWidth, outputHeight, kW, kH, dW, dH, padW, padH, dilationW, dilationH); } else if (dW == 1 && dH == 1){ hipLaunchKernelGGL(( spatialDepthwiseConvolutionUpdateGradInput<scalar_t, accreal, unsigned int, 0, 1>), dim3(grid), dim3(block), 0, c10::hip::getCurrentHIPStreamMasqueradingAsCUDA(), dGradOutput, dGradInput, dWeight, n, inputChannels, depthwiseMultiplier, outputChannels, width, height, outputWidth, outputHeight, kW, kH, dW, dH, padW, padH, dilationW, dilationH); } else if (dW == 2 && dH == 2) { hipLaunchKernelGGL(( spatialDepthwiseConvolutionUpdateGradInput<scalar_t, accreal, unsigned int, 0, 2>), dim3(grid), dim3(block), 0, c10::hip::getCurrentHIPStreamMasqueradingAsCUDA(), dGradOutput, dGradInput, dWeight, n, inputChannels, depthwiseMultiplier, outputChannels, width, height, outputWidth, outputHeight, kW, kH, dW, dH, padW, padH, dilationW, dilationH); } else { hipLaunchKernelGGL(( spatialDepthwiseConvolutionUpdateGradInput<scalar_t, accreal, unsigned int, 0, 0>), dim3(grid), dim3(block), 0, c10::hip::getCurrentHIPStreamMasqueradingAsCUDA(), dGradOutput, dGradInput, dWeight, n, inputChannels, depthwiseMultiplier, outputChannels, width, height, outputWidth, outputHeight, kW, kH, dW, dH, padW, padH, dilationW, dilationH); } THCudaCheck(hipGetLastError()); THCTensor_(free)(state, weight); THCTensor_(free)(state, gradOutput); #endif // THC_REAL_IS_BFLOAT16 && !__HIP_PLATFORM_HCC__ } void THNN_(SpatialDepthwiseConvolution_accGradParameters)( THCState *state, THCTensor *input, THCTensor *gradOutput, THCTensor *gradWeight, int kW, int kH, int dW, int dH, int padW, int padH, int dilationW, int dilationH) { #if defined(THC_REAL_IS_BFLOAT16) && !defined(__HIP_PLATFORM_HCC__) TORCH_CHECK(false, "SpatialDepthwiseConvolution_accGradParameters not suppported with BFloat16"); #else THCUNN_assertSameGPU(state, 3, input, gradOutput, gradWeight); // Only handle 4D Input Tensors for now THAssert(!input->is_empty() && THCTensor_(nDimensionLegacyNoScalars)(state, input) == 4); THAssert(!gradOutput->is_empty() && THCTensor_(nDimensionLegacyNoScalars)(state, gradOutput) == 4); THAssert(!gradWeight->is_empty() && THCTensor_(nDimensionLegacyNoScalars)(state, gradWeight) == 4); // 
Minimal shape checking as above // Same # of elements in batch THAssert(input->size(0) == gradOutput->size(0)); // Same # of filters as outputChannels THAssert(gradWeight->size(0) == gradOutput->size(1)); int batchSize = input->size(0); int inputChannels = input->size(1); int height = input->size(2); int width = input->size(3); int outputChannels = gradOutput->size(1); int outputHeight = gradOutput->size(2); int outputWidth = gradOutput->size(3); int depthwiseMultiplier = outputChannels / inputChannels; gradOutput = THCTensor_(newContiguous)(state, gradOutput); THCDeviceTensor<scalar_t, 4> dGradOutput = toDeviceTensor<scalar_t, 4>(state, gradOutput); THCDeviceTensor<scalar_t, 4> dInput = toDeviceTensor<scalar_t, 4>(state, input); THCDeviceTensor<scalar_t, 4> dGradWeight = toDeviceTensor<scalar_t, 4>(state, gradWeight); // Kernel currently relies upon all the Tensors to be contiguous THAssert(dGradOutput.isContiguous()); THAssert(dInput.isContiguous()); THAssert(dGradWeight.isContiguous()); // We parallelize so that each block computes a single value in gradWeight int blocks = outputChannels * kH * kW; // Make sure we have enough threads to perform the reduction, and use this number // to create the shared memory size for the reduction dim3 grid(blocks); dim3 block(getGradParamsNumThreads(batchSize)); int smem = block.x * sizeof(accreal); hipLaunchKernelGGL(( spatialDepthwiseConvolutionAccGradParameters<scalar_t, accreal, unsigned int>), dim3(grid), dim3(block), smem, c10::hip::getCurrentHIPStreamMasqueradingAsCUDA(), dGradOutput, dInput, dGradWeight, batchSize, inputChannels, outputChannels, depthwiseMultiplier, width, height, outputWidth, outputHeight, kW, kH, dW, dH, padW, padH, dilationW, dilationH); THCudaCheck(hipGetLastError()); THCTensor_(free)(state, gradOutput); #endif // THC_REAL_IS_BFLOAT16 && !__HIP_PLATFORM_HCC__ } #endif
f0cdf3ba5236faf6d7cf9aea95c22ee6f3db722a.cu
#ifndef THC_GENERIC_FILE #define THC_GENERIC_FILE "THCUNN/generic/SpatialDepthwiseConvolution.cu" #else void THNN_(SpatialDepthwiseConvolution_updateOutput)( THCState *state, THCTensor *input, THCTensor *output, THCTensor *weight, THCTensor *bias, int kW, int kH, int dW, int dH, int padW, int padH, int dilationW, int dilationH) { #if defined(THC_REAL_IS_BFLOAT16) && !defined(__HIP_PLATFORM_HCC__) TORCH_CHECK(false, "SpatialDepthwiseConvolution_updateOutput not suppported with BFloat16"); #else THCUNN_assertSameGPU(state, 3, input, output, weight); // Only handle 4D Input Tensors for now THAssert(!input->is_empty() && THCTensor_(nDimensionLegacyNoScalars)(state, input) == 4); THAssert(!weight->is_empty() && THCTensor_(nDimensionLegacyNoScalars)(state, weight) == 4); // We assume that the input and weight Tensors are shaped properly by // the caller, so we verify that here to some extent // Weight Tensor is shape (output_channels, 1, kH, kW) THAssert(weight->size(1) == 1); // Input Tensor is shape (N, input_channels, H, W) // We verify that the # of output_channels is a multiple of input_channels THAssert(weight->size(0) % input->size(1) == 0); // Bias has same # of channels as output if (bias) { THAssert(THTensor_sizeLegacyNoScalars(bias, 0) == weight->size(0)); } input = THCTensor_(newContiguous)(state, input); weight = THCTensor_(newContiguous)(state, weight); bias = bias ? THCTensor_(newContiguous)(state, bias) : bias; // Following the behavior of other THCUNN functions, we shape the output // Tensor ourselves int batchSize = input->size(0); int height = input->size(2); int width = input->size(3); int outputHeight = (height + 2 * padH - (dilationH * (kH - 1) + 1)) / dH + 1; int outputWidth = (width + 2 * padW - (dilationW * (kW - 1) + 1)) / dW + 1; int outputChannels = weight->size(0); THCTensor_(resize4d)(state, output, batchSize, outputChannels, outputHeight, outputWidth); // Create THCDeviceTensor // Kernel currently relies upon all the Tensors to be contiguous, but we made // them contiguous above THCDeviceTensor<scalar_t, 4> dInput = toDeviceTensor<scalar_t, 4>(state, input); THCDeviceTensor<scalar_t, 4> dWeight = toDeviceTensor<scalar_t, 4>(state, weight); THCDeviceTensor<scalar_t, 4> dOutput = toDeviceTensor<scalar_t, 4>(state, output); THCDeviceTensor<scalar_t, 1> dBias; if (bias) { dBias = toDeviceTensor<scalar_t, 1>(state, bias); } int inputChannels = input->size(1); int depthwiseMultiplier = outputChannels / inputChannels; // One thread per output value int64_t n = THCTensor_(nElement)(state, output); int blocks = GET_BLOCKS(n); dim3 grid(blocks); dim3 block(CUDA_NUM_THREADS); if (kW == 3 && kH == 3) { spatialDepthwiseConvolutionUpdateOutput<scalar_t, accreal, unsigned int, 3><<<grid, block, 0, c10::cuda::getCurrentCUDAStream()>>>( dInput, dOutput, dWeight, dBias, bias != NULL, n, outputChannels, depthwiseMultiplier, width, height, outputWidth, outputHeight, kW, kH, dW, dH, padW, padH, dilationW, dilationH); } else if (kW == 1 && kH == 1) { spatialDepthwiseConvolutionUpdateOutput<scalar_t, accreal, unsigned int, 1><<<grid, block, 0, c10::cuda::getCurrentCUDAStream()>>>( dInput, dOutput, dWeight, dBias, bias != NULL, n, outputChannels, depthwiseMultiplier, width, height, outputWidth, outputHeight, kW, kH, dW, dH, padW, padH, dilationW, dilationH); } else { spatialDepthwiseConvolutionUpdateOutput<scalar_t, accreal, unsigned int, 0><<<grid, block, 0, c10::cuda::getCurrentCUDAStream()>>>( dInput, dOutput, dWeight, dBias, bias != NULL, n, outputChannels, depthwiseMultiplier, width, 
height, outputWidth, outputHeight, kW, kH, dW, dH, padW, padH, dilationW, dilationH); } THCudaCheck(cudaGetLastError()); THCTensor_(free)(state, input); THCTensor_(free)(state, weight); if (bias) THCTensor_(free)(state, bias); #endif // THC_REAL_IS_BFLOAT16 && !__HIP_PLATFORM_HCC__ } void THNN_(SpatialDepthwiseConvolution_updateGradInput)( THCState *state, THCTensor *input, THCTensor *gradOutput, THCTensor *gradInput, THCTensor *weight, int kW, int kH, int dW, int dH, int padW, int padH, int dilationW, int dilationH) { #if defined(THC_REAL_IS_BFLOAT16) && !defined(__HIP_PLATFORM_HCC__) TORCH_CHECK(false, "SpatialDepthwiseConvolution_updateGradInput not suppported with BFloat16"); #else THCUNN_assertSameGPU(state, 3, gradOutput, gradInput, weight); // Only handle 4D Input Tensors for now THAssert(!input->is_empty() && THCTensor_(nDimensionLegacyNoScalars)(state, input) == 4); THAssert(!weight->is_empty() && THCTensor_(nDimensionLegacyNoScalars)(state, weight) == 4); THAssert(!gradOutput->is_empty() && THCTensor_(nDimensionLegacyNoScalars)(state, gradOutput) == 4); // Minimal shape checking, as above // Same # of elements in batch THAssert(input->size(0) == gradOutput->size(0)); // Same # of filters as outputChannels THAssert(weight->size(0) == gradOutput->size(1)); weight = THCTensor_(newContiguous)(state, weight); gradOutput = THCTensor_(newContiguous)(state, gradOutput); // Resize GradInput THCTensor_(resizeAs)(state, gradInput, input); int inputChannels = input->size(1); int height = input->size(2); int width = input->size(3); int outputChannels = gradOutput->size(1); int outputHeight = gradOutput->size(2); int outputWidth = gradOutput->size(3); int depthwiseMultiplier = outputChannels / inputChannels; THCDeviceTensor<scalar_t, 4> dGradOutput = toDeviceTensor<scalar_t, 4>(state, gradOutput); THCDeviceTensor<scalar_t, 4> dGradInput = toDeviceTensor<scalar_t, 4>(state, gradInput); THCDeviceTensor<scalar_t, 4> dWeight = toDeviceTensor<scalar_t, 4>(state, weight); // Kernel currently relies upon all the Tensors to be contiguous THAssert(dGradOutput.isContiguous()); THAssert(dGradInput.isContiguous()); THAssert(dWeight.isContiguous()); // One thread per gradInput value int64_t n = THCTensor_(nElement)(state, gradInput); int blocks = GET_BLOCKS(n); dim3 grid(blocks); dim3 block(CUDA_NUM_THREADS); if (kW == 3 && kH == 3) if (dW == 1 && dH == 1){ spatialDepthwiseConvolutionUpdateGradInput<scalar_t, accreal, unsigned int, 3, 1><<<grid, block, 0, c10::cuda::getCurrentCUDAStream()>>>( dGradOutput, dGradInput, dWeight, n, inputChannels, depthwiseMultiplier, outputChannels, width, height, outputWidth, outputHeight, kW, kH, dW, dH, padW, padH, dilationW, dilationH); } else if (dW == 2 && dH == 2) { spatialDepthwiseConvolutionUpdateGradInput<scalar_t, accreal, unsigned int, 3, 2><<<grid, block, 0, c10::cuda::getCurrentCUDAStream()>>>( dGradOutput, dGradInput, dWeight, n, inputChannels, depthwiseMultiplier, outputChannels, width, height, outputWidth, outputHeight, kW, kH, dW, dH, padW, padH, dilationW, dilationH); } else { spatialDepthwiseConvolutionUpdateGradInput<scalar_t, accreal, unsigned int, 3, 0><<<grid, block, 0, c10::cuda::getCurrentCUDAStream()>>>( dGradOutput, dGradInput, dWeight, n, inputChannels, depthwiseMultiplier, outputChannels, width, height, outputWidth, outputHeight, kW, kH, dW, dH, padW, padH, dilationW, dilationH); } else if (kW == 1 && kH == 1) if (dW == 1 && dH == 1){ spatialDepthwiseConvolutionUpdateGradInput<scalar_t, accreal, unsigned int, 1, 1><<<grid, block, 0, 
c10::cuda::getCurrentCUDAStream()>>>( dGradOutput, dGradInput, dWeight, n, inputChannels, depthwiseMultiplier, outputChannels, width, height, outputWidth, outputHeight, kW, kH, dW, dH, padW, padH, dilationW, dilationH); } else if (dW == 2 && dH == 2) { spatialDepthwiseConvolutionUpdateGradInput<scalar_t, accreal, unsigned int, 1, 2><<<grid, block, 0, c10::cuda::getCurrentCUDAStream()>>>( dGradOutput, dGradInput, dWeight, n, inputChannels, depthwiseMultiplier, outputChannels, width, height, outputWidth, outputHeight, kW, kH, dW, dH, padW, padH, dilationW, dilationH); } else { spatialDepthwiseConvolutionUpdateGradInput<scalar_t, accreal, unsigned int, 1, 0><<<grid, block, 0, c10::cuda::getCurrentCUDAStream()>>>( dGradOutput, dGradInput, dWeight, n, inputChannels, depthwiseMultiplier, outputChannels, width, height, outputWidth, outputHeight, kW, kH, dW, dH, padW, padH, dilationW, dilationH); } else if (dW == 1 && dH == 1){ spatialDepthwiseConvolutionUpdateGradInput<scalar_t, accreal, unsigned int, 0, 1><<<grid, block, 0, c10::cuda::getCurrentCUDAStream()>>>( dGradOutput, dGradInput, dWeight, n, inputChannels, depthwiseMultiplier, outputChannels, width, height, outputWidth, outputHeight, kW, kH, dW, dH, padW, padH, dilationW, dilationH); } else if (dW == 2 && dH == 2) { spatialDepthwiseConvolutionUpdateGradInput<scalar_t, accreal, unsigned int, 0, 2><<<grid, block, 0, c10::cuda::getCurrentCUDAStream()>>>( dGradOutput, dGradInput, dWeight, n, inputChannels, depthwiseMultiplier, outputChannels, width, height, outputWidth, outputHeight, kW, kH, dW, dH, padW, padH, dilationW, dilationH); } else { spatialDepthwiseConvolutionUpdateGradInput<scalar_t, accreal, unsigned int, 0, 0><<<grid, block, 0, c10::cuda::getCurrentCUDAStream()>>>( dGradOutput, dGradInput, dWeight, n, inputChannels, depthwiseMultiplier, outputChannels, width, height, outputWidth, outputHeight, kW, kH, dW, dH, padW, padH, dilationW, dilationH); } THCudaCheck(cudaGetLastError()); THCTensor_(free)(state, weight); THCTensor_(free)(state, gradOutput); #endif // THC_REAL_IS_BFLOAT16 && !__HIP_PLATFORM_HCC__ } void THNN_(SpatialDepthwiseConvolution_accGradParameters)( THCState *state, THCTensor *input, THCTensor *gradOutput, THCTensor *gradWeight, int kW, int kH, int dW, int dH, int padW, int padH, int dilationW, int dilationH) { #if defined(THC_REAL_IS_BFLOAT16) && !defined(__HIP_PLATFORM_HCC__) TORCH_CHECK(false, "SpatialDepthwiseConvolution_accGradParameters not suppported with BFloat16"); #else THCUNN_assertSameGPU(state, 3, input, gradOutput, gradWeight); // Only handle 4D Input Tensors for now THAssert(!input->is_empty() && THCTensor_(nDimensionLegacyNoScalars)(state, input) == 4); THAssert(!gradOutput->is_empty() && THCTensor_(nDimensionLegacyNoScalars)(state, gradOutput) == 4); THAssert(!gradWeight->is_empty() && THCTensor_(nDimensionLegacyNoScalars)(state, gradWeight) == 4); // Minimal shape checking as above // Same # of elements in batch THAssert(input->size(0) == gradOutput->size(0)); // Same # of filters as outputChannels THAssert(gradWeight->size(0) == gradOutput->size(1)); int batchSize = input->size(0); int inputChannels = input->size(1); int height = input->size(2); int width = input->size(3); int outputChannels = gradOutput->size(1); int outputHeight = gradOutput->size(2); int outputWidth = gradOutput->size(3); int depthwiseMultiplier = outputChannels / inputChannels; gradOutput = THCTensor_(newContiguous)(state, gradOutput); THCDeviceTensor<scalar_t, 4> dGradOutput = toDeviceTensor<scalar_t, 4>(state, gradOutput); 
THCDeviceTensor<scalar_t, 4> dInput = toDeviceTensor<scalar_t, 4>(state, input); THCDeviceTensor<scalar_t, 4> dGradWeight = toDeviceTensor<scalar_t, 4>(state, gradWeight); // Kernel currently relies upon all the Tensors to be contiguous THAssert(dGradOutput.isContiguous()); THAssert(dInput.isContiguous()); THAssert(dGradWeight.isContiguous()); // We parallelize so that each block computes a single value in gradWeight int blocks = outputChannels * kH * kW; // Make sure we have enough threads to perform the reduction, and use this number // to create the shared memory size for the reduction dim3 grid(blocks); dim3 block(getGradParamsNumThreads(batchSize)); int smem = block.x * sizeof(accreal); spatialDepthwiseConvolutionAccGradParameters<scalar_t, accreal, unsigned int><<<grid, block, smem, c10::cuda::getCurrentCUDAStream()>>>( dGradOutput, dInput, dGradWeight, batchSize, inputChannels, outputChannels, depthwiseMultiplier, width, height, outputWidth, outputHeight, kW, kH, dW, dH, padW, padH, dilationW, dilationH); THCudaCheck(cudaGetLastError()); THCTensor_(free)(state, gradOutput); #endif // THC_REAL_IS_BFLOAT16 && !__HIP_PLATFORM_HCC__ } #endif
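For reference, the output spatial size computed in SpatialDepthwiseConvolution_updateOutput above follows the standard dilated-convolution formula. The standalone helper below is an illustration added here (it is not part of THCUNN) restating that arithmetic with a worked example.

// Illustration only: the output-size arithmetic used by updateOutput above.
#include <cstdio>

static int convOutputSize(int in, int pad, int kernel, int dilation, int stride) {
    // effective kernel extent = dilation * (kernel - 1) + 1
    return (in + 2 * pad - (dilation * (kernel - 1) + 1)) / stride + 1;
}

int main() {
    // 32x32 input, 3x3 kernel, pad 1, dilation 1: stride 1 preserves the size,
    // stride 2 roughly halves it (integer division).
    printf("%d\n", convOutputSize(32, 1, 3, 1, 1));  // 32
    printf("%d\n", convOutputSize(32, 1, 3, 1, 2));  // 16
    return 0;
}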
809ec4863b3e6c950248a7ea32e7695db2b4ea95.hip
// !!! This is a file automatically generated by hipify!!! #include <ATen/ATen.h> #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include <stdio.h> #include <cfloat> #include <time.h> #include <thrust/extrema.h> #include <Eigen/Geometry> #include <rocblas.h> #define DIRECTIONS_CHANNELS 2 #define CUDA_1D_KERNEL_LOOP(i, n) \ for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; \ i += blockDim.x * gridDim.x) // Compute 2D Euclidean distance between [x,y] and [u,v] __device__ inline float distance(int x, int y, int u, int v) { float dx = x - u; float dy = y - v; return sqrt(dx * dx + dy * dy); } // Compute the cosine similarity between [cx,cy] - [x,y], and [u,v] __device__ inline float angle_distance(int cx, int cy, int x, int y, float u, float v) { float dx = cx - x; float dy = cy - y; float n1 = sqrt(u * u + v * v); float n2 = sqrt(dx * dx + dy * dy); float dot = u * dx + v * dy; float distance = dot / (n1 * n2); return distance; } // Compute the arrays kernel, which gives a list of foreground pixel locations __global__ void compute_arrays_kernel(const int nthreads, const int* labelPointer, int* arrays, int* array_size) { CUDA_1D_KERNEL_LOOP(index, nthreads) { int cls = labelPointer[index]; if (cls > 0) { int offset = atomicAdd(array_size, 1); arrays[offset] = index; } } } // Compute dot product with [0, 1] __device__ inline int discretization_angle_index(int cx, int cy, int x, int y, int angle_discretization) { float cos_sim = .5 * (angle_distance(cx, cy, x, y, 0., 1.) + 1); // in range [0,1] int angle_index = static_cast<int>(floor(cos_sim * (float) angle_discretization)); if (angle_index == angle_discretization) { angle_index--; } return angle_index; } // Compute the hough map __global__ void compute_hough_kernel(const int nthreads, float* houghDirectionsPointer, const float* directionsPointer, int* arrays, int* array_size, const int height, const int width, const float inlierThreshold, const int skip_pixels, const int angle_discretization, int inlier_distance) { int size = *array_size; CUDA_1D_KERNEL_LOOP(index, nthreads) // index is a pixel { // (cx, cy) is an element in the hough space. this is the index of the 2D hough_space tensor int cx = index % width; int cy = index / width; for (int i = 0; i < size; i += skip_pixels) { int offset = i; int location = arrays[offset]; int x = location % width; int y = location / width; // read the direction offset = y * width + x; float v = directionsPointer[offset]; offset = height * width + y * width + x; float u = directionsPointer[offset]; // vote if (angle_distance(cx, cy, x, y, u, v) > inlierThreshold && distance(cx, cy, x, y) < inlier_distance) { // Compute discretization angle int angle_index = discretization_angle_index(cx, cy, x, y, angle_discretization); houghDirectionsPointer[angle_index * height * width + index] = 1; } } } } // Compute the object center pixel locations __global__ void compute_object_center_indices_kernel(const int nthreads, int* objectCenterIndicesPointer, int* numObjectsPointer, int max_objects_to_consider, float* houghDirectionsPointer, int height, int width, float percentageThreshold, int object_center_kernel_radius) { CUDA_1D_KERNEL_LOOP(index, nthreads) { // (cx, cy) is an element in the hough space. 
this is the index of the 2D hough_space tensor int cx = index % width; int cy = index / width; int max_index = -1; if (houghDirectionsPointer[index] > percentageThreshold) { // check if the location is local maximum int is_local_max = 1; for (int x = cx - object_center_kernel_radius; x <= cx + object_center_kernel_radius; x++) { for (int y = cy - object_center_kernel_radius; y <= cy + object_center_kernel_radius; y++) { if (x >= 0 && x < width && y >= 0 && y < height) { if (houghDirectionsPointer[y * width + x] > houghDirectionsPointer[index]) { is_local_max = 0; break; } // tie breaking if (houghDirectionsPointer[y * width + x] == houghDirectionsPointer[index] && y * width + x > index) { is_local_max = 0; break; } } } } // add the location to object_center_indices if (is_local_max == 1 && (max_index + 1) < max_objects_to_consider) { max_index = atomicAdd(numObjectsPointer, 1); objectCenterIndicesPointer[max_index] = index; // printf("Max_index: %d, numObjectsPointer: %d", max_index, *numObjectsPointer); // printf can be used on CUDA device. prints to terminal (not jupyter notebook). Note that no stderr is used here. GPU doesn't know what that is } } } } // Compute the hough map __global__ void compute_initial_masks_kernel(const int nthreads, int* initialMasksPointer, int* objectCenterIndicesPointer, int* numObjectsPointer, const float* directionsPointer, int* arrays, const int height, const int width, const float inlierThreshold) { // int size = *array_size; // TODO: Use this in CUDA_1D_KERNEL_LOOP int num_object_centers = *numObjectsPointer; CUDA_1D_KERNEL_LOOP(index, nthreads) // index is a foreground pixel location { // Get (x,y) pixel location int location = arrays[index]; int x = location % width; int y = location / width; // read the direction int offset = y * width + x; float v = directionsPointer[offset]; offset = height * width + y * width + x; float u = directionsPointer[offset]; // Keep track of closest center int closest_center_index = -1; float closest_center_distance = 1.0e6; // Something ridiculously large to start with for (int i = 0; i < num_object_centers; i++) { // Get (cx, cy) object center int cx = objectCenterIndicesPointer[i] % width; int cy = objectCenterIndicesPointer[i] / width; float dist_to_center = distance(cx, cy, x, y); float dist_in_angle = angle_distance(cx, cy, x, y, u, v); if (dist_in_angle > inlierThreshold && dist_to_center < closest_center_distance) { closest_center_index = i; closest_center_distance = dist_to_center; } } if (closest_center_index > -1) // We chose a center { initialMasksPointer[closest_center_index * height * width + location] = 1; // printf("Pixel (%d, %d) chose a center!\n", x, y); } } } std::vector<at::Tensor> hough_voting_cuda_forward( at::Tensor label, at::Tensor directions, int skip_pixels, float inlierThreshold, int angle_discretization, int inlier_distance, float percentageThreshold, int object_center_kernel_radius) { const int kThreadsPerBlock = 1024; hipError_t err; const int batch_size = directions.size(0); const int height = directions.size(2); const int width = directions.size(3); int num_pixels = height * width; // Create initial center masks int max_objects_to_consider = 50; // Cap number of potential objects to 50 auto initial_masks = at::zeros({batch_size, max_objects_to_consider, height, width}, label.options()); auto object_center_indices = at::zeros({batch_size, max_objects_to_consider}, label.options()); auto num_objects = at::zeros({batch_size}, label.options()); for (int batch_index = 0; batch_index < 
batch_size; batch_index++) { // Get all the pointers for this batch const int* labelPointer = label.data<int>() + batch_index * height * width; const float* directionsPointer = directions.data<float>() + batch_index * height * width * DIRECTIONS_CHANNELS; // step 1: compute a label index array auto arrays = at::zeros({height * width}, label.options()); // this array is a list of foreground pixel indices auto array_sizes = at::zeros({1}, label.options()); // how many pixels vote for foreground? hipLaunchKernelGGL(( compute_arrays_kernel), dim3((num_pixels + kThreadsPerBlock - 1) / kThreadsPerBlock), dim3(kThreadsPerBlock), 0, 0, num_pixels, labelPointer, arrays.data<int>(), array_sizes.data<int>()); // fills those pixel arrays hipDeviceSynchronize(); // Check for errors err = hipGetLastError(); if(hipSuccess != err) { fprintf( stderr, "cudaCheckError() failed compute label index: %s\n", hipGetErrorString( err ) ); // fprintf can be used from CPU. prints to terminal (not jupyter notebook) exit( -1 ); } // step 2: compute the hough directions auto hough_directions_frame = at::zeros({angle_discretization, height, width}, directions.options()); hipLaunchKernelGGL(( compute_hough_kernel), dim3((num_pixels + kThreadsPerBlock - 1) / kThreadsPerBlock), dim3(kThreadsPerBlock), 0, 0, num_pixels, hough_directions_frame.data<float>(), directionsPointer, arrays.data<int>(), array_sizes.data<int>(), height, width, inlierThreshold, skip_pixels, angle_discretization, inlier_distance); hough_directions_frame = hough_directions_frame.sum(/*dim=*/0) / angle_discretization; // Divide to get a percentage. Shape: [H x W] hipDeviceSynchronize(); // Check for errors err = hipGetLastError(); if(hipSuccess != err) { fprintf( stderr, "cudaCheckError() failed compute hough space: %s\n", hipGetErrorString( err ) ); exit( -1 ); } // step 3: find the local maximums in hough directions auto object_center_indices_frame = at::zeros({max_objects_to_consider}, label.options()); auto num_objects_frame = at::zeros({1}, label.options()); hipLaunchKernelGGL(( compute_object_center_indices_kernel), dim3((num_pixels + kThreadsPerBlock - 1) / kThreadsPerBlock), dim3(kThreadsPerBlock), 0, 0, num_pixels, object_center_indices_frame.data<int>(), num_objects_frame.data<int>(), max_objects_to_consider, hough_directions_frame.data<float>(), height, width, percentageThreshold, object_center_kernel_radius); hipDeviceSynchronize(); // Check for errors err = hipGetLastError(); if(hipSuccess != err) { fprintf( stderr, "cudaCheckError() failed compute maximum: %s\n", hipGetErrorString( err ) ); exit( -1 ); } // Copy the object centers for this frame to the tensor hipMemcpy(object_center_indices.data<int>() + batch_index * max_objects_to_consider, object_center_indices_frame.data<int>(), max_objects_to_consider * sizeof(int), hipMemcpyDeviceToDevice); hipMemcpy(num_objects.data<int>() + batch_index, num_objects_frame.data<int>(), sizeof(int), hipMemcpyDeviceToDevice); // step 4: Get initial masks for each object center auto initial_masks_frame = at::zeros({max_objects_to_consider, height, width}, label.options()); int num_foreground_pixels, num_objects_host; hipMemcpy(&num_foreground_pixels, array_sizes.data<int>(), sizeof(int), hipMemcpyDeviceToHost); // Copy array_sizes from GPU memory to CPU memory if (num_foreground_pixels > 0) // Calling CUDA_1D_KERNEL_LOOP with nthreads = 0 poops out { hipLaunchKernelGGL(( compute_initial_masks_kernel), dim3((num_foreground_pixels + kThreadsPerBlock - 1) / kThreadsPerBlock), dim3(kThreadsPerBlock), 0, 0, 
num_foreground_pixels, initial_masks_frame.data<int>(), object_center_indices_frame.data<int>(), num_objects_frame.data<int>(), directionsPointer, arrays.data<int>(), height, width, inlierThreshold); hipDeviceSynchronize(); } // Check for errors err = hipGetLastError(); if(hipSuccess != err) { fprintf( stderr, "cudaCheckError() failed compute initial masks: %s\n", hipGetErrorString( err ) ); exit( -1 ); } // Copy the initial masks for this frame to the tensor hipMemcpy(initial_masks.data<int>() + batch_index * max_objects_to_consider * height * width, initial_masks_frame.data<int>(), max_objects_to_consider * height * width * sizeof(int), hipMemcpyDeviceToDevice); } return {initial_masks, num_objects, object_center_indices}; }
809ec4863b3e6c950248a7ea32e7695db2b4ea95.cu
#include <ATen/ATen.h> #include <cuda.h> #include <cuda_runtime.h> #include <stdio.h> #include <cfloat> #include <time.h> #include <thrust/extrema.h> #include <Eigen/Geometry> #include <cublas_v2.h> #define DIRECTIONS_CHANNELS 2 #define CUDA_1D_KERNEL_LOOP(i, n) \ for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; \ i += blockDim.x * gridDim.x) // Compute 2D Euclidean distance between [x,y] and [u,v] __device__ inline float distance(int x, int y, int u, int v) { float dx = x - u; float dy = y - v; return sqrt(dx * dx + dy * dy); } // Compute the cosine similarity between [cx,cy] - [x,y], and [u,v] __device__ inline float angle_distance(int cx, int cy, int x, int y, float u, float v) { float dx = cx - x; float dy = cy - y; float n1 = sqrt(u * u + v * v); float n2 = sqrt(dx * dx + dy * dy); float dot = u * dx + v * dy; float distance = dot / (n1 * n2); return distance; } // Compute the arrays kernel, which gives a list of foreground pixel locations __global__ void compute_arrays_kernel(const int nthreads, const int* labelPointer, int* arrays, int* array_size) { CUDA_1D_KERNEL_LOOP(index, nthreads) { int cls = labelPointer[index]; if (cls > 0) { int offset = atomicAdd(array_size, 1); arrays[offset] = index; } } } // Compute dot product with [0, 1] __device__ inline int discretization_angle_index(int cx, int cy, int x, int y, int angle_discretization) { float cos_sim = .5 * (angle_distance(cx, cy, x, y, 0., 1.) + 1); // in range [0,1] int angle_index = static_cast<int>(floor(cos_sim * (float) angle_discretization)); if (angle_index == angle_discretization) { angle_index--; } return angle_index; } // Compute the hough map __global__ void compute_hough_kernel(const int nthreads, float* houghDirectionsPointer, const float* directionsPointer, int* arrays, int* array_size, const int height, const int width, const float inlierThreshold, const int skip_pixels, const int angle_discretization, int inlier_distance) { int size = *array_size; CUDA_1D_KERNEL_LOOP(index, nthreads) // index is a pixel { // (cx, cy) is an element in the hough space. this is the index of the 2D hough_space tensor int cx = index % width; int cy = index / width; for (int i = 0; i < size; i += skip_pixels) { int offset = i; int location = arrays[offset]; int x = location % width; int y = location / width; // read the direction offset = y * width + x; float v = directionsPointer[offset]; offset = height * width + y * width + x; float u = directionsPointer[offset]; // vote if (angle_distance(cx, cy, x, y, u, v) > inlierThreshold && distance(cx, cy, x, y) < inlier_distance) { // Compute discretization angle int angle_index = discretization_angle_index(cx, cy, x, y, angle_discretization); houghDirectionsPointer[angle_index * height * width + index] = 1; } } } } // Compute the object center pixel locations __global__ void compute_object_center_indices_kernel(const int nthreads, int* objectCenterIndicesPointer, int* numObjectsPointer, int max_objects_to_consider, float* houghDirectionsPointer, int height, int width, float percentageThreshold, int object_center_kernel_radius) { CUDA_1D_KERNEL_LOOP(index, nthreads) { // (cx, cy) is an element in the hough space. 
this is the index of the 2D hough_space tensor int cx = index % width; int cy = index / width; int max_index = -1; if (houghDirectionsPointer[index] > percentageThreshold) { // check if the location is local maximum int is_local_max = 1; for (int x = cx - object_center_kernel_radius; x <= cx + object_center_kernel_radius; x++) { for (int y = cy - object_center_kernel_radius; y <= cy + object_center_kernel_radius; y++) { if (x >= 0 && x < width && y >= 0 && y < height) { if (houghDirectionsPointer[y * width + x] > houghDirectionsPointer[index]) { is_local_max = 0; break; } // tie breaking if (houghDirectionsPointer[y * width + x] == houghDirectionsPointer[index] && y * width + x > index) { is_local_max = 0; break; } } } } // add the location to object_center_indices if (is_local_max == 1 && (max_index + 1) < max_objects_to_consider) { max_index = atomicAdd(numObjectsPointer, 1); objectCenterIndicesPointer[max_index] = index; // printf("Max_index: %d, numObjectsPointer: %d", max_index, *numObjectsPointer); // printf can be used on CUDA device. prints to terminal (not jupyter notebook). Note that no stderr is used here. GPU doesn't know what that is } } } } // Compute the hough map __global__ void compute_initial_masks_kernel(const int nthreads, int* initialMasksPointer, int* objectCenterIndicesPointer, int* numObjectsPointer, const float* directionsPointer, int* arrays, const int height, const int width, const float inlierThreshold) { // int size = *array_size; // TODO: Use this in CUDA_1D_KERNEL_LOOP int num_object_centers = *numObjectsPointer; CUDA_1D_KERNEL_LOOP(index, nthreads) // index is a foreground pixel location { // Get (x,y) pixel location int location = arrays[index]; int x = location % width; int y = location / width; // read the direction int offset = y * width + x; float v = directionsPointer[offset]; offset = height * width + y * width + x; float u = directionsPointer[offset]; // Keep track of closest center int closest_center_index = -1; float closest_center_distance = 1.0e6; // Something ridiculously large to start with for (int i = 0; i < num_object_centers; i++) { // Get (cx, cy) object center int cx = objectCenterIndicesPointer[i] % width; int cy = objectCenterIndicesPointer[i] / width; float dist_to_center = distance(cx, cy, x, y); float dist_in_angle = angle_distance(cx, cy, x, y, u, v); if (dist_in_angle > inlierThreshold && dist_to_center < closest_center_distance) { closest_center_index = i; closest_center_distance = dist_to_center; } } if (closest_center_index > -1) // We chose a center { initialMasksPointer[closest_center_index * height * width + location] = 1; // printf("Pixel (%d, %d) chose a center!\n", x, y); } } } std::vector<at::Tensor> hough_voting_cuda_forward( at::Tensor label, at::Tensor directions, int skip_pixels, float inlierThreshold, int angle_discretization, int inlier_distance, float percentageThreshold, int object_center_kernel_radius) { const int kThreadsPerBlock = 1024; cudaError_t err; const int batch_size = directions.size(0); const int height = directions.size(2); const int width = directions.size(3); int num_pixels = height * width; // Create initial center masks int max_objects_to_consider = 50; // Cap number of potential objects to 50 auto initial_masks = at::zeros({batch_size, max_objects_to_consider, height, width}, label.options()); auto object_center_indices = at::zeros({batch_size, max_objects_to_consider}, label.options()); auto num_objects = at::zeros({batch_size}, label.options()); for (int batch_index = 0; batch_index < 
batch_size; batch_index++) { // Get all the pointers for this batch const int* labelPointer = label.data<int>() + batch_index * height * width; const float* directionsPointer = directions.data<float>() + batch_index * height * width * DIRECTIONS_CHANNELS; // step 1: compute a label index array auto arrays = at::zeros({height * width}, label.options()); // this array is a list of foreground pixel indices auto array_sizes = at::zeros({1}, label.options()); // how many pixels vote for foreground? compute_arrays_kernel<<<(num_pixels + kThreadsPerBlock - 1) / kThreadsPerBlock, kThreadsPerBlock>>>( num_pixels, labelPointer, arrays.data<int>(), array_sizes.data<int>()); // fills those pixel arrays cudaThreadSynchronize(); // Check for errors err = cudaGetLastError(); if(cudaSuccess != err) { fprintf( stderr, "cudaCheckError() failed compute label index: %s\n", cudaGetErrorString( err ) ); // fprintf can be used from CPU. prints to terminal (not jupyter notebook) exit( -1 ); } // step 2: compute the hough directions auto hough_directions_frame = at::zeros({angle_discretization, height, width}, directions.options()); compute_hough_kernel<<<(num_pixels + kThreadsPerBlock - 1) / kThreadsPerBlock, kThreadsPerBlock>>>( num_pixels, hough_directions_frame.data<float>(), directionsPointer, arrays.data<int>(), array_sizes.data<int>(), height, width, inlierThreshold, skip_pixels, angle_discretization, inlier_distance); hough_directions_frame = hough_directions_frame.sum(/*dim=*/0) / angle_discretization; // Divide to get a percentage. Shape: [H x W] cudaThreadSynchronize(); // Check for errors err = cudaGetLastError(); if(cudaSuccess != err) { fprintf( stderr, "cudaCheckError() failed compute hough space: %s\n", cudaGetErrorString( err ) ); exit( -1 ); } // step 3: find the local maximums in hough directions auto object_center_indices_frame = at::zeros({max_objects_to_consider}, label.options()); auto num_objects_frame = at::zeros({1}, label.options()); compute_object_center_indices_kernel<<<(num_pixels + kThreadsPerBlock - 1) / kThreadsPerBlock, kThreadsPerBlock>>>( num_pixels, object_center_indices_frame.data<int>(), num_objects_frame.data<int>(), max_objects_to_consider, hough_directions_frame.data<float>(), height, width, percentageThreshold, object_center_kernel_radius); cudaThreadSynchronize(); // Check for errors err = cudaGetLastError(); if(cudaSuccess != err) { fprintf( stderr, "cudaCheckError() failed compute maximum: %s\n", cudaGetErrorString( err ) ); exit( -1 ); } // Copy the object centers for this frame to the tensor cudaMemcpy(object_center_indices.data<int>() + batch_index * max_objects_to_consider, object_center_indices_frame.data<int>(), max_objects_to_consider * sizeof(int), cudaMemcpyDeviceToDevice); cudaMemcpy(num_objects.data<int>() + batch_index, num_objects_frame.data<int>(), sizeof(int), cudaMemcpyDeviceToDevice); // step 4: Get initial masks for each object center auto initial_masks_frame = at::zeros({max_objects_to_consider, height, width}, label.options()); int num_foreground_pixels, num_objects_host; cudaMemcpy(&num_foreground_pixels, array_sizes.data<int>(), sizeof(int), cudaMemcpyDeviceToHost); // Copy array_sizes from GPU memory to CPU memory if (num_foreground_pixels > 0) // Calling CUDA_1D_KERNEL_LOOP with nthreads = 0 poops out { compute_initial_masks_kernel<<<(num_foreground_pixels + kThreadsPerBlock - 1) / kThreadsPerBlock, kThreadsPerBlock>>>( num_foreground_pixels, initial_masks_frame.data<int>(), object_center_indices_frame.data<int>(), num_objects_frame.data<int>(), 
directionsPointer, arrays.data<int>(), height, width, inlierThreshold); cudaThreadSynchronize(); } // Check for errors err = cudaGetLastError(); if(cudaSuccess != err) { fprintf( stderr, "cudaCheckError() failed compute initial masks: %s\n", cudaGetErrorString( err ) ); exit( -1 ); } // Copy the initial masks for this frame to the tensor cudaMemcpy(initial_masks.data<int>() + batch_index * max_objects_to_consider * height * width, initial_masks_frame.data<int>(), max_objects_to_consider * height * width * sizeof(int), cudaMemcpyDeviceToDevice); } return {initial_masks, num_objects, object_center_indices}; }
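As a reference for the voting kernels above, the snippet below is a CPU-side restatement (added for illustration only; it is not part of this extension) of the binning performed by discretization_angle_index: the cosine similarity of the pixel-to-center vector with the fixed direction [0, 1] is rescaled to [0, 1] and split into angle_discretization bins.

// Illustration only: CPU restatement of discretization_angle_index above.
#include <cmath>
#include <cstdio>

static int angleBin(int cx, int cy, int x, int y, int angle_discretization) {
    float dx = (float)(cx - x);
    float dy = (float)(cy - y);
    // cosine between the pixel-to-center vector and the reference direction [0, 1]
    float cos_sim = dy / std::sqrt(dx * dx + dy * dy);
    float scaled = 0.5f * (cos_sim + 1.0f);                // map [-1, 1] to [0, 1]
    int bin = (int)std::floor(scaled * angle_discretization);
    return bin == angle_discretization ? bin - 1 : bin;    // clamp the cos_sim == 1 case
}

int main() {
    // A pixel-to-center vector along +y lands in the top bin, along -y in bin 0.
    printf("%d\n", angleBin(10, 10, 10, 0, 8));    // 7
    printf("%d\n", angleBin(10, 10, 10, 20, 8));   // 0
    return 0;
}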
d11b9d8f7543d611b62853ff9c8a84b9f73d31d9.hip
// !!! This is a file automatically generated by hipify!!!
#include <stdlib.h>
#include <stdio.h>
#include <mpi.h>

#include "species.h"
#include "version.h"

int main(int argc, char* argv[])
{
    MPI_Init(&argc, &argv);
    MPI_Comm mpcom_ftn = MPI_COMM_WORLD;

    printf("Version: %s \t Compiled: %s \n", build_git_sha, build_git_time);

    Species * species_h = nullptr;
    species_h = new Species(1);
    species_h -> load(0, 1., 1., 1., 1., 0., 0., 2., 0.1, 0);
    printf("mass = %f \n", species_h->mass[0]);

    MPI_Finalize();
    hipDeviceReset();
}
d11b9d8f7543d611b62853ff9c8a84b9f73d31d9.cu
#include <stdlib.h>
#include <stdio.h>
#include <mpi.h>

#include "species.h"
#include "version.h"

int main(int argc, char* argv[])
{
    MPI_Init(&argc, &argv);
    MPI_Comm mpcom_ftn = MPI_COMM_WORLD;

    printf("Version: %s \t Compiled: %s \n", build_git_sha, build_git_time);

    Species * species_h = nullptr;
    species_h = new Species(1);
    species_h -> load(0, 1., 1., 1., 1., 0., 0., 2., 0.1, 0);
    printf("mass = %f \n", species_h->mass[0]);

    MPI_Finalize();
    cudaDeviceReset();
}
d79ab6da348eadff67afa93743ca1f0a366f5dfe.hip
// !!! This is a file automatically generated by hipify!!!
#include <mpi.h>
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>

#include "clustering.h"
#include "clustering_io.h"

#define FILE_NAME_INDEX 1
#define ITEMS_COUNT_INDEX 2
#define PARAMS_COUNT_INDEX 3
#define CLUSTERS_COUNT_INDEX 4

int main(int argc, char** argv)
{
    int rank, size;
    int itemsCount, paramsCount, clustersCount;
    int itemsPerProc, offset;
    char* filename;
    double start, end;

    MPI_Init(&argc, &argv);
    MPI_Comm_size(MPI_COMM_WORLD, &size);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    start = MPI_Wtime();

    // read cl parameters
    if (argc < 5)
    {
        if (rank == 0)
        {
            printf("Parameters must be {file} {number of items} {number of params} {number of clusters}\n");
        }
        MPI_Finalize();
        exit(1);
    }

    itemsCount = atoi(argv[ITEMS_COUNT_INDEX]);
    paramsCount = atoi(argv[PARAMS_COUNT_INDEX]);
    clustersCount = atoi(argv[CLUSTERS_COUNT_INDEX]);
    filename = argv[FILE_NAME_INDEX];

    if (itemsCount < 1 || paramsCount < 1 || clustersCount < 1)
    {
        if (rank == 0)
        {
            printf("INVALID PARAMETERS");
        }
        MPI_Finalize();
        exit(2);
    }

    // items per mpi-process
    itemsPerProc = (itemsCount + size - 1) / size;

    // calc file offset
    offset = itemsPerProc * rank;

    // last process correction
    if (size > 2 && rank == size - 1)
    {
        itemsPerProc = itemsPerProc - 1;
    }

    kMeans(filename, itemsPerProc, offset, clustersCount, paramsCount, size, rank);

    end = MPI_Wtime();

    // printf("rank: %d\n", rank);
    if (rank == 0)
    {
        testSave(end - start, itemsCount, paramsCount, clustersCount, size);
    }

    MPI_Finalize();
    return 0;
}
d79ab6da348eadff67afa93743ca1f0a366f5dfe.cu
#include <mpi.h>
#include <cuda.h>
#include <stdio.h>
#include <stdlib.h>

#include "clustering.h"
#include "clustering_io.h"

#define FILE_NAME_INDEX 1
#define ITEMS_COUNT_INDEX 2
#define PARAMS_COUNT_INDEX 3
#define CLUSTERS_COUNT_INDEX 4

int main(int argc, char** argv)
{
    int rank, size;
    int itemsCount, paramsCount, clustersCount;
    int itemsPerProc, offset;
    char* filename;
    double start, end;

    MPI_Init(&argc, &argv);
    MPI_Comm_size(MPI_COMM_WORLD, &size);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    start = MPI_Wtime();

    // read cl parameters
    if (argc < 5)
    {
        if (rank == 0)
        {
            printf("Parameters must be {file} {number of items} {number of params} {number of clusters}\n");
        }
        MPI_Finalize();
        exit(1);
    }

    itemsCount = atoi(argv[ITEMS_COUNT_INDEX]);
    paramsCount = atoi(argv[PARAMS_COUNT_INDEX]);
    clustersCount = atoi(argv[CLUSTERS_COUNT_INDEX]);
    filename = argv[FILE_NAME_INDEX];

    if (itemsCount < 1 || paramsCount < 1 || clustersCount < 1)
    {
        if (rank == 0)
        {
            printf("INVALID PARAMETERS");
        }
        MPI_Finalize();
        exit(2);
    }

    // items per mpi-process
    itemsPerProc = (itemsCount + size - 1) / size;

    // calc file offset
    offset = itemsPerProc * rank;

    // last process correction
    if (size > 2 && rank == size - 1)
    {
        itemsPerProc = itemsPerProc - 1;
    }

    kMeans(filename, itemsPerProc, offset, clustersCount, paramsCount, size, rank);

    end = MPI_Wtime();

    // printf("rank: %d\n", rank);
    if (rank == 0)
    {
        testSave(end - start, itemsCount, paramsCount, clustersCount, size);
    }

    MPI_Finalize();
    return 0;
}
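The per-rank work split in main() above is plain block partitioning. The snippet below walks through it for a small case (added for illustration; the values are made up and it is not part of the project).

// Illustration only: the block partitioning arithmetic used in main() above.
#include <cstdio>

int main() {
    int itemsCount = 11, size = 4;
    int itemsPerProc = (itemsCount + size - 1) / size;   // ceil(11 / 4) = 3
    for (int rank = 0; rank < size; rank++) {
        int offset = itemsPerProc * rank;                // 0, 3, 6, 9
        int n = itemsPerProc;
        if (size > 2 && rank == size - 1)                // same "last process correction" as above
            n = n - 1;
        printf("rank %d: offset %d, items %d\n", rank, offset, n);
    }
    // ranks 0-2 read 3 items each at offsets 0, 3, 6; rank 3 reads 2 items at offset 9
    return 0;
}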
e1b57d892468c1c93525b634c085a2e8daa92b20.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * Copyright (c) 2015 by Contributors * \file layer_norm.cu * \brief Implements Ba et. al, Layer Normalization (https://arxiv.org/abs/1607.06450). */ #include "./layer_norm-inl.h" using namespace mshadow::cuda; namespace mxnet { namespace op { template <typename DType> __device__ __forceinline__ DType warp_shfl(DType value, int src_lane, int width = 32, unsigned int mask = 0xffffffff) { #if TORCH_HIP_VERSION >= 9000 return __shfl_sync(mask, value, src_lane, width); #else return __shfl(value, src_lane, width); #endif } template <typename DType> __device__ __forceinline__ DType warp_shfl_xor(DType value, int laneMask, int width = 32, unsigned int mask = 0xffffffff) { #if TORCH_HIP_VERSION >= 9000 return __shfl_xor_sync(mask, value, laneMask, width); #else return __shfl_xor(value, laneMask, width); #endif } /* A single updating step of the Welford's online algorithm to calculate the mean and variance. * The value 'curr' will be accumulated to the (mean, sigma2, count) triplet. * */ template<typename DType, typename IType> __device__ __forceinline__ void StepWelfordOnlineSum(const DType curr, DType& mean, //NOLINT DType& sigma2, //NOLINT IType& count) { //NOLINT count += IType(1); DType delta = curr - mean; mean += delta / count; sigma2 += delta * (curr - mean); } /* Merge the mean/variance of two partitions. It's the key step of the Chan's parallel algorithm. * The (lhs_mean, lhs_sigma2, lhs_count) will be merged into (rhs_mean, rhs_sigma2, rhs_count) * * See https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance for more details. * * TODO(sxjscience) Explore the possibility of int lhs_count and rhs_count */ template<typename DType, typename IType> __device__ __inline__ void ChanMergePartition(const DType lhs_mean, const DType lhs_sigma2, const IType lhs_count, DType& rhs_mean, //NOLINT DType& rhs_sigma2, //NOLINT IType& rhs_count) { //NOLINT DType delta = rhs_mean - lhs_mean; DType nA = static_cast<DType>(lhs_count); DType nB = static_cast<DType>(rhs_count); rhs_count = nA + nB; if (rhs_count > DType(0)) { nA = nA / rhs_count; nB = nB / rhs_count; rhs_mean = nA * lhs_mean + nB * rhs_mean; rhs_sigma2 = rhs_sigma2 + lhs_sigma2 + delta * delta * nA * nB * rhs_count; } else { rhs_mean = DType(0); rhs_sigma2 = DType(0); } } /* Split the input column into multiple partitions and compute the mean/sigma of each partition. * Each thread will keep a mean/sigma2. The mean/sigma2 can be further merged to get the mean and * sigma2 of the column. 
*/ template<typename AType, typename DType, typename IType> __device__ __forceinline__ void BlockWelfordOnlineSum(const DType* __restrict__ col_vals, const int nchannel, AType& mean, //NOLINT AType& sigma2, //NOLINT IType& count) { //NOLINT int tid = threadIdx.x + threadIdx.y * blockDim.x; const int nthread = blockDim.x * blockDim.y; // Each thread takes charge of 4 consecutive numbers. This should optimize the loading speed using // vectorized types like float4. // Also, to minimize branch divergence, we split the for-loop into two parts. int l = 4 * tid; for (; l + 3 < nchannel; l += 4 * nthread) { #pragma unroll for (int i = 0; i < 4; ++i) { StepWelfordOnlineSum(static_cast<AType>(col_vals[l + i]), mean, sigma2, count); } } for (; l < nchannel; ++l) { StepWelfordOnlineSum(static_cast<AType>(col_vals[l]), mean, sigma2, count); } } template<> __device__ __forceinline__ void BlockWelfordOnlineSum<float, mshadow::half::half_t, int> (const mshadow::half::half_t* __restrict__ col_vals, const int nchannel, float& mean, //NOLINT float& sigma2, //NOLINT int& count) { //NOLINT int tid = threadIdx.x + threadIdx.y * blockDim.x; const int nthread = blockDim.x * blockDim.y; // We cast the input half pointer to half2 to optimize the loading speed. // Here, we need to notice that CUDA forces memory alignment, i.e., // ASSERT static_cast<size_t>(ptr) % sizeof(dtype) == 0. // Thus, we need to shift the address of the half pointer to be aligned by half2. int align_shift = (reinterpret_cast<size_t>(col_vals) % 4) != 0; int padding = (nchannel - align_shift) % 2; int half2_size = (nchannel - align_shift) / 2; const __half2* half2_col_vals = reinterpret_cast<const __half2*>(col_vals + align_shift); if (threadIdx.x == 0 && threadIdx.y == 0) { if (align_shift) { StepWelfordOnlineSum(__half2float(col_vals[0].cuhalf_), mean, sigma2, count); } if (padding) { StepWelfordOnlineSum(__half2float(col_vals[nchannel - 1].cuhalf_), mean, sigma2, count); } } for (int l = tid; l < half2_size; l += nthread) { float2 ele_val = __half22float2(half2_col_vals[l]); StepWelfordOnlineSum(ele_val.x, mean, sigma2, count); StepWelfordOnlineSum(ele_val.y, mean, sigma2, count); } } /* Fused CUDA kernel for the forward pass of layer normalization. * It computes the LayerNorm when axis=-1, i.e., contiguous reduction scenario. 
* Shape of the input tensors: * in_data = (nbatch, nchannel) * gamma = (nchannel,) * beta = (nchannel,) * out_data = (nchannel,) * mean_data = (nbatch,) * var_data = (nbatch,) * It's always launched with (blockDim.x, blockDim.y) = (WARP_SIZE, blockDim.y) * Also, when blockDim.y > 1, it requires shared memory that has size: * sizeof(AType) * blockDim.y + sizeof(int) * blockDim.y / 2 */ template<typename AType, typename DType, typename IType> __global__ void LayerNormFusedForwardKernelContig(const int nbatch, const int nchannel, const AType eps, const DType* __restrict__ in_data, const DType* __restrict__ gamma, const DType* __restrict__ beta, DType* __restrict__ out_data, DType* __restrict__ mean_data, DType* __restrict__ std_data) { int bid = blockIdx.x + blockIdx.y * gridDim.x; const int tid = threadIdx.y * blockDim.x + threadIdx.x; const int nthread = blockDim.x * blockDim.y; IType count = 0; AType mean = 0; AType sigma2 = 0; if (bid < nbatch) { extern __shared__ char buf[]; // Shared memory const DType* col_vals = in_data + bid * nchannel; BlockWelfordOnlineSum(col_vals, nchannel, mean, sigma2, count); // Merge the mean/sigma2 within a warp // Use the Chan's Parallel Algorithm to merge all (mean, sigma2, counts) // within a warp of threads. // After calling the function, threadIdx.x == 0 will store the result of // the aggregated (mean, sigma2, counts). for (int mask = blockDim.x / 2; mask > 0; mask >>= 1) { AType meanB = warp_shfl_xor(mean, mask); AType sigma2B = warp_shfl_xor(sigma2, mask); IType countB = warp_shfl_xor(count, mask); ChanMergePartition(meanB, sigma2B, countB, mean, sigma2, count); } if (blockDim.y > 1) { // Inter-warp reduction. Copy the upper-half of the warps to shared memory // and merge with the lower-half warp AType* mean_buf = reinterpret_cast<AType*>(buf); AType* sigma2_buf = reinterpret_cast<AType*>(buf + sizeof(AType) * blockDim.y / 2 * blockDim.x); IType* count_buf = reinterpret_cast<IType*>(buf + sizeof(AType) * blockDim.y * blockDim.x); for (int offset = blockDim.y / 2; offset > 0; offset >>= 1) { if (threadIdx.y >= offset && threadIdx.y < 2 * offset) { const int idx = (threadIdx.y - offset) * blockDim.x + threadIdx.x; mean_buf[idx] = mean; sigma2_buf[idx] = sigma2; count_buf[idx] = count; } __syncthreads(); if (threadIdx.y < offset) { const int idx = threadIdx.y * blockDim.x + threadIdx.x; ChanMergePartition(mean_buf[idx], sigma2_buf[idx], count_buf[idx], mean, sigma2, count); } __syncthreads(); } // Broadcast the result to all threads if (threadIdx.y == 0) { mean_buf[threadIdx.x] = mean; sigma2_buf[threadIdx.x] = sigma2; } __syncthreads(); mean = mean_buf[threadIdx.x]; sigma2 = sigma2_buf[threadIdx.x] / nchannel; } else { sigma2 /= nchannel; } // Calculate the out_data: gamma * (x - mean) / sqrt(var + eps) + beta AType std_eps = sqrt(sigma2 + eps); AType invstd_eps = DType(1.0) / std_eps; DType* out_col_val = out_data + bid * nchannel; if (gamma != NULL && beta != NULL) { for (int i = tid; i < nchannel; i += nthread) { out_col_val[i] = gamma[i] * static_cast<DType>(invstd_eps * (static_cast<AType>(col_vals[i]) - mean)) + beta[i]; } } else if (gamma == NULL && beta != NULL) { for (int i = tid; i < nchannel; i += nthread) { out_col_val[i] = static_cast<DType>(invstd_eps * (static_cast<AType>(col_vals[i]) - mean)) + beta[i]; } } else if (gamma != NULL && beta == NULL) { for (int i = tid; i < nchannel; i += nthread) { out_col_val[i] = gamma[i] * static_cast<DType>(invstd_eps * (static_cast<AType>(col_vals[i]) - mean)); } } else { for (int i = tid; i < 
nchannel; i += nthread) { out_col_val[i] = static_cast<DType>(invstd_eps * (static_cast<AType>(col_vals[i]) - mean)); } } // Write the out_data and var_data if (threadIdx.x == 0 && threadIdx.y == 0) { mean_data[bid] = static_cast<DType>(mean); std_data[bid] = static_cast<DType>(std_eps); } } } template<bool safe_acc = false> void LayerNormGPUContig(const LayerNormParam param, const OpContext& ctx, const std::vector<TBlob>& inputs, const std::vector<OpReqType>& req, const std::vector<TBlob>& outputs) { using namespace mshadow; CHECK_EQ(inputs.size(), 3U); mxnet::TShape data_shape(2, 0); mxnet::TShape mean_shape(1, 0); size_t in_ndim = inputs[layernorm::kData].ndim(); data_shape[0] = mean_shape[0] = inputs[layernorm::kData].shape_.ProdShape(0, in_ndim - 1); data_shape[1] = inputs[layernorm::kData].shape_[in_ndim - 1]; const TBlob in_data = inputs[layernorm::kData].reshape(data_shape); const TBlob gamma = inputs[layernorm::kGamma]; const TBlob beta = inputs[layernorm::kBeta]; const TBlob out_data = outputs[layernorm::kOut].reshape(data_shape); const TBlob mean_data = outputs[layernorm::kMean].reshape(mean_shape); const TBlob std_data = outputs[layernorm::kStd].reshape(mean_shape); // Make sure the inputs are contiguous CHECK_EQ(in_data.CheckContiguous(), true); CHECK_EQ(gamma.CheckContiguous(), true); CHECK_EQ(beta.CheckContiguous(), true); CHECK_EQ(out_data.CheckContiguous(), true); CHECK_EQ(mean_data.CheckContiguous(), true); CHECK_EQ(std_data.CheckContiguous(), true); // Lauch the kernel. The dynamic shared memory size is // sizeof(DType) * blockDim.y * blockDim.x + sizeof(DType) * blockDim.y / 2 * blockDim.x int nbatch = data_shape[0]; int nchannel = data_shape[1]; float eps = param.eps; int ngrid_x = (nbatch > kMaxGridDim) ? (nbatch + kBaseGridNum - 1) / kBaseGridNum : nbatch; int ngrid_y = (nbatch > kMaxGridDim) ? kBaseGridNum : 1; int nthread_y; const dim3 dimGrid(ngrid_x, ngrid_y); if (nchannel <= 128) { nthread_y = 1; } else if (nchannel <= 512) { nthread_y = 2; } else { nthread_y = 4; } hipStream_t stream = Stream<gpu>::GetStream(ctx.get_stream<gpu>()); const dim3 dimBlock(32, nthread_y); MXNET_REAL_ACC_TYPE_SWITCH(in_data.type_flag_, DType, AccType, { typedef typename std::conditional<safe_acc, AccType, DType>::type AType; int nshared = nthread_y > 1 ? 
nthread_y * 32 * sizeof(AType) + (nthread_y / 2) * 32 * sizeof(int) : 0; CheckLaunchParam(dimGrid, dimBlock); hipLaunchKernelGGL(( LayerNormFusedForwardKernelContig<AType, DType, int>) , dim3(dimGrid), dim3(dimBlock), nshared, stream, nbatch, nchannel, static_cast<AType>(eps), in_data.dptr<DType>(), gamma.dptr<DType>(), beta.dptr<DType>(), out_data.dptr<DType>(), mean_data.dptr<DType>(), std_data.dptr<DType>()); }); MSHADOW_CUDA_POST_KERNEL_CHECK(LayerNormFusedForwardKernelContig); } template<> void LayerNormCompute<gpu>(const nnvm::NodeAttrs& attrs, const OpContext& ctx, const std::vector<TBlob>& inputs, const std::vector<OpReqType>& req, const std::vector<TBlob>& outputs) { const LayerNormParam& param = nnvm::get<LayerNormParam>(attrs.parsed); if (req[0] == kNullOp) return; CHECK_NE(req[0], kAddTo); int axis = param.axis; if (axis < 0) { axis += static_cast<int>(inputs[0].ndim()); } CHECK(axis >= 0 && axis < inputs[0].ndim()) << "Channel axis out of range: " << param.axis; if (axis == inputs[0].ndim() - 1) { // Try to use the accelerated CUDA kernels bool safe_acc = dmlc::GetEnv("MXNET_SAFE_ACCUMULATION", false); if (safe_acc) { return LayerNormGPUContig<true>(param, ctx, inputs, req, outputs); } else { return LayerNormGPUContig<false>(param, ctx, inputs, req, outputs); } } return LayerNormComputeGeneral<gpu>(attrs, ctx, inputs, req, outputs); } /* Fused CUDA kernel for calculating the gradient w.r.t gamma/beta in LayerNorm when axis=-1 * (Contiguous case). * The gradient of gamma and beta are: * d_gamma = sum(out_grad * (x - mean) / std, axis=0) * d_beta = sum(out_grad, axis=0) * * We compute the gradient (mainly reduction over a non-contiguous axis) using two steps to * improve the parallelism. * * In the first step, we divide the rows uniformly into K parts. K independent threadblocks are used * to calculate the partial reduction result of each part. Illustrated below: * * 1st Block 2nd Block 3rd Block k-th Block * | --------------- | ---------------- | --------------- | ... | ---------------- | * | --------------- | ---------------- | --------------- | ... | ---------------- | * | --------------- | ---------------- | --------------- | ... | ---------------- | * | --------------- | ---------------- | --------------- | ... | ---------------- | * part_gamma[0] part_gamma[1] part_gamma[2] part_gamma[k-1] * part_beta[0] part_beta[1] part_beta[2] part_beta[k-1] * * * In the second step, we sum up the row-values in part_gamma and part_beta. * * This `LayerNormFusedBackwardKernel_PartGammaBeta` function implements the first step and * `LayerNormFusedBackwardKernel_GammaBeta` implements the second step. */ template<typename AType, typename DType> __global__ void LayerNormFusedBackwardKernel_PartGammaBeta(const int nbatch, const int nchannel, const DType* __restrict__ in_data, const DType* __restrict__ out_grad, const DType* __restrict__ mean_data, const DType* __restrict__ std_data, AType* __restrict__ part_gamma_grad, AType* __restrict__ part_beta_grad) { extern __shared__ char buf[]; AType* d_buf = reinterpret_cast<AType*>(buf); const int npart = gridDim.y; const int block_row_num = (nbatch + npart - 1) / npart; // The rows are divided into `npart` parts. Each threadblock calculates the reduction result // within the corresponding row ranges. 
int row_stride = blockDim.x + 1; const int c = blockIdx.x * blockDim.x + threadIdx.x; int r_begin = blockIdx.y * block_row_num; int r_end = min((blockIdx.y + 1) * block_row_num, nbatch); AType* buf_gamma_grad = d_buf; AType* buf_beta_grad = d_buf + blockDim.y * row_stride; AType local_gamma_grad = 0; AType local_beta_grad = 0; if (c < nchannel) { for (int r_b = r_begin; r_b < r_end; r_b += blockDim.y) { int r = r_b + threadIdx.y; if (r < r_end) { AType local_mean = static_cast<AType>(mean_data[r]); AType local_std = static_cast<AType>(std_data[r]); int read_idx = r * nchannel + c; AType local_in_data = static_cast<AType>(in_data[read_idx]); AType local_out_grad = static_cast<AType>(out_grad[read_idx]); local_gamma_grad += (local_in_data - local_mean) / local_std * local_out_grad; local_beta_grad += local_out_grad; } } } buf_gamma_grad[threadIdx.y * row_stride + threadIdx.x] = local_gamma_grad; buf_beta_grad[threadIdx.y * row_stride + threadIdx.x] = local_beta_grad; __syncthreads(); for (int offset = blockDim.y/2; offset > 1; offset >>= 1) { if (threadIdx.y < offset) { int idx1 = threadIdx.y * row_stride + threadIdx.x; int idx2 = (threadIdx.y + offset) * row_stride + threadIdx.x; buf_gamma_grad[idx1] += buf_gamma_grad[idx2]; buf_beta_grad[idx1] += buf_beta_grad[idx2]; } __syncthreads(); } if (threadIdx.y == 0 && c < nchannel) { part_gamma_grad[blockIdx.y * nchannel + c] = buf_gamma_grad[threadIdx.x] + buf_gamma_grad[threadIdx.x + row_stride]; part_beta_grad[blockIdx.y * nchannel + c] = buf_beta_grad[threadIdx.x] + buf_beta_grad[threadIdx.x + row_stride]; } } template<bool gamma_addto, bool beta_addto, typename AType, typename DType> __global__ void LayerNormFusedBackwardKernel_GammaBeta(const int nbatch, const int nchannel, const int npart, const AType* __restrict__ part_gamma_grad, const AType* __restrict__ part_beta_grad, DType* gamma_grad, DType* beta_grad) { const int c = blockIdx.x * blockDim.x + threadIdx.x; const int tid = threadIdx.y * blockDim.x + threadIdx.x; if (c < nchannel) { extern __shared__ char buf[]; AType* buf_gamma_grad = reinterpret_cast<AType*>(buf); AType* buf_beta_grad = reinterpret_cast<AType*>(buf) + blockDim.x * blockDim.y; buf_gamma_grad[tid] = 0; buf_beta_grad[tid] = 0; for (int r = threadIdx.y; r < npart; r += blockDim.y) { buf_gamma_grad[tid] += part_gamma_grad[r * nchannel + c]; buf_beta_grad[tid] += part_beta_grad[r * nchannel + c]; } __syncthreads(); // Begin for inter-warp reduce if (npart > 1) { for (int offset = blockDim.y/2; offset > 0; offset >>= 1) { if (threadIdx.y < offset) { int idx1 = tid; int idx2 = tid + offset * blockDim.x; buf_gamma_grad[idx1] += buf_gamma_grad[idx2]; buf_beta_grad[idx1] += buf_beta_grad[idx2]; } __syncthreads(); } } if (threadIdx.y == 0) { if (gamma_grad) { if (gamma_addto) { gamma_grad[c] += static_cast<DType>(buf_gamma_grad[threadIdx.x]); } else { gamma_grad[c] = static_cast<DType>(buf_gamma_grad[threadIdx.x]); } } if (beta_grad) { if (beta_addto) { beta_grad[c] += static_cast<DType>(buf_beta_grad[threadIdx.x]); } else { beta_grad[c] = static_cast<DType>(buf_beta_grad[threadIdx.x]); } } } } } /* * * */ template<int LOAD_UNROLL, bool data_addto, typename AType, typename DType> __global__ void LayerNormFusedBackwardKernel_Data(const int nbatch, const int nchannel, const DType* __restrict__ in_data, const DType* __restrict__ out_grad, const DType* __restrict__ mean_data, const DType* __restrict__ std_data, const DType* __restrict__ gamma, DType* data_grad) { int bid = blockIdx.x + blockIdx.y * gridDim.x; const int nthread = 
blockDim.x * blockDim.y; if (bid < nbatch) { // Shared memory with size blockDim.y * blockDim.x * sizeof(DType) extern __shared__ char buf[]; int tid = threadIdx.x + threadIdx.y * blockDim.x; // 1. Calculate: mean(out_grad * gamma / std, axis=-1) // mean(out_grad * gamma / std * (x - mean) / std, axis=-1) AType sum_val0 = 0; // Stores mean(out_grad * gamma / std, axis=-1) AType sum_val1 = 0; // Stores mean(out_grad * gamma / std * (x - mean) / std, axis=-1) AType mean = static_cast<AType>(mean_data[bid]); AType invstd_eps = AType(1) / static_cast<AType>(std_data[bid]); int l = LOAD_UNROLL * tid; for (; l + LOAD_UNROLL - 1 < nchannel; l += nthread * LOAD_UNROLL) { #pragma unroll for (int i = 0; i < LOAD_UNROLL; ++i) { AType ele_og = static_cast<AType>(out_grad[bid * nchannel + l + i]); AType ele_x = static_cast<AType>(in_data[bid * nchannel + l + i]); AType ele_gamma = static_cast<AType>(gamma[l + i]); sum_val0 += ele_og * ele_gamma * invstd_eps; sum_val1 += ele_og * ele_gamma * (ele_x - mean) * invstd_eps * invstd_eps; } } for (; l < nchannel; ++l) { AType ele_og = static_cast<AType>(out_grad[bid * nchannel + l]); AType ele_x = static_cast<AType>(in_data[bid * nchannel + l]); AType ele_gamma = static_cast<AType>(gamma[l]); sum_val0 += ele_og * ele_gamma * invstd_eps; sum_val1 += ele_og * ele_gamma * (ele_x - mean) * invstd_eps * invstd_eps; } // Intra-warp reduction (all-reduce) for (int mask = blockDim.x / 2; mask > 0; mask >>= 1) { sum_val0 += warp_shfl_xor(sum_val0, mask); sum_val1 += warp_shfl_xor(sum_val1, mask); } // Inter-warp reduction (all-reduce) if (blockDim.y > 1) { AType* sum_val0_buf = reinterpret_cast<AType*>(buf); AType* sum_val1_buf = reinterpret_cast<AType*>(buf + blockDim.y / 2 * blockDim.x * sizeof(AType)); for (int offset = blockDim.y / 2; offset > 0; offset >>= 1) { if (threadIdx.y >= offset && threadIdx.y < 2 * offset) { const int idx = (threadIdx.y - offset) * blockDim.x + threadIdx.x; sum_val0_buf[idx] = sum_val0; sum_val1_buf[idx] = sum_val1; } __syncthreads(); if (threadIdx.y < offset) { const int idx = threadIdx.y * blockDim.x + threadIdx.x; sum_val0 += sum_val0_buf[idx]; sum_val1 += sum_val1_buf[idx]; } __syncthreads(); } if (threadIdx.y == 0) { sum_val0_buf[threadIdx.x] = sum_val0; sum_val1_buf[threadIdx.x] = sum_val1; } __syncthreads(); sum_val0 = sum_val0_buf[threadIdx.x]; sum_val1 = sum_val1_buf[threadIdx.x]; } sum_val0 /= nchannel; sum_val1 /= nchannel; // 2. 
Calculate the gradient as // out_grad * gamma / std - sum_val0 - (x - mean) / std * sum_val1 for (int l = tid; l < nchannel; l += nthread) { AType ele_out_grad = static_cast<AType>(out_grad[bid * nchannel + l]); AType ele_x = static_cast<AType>(in_data[bid * nchannel + l]); AType ele_gamma = static_cast<AType>(gamma[l]); if (data_addto) { data_grad[bid * nchannel + l] += static_cast<DType>(ele_out_grad * ele_gamma * invstd_eps - sum_val0 - (ele_x - mean) * invstd_eps * sum_val1); } else { data_grad[bid * nchannel + l] = static_cast<DType>(ele_out_grad * ele_gamma * invstd_eps - sum_val0 - (ele_x - mean) * invstd_eps * sum_val1); } } } } void GetGammaBetaGradKernelParams(const int nbatch, const int nchannel, dim3* part_grad_block_dim, dim3* part_grad_grid_dim, dim3* gb_block_dim, dim3* gb_grid_dim, int* npart) { *npart = 16; *part_grad_block_dim = dim3(32, 16); *part_grad_grid_dim = dim3((nchannel + 32 - 1) / 32, *npart); *gb_block_dim = dim3(32, *npart); *gb_grid_dim = dim3((nchannel + 32 - 1) / 32); CheckLaunchParam(*part_grad_grid_dim, *part_grad_block_dim); CheckLaunchParam(*gb_grid_dim, *gb_block_dim); } template<bool safe_acc = false> void LayerNormGradGPUContig(const LayerNormParam param, const OpContext& ctx, const std::vector<TBlob>& inputs, const std::vector<OpReqType>& req, const std::vector<TBlob>& outputs) { using namespace mshadow; CHECK_EQ(inputs.size(), 5U); const TBlob out_grad = inputs[0]; const TBlob in_data = inputs[1]; const TBlob gamma = inputs[2]; const TBlob mean_data = inputs[3]; const TBlob std_data = inputs[4]; const TBlob data_grad = outputs[0]; const TBlob gamma_grad = outputs[1]; const TBlob beta_grad = outputs[2]; // Make sure the inputs are contiguous CHECK_EQ(out_grad.CheckContiguous(), true); CHECK_EQ(in_data.CheckContiguous(), true); CHECK_EQ(gamma.CheckContiguous(), true); CHECK_EQ(mean_data.CheckContiguous(), true); CHECK_EQ(std_data.CheckContiguous(), true); int nbatch = in_data.shape_.ProdShape(0, in_data.ndim() - 1); int nchannel = in_data.shape_[in_data.ndim() - 1]; int data_grad_req = req[0]; int gamma_grad_req = req[1]; int beta_grad_req = req[2]; CHECK_NE(data_grad_req, kWriteInplace); CHECK_NE(gamma_grad_req, kWriteInplace); CHECK_NE(beta_grad_req, kWriteInplace); Stream<gpu> *s = ctx.get_stream<gpu>(); hipStream_t stream = Stream<gpu>::GetStream(s); // Calculate the gradient for gamma/beta CHECK_EQ(gamma_grad.CheckContiguous(), true); CHECK_EQ(beta_grad.CheckContiguous(), true); dim3 part_grad_block_dim, part_grad_grid_dim, gb_block_dim, gb_grid_dim; int npart; GetGammaBetaGradKernelParams(nbatch, nchannel, &part_grad_block_dim, &part_grad_grid_dim, &gb_block_dim, &gb_grid_dim, &npart); if (gamma_grad_req != kNullOp || beta_grad_req != kNullOp) { MXNET_REAL_ACC_TYPE_SWITCH(in_data.type_flag_, DType, AccType, { typedef typename std::conditional<safe_acc, AccType, DType>::type AType; Tensor<gpu, 1, AType> workspace = ctx.requested[0].get_space_typed<gpu, 1, AType>(Shape1(2 * npart * nchannel), s); AType* part_gamma_grad_ptr = workspace.dptr_; AType* part_beta_grad_ptr = workspace.dptr_ + npart * nchannel; const int nshared_K1 = 2 * (part_grad_block_dim.x + 1) * part_grad_block_dim.y * sizeof(AType); const int nshared_K2 = 2 * gb_block_dim.x * gb_block_dim.y * sizeof(AType); DType* gamma_grad_ptr = (gamma_grad_req != kNullOp) ? gamma_grad.dptr<DType>() : nullptr; DType* beta_grad_ptr = (beta_grad_req != kNullOp) ? 
beta_grad.dptr<DType>() : nullptr; hipLaunchKernelGGL(( LayerNormFusedBackwardKernel_PartGammaBeta) , dim3(part_grad_grid_dim), dim3(part_grad_block_dim), nshared_K1, stream, nbatch, nchannel, in_data.dptr<DType>(), out_grad.dptr<DType>(), mean_data.dptr<DType>(), std_data.dptr<DType>(), part_gamma_grad_ptr, part_beta_grad_ptr); MSHADOW_CUDA_POST_KERNEL_CHECK(LayerNormFusedBackwardKernel_PartGammaBeta); if (gamma_grad_req == kAddTo && beta_grad_req != kAddTo) { hipLaunchKernelGGL(( LayerNormFusedBackwardKernel_GammaBeta<true, false>) , dim3(gb_grid_dim), dim3(gb_block_dim), nshared_K2, stream, nbatch, nchannel, npart, part_gamma_grad_ptr, part_beta_grad_ptr, gamma_grad_ptr, beta_grad_ptr); } else if (gamma_grad_req != kAddTo && beta_grad_req == kAddTo) { hipLaunchKernelGGL(( LayerNormFusedBackwardKernel_GammaBeta<false, true>) , dim3(gb_grid_dim), dim3(gb_block_dim), nshared_K2, stream, nbatch, nchannel, npart, part_gamma_grad_ptr, part_beta_grad_ptr, gamma_grad_ptr, beta_grad_ptr); } else if (gamma_grad_req == kAddTo && beta_grad_req == kAddTo) { hipLaunchKernelGGL(( LayerNormFusedBackwardKernel_GammaBeta<true, true>) , dim3(gb_grid_dim), dim3(gb_block_dim), nshared_K2, stream, nbatch, nchannel, npart, part_gamma_grad_ptr, part_beta_grad_ptr, gamma_grad_ptr, beta_grad_ptr); } else { hipLaunchKernelGGL(( LayerNormFusedBackwardKernel_GammaBeta<false, false>) , dim3(gb_grid_dim), dim3(gb_block_dim), nshared_K2, stream, nbatch, nchannel, npart, part_gamma_grad_ptr, part_beta_grad_ptr, gamma_grad_ptr, beta_grad_ptr); } }); MSHADOW_CUDA_POST_KERNEL_CHECK(LayerNormFusedBackwardKernel_GammaBeta); } // Calculate the gradient for data CHECK_EQ(data_grad.CheckContiguous(), true); int ngrid_x = (nbatch > kMaxGridDim) ? (nbatch + kBaseGridNum - 1) / kBaseGridNum : nbatch; int ngrid_y = (nbatch > kMaxGridDim) ? kBaseGridNum : 1; const dim3 data_grid_dim(ngrid_x, ngrid_y); int nthread_y; if (nchannel <= 32) { nthread_y = 1; } else if (nchannel <= 128) { nthread_y = 2; } else if (nchannel <= 512) { nthread_y = 4; } else { nthread_y = 8; } const dim3 data_block_dim(32, nthread_y); const int LOAD_UNROLL = 4; if (data_grad_req != kNullOp) { MXNET_REAL_ACC_TYPE_SWITCH(in_data.type_flag_, DType, AccType, { typedef typename std::conditional<safe_acc, AccType, DType>::type AType; int nshared = data_block_dim.y > 1 ? 
data_block_dim.y * data_block_dim.x * sizeof(AType) : 0; CheckLaunchParam(data_grid_dim, data_block_dim); if (data_grad_req == kAddTo) { hipLaunchKernelGGL(( LayerNormFusedBackwardKernel_Data<LOAD_UNROLL, true, AType>) , dim3(data_grid_dim), dim3(data_block_dim), nshared, stream, nbatch, nchannel, in_data.dptr<DType>(), out_grad.dptr<DType>(), mean_data.dptr<DType>(), std_data.dptr<DType>(), gamma.dptr<DType>(), data_grad.dptr<DType>()); } else { hipLaunchKernelGGL(( LayerNormFusedBackwardKernel_Data<LOAD_UNROLL, false, AType>) , dim3(data_grid_dim), dim3(data_block_dim), nshared, stream, nbatch, nchannel, in_data.dptr<DType>(), out_grad.dptr<DType>(), mean_data.dptr<DType>(), std_data.dptr<DType>(), gamma.dptr<DType>(), data_grad.dptr<DType>()); } }); MSHADOW_CUDA_POST_KERNEL_CHECK(LayerNormFusedBackwardKernel_Data); } } template<> void LayerNormGradCompute<gpu>(const nnvm::NodeAttrs& attrs, const OpContext& ctx, const std::vector<TBlob>& inputs, const std::vector<OpReqType>& req, const std::vector<TBlob>& outputs) { const LayerNormParam& param = nnvm::get<LayerNormParam>(attrs.parsed); int axis = param.axis; if (axis < 0) { axis += static_cast<int>(inputs[0].ndim()); } CHECK(axis >= 0 && axis < inputs[0].ndim()) << "Channel axis out of range: " << param.axis; if (axis == inputs[0].ndim() - 1) { // Use the accelerated CUDA kernels bool safe_acc = dmlc::GetEnv("MXNET_SAFE_ACCUMULATION", false); if (safe_acc) { return LayerNormGradGPUContig<true>(param, ctx, inputs, req, outputs); } else { return LayerNormGradGPUContig<false>(param, ctx, inputs, req, outputs); } } return LayerNormGradComputeGeneral<gpu>(attrs, ctx, inputs, req, outputs); } NNVM_REGISTER_OP(LayerNorm) .set_attr<FCompute>("FCompute<gpu>", LayerNormCompute<gpu>); NNVM_REGISTER_OP(_backward_LayerNorm) .set_attr<FCompute>("FCompute<gpu>", LayerNormGradCompute<gpu>); } // namespace op } // namespace mxnet
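The hipified file above and the original layer_norm.cu that follows differ only in mechanical rewrites: cudaStream_t becomes hipStream_t, the CUDA_VERSION guard in the warp-shuffle helpers is rewritten as TORCH_HIP_VERSION, and every <<<grid, block, shmem, stream>>> launch becomes a hipLaunchKernelGGL call. A minimal sketch of the launch rewrite, using a hypothetical scale kernel that is not part of either file:

#include <hip/hip_runtime.h>

// Hypothetical kernel used only to illustrate the two launch syntaxes.
__global__ void scale(float *x, int n, float a) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) x[i] *= a;
}

void launch_scale(float *d_x, int n, float a, hipStream_t stream) {
  dim3 block(256);
  dim3 grid((n + block.x - 1) / block.x);
  // CUDA form (as in the .cu record below):
  //   scale<<<grid, block, 0, stream>>>(d_x, n, a);
  // HIP form (as produced by hipify in the file above):
  hipLaunchKernelGGL(scale, grid, block, 0, stream, d_x, n, a);
}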
e1b57d892468c1c93525b634c085a2e8daa92b20.cu
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * Copyright (c) 2015 by Contributors * \file layer_norm.cu * \brief Implements Ba et. al, Layer Normalization (https://arxiv.org/abs/1607.06450). */ #include "./layer_norm-inl.h" using namespace mshadow::cuda; namespace mxnet { namespace op { template <typename DType> __device__ __forceinline__ DType warp_shfl(DType value, int src_lane, int width = 32, unsigned int mask = 0xffffffff) { #if CUDA_VERSION >= 9000 return __shfl_sync(mask, value, src_lane, width); #else return __shfl(value, src_lane, width); #endif } template <typename DType> __device__ __forceinline__ DType warp_shfl_xor(DType value, int laneMask, int width = 32, unsigned int mask = 0xffffffff) { #if CUDA_VERSION >= 9000 return __shfl_xor_sync(mask, value, laneMask, width); #else return __shfl_xor(value, laneMask, width); #endif } /* A single updating step of the Welford's online algorithm to calculate the mean and variance. * The value 'curr' will be accumulated to the (mean, sigma2, count) triplet. * */ template<typename DType, typename IType> __device__ __forceinline__ void StepWelfordOnlineSum(const DType curr, DType& mean, //NOLINT DType& sigma2, //NOLINT IType& count) { //NOLINT count += IType(1); DType delta = curr - mean; mean += delta / count; sigma2 += delta * (curr - mean); } /* Merge the mean/variance of two partitions. It's the key step of the Chan's parallel algorithm. * The (lhs_mean, lhs_sigma2, lhs_count) will be merged into (rhs_mean, rhs_sigma2, rhs_count) * * See https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance for more details. * * TODO(sxjscience) Explore the possibility of int lhs_count and rhs_count */ template<typename DType, typename IType> __device__ __inline__ void ChanMergePartition(const DType lhs_mean, const DType lhs_sigma2, const IType lhs_count, DType& rhs_mean, //NOLINT DType& rhs_sigma2, //NOLINT IType& rhs_count) { //NOLINT DType delta = rhs_mean - lhs_mean; DType nA = static_cast<DType>(lhs_count); DType nB = static_cast<DType>(rhs_count); rhs_count = nA + nB; if (rhs_count > DType(0)) { nA = nA / rhs_count; nB = nB / rhs_count; rhs_mean = nA * lhs_mean + nB * rhs_mean; rhs_sigma2 = rhs_sigma2 + lhs_sigma2 + delta * delta * nA * nB * rhs_count; } else { rhs_mean = DType(0); rhs_sigma2 = DType(0); } } /* Split the input column into multiple partitions and compute the mean/sigma of each partition. * Each thread will keep a mean/sigma2. The mean/sigma2 can be further merged to get the mean and * sigma2 of the column. 
*/ template<typename AType, typename DType, typename IType> __device__ __forceinline__ void BlockWelfordOnlineSum(const DType* __restrict__ col_vals, const int nchannel, AType& mean, //NOLINT AType& sigma2, //NOLINT IType& count) { //NOLINT int tid = threadIdx.x + threadIdx.y * blockDim.x; const int nthread = blockDim.x * blockDim.y; // Each thread takes charge of 4 consecutive numbers. This should optimize the loading speed using // vectorized types like float4. // Also, to minimize branch divergence, we split the for-loop into two parts. int l = 4 * tid; for (; l + 3 < nchannel; l += 4 * nthread) { #pragma unroll for (int i = 0; i < 4; ++i) { StepWelfordOnlineSum(static_cast<AType>(col_vals[l + i]), mean, sigma2, count); } } for (; l < nchannel; ++l) { StepWelfordOnlineSum(static_cast<AType>(col_vals[l]), mean, sigma2, count); } } template<> __device__ __forceinline__ void BlockWelfordOnlineSum<float, mshadow::half::half_t, int> (const mshadow::half::half_t* __restrict__ col_vals, const int nchannel, float& mean, //NOLINT float& sigma2, //NOLINT int& count) { //NOLINT int tid = threadIdx.x + threadIdx.y * blockDim.x; const int nthread = blockDim.x * blockDim.y; // We cast the input half pointer to half2 to optimize the loading speed. // Here, we need to notice that CUDA forces memory alignment, i.e., // ASSERT static_cast<size_t>(ptr) % sizeof(dtype) == 0. // Thus, we need to shift the address of the half pointer to be aligned by half2. int align_shift = (reinterpret_cast<size_t>(col_vals) % 4) != 0; int padding = (nchannel - align_shift) % 2; int half2_size = (nchannel - align_shift) / 2; const __half2* half2_col_vals = reinterpret_cast<const __half2*>(col_vals + align_shift); if (threadIdx.x == 0 && threadIdx.y == 0) { if (align_shift) { StepWelfordOnlineSum(__half2float(col_vals[0].cuhalf_), mean, sigma2, count); } if (padding) { StepWelfordOnlineSum(__half2float(col_vals[nchannel - 1].cuhalf_), mean, sigma2, count); } } for (int l = tid; l < half2_size; l += nthread) { float2 ele_val = __half22float2(half2_col_vals[l]); StepWelfordOnlineSum(ele_val.x, mean, sigma2, count); StepWelfordOnlineSum(ele_val.y, mean, sigma2, count); } } /* Fused CUDA kernel for the forward pass of layer normalization. * It computes the LayerNorm when axis=-1, i.e., contiguous reduction scenario. 
* Shape of the input tensors: * in_data = (nbatch, nchannel) * gamma = (nchannel,) * beta = (nchannel,) * out_data = (nchannel,) * mean_data = (nbatch,) * var_data = (nbatch,) * It's always launched with (blockDim.x, blockDim.y) = (WARP_SIZE, blockDim.y) * Also, when blockDim.y > 1, it requires shared memory that has size: * sizeof(AType) * blockDim.y + sizeof(int) * blockDim.y / 2 */ template<typename AType, typename DType, typename IType> __global__ void LayerNormFusedForwardKernelContig(const int nbatch, const int nchannel, const AType eps, const DType* __restrict__ in_data, const DType* __restrict__ gamma, const DType* __restrict__ beta, DType* __restrict__ out_data, DType* __restrict__ mean_data, DType* __restrict__ std_data) { int bid = blockIdx.x + blockIdx.y * gridDim.x; const int tid = threadIdx.y * blockDim.x + threadIdx.x; const int nthread = blockDim.x * blockDim.y; IType count = 0; AType mean = 0; AType sigma2 = 0; if (bid < nbatch) { extern __shared__ char buf[]; // Shared memory const DType* col_vals = in_data + bid * nchannel; BlockWelfordOnlineSum(col_vals, nchannel, mean, sigma2, count); // Merge the mean/sigma2 within a warp // Use the Chan's Parallel Algorithm to merge all (mean, sigma2, counts) // within a warp of threads. // After calling the function, threadIdx.x == 0 will store the result of // the aggregated (mean, sigma2, counts). for (int mask = blockDim.x / 2; mask > 0; mask >>= 1) { AType meanB = warp_shfl_xor(mean, mask); AType sigma2B = warp_shfl_xor(sigma2, mask); IType countB = warp_shfl_xor(count, mask); ChanMergePartition(meanB, sigma2B, countB, mean, sigma2, count); } if (blockDim.y > 1) { // Inter-warp reduction. Copy the upper-half of the warps to shared memory // and merge with the lower-half warp AType* mean_buf = reinterpret_cast<AType*>(buf); AType* sigma2_buf = reinterpret_cast<AType*>(buf + sizeof(AType) * blockDim.y / 2 * blockDim.x); IType* count_buf = reinterpret_cast<IType*>(buf + sizeof(AType) * blockDim.y * blockDim.x); for (int offset = blockDim.y / 2; offset > 0; offset >>= 1) { if (threadIdx.y >= offset && threadIdx.y < 2 * offset) { const int idx = (threadIdx.y - offset) * blockDim.x + threadIdx.x; mean_buf[idx] = mean; sigma2_buf[idx] = sigma2; count_buf[idx] = count; } __syncthreads(); if (threadIdx.y < offset) { const int idx = threadIdx.y * blockDim.x + threadIdx.x; ChanMergePartition(mean_buf[idx], sigma2_buf[idx], count_buf[idx], mean, sigma2, count); } __syncthreads(); } // Broadcast the result to all threads if (threadIdx.y == 0) { mean_buf[threadIdx.x] = mean; sigma2_buf[threadIdx.x] = sigma2; } __syncthreads(); mean = mean_buf[threadIdx.x]; sigma2 = sigma2_buf[threadIdx.x] / nchannel; } else { sigma2 /= nchannel; } // Calculate the out_data: gamma * (x - mean) / sqrt(var + eps) + beta AType std_eps = sqrt(sigma2 + eps); AType invstd_eps = DType(1.0) / std_eps; DType* out_col_val = out_data + bid * nchannel; if (gamma != NULL && beta != NULL) { for (int i = tid; i < nchannel; i += nthread) { out_col_val[i] = gamma[i] * static_cast<DType>(invstd_eps * (static_cast<AType>(col_vals[i]) - mean)) + beta[i]; } } else if (gamma == NULL && beta != NULL) { for (int i = tid; i < nchannel; i += nthread) { out_col_val[i] = static_cast<DType>(invstd_eps * (static_cast<AType>(col_vals[i]) - mean)) + beta[i]; } } else if (gamma != NULL && beta == NULL) { for (int i = tid; i < nchannel; i += nthread) { out_col_val[i] = gamma[i] * static_cast<DType>(invstd_eps * (static_cast<AType>(col_vals[i]) - mean)); } } else { for (int i = tid; i < 
nchannel; i += nthread) { out_col_val[i] = static_cast<DType>(invstd_eps * (static_cast<AType>(col_vals[i]) - mean)); } } // Write the out_data and var_data if (threadIdx.x == 0 && threadIdx.y == 0) { mean_data[bid] = static_cast<DType>(mean); std_data[bid] = static_cast<DType>(std_eps); } } } template<bool safe_acc = false> void LayerNormGPUContig(const LayerNormParam param, const OpContext& ctx, const std::vector<TBlob>& inputs, const std::vector<OpReqType>& req, const std::vector<TBlob>& outputs) { using namespace mshadow; CHECK_EQ(inputs.size(), 3U); mxnet::TShape data_shape(2, 0); mxnet::TShape mean_shape(1, 0); size_t in_ndim = inputs[layernorm::kData].ndim(); data_shape[0] = mean_shape[0] = inputs[layernorm::kData].shape_.ProdShape(0, in_ndim - 1); data_shape[1] = inputs[layernorm::kData].shape_[in_ndim - 1]; const TBlob in_data = inputs[layernorm::kData].reshape(data_shape); const TBlob gamma = inputs[layernorm::kGamma]; const TBlob beta = inputs[layernorm::kBeta]; const TBlob out_data = outputs[layernorm::kOut].reshape(data_shape); const TBlob mean_data = outputs[layernorm::kMean].reshape(mean_shape); const TBlob std_data = outputs[layernorm::kStd].reshape(mean_shape); // Make sure the inputs are contiguous CHECK_EQ(in_data.CheckContiguous(), true); CHECK_EQ(gamma.CheckContiguous(), true); CHECK_EQ(beta.CheckContiguous(), true); CHECK_EQ(out_data.CheckContiguous(), true); CHECK_EQ(mean_data.CheckContiguous(), true); CHECK_EQ(std_data.CheckContiguous(), true); // Lauch the kernel. The dynamic shared memory size is // sizeof(DType) * blockDim.y * blockDim.x + sizeof(DType) * blockDim.y / 2 * blockDim.x int nbatch = data_shape[0]; int nchannel = data_shape[1]; float eps = param.eps; int ngrid_x = (nbatch > kMaxGridDim) ? (nbatch + kBaseGridNum - 1) / kBaseGridNum : nbatch; int ngrid_y = (nbatch > kMaxGridDim) ? kBaseGridNum : 1; int nthread_y; const dim3 dimGrid(ngrid_x, ngrid_y); if (nchannel <= 128) { nthread_y = 1; } else if (nchannel <= 512) { nthread_y = 2; } else { nthread_y = 4; } cudaStream_t stream = Stream<gpu>::GetStream(ctx.get_stream<gpu>()); const dim3 dimBlock(32, nthread_y); MXNET_REAL_ACC_TYPE_SWITCH(in_data.type_flag_, DType, AccType, { typedef typename std::conditional<safe_acc, AccType, DType>::type AType; int nshared = nthread_y > 1 ? 
nthread_y * 32 * sizeof(AType) + (nthread_y / 2) * 32 * sizeof(int) : 0; CheckLaunchParam(dimGrid, dimBlock); LayerNormFusedForwardKernelContig<AType, DType, int> <<<dimGrid, dimBlock, nshared, stream>>> (nbatch, nchannel, static_cast<AType>(eps), in_data.dptr<DType>(), gamma.dptr<DType>(), beta.dptr<DType>(), out_data.dptr<DType>(), mean_data.dptr<DType>(), std_data.dptr<DType>()); }); MSHADOW_CUDA_POST_KERNEL_CHECK(LayerNormFusedForwardKernelContig); } template<> void LayerNormCompute<gpu>(const nnvm::NodeAttrs& attrs, const OpContext& ctx, const std::vector<TBlob>& inputs, const std::vector<OpReqType>& req, const std::vector<TBlob>& outputs) { const LayerNormParam& param = nnvm::get<LayerNormParam>(attrs.parsed); if (req[0] == kNullOp) return; CHECK_NE(req[0], kAddTo); int axis = param.axis; if (axis < 0) { axis += static_cast<int>(inputs[0].ndim()); } CHECK(axis >= 0 && axis < inputs[0].ndim()) << "Channel axis out of range: " << param.axis; if (axis == inputs[0].ndim() - 1) { // Try to use the accelerated CUDA kernels bool safe_acc = dmlc::GetEnv("MXNET_SAFE_ACCUMULATION", false); if (safe_acc) { return LayerNormGPUContig<true>(param, ctx, inputs, req, outputs); } else { return LayerNormGPUContig<false>(param, ctx, inputs, req, outputs); } } return LayerNormComputeGeneral<gpu>(attrs, ctx, inputs, req, outputs); } /* Fused CUDA kernel for calculating the gradient w.r.t gamma/beta in LayerNorm when axis=-1 * (Contiguous case). * The gradient of gamma and beta are: * d_gamma = sum(out_grad * (x - mean) / std, axis=0) * d_beta = sum(out_grad, axis=0) * * We compute the gradient (mainly reduction over a non-contiguous axis) using two steps to * improve the parallelism. * * In the first step, we divide the rows uniformly into K parts. K independent threadblocks are used * to calculate the partial reduction result of each part. Illustrated below: * * 1st Block 2nd Block 3rd Block k-th Block * | --------------- | ---------------- | --------------- | ... | ---------------- | * | --------------- | ---------------- | --------------- | ... | ---------------- | * | --------------- | ---------------- | --------------- | ... | ---------------- | * | --------------- | ---------------- | --------------- | ... | ---------------- | * part_gamma[0] part_gamma[1] part_gamma[2] part_gamma[k-1] * part_beta[0] part_beta[1] part_beta[2] part_beta[k-1] * * * In the second step, we sum up the row-values in part_gamma and part_beta. * * This `LayerNormFusedBackwardKernel_PartGammaBeta` function implements the first step and * `LayerNormFusedBackwardKernel_GammaBeta` implements the second step. */ template<typename AType, typename DType> __global__ void LayerNormFusedBackwardKernel_PartGammaBeta(const int nbatch, const int nchannel, const DType* __restrict__ in_data, const DType* __restrict__ out_grad, const DType* __restrict__ mean_data, const DType* __restrict__ std_data, AType* __restrict__ part_gamma_grad, AType* __restrict__ part_beta_grad) { extern __shared__ char buf[]; AType* d_buf = reinterpret_cast<AType*>(buf); const int npart = gridDim.y; const int block_row_num = (nbatch + npart - 1) / npart; // The rows are divided into `npart` parts. Each threadblock calculates the reduction result // within the corresponding row ranges. 
int row_stride = blockDim.x + 1; const int c = blockIdx.x * blockDim.x + threadIdx.x; int r_begin = blockIdx.y * block_row_num; int r_end = min((blockIdx.y + 1) * block_row_num, nbatch); AType* buf_gamma_grad = d_buf; AType* buf_beta_grad = d_buf + blockDim.y * row_stride; AType local_gamma_grad = 0; AType local_beta_grad = 0; if (c < nchannel) { for (int r_b = r_begin; r_b < r_end; r_b += blockDim.y) { int r = r_b + threadIdx.y; if (r < r_end) { AType local_mean = static_cast<AType>(mean_data[r]); AType local_std = static_cast<AType>(std_data[r]); int read_idx = r * nchannel + c; AType local_in_data = static_cast<AType>(in_data[read_idx]); AType local_out_grad = static_cast<AType>(out_grad[read_idx]); local_gamma_grad += (local_in_data - local_mean) / local_std * local_out_grad; local_beta_grad += local_out_grad; } } } buf_gamma_grad[threadIdx.y * row_stride + threadIdx.x] = local_gamma_grad; buf_beta_grad[threadIdx.y * row_stride + threadIdx.x] = local_beta_grad; __syncthreads(); for (int offset = blockDim.y/2; offset > 1; offset >>= 1) { if (threadIdx.y < offset) { int idx1 = threadIdx.y * row_stride + threadIdx.x; int idx2 = (threadIdx.y + offset) * row_stride + threadIdx.x; buf_gamma_grad[idx1] += buf_gamma_grad[idx2]; buf_beta_grad[idx1] += buf_beta_grad[idx2]; } __syncthreads(); } if (threadIdx.y == 0 && c < nchannel) { part_gamma_grad[blockIdx.y * nchannel + c] = buf_gamma_grad[threadIdx.x] + buf_gamma_grad[threadIdx.x + row_stride]; part_beta_grad[blockIdx.y * nchannel + c] = buf_beta_grad[threadIdx.x] + buf_beta_grad[threadIdx.x + row_stride]; } } template<bool gamma_addto, bool beta_addto, typename AType, typename DType> __global__ void LayerNormFusedBackwardKernel_GammaBeta(const int nbatch, const int nchannel, const int npart, const AType* __restrict__ part_gamma_grad, const AType* __restrict__ part_beta_grad, DType* gamma_grad, DType* beta_grad) { const int c = blockIdx.x * blockDim.x + threadIdx.x; const int tid = threadIdx.y * blockDim.x + threadIdx.x; if (c < nchannel) { extern __shared__ char buf[]; AType* buf_gamma_grad = reinterpret_cast<AType*>(buf); AType* buf_beta_grad = reinterpret_cast<AType*>(buf) + blockDim.x * blockDim.y; buf_gamma_grad[tid] = 0; buf_beta_grad[tid] = 0; for (int r = threadIdx.y; r < npart; r += blockDim.y) { buf_gamma_grad[tid] += part_gamma_grad[r * nchannel + c]; buf_beta_grad[tid] += part_beta_grad[r * nchannel + c]; } __syncthreads(); // Begin for inter-warp reduce if (npart > 1) { for (int offset = blockDim.y/2; offset > 0; offset >>= 1) { if (threadIdx.y < offset) { int idx1 = tid; int idx2 = tid + offset * blockDim.x; buf_gamma_grad[idx1] += buf_gamma_grad[idx2]; buf_beta_grad[idx1] += buf_beta_grad[idx2]; } __syncthreads(); } } if (threadIdx.y == 0) { if (gamma_grad) { if (gamma_addto) { gamma_grad[c] += static_cast<DType>(buf_gamma_grad[threadIdx.x]); } else { gamma_grad[c] = static_cast<DType>(buf_gamma_grad[threadIdx.x]); } } if (beta_grad) { if (beta_addto) { beta_grad[c] += static_cast<DType>(buf_beta_grad[threadIdx.x]); } else { beta_grad[c] = static_cast<DType>(buf_beta_grad[threadIdx.x]); } } } } } /* * * */ template<int LOAD_UNROLL, bool data_addto, typename AType, typename DType> __global__ void LayerNormFusedBackwardKernel_Data(const int nbatch, const int nchannel, const DType* __restrict__ in_data, const DType* __restrict__ out_grad, const DType* __restrict__ mean_data, const DType* __restrict__ std_data, const DType* __restrict__ gamma, DType* data_grad) { int bid = blockIdx.x + blockIdx.y * gridDim.x; const int nthread = 
blockDim.x * blockDim.y; if (bid < nbatch) { // Shared memory with size blockDim.y * blockDim.x * sizeof(DType) extern __shared__ char buf[]; int tid = threadIdx.x + threadIdx.y * blockDim.x; // 1. Calculate: mean(out_grad * gamma / std, axis=-1) // mean(out_grad * gamma / std * (x - mean) / std, axis=-1) AType sum_val0 = 0; // Stores mean(out_grad * gamma / std, axis=-1) AType sum_val1 = 0; // Stores mean(out_grad * gamma / std * (x - mean) / std, axis=-1) AType mean = static_cast<AType>(mean_data[bid]); AType invstd_eps = AType(1) / static_cast<AType>(std_data[bid]); int l = LOAD_UNROLL * tid; for (; l + LOAD_UNROLL - 1 < nchannel; l += nthread * LOAD_UNROLL) { #pragma unroll for (int i = 0; i < LOAD_UNROLL; ++i) { AType ele_og = static_cast<AType>(out_grad[bid * nchannel + l + i]); AType ele_x = static_cast<AType>(in_data[bid * nchannel + l + i]); AType ele_gamma = static_cast<AType>(gamma[l + i]); sum_val0 += ele_og * ele_gamma * invstd_eps; sum_val1 += ele_og * ele_gamma * (ele_x - mean) * invstd_eps * invstd_eps; } } for (; l < nchannel; ++l) { AType ele_og = static_cast<AType>(out_grad[bid * nchannel + l]); AType ele_x = static_cast<AType>(in_data[bid * nchannel + l]); AType ele_gamma = static_cast<AType>(gamma[l]); sum_val0 += ele_og * ele_gamma * invstd_eps; sum_val1 += ele_og * ele_gamma * (ele_x - mean) * invstd_eps * invstd_eps; } // Intra-warp reduction (all-reduce) for (int mask = blockDim.x / 2; mask > 0; mask >>= 1) { sum_val0 += warp_shfl_xor(sum_val0, mask); sum_val1 += warp_shfl_xor(sum_val1, mask); } // Inter-warp reduction (all-reduce) if (blockDim.y > 1) { AType* sum_val0_buf = reinterpret_cast<AType*>(buf); AType* sum_val1_buf = reinterpret_cast<AType*>(buf + blockDim.y / 2 * blockDim.x * sizeof(AType)); for (int offset = blockDim.y / 2; offset > 0; offset >>= 1) { if (threadIdx.y >= offset && threadIdx.y < 2 * offset) { const int idx = (threadIdx.y - offset) * blockDim.x + threadIdx.x; sum_val0_buf[idx] = sum_val0; sum_val1_buf[idx] = sum_val1; } __syncthreads(); if (threadIdx.y < offset) { const int idx = threadIdx.y * blockDim.x + threadIdx.x; sum_val0 += sum_val0_buf[idx]; sum_val1 += sum_val1_buf[idx]; } __syncthreads(); } if (threadIdx.y == 0) { sum_val0_buf[threadIdx.x] = sum_val0; sum_val1_buf[threadIdx.x] = sum_val1; } __syncthreads(); sum_val0 = sum_val0_buf[threadIdx.x]; sum_val1 = sum_val1_buf[threadIdx.x]; } sum_val0 /= nchannel; sum_val1 /= nchannel; // 2. 
Calculate the gradient as // out_grad * gamma / std - sum_val0 - (x - mean) / std * sum_val1 for (int l = tid; l < nchannel; l += nthread) { AType ele_out_grad = static_cast<AType>(out_grad[bid * nchannel + l]); AType ele_x = static_cast<AType>(in_data[bid * nchannel + l]); AType ele_gamma = static_cast<AType>(gamma[l]); if (data_addto) { data_grad[bid * nchannel + l] += static_cast<DType>(ele_out_grad * ele_gamma * invstd_eps - sum_val0 - (ele_x - mean) * invstd_eps * sum_val1); } else { data_grad[bid * nchannel + l] = static_cast<DType>(ele_out_grad * ele_gamma * invstd_eps - sum_val0 - (ele_x - mean) * invstd_eps * sum_val1); } } } } void GetGammaBetaGradKernelParams(const int nbatch, const int nchannel, dim3* part_grad_block_dim, dim3* part_grad_grid_dim, dim3* gb_block_dim, dim3* gb_grid_dim, int* npart) { *npart = 16; *part_grad_block_dim = dim3(32, 16); *part_grad_grid_dim = dim3((nchannel + 32 - 1) / 32, *npart); *gb_block_dim = dim3(32, *npart); *gb_grid_dim = dim3((nchannel + 32 - 1) / 32); CheckLaunchParam(*part_grad_grid_dim, *part_grad_block_dim); CheckLaunchParam(*gb_grid_dim, *gb_block_dim); } template<bool safe_acc = false> void LayerNormGradGPUContig(const LayerNormParam param, const OpContext& ctx, const std::vector<TBlob>& inputs, const std::vector<OpReqType>& req, const std::vector<TBlob>& outputs) { using namespace mshadow; CHECK_EQ(inputs.size(), 5U); const TBlob out_grad = inputs[0]; const TBlob in_data = inputs[1]; const TBlob gamma = inputs[2]; const TBlob mean_data = inputs[3]; const TBlob std_data = inputs[4]; const TBlob data_grad = outputs[0]; const TBlob gamma_grad = outputs[1]; const TBlob beta_grad = outputs[2]; // Make sure the inputs are contiguous CHECK_EQ(out_grad.CheckContiguous(), true); CHECK_EQ(in_data.CheckContiguous(), true); CHECK_EQ(gamma.CheckContiguous(), true); CHECK_EQ(mean_data.CheckContiguous(), true); CHECK_EQ(std_data.CheckContiguous(), true); int nbatch = in_data.shape_.ProdShape(0, in_data.ndim() - 1); int nchannel = in_data.shape_[in_data.ndim() - 1]; int data_grad_req = req[0]; int gamma_grad_req = req[1]; int beta_grad_req = req[2]; CHECK_NE(data_grad_req, kWriteInplace); CHECK_NE(gamma_grad_req, kWriteInplace); CHECK_NE(beta_grad_req, kWriteInplace); Stream<gpu> *s = ctx.get_stream<gpu>(); cudaStream_t stream = Stream<gpu>::GetStream(s); // Calculate the gradient for gamma/beta CHECK_EQ(gamma_grad.CheckContiguous(), true); CHECK_EQ(beta_grad.CheckContiguous(), true); dim3 part_grad_block_dim, part_grad_grid_dim, gb_block_dim, gb_grid_dim; int npart; GetGammaBetaGradKernelParams(nbatch, nchannel, &part_grad_block_dim, &part_grad_grid_dim, &gb_block_dim, &gb_grid_dim, &npart); if (gamma_grad_req != kNullOp || beta_grad_req != kNullOp) { MXNET_REAL_ACC_TYPE_SWITCH(in_data.type_flag_, DType, AccType, { typedef typename std::conditional<safe_acc, AccType, DType>::type AType; Tensor<gpu, 1, AType> workspace = ctx.requested[0].get_space_typed<gpu, 1, AType>(Shape1(2 * npart * nchannel), s); AType* part_gamma_grad_ptr = workspace.dptr_; AType* part_beta_grad_ptr = workspace.dptr_ + npart * nchannel; const int nshared_K1 = 2 * (part_grad_block_dim.x + 1) * part_grad_block_dim.y * sizeof(AType); const int nshared_K2 = 2 * gb_block_dim.x * gb_block_dim.y * sizeof(AType); DType* gamma_grad_ptr = (gamma_grad_req != kNullOp) ? gamma_grad.dptr<DType>() : nullptr; DType* beta_grad_ptr = (beta_grad_req != kNullOp) ? 
beta_grad.dptr<DType>() : nullptr; LayerNormFusedBackwardKernel_PartGammaBeta <<<part_grad_grid_dim, part_grad_block_dim, nshared_K1, stream>>> (nbatch, nchannel, in_data.dptr<DType>(), out_grad.dptr<DType>(), mean_data.dptr<DType>(), std_data.dptr<DType>(), part_gamma_grad_ptr, part_beta_grad_ptr); MSHADOW_CUDA_POST_KERNEL_CHECK(LayerNormFusedBackwardKernel_PartGammaBeta); if (gamma_grad_req == kAddTo && beta_grad_req != kAddTo) { LayerNormFusedBackwardKernel_GammaBeta<true, false> <<<gb_grid_dim, gb_block_dim, nshared_K2, stream>>> (nbatch, nchannel, npart, part_gamma_grad_ptr, part_beta_grad_ptr, gamma_grad_ptr, beta_grad_ptr); } else if (gamma_grad_req != kAddTo && beta_grad_req == kAddTo) { LayerNormFusedBackwardKernel_GammaBeta<false, true> <<<gb_grid_dim, gb_block_dim, nshared_K2, stream>>> (nbatch, nchannel, npart, part_gamma_grad_ptr, part_beta_grad_ptr, gamma_grad_ptr, beta_grad_ptr); } else if (gamma_grad_req == kAddTo && beta_grad_req == kAddTo) { LayerNormFusedBackwardKernel_GammaBeta<true, true> <<<gb_grid_dim, gb_block_dim, nshared_K2, stream>>> (nbatch, nchannel, npart, part_gamma_grad_ptr, part_beta_grad_ptr, gamma_grad_ptr, beta_grad_ptr); } else { LayerNormFusedBackwardKernel_GammaBeta<false, false> <<<gb_grid_dim, gb_block_dim, nshared_K2, stream>>> (nbatch, nchannel, npart, part_gamma_grad_ptr, part_beta_grad_ptr, gamma_grad_ptr, beta_grad_ptr); } }); MSHADOW_CUDA_POST_KERNEL_CHECK(LayerNormFusedBackwardKernel_GammaBeta); } // Calculate the gradient for data CHECK_EQ(data_grad.CheckContiguous(), true); int ngrid_x = (nbatch > kMaxGridDim) ? (nbatch + kBaseGridNum - 1) / kBaseGridNum : nbatch; int ngrid_y = (nbatch > kMaxGridDim) ? kBaseGridNum : 1; const dim3 data_grid_dim(ngrid_x, ngrid_y); int nthread_y; if (nchannel <= 32) { nthread_y = 1; } else if (nchannel <= 128) { nthread_y = 2; } else if (nchannel <= 512) { nthread_y = 4; } else { nthread_y = 8; } const dim3 data_block_dim(32, nthread_y); const int LOAD_UNROLL = 4; if (data_grad_req != kNullOp) { MXNET_REAL_ACC_TYPE_SWITCH(in_data.type_flag_, DType, AccType, { typedef typename std::conditional<safe_acc, AccType, DType>::type AType; int nshared = data_block_dim.y > 1 ? 
data_block_dim.y * data_block_dim.x * sizeof(AType) : 0; CheckLaunchParam(data_grid_dim, data_block_dim); if (data_grad_req == kAddTo) { LayerNormFusedBackwardKernel_Data<LOAD_UNROLL, true, AType> <<<data_grid_dim, data_block_dim, nshared, stream>>> (nbatch, nchannel, in_data.dptr<DType>(), out_grad.dptr<DType>(), mean_data.dptr<DType>(), std_data.dptr<DType>(), gamma.dptr<DType>(), data_grad.dptr<DType>()); } else { LayerNormFusedBackwardKernel_Data<LOAD_UNROLL, false, AType> <<<data_grid_dim, data_block_dim, nshared, stream>>> (nbatch, nchannel, in_data.dptr<DType>(), out_grad.dptr<DType>(), mean_data.dptr<DType>(), std_data.dptr<DType>(), gamma.dptr<DType>(), data_grad.dptr<DType>()); } }); MSHADOW_CUDA_POST_KERNEL_CHECK(LayerNormFusedBackwardKernel_Data); } } template<> void LayerNormGradCompute<gpu>(const nnvm::NodeAttrs& attrs, const OpContext& ctx, const std::vector<TBlob>& inputs, const std::vector<OpReqType>& req, const std::vector<TBlob>& outputs) { const LayerNormParam& param = nnvm::get<LayerNormParam>(attrs.parsed); int axis = param.axis; if (axis < 0) { axis += static_cast<int>(inputs[0].ndim()); } CHECK(axis >= 0 && axis < inputs[0].ndim()) << "Channel axis out of range: " << param.axis; if (axis == inputs[0].ndim() - 1) { // Use the accelerated CUDA kernels bool safe_acc = dmlc::GetEnv("MXNET_SAFE_ACCUMULATION", false); if (safe_acc) { return LayerNormGradGPUContig<true>(param, ctx, inputs, req, outputs); } else { return LayerNormGradGPUContig<false>(param, ctx, inputs, req, outputs); } } return LayerNormGradComputeGeneral<gpu>(attrs, ctx, inputs, req, outputs); } NNVM_REGISTER_OP(LayerNorm) .set_attr<FCompute>("FCompute<gpu>", LayerNormCompute<gpu>); NNVM_REGISTER_OP(_backward_LayerNorm) .set_attr<FCompute>("FCompute<gpu>", LayerNormGradCompute<gpu>); } // namespace op } // namespace mxnet
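The forward kernel accumulates the per-row mean and variance with Welford's single-element update (StepWelfordOnlineSum) and combines per-thread partial results with Chan's pairwise merge (ChanMergePartition). The host-side sketch below restates those two rules with illustrative names only and checks that merging two partitions reproduces the single-pass result; it is not MXNet code.

#include <cstdio>
#include <vector>

struct Moments { double mean = 0.0, m2 = 0.0, count = 0.0; };

// Welford: incorporate one value into (mean, m2, count).
void welford_step(Moments &m, double x) {
  m.count += 1.0;
  double delta = x - m.mean;
  m.mean += delta / m.count;
  m.m2 += delta * (x - m.mean);  // running sum of squared deviations
}

// Chan: merge partition lhs into rhs, mirroring ChanMergePartition above.
void chan_merge(const Moments &lhs, Moments &rhs) {
  double delta = rhs.mean - lhs.mean;
  double nA = lhs.count, nB = rhs.count, n = nA + nB;
  if (n > 0.0) {
    rhs.mean = (nA * lhs.mean + nB * rhs.mean) / n;
    rhs.m2 = rhs.m2 + lhs.m2 + delta * delta * nA * nB / n;
    rhs.count = n;
  }
}

int main() {
  std::vector<double> x = {1, 2, 3, 4, 5, 6, 7, 8};
  Moments all, lo, hi;
  for (double v : x) welford_step(all, v);
  for (size_t i = 0; i < x.size() / 2; ++i) welford_step(lo, x[i]);
  for (size_t i = x.size() / 2; i < x.size(); ++i) welford_step(hi, x[i]);
  chan_merge(lo, hi);
  printf("single pass: mean=%f var=%f\n", all.mean, all.m2 / all.count);
  printf("merged     : mean=%f var=%f\n", hi.mean, hi.m2 / hi.count);
  return 0;
}

Both lines print mean=4.5 and var=5.25, which is why the kernel can reduce per-thread (mean, sigma2, count) triplets first within a warp via warp_shfl_xor and then across warps through shared memory.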
fd520706cbcdcc17a53f8f1e94d10d61da9e8f70.hip
// !!! This is a file automatically generated by hipify!!! #include"gpu_predict_test.h" void testInitArray(void) { real *c_vec, *d_vec; real val; int len; int i, test, error; c_vec = (real *)malloc( int(sizeof(real) * 1e7) ); hipMalloc( (void **)&d_vec, int(sizeof(real) * 1e7) ); for( test = 0; test < 6; ++test ) { val = pow( 10.0, test-5); len = pow( 10.0, test ); gpu_init_array(d_vec, val, len); hipMemcpy(d_vec, c_vec, sizeof(real) * len, hipMemcpyDeviceToHost); error = 0; for( i = 0; i < len; ++i ) { if( ( c_vec[i] - val ) / val > 1e-15 ) error++; } CU_ASSERT(error == 0); } free(c_vec); hipFree(d_vec); }
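In the loop above, hipMemcpy is called with the device buffer d_vec as the destination even though the copy kind is hipMemcpyDeviceToHost, so c_vec is checked without ever receiving the device data. Assuming the intent is to read the array initialized by gpu_init_array back to the host for verification, and assuming real is double purely for this sketch, the read-back would normally look like the following; the helper name is illustrative.

#include <hip/hip_runtime.h>
#include <cstdio>

// Copy len doubles from device to host and report any API error.
void read_back(double *host_dst, const double *device_src, int len) {
  hipError_t err = hipMemcpy(host_dst, device_src, sizeof(double) * len,
                             hipMemcpyDeviceToHost);
  if (err != hipSuccess) {
    fprintf(stderr, "hipMemcpy failed: %s\n", hipGetErrorString(err));
  }
}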
fd520706cbcdcc17a53f8f1e94d10d61da9e8f70.cu
#include"gpu_predict_test.h" void testInitArray(void) { real *c_vec, *d_vec; real val; int len; int i, test, error; c_vec = (real *)malloc( int(sizeof(real) * 1e7) ); cudaMalloc( (void **)&d_vec, int(sizeof(real) * 1e7) ); for( test = 0; test < 6; ++test ) { val = pow( 10.0, test-5); len = pow( 10.0, test ); gpu_init_array(d_vec, val, len); cudaMemcpy(d_vec, c_vec, sizeof(real) * len, cudaMemcpyDeviceToHost); error = 0; for( i = 0; i < len; ++i ) { if( ( c_vec[i] - val ) / val > 1e-15 ) error++; } CU_ASSERT(error == 0); } free(c_vec); cudaFree(d_vec); }
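Both the HIP and CUDA versions of this test count an error only when (c_vec[i] - val) / val exceeds 1e-15, so values that come back smaller than val are never flagged, and the tolerance sits close to double-precision epsilon (and below single-precision epsilon if real is float). A symmetric relative-error check is sketched below; the helper name and the tolerance are illustrative, not taken from the test.

#include <cmath>

// True when got is within rel_tol of expected in relative terms.
bool nearly_equal(double got, double expected, double rel_tol = 1e-12) {
  return std::fabs(got - expected) <= rel_tol * std::fabs(expected);
}
// Possible use inside the loop: if (!nearly_equal(c_vec[i], val)) error++;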
c2c405214e6e0ff44583b331dd2e09f240e4382e.hip
// !!! This is a file automatically generated by hipify!!! #include <gauge_field.h> #include <color_spinor_field.h> #include <clover_field.h> #include <dslash.h> #include <worker.h> #include <dslash_policy.cuh> #include <kernels/dslash_wilson_clover.cuh> /** This is the basic gauged twisted-clover operator */ namespace quda { /** @brief This is a helper class that is used to instantiate the correct templated kernel for the dslash. */ template <typename Float, int nDim, int nColor, int nParity, bool dagger, bool xpay, KernelType kernel_type, typename Arg> struct TwistedCloverLaunch { static constexpr const char *kernel = "quda::wilsonCloverGPU"; // kernel name for jit compilation template <typename Dslash> inline static void launch(Dslash &dslash, TuneParam &tp, Arg &arg, const hipStream_t &stream) { static_assert(xpay == true, "Twisted-clover operator only defined for xpay"); dslash.launch(wilsonCloverGPU<Float, nDim, nColor, nParity, dagger, xpay, kernel_type, Arg>, tp, arg, stream); } }; template <typename Float, int nDim, int nColor, typename Arg> class TwistedClover : public Dslash<Float> { protected: Arg &arg; const ColorSpinorField &in; public: TwistedClover(Arg &arg, const ColorSpinorField &out, const ColorSpinorField &in) : Dslash<Float>(arg, out, in, "kernels/dslash_wilson_clover.cuh"), arg(arg), in(in) { } virtual ~TwistedClover() {} void apply(const hipStream_t &stream) { TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity()); Dslash<Float>::setParam(arg); if (arg.xpay) Dslash<Float>::template instantiate<TwistedCloverLaunch, nDim, nColor, true>(tp, arg, stream); else errorQuda("Twisted-clover operator only defined for xpay=true"); } long long flops() const { int clover_flops = 504 + 48; long long flops = Dslash<Float>::flops(); switch (arg.kernel_type) { case EXTERIOR_KERNEL_X: case EXTERIOR_KERNEL_Y: case EXTERIOR_KERNEL_Z: case EXTERIOR_KERNEL_T: case EXTERIOR_KERNEL_ALL: break; // all clover flops are in the interior kernel case INTERIOR_KERNEL: case KERNEL_POLICY: flops += clover_flops * in.Volume(); break; } return flops; } long long bytes() const { bool isFixed = (in.Precision() == sizeof(short) || in.Precision() == sizeof(char)) ? true : false; int clover_bytes = 72 * in.Precision() + (isFixed ? 
2 * sizeof(float) : 0); long long bytes = Dslash<Float>::bytes(); switch (arg.kernel_type) { case EXTERIOR_KERNEL_X: case EXTERIOR_KERNEL_Y: case EXTERIOR_KERNEL_Z: case EXTERIOR_KERNEL_T: case EXTERIOR_KERNEL_ALL: break; case INTERIOR_KERNEL: case KERNEL_POLICY: bytes += clover_bytes * in.Volume(); break; } return bytes; } TuneKey tuneKey() const { return TuneKey(in.VolString(), typeid(*this).name(), Dslash<Float>::aux[arg.kernel_type]); } }; template <typename Float, int nColor, QudaReconstructType recon> struct TwistedCloverApply { inline TwistedCloverApply(ColorSpinorField &out, const ColorSpinorField &in, const GaugeField &U, const CloverField &C, double a, double b, const ColorSpinorField &x, int parity, bool dagger, const int *comm_override, TimeProfile &profile) { constexpr int nDim = 4; WilsonCloverArg<Float, nColor, recon, true> arg(out, in, U, C, a, b, x, parity, dagger, comm_override); TwistedClover<Float, nDim, nColor, WilsonCloverArg<Float, nColor, recon, true>> twisted(arg, out, in); dslash::DslashPolicyTune<decltype(twisted)> policy( twisted, const_cast<cudaColorSpinorField *>(static_cast<const cudaColorSpinorField *>(&in)), in.VolumeCB(), in.GhostFaceCB(), profile); policy.apply(0); checkCudaError(); } }; // Apply the twisted-mass Dslash operator // out(x) = M*in = (A + i*b*gamma_5)*in(x) + a*\sum_mu U_{-\mu}(x)in(x+mu) + U^\dagger_mu(x-mu)in(x-mu) // Uses the kappa normalization for the Wilson operator, with a = -kappa. void ApplyTwistedClover(ColorSpinorField &out, const ColorSpinorField &in, const GaugeField &U, const CloverField &C, double a, double b, const ColorSpinorField &x, int parity, bool dagger, const int *comm_override, TimeProfile &profile) { #ifdef GPU_TWISTED_CLOVER_DIRAC if (in.V() == out.V()) errorQuda("Aliasing pointers"); if (in.FieldOrder() != out.FieldOrder()) errorQuda("Field order mismatch in = %d, out = %d", in.FieldOrder(), out.FieldOrder()); // check all precisions match checkPrecision(out, in, U, C); // check all locations match checkLocation(out, in, U, C); instantiate<TwistedCloverApply>(out, in, U, C, a, b, x, parity, dagger, comm_override, profile); #else errorQuda("Twisted-clover dslash has not been built"); #endif // GPU_TWISTED_CLOVEr_DIRAC } } // namespace quda
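hipify renames only CUDA runtime and driver API symbols, so in the file above cudaStream_t becomes hipStream_t while QUDA's own identifiers that merely contain "cuda", such as cudaColorSpinorField and checkCudaError, are left untouched. Projects that want one apply() signature under both toolkits sometimes add a small alias of the kind sketched below; this is not QUDA code, and the __HIP_PLATFORM_AMD__ guard is one common choice whose exact spelling varies between HIP releases.

#ifdef __HIP_PLATFORM_AMD__
#include <hip/hip_runtime.h>
using gpuStream_t = hipStream_t;   // HIP build
#else
#include <cuda_runtime.h>
using gpuStream_t = cudaStream_t;  // CUDA build
#endif

// A class like TwistedClover could then declare: void apply(const gpuStream_t &stream);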
c2c405214e6e0ff44583b331dd2e09f240e4382e.cu
#include <gauge_field.h> #include <color_spinor_field.h> #include <clover_field.h> #include <dslash.h> #include <worker.h> #include <dslash_policy.cuh> #include <kernels/dslash_wilson_clover.cuh> /** This is the basic gauged twisted-clover operator */ namespace quda { /** @brief This is a helper class that is used to instantiate the correct templated kernel for the dslash. */ template <typename Float, int nDim, int nColor, int nParity, bool dagger, bool xpay, KernelType kernel_type, typename Arg> struct TwistedCloverLaunch { static constexpr const char *kernel = "quda::wilsonCloverGPU"; // kernel name for jit compilation template <typename Dslash> inline static void launch(Dslash &dslash, TuneParam &tp, Arg &arg, const cudaStream_t &stream) { static_assert(xpay == true, "Twisted-clover operator only defined for xpay"); dslash.launch(wilsonCloverGPU<Float, nDim, nColor, nParity, dagger, xpay, kernel_type, Arg>, tp, arg, stream); } }; template <typename Float, int nDim, int nColor, typename Arg> class TwistedClover : public Dslash<Float> { protected: Arg &arg; const ColorSpinorField &in; public: TwistedClover(Arg &arg, const ColorSpinorField &out, const ColorSpinorField &in) : Dslash<Float>(arg, out, in, "kernels/dslash_wilson_clover.cuh"), arg(arg), in(in) { } virtual ~TwistedClover() {} void apply(const cudaStream_t &stream) { TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity()); Dslash<Float>::setParam(arg); if (arg.xpay) Dslash<Float>::template instantiate<TwistedCloverLaunch, nDim, nColor, true>(tp, arg, stream); else errorQuda("Twisted-clover operator only defined for xpay=true"); } long long flops() const { int clover_flops = 504 + 48; long long flops = Dslash<Float>::flops(); switch (arg.kernel_type) { case EXTERIOR_KERNEL_X: case EXTERIOR_KERNEL_Y: case EXTERIOR_KERNEL_Z: case EXTERIOR_KERNEL_T: case EXTERIOR_KERNEL_ALL: break; // all clover flops are in the interior kernel case INTERIOR_KERNEL: case KERNEL_POLICY: flops += clover_flops * in.Volume(); break; } return flops; } long long bytes() const { bool isFixed = (in.Precision() == sizeof(short) || in.Precision() == sizeof(char)) ? true : false; int clover_bytes = 72 * in.Precision() + (isFixed ? 
2 * sizeof(float) : 0); long long bytes = Dslash<Float>::bytes(); switch (arg.kernel_type) { case EXTERIOR_KERNEL_X: case EXTERIOR_KERNEL_Y: case EXTERIOR_KERNEL_Z: case EXTERIOR_KERNEL_T: case EXTERIOR_KERNEL_ALL: break; case INTERIOR_KERNEL: case KERNEL_POLICY: bytes += clover_bytes * in.Volume(); break; } return bytes; } TuneKey tuneKey() const { return TuneKey(in.VolString(), typeid(*this).name(), Dslash<Float>::aux[arg.kernel_type]); } }; template <typename Float, int nColor, QudaReconstructType recon> struct TwistedCloverApply { inline TwistedCloverApply(ColorSpinorField &out, const ColorSpinorField &in, const GaugeField &U, const CloverField &C, double a, double b, const ColorSpinorField &x, int parity, bool dagger, const int *comm_override, TimeProfile &profile) { constexpr int nDim = 4; WilsonCloverArg<Float, nColor, recon, true> arg(out, in, U, C, a, b, x, parity, dagger, comm_override); TwistedClover<Float, nDim, nColor, WilsonCloverArg<Float, nColor, recon, true>> twisted(arg, out, in); dslash::DslashPolicyTune<decltype(twisted)> policy( twisted, const_cast<cudaColorSpinorField *>(static_cast<const cudaColorSpinorField *>(&in)), in.VolumeCB(), in.GhostFaceCB(), profile); policy.apply(0); checkCudaError(); } }; // Apply the twisted-mass Dslash operator // out(x) = M*in = (A + i*b*gamma_5)*in(x) + a*\sum_mu U_{-\mu}(x)in(x+mu) + U^\dagger_mu(x-mu)in(x-mu) // Uses the kappa normalization for the Wilson operator, with a = -kappa. void ApplyTwistedClover(ColorSpinorField &out, const ColorSpinorField &in, const GaugeField &U, const CloverField &C, double a, double b, const ColorSpinorField &x, int parity, bool dagger, const int *comm_override, TimeProfile &profile) { #ifdef GPU_TWISTED_CLOVER_DIRAC if (in.V() == out.V()) errorQuda("Aliasing pointers"); if (in.FieldOrder() != out.FieldOrder()) errorQuda("Field order mismatch in = %d, out = %d", in.FieldOrder(), out.FieldOrder()); // check all precisions match checkPrecision(out, in, U, C); // check all locations match checkLocation(out, in, U, C); instantiate<TwistedCloverApply>(out, in, U, C, a, b, x, parity, dagger, comm_override, profile); #else errorQuda("Twisted-clover dslash has not been built"); #endif // GPU_TWISTED_CLOVEr_DIRAC } } // namespace quda
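The bytes() accounting in the two listings above charges the clover field only to the interior and policy kernels, at 72 reals per lattice site plus two float scale factors when the field is stored in a fixed-point (half or quarter precision) format. A minimal host-side sketch of that arithmetic, assuming a hypothetical half-precision field and a 16^4 local volume (both values chosen only for illustration, not taken from either file):

#include <cstdio>

int main() {
  // Illustrative assumptions: 2-byte (half) precision stored as fixed point,
  // and a 16^4 local volume.
  const long long precision = 2;                 // what in.Precision() would return for half
  const bool isFixed = true;                     // fixed-point storage carries two float scale factors
  const long long volume = 16LL * 16 * 16 * 16;  // 65536 sites

  // Mirrors clover_bytes in TwistedClover::bytes().
  const long long clover_bytes = 72 * precision + (isFixed ? 2 * (long long)sizeof(float) : 0);
  const long long interior_extra = clover_bytes * volume;  // added for INTERIOR_KERNEL / KERNEL_POLICY only

  std::printf("clover bytes per site: %lld\n", clover_bytes);         // 152
  std::printf("extra interior-kernel bytes: %lld\n", interior_extra);
  return 0;
}

The flops() method follows the same pattern, with a fixed per-site clover cost of 504 + 48 operations charged to the same two kernel types.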
bf372439a553baccf010266235000ce93ccd13a1.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <inttypes.h> #include <math.h> #include <stdlib.h> #include <stdio.h> //#include <time.h> #include <sys/time.h> #include <sys/types.h> #include <dirent.h> #include <unistd.h> #define MAX(x, y) (((x) > (y)) ? (x) : (y)) #define MIN(x, y) (((x) < (y)) ? (x) : (y)) #define MAX_STR_LEN 256 struct ponto_capturado{ int TID; char *clazz; int time; double lat, lon; int gid; int stopId; }; struct trajetoria{ ponto_capturado** pontos; int qntdPontos; }; __device__ double euclidean(double *p1, double *p2); __global__ void msm(double* trajA, int lengthA, double* trajB, int lengthB, double* aScore, double* bScore, double* semanticsDescriptors); void msm_sequential(double* trajA, int lengthA, double* trajB, int lengthB, double* aScore, double* bScore, double* semanticsDescriptors); double euclidean_local(double *p1, double *p2); trajetoria** trajetorias; trajetoria* readTrajFile(char*); double* trajectoryRawer(trajetoria*); double distance(double*, int, double*, int); double distance_sequential(double*, int, double*, int); int main(int argc, char *argv[]) { int file_count = 0; int len; DIR * dirp; struct dirent * entry; dirp = opendir("./trajetorias"); while ((entry = readdir(dirp)) != NULL) { len = strlen (entry->d_name); if (entry->d_type == DT_REG && strcmp (".traj", &(entry->d_name[len - 5])) == 0) { /* If the entry is a regular file */ file_count++; } } closedir(dirp); trajetorias = (trajetoria**) malloc(file_count*sizeof(trajetoria*)); DIR* FD; struct dirent* in_file; if (NULL == (FD = opendir ("./trajetorias"))) { fprintf(stderr, "Error : Failed to open input directory\n"); return 1; } int fileCounter = 0; while ((in_file = readdir(FD))) { len = strlen (in_file->d_name); if (len > 4 && in_file->d_type == DT_REG && strcmp (".traj", &(in_file->d_name[len - 5])) == 0) { if (!strcmp (in_file->d_name, ".")) continue; if (!strcmp (in_file->d_name, "..")) continue; char filePath[1024]; sprintf( filePath, "%s/%s", "./trajetorias", in_file->d_name ); trajetorias[fileCounter++] = readTrajFile(filePath); } } printf("Qntd arquivos lidos %d\n", file_count); double** allDistances = (double**) malloc(file_count*sizeof(double*)); double** rawTrajs = (double**) malloc(file_count*sizeof(double*)); for(int k = 0;k<file_count;k++) { rawTrajs[k] = trajectoryRawer(trajetorias[k]); } for(int k = 0;k<file_count;k++) { allDistances[k] = (double*) malloc(file_count*sizeof(double)); } printf("Trajetorias transformadas %d\n", file_count); struct timeval begin, end; printf("Executando algoritmo sequencial\n"); gettimeofday(&begin, NULL); for(int k = 0;k<file_count;k++) { allDistances[k][k] = 0.0; for(int l = 0;l<file_count;l++) { if(k<l) { double *trajA = rawTrajs[k]; double *trajB = rawTrajs[l]; double similarity = distance_sequential(trajA, trajetorias[k]->qntdPontos, trajB, trajetorias[l]->qntdPontos); allDistances[k][l] = similarity; allDistances[l][k] = similarity; } } } gettimeofday(&end, NULL); float cpuTime = 1000000*(float)(end.tv_sec - begin.tv_sec); cpuTime += (float)(end.tv_usec - begin.tv_usec); printf("Tempo de execução para a construção da matriz de similaridade entre todas as trajetórias: %9.2f microssegundos\n", cpuTime); /* printf("Dados das tabelas de similaridade\nCPU:\n"); for(int i = 0; i < file_count;i++) { for(int j = 0; j < file_count;j++) { printf("%.2f, ", allDistances[i][j]); } printf("\n"); }*/ printf("Executando algoritmo acelerado por GPU\n"); gettimeofday(&begin, NULL); for(int k = 0;k<file_count;k++) {
allDistances[k][k] = 0.0; for(int l = 0;l<file_count;l++) { if(k<l) { double *trajA = rawTrajs[k]; double *trajB = rawTrajs[l]; double similarity = distance(trajA, trajetorias[k]->qntdPontos, trajB, trajetorias[l]->qntdPontos); allDistances[k][l] = similarity; allDistances[l][k] = similarity; } } } gettimeofday(&end, NULL); float gpuTime = 1000000*(float)(end.tv_sec - begin.tv_sec); gpuTime += (float)(end.tv_usec - begin.tv_usec); printf("Tempo de execução para a construção da matriz de similaridade entre todas as trajetórias: %9.2f microssegundos\n", gpuTime); /* printf("GPU:\n"); for(int i = 0; i < file_count;i++) { for(int j = 0; j < file_count;j++) { printf("%.2f, ", allDistances[i][j]); } printf("\n"); }*/ for(int i = 0; i < file_count;i++) { if(trajetorias[i]) { for(int j = 0; j < trajetorias[i]->qntdPontos;j++) { free(trajetorias[i]->pontos[j]); } free(trajetorias[i]); } } free(trajetorias); return 0; } trajetoria* readTrajFile(char *filePath) { /* FileStream for the Library File */ FILE *trajFile; /* allocation of the buffer for every line in the File */ char *buf = (char*) malloc(MAX_STR_LEN); char *tmp; /* if the space could not be allocaed, return an error */ if (buf == NULL) { printf ("No memory\n"); return NULL; } if ( ( trajFile = fopen( filePath, "r" ) ) == NULL ) //Reading a file { printf( "File could not be opened: %s.\n", filePath ); return NULL; } int pointsCounter = 0; while (fgets(buf, MAX_STR_LEN - 1, trajFile) != NULL) { pointsCounter++; } fclose(trajFile); ponto_capturado **traj = (ponto_capturado**) malloc(pointsCounter*sizeof(ponto_capturado*)); trajetoria* trajetoria = new struct trajetoria; trajetoria->pontos = traj; trajetoria->qntdPontos = pointsCounter; if ( ( trajFile = fopen( filePath, "r" ) ) == NULL ) { printf( "File could not be opened: %s.\n", filePath ); return NULL; } int i = 0; while (fgets(buf, MAX_STR_LEN - 1, trajFile) != NULL) { if (strlen(buf)>0) { if(buf[strlen (buf) - 1] == '\n') buf[strlen (buf) - 1] = '\0'; } else { if(buf[0] == '\n') { continue; } } tmp = strtok(buf, ";"); traj[i] = new ponto_capturado(); traj[i]->TID = atoi(tmp); tmp = strtok(NULL, ";"); int len = strlen(tmp); traj[i]->clazz = (char*)malloc(len + 1); strcpy(traj[i]->clazz, tmp); tmp = strtok(NULL, ";"); traj[i]->time = atoi(tmp); tmp = strtok(NULL, ";"); traj[i]->lat = atof(tmp); tmp = strtok(NULL, ";"); traj[i]->lon = atof(tmp); tmp = strtok(NULL, ";"); traj[i]->gid = atoi(tmp); tmp = strtok(NULL, ";"); if ((tmp != NULL) && (tmp[0] == '\0')) { traj[i]->stopId = atoi(tmp); } else { traj[i]->stopId = 0; } i++; } //printf("Loaded %s - %d points\n", filePath, i); fclose(trajFile); return trajetoria; } double* trajectoryRawer(trajetoria* trajetoria) { int N = trajetoria->qntdPontos; double* trajA = (double*)malloc( 4*N*sizeof(double)); for(int i = 0; i < N; i++) { trajA[i * 4] = trajetoria->pontos[i]->lat; trajA[i * 4 + 1] = trajetoria->pontos[i]->lon; trajA[i * 4 + 2] = trajetoria->pontos[i]->time; trajA[i * 4 + 3] = trajetoria->pontos[i]->time + 30; } return trajA; } double distance_sequential(double* trajA, int N, double* trajB, int M) { double* aScore = (double*)malloc( N*sizeof(double)); double* bScore = (double*)malloc( N*M*sizeof(double)); double* semanticsDescriptors = (double*)malloc( 2*2*sizeof(double)); //GEO semanticsDescriptors[0] = 0.0; semanticsDescriptors[1] = 0.5; //TIME semanticsDescriptors[2] = 0.0; semanticsDescriptors[3] = 0.5; //printf("Distance lengthA=%d, lengthB=%d\n", N,M); msm_sequential( trajA, N, trajB, M, aScore, bScore, semanticsDescriptors ); double
parityAB = 0.0; for (int i = 0; i < N; i++) { parityAB += aScore[i]; } double parityBA = 0.0; for (int i = 0; i < N; i++) { double maxScore = 0.0; for (int j = 0; j < M; j++) { maxScore = MAX(maxScore, bScore[i * M + j]); } parityBA += maxScore; } double similarity = (parityAB + parityBA) / (N + M); free(semanticsDescriptors); //printf("similarity=%.2f\n", similarity ); free(bScore); free(aScore); aScore = NULL; bScore = NULL; semanticsDescriptors = NULL; return similarity; } double distance(double* trajA, int N, double* trajB, int M) { double* aScore = (double*)malloc( N*sizeof(double)); double* bScore = (double*)malloc( N*M*sizeof(double)); double* semanticsDescriptors = (double*)malloc( 2*2*sizeof(double)); //GEO semanticsDescriptors[0] = 0.0; semanticsDescriptors[1] = 0.5; //TIME semanticsDescriptors[2] = 0.0; semanticsDescriptors[3] = 0.5; double *d_trajA,*d_trajB, *d_aScore, *d_bScore, *d_semanticsDescriptors; hipMalloc( (void**) &d_trajA, 4*N*sizeof(double) ); hipMalloc( (void**) &d_trajB, 4*M*sizeof(double) ); hipMalloc( (void**) &d_semanticsDescriptors, 2*2*sizeof(double) ); hipMalloc( (void**) &d_aScore, N*sizeof(double) ); hipMalloc( (void**) &d_bScore, N*M*sizeof(double) ); hipMemcpy( (void*) d_trajA, (void*) trajA, 4*N*sizeof(double), hipMemcpyHostToDevice); hipMemcpy( (void*) d_trajB, (void*) trajB, 4*M*sizeof(double), hipMemcpyHostToDevice); hipMemcpy( (void*) d_semanticsDescriptors, (void*) semanticsDescriptors, 2*2*sizeof(double), hipMemcpyHostToDevice); int THREADS = 512; int BLOCOS = (N/THREADS) + 1; struct timeval begin, end; gettimeofday(&begin, NULL); hipLaunchKernelGGL(( msm), dim3(BLOCOS), dim3(THREADS), 0, 0, d_trajA, N, d_trajB, M, d_aScore, d_bScore, d_semanticsDescriptors ); gettimeofday(&end, NULL); hipMemcpy( (void*) aScore, (void*) d_aScore, N*sizeof(double), hipMemcpyDeviceToHost); hipMemcpy( (void*) bScore, (void*) d_bScore, N*M*sizeof(double), hipMemcpyDeviceToHost); hipFree(d_trajA); hipFree(d_trajB); hipFree(d_aScore); hipFree(d_bScore); hipFree(d_semanticsDescriptors); double parityAB = 0.0; for (int i = 0; i < N; i++) { parityAB += aScore[i]; } double parityBA = 0.0; for (int i = 0; i < N; i++) { double maxScore = 0.0; for (int j = 0; j < M; j++) { maxScore = MAX(maxScore, bScore[i * M + j]); } parityBA += maxScore; } double similarity = (parityAB + parityBA) / (N + M); free(semanticsDescriptors); //printf("similarity=%.2f\n", similarity ); free(bScore); free(aScore); aScore = NULL; bScore = NULL; semanticsDescriptors = NULL; return similarity; } void msm_sequential(double* trajA, int lengthA, double* trajB, int lengthB, double* aScore, double* bScore, double* semanticsDescriptors) { double geoThreshold = semanticsDescriptors[0]; double timeThreshold = semanticsDescriptors[2]; double geoWeight = semanticsDescriptors[1]; double timeWeight = semanticsDescriptors[3]; for(int i = 0; i < lengthA; i++) { double latGeoA = trajA[i * 4]; double lonGeoA = trajA[i * 4 + 1]; double startTimeA = trajA[i * 4 + 2]; double endTimeA = trajA[i * 4 + 3]; double maxScore = 0.0; for (int j = 0; j < lengthB; j++) { double latGeoB = trajB[j * 4]; double lonGeoB = trajB[j * 4 + 1]; double startTimeB = trajB[j * 4 + 2]; double endTimeB = trajB[j * 4 + 3]; double timeScore = 0.0; if(startTimeA < endTimeB && startTimeB < endTimeA ) { double overlap = MIN(endTimeA, endTimeB) - MAX(startTimeA, startTimeB); if(overlap > 0.0) { double duration = MAX(endTimeA, endTimeB) - MIN(startTimeA, startTimeB); double timeDistance = 1 - (overlap / duration); timeScore = (timeDistance <= 
timeThreshold ? 1 : 0) * timeWeight; } } double geoB[] = {latGeoB, lonGeoB}; double geoA[] = {latGeoA, lonGeoA}; double geoScore = (euclidean_local(geoB, geoA) <= geoThreshold ? 1 : 0) * geoWeight; double sumScore = timeScore + geoScore; if(sumScore > maxScore) { maxScore = sumScore; } bScore[i * lengthB + j] = sumScore; } aScore[i] = maxScore; } } //extern "C" __global__ void msm(double* trajA, int lengthA, double* trajB, int lengthB, double* aScore, double* bScore, double* semanticsDescriptors) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i>=lengthA) { return; } double latGeoA = trajA[i * 4]; double lonGeoA = trajA[i * 4 + 1]; double startTimeA = trajA[i * 4 + 2]; double endTimeA = trajA[i * 4 + 3]; double geoThreshold = semanticsDescriptors[0]; double timeThreshold = semanticsDescriptors[2]; double geoWeight = semanticsDescriptors[1]; double timeWeight = semanticsDescriptors[3]; double maxScore = 0.0; for (int j = 0; j < lengthB; j++) { double latGeoB = trajB[j * 4]; double lonGeoB = trajB[j * 4 + 1]; double startTimeB = trajB[j * 4 + 2]; double endTimeB = trajB[j * 4 + 3]; double timeScore = 0.0; if(startTimeA < endTimeB && startTimeB < endTimeA ) { double overlap = MIN(endTimeA, endTimeB) - MAX(startTimeA, startTimeB); if(overlap > 0.0) { double duration = MAX(endTimeA, endTimeB) - MIN(startTimeA, startTimeB); double timeDistance = 1 - (overlap / duration); timeScore = (timeDistance <= timeThreshold ? 1 : 0) * timeWeight; } } double geoB[] = {latGeoB, lonGeoB}; double geoA[] = {latGeoA, lonGeoA}; double geoScore = (euclidean(geoB, geoA) <= geoThreshold ? 1 : 0) * geoWeight; double sumScore = timeScore + geoScore; if(sumScore > maxScore) { maxScore = sumScore; } bScore[i * lengthB + j] = sumScore; } aScore[i] = maxScore; } __device__ double euclidean(double *p1, double *p2) { double distX = abs(p1[0] - p2[0]); double distXSquare = distX * distX; double distY = abs(p1[1] - p2[1]); double distYSquare = distY * distY; return sqrt(distXSquare + distYSquare); } double euclidean_local(double *p1, double *p2) { double distX = abs(p1[0] - p2[0]); double distXSquare = distX * distX; double distY = abs(p1[1] - p2[1]); double distYSquare = distY * distY; return sqrt(distXSquare + distYSquare); }
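The per-pair score computed in msm_sequential and in the msm kernel above combines a geographic indicator and a temporal indicator, weighted by the four entries of semanticsDescriptors (here 0.5/0.5 with both thresholds at 0.0). A standalone sketch of that rule for one hypothetical pair of points, laid out as trajectoryRawer produces them (lat, lon, start, end); the sample coordinates are made up purely for illustration:

#include <cmath>
#include <cstdio>

int main() {
  // Hypothetical points (lat, lon, start, end); identical so both indicators fire.
  const double a[4] = {10.0, 20.0, 100.0, 130.0};
  const double b[4] = {10.0, 20.0, 100.0, 130.0};

  const double geoThreshold = 0.0, geoWeight = 0.5;    // semanticsDescriptors[0], [1]
  const double timeThreshold = 0.0, timeWeight = 0.5;  // semanticsDescriptors[2], [3]

  // Geographic part: indicator on the Euclidean distance (see euclidean_local).
  const double dGeo = std::sqrt((a[0] - b[0]) * (a[0] - b[0]) + (a[1] - b[1]) * (a[1] - b[1]));
  const double geoScore = (dGeo <= geoThreshold ? 1 : 0) * geoWeight;

  // Temporal part: indicator on 1 - overlap/duration, evaluated only for overlapping intervals.
  double timeScore = 0.0;
  if (a[2] < b[3] && b[2] < a[3]) {
    const double overlap = std::fmin(a[3], b[3]) - std::fmax(a[2], b[2]);
    if (overlap > 0.0) {
      const double duration = std::fmax(a[3], b[3]) - std::fmin(a[2], b[2]);
      const double timeDistance = 1.0 - overlap / duration;
      timeScore = (timeDistance <= timeThreshold ? 1 : 0) * timeWeight;
    }
  }

  // This sum is what msm stores in bScore[i * lengthB + j]; the row maximum becomes aScore[i].
  std::printf("pair score = %.2f\n", geoScore + timeScore);  // 1.00 for identical points
  return 0;
}

distance and distance_sequential then reduce aScore and bScore into parityAB and parityBA and return (parityAB + parityBA) / (N + M) as the similarity.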
bf372439a553baccf010266235000ce93ccd13a1.cu
#include <inttypes.h> #include <math.h> #include <stdlib.h> #include <stdio.h> //#include <time.h> #include <sys/time.h> #include <sys/types.h> #include <dirent.h> #include <unistd.h> #define MAX(x, y) (((x) > (y)) ? (x) : (y)) #define MIN(x, y) (((x) < (y)) ? (x) : (y)) #define MAX_STR_LEN 256 struct ponto_capturado{ int TID; char *clazz; int time; double lat, lon; int gid; int stopId; }; struct trajetoria{ ponto_capturado** pontos; int qntdPontos; }; __device__ double euclidean(double *p1, double *p2); __global__ void msm(double* trajA, int lengthA, double* trajB, int lengthB, double* aScore, double* bScore, double* semanticsDescriptors); void msm_sequential(double* trajA, int lengthA, double* trajB, int lengthB, double* aScore, double* bScore, double* semanticsDescriptors); double euclidean_local(double *p1, double *p2); trajetoria** trajetorias; trajetoria* readTrajFile(char*); double* trajectoryRawer(trajetoria*); double distance(double*, int, double*, int); double distance_sequential(double*, int, double*, int); int main(int argc, char *argv[]) { int file_count = 0; int len; DIR * dirp; struct dirent * entry; dirp = opendir("./trajetorias"); while ((entry = readdir(dirp)) != NULL) { len = strlen (entry->d_name); if (entry->d_type == DT_REG && strcmp (".traj", &(entry->d_name[len - 5])) == 0) { /* If the entry is a regular file */ file_count++; } } closedir(dirp); trajetorias = (trajetoria**) malloc(file_count*sizeof(trajetoria*)); DIR* FD; struct dirent* in_file; if (NULL == (FD = opendir ("./trajetorias"))) { fprintf(stderr, "Error : Failed to open input directory\n"); return 1; } int fileCounter = 0; while ((in_file = readdir(FD))) { len = strlen (in_file->d_name); if (len > 4 && in_file->d_type == DT_REG && strcmp (".traj", &(in_file->d_name[len - 5])) == 0) { if (!strcmp (in_file->d_name, ".")) continue; if (!strcmp (in_file->d_name, "..")) continue; char filePath[1024]; sprintf( filePath, "%s/%s", "./trajetorias", in_file->d_name ); trajetorias[fileCounter++] = readTrajFile(filePath); } } printf("Qntd arquivos lidos %d\n", file_count); double** allDistances = (double**) malloc(file_count*sizeof(double*)); double** rawTrajs = (double**) malloc(file_count*sizeof(double*)); for(int k = 0;k<file_count;k++) { rawTrajs[k] = trajectoryRawer(trajetorias[k]); } for(int k = 0;k<file_count;k++) { allDistances[k] = (double*) malloc(file_count*sizeof(double)); } printf("Trajetorias transformadas %d\n", file_count); struct timeval begin, end; printf("Executando algoritmo sequencial\n"); gettimeofday(&begin, NULL); for(int k = 0;k<file_count;k++) { allDistances[k][k] = 0.0; for(int l = 0;l<file_count;l++) { if(k<l) { double *trajA = rawTrajs[k]; double *trajB = rawTrajs[l]; double similarity = distance_sequential(trajA, trajetorias[k]->qntdPontos, trajB, trajetorias[l]->qntdPontos); allDistances[k][l] = similarity; allDistances[l][k] = similarity; } } } gettimeofday(&end, NULL); float cpuTime = 1000000*(float)(end.tv_sec - begin.tv_sec); cpuTime += (float)(end.tv_usec - begin.tv_usec); printf("Tempo de execução para a construção da matriz de similaridade entre todas as trajetórias: %9.2f microssegundos\n", cpuTime); /* printf("Dados das tabelas de similaridade\nCPU:\n"); for(int i = 0; i < file_count;i++) { for(int j = 0; j < file_count;j++) { printf("%.2f, ", allDistances[i][j]); } printf("\n"); }*/ printf("Executando algoritmo acelerado por GPU\n"); gettimeofday(&begin, NULL); for(int k = 0;k<file_count;k++) { allDistances[k][k] = 0.0; for(int l = 0;l<file_count;l++) { if(k<l) { double 
*trajA = rawTrajs[k]; double *trajB = rawTrajs[l]; double similarity = distance(trajA, trajetorias[k]->qntdPontos, trajB, trajetorias[l]->qntdPontos); allDistances[k][l] = similarity; allDistances[l][k] = similarity; } } } gettimeofday(&end, NULL); float gpuTime = 1000000*(float)(end.tv_sec - begin.tv_sec); gpuTime += (float)(end.tv_usec - begin.tv_usec); printf("Tempo de execução para a construção da matriz de similaridade entre todas as trajetórias: %9.2f microssegundos\n", gpuTime); /* printf("GPU:\n"); for(int i = 0; i < file_count;i++) { for(int j = 0; j < file_count;j++) { printf("%.2f, ", allDistances[i][j]); } printf("\n"); }*/ for(int i = 0; i < file_count;i++) { if(trajetorias[i]) { for(int j = 0; j < trajetorias[i]->qntdPontos;j++) { free(trajetorias[i]->pontos[j]); } free(trajetorias[i]); } } free(trajetorias); return 0; } trajetoria* readTrajFile(char *filePath) { /* FileStream for the Library File */ FILE *trajFile; /* allocation of the buffer for every line in the File */ char *buf = (char*) malloc(MAX_STR_LEN); char *tmp; /* if the space could not be allocaed, return an error */ if (buf == NULL) { printf ("No memory\n"); return NULL; } if ( ( trajFile = fopen( filePath, "r" ) ) == NULL ) //Reading a file { printf( "File could not be opened: %s.\n", filePath ); return NULL; } int pointsCounter = 0; while (fgets(buf, MAX_STR_LEN - 1, trajFile) != NULL) { pointsCounter++; } fclose(trajFile); ponto_capturado **traj = (ponto_capturado**) malloc(pointsCounter*sizeof(ponto_capturado*)); trajetoria* trajetoria = new struct trajetoria; trajetoria->pontos = traj; trajetoria->qntdPontos = pointsCounter; if ( ( trajFile = fopen( filePath, "r" ) ) == NULL ) { printf( "File could not be opened: %s.\n", filePath ); return NULL; } int i = 0; while (fgets(buf, MAX_STR_LEN - 1, trajFile) != NULL) { if (strlen(buf)>0) { if(buf[strlen (buf) - 1] == '\n') buf[strlen (buf) - 1] = '\0'; } else { if(buf[0] == '\n') { continue; } } tmp = strtok(buf, ";"); traj[i] = new ponto_capturado(); traj[i]->TID = atoi(tmp); tmp = strtok(NULL, ";"); int len = strlen(tmp); traj[i]->clazz = (char*)malloc(len + 1); strcpy(traj[i]->clazz, tmp); tmp = strtok(NULL, ";"); traj[i]->time = atoi(tmp); tmp = strtok(NULL, ";"); traj[i]->lat = atof(tmp); tmp = strtok(NULL, ";"); traj[i]->lon = atof(tmp); tmp = strtok(NULL, ";"); traj[i]->gid = atoi(tmp); tmp = strtok(NULL, ";"); if ((tmp != NULL) && (tmp[0] == '\0')) { traj[i]->stopId = atoi(tmp); } else { traj[i]->stopId = 0; } i++; } //printf("Loaded %s - %d points\n", filePath, i); fclose(trajFile); return trajetoria; } double* trajectoryRawer(trajetoria* trajetoria) { int N = trajetoria->qntdPontos; double* trajA = (double*)malloc( 4*N*sizeof(double)); for(int i = 0; i < N; i++) { trajA[i * 4] = trajetoria->pontos[i]->lat; trajA[i * 4 + 1] = trajetoria->pontos[i]->lon; trajA[i * 4 + 2] = trajetoria->pontos[i]->time; trajA[i * 4 + 3] = trajetoria->pontos[i]->time + 30; } return trajA; } double distance_sequential(double* trajA, int N, double* trajB, int M) { double* aScore = (double*)malloc( N*sizeof(double)); double* bScore = (double*)malloc( N*M*sizeof(double)); double* semanticsDescriptors = (double*)malloc( 2*2*sizeof(double)); //GEO semanticsDescriptors[0] = 0.0; semanticsDescriptors[1] = 0.5; //TIME semanticsDescriptors[2] = 0.0; semanticsDescriptors[3] = 0.5; //printf("Distance lengthA=%d, lengthB=%d\n", N,M); msm_sequential( trajA, N, trajB, M, aScore, bScore, semanticsDescriptors ); double parityAB = 0.0; for (int i = 0; i < N; i++) { parityAB += aScore[i]; } 
double parityBA = 0.0; for (int i = 0; i < N; i++) { double maxScore = 0.0; for (int j = 0; j < M; j++) { maxScore = MAX(maxScore, bScore[i * M + j]); } parityBA += maxScore; } double similarity = (parityAB + parityBA) / (N + M); free(semanticsDescriptors); //printf("similarity=%.2f\n", similarity ); free(bScore); free(aScore); aScore = NULL; bScore = NULL; semanticsDescriptors = NULL; return similarity; } double distance(double* trajA, int N, double* trajB, int M) { double* aScore = (double*)malloc( N*sizeof(double)); double* bScore = (double*)malloc( N*M*sizeof(double)); double* semanticsDescriptors = (double*)malloc( 2*2*sizeof(double)); //GEO semanticsDescriptors[0] = 0.0; semanticsDescriptors[1] = 0.5; //TIME semanticsDescriptors[2] = 0.0; semanticsDescriptors[3] = 0.5; double *d_trajA,*d_trajB, *d_aScore, *d_bScore, *d_semanticsDescriptors; cudaMalloc( (void**) &d_trajA, 4*N*sizeof(double) ); cudaMalloc( (void**) &d_trajB, 4*M*sizeof(double) ); cudaMalloc( (void**) &d_semanticsDescriptors, 2*2*sizeof(double) ); cudaMalloc( (void**) &d_aScore, N*sizeof(double) ); cudaMalloc( (void**) &d_bScore, N*M*sizeof(double) ); cudaMemcpy( (void*) d_trajA, (void*) trajA, 4*N*sizeof(double), cudaMemcpyHostToDevice); cudaMemcpy( (void*) d_trajB, (void*) trajB, 4*M*sizeof(double), cudaMemcpyHostToDevice); cudaMemcpy( (void*) d_semanticsDescriptors, (void*) semanticsDescriptors, 2*2*sizeof(double), cudaMemcpyHostToDevice); int THREADS = 512; int BLOCOS = (N/THREADS) + 1; struct timeval begin, end; gettimeofday(&begin, NULL); msm<<<BLOCOS, THREADS>>>( d_trajA, N, d_trajB, M, d_aScore, d_bScore, d_semanticsDescriptors ); gettimeofday(&end, NULL); cudaMemcpy( (void*) aScore, (void*) d_aScore, N*sizeof(double), cudaMemcpyDeviceToHost); cudaMemcpy( (void*) bScore, (void*) d_bScore, N*M*sizeof(double), cudaMemcpyDeviceToHost); cudaFree(d_trajA); cudaFree(d_trajB); cudaFree(d_aScore); cudaFree(d_bScore); cudaFree(d_semanticsDescriptors); double parityAB = 0.0; for (int i = 0; i < N; i++) { parityAB += aScore[i]; } double parityBA = 0.0; for (int i = 0; i < N; i++) { double maxScore = 0.0; for (int j = 0; j < M; j++) { maxScore = MAX(maxScore, bScore[i * M + j]); } parityBA += maxScore; } double similarity = (parityAB + parityBA) / (N + M); free(semanticsDescriptors); //printf("similarity=%.2f\n", similarity ); free(bScore); free(aScore); aScore = NULL; bScore = NULL; semanticsDescriptors = NULL; return similarity; } void msm_sequential(double* trajA, int lengthA, double* trajB, int lengthB, double* aScore, double* bScore, double* semanticsDescriptors) { double geoThreshold = semanticsDescriptors[0]; double timeThreshold = semanticsDescriptors[2]; double geoWeight = semanticsDescriptors[1]; double timeWeight = semanticsDescriptors[3]; for(int i = 0; i < lengthA; i++) { double latGeoA = trajA[i * 4]; double lonGeoA = trajA[i * 4 + 1]; double startTimeA = trajA[i * 4 + 2]; double endTimeA = trajA[i * 4 + 3]; double maxScore = 0.0; for (int j = 0; j < lengthB; j++) { double latGeoB = trajB[j * 4]; double lonGeoB = trajB[j * 4 + 1]; double startTimeB = trajB[j * 4 + 2]; double endTimeB = trajB[j * 4 + 3]; double timeScore = 0.0; if(startTimeA < endTimeB && startTimeB < endTimeA ) { double overlap = MIN(endTimeA, endTimeB) - MAX(startTimeA, startTimeB); if(overlap > 0.0) { double duration = MAX(endTimeA, endTimeB) - MIN(startTimeA, startTimeB); double timeDistance = 1 - (overlap / duration); timeScore = (timeDistance <= timeThreshold ? 
1 : 0) * timeWeight; } } double geoB[] = {latGeoB, lonGeoB}; double geoA[] = {latGeoA, lonGeoA}; double geoScore = (euclidean_local(geoB, geoA) <= geoThreshold ? 1 : 0) * geoWeight; double sumScore = timeScore + geoScore; if(sumScore > maxScore) { maxScore = sumScore; } bScore[i * lengthB + j] = sumScore; } aScore[i] = maxScore; } } //extern "C" __global__ void msm(double* trajA, int lengthA, double* trajB, int lengthB, double* aScore, double* bScore, double* semanticsDescriptors) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i>=lengthA) { return; } double latGeoA = trajA[i * 4]; double lonGeoA = trajA[i * 4 + 1]; double startTimeA = trajA[i * 4 + 2]; double endTimeA = trajA[i * 4 + 3]; double geoThreshold = semanticsDescriptors[0]; double timeThreshold = semanticsDescriptors[2]; double geoWeight = semanticsDescriptors[1]; double timeWeight = semanticsDescriptors[3]; double maxScore = 0.0; for (int j = 0; j < lengthB; j++) { double latGeoB = trajB[j * 4]; double lonGeoB = trajB[j * 4 + 1]; double startTimeB = trajB[j * 4 + 2]; double endTimeB = trajB[j * 4 + 3]; double timeScore = 0.0; if(startTimeA < endTimeB && startTimeB < endTimeA ) { double overlap = MIN(endTimeA, endTimeB) - MAX(startTimeA, startTimeB); if(overlap > 0.0) { double duration = MAX(endTimeA, endTimeB) - MIN(startTimeA, startTimeB); double timeDistance = 1 - (overlap / duration); timeScore = (timeDistance <= timeThreshold ? 1 : 0) * timeWeight; } } double geoB[] = {latGeoB, lonGeoB}; double geoA[] = {latGeoA, lonGeoA}; double geoScore = (euclidean(geoB, geoA) <= geoThreshold ? 1 : 0) * geoWeight; double sumScore = timeScore + geoScore; if(sumScore > maxScore) { maxScore = sumScore; } bScore[i * lengthB + j] = sumScore; } aScore[i] = maxScore; } __device__ double euclidean(double *p1, double *p2) { double distX = abs(p1[0] - p2[0]); double distXSquare = distX * distX; double distY = abs(p1[1] - p2[1]); double distYSquare = distY * distY; return sqrt(distXSquare + distYSquare); } double euclidean_local(double *p1, double *p2) { double distX = abs(p1[0] - p2[0]); double distXSquare = distX * distX; double distY = abs(p1[1] - p2[1]); double distYSquare = distY * distY; return sqrt(distXSquare + distYSquare); }
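Comparing the .hip and .cu versions of this pair shows the mechanical translation hipify applies: cudaMalloc/cudaMemcpy/cudaFree and the memcpy-kind enums become their hip* counterparts, and the triple-chevron kernel launch becomes hipLaunchKernelGGL with explicit grid, block, shared-memory and stream arguments. A minimal, self-contained HIP sketch of the same pattern on a toy kernel (not part of either file):

#include <hip/hip_runtime.h>
#include <cstdio>

__global__ void scale(double *v, int n, double s) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) v[i] *= s;
}

int main() {
  const int n = 1024;
  double h[1024];
  for (int i = 0; i < n; ++i) h[i] = i;

  double *d = NULL;
  hipMalloc((void **)&d, n * sizeof(double));                  // CUDA: cudaMalloc
  hipMemcpy(d, h, n * sizeof(double), hipMemcpyHostToDevice);  // CUDA: cudaMemcpy(..., cudaMemcpyHostToDevice)

  int threads = 256, blocks = (n + threads - 1) / threads;
  // CUDA: scale<<<blocks, threads>>>(d, n, 2.0);
  hipLaunchKernelGGL(scale, dim3(blocks), dim3(threads), 0, 0, d, n, 2.0);

  hipMemcpy(h, d, n * sizeof(double), hipMemcpyDeviceToHost);  // CUDA: cudaMemcpy(..., cudaMemcpyDeviceToHost)
  hipFree(d);                                                  // CUDA: cudaFree
  std::printf("h[3] = %.1f\n", h[3]);                          // expect 6.0
  return 0;
}

Apart from the hipify banner, the added hip/hip_runtime.h include, and these runtime API renames, the hipified trajectory file above is identical to its CUDA original.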
f531f1120e603730e348fbd9e32f3b29d3c36e5f.hip
// !!! This is a file automatically generated by hipify!!! #include "SceCells.h" #include <cmath> double epsilon = 1.0e-12; __constant__ double membrEquLen; __constant__ double membrStiff; __constant__ double membrStiff_Mitotic; //Ali June 30 __constant__ double pI; __constant__ double minLength; __constant__ double minDivisor; __constant__ uint maxAllNodePerCell; __constant__ uint maxMembrPerCell; __constant__ uint maxIntnlPerCell; __constant__ double bendCoeff; __constant__ double bendCoeff_Mitotic;//AAMIRI __constant__ double sceIB_M[5]; __constant__ double sceIBDiv_M[5]; __constant__ double sceII_M[5]; __constant__ double sceIIDiv_M[5]; __constant__ double grthPrgrCriEnd_M; __constant__ double F_Ext_Incline_M2 ; //Ali //Ali & Abu June 30th __device__ double calMembrForce_Mitotic(double& length, double& progress, double mitoticCri) { if (progress <= mitoticCri) { return (length - membrEquLen) * membrStiff; } else { return (length - membrEquLen) *(membrStiff+ (membrStiff_Mitotic-membrStiff)* (progress-mitoticCri)/(1.0-mitoticCri)); } } // //Ali __device__ double calExtForce(double& curTime) { return curTime * F_Ext_Incline_M2; } //Ali __device__ double obtainRandAngle(uint& cellRank, uint& seed) { thrust::default_random_engine rng(seed); // discard n numbers to avoid correlation rng.discard(cellRank); thrust::uniform_real_distribution<double> u0Pi(0, 2.0 * pI); double randomAngle = u0Pi(rng); return randomAngle; } __device__ uint obtainNewIntnlNodeIndex(uint& cellRank, uint& curActiveCount) { return (cellRank * maxAllNodePerCell + maxMembrPerCell + curActiveCount); } //AAMIRI __device__ uint obtainLastIntnlNodeIndex(uint& cellRank, uint& curActiveCount) { return (cellRank * maxAllNodePerCell + maxMembrPerCell + curActiveCount ); } //AAMIRI __device__ uint obtainMembEndNode(uint& cellRank, uint& activeMembrNodeThis) { return (cellRank * maxAllNodePerCell + activeMembrNodeThis - 1 ); } __device__ bool isAllIntnlFilled(uint& currentIntnlCount) { if (currentIntnlCount < maxIntnlPerCell) { return false; } else { return true; } } //AAMIRI __device__ int obtainRemovingMembrNodeID(uint &cellRank, uint& activeMembrNodes, uint& seed) { thrust::default_random_engine rng(seed); // discard n numbers to avoid correlation rng.discard(activeMembrNodes); thrust::uniform_int_distribution<double> dist(0, activeMembrNodes-1); int randomNode = dist(rng); return (cellRank * maxAllNodePerCell + randomNode); } //AAMIRI __device__ bool isAllIntnlEmptied(uint& currentIntnlCount) { if (currentIntnlCount > 0) { return false; } else { return true; } } //AAMIRI __device__ bool isAllMembrEmptied(uint& currentMembrCount) { if (currentMembrCount > 0) { return false; } else { return true; } } __device__ bool longEnough(double& length) { if (length > minLength) { return true; } else { return false; } } __device__ double compDist2D(double &xPos, double &yPos, double &xPos2, double &yPos2) { return sqrt( (xPos - xPos2) * (xPos - xPos2) + (yPos - yPos2) * (yPos - yPos2)); } void SceCells::distributeBdryIsActiveInfo() { thrust::fill(nodes->getInfoVecs().nodeIsActive.begin(), nodes->getInfoVecs().nodeIsActive.begin() + allocPara.startPosProfile, true); } void SceCells::distributeProfileIsActiveInfo() { thrust::fill( nodes->getInfoVecs().nodeIsActive.begin() + allocPara.startPosProfile, nodes->getInfoVecs().nodeIsActive.begin() + allocPara.startPosProfile + nodes->getAllocPara().currentActiveProfileNodeCount, true); } void SceCells::distributeECMIsActiveInfo() { uint totalNodeCountForActiveECM = allocPara.currentActiveECM * 
allocPara.maxNodePerECM; thrust::counting_iterator<uint> countingBegin(0); thrust::counting_iterator<uint> countingEnd(totalNodeCountForActiveECM); thrust::fill( nodes->getInfoVecs().nodeIsActive.begin() + allocPara.startPosECM, nodes->getInfoVecs().nodeIsActive.begin() + totalNodeCountForActiveECM + allocPara.startPosECM, true); } void SceCells::distributeCellIsActiveInfo() { totalNodeCountForActiveCells = allocPara.currentActiveCellCount * allocPara.maxNodeOfOneCell; thrust::counting_iterator<uint> countingBegin(0); thrust::counting_iterator<uint> countingEnd(totalNodeCountForActiveCells); thrust::transform( thrust::make_transform_iterator(countingBegin, ModuloFunctor(allocPara.maxNodeOfOneCell)), thrust::make_transform_iterator(countingEnd, ModuloFunctor(allocPara.maxNodeOfOneCell)), thrust::make_permutation_iterator( cellInfoVecs.activeNodeCountOfThisCell.begin(), make_transform_iterator(countingBegin, DivideFunctor(allocPara.maxNodeOfOneCell))), nodes->getInfoVecs().nodeIsActive.begin() + allocPara.startPosCells, thrust::less<uint>()); } void SceCells::distributeCellGrowthProgress() { totalNodeCountForActiveCells = allocPara.currentActiveCellCount * allocPara.maxNodeOfOneCell; thrust::counting_iterator<uint> countingBegin(0); thrust::counting_iterator<uint> countingEnd(totalNodeCountForActiveCells); thrust::copy( thrust::make_permutation_iterator( cellInfoVecs.growthProgress.begin(), make_transform_iterator(countingBegin, DivideFunctor(allocPara.maxNodeOfOneCell))), thrust::make_permutation_iterator( cellInfoVecs.growthProgress.begin(), make_transform_iterator(countingEnd, DivideFunctor(allocPara.maxNodeOfOneCell))), nodes->getInfoVecs().nodeGrowPro.begin() + allocPara.startPosCells); } void MembrPara::initFromConfig() { membrEquLenCPU = globalConfigVars.getConfigValue("MembrEquLen").toDouble(); membrStiffCPU = globalConfigVars.getConfigValue("MembrStiff").toDouble(); membrStiff_Mitotic = globalConfigVars.getConfigValue("MembrStiff_Mitotic").toDouble(); //Ali June30 membrGrowCoeff_Ori = globalConfigVars.getConfigValue("MembrGrowCoeff").toDouble(); membrGrowLimit_Ori = globalConfigVars.getConfigValue("MembrGrowLimit").toDouble(); membrGrowCoeff = membrGrowCoeff_Ori; membrGrowLimit = membrGrowLimit_Ori; //Ali F_Ext_Incline = globalConfigVars.getConfigValue("FExtIncline").toDouble(); //Ali membrBendCoeff = globalConfigVars.getConfigValue("MembrBenCoeff").toDouble(); //AAMIRI membrBendCoeff_Mitotic = globalConfigVars.getConfigValue("MembrBenCoeff_Mitotic").toDouble(); adjustLimit = globalConfigVars.getConfigValue("MembrAdjustLimit").toDouble(); adjustCoeff = globalConfigVars.getConfigValue("MembrAdjustCoeff").toDouble(); growthConst_N = globalConfigVars.getConfigValue("MembrGrowthConst").toDouble(); initMembrCt_N = globalConfigVars.getConfigValue("InitMembrNodeCount").toInt(); initIntnlCt_N = globalConfigVars.getConfigValue("InitCellNodeCount").toInt(); } SceCells::SceCells() { //curTime = 0 + 55800.0;//AAMIRI // Ali I comment that our safely on 04/04/2017 std ::cout << "I am in SceCells constructor with zero element "<<InitTimeStage<<std::endl ; } void SceCells::growAtRandom(double d_t) { totalNodeCountForActiveCells = allocPara.currentActiveCellCount * allocPara.maxNodeOfOneCell; // randomly select growth direction and speed. 
randomizeGrowth(); //std::cout << "after copy grow info" << std::endl; updateGrowthProgress(); //std::cout << "after update growth progress" << std::endl; decideIsScheduleToGrow(); //std::cout << "after decide is schedule to grow" << std::endl; computeCellTargetLength(); //std::cout << "after compute cell target length" << std::endl; computeDistToCellCenter(); //std::cout << "after compute dist to center" << std::endl; findMinAndMaxDistToCenter(); //std::cout << "after find min and max dist" << std::endl; computeLenDiffExpCur(); //std::cout << "after compute diff " << std::endl; stretchCellGivenLenDiff(); //std::cout << "after apply stretch force" << std::endl; cellChemotaxis(); //std::cout << "after apply cell chemotaxis" << std::endl; addPointIfScheduledToGrow(); //std::cout << "after adding node" << std::endl; } /** * Use the growth magnitude and dt to update growthProgress. */ void SceCells::updateGrowthProgress() { thrust::transform(cellInfoVecs.growthSpeed.begin(), cellInfoVecs.growthSpeed.begin() + allocPara.currentActiveCellCount, cellInfoVecs.growthProgress.begin(), cellInfoVecs.growthProgress.begin(), SaxpyFunctorWithMaxOfOne(dt)); } /** * Decide if the cells are going to add a node or not. * Use lastCheckPoint and growthProgress to decide whether to add a point or not */ void SceCells::decideIsScheduleToGrow() { thrust::transform( thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.growthProgress.begin(), cellInfoVecs.lastCheckPoint.begin())), thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.growthProgress.begin(), cellInfoVecs.lastCheckPoint.begin())) + allocPara.currentActiveCellCount, cellInfoVecs.isScheduledToGrow.begin(), PtCondiOp(miscPara.growThreshold)); } /** * Calculate target length of cell given the cell growth progress. * length is along the growth direction. */ void SceCells::computeCellTargetLength() { thrust::transform(cellInfoVecs.growthProgress.begin(), cellInfoVecs.growthProgress.begin() + allocPara.currentActiveCellCount, cellInfoVecs.expectedLength.begin(), CompuTarLen(bioPara.cellInitLength, bioPara.cellFinalLength)); } /** * Compute distance of each node to its corresponding cell center. * The distance could be either positive or negative, depending on the pre-defined * growth direction.
*/ void SceCells::computeDistToCellCenter() { thrust::transform( thrust::make_zip_iterator( thrust::make_tuple( make_permutation_iterator( cellInfoVecs.centerCoordX.begin(), make_transform_iterator(countingBegin, DivideFunctor( allocPara.maxNodeOfOneCell))), make_permutation_iterator( cellInfoVecs.centerCoordY.begin(), make_transform_iterator(countingBegin, DivideFunctor( allocPara.maxNodeOfOneCell))), make_permutation_iterator( cellInfoVecs.growthXDir.begin(), make_transform_iterator(countingBegin, DivideFunctor( allocPara.maxNodeOfOneCell))), make_permutation_iterator( cellInfoVecs.growthYDir.begin(), make_transform_iterator(countingBegin, DivideFunctor( allocPara.maxNodeOfOneCell))), nodes->getInfoVecs().nodeLocX.begin() + allocPara.startPosCells, nodes->getInfoVecs().nodeLocY.begin() + allocPara.startPosCells, nodes->getInfoVecs().nodeIsActive.begin() + allocPara.startPosCells)), thrust::make_zip_iterator( thrust::make_tuple( make_permutation_iterator( cellInfoVecs.centerCoordX.begin(), make_transform_iterator(countingBegin, DivideFunctor( allocPara.maxNodeOfOneCell))), make_permutation_iterator( cellInfoVecs.centerCoordY.begin(), make_transform_iterator(countingBegin, DivideFunctor( allocPara.maxNodeOfOneCell))), make_permutation_iterator( cellInfoVecs.growthXDir.begin(), make_transform_iterator(countingBegin, DivideFunctor( allocPara.maxNodeOfOneCell))), make_permutation_iterator( cellInfoVecs.growthYDir.begin(), make_transform_iterator(countingBegin, DivideFunctor( allocPara.maxNodeOfOneCell))), nodes->getInfoVecs().nodeLocX.begin() + allocPara.startPosCells, nodes->getInfoVecs().nodeLocY.begin() + allocPara.startPosCells, nodes->getInfoVecs().nodeIsActive.begin() + allocPara.startPosCells)) + totalNodeCountForActiveCells, cellNodeInfoVecs.distToCenterAlongGrowDir.begin(), CompuDist()); } /** * For nodes of each cell, find the maximum and minimum distance to the center. * We will then calculate the current length of a cell along its growth direction * using max and min distance to the center. */ void SceCells::findMinAndMaxDistToCenter() { thrust::reduce_by_key( make_transform_iterator(countingBegin, DivideFunctor(allocPara.maxNodeOfOneCell)), make_transform_iterator(countingBegin, DivideFunctor(allocPara.maxNodeOfOneCell)) + totalNodeCountForActiveCells, cellNodeInfoVecs.distToCenterAlongGrowDir.begin(), cellInfoVecs.cellRanksTmpStorage.begin(), cellInfoVecs.smallestDistance.begin(), thrust::equal_to<uint>(), thrust::minimum<double>()); // for nodes of each cell, find the maximum distance from the node to the corresponding // cell center along the pre-defined growth direction. thrust::reduce_by_key( make_transform_iterator(countingBegin, DivideFunctor(allocPara.maxNodeOfOneCell)), make_transform_iterator(countingBegin, DivideFunctor(allocPara.maxNodeOfOneCell)) + totalNodeCountForActiveCells, cellNodeInfoVecs.distToCenterAlongGrowDir.begin(), cellInfoVecs.cellRanksTmpStorage.begin(), cellInfoVecs.biggestDistance.begin(), thrust::equal_to<uint>(), thrust::maximum<double>()); } /** * Compute the difference for cells between their expected length and current length. 
*/ void SceCells::computeLenDiffExpCur() { thrust::transform( thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.expectedLength.begin(), cellInfoVecs.smallestDistance.begin(), cellInfoVecs.biggestDistance.begin())), thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.expectedLength.begin(), cellInfoVecs.smallestDistance.begin(), cellInfoVecs.biggestDistance.begin())) + allocPara.currentActiveCellCount, cellInfoVecs.lengthDifference.begin(), CompuDiff()); } /** * Use the difference that just computed and growthXDir&growthYDir * to apply stretching force (velocity) on nodes of all cells */ void SceCells::stretchCellGivenLenDiff() { thrust::transform( thrust::make_zip_iterator( thrust::make_tuple( cellNodeInfoVecs.distToCenterAlongGrowDir.begin(), make_permutation_iterator( cellInfoVecs.lengthDifference.begin(), make_transform_iterator(countingBegin, DivideFunctor( allocPara.maxNodeOfOneCell))), make_permutation_iterator( cellInfoVecs.growthXDir.begin(), make_transform_iterator(countingBegin, DivideFunctor( allocPara.maxNodeOfOneCell))), make_permutation_iterator( cellInfoVecs.growthYDir.begin(), make_transform_iterator(countingBegin, DivideFunctor( allocPara.maxNodeOfOneCell))), nodes->getInfoVecs().nodeVelX.begin() + allocPara.startPosCells, nodes->getInfoVecs().nodeVelY.begin() + allocPara.startPosCells)), thrust::make_zip_iterator( thrust::make_tuple( cellNodeInfoVecs.distToCenterAlongGrowDir.begin(), make_permutation_iterator( cellInfoVecs.lengthDifference.begin(), make_transform_iterator(countingBegin, DivideFunctor( allocPara.maxNodeOfOneCell))), make_permutation_iterator( cellInfoVecs.growthXDir.begin(), make_transform_iterator(countingBegin, DivideFunctor( allocPara.maxNodeOfOneCell))), make_permutation_iterator( cellInfoVecs.growthYDir.begin(), make_transform_iterator(countingBegin, DivideFunctor( allocPara.maxNodeOfOneCell))), nodes->getInfoVecs().nodeVelX.begin() + allocPara.startPosCells, nodes->getInfoVecs().nodeVelY.begin() + allocPara.startPosCells)) + totalNodeCountForActiveCells, thrust::make_zip_iterator( thrust::make_tuple( nodes->getInfoVecs().nodeVelX.begin() + allocPara.startPosCells, nodes->getInfoVecs().nodeVelY.begin() + allocPara.startPosCells)), ApplyStretchForce(bioPara.elongationCoefficient)); } /** * This is just an attempt. Cells move according to chemicals. 
*/ void SceCells::cellChemotaxis() { thrust::transform( thrust::make_zip_iterator( thrust::make_tuple( make_permutation_iterator( cellInfoVecs.growthSpeed.begin(), make_transform_iterator(countingBegin, DivideFunctor( allocPara.maxNodeOfOneCell))), make_permutation_iterator( cellInfoVecs.growthXDir.begin(), make_transform_iterator(countingBegin, DivideFunctor( allocPara.maxNodeOfOneCell))), make_permutation_iterator( cellInfoVecs.growthYDir.begin(), make_transform_iterator(countingBegin, DivideFunctor( allocPara.maxNodeOfOneCell))), nodes->getInfoVecs().nodeVelX.begin() + allocPara.startPosCells, nodes->getInfoVecs().nodeVelY.begin() + allocPara.startPosCells)), thrust::make_zip_iterator( thrust::make_tuple( make_permutation_iterator( cellInfoVecs.growthSpeed.begin(), make_transform_iterator(countingBegin, DivideFunctor( allocPara.maxNodeOfOneCell))), make_permutation_iterator( cellInfoVecs.growthXDir.begin(), make_transform_iterator(countingBegin, DivideFunctor( allocPara.maxNodeOfOneCell))), make_permutation_iterator( cellInfoVecs.growthYDir.begin(), make_transform_iterator(countingBegin, DivideFunctor( allocPara.maxNodeOfOneCell))), nodes->getInfoVecs().nodeVelX.begin() + allocPara.startPosCells, nodes->getInfoVecs().nodeVelY.begin() + allocPara.startPosCells)) + totalNodeCountForActiveCells, thrust::make_zip_iterator( thrust::make_tuple( nodes->getInfoVecs().nodeVelX.begin() + allocPara.startPosCells, nodes->getInfoVecs().nodeVelY.begin() + allocPara.startPosCells)), ApplyChemoVel(bioPara.chemoCoefficient)); } /** * Adjust the velocities of nodes. * For example, velocity of boundary nodes must be zero. */ void SceCells::adjustNodeVel() { thrust::counting_iterator<uint> countingIterBegin(0); thrust::counting_iterator<uint> countingIterEnd( totalNodeCountForActiveCells + allocPara.startPosCells); thrust::transform( thrust::make_zip_iterator( thrust::make_tuple(nodes->getInfoVecs().nodeVelX.begin(), nodes->getInfoVecs().nodeVelY.begin(), nodes->getInfoVecs().nodeIsActive.begin(), nodes->getInfoVecs().nodeCellType.begin(), countingIterBegin)), thrust::make_zip_iterator( thrust::make_tuple(nodes->getInfoVecs().nodeVelX.begin(), nodes->getInfoVecs().nodeVelY.begin(), nodes->getInfoVecs().nodeIsActive.begin(), nodes->getInfoVecs().nodeCellType.begin(), countingIterBegin)) + totalNodeCountForActiveCells + allocPara.startPosCells, thrust::make_zip_iterator( thrust::make_tuple(nodes->getInfoVecs().nodeVelX.begin(), nodes->getInfoVecs().nodeVelY.begin())), VelocityModifier(allocPara.startPosProfile, allocPara.currentActiveProfileNodeCount)); } /** * Move nodes according to the velocity we just adjusted. */ void SceCells::moveNodes() { thrust::transform( thrust::make_zip_iterator( thrust::make_tuple(nodes->getInfoVecs().nodeVelX.begin(), nodes->getInfoVecs().nodeVelY.begin())), thrust::make_zip_iterator( thrust::make_tuple(nodes->getInfoVecs().nodeVelX.begin(), nodes->getInfoVecs().nodeVelY.begin())) + totalNodeCountForActiveCells + allocPara.startPosCells, thrust::make_zip_iterator( thrust::make_tuple(nodes->getInfoVecs().nodeLocX.begin(), nodes->getInfoVecs().nodeLocY.begin())), thrust::make_zip_iterator( thrust::make_tuple(nodes->getInfoVecs().nodeLocX.begin(), nodes->getInfoVecs().nodeLocY.begin())), SaxpyFunctorDim2(dt)); } /** * Add a point to a cell if it is scheduled to grow. 
* This step does not guarantee success ; If adding new point failed, it will not change * isScheduleToGrow and activeNodeCount; */ void SceCells::addPointIfScheduledToGrow() { thrust::transform( thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.isScheduledToGrow.begin(), cellInfoVecs.activeNodeCountOfThisCell.begin(), cellInfoVecs.centerCoordX.begin(), cellInfoVecs.centerCoordY.begin(), countingBegin, cellInfoVecs.lastCheckPoint.begin())), thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.isScheduledToGrow.begin(), cellInfoVecs.activeNodeCountOfThisCell.begin(), cellInfoVecs.centerCoordX.begin(), cellInfoVecs.centerCoordY.begin(), countingBegin, cellInfoVecs.lastCheckPoint.begin())) + allocPara.currentActiveCellCount, thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.isScheduledToGrow.begin(), cellInfoVecs.activeNodeCountOfThisCell.begin(), cellInfoVecs.lastCheckPoint.begin())), AddPtOp(allocPara.maxNodeOfOneCell, miscPara.addNodeDistance, miscPara.minDistanceToOtherNode, growthAuxData.nodeIsActiveAddress, growthAuxData.nodeXPosAddress, growthAuxData.nodeYPosAddress, time(NULL), miscPara.growThreshold)); } //Ali commented this constructor in 04/04/2017 SceCells::SceCells(SceNodes* nodesInput, std::vector<uint>& numOfInitActiveNodesOfCells, std::vector<SceNodeType>& cellTypes) : countingBegin(0), initIntnlNodeCount( nodesInput->getAllocPara().maxNodeOfOneCell / 2), initGrowthProgress( 0.0) { curTime = 0.0 + 55800.0;//AAMIRI std ::cout << "I am in SceCells constructor with polymorphism shape "<<InitTimeStage<<std::endl ; initialize(nodesInput); copyInitActiveNodeCount(numOfInitActiveNodesOfCells); thrust::device_vector<SceNodeType> cellTypesToPass = cellTypes; setCellTypes(cellTypesToPass); distributeIsActiveInfo(); } SceCells::SceCells(SceNodes* nodesInput, std::vector<uint>& initActiveMembrNodeCounts, std::vector<uint>& initActiveIntnlNodeCounts, std::vector<double> &initGrowProgVec, double InitTimeStage) { // curTime = 0.0 + 55800.0;//AAMIRIi curTime=InitTimeStage ; std ::cout << "I am in SceCells constructor with number of inputs "<<InitTimeStage<<std::endl ; lastTimeExchange=0 ; firstTimeReadDpp=true ; //currentActiveCellCountOld=1 ; // small number tmpDebug = false; aniDebug = false; membrPara.initFromConfig(); shrinkRatio = globalConfigVars.getConfigValue("ShrinkRatio").toDouble(); centerShiftRatio = globalConfigVars.getConfigValue("CenterShiftRatio").toDouble(); memNewSpacing = globalConfigVars.getConfigValue("MembrLenDiv").toDouble(); initialize_M(nodesInput); cout<< "size of dpp in constructor is "<< cellInfoVecs.cell_Dpp.size() << endl ; copyToGPUConstMem(); copyInitActiveNodeCount_M(initActiveMembrNodeCounts, initActiveIntnlNodeCounts, initGrowProgVec); } void SceCells::initCellInfoVecs() { cellInfoVecs.growthProgress.resize(allocPara.maxCellCount, 0.0); cellInfoVecs.expectedLength.resize(allocPara.maxCellCount, bioPara.cellInitLength); cellInfoVecs.lengthDifference.resize(allocPara.maxCellCount, 0.0); cellInfoVecs.smallestDistance.resize(allocPara.maxCellCount); cellInfoVecs.biggestDistance.resize(allocPara.maxCellCount); cellInfoVecs.activeNodeCountOfThisCell.resize(allocPara.maxCellCount); cellInfoVecs.lastCheckPoint.resize(allocPara.maxCellCount, 0.0); cellInfoVecs.isDividing.resize(allocPara.maxCellCount); cellInfoVecs.cellTypes.resize(allocPara.maxCellCount, MX); cellInfoVecs.isScheduledToGrow.resize(allocPara.maxCellCount, false); cellInfoVecs.centerCoordX.resize(allocPara.maxCellCount); cellInfoVecs.centerCoordY.resize(allocPara.maxCellCount); 
cellInfoVecs.centerCoordZ.resize(allocPara.maxCellCount); cellInfoVecs.cellRanksTmpStorage.resize(allocPara.maxCellCount); cellInfoVecs.growthSpeed.resize(allocPara.maxCellCount, 0.0); cellInfoVecs.growthXDir.resize(allocPara.maxCellCount); cellInfoVecs.growthYDir.resize(allocPara.maxCellCount); cellInfoVecs.isRandGrowInited.resize(allocPara.maxCellCount, false); } void SceCells::initCellInfoVecs_M() { //std::cout << "max cell count = " << allocPara_m.maxCellCount << std::endl; cellInfoVecs.Cell_Damp.resize(allocPara_m.maxCellCount, 36.0); //Ali cellInfoVecs.cell_Dpp.resize(allocPara_m.maxCellCount, 0.0); //Ali cellInfoVecs.cell_DppOld.resize(allocPara_m.maxCellCount, 0.0); //Ali //cout<< "size of dpp in init is "<< cellInfoVecs.cell_Dpp.size() << endl ; cellInfoVecs.growthProgress.resize(allocPara_m.maxCellCount, 0.0); //A&A cellInfoVecs.growthProgressOld.resize(allocPara_m.maxCellCount, 0.0);//Ali cellInfoVecs.Cell_Time.resize(allocPara_m.maxCellCount, 0.0); //Ali cellInfoVecs.expectedLength.resize(allocPara_m.maxCellCount, bioPara.cellInitLength); cellInfoVecs.lengthDifference.resize(allocPara_m.maxCellCount, 0.0); cellInfoVecs.smallestDistance.resize(allocPara_m.maxCellCount); cellInfoVecs.biggestDistance.resize(allocPara_m.maxCellCount); cellInfoVecs.activeMembrNodeCounts.resize(allocPara_m.maxCellCount); cellInfoVecs.activeIntnlNodeCounts.resize(allocPara_m.maxCellCount); cellInfoVecs.lastCheckPoint.resize(allocPara_m.maxCellCount, 0.0); cellInfoVecs.isDividing.resize(allocPara_m.maxCellCount); cellInfoVecs.isEnteringMitotic.resize(allocPara_m.maxCellCount, false); //A&A //cellInfoVecs.isRemoving.resize(allocPara.maxCellCount);//AAMIRI cellInfoVecs.isScheduledToGrow.resize(allocPara_m.maxCellCount, false); cellInfoVecs.isScheduledToShrink.resize(allocPara_m.maxCellCount, false);//AAMIRI cellInfoVecs.isCellActive.resize(allocPara_m.maxCellCount, false);//AAMIRI cellInfoVecs.centerCoordX.resize(allocPara_m.maxCellCount); cellInfoVecs.centerCoordY.resize(allocPara_m.maxCellCount); cellInfoVecs.centerCoordZ.resize(allocPara_m.maxCellCount); cellInfoVecs.HertwigXdir.resize(allocPara_m.maxCellCount,0.0); //A&A cellInfoVecs.HertwigYdir.resize(allocPara_m.maxCellCount,0.0); //A&A cellInfoVecs.cellRanksTmpStorage.resize(allocPara_m.maxCellCount); cellInfoVecs.growthSpeed.resize(allocPara_m.maxCellCount, 0.0); cellInfoVecs.growthXDir.resize(allocPara_m.maxCellCount); cellInfoVecs.growthYDir.resize(allocPara_m.maxCellCount); cellInfoVecs.isRandGrowInited.resize(allocPara_m.maxCellCount, false); cellInfoVecs.isMembrAddingNode.resize(allocPara_m.maxCellCount, false); cellInfoVecs.maxTenIndxVec.resize(allocPara_m.maxCellCount); cellInfoVecs.maxTenRiVec.resize(allocPara_m.maxCellCount); cellInfoVecs.maxDistToRiVec.resize(allocPara_m.maxCellCount); //Ali cellInfoVecs.maxTenRiMidXVec.resize(allocPara_m.maxCellCount); cellInfoVecs.maxTenRiMidYVec.resize(allocPara_m.maxCellCount); cellInfoVecs.aveTension.resize(allocPara_m.maxCellCount); cellInfoVecs.membrGrowProgress.resize(allocPara_m.maxCellCount, 0.0); cellInfoVecs.membrGrowSpeed.resize(allocPara_m.maxCellCount, 0.0); cellInfoVecs.cellAreaVec.resize(allocPara_m.maxCellCount, 0.0); cellInfoVecs.cellPerimVec.resize(allocPara_m.maxCellCount, 0.0);//AAMIRI std::cout << "finished " << std::endl; } void SceCells::initCellNodeInfoVecs() { cellNodeInfoVecs.cellRanks.resize(allocPara.maxTotalCellNodeCount); cellNodeInfoVecs.activeXPoss.resize(allocPara.maxTotalCellNodeCount); cellNodeInfoVecs.activeYPoss.resize(allocPara.maxTotalCellNodeCount); 
cellNodeInfoVecs.activeZPoss.resize(allocPara.maxTotalCellNodeCount); cellNodeInfoVecs.distToCenterAlongGrowDir.resize( allocPara.maxTotalCellNodeCount); } void SceCells::initCellNodeInfoVecs_M() { std::cout << "max total node count = " << allocPara_m.maxTotalNodeCount << std::endl; cellNodeInfoVecs.cellRanks.resize(allocPara_m.maxTotalNodeCount); cellNodeInfoVecs.activeXPoss.resize(allocPara_m.maxTotalNodeCount); cellNodeInfoVecs.activeYPoss.resize(allocPara_m.maxTotalNodeCount); cellNodeInfoVecs.activeZPoss.resize(allocPara_m.maxTotalNodeCount); cellNodeInfoVecs.distToCenterAlongGrowDir.resize( allocPara_m.maxTotalNodeCount); } void SceCells::initGrowthAuxData() { growthAuxData.nodeIsActiveAddress = thrust::raw_pointer_cast( &(nodes->getInfoVecs().nodeIsActive[allocPara.startPosCells])); growthAuxData.nodeXPosAddress = thrust::raw_pointer_cast( &(nodes->getInfoVecs().nodeLocX[allocPara.startPosCells])); growthAuxData.nodeYPosAddress = thrust::raw_pointer_cast( &(nodes->getInfoVecs().nodeLocY[allocPara.startPosCells])); growthAuxData.randomGrowthSpeedMin = globalConfigVars.getConfigValue( "RandomGrowthSpeedMin").toDouble(); growthAuxData.randomGrowthSpeedMax = globalConfigVars.getConfigValue( "RandomGrowthSpeedMax").toDouble(); growthAuxData.randGenAuxPara = globalConfigVars.getConfigValue( "RandomGenerationAuxPara").toDouble(); if (controlPara.simuType == SingleCellTest) { growthAuxData.fixedGrowthSpeed = globalConfigVars.getConfigValue( "FixedGrowthSpeed").toDouble(); } } void SceCells::initGrowthAuxData_M() { growthAuxData.nodeIsActiveAddress = thrust::raw_pointer_cast( &(nodes->getInfoVecs().nodeIsActive[allocPara_m.bdryNodeCount])); growthAuxData.nodeXPosAddress = thrust::raw_pointer_cast( &(nodes->getInfoVecs().nodeLocX[allocPara_m.bdryNodeCount])); growthAuxData.nodeYPosAddress = thrust::raw_pointer_cast( &(nodes->getInfoVecs().nodeLocY[allocPara_m.bdryNodeCount])); growthAuxData.adhIndxAddr = thrust::raw_pointer_cast( &(nodes->getInfoVecs().nodeAdhereIndex[allocPara_m.bdryNodeCount])); growthAuxData.randomGrowthSpeedMin_Ori = globalConfigVars.getConfigValue( "RandomGrowthSpeedMin").toDouble(); growthAuxData.randomGrowthSpeedMax_Ori = globalConfigVars.getConfigValue( "RandomGrowthSpeedMax").toDouble(); growthAuxData.randomGrowthSpeedMin = growthAuxData.randomGrowthSpeedMin_Ori; growthAuxData.randomGrowthSpeedMax = growthAuxData.randomGrowthSpeedMax_Ori; growthAuxData.grthPrgrCriVal_M_Ori = globalConfigVars.getConfigValue( "GrowthPrgrCriVal").toDouble(); growthAuxData.grthProgrEndCPU = globalConfigVars.getConfigValue( "GrowthPrgrValEnd").toDouble(); } void SceCells::initialize(SceNodes* nodesInput) { nodes = nodesInput; controlPara = nodes->getControlPara(); readMiscPara(); readBioPara(); allocPara = nodesInput->getAllocPara(); // max internal node count must be even number. assert(allocPara_m.maxIntnlNodePerCell % 2 == 0); initCellInfoVecs(); initCellNodeInfoVecs(); initGrowthAuxData(); distributeIsCellRank(); } void SceCells::initialize_M(SceNodes* nodesInput) { std::cout << "Initializing cells ...... " << std::endl; //std::cout.flush(); nodes = nodesInput; allocPara_m = nodesInput->getAllocParaM(); // max internal node count must be even number. 
assert(allocPara_m.maxIntnlNodePerCell % 2 == 0); //std::cout << "break point 1 " << std::endl; //std::cout.flush(); controlPara = nodes->getControlPara(); //std::cout << "break point 2 " << std::endl; //std::cout.flush(); readMiscPara_M(); //std::cout << "break point 3 " << std::endl; //std::cout.flush(); initCellInfoVecs_M(); cout<< "size of dpp initilizie is "<< cellInfoVecs.cell_Dpp.size() << endl ; //std::cout << "break point 4 " << std::endl; //std::cout.flush(); readBioPara(); //std::cout << "break point 5 " << std::endl; //std::cout.flush(); //std::cout << "break point 6 " << std::endl; //std::cout.flush(); initCellNodeInfoVecs_M(); //std::cout << "break point 7 " << std::endl; //std::cout.flush(); initGrowthAuxData_M(); //std::cout << "break point 8 " << std::endl; //std::cout.flush(); } void SceCells::copyInitActiveNodeCount( std::vector<uint>& numOfInitActiveNodesOfCells) { thrust::copy(numOfInitActiveNodesOfCells.begin(), numOfInitActiveNodesOfCells.end(), cellInfoVecs.activeNodeCountOfThisCell.begin()); } void SceCells::allComponentsMove() { adjustNodeVel(); moveNodes(); } /** * Mark cell node as either active or inactive. * left part of the node array will be active and right part will be inactive. * the threshold is defined by array activeNodeCountOfThisCell. * e.g. activeNodeCountOfThisCell = {2,3} and maxNodeOfOneCell = 5 */ void SceCells::distributeIsActiveInfo() { //std::cout << "before distribute bdry isActive" << std::endl; distributeBdryIsActiveInfo(); //std::cout << "before distribute profile isActive" << std::endl; distributeProfileIsActiveInfo(); //std::cout << "before distribute ecm isActive" << std::endl; distributeECMIsActiveInfo(); //std::cout << "before distribute cells isActive" << std::endl; distributeCellIsActiveInfo(); } void SceCells::distributeIsCellRank() { uint totalNodeCountForActiveCells = allocPara.currentActiveCellCount * allocPara.maxNodeOfOneCell; thrust::counting_iterator<uint> countingBegin(0); thrust::counting_iterator<uint> countingCellEnd( totalNodeCountForActiveCells); std::cerr << "totalNodeCount for active cells " << totalNodeCountForActiveCells << std::endl; //thrust::counting_iterator<uint> countingECMEnd(countingECMEnd); // only computes the cell ranks of cells. the rest remain unchanged. thrust::transform(countingBegin, countingCellEnd, nodes->getInfoVecs().nodeCellRank.begin() + allocPara.startPosCells, DivideFunctor(allocPara.maxNodeOfOneCell)); std::cerr << "finished cellRank transformation" << std::endl; } /** * This method computes the center of all cells. * more efficient than simply iterating over the cells because of parallel reduction.
*/ void SceCells::computeCenterPos() { uint totalNodeCountForActiveCells = allocPara.currentActiveCellCount * allocPara.maxNodeOfOneCell; thrust::counting_iterator<uint> countingBegin(0); thrust::counting_iterator<uint> countingEnd(totalNodeCountForActiveCells); uint totalNumberOfActiveNodes = thrust::reduce( cellInfoVecs.activeNodeCountOfThisCell.begin(), cellInfoVecs.activeNodeCountOfThisCell.begin() + allocPara.currentActiveCellCount); thrust::copy_if( thrust::make_zip_iterator( thrust::make_tuple( make_transform_iterator(countingBegin, DivideFunctor(allocPara.maxNodeOfOneCell)), nodes->getInfoVecs().nodeLocX.begin() + allocPara.startPosCells, nodes->getInfoVecs().nodeLocY.begin() + allocPara.startPosCells, nodes->getInfoVecs().nodeLocZ.begin() + allocPara.startPosCells)), thrust::make_zip_iterator( thrust::make_tuple( make_transform_iterator(countingBegin, DivideFunctor(allocPara.maxNodeOfOneCell)), nodes->getInfoVecs().nodeLocX.begin() + allocPara.startPosCells, nodes->getInfoVecs().nodeLocY.begin() + allocPara.startPosCells, nodes->getInfoVecs().nodeLocZ.begin() + allocPara.startPosCells)) + totalNodeCountForActiveCells, nodes->getInfoVecs().nodeIsActive.begin() + allocPara.startPosCells, thrust::make_zip_iterator( thrust::make_tuple(cellNodeInfoVecs.cellRanks.begin(), cellNodeInfoVecs.activeXPoss.begin(), cellNodeInfoVecs.activeYPoss.begin(), cellNodeInfoVecs.activeZPoss.begin())), isTrue()); thrust::reduce_by_key(cellNodeInfoVecs.cellRanks.begin(), cellNodeInfoVecs.cellRanks.begin() + totalNumberOfActiveNodes, thrust::make_zip_iterator( thrust::make_tuple(cellNodeInfoVecs.activeXPoss.begin(), cellNodeInfoVecs.activeYPoss.begin(), cellNodeInfoVecs.activeZPoss.begin())), cellInfoVecs.cellRanksTmpStorage.begin(), thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.centerCoordX.begin(), cellInfoVecs.centerCoordY.begin(), cellInfoVecs.centerCoordZ.begin())), thrust::equal_to<uint>(), CVec3Add()); thrust::transform( thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.centerCoordX.begin(), cellInfoVecs.centerCoordY.begin(), cellInfoVecs.centerCoordZ.begin())), thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.centerCoordX.begin(), cellInfoVecs.centerCoordY.begin(), cellInfoVecs.centerCoordZ.begin())) + allocPara.currentActiveCellCount, cellInfoVecs.activeNodeCountOfThisCell.begin(), thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.centerCoordX.begin(), cellInfoVecs.centerCoordY.begin(), cellInfoVecs.centerCoordZ.begin())), CVec3Divide()); } /** * 2D version of cell division. * Division process is done by creating two temporary vectors to hold the node information * that are going to divide. * * step 1: based on lengthDifference, expectedLength and growthProgress, * this process determines whether a certain cell is ready to divide and then assign * a boolean value to isDivided. * * step 2. copy those cells that will divide in to the temp vectors created * * step 3. For each cell in the temp vectors, we sort its nodes by its distance to the * corresponding cell center. * This step is not very effcient when the number of cells going to divide is big. * but this is unlikely to happen because cells will divide according to external chemical signaling * and each will have different divide progress. * * step 4. copy the right part of each cell of the sorted array (temp1) to left part of each cell of * another array * * step 5. transform isActive vector of both temp1 and temp2, making only left part of each cell active. * * step 6. 
insert temp2 to the end of the cell array * * step 7. copy temp1 to the previous position of the cell array. * * step 8. add activeCellCount of the system. * * step 9. mark isDivide of all cells to false. */ void SceCells::divide2DSimplified() { bool isDivisionPresent = decideIfGoingToDivide(); if (!isDivisionPresent) { return; } copyCellsPreDivision(); sortNodesAccordingToDist(); copyLeftAndRightToSeperateArrays(); transformIsActiveArrayOfBothArrays(); addSecondArrayToCellArray(); copyFirstArrayToPreviousPos(); updateActiveCellCount(); markIsDivideFalse(); } bool SceCells::decideIfGoingToDivide() { // step 1 thrust::transform( thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.lengthDifference.begin(), cellInfoVecs.expectedLength.begin(), cellInfoVecs.growthProgress.begin(), cellInfoVecs.activeNodeCountOfThisCell.begin())), thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.lengthDifference.begin(), cellInfoVecs.expectedLength.begin(), cellInfoVecs.growthProgress.begin(), cellInfoVecs.activeNodeCountOfThisCell.begin())) + allocPara.currentActiveCellCount, thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.isDividing.begin(), cellInfoVecs.growthProgress.begin())), CompuIsDivide(miscPara.isDivideCriticalRatio, allocPara.maxNodeOfOneCell)); // sum all bool values which indicate whether the cell is going to divide. // toBeDivideCount is the total number of cells going to divide. divAuxData.toBeDivideCount = thrust::reduce(cellInfoVecs.isDividing.begin(), cellInfoVecs.isDividing.begin() + allocPara.currentActiveCellCount, (uint) (0)); if (divAuxData.toBeDivideCount > 0) { return true; } else { return false; } } void SceCells::copyCellsPreDivision() { // step 2 : copy all cell rank and distance to its corresponding center with divide flag = 1 totalNodeCountForActiveCells = allocPara.currentActiveCellCount * allocPara.maxNodeOfOneCell; divAuxData.nodeStorageCount = divAuxData.toBeDivideCount * allocPara.maxNodeOfOneCell; divAuxData.tmpIsActiveHold1 = thrust::device_vector<bool>( divAuxData.nodeStorageCount, true); divAuxData.tmpDistToCenter1 = thrust::device_vector<double>( divAuxData.nodeStorageCount, 0.0); divAuxData.tmpCellRankHold1 = thrust::device_vector<uint>( divAuxData.nodeStorageCount, 0.0); divAuxData.tmpXValueHold1 = thrust::device_vector<double>( divAuxData.nodeStorageCount, 0.0); divAuxData.tmpYValueHold1 = thrust::device_vector<double>( divAuxData.nodeStorageCount, 0.0); divAuxData.tmpZValueHold1 = thrust::device_vector<double>( divAuxData.nodeStorageCount, 0.0); divAuxData.tmpCellTypes = thrust::device_vector<SceNodeType>( divAuxData.nodeStorageCount); divAuxData.tmpIsActiveHold2 = thrust::device_vector<bool>( divAuxData.nodeStorageCount, false); divAuxData.tmpDistToCenter2 = thrust::device_vector<double>( divAuxData.nodeStorageCount, 0.0); divAuxData.tmpXValueHold2 = thrust::device_vector<double>( divAuxData.nodeStorageCount, 0.0); divAuxData.tmpYValueHold2 = thrust::device_vector<double>( divAuxData.nodeStorageCount, 0.0); divAuxData.tmpZValueHold2 = thrust::device_vector<double>( divAuxData.nodeStorageCount, 0.0); // step 2 , continued thrust::copy_if( thrust::make_zip_iterator( thrust::make_tuple( make_transform_iterator(countingBegin, DivideFunctor(allocPara.maxNodeOfOneCell)), cellNodeInfoVecs.distToCenterAlongGrowDir.begin(), nodes->getInfoVecs().nodeLocX.begin() + allocPara.startPosCells, nodes->getInfoVecs().nodeLocY.begin() + allocPara.startPosCells, nodes->getInfoVecs().nodeLocZ.begin() + allocPara.startPosCells, 
nodes->getInfoVecs().nodeCellType.begin() + allocPara.startPosCells)), thrust::make_zip_iterator( thrust::make_tuple( make_transform_iterator(countingBegin, DivideFunctor(allocPara.maxNodeOfOneCell)), cellNodeInfoVecs.distToCenterAlongGrowDir.begin(), nodes->getInfoVecs().nodeLocX.begin() + allocPara.startPosCells, nodes->getInfoVecs().nodeLocY.begin() + allocPara.startPosCells, nodes->getInfoVecs().nodeLocZ.begin() + allocPara.startPosCells, nodes->getInfoVecs().nodeCellType.begin() + allocPara.startPosCells)) + totalNodeCountForActiveCells, thrust::make_permutation_iterator(cellInfoVecs.isDividing.begin(), make_transform_iterator(countingBegin, DivideFunctor(allocPara.maxNodeOfOneCell))), thrust::make_zip_iterator( thrust::make_tuple(divAuxData.tmpCellRankHold1.begin(), divAuxData.tmpDistToCenter1.begin(), divAuxData.tmpXValueHold1.begin(), divAuxData.tmpYValueHold1.begin(), divAuxData.tmpZValueHold1.begin(), divAuxData.tmpCellTypes.begin())), isTrue()); } /** * performance wise, this implementation is not the best because I can use only one sort_by_key * with speciialized comparision operator. However, This implementation is more robust and won't * compromise performance too much. */ void SceCells::sortNodesAccordingToDist() { //step 3 for (uint i = 0; i < divAuxData.toBeDivideCount; i++) { thrust::sort_by_key( divAuxData.tmpDistToCenter1.begin() + i * allocPara.maxNodeOfOneCell, divAuxData.tmpDistToCenter1.begin() + (i + 1) * allocPara.maxNodeOfOneCell, thrust::make_zip_iterator( thrust::make_tuple( divAuxData.tmpXValueHold1.begin() + i * allocPara.maxNodeOfOneCell, divAuxData.tmpYValueHold1.begin() + i * allocPara.maxNodeOfOneCell, divAuxData.tmpZValueHold1.begin() + i * allocPara.maxNodeOfOneCell))); } } /** * scatter_if() is a thrust function. * inputIter1 first, * inputIter1 last, * inputIter2 map, * inputIter3 stencil * randomAccessIter output */ void SceCells::copyLeftAndRightToSeperateArrays() { //step 4. thrust::scatter_if( thrust::make_zip_iterator( thrust::make_tuple(divAuxData.tmpXValueHold1.begin(), divAuxData.tmpYValueHold1.begin(), divAuxData.tmpZValueHold1.begin())), thrust::make_zip_iterator( thrust::make_tuple(divAuxData.tmpXValueHold1.end(), divAuxData.tmpYValueHold1.end(), divAuxData.tmpZValueHold1.end())), make_transform_iterator(countingBegin, LeftShiftFunctor(allocPara.maxNodeOfOneCell)), make_transform_iterator(countingBegin, IsRightSide(allocPara.maxNodeOfOneCell)), thrust::make_zip_iterator( thrust::make_tuple(divAuxData.tmpXValueHold2.begin(), divAuxData.tmpYValueHold2.begin(), divAuxData.tmpZValueHold2.begin()))); } void SceCells::transformIsActiveArrayOfBothArrays() { thrust::transform(countingBegin, countingBegin + divAuxData.nodeStorageCount, divAuxData.tmpIsActiveHold1.begin(), IsLeftSide(allocPara.maxNodeOfOneCell)); thrust::transform(countingBegin, countingBegin + divAuxData.nodeStorageCount, divAuxData.tmpIsActiveHold2.begin(), IsLeftSide(allocPara.maxNodeOfOneCell)); if (divAuxData.toBeDivideCount != 0) { std::cout << "before insert, active cell count in nodes:" << nodes->getAllocPara().currentActiveCellCount << std::endl; } } void SceCells::addSecondArrayToCellArray() { /// step 6. 
call SceNodes function to add newly divided cells nodes->addNewlyDividedCells(divAuxData.tmpXValueHold2, divAuxData.tmpYValueHold2, divAuxData.tmpZValueHold2, divAuxData.tmpIsActiveHold2, divAuxData.tmpCellTypes); } void SceCells::copyFirstArrayToPreviousPos() { thrust::scatter( thrust::make_zip_iterator( thrust::make_tuple(divAuxData.tmpIsActiveHold1.begin(), divAuxData.tmpXValueHold1.begin(), divAuxData.tmpYValueHold1.begin(), divAuxData.tmpZValueHold1.begin())), thrust::make_zip_iterator( thrust::make_tuple(divAuxData.tmpIsActiveHold1.end(), divAuxData.tmpXValueHold1.end(), divAuxData.tmpYValueHold1.end(), divAuxData.tmpZValueHold1.end())), thrust::make_transform_iterator( thrust::make_zip_iterator( thrust::make_tuple(countingBegin, divAuxData.tmpCellRankHold1.begin())), CompuPos(allocPara.maxNodeOfOneCell)), thrust::make_zip_iterator( thrust::make_tuple( nodes->getInfoVecs().nodeIsActive.begin() + allocPara.startPosCells, nodes->getInfoVecs().nodeLocX.begin() + allocPara.startPosCells, nodes->getInfoVecs().nodeLocY.begin() + allocPara.startPosCells, nodes->getInfoVecs().nodeLocZ.begin() + allocPara.startPosCells))); /** * after dividing, the cell should resume the initial * (1) node count, which defaults to be half size of max node count * (2) growth progress, which defaults to 0 * (3) last check point, which defaults to 0 */ thrust::scatter_if( thrust::make_zip_iterator( thrust::make_tuple(initIntnlNodeCount, initGrowthProgress, initGrowthProgress)), thrust::make_zip_iterator( thrust::make_tuple(initIntnlNodeCount, initGrowthProgress, initGrowthProgress)) + allocPara.currentActiveCellCount, countingBegin, cellInfoVecs.isDividing.begin(), thrust::make_zip_iterator( thrust::make_tuple( cellInfoVecs.activeNodeCountOfThisCell.begin(), cellInfoVecs.growthProgress.begin(), cellInfoVecs.lastCheckPoint.begin())), isTrue()); // TODO: combine this one with the previous scatter_if to improve efficiency. thrust::fill( cellInfoVecs.activeNodeCountOfThisCell.begin() + allocPara.currentActiveCellCount, cellInfoVecs.activeNodeCountOfThisCell.begin() + allocPara.currentActiveCellCount + divAuxData.toBeDivideCount, allocPara.maxNodeOfOneCell / 2); } void SceCells::updateActiveCellCount() { allocPara.currentActiveCellCount = allocPara.currentActiveCellCount + divAuxData.toBeDivideCount; NodeAllocPara para = nodes->getAllocPara(); para.currentActiveCellCount = allocPara.currentActiveCellCount; nodes->setAllocPara(para); } void SceCells::markIsDivideFalse() { thrust::fill(cellInfoVecs.isDividing.begin(), cellInfoVecs.isDividing.begin() + allocPara.currentActiveCellCount, false); } void SceCells::readMiscPara() { miscPara.addNodeDistance = globalConfigVars.getConfigValue( "DistanceForAddingNode").toDouble(); miscPara.minDistanceToOtherNode = globalConfigVars.getConfigValue( "MinDistanceToOtherNode").toDouble(); miscPara.isDivideCriticalRatio = globalConfigVars.getConfigValue( "IsDivideCrticalRatio").toDouble(); // reason for adding a small term here is to avoid scenario when checkpoint might add many times // up to 0.99999999 which is theoretically 1.0 but not in computer memory. If we don't include // this small term we might risk adding one more node. 
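	// Worked example (illustrative value, not taken from the config files): if
	// "MaxNodePerCell" were 20, the denominator below is 20 - 20 / 2 = 10 with
	// integer arithmetic, so growThreshold = 0.1 + epsilon. Growth progress then
	// has to advance roughly 0.1 past lastCheckPoint before another internal
	// node is scheduled to be added, i.e. about 10 additions over a full growth
	// cycle, matching the 10 internal node slots left to fill.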
int maxNodeOfOneCell = globalConfigVars.getConfigValue("MaxNodePerCell").toInt(); miscPara.growThreshold = 1.0 / (maxNodeOfOneCell - maxNodeOfOneCell / 2) + epsilon; } void SceCells::readMiscPara_M() { miscPara.addNodeDistance = globalConfigVars.getConfigValue( "DistanceForAddingNode").toDouble(); miscPara.minDistanceToOtherNode = globalConfigVars.getConfigValue( "MinDistanceToOtherNode").toDouble(); miscPara.isDivideCriticalRatio = globalConfigVars.getConfigValue( "IsDivideCrticalRatio").toDouble(); // reason for adding a small term here is to avoid scenario when checkpoint might add many times // up to 0.99999999 which is theoretically 1.0 but not in computer memory. If we don't include // this small term we might risk adding one more node. int maxIntnlNodePerCell = globalConfigVars.getConfigValue( "MaxIntnlNodeCountPerCell").toInt(); miscPara.growThreshold = 1.0 / (maxIntnlNodePerCell - maxIntnlNodePerCell / 2) + epsilon; miscPara.prolifDecayCoeff = globalConfigVars.getConfigValue( "ProlifDecayCoeff").toDouble(); } void SceCells::readBioPara() { if (controlPara.simuType != Disc_M) { bioPara.cellInitLength = globalConfigVars.getConfigValue( "CellInitLength").toDouble(); std::cout << "break point 1 " << bioPara.cellInitLength << std::endl; std::cout.flush(); bioPara.cellFinalLength = globalConfigVars.getConfigValue( "CellFinalLength").toDouble(); std::cout << "break point 2 " << bioPara.cellFinalLength << std::endl; std::cout.flush(); bioPara.elongationCoefficient = globalConfigVars.getConfigValue( "ElongateCoefficient").toDouble(); std::cout << "break point 3 " << bioPara.elongationCoefficient << std::endl; std::cout.flush(); } if (controlPara.simuType == Beak) { std::cout << "break point 4 " << std::endl; std::cout.flush(); bioPara.chemoCoefficient = globalConfigVars.getConfigValue( "ChemoCoefficient").toDouble(); } //std::cin >> jj; } void SceCells::randomizeGrowth() { thrust::counting_iterator<uint> countingBegin(0); thrust::transform( thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.centerCoordX.begin(), cellInfoVecs.growthXDir.begin(), cellInfoVecs.growthYDir.begin(), cellInfoVecs.isRandGrowInited.begin(), countingBegin)), thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.centerCoordX.begin(), cellInfoVecs.growthXDir.begin(), cellInfoVecs.growthYDir.begin(), cellInfoVecs.isRandGrowInited.begin(), countingBegin)) + allocPara.currentActiveCellCount, thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.growthSpeed.begin(), cellInfoVecs.growthXDir.begin(), cellInfoVecs.growthYDir.begin(), cellInfoVecs.isRandGrowInited.begin())), AssignRandIfNotInit(growthAuxData.randomGrowthSpeedMin, growthAuxData.randomGrowthSpeedMax, allocPara.currentActiveCellCount, growthAuxData.randGenAuxPara)); } /** * To run all the cell level logics. * First step we got center positions of cells. * Grow. */ void SceCells::runAllCellLevelLogicsDisc(double dt) { this->dt = dt; //std::cerr << "enter run all cell level logics" << std::endl; computeCenterPos(); //std::cerr << "after compute center position." << std::endl; if (nodes->getControlPara().controlSwitchs.stab == OFF) { growAtRandom(dt); //grow2DTwoRegions(dt, region1, region2); //std::cerr << "after grow cells" << std::endl; //distributeIsActiveInfo(); //std::cerr << "after distribute is active info." << std::endl; divide2DSimplified(); //std::cerr << "after divide 2D simplified." << std::endl; distributeIsActiveInfo(); //std::cerr << "after distribute is active info." 
<< std::endl; distributeCellGrowthProgress(); } allComponentsMove(); //std::cerr << "after all components move." << std::endl; } //Ali void SceCells::runAllCellLogicsDisc_M(double dt) { void SceCells::runAllCellLogicsDisc_M(double dt, double Damp_Coef, double InitTimeStage) { //Ali std::cout << " *** 1 ***" << endl; std::cout.flush(); this->dt = dt; this->Damp_Coef=Damp_Coef ; //Ali this->InitTimeStage=InitTimeStage ; //A & A growthAuxData.prolifDecay =1.0 ; // no decay for right now exp(-curTime * miscPara.prolifDecayCoeff); cout<< "The important curTime used in simulation is here which is"<<curTime <<endl; growthAuxData.randomGrowthSpeedMin = growthAuxData.prolifDecay * growthAuxData.randomGrowthSpeedMin_Ori; growthAuxData.randomGrowthSpeedMax = growthAuxData.prolifDecay * growthAuxData.randomGrowthSpeedMax_Ori; curTime = curTime + dt; std::cout << " *** 2 ***" << endl; std::cout.flush(); applySceCellDisc_M(); std::cout << " *** 3 ***" << endl; std::cout.flush(); //Ali computeCenterPos_M(); exchSignal(); BC_Imp_M() ; std::cout << " *** 3.5 ***" << endl; std::cout.flush(); //Ali applyMemForce_M(); std::cout << " *** 4 ***" << endl; std::cout.flush(); //Ali cmment // // computeCenterPos_M(); std::cout << " *** 5 ***" << endl; std::cout.flush(); //Ali cmment // growAtRandom_M(dt); std::cout << " *** 6 ***" << endl; std::cout.flush(); //if (curTime<3300.0) divide2D_M(); std::cout << " *** 7 ***" << endl; std::cout.flush(); distributeCellGrowthProgress_M(); std::cout << " *** 8 ***" << endl; std::cout.flush(); findTangentAndNormal_M();//AAMIRI ADDED May29 allComponentsMove_M(); std::cout << " *** 9 ***" << endl; std::cout.flush(); handleMembrGrowth_M(); std::cout << " *** 10 ***" << endl; std::cout.flush(); } void SceCells::exchSignal(){ if (firstTimeReadDpp) { uint maxTotalNodes=nodes->getInfoVecs().nodeLocX.size() ; signal.Initialize(allocPara_m.maxAllNodePerCell,allocPara_m.maxMembrNodePerCell,maxTotalNodes, allocPara_m.maxCellCount) ; cout << " I passed the initializtion for signaling module" << endl ; } lastTimeExchange=lastTimeExchange+dt ; cout << "last time exchange is " << lastTimeExchange << endl ; cout << "dt is " << dt << endl ; double exchPeriod=1 ; if ( lastTimeExchange>exchPeriod) { lastTimeExchange=0 ; //vector<CVector> cellCentersHost ; //cellCentersHost=getAllCellCenters(); //Ali cout << "I entered the function to update dpp" << endl ; thrust::device_vector<double>::iterator MinX_Itr=thrust::min_element(cellInfoVecs.centerCoordX.begin(), cellInfoVecs.centerCoordX.begin()+allocPara_m.currentActiveCellCount) ; thrust::device_vector<double>::iterator MaxX_Itr=thrust::max_element(cellInfoVecs.centerCoordX.begin(), cellInfoVecs.centerCoordX.begin()+ allocPara_m.currentActiveCellCount) ; thrust::device_vector<double>::iterator MinY_Itr=thrust::min_element(cellInfoVecs.centerCoordY.begin(), cellInfoVecs.centerCoordY.begin()+ allocPara_m.currentActiveCellCount) ; thrust::device_vector<double>::iterator MaxY_Itr=thrust::max_element(cellInfoVecs.centerCoordY.begin(), cellInfoVecs.centerCoordY.begin()+ allocPara_m.currentActiveCellCount) ; Tisu_MinX= *MinX_Itr ; Tisu_MaxX= *MaxX_Itr ; Tisu_MinY= *MinY_Itr ; Tisu_MaxY= *MaxY_Itr ; Tisu_R=0.5*(0.5*(Tisu_MaxX-Tisu_MinX)+0.5*(Tisu_MaxY-Tisu_MinY)) ; totalNodeCountForActiveCells = allocPara_m.currentActiveCellCount * allocPara_m.maxAllNodePerCell; thrust:: copy (nodes->getInfoVecs().nodeIsActive.begin(),nodes->getInfoVecs().nodeIsActive.begin()+ totalNodeCountForActiveCells, signal.nodeIsActiveHost.begin()); thrust:: copy 
(nodes->getInfoVecs().nodeLocX.begin(),nodes->getInfoVecs().nodeLocX.begin()+ totalNodeCountForActiveCells, signal.nodeLocXHost.begin()); thrust:: copy (nodes->getInfoVecs().nodeLocY.begin(),nodes->getInfoVecs().nodeLocY.begin()+ totalNodeCountForActiveCells, signal.nodeLocYHost.begin()); thrust:: copy (cellInfoVecs.centerCoordX.begin(),cellInfoVecs.centerCoordX.begin()+allocPara_m.currentActiveCellCount, signal.cellCenterX.begin()); thrust:: copy (cellInfoVecs.centerCoordY.begin(),cellInfoVecs.centerCoordY.begin()+allocPara_m.currentActiveCellCount, signal.cellCenterY.begin()); signal.updateSignal(Tisu_MinX,Tisu_MaxX,Tisu_MinY,Tisu_MaxY,curTime,totalNodeCountForActiveCells,allocPara_m.currentActiveCellCount) ; //Ali assert(cellInfoVecs.cell_Dpp.size()==signal.dppLevel.size()); thrust::copy(signal.dppLevel.begin(),signal.dppLevel.end(),cellInfoVecs.cell_Dpp.begin()) ; //currentActiveCellCountOld=allocPara_m.currentActiveCellCount; } if (firstTimeReadDpp) { thrust::copy(signal.dppLevel.begin(),signal.dppLevel.end(),cellInfoVecs.cell_DppOld.begin()) ; firstTimeReadDpp=false ; } } void SceCells::runStretchTest(double dt) { this->dt = dt; computeCenterPos(); growAlongX(false, dt); moveNodes(); } void SceCells::growAlongX(bool isAddPt, double d_t) { totalNodeCountForActiveCells = allocPara.currentActiveCellCount * allocPara.maxNodeOfOneCell; setGrowthDirXAxis(); //std::cout << "after copy grow info" << std::endl; updateGrowthProgress(); //std::cout << "after update growth progress" << std::endl; decideIsScheduleToGrow(); //std::cout << "after decode os schedule to grow" << std::endl; computeCellTargetLength(); //std::cout << "after compute cell target length" << std::endl; computeDistToCellCenter(); //std::cout << "after compute dist to center" << std::endl; findMinAndMaxDistToCenter(); //std::cout << "after find min and max dist" << std::endl; computeLenDiffExpCur(); //std::cout << "after compute diff " << std::endl; stretchCellGivenLenDiff(); if (isAddPt) { addPointIfScheduledToGrow(); } } void SceCells::growWithStress(double d_t) { } std::vector<CVector> SceCells::getAllCellCenters() { //void SceCells::getAllCellCenters() { //thrust::host_vector<double> centerX = cellInfoVecs.centerCoordX; //thrust::host_vector<double> centerY = cellInfoVecs.centerCoordY; //thrust::host_vector<double> centerZ = cellInfoVecs.centerCoordZ; thrust::host_vector<double> centerX( allocPara_m.currentActiveCellCount); thrust::copy(cellInfoVecs.centerCoordX.begin(), cellInfoVecs.centerCoordX.begin() + allocPara_m.currentActiveCellCount, centerX.begin()); thrust::host_vector<double> centerY( allocPara_m.currentActiveCellCount); thrust::copy(cellInfoVecs.centerCoordY.begin(), cellInfoVecs.centerCoordY.begin() + allocPara_m.currentActiveCellCount, centerY.begin()); thrust::host_vector<double> centerZ( allocPara_m.currentActiveCellCount); thrust::copy(cellInfoVecs.centerCoordZ.begin(), cellInfoVecs.centerCoordZ.begin() + allocPara_m.currentActiveCellCount, centerZ.begin()); //infoForSignal.sCenterX=centerX[4] ; //infoForSignal.sCenterY=centerY[4] ; //infoForSignal.sCenterZ=centerZ[4] ; std::vector<CVector> result; for (uint i = 0; i < allocPara_m.currentActiveCellCount; i++) { CVector pos = CVector(centerX[i], centerY[i], centerZ[i]); //infoForSignal.sCenterX=centerX[i] ; //infoForSignal.sCenterY=centerY[i] ; //infoForSignal.sCenterZ=centerZ[i] ; result.push_back(pos); } return result; } void SceCells::setGrowthDirXAxis() { thrust::fill(cellInfoVecs.growthXDir.begin(), cellInfoVecs.growthXDir.begin() + 
allocPara.currentActiveCellCount, 1.0); thrust::fill(cellInfoVecs.growthYDir.begin(), cellInfoVecs.growthYDir.begin() + allocPara.currentActiveCellCount, 0.0); thrust::fill(cellInfoVecs.growthSpeed.begin(), cellInfoVecs.growthSpeed.begin() + allocPara.currentActiveCellCount, growthAuxData.fixedGrowthSpeed); } std::vector<double> SceCells::getGrowthProgressVec() { thrust::host_vector<double> growthProVec = cellInfoVecs.growthProgress; std::vector<double> result; for (uint i = 0; i < allocPara.currentActiveCellCount; i++) { result.push_back(growthProVec[i]); } return result; } void SceCells::copyCellsPreDivision_M() { totalNodeCountForActiveCells = allocPara_m.currentActiveCellCount * allocPara_m.maxAllNodePerCell; divAuxData.nodeStorageCount = divAuxData.toBeDivideCount * allocPara_m.maxAllNodePerCell; divAuxData.tmpIsActive_M = thrust::device_vector<bool>( divAuxData.nodeStorageCount, true); divAuxData.tmpNodePosX_M = thrust::device_vector<double>( divAuxData.nodeStorageCount, 0.0); divAuxData.tmpNodePosY_M = thrust::device_vector<double>( divAuxData.nodeStorageCount, 0.0); divAuxData.tmpCellRank_M = thrust::device_vector<uint>( divAuxData.toBeDivideCount, 0); divAuxData.tmpDivDirX_M = thrust::device_vector<double>( divAuxData.toBeDivideCount, 0); divAuxData.tmpDivDirY_M = thrust::device_vector<double>( divAuxData.toBeDivideCount, 0); divAuxData.tmpCenterPosX_M = thrust::device_vector<double>( divAuxData.toBeDivideCount, 0); divAuxData.tmpCenterPosY_M = thrust::device_vector<double>( divAuxData.toBeDivideCount, 0); divAuxData.tmpIsActive1_M = thrust::device_vector<bool>( divAuxData.nodeStorageCount, false); divAuxData.tmpXPos1_M = thrust::device_vector<double>( divAuxData.nodeStorageCount, 0.0); divAuxData.tmpYPos1_M = thrust::device_vector<double>( divAuxData.nodeStorageCount, 0.0); divAuxData.tmpIsActive2_M = thrust::device_vector<bool>( divAuxData.nodeStorageCount, false); divAuxData.tmpXPos2_M = thrust::device_vector<double>( divAuxData.nodeStorageCount, 0.0); divAuxData.tmpYPos2_M = thrust::device_vector<double>( divAuxData.nodeStorageCount, 0.0); //A&A divAuxData.tmpHertwigXdir = thrust::device_vector<double>( divAuxData.nodeStorageCount, 0.0); divAuxData.tmpHertwigYdir = thrust::device_vector<double>( divAuxData.nodeStorageCount, 0.0); //A&A // step 2 , continued // copy node info values ready for division /comment A&A thrust::counting_iterator<uint> iStart(0); thrust::copy_if( thrust::make_zip_iterator( thrust::make_tuple( nodes->getInfoVecs().nodeIsActive.begin() + allocPara_m.bdryNodeCount, nodes->getInfoVecs().nodeLocX.begin() + allocPara_m.bdryNodeCount, nodes->getInfoVecs().nodeLocY.begin() + allocPara_m.bdryNodeCount)), thrust::make_zip_iterator( thrust::make_tuple( nodes->getInfoVecs().nodeIsActive.begin() + allocPara_m.bdryNodeCount, nodes->getInfoVecs().nodeLocX.begin() + allocPara_m.bdryNodeCount, nodes->getInfoVecs().nodeLocY.begin() + allocPara_m.bdryNodeCount)) + totalNodeCountForActiveCells, thrust::make_permutation_iterator(cellInfoVecs.isDividing.begin(), make_transform_iterator(iStart, DivideFunctor(allocPara_m.maxAllNodePerCell))), thrust::make_zip_iterator( thrust::make_tuple(divAuxData.tmpIsActive_M.begin(), divAuxData.tmpNodePosX_M.begin(), divAuxData.tmpNodePosY_M.begin())), isTrue()); // step 3 , continued //copy cell info values ready for division /comment A&A thrust::counting_iterator<uint> iBegin(0); thrust::copy_if( thrust::make_zip_iterator( thrust::make_tuple(iBegin, cellInfoVecs.growthXDir.begin(), cellInfoVecs.growthYDir.begin(), 
cellInfoVecs.HertwigXdir.begin(), cellInfoVecs.HertwigYdir.begin(), cellInfoVecs.centerCoordX.begin(), cellInfoVecs.centerCoordY.begin())), thrust::make_zip_iterator( thrust::make_tuple(iBegin, cellInfoVecs.growthXDir.begin(), cellInfoVecs.growthYDir.begin(), cellInfoVecs.HertwigXdir.begin(), cellInfoVecs.HertwigYdir.begin(), cellInfoVecs.centerCoordX.begin(), cellInfoVecs.centerCoordY.begin())) + allocPara_m.currentActiveCellCount, cellInfoVecs.isDividing.begin(), thrust::make_zip_iterator( thrust::make_tuple(divAuxData.tmpCellRank_M.begin(), divAuxData.tmpDivDirX_M.begin(), divAuxData.tmpDivDirY_M.begin(), divAuxData.tmpHertwigXdir.begin(), divAuxData.tmpHertwigYdir.begin(), divAuxData.tmpCenterPosX_M.begin(), divAuxData.tmpCenterPosY_M.begin())), isTrue()); } void SceCells::copyCellsEnterMitotic() { totalNodeCountForActiveCells = allocPara_m.currentActiveCellCount * allocPara_m.maxAllNodePerCell; divAuxData.nodeStorageCount = divAuxData.toEnterMitoticCount * allocPara_m.maxAllNodePerCell; divAuxData.tmpIsActive_M = thrust::device_vector<bool>( divAuxData.nodeStorageCount, true); divAuxData.tmpNodePosX_M = thrust::device_vector<double>( divAuxData.nodeStorageCount, 0.0); divAuxData.tmpNodePosY_M = thrust::device_vector<double>( divAuxData.nodeStorageCount, 0.0); divAuxData.tmpCellRank_M = thrust::device_vector<uint>( divAuxData.toEnterMitoticCount, 0); divAuxData.tmpDivDirX_M = thrust::device_vector<double>( divAuxData.toEnterMitoticCount, 0); divAuxData.tmpDivDirY_M = thrust::device_vector<double>( divAuxData.toEnterMitoticCount, 0); divAuxData.tmpCenterPosX_M = thrust::device_vector<double>( divAuxData.toEnterMitoticCount, 0); divAuxData.tmpCenterPosY_M = thrust::device_vector<double>( divAuxData.toEnterMitoticCount, 0); divAuxData.tmpIsActive1_M = thrust::device_vector<bool>( divAuxData.nodeStorageCount, false); divAuxData.tmpXPos1_M = thrust::device_vector<double>( divAuxData.nodeStorageCount, 0.0); divAuxData.tmpYPos1_M = thrust::device_vector<double>( divAuxData.nodeStorageCount, 0.0); divAuxData.tmpIsActive2_M = thrust::device_vector<bool>( divAuxData.nodeStorageCount, false); divAuxData.tmpXPos2_M = thrust::device_vector<double>( divAuxData.nodeStorageCount, 0.0); divAuxData.tmpYPos2_M = thrust::device_vector<double>( divAuxData.nodeStorageCount, 0.0); // step 2 , continued // copy node info values ready for division /comment A&A thrust::counting_iterator<uint> iStart(0); thrust::copy_if( thrust::make_zip_iterator( thrust::make_tuple( nodes->getInfoVecs().nodeIsActive.begin() + allocPara_m.bdryNodeCount, nodes->getInfoVecs().nodeLocX.begin() + allocPara_m.bdryNodeCount, nodes->getInfoVecs().nodeLocY.begin() + allocPara_m.bdryNodeCount)), thrust::make_zip_iterator( thrust::make_tuple( nodes->getInfoVecs().nodeIsActive.begin() + allocPara_m.bdryNodeCount, nodes->getInfoVecs().nodeLocX.begin() + allocPara_m.bdryNodeCount, nodes->getInfoVecs().nodeLocY.begin() + allocPara_m.bdryNodeCount)) + totalNodeCountForActiveCells, thrust::make_permutation_iterator(cellInfoVecs.isEnteringMitotic.begin(), make_transform_iterator(iStart, DivideFunctor(allocPara_m.maxAllNodePerCell))), thrust::make_zip_iterator( thrust::make_tuple(divAuxData.tmpIsActive_M.begin(), divAuxData.tmpNodePosX_M.begin(), divAuxData.tmpNodePosY_M.begin())), isTrue()); // step 3 , continued //copy cell info values ready for division /comment A&A thrust::counting_iterator<uint> iBegin(0); thrust::copy_if( thrust::make_zip_iterator( thrust::make_tuple(iBegin, cellInfoVecs.growthXDir.begin(), cellInfoVecs.growthYDir.begin(), 
cellInfoVecs.centerCoordX.begin(), cellInfoVecs.centerCoordY.begin())), thrust::make_zip_iterator( thrust::make_tuple(iBegin, cellInfoVecs.growthXDir.begin(), cellInfoVecs.growthYDir.begin(), cellInfoVecs.centerCoordX.begin(), cellInfoVecs.centerCoordY.begin())) + allocPara_m.currentActiveCellCount, cellInfoVecs.isEnteringMitotic.begin(), thrust::make_zip_iterator( thrust::make_tuple(divAuxData.tmpCellRank_M.begin(), divAuxData.tmpDivDirX_M.begin(), divAuxData.tmpDivDirY_M.begin(), divAuxData.tmpCenterPosX_M.begin(), divAuxData.tmpCenterPosY_M.begin())), isTrue()); } void SceCells::createTwoNewCellArr_M() { divAuxData.tmp1MemActiveCounts.clear(); divAuxData.tmp1InternalActiveCounts.clear(); divAuxData.tmp2MemActiveCounts.clear(); divAuxData.tmp2InternalActiveCounts.clear(); //divDebug(); for (uint i = 0; i < divAuxData.toBeDivideCount; i++) { divAuxData.tmp1IntnlVec.clear(); divAuxData.tmp2IntnlVec.clear(); vector<CVector> membrNodes; vector<CVector> intnlNodes; obtainMembrAndIntnlNodes(i, membrNodes, intnlNodes); CVector oldCenter = obtainCenter(i); //A&A commented //CVector divDir = calDivDir_MajorAxis(oldCenter, membrNodes, // lenAlongMajorAxis); /*CVector divDir = calDivDir_MajorAxis(oldCenter, membrNodes, lenAlongMajorAxis);*/ CVector divDir; divDir.x = divAuxData.tmpHertwigXdir[i] ; //A&A divDir.y = divAuxData.tmpHertwigYdir[i] ; //A&A double lenAlongHertwigAxis = calLengthAlongHertwigAxis(divDir, oldCenter, membrNodes);//A&A added // std::vector<VecVal> tmp1Membr, tmp2Membr; CVector cell1Center, cell2Center; obtainTwoNewCenters(oldCenter, divDir, lenAlongHertwigAxis, cell1Center, cell2Center); prepareTmpVec(i, divDir, oldCenter, tmp1Membr, tmp2Membr); processMemVec(tmp1Membr, tmp2Membr); shiftIntnlNodesByCellCenter(cell1Center, cell2Center); assembleVecForTwoCells(i); } //divDebug(); } //A&A void SceCells::findHertwigAxis() { divAuxData.tmp1MemActiveCounts.clear(); divAuxData.tmp1InternalActiveCounts.clear(); divAuxData.tmp2MemActiveCounts.clear(); divAuxData.tmp2InternalActiveCounts.clear(); //divDebug(); for (uint i = 0; i < divAuxData.toEnterMitoticCount; i++) { uint cellRank = divAuxData.tmpCellRank_M[i]; vector<CVector> membrNodes; vector<CVector> intnlNodes; obtainMembrAndIntnlNodes(i, membrNodes, intnlNodes); CVector oldCenter = obtainCenter(i); double lenAlongMajorAxis; CVector divDir = calDivDir_MajorAxis(oldCenter, membrNodes, lenAlongMajorAxis); cellInfoVecs.HertwigXdir[cellRank]=divDir.x ; cellInfoVecs.HertwigYdir[cellRank]=divDir.y ; //std::cout<<cellInfoVecs.HertwigXdir[cellRank]<<"HertwigXdir Thrust" <<std::endl; //std::cout<<cellInfoVecs.HertwigYdir[cellRank]<<"HertwigYdir Thrust" <<std::endl; //std::cout<<divDir.x<<"HertwigXdir " <<std::endl; //std::cout<<divDir.y<<"HertwigYdir " <<std::endl; } //divDebug(); } void SceCells::copyFirstCellArr_M() { uint maxAllNodePerCell = allocPara_m.maxAllNodePerCell; for (uint i = 0; i < divAuxData.toBeDivideCount; i++) { uint cellRank = divAuxData.tmpCellRank_M[i]; uint nodeStartIndx = cellRank * maxAllNodePerCell + allocPara_m.bdryNodeCount; uint tmpStartIndx = i * maxAllNodePerCell; uint tmpEndIndx = (i + 1) * maxAllNodePerCell; thrust::constant_iterator<int> noAdhesion(-1), noAdhesion2(-1); thrust::copy( thrust::make_zip_iterator( thrust::make_tuple(divAuxData.tmpXPos1_M.begin(), divAuxData.tmpYPos1_M.begin(), divAuxData.tmpIsActive1_M.begin(), noAdhesion, noAdhesion2)) + tmpStartIndx, thrust::make_zip_iterator( thrust::make_tuple(divAuxData.tmpXPos1_M.begin(), divAuxData.tmpYPos1_M.begin(), divAuxData.tmpIsActive1_M.begin(), 
noAdhesion, noAdhesion2)) + tmpEndIndx, thrust::make_zip_iterator( thrust::make_tuple( nodes->getInfoVecs().nodeLocX.begin(), nodes->getInfoVecs().nodeLocY.begin(), nodes->getInfoVecs().nodeIsActive.begin(), nodes->getInfoVecs().nodeAdhereIndex.begin(), nodes->getInfoVecs().membrIntnlIndex.begin())) + nodeStartIndx); cellInfoVecs.activeIntnlNodeCounts[cellRank] = divAuxData.tmp1InternalActiveCounts[i]; cellInfoVecs.activeMembrNodeCounts[cellRank] = divAuxData.tmp1MemActiveCounts[i]; cellInfoVecs.growthProgress[cellRank] = 0; cellInfoVecs.membrGrowProgress[cellRank] = 0.0; cellInfoVecs.isRandGrowInited[cellRank] = false; cellInfoVecs.lastCheckPoint[cellRank] = 0; cellInfoVecs.cell_DppOld[cellRank] = cellInfoVecs.cell_Dpp[cellRank]; } } void SceCells::copySecondCellArr_M() { uint maxAllNodePerCell = allocPara_m.maxAllNodePerCell; for (uint i = 0; i < divAuxData.toBeDivideCount; i++) { uint cellRankMother = divAuxData.tmpCellRank_M[i]; uint cellRank = allocPara_m.currentActiveCellCount + i; uint nodeStartIndx = cellRank * maxAllNodePerCell + allocPara_m.bdryNodeCount; uint tmpStartIndx = i * maxAllNodePerCell; uint tmpEndIndx = (i + 1) * maxAllNodePerCell; thrust::constant_iterator<int> noAdhesion(-1), noAdhesion2(-1); thrust::copy( thrust::make_zip_iterator( thrust::make_tuple(divAuxData.tmpXPos2_M.begin(), divAuxData.tmpYPos2_M.begin(), divAuxData.tmpIsActive2_M.begin(), noAdhesion, noAdhesion2)) + tmpStartIndx, thrust::make_zip_iterator( thrust::make_tuple(divAuxData.tmpXPos2_M.begin(), divAuxData.tmpYPos2_M.begin(), divAuxData.tmpIsActive2_M.begin(), noAdhesion, noAdhesion2)) + tmpEndIndx, thrust::make_zip_iterator( thrust::make_tuple( nodes->getInfoVecs().nodeLocX.begin(), nodes->getInfoVecs().nodeLocY.begin(), nodes->getInfoVecs().nodeIsActive.begin(), nodes->getInfoVecs().nodeAdhereIndex.begin(), nodes->getInfoVecs().membrIntnlIndex.begin())) + nodeStartIndx); cellInfoVecs.activeIntnlNodeCounts[cellRank] = divAuxData.tmp2InternalActiveCounts[i]; cellInfoVecs.activeMembrNodeCounts[cellRank] = divAuxData.tmp2MemActiveCounts[i]; cellInfoVecs.growthProgress[cellRank] = 0; cellInfoVecs.membrGrowProgress[cellRank] = 0; cellInfoVecs.isRandGrowInited[cellRank] = false; cellInfoVecs.lastCheckPoint[cellRank] = 0; cellInfoVecs.cell_DppOld[cellRank] = cellInfoVecs.cell_Dpp[cellRankMother]; cellInfoVecs.cell_Dpp[cellRank] = cellInfoVecs.cell_Dpp[cellRankMother]; } } //AAMIRI /* void SceCells::removeCellArr_M() { uint maxAllNodePerCell = allocPara_m.maxAllNodePerCell; for (uint i = 0; i < divAuxData.toBeDivideCount; i++) { uint cellRank = divAuxData.tmpCellRank_M[i]; uint nodeStartIndx = cellRank * maxAllNodePerCell + allocPara_m.bdryNodeCount; uint tmpStartIndx = i * maxAllNodePerCell; uint tmpEndIndx = (i + 1) * maxAllNodePerCell; thrust::constant_iterator<int> noAdhesion(-1), noAdhesion2(-1); thrust::copy( thrust::make_zip_iterator( thrust::make_tuple(divAuxData.tmpXPos1_M.begin(), divAuxData.tmpYPos1_M.begin(), divAuxData.tmpIsActive1_M.begin(), noAdhesion, noAdhesion2)) + tmpStartIndx, thrust::make_zip_iterator( thrust::make_tuple(divAuxData.tmpXPos1_M.begin(), divAuxData.tmpYPos1_M.begin(), divAuxData.tmpIsActive1_M.begin(), noAdhesion, noAdhesion2)) + tmpEndIndx, thrust::make_zip_iterator( thrust::make_tuple( nodes->getInfoVecs().nodeLocX.begin(), nodes->getInfoVecs().nodeLocY.begin(), nodes->getInfoVecs().nodeIsActive.begin(), nodes->getInfoVecs().nodeAdhereIndex.begin(), nodes->getInfoVecs().membrIntnlIndex.begin())) + nodeStartIndx); cellInfoVecs.activeIntnlNodeCounts[cellRank] = 
divAuxData.tmp1InternalActiveCounts[i]; cellInfoVecs.activeMembrNodeCounts[cellRank] = divAuxData.tmp1MemActiveCounts[i]; cellInfoVecs.growthProgress[cellRank] = 0; cellInfoVecs.membrGrowProgress[cellRank] = 0.0; cellInfoVecs.isRandGrowInited[cellRank] = false; cellInfoVecs.lastCheckPoint[cellRank] = 0; } } */ void SceCells::updateActiveCellCount_M() { allocPara_m.currentActiveCellCount = allocPara_m.currentActiveCellCount + divAuxData.toBeDivideCount; nodes->setActiveCellCount(allocPara_m.currentActiveCellCount); } //AAMIRI /* void SceCells::updateActiveCellCountAfterRemoval_M() { allocPara_m.currentActiveCellCount = allocPara_m.currentActiveCellCount + divAuxData.toBeDivideCount; nodes->setActiveCellCount(allocPara_m.currentActiveCellCount); } */ void SceCells::markIsDivideFalse_M() { thrust::fill(cellInfoVecs.isDividing.begin(), cellInfoVecs.isDividing.begin() + allocPara_m.currentActiveCellCount, false); } void SceCells::adjustNodeVel_M() { thrust::transform( thrust::make_zip_iterator( thrust::make_tuple(nodes->getInfoVecs().nodeVelX.begin(), nodes->getInfoVecs().nodeVelY.begin())), thrust::make_zip_iterator( thrust::make_tuple(nodes->getInfoVecs().nodeVelX.begin(), nodes->getInfoVecs().nodeVelY.begin())) + allocPara_m.bdryNodeCount + totalNodeCountForActiveCells, thrust::make_zip_iterator( thrust::make_tuple(nodes->getInfoVecs().nodeVelX.begin(), nodes->getInfoVecs().nodeVelY.begin())), ForceZero()); } void SceCells::moveNodes_M() { thrust::transform( thrust::make_zip_iterator( thrust::make_tuple(nodes->getInfoVecs().nodeVelX.begin(), nodes->getInfoVecs().nodeVelY.begin())), thrust::make_zip_iterator( thrust::make_tuple(nodes->getInfoVecs().nodeVelX.begin(), nodes->getInfoVecs().nodeVelY.begin())) + totalNodeCountForActiveCells + allocPara_m.bdryNodeCount, thrust::make_zip_iterator( thrust::make_tuple(nodes->getInfoVecs().nodeLocX.begin(), nodes->getInfoVecs().nodeLocY.begin())), thrust::make_zip_iterator( thrust::make_tuple(nodes->getInfoVecs().nodeLocX.begin(), nodes->getInfoVecs().nodeLocY.begin())), //Ali SaxpyFunctorDim2(dt)); SaxpyFunctorDim2_Damp(dt,Damp_Coef)); //Ali } //Ali // This function is written to assigned different damping coefficients to cells, therefore the boundary cells can have more damping void SceCells::moveNodes_BC_M() { thrust::counting_iterator<uint> iBegin2(0); uint maxAllNodePerCell = allocPara_m.maxAllNodePerCell; thrust::transform( thrust::make_zip_iterator( thrust::make_tuple( thrust::make_permutation_iterator( cellInfoVecs.Cell_Damp.begin(), make_transform_iterator(iBegin2, DivideFunctor(maxAllNodePerCell))), nodes->getInfoVecs().nodeVelX.begin(), nodes->getInfoVecs().nodeVelY.begin())), thrust::make_zip_iterator( thrust::make_tuple( thrust::make_permutation_iterator( cellInfoVecs.Cell_Damp.begin(), make_transform_iterator(iBegin2, DivideFunctor(maxAllNodePerCell))), nodes->getInfoVecs().nodeVelX.begin(), nodes->getInfoVecs().nodeVelY.begin())) + totalNodeCountForActiveCells + allocPara_m.bdryNodeCount, thrust::make_zip_iterator( thrust::make_tuple(nodes->getInfoVecs().nodeLocX.begin(), nodes->getInfoVecs().nodeLocY.begin())), thrust::make_zip_iterator( thrust::make_tuple(nodes->getInfoVecs().nodeLocX.begin(), nodes->getInfoVecs().nodeLocY.begin())), SaxpyFunctorDim2_BC_Damp(dt)); } //Ali void SceCells::applyMemForce_M() { totalNodeCountForActiveCells = allocPara_m.currentActiveCellCount * allocPara_m.maxAllNodePerCell; uint maxAllNodePerCell = allocPara_m.maxAllNodePerCell; thrust::counting_iterator<uint> iBegin(0), iBegin1(0); //Ali 
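	// Added note on the indexing pattern used repeatedly in this function: with
	// maxAllNodePerCell slots per cell, a flat node index i maps to
	//   cell rank    = i / maxAllNodePerCell   (DivideFunctor)
	//   node in cell = i % maxAllNodePerCell   (ModuloFunctor)
	// and make_permutation_iterator broadcasts a per-cell value (growth progress,
	// active membrane node count, center coordinate, cell time) to every node
	// slot of that cell, e.g. (sketch only):
	//   auto rankIt = make_transform_iterator(iBegin, DivideFunctor(maxAllNodePerCell));
	//   auto progressPerNode = thrust::make_permutation_iterator(
	//           cellInfoVecs.growthProgress.begin(), rankIt);
	// This lets the AddMembrForce functor read cell-level state in one flat
	// per-node transform.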
thrust::fill(cellInfoVecs.Cell_Time.begin(),cellInfoVecs.Cell_Time.begin() +allocPara_m.currentActiveCellCount,curTime); //Ali /* thrust::device_vector<double>::iterator MinX_Itr=thrust::min_element(nodes->getInfoVecs().nodeLocX.begin()+ allocPara_m.bdryNodeCount, nodes->getInfoVecs().nodeLocX.begin()+ allocPara_m.bdryNodeCount+ totalNodeCountForActiveCells) ; thrust::device_vector<double>::iterator MaxX_Itr=thrust::max_element(nodes->getInfoVecs().nodeLocX.begin()+ allocPara_m.bdryNodeCount, nodes->getInfoVecs().nodeLocX.begin()+ allocPara_m.bdryNodeCount+ totalNodeCountForActiveCells) ; thrust::device_vector<double>::iterator MinY_Itr=thrust::min_element(nodes->getInfoVecs().nodeLocY.begin()+ allocPara_m.bdryNodeCount, nodes->getInfoVecs().nodeLocY.begin()+ allocPara_m.bdryNodeCount+ totalNodeCountForActiveCells) ; thrust::device_vector<double>::iterator MaxY_Itr=thrust::max_element(nodes->getInfoVecs().nodeLocY.begin()+ allocPara_m.bdryNodeCount, nodes->getInfoVecs().nodeLocY.begin()+ allocPara_m.bdryNodeCount+ totalNodeCountForActiveCells) ; Tisu_MinX= *MinX_Itr ; Tisu_MaxX= *MaxX_Itr ; Tisu_MinY= *MinY_Itr ; Tisu_MaxY= *MaxY_Itr ; */ //cout<< "# of boundary nodes"<< allocPara_m.bdryNodeCount<<endl ; //cout<< "# of total active nodes"<<totalNodeCountForActiveCells <<endl ; //cout<<"The minimum location in X in applyMemForce_M is="<<Tisu_MinX<< endl; //cout<<"The maximum location in X in applyMemForce_M is="<<Tisu_MaxX<< endl; //cout<<"The minimum location in Y in applyMemForce_M is="<<Tisu_MinY<< endl; //cout<<"The maximum location in Y in applyMemForce_M is="<<Tisu_MaxY<< endl; //Ali double* nodeLocXAddr = thrust::raw_pointer_cast( &(nodes->getInfoVecs().nodeLocX[0])); double* nodeLocYAddr = thrust::raw_pointer_cast( &(nodes->getInfoVecs().nodeLocY[0])); bool* nodeIsActiveAddr = thrust::raw_pointer_cast( &(nodes->getInfoVecs().nodeIsActive[0])); double grthPrgrCriVal_M = growthAuxData.grthPrgrCriVal_M_Ori; // for now constant //growthAuxData.grthProgrEndCPU // - growthAuxData.prolifDecay // * (growthAuxData.grthProgrEndCPU // - growthAuxData.grthPrgrCriVal_M_Ori); thrust::transform( thrust::make_zip_iterator( thrust::make_tuple( thrust::make_permutation_iterator( cellInfoVecs.growthProgress.begin(), make_transform_iterator(iBegin, DivideFunctor(maxAllNodePerCell))), thrust::make_permutation_iterator( cellInfoVecs.activeMembrNodeCounts.begin(), make_transform_iterator(iBegin, DivideFunctor(maxAllNodePerCell))), thrust::make_permutation_iterator( cellInfoVecs.centerCoordX.begin(), make_transform_iterator(iBegin, DivideFunctor(maxAllNodePerCell))), thrust::make_permutation_iterator( cellInfoVecs.Cell_Time.begin(), make_transform_iterator(iBegin, DivideFunctor(maxAllNodePerCell))), make_transform_iterator(iBegin, DivideFunctor(maxAllNodePerCell)), make_transform_iterator(iBegin, ModuloFunctor(maxAllNodePerCell)), nodes->getInfoVecs().nodeLocX.begin() + allocPara_m.bdryNodeCount, nodes->getInfoVecs().nodeLocY.begin() + allocPara_m.bdryNodeCount, nodes->getInfoVecs().nodeVelX.begin() + allocPara_m.bdryNodeCount, nodes->getInfoVecs().nodeVelY.begin() + allocPara_m.bdryNodeCount)), thrust::make_zip_iterator( thrust::make_tuple( thrust::make_permutation_iterator( cellInfoVecs.growthProgress.begin(), make_transform_iterator(iBegin, DivideFunctor(maxAllNodePerCell))), thrust::make_permutation_iterator( cellInfoVecs.activeMembrNodeCounts.begin(), make_transform_iterator(iBegin, DivideFunctor(maxAllNodePerCell))), thrust::make_permutation_iterator( cellInfoVecs.centerCoordX.begin(), 
make_transform_iterator(iBegin, DivideFunctor(maxAllNodePerCell))), thrust::make_permutation_iterator( cellInfoVecs.Cell_Time.begin(), make_transform_iterator(iBegin, DivideFunctor(maxAllNodePerCell))), make_transform_iterator(iBegin, DivideFunctor(maxAllNodePerCell)), make_transform_iterator(iBegin, ModuloFunctor(maxAllNodePerCell)), nodes->getInfoVecs().nodeLocX.begin() + allocPara_m.bdryNodeCount, nodes->getInfoVecs().nodeLocY.begin() + allocPara_m.bdryNodeCount, nodes->getInfoVecs().nodeVelX.begin() + allocPara_m.bdryNodeCount, nodes->getInfoVecs().nodeVelY.begin() + allocPara_m.bdryNodeCount)) + totalNodeCountForActiveCells, thrust::make_zip_iterator( thrust::make_tuple(nodes->getInfoVecs().nodeVelX.begin(), nodes->getInfoVecs().nodeVelY.begin(), nodes->getInfoVecs().membrTensionMag.begin(), nodes->getInfoVecs().membrTenMagRi.begin(), nodes->getInfoVecs().membrLinkRiMidX.begin(), nodes->getInfoVecs().membrLinkRiMidY.begin(), nodes->getInfoVecs().membrBendLeftX.begin(), nodes->getInfoVecs().membrBendLeftY.begin(), nodes->getInfoVecs().membrBendRightX.begin(), nodes->getInfoVecs().membrBendRightY.begin())) + allocPara_m.bdryNodeCount, AddMembrForce(allocPara_m.bdryNodeCount, maxAllNodePerCell, nodeLocXAddr, nodeLocYAddr, nodeIsActiveAddr, grthPrgrCriVal_M)); /**Ali Comment start thrust::transform( thrust::make_zip_iterator( thrust::make_tuple( thrust::make_permutation_iterator( cellInfoVecs.activeMembrNodeCounts.begin(), make_transform_iterator(iBegin, DivideFunctor(maxAllNodePerCell))), make_transform_iterator(iBegin, DivideFunctor(maxAllNodePerCell)), make_transform_iterator(iBegin, ModuloFunctor(maxAllNodePerCell)), nodes->getInfoVecs().nodeLocX.begin() + allocPara_m.bdryNodeCount, nodes->getInfoVecs().nodeLocY.begin() + allocPara_m.bdryNodeCount, nodes->getInfoVecs().nodeVelX.begin() + allocPara_m.bdryNodeCount, nodes->getInfoVecs().nodeVelY.begin() + allocPara_m.bdryNodeCount)), thrust::make_zip_iterator( thrust::make_tuple( thrust::make_permutation_iterator( cellInfoVecs.activeMembrNodeCounts.begin(), make_transform_iterator(iBegin, DivideFunctor(maxAllNodePerCell))), make_transform_iterator(iBegin, DivideFunctor(maxAllNodePerCell)), make_transform_iterator(iBegin, ModuloFunctor(maxAllNodePerCell)), nodes->getInfoVecs().nodeLocX.begin() + allocPara_m.bdryNodeCount, nodes->getInfoVecs().nodeLocY.begin() + allocPara_m.bdryNodeCount, nodes->getInfoVecs().nodeVelX.begin() + allocPara_m.bdryNodeCount, nodes->getInfoVecs().nodeVelY.begin() + allocPara_m.bdryNodeCount)) + totalNodeCountForActiveCells, thrust::make_zip_iterator( thrust::make_tuple(nodes->getInfoVecs().nodeVelX.begin(), nodes->getInfoVecs().nodeVelY.begin(), nodes->getInfoVecs().membrTensionMag.begin(), nodes->getInfoVecs().membrTenMagRi.begin(), nodes->getInfoVecs().membrLinkRiMidX.begin(), nodes->getInfoVecs().membrLinkRiMidY.begin(), nodes->getInfoVecs().membrBendLeftX.begin(), nodes->getInfoVecs().membrBendLeftY.begin(), nodes->getInfoVecs().membrBendRightX.begin(), nodes->getInfoVecs().membrBendRightY.begin())) + allocPara_m.bdryNodeCount, AddMembrForce(allocPara_m.bdryNodeCount, maxAllNodePerCell, nodeLocXAddr, nodeLocYAddr, nodeIsActiveAddr)); **/ // Ali comment end //Ali //Ali double* bendLeftXAddr = thrust::raw_pointer_cast( &(nodes->getInfoVecs().membrBendLeftX[0])); double* bendLeftYAddr = thrust::raw_pointer_cast( &(nodes->getInfoVecs().membrBendLeftY[0])); double* bendRightXAddr = thrust::raw_pointer_cast( &(nodes->getInfoVecs().membrBendRightX[0])); double* bendRightYAddr = thrust::raw_pointer_cast( 
&(nodes->getInfoVecs().membrBendRightY[0])); thrust::transform( thrust::make_zip_iterator( thrust::make_tuple( thrust::make_permutation_iterator( cellInfoVecs.activeMembrNodeCounts.begin(), make_transform_iterator(iBegin1, DivideFunctor(maxAllNodePerCell))), make_transform_iterator(iBegin1, DivideFunctor(maxAllNodePerCell)), make_transform_iterator(iBegin1, ModuloFunctor(maxAllNodePerCell)), nodes->getInfoVecs().nodeVelX.begin(), nodes->getInfoVecs().nodeVelY.begin())), thrust::make_zip_iterator( thrust::make_tuple( thrust::make_permutation_iterator( cellInfoVecs.activeMembrNodeCounts.begin(), make_transform_iterator(iBegin1, DivideFunctor(maxAllNodePerCell))), make_transform_iterator(iBegin1, DivideFunctor(maxAllNodePerCell)), make_transform_iterator(iBegin1, ModuloFunctor(maxAllNodePerCell)), nodes->getInfoVecs().nodeVelX.begin(), nodes->getInfoVecs().nodeVelY.begin())) + totalNodeCountForActiveCells, thrust::make_zip_iterator( thrust::make_tuple(nodes->getInfoVecs().nodeVelX.begin(), nodes->getInfoVecs().nodeVelY.begin())), AddMembrBend(maxAllNodePerCell, nodeIsActiveAddr, bendLeftXAddr, bendLeftYAddr, bendRightXAddr, bendRightYAddr)); } //AAMIRI void SceCells::findTangentAndNormal_M() { uint totalNodeCountForActiveCells = allocPara_m.currentActiveCellCount * allocPara_m.maxAllNodePerCell; uint maxAllNodePerCell = allocPara_m.maxAllNodePerCell; thrust::counting_iterator<uint> iBegin(0), iBegin1(0); double* nodeLocXAddr = thrust::raw_pointer_cast( &(nodes->getInfoVecs().nodeLocX[0])); double* nodeLocYAddr = thrust::raw_pointer_cast( &(nodes->getInfoVecs().nodeLocY[0])); bool* nodeIsActiveAddr = thrust::raw_pointer_cast( &(nodes->getInfoVecs().nodeIsActive[0])); thrust::transform( thrust::make_zip_iterator( thrust::make_tuple( thrust::make_permutation_iterator( cellInfoVecs.activeMembrNodeCounts.begin(), make_transform_iterator(iBegin, DivideFunctor(maxAllNodePerCell))), make_transform_iterator(iBegin, DivideFunctor(maxAllNodePerCell)), make_transform_iterator(iBegin1, ModuloFunctor(maxAllNodePerCell)), nodes->getInfoVecs().nodeF_MI_M_x.begin(), //AliE nodes->getInfoVecs().nodeF_MI_M_y.begin(), //AliE nodes->getInfoVecs().nodeF_MI_M_T.begin(), //AliE nodes->getInfoVecs().nodeF_MI_M_N.begin(), //AliE nodes->getInfoVecs().nodeCurvature.begin(), nodes->getInfoVecs().nodeExtForceX.begin(), nodes->getInfoVecs().nodeExtForceY.begin())), thrust::make_zip_iterator( thrust::make_tuple( thrust::make_permutation_iterator( cellInfoVecs.activeMembrNodeCounts.begin(), make_transform_iterator(iBegin1, DivideFunctor(maxAllNodePerCell))), make_transform_iterator(iBegin1, DivideFunctor(maxAllNodePerCell)), make_transform_iterator(iBegin1, ModuloFunctor(maxAllNodePerCell)), nodes->getInfoVecs().nodeF_MI_M_x.begin(), //AliE nodes->getInfoVecs().nodeF_MI_M_y.begin(), //AliE nodes->getInfoVecs().nodeF_MI_M_T.begin(), //AliE nodes->getInfoVecs().nodeF_MI_M_N.begin(), //AliE nodes->getInfoVecs().nodeCurvature.begin(), nodes->getInfoVecs().nodeExtForceX.begin(), nodes->getInfoVecs().nodeExtForceY.begin())) + totalNodeCountForActiveCells, thrust::make_zip_iterator( thrust::make_tuple(nodes->getInfoVecs().nodeF_MI_M_T.begin(), nodes->getInfoVecs().nodeF_MI_M_N.begin(), nodes->getInfoVecs().nodeCurvature.begin(), nodes->getInfoVecs().nodeExtForceTangent.begin(), nodes->getInfoVecs().nodeExtForceNormal.begin(), nodes->getInfoVecs().membrDistToRi.begin())), CalCurvatures(maxAllNodePerCell, nodeIsActiveAddr, nodeLocXAddr, nodeLocYAddr)); } void SceCells::runAblationTest(AblationEvent& ablEvent) { for (uint i = 0; i < 
ablEvent.ablationCells.size(); i++) { int cellRank = ablEvent.ablationCells[i].cellNum; std::vector<uint> removeSeq = ablEvent.ablationCells[i].nodeNums; cellInfoVecs.activeNodeCountOfThisCell[cellRank] = cellInfoVecs.activeNodeCountOfThisCell[cellRank] - removeSeq.size(); nodes->removeNodes(cellRank, removeSeq); } } void SceCells::computeCenterPos_M() { totalNodeCountForActiveCells = allocPara_m.currentActiveCellCount * allocPara_m.maxAllNodePerCell; thrust::counting_iterator<uint> iBegin(0); thrust::counting_iterator<uint> countingEnd(totalNodeCountForActiveCells); //uint totalMembrActiveNodeCount = thrust::reduce( // cellInfoVecs.activeMembrNodeCounts.begin(), // cellInfoVecs.activeMembrNodeCounts.begin() // + allocPara_m.currentActiveCellCount); uint totalIntnlActiveNodeCount = thrust::reduce( cellInfoVecs.activeIntnlNodeCounts.begin(), cellInfoVecs.activeIntnlNodeCounts.begin() + allocPara_m.currentActiveCellCount); thrust::copy_if( thrust::make_zip_iterator( thrust::make_tuple( make_transform_iterator(iBegin, DivideFunctor( allocPara_m.maxAllNodePerCell)), nodes->getInfoVecs().nodeLocX.begin() + allocPara_m.bdryNodeCount, nodes->getInfoVecs().nodeLocY.begin() + allocPara_m.bdryNodeCount)), thrust::make_zip_iterator( thrust::make_tuple( make_transform_iterator(iBegin, DivideFunctor( allocPara_m.maxAllNodePerCell)), nodes->getInfoVecs().nodeLocX.begin() + allocPara_m.bdryNodeCount, nodes->getInfoVecs().nodeLocY.begin() + allocPara_m.bdryNodeCount)) + totalNodeCountForActiveCells, thrust::make_zip_iterator( thrust::make_tuple( nodes->getInfoVecs().nodeIsActive.begin(), nodes->getInfoVecs().nodeCellType.begin())) + allocPara_m.bdryNodeCount, thrust::make_zip_iterator( thrust::make_tuple(cellNodeInfoVecs.cellRanks.begin(), cellNodeInfoVecs.activeXPoss.begin(), cellNodeInfoVecs.activeYPoss.begin())), ActiveAndIntnl()); thrust::reduce_by_key(cellNodeInfoVecs.cellRanks.begin(), cellNodeInfoVecs.cellRanks.begin() + totalIntnlActiveNodeCount, thrust::make_zip_iterator( thrust::make_tuple(cellNodeInfoVecs.activeXPoss.begin(), cellNodeInfoVecs.activeYPoss.begin())), cellInfoVecs.cellRanksTmpStorage.begin(), thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.centerCoordX.begin(), cellInfoVecs.centerCoordY.begin())), thrust::equal_to<uint>(), CVec2Add()); thrust::transform( thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.centerCoordX.begin(), cellInfoVecs.centerCoordY.begin())), thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.centerCoordX.begin(), cellInfoVecs.centerCoordY.begin())) + allocPara_m.currentActiveCellCount, cellInfoVecs.activeIntnlNodeCounts.begin(), thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.centerCoordX.begin(), cellInfoVecs.centerCoordY.begin())), CVec2Divide()); } void SceCells::BC_Imp_M() { /* thrust::device_vector<double>::iterator MinX_Itr=thrust::min_element( cellInfoVecs.centerCoordX.begin(), cellInfoVecs.centerCoordX.begin()+allocPara_m.currentActiveCellCount ) ; thrust::device_vector<double>::iterator MaxX_Itr=thrust::max_element( cellInfoVecs.centerCoordX.begin(), cellInfoVecs.centerCoordX.begin()+allocPara_m.currentActiveCellCount ) ; thrust::device_vector<double>::iterator MinY_Itr=thrust::min_element( cellInfoVecs.centerCoordY.begin(), cellInfoVecs.centerCoordY.begin()+allocPara_m.currentActiveCellCount ) ; thrust::device_vector<double>::iterator MaxY_Itr=thrust::max_element( cellInfoVecs.centerCoordY.begin(), cellInfoVecs.centerCoordY.begin()+allocPara_m.currentActiveCellCount ) ; MinX= *MinX_Itr ; MaxX= *MaxX_Itr ; MinY= 
*MinY_Itr ; MaxY= *MaxY_Itr ; */ //cout<<"The minimum location of cell cetners in Y in BC_Imp_M is="<<Tisu_MinX<< endl; //cout<<"The maximum location of cell centers in Y in BC_Imp_M is="<<Tisu_MaxX<< endl; //cout<<"The minimum location of cell centers in Y in BC_Imp_M is="<<Tisu_MinY<< endl; //cout<<"The maximum location of cell centers in Y in BC_Imp_M is="<<Tisu_MaxY<< endl; /** thrust::transform( thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.centerCoordX.begin(), cellInfoVecs.centerCoordY.begin()) ), thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.centerCoordX.begin(), cellInfoVecs.centerCoordY.begin())) + allocPara_m.currentActiveCellCount, thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.centerCoordX.begin(), cellInfoVecs.centerCoordY.begin())), BC_Tissue_Damp(Damp_Coef)) ; **/ int NumActCells=allocPara_m.currentActiveCellCount ; //Ali thrust::transform( thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.centerCoordX.begin(), cellInfoVecs.centerCoordY.begin(), cellInfoVecs.Cell_Damp.begin()) ), thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.centerCoordX.begin(), cellInfoVecs.centerCoordY.begin(), cellInfoVecs.Cell_Damp.begin())) + allocPara_m.currentActiveCellCount, thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.centerCoordX.begin(), cellInfoVecs.Cell_Damp.begin())), BC_Tissue_Damp(Tisu_MinX,Tisu_MaxX,Tisu_MinY,Tisu_MaxY,Damp_Coef,NumActCells)) ; /**void SceCells::randomizeGrowth() { thrust::counting_iterator<uint> countingBegin(0); thrust::transform( thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.centerCoordX.begin(), cellInfoVecs.growthXDir.begin(), cellInfoVecs.growthYDir.begin(), cellInfoVecs.isRandGrowInited.begin(), countingBegin)), thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.centerCoordX.begin(), cellInfoVecs.growthXDir.begin(), cellInfoVecs.growthYDir.begin(), cellInfoVecs.isRandGrowInited.begin(), countingBegin)) + allocPara.currentActiveCellCount, thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.growthSpeed.begin(), cellInfoVecs.growthXDir.begin(), cellInfoVecs.growthYDir.begin(), cellInfoVecs.isRandGrowInited.begin())), AssignRandIfNotInit(growthAuxData.randomGrowthSpeedMin, growthAuxData.randomGrowthSpeedMax, allocPara.currentActiveCellCount, growthAuxData.randGenAuxPara)); } **/ } void SceCells::growAtRandom_M(double dt) { totalNodeCountForActiveCells = allocPara_m.currentActiveCellCount * allocPara_m.maxAllNodePerCell; randomizeGrowth_M(); updateGrowthProgress_M(); decideIsScheduleToGrow_M(); //computeCellTargetLength_M(); //computeDistToCellCenter_M(); //findMinAndMaxDistToCenter_M(); //computeLenDiffExpCur_M(); //stretchCellGivenLenDiff_M(); addPointIfScheduledToGrow_M(); //decideIsScheduleToShrink_M();// AAMIRI May5 //delPointIfScheduledToGrow_M();//AAMIRI - commented out on June20 adjustGrowthInfo_M(); } void SceCells::divide2D_M() { bool isDivisionPresent = decideIfGoingToDivide_M(); bool isEnteringMitotic = decideIfAnyCellEnteringMitotic() ; //A&A //A&A if (isEnteringMitotic){ std::cout<< "I am in EnteringMitotic"<< std::endl; copyCellsEnterMitotic(); findHertwigAxis(); } //A&A if (!isDivisionPresent) { return; } //aniDebug = true; copyCellsPreDivision_M(); createTwoNewCellArr_M(); copyFirstCellArr_M(); copySecondCellArr_M(); updateActiveCellCount_M(); markIsDivideFalse_M(); //divDebug(); } void SceCells::distributeCellGrowthProgress_M() { totalNodeCountForActiveCells = allocPara_m.currentActiveCellCount * allocPara_m.maxAllNodePerCell; 
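	// Added note: the copy below is a per-cell-to-per-node broadcast. Node slot i
	// (past the boundary nodes) receives growthProgress[i / maxAllNodePerCell],
	// so every node carries its cell's growth progress in nodeGrowPro. A serial
	// sketch of the same operation would be:
	//   for (uint i = 0; i < totalNodeCountForActiveCells; i++)
	//       nodeGrowPro[bdryNodeCount + i] = growthProgress[i / maxAllNodePerCell];
	// (illustration only; the device code uses permutation + transform iterators.)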
    thrust::counting_iterator<uint> countingBegin(0);
    thrust::counting_iterator<uint> countingEnd(totalNodeCountForActiveCells);
    thrust::copy(
            thrust::make_permutation_iterator(
                    cellInfoVecs.growthProgress.begin(),
                    make_transform_iterator(countingBegin,
                            DivideFunctor(allocPara_m.maxAllNodePerCell))),
            thrust::make_permutation_iterator(
                    cellInfoVecs.growthProgress.begin(),
                    make_transform_iterator(countingEnd,
                            DivideFunctor(allocPara_m.maxAllNodePerCell))),
            nodes->getInfoVecs().nodeGrowPro.begin()
                    + allocPara_m.bdryNodeCount);

    std::cout << "the value of init time stage in distributeCellGrowthProgress_M is "
            << InitTimeStage << std::endl;
    if (curTime <= InitTimeStage+dt)//AAMIRI /A & A
        thrust::copy(
                cellInfoVecs.growthProgress.begin(),
                cellInfoVecs.growthProgress.end(),
                cellInfoVecs.lastCheckPoint.begin() );
}

void SceCells::allComponentsMove_M() {
    //moveNodes_M(); //Ali
    moveNodes_BC_M(); //Ali
}

//Ali modified this function to introduce differential proliferation rates
void SceCells::randomizeGrowth_M() {
    double CntrTisuX=0.5*(Tisu_MaxX-Tisu_MinX) ;
    double CntrTisuY=0.5*(Tisu_MaxY-Tisu_MinY) ;
    //cout<<"The minimum location of cell centers in X in randomizeGrowth_M is="<<Tisu_MinX<< endl;
    //cout<<"The maximum location of cell centers in X in randomizeGrowth_M is="<<Tisu_MaxX<< endl;
    //cout<<"The minimum location of cell centers in Y in randomizeGrowth_M is="<<Tisu_MinY<< endl;
    //cout<<"The maximum location of cell centers in Y in randomizeGrowth_M is="<<Tisu_MaxY<< endl;
    uint seed = time(NULL);
    thrust::counting_iterator<uint> countingBegin(0);
    thrust::transform(
            thrust::make_zip_iterator(
                    thrust::make_tuple(cellInfoVecs.growthSpeed.begin(),
                            cellInfoVecs.centerCoordX.begin(),
                            cellInfoVecs.centerCoordY.begin(),
                            cellInfoVecs.isRandGrowInited.begin(), countingBegin)),
            thrust::make_zip_iterator(
                    thrust::make_tuple(cellInfoVecs.growthSpeed.begin(),
                            cellInfoVecs.centerCoordX.begin(),
                            cellInfoVecs.centerCoordY.begin(),
                            cellInfoVecs.isRandGrowInited.begin(), countingBegin))
                    + allocPara_m.currentActiveCellCount,
            thrust::make_zip_iterator(
                    thrust::make_tuple(cellInfoVecs.growthSpeed.begin(),
                            cellInfoVecs.centerCoordX.begin(),
                            cellInfoVecs.centerCoordY.begin(),
                            cellInfoVecs.isRandGrowInited.begin())),
            RandomizeGrow_M(CntrTisuX,CntrTisuY,Tisu_R,growthAuxData.randomGrowthSpeedMin,
                    growthAuxData.randomGrowthSpeedMax, seed));
}

void SceCells::updateGrowthProgress_M() {
    thrust::copy(cellInfoVecs.growthProgress.begin(),
            cellInfoVecs.growthProgress.begin()
                    + allocPara_m.currentActiveCellCount,
            cellInfoVecs.growthProgressOld.begin());
    /*
    thrust::transform(cellInfoVecs.growthSpeed.begin(),
            cellInfoVecs.growthSpeed.begin()
                    + allocPara_m.currentActiveCellCount,
            cellInfoVecs.growthProgress.begin(),
            cellInfoVecs.growthProgress.begin(),
            SaxpyFunctorWithMaxOfOne(dt));
    */
    cout << " I am trying to update growth progress" << endl ;
    //double dummy=0 ;
    double mitoticCheckPoint=growthAuxData.grthPrgrCriVal_M_Ori ;
    thrust::transform(
            thrust::make_zip_iterator(
                    thrust::make_tuple(cellInfoVecs.cell_Dpp.begin(),
                            cellInfoVecs.cell_DppOld.begin(),
                            cellInfoVecs.growthProgress.begin(),
                            cellInfoVecs.growthSpeed.begin())),
            thrust::make_zip_iterator(
                    thrust::make_tuple(cellInfoVecs.cell_Dpp.begin(),
                            cellInfoVecs.cell_DppOld.begin(),
                            cellInfoVecs.growthProgress.begin(),
                            cellInfoVecs.growthSpeed.begin()))
                    + allocPara_m.currentActiveCellCount,
            cellInfoVecs.growthProgress.begin(),
            DppGrowRegulator(dt,mitoticCheckPoint));
}

void SceCells::decideIsScheduleToGrow_M() {
    thrust::transform(
            thrust::make_zip_iterator(
thrust::make_tuple(cellInfoVecs.growthProgress.begin(), cellInfoVecs.lastCheckPoint.begin())), thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.growthProgress.begin(), cellInfoVecs.lastCheckPoint.begin())) + allocPara_m.currentActiveCellCount, cellInfoVecs.isScheduledToGrow.begin(), PtCondiOp(miscPara.growThreshold)); } //AAMIRI May5 void SceCells::decideIsScheduleToShrink_M() { double laserCenterY = 25.0; double laserCenterX = 25.0; double laserRadius = 4.0; thrust::counting_iterator<uint> iBegin(0); thrust::counting_iterator<uint> iEnd(allocPara_m.currentActiveCellCount); thrust::transform( thrust::make_zip_iterator( thrust::make_tuple(iBegin, cellInfoVecs.centerCoordX.begin(), cellInfoVecs.centerCoordY.begin(), cellInfoVecs.isScheduledToShrink.begin())), thrust::make_zip_iterator( thrust::make_tuple(iEnd, cellInfoVecs.centerCoordX.begin()+allocPara_m.currentActiveCellCount, cellInfoVecs.centerCoordY.begin()+allocPara_m.currentActiveCellCount, cellInfoVecs.isScheduledToShrink.begin()+allocPara_m.currentActiveCellCount)), cellInfoVecs.isScheduledToShrink.begin(), isDelOp(laserCenterX, laserCenterY, laserRadius)); } void SceCells::computeCellTargetLength_M() { thrust::transform(cellInfoVecs.growthProgress.begin(), cellInfoVecs.growthProgress.begin() + allocPara_m.currentActiveCellCount, cellInfoVecs.expectedLength.begin(), CompuTarLen(bioPara.cellInitLength, bioPara.cellFinalLength)); } void SceCells::computeDistToCellCenter_M() { thrust::counting_iterator<uint> iBegin(0); thrust::counting_iterator<uint> iEnd(totalNodeCountForActiveCells); uint endIndx = allocPara_m.bdryNodeCount + totalNodeCountForActiveCells; thrust::transform( thrust::make_zip_iterator( thrust::make_tuple( make_permutation_iterator( cellInfoVecs.centerCoordX.begin(), make_transform_iterator(iBegin, DivideFunctor( allocPara_m.maxAllNodePerCell))), make_permutation_iterator( cellInfoVecs.centerCoordY.begin(), make_transform_iterator(iBegin, DivideFunctor( allocPara_m.maxAllNodePerCell))), make_permutation_iterator( cellInfoVecs.growthXDir.begin(), make_transform_iterator(iBegin, DivideFunctor( allocPara_m.maxAllNodePerCell))), make_permutation_iterator( cellInfoVecs.growthYDir.begin(), make_transform_iterator(iBegin, DivideFunctor( allocPara_m.maxAllNodePerCell))), nodes->getInfoVecs().nodeLocX.begin() + allocPara_m.bdryNodeCount, nodes->getInfoVecs().nodeLocY.begin() + allocPara_m.bdryNodeCount, nodes->getInfoVecs().nodeIsActive.begin() + allocPara_m.bdryNodeCount)), thrust::make_zip_iterator( thrust::make_tuple( make_permutation_iterator( cellInfoVecs.centerCoordX.begin(), make_transform_iterator(iEnd, DivideFunctor( allocPara_m.maxAllNodePerCell))), make_permutation_iterator( cellInfoVecs.centerCoordY.begin(), make_transform_iterator(iEnd, DivideFunctor( allocPara_m.maxAllNodePerCell))), make_permutation_iterator( cellInfoVecs.growthXDir.begin(), make_transform_iterator(iEnd, DivideFunctor( allocPara_m.maxAllNodePerCell))), make_permutation_iterator( cellInfoVecs.growthYDir.begin(), make_transform_iterator(iEnd, DivideFunctor( allocPara_m.maxAllNodePerCell))), nodes->getInfoVecs().nodeLocX.begin() + endIndx, nodes->getInfoVecs().nodeLocY.begin() + endIndx, nodes->getInfoVecs().nodeIsActive.begin() + endIndx)), cellNodeInfoVecs.distToCenterAlongGrowDir.begin(), CompuDist()); } void SceCells::findMinAndMaxDistToCenter_M() { thrust::reduce_by_key( make_transform_iterator(countingBegin, DivideFunctor(allocPara_m.maxAllNodePerCell)), make_transform_iterator(countingBegin, 
DivideFunctor(allocPara_m.maxAllNodePerCell)) + totalNodeCountForActiveCells, cellNodeInfoVecs.distToCenterAlongGrowDir.begin(), cellInfoVecs.cellRanksTmpStorage.begin(), cellInfoVecs.smallestDistance.begin(), thrust::equal_to<uint>(), thrust::minimum<double>()); // for nodes of each cell, find the maximum distance from the node to the corresponding // cell center along the pre-defined growth direction. thrust::reduce_by_key( make_transform_iterator(countingBegin, DivideFunctor(allocPara_m.maxAllNodePerCell)), make_transform_iterator(countingBegin, DivideFunctor(allocPara_m.maxAllNodePerCell)) + totalNodeCountForActiveCells, cellNodeInfoVecs.distToCenterAlongGrowDir.begin(), cellInfoVecs.cellRanksTmpStorage.begin(), cellInfoVecs.biggestDistance.begin(), thrust::equal_to<uint>(), thrust::maximum<double>()); } void SceCells::computeLenDiffExpCur_M() { thrust::transform( thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.expectedLength.begin(), cellInfoVecs.smallestDistance.begin(), cellInfoVecs.biggestDistance.begin())), thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.expectedLength.begin(), cellInfoVecs.smallestDistance.begin(), cellInfoVecs.biggestDistance.begin())) + allocPara_m.currentActiveCellCount, cellInfoVecs.lengthDifference.begin(), CompuDiff()); } void SceCells::stretchCellGivenLenDiff_M() { uint count = allocPara_m.maxAllNodePerCell; uint bdry = allocPara_m.bdryNodeCount; uint actCount = totalNodeCountForActiveCells; uint all = bdry + actCount; thrust::counting_iterator<uint> iBegin(0); thrust::counting_iterator<uint> iEnd(actCount); thrust::transform( thrust::make_zip_iterator( thrust::make_tuple( cellNodeInfoVecs.distToCenterAlongGrowDir.begin(), make_permutation_iterator( cellInfoVecs.lengthDifference.begin(), make_transform_iterator(iBegin, DivideFunctor(count))), make_permutation_iterator( cellInfoVecs.growthXDir.begin(), make_transform_iterator(iBegin, DivideFunctor(count))), make_permutation_iterator( cellInfoVecs.growthYDir.begin(), make_transform_iterator(iBegin, DivideFunctor(count))), nodes->getInfoVecs().nodeVelX.begin() + bdry, nodes->getInfoVecs().nodeVelY.begin() + bdry, make_transform_iterator(iBegin, ModuloFunctor(count)))), thrust::make_zip_iterator( thrust::make_tuple( cellNodeInfoVecs.distToCenterAlongGrowDir.begin() + actCount, make_permutation_iterator( cellInfoVecs.lengthDifference.begin(), make_transform_iterator(iEnd, DivideFunctor(count))), make_permutation_iterator( cellInfoVecs.growthXDir.begin(), make_transform_iterator(iEnd, DivideFunctor(count))), make_permutation_iterator( cellInfoVecs.growthYDir.begin(), make_transform_iterator(iEnd, DivideFunctor(count))), nodes->getInfoVecs().nodeVelX.begin() + all, nodes->getInfoVecs().nodeVelY.begin() + all, make_transform_iterator(iEnd, ModuloFunctor(count)))), thrust::make_zip_iterator( thrust::make_tuple( nodes->getInfoVecs().nodeVelX.begin() + bdry, nodes->getInfoVecs().nodeVelY.begin() + bdry)), ApplyStretchForce_M(bioPara.elongationCoefficient, allocPara_m.maxMembrNodePerCell)); } void SceCells::addPointIfScheduledToGrow_M() { uint seed = time(NULL); uint activeCellCount = allocPara_m.currentActiveCellCount; thrust::counting_iterator<uint> iBegin(0); thrust::counting_iterator<uint> iEnd(activeCellCount); thrust::transform( thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.isScheduledToGrow.begin(), cellInfoVecs.activeIntnlNodeCounts.begin(), cellInfoVecs.centerCoordX.begin(), cellInfoVecs.centerCoordY.begin(), iBegin, cellInfoVecs.lastCheckPoint.begin())), 
thrust::make_zip_iterator( thrust::make_tuple( cellInfoVecs.isScheduledToGrow.begin() + activeCellCount, cellInfoVecs.activeIntnlNodeCounts.begin() + activeCellCount, cellInfoVecs.centerCoordX.begin() + activeCellCount, cellInfoVecs.centerCoordY.begin() + activeCellCount, iEnd, cellInfoVecs.lastCheckPoint.begin() + activeCellCount)), thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.lastCheckPoint.begin(), cellInfoVecs.activeIntnlNodeCounts.begin())), AddPtOp_M(seed, miscPara.addNodeDistance, miscPara.growThreshold, growthAuxData.nodeXPosAddress, growthAuxData.nodeYPosAddress, growthAuxData.nodeIsActiveAddress)); } //AAMIRI void SceCells::delPointIfScheduledToGrow_M() { uint seed = time(NULL); uint activeCellCount = allocPara_m.currentActiveCellCount; thrust::counting_iterator<uint> iBegin(0); thrust::counting_iterator<uint> iEnd(activeCellCount); int timeStep = curTime/dt; if (curTime>70000.0 && curTime<70000.1){ decideIsScheduleToShrink_M();// AAMIRI } if (curTime > 70000.0) thrust::transform( thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.isScheduledToShrink.begin(), cellInfoVecs.activeIntnlNodeCounts.begin(), cellInfoVecs.centerCoordX.begin(), cellInfoVecs.centerCoordY.begin(), iBegin, cellInfoVecs.activeMembrNodeCounts.begin(), cellInfoVecs.isCellActive.begin(), cellInfoVecs.growthSpeed.begin())), thrust::make_zip_iterator( thrust::make_tuple( cellInfoVecs.isScheduledToShrink.begin() + activeCellCount, cellInfoVecs.activeIntnlNodeCounts.begin() + activeCellCount, cellInfoVecs.centerCoordX.begin() + activeCellCount, cellInfoVecs.centerCoordY.begin() + activeCellCount, iEnd, cellInfoVecs.activeMembrNodeCounts.begin() + activeCellCount, cellInfoVecs.isCellActive.begin() + activeCellCount, cellInfoVecs.growthSpeed.begin() + activeCellCount)), thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.activeMembrNodeCounts.begin(), cellInfoVecs.activeIntnlNodeCounts.begin(), cellInfoVecs.isCellActive.begin(), cellInfoVecs.growthSpeed.begin())), DelPtOp_M(seed, timeStep, growthAuxData.adhIndxAddr, growthAuxData.nodeXPosAddress, growthAuxData.nodeYPosAddress, growthAuxData.nodeIsActiveAddress)); } bool SceCells::decideIfGoingToDivide_M() { thrust::transform( thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.growthProgress.begin(), cellInfoVecs.activeIntnlNodeCounts.begin())), thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.growthProgress.begin(), cellInfoVecs.activeIntnlNodeCounts.begin())) + allocPara_m.currentActiveCellCount, cellInfoVecs.isDividing.begin(), CompuIsDivide_M(allocPara_m.maxIntnlNodePerCell)); // sum all bool values which indicate whether the cell is going to divide. // toBeDivideCount is the total number of cells going to divide. 
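    // The (uint) 0 initial value makes thrust::reduce accumulate the bool isDividing flags
    // as unsigned integers, so the result is the count of cells flagged for division.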
divAuxData.toBeDivideCount = thrust::reduce(cellInfoVecs.isDividing.begin(), cellInfoVecs.isDividing.begin() + allocPara_m.currentActiveCellCount, (uint) (0)); if (divAuxData.toBeDivideCount > 0) { return true; } else { return false; } } //A&A bool SceCells::decideIfAnyCellEnteringMitotic() { double grthPrgrCriVal_M =growthAuxData.grthPrgrCriVal_M_Ori ; // for now constant growthAuxData.grthProgrEndCPU // - growthAuxData.prolifDecay // * (growthAuxData.grthProgrEndCPU // - growthAuxData.grthPrgrCriVal_M_Ori); thrust::transform( thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.growthProgress.begin(), cellInfoVecs.growthProgressOld.begin())), thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.growthProgress.begin(), cellInfoVecs.growthProgressOld.begin())) + allocPara_m.currentActiveCellCount, cellInfoVecs.isEnteringMitotic.begin(), CompuIsEnteringMitotic_M(grthPrgrCriVal_M)); // sum all bool values which indicate whether the cell is going to divide. // toBeDivideCount is the total number of cells going to divide. divAuxData.toEnterMitoticCount = thrust::reduce(cellInfoVecs.isEnteringMitotic.begin(), cellInfoVecs.isEnteringMitotic.begin() + allocPara_m.currentActiveCellCount, (uint) (0)); if (divAuxData.toEnterMitoticCount > 0) { return true; } else { return false; } } //AAMIRI /* bool SceCells::decideIfGoingToRemove_M() { thrust::transform( thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.growthProgress.begin(), cellInfoVecs.activeIntnlNodeCounts.begin())), thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.growthProgress.begin(), cellInfoVecs.activeIntnlNodeCounts.begin())) + allocPara_m.currentActiveCellCount, cellInfoVecs.isRemoving.begin(), CompuIsRemoving_M(allocPara_m.maxIntnlNodePerCell)); // sum all bool values which indicate whether the cell is going to divide. // toBeDivideCount is the total number of cells going to divide. divAuxData.toBeRemovingCount = thrust::reduce(cellInfoVecs.isRemoving.begin(), cellInfoVecs.isRemoving.begin() + allocPara_m.currentActiveCellCount, (uint) (0)); if (divAuxData.toBeRemovingCount > 0) { return true; } else { return false; } } */ AniRawData SceCells::obtainAniRawData(AnimationCriteria& aniCri) { uint activeCellCount = allocPara_m.currentActiveCellCount; uint maxNodePerCell = allocPara_m.maxAllNodePerCell; uint maxMemNodePerCell = allocPara_m.maxMembrNodePerCell; uint beginIndx = allocPara_m.bdryNodeCount; AniRawData rawAniData; //cout << "size of potential pairs = " << pairs.size() << endl; // unordered_map is more efficient than map, but it is a c++ 11 feature // and c++ 11 seems to be incompatible with Thrust. 
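    // Stage node positions, activity flags, adhesion indices and membrane tension magnitudes
    // into host vectors with one zipped copy; the loops below then walk each cell's node block
    // to build the animation points, membrane links, internal links and adhesion bonds.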
IndexMap locIndexToAniIndexMap; uint maxActiveNode = activeCellCount * maxNodePerCell; thrust::host_vector<double> hostTmpVectorLocX(maxActiveNode); thrust::host_vector<double> hostTmpVectorLocY(maxActiveNode); thrust::host_vector<bool> hostIsActiveVec(maxActiveNode); thrust::host_vector<int> hostBondVec(maxActiveNode); thrust::host_vector<double> hostTmpVectorTenMag(maxActiveNode); thrust::copy( thrust::make_zip_iterator( thrust::make_tuple(nodes->getInfoVecs().nodeLocX.begin(), nodes->getInfoVecs().nodeLocY.begin(), nodes->getInfoVecs().nodeIsActive.begin(), nodes->getInfoVecs().nodeAdhereIndex.begin(), nodes->getInfoVecs().membrTensionMag.begin())), thrust::make_zip_iterator( thrust::make_tuple(nodes->getInfoVecs().nodeLocX.begin(), nodes->getInfoVecs().nodeLocY.begin(), nodes->getInfoVecs().nodeIsActive.begin(), nodes->getInfoVecs().nodeAdhereIndex.begin(), nodes->getInfoVecs().membrTensionMag.begin())) + maxActiveNode, thrust::make_zip_iterator( thrust::make_tuple(hostTmpVectorLocX.begin(), hostTmpVectorLocY.begin(), hostIsActiveVec.begin(), hostBondVec.begin(), hostTmpVectorTenMag.begin()))); thrust::host_vector<uint> curActiveMemNodeCounts = cellInfoVecs.activeMembrNodeCounts; CVector tmpPos; uint index1; int index2; std::vector<BondInfo> bondInfoVec; double node1X, node1Y; double node2X, node2Y; double aniVal; for (uint i = 0; i < activeCellCount; i++) { for (uint j = 0; j < maxMemNodePerCell; j++) { index1 = beginIndx + i * maxNodePerCell + j; if (hostIsActiveVec[index1] == true) { index2 = hostBondVec[index1]; if (index2 > index1 && index2 != -1) { BondInfo bond; bond.cellRank1 = i; bond.pos1 = CVector(hostTmpVectorLocX[index1], hostTmpVectorLocY[index1], 0); bond.cellRank2 = (index2 - beginIndx) / maxNodePerCell; bond.pos2 = CVector(hostTmpVectorLocX[index2], hostTmpVectorLocY[index2], 0); bondInfoVec.push_back(bond); } } } } rawAniData.bondsArr = bondInfoVec; uint curIndex = 0; for (uint i = 0; i < activeCellCount; i++) { for (uint j = 0; j < curActiveMemNodeCounts[i]; j++) { index1 = beginIndx + i * maxNodePerCell + j; if (j == curActiveMemNodeCounts[i] - 1) { index2 = beginIndx + i * maxNodePerCell; } else { index2 = beginIndx + i * maxNodePerCell + j + 1; } if (hostIsActiveVec[index1] == true && hostIsActiveVec[index2] == true) { node1X = hostTmpVectorLocX[index1]; node1Y = hostTmpVectorLocY[index1]; node2X = hostTmpVectorLocX[index2]; node2Y = hostTmpVectorLocY[index2]; IndexMap::iterator it = locIndexToAniIndexMap.find(index1); if (it == locIndexToAniIndexMap.end()) { locIndexToAniIndexMap.insert( std::pair<uint, uint>(index1, curIndex)); curIndex++; tmpPos = CVector(node1X, node1Y, 0); //aniVal = hostTmpVectorNodeType[index1]; aniVal = hostTmpVectorTenMag[index1]; rawAniData.aniNodePosArr.push_back(tmpPos); rawAniData.aniNodeVal.push_back(aniVal); } it = locIndexToAniIndexMap.find(index2); if (it == locIndexToAniIndexMap.end()) { locIndexToAniIndexMap.insert( std::pair<uint, uint>(index2, curIndex)); curIndex++; tmpPos = CVector(node2X, node2Y, 0); //aniVal = hostTmpVectorNodeType[index2]; aniVal = hostTmpVectorTenMag[index2]; rawAniData.aniNodePosArr.push_back(tmpPos); rawAniData.aniNodeVal.push_back(aniVal); } it = locIndexToAniIndexMap.find(index1); uint aniIndex1 = it->second; it = locIndexToAniIndexMap.find(index2); uint aniIndex2 = it->second; LinkAniData linkData; linkData.node1Index = aniIndex1; linkData.node2Index = aniIndex2; rawAniData.memLinks.push_back(linkData); } } } for (uint i = 0; i < activeCellCount; i++) { for (uint j = 0; j < 
allocPara_m.maxIntnlNodePerCell; j++) { for (uint k = j + 1; k < allocPara_m.maxIntnlNodePerCell; k++) { index1 = i * maxNodePerCell + maxMemNodePerCell + j; index2 = i * maxNodePerCell + maxMemNodePerCell + k; if (hostIsActiveVec[index1] && hostIsActiveVec[index2]) { node1X = hostTmpVectorLocX[index1]; node1Y = hostTmpVectorLocY[index1]; node2X = hostTmpVectorLocX[index2]; node2Y = hostTmpVectorLocY[index2]; if (aniCri.isPairQualify_M(node1X, node1Y, node2X, node2Y)) { IndexMap::iterator it = locIndexToAniIndexMap.find( index1); if (it == locIndexToAniIndexMap.end()) { locIndexToAniIndexMap.insert( std::pair<uint, uint>(index1, curIndex)); curIndex++; tmpPos = CVector(node1X, node1Y, 0); //aniVal = hostTmpVectorNodeType[index1]; aniVal = -1; rawAniData.aniNodePosArr.push_back(tmpPos); rawAniData.aniNodeVal.push_back(aniVal); } it = locIndexToAniIndexMap.find(index2); if (it == locIndexToAniIndexMap.end()) { locIndexToAniIndexMap.insert( std::pair<uint, uint>(index2, curIndex)); curIndex++; tmpPos = CVector(node2X, node2Y, 0); //aniVal = hostTmpVectorNodeType[index1]; aniVal = -1; rawAniData.aniNodePosArr.push_back(tmpPos); rawAniData.aniNodeVal.push_back(aniVal); } it = locIndexToAniIndexMap.find(index1); uint aniIndex1 = it->second; it = locIndexToAniIndexMap.find(index2); uint aniIndex2 = it->second; LinkAniData linkData; linkData.node1Index = aniIndex1; linkData.node2Index = aniIndex2; rawAniData.internalLinks.push_back(linkData); } } } } } return rawAniData; } AniRawData SceCells::obtainAniRawDataGivenCellColor(vector<double>& cellColors, AnimationCriteria& aniCri, vector<double>& cellsPerimeter, vector <double> & cellsDppLevel) { //AliE cout << "I am in obtainAniRawDataGivenCellColor start"<<endl; uint activeCellCount = allocPara_m.currentActiveCellCount; uint maxNodePerCell = allocPara_m.maxAllNodePerCell; uint maxMemNodePerCell = allocPara_m.maxMembrNodePerCell; uint beginIndx = allocPara_m.bdryNodeCount; assert(cellColors.size() >= activeCellCount); assert(cellsPerimeter.size() == activeCellCount); //AliE AniRawData rawAniData; //cout << "size of potential pairs = " << pairs.size() << endl; // unordered_map is more efficient than map, but it is a c++ 11 feature // and c++ 11 seems to be incompatible with Thrust. 
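    // Same staging pattern as obtainAniRawData, extended with membrane forces (F_MI_M),
    // curvature and external forces; a second zipped copy is used further down because a
    // thrust tuple cannot hold more than ten elements.
    // Note: tmpF_MI_M_MagN_Int is indexed for cell ranks 0..activeCellCount-1 in the loops
    // below, which suggests it should be sized activeCellCount rather than activeCellCount-1.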
IndexMap locIndexToAniIndexMap; uint maxActiveNode = activeCellCount * maxNodePerCell; thrust::host_vector<double> hostTmpVectorLocX(maxActiveNode); thrust::host_vector<double> hostTmpVectorLocY(maxActiveNode); thrust::host_vector<bool> hostIsActiveVec(maxActiveNode); thrust::host_vector<int> hostBondVec(maxActiveNode); thrust::host_vector<double> hostTmpVectorTenMag(maxActiveNode); thrust::host_vector<double> hostTmpVectorF_MI_M_x(maxActiveNode);//AAMIRI //AliE thrust::host_vector<double> hostTmpVectorF_MI_M_y(maxActiveNode);//AAMIRI //AliE thrust::host_vector<double> hostTmpVectorF_MI_M_T(maxActiveNode); //AliE thrust::host_vector<double> hostTmpVectorF_MI_M_N(maxActiveNode);//AliE thrust::host_vector<double> hostTmpVectorNodeCurvature(maxActiveNode);//AAMIRI thrust::host_vector<double> hostTmpVectorExtForceTangent(maxActiveNode);//AAMIRI thrust::host_vector<double> hostTmpVectorExtForceNormal(maxActiveNode);//AAMIRI thrust::copy( thrust::make_zip_iterator( thrust::make_tuple(nodes->getInfoVecs().nodeLocX.begin(), nodes->getInfoVecs().nodeLocY.begin(), nodes->getInfoVecs().nodeF_MI_M_x.begin(),//AAMIRI //AliE nodes->getInfoVecs().nodeF_MI_M_y.begin(),//AAMIRI //AliE nodes->getInfoVecs().nodeCurvature.begin(),//AAMIRI nodes->getInfoVecs().nodeIsActive.begin(), nodes->getInfoVecs().nodeAdhereIndex.begin(), nodes->getInfoVecs().membrTensionMag.begin(), nodes->getInfoVecs().nodeExtForceTangent.begin(),//AAMIRI nodes->getInfoVecs().nodeExtForceNormal.begin())),//AAMIRI thrust::make_zip_iterator( thrust::make_tuple(nodes->getInfoVecs().nodeLocX.begin(), nodes->getInfoVecs().nodeLocY.begin(), nodes->getInfoVecs().nodeF_MI_M_x.begin(),//AAMIRI //AliE nodes->getInfoVecs().nodeF_MI_M_y.begin(),//AAMIRI //AliE nodes->getInfoVecs().nodeCurvature.begin(),//AAMIRI nodes->getInfoVecs().nodeIsActive.begin(), nodes->getInfoVecs().nodeAdhereIndex.begin(), nodes->getInfoVecs().membrTensionMag.begin(), nodes->getInfoVecs().nodeExtForceTangent.begin(),//AAMIRI nodes->getInfoVecs().nodeExtForceNormal.begin()))//AAMIRI + maxActiveNode, thrust::make_zip_iterator( thrust::make_tuple(hostTmpVectorLocX.begin(), hostTmpVectorLocY.begin(), hostTmpVectorF_MI_M_x.begin(), hostTmpVectorF_MI_M_y.begin(),//AAMIRI hostTmpVectorNodeCurvature.begin(), //AAMIRI hostIsActiveVec.begin(), hostBondVec.begin(), hostTmpVectorTenMag.begin(), hostTmpVectorExtForceTangent.begin(), hostTmpVectorExtForceNormal.begin())));//AAMIRI //Copy more than 10 elements is not allowed so, I separate it thrust::copy( thrust::make_zip_iterator( thrust::make_tuple( nodes->getInfoVecs().nodeF_MI_M_T.begin(), //Ali nodes->getInfoVecs().nodeF_MI_M_N.begin() //Ali )), thrust::make_zip_iterator( thrust::make_tuple( nodes->getInfoVecs().nodeF_MI_M_T.begin(),//AliE nodes->getInfoVecs().nodeF_MI_M_N.begin() //AliE )) + maxActiveNode, thrust::make_zip_iterator( thrust::make_tuple( hostTmpVectorF_MI_M_T.begin(), hostTmpVectorF_MI_M_N.begin() ))); thrust::host_vector<uint> curActiveMemNodeCounts = cellInfoVecs.activeMembrNodeCounts; thrust::host_vector<uint> curActiveIntnlNodeCounts = cellInfoVecs.activeIntnlNodeCounts; CVector tmpPos; CVector tmpF_MI_M ;//AAmiri CVector tmpExtForce;//AAMIRI double tmpCurv; uint index1; int index2; std::vector<BondInfo> bondInfoVec; double node1X, node1Y; double node2X, node2Y; double node1F_MI_M_x, node1F_MI_M_y;//AAMIRI //AliE double nodeExtForceT, nodeExtForceN;//AAMIRI double aniVal; double aniVal2; double tmpF_MI_M_MagN_Int[activeCellCount-1] ; //AliE //This is how the VTK file is intended to be written. 
First the memmbraen nodes are going to be written and then internal nodes. //loop on membrane nodes for (uint i = 0; i < activeCellCount; i++) { tmpF_MI_M_MagN_Int[i]=0.0 ; for (uint j = 0; j < curActiveMemNodeCounts[i]; j++) { index1 = beginIndx + i * maxNodePerCell + j; if ( hostIsActiveVec[index1]==true) { tmpCurv = hostTmpVectorNodeCurvature[index1];//AAMIRI rawAniData.aniNodeCurvature.push_back(tmpCurv);//AAMIRI node1F_MI_M_x= hostTmpVectorF_MI_M_x[index1]; //AliE node1F_MI_M_y= hostTmpVectorF_MI_M_y[index1]; //AliE tmpF_MI_M= CVector(node1F_MI_M_x, node1F_MI_M_y, 0.0); //AliE rawAniData.aniNodeF_MI_M.push_back(tmpF_MI_M); //AliE // tmpF_MI_M_MagN_Int[i]=tmpF_MI_M_MagN_Int[i]+sqrt(pow(hostTmpVectorF_MI_M_x[index1],2)+pow(hostTmpVectorF_MI_M_y[index1],2)) ; //AliE tmpF_MI_M_MagN_Int[i]=tmpF_MI_M_MagN_Int[i]+abs(hostTmpVectorF_MI_M_N[index1]) ; //AliE nodeExtForceT = hostTmpVectorExtForceTangent[index1];//AAMIRI nodeExtForceN = hostTmpVectorExtForceNormal[index1];//AAMIRI tmpExtForce = CVector(nodeExtForceT, nodeExtForceN, 0.0);//AAMIRI rawAniData.aniNodeExtForceArr.push_back(tmpExtForce); rawAniData.aniNodeRank.push_back(i);//AAMIRI } } } //loop on internal nodes for (uint i=0; i<activeCellCount; i++){ for (uint j = maxMemNodePerCell; j < maxNodePerCell; j++) { index1 = beginIndx + i * maxNodePerCell + j; if ( hostIsActiveVec[index1]==true ) { tmpCurv = hostTmpVectorNodeCurvature[index1];//AAMIRI rawAniData.aniNodeCurvature.push_back(tmpCurv);//AAMIRI node1F_MI_M_x= hostTmpVectorF_MI_M_x[index1]; //AliE node1F_MI_M_y= hostTmpVectorF_MI_M_y[index1]; //AliE tmpF_MI_M= CVector(node1F_MI_M_x, node1F_MI_M_y, 0.0); //AliE rawAniData.aniNodeF_MI_M.push_back(tmpF_MI_M); nodeExtForceT = hostTmpVectorExtForceTangent[index1];//AAMIRI nodeExtForceN = hostTmpVectorExtForceNormal[index1];//AAMIRI tmpExtForce = CVector(nodeExtForceT, nodeExtForceN, 0.0);//AAMIRI rawAniData.aniNodeExtForceArr.push_back(tmpExtForce); rawAniData.aniNodeRank.push_back(i);//AAMIRI } } } for (uint i = 0; i < activeCellCount; i++) { for (uint j = 0; j < maxMemNodePerCell; j++) { index1 = beginIndx + i * maxNodePerCell + j; if (hostIsActiveVec[index1] == true) { index2 = hostBondVec[index1]; if (index2 > index1 && index2 != -1) { BondInfo bond; bond.cellRank1 = i; bond.pos1 = CVector(hostTmpVectorLocX[index1], hostTmpVectorLocY[index1], 0); bond.cellRank2 = (index2 - beginIndx) / maxNodePerCell; bond.pos2 = CVector(hostTmpVectorLocX[index2], hostTmpVectorLocY[index2], 0); bondInfoVec.push_back(bond); } } } } rawAniData.bondsArr = bondInfoVec; uint curIndex = 0; //loop on membrane nodes for (uint i = 0; i < activeCellCount; i++) { for (uint j = 0; j < curActiveMemNodeCounts[i]; j++) { index1 = beginIndx + i * maxNodePerCell + j; if (j == curActiveMemNodeCounts[i] - 1) { index2 = beginIndx + i * maxNodePerCell; } else { index2 = beginIndx + i * maxNodePerCell + j + 1; } if (hostIsActiveVec[index1] == true && hostIsActiveVec[index2] == true) { node1X = hostTmpVectorLocX[index1]; node1Y = hostTmpVectorLocY[index1]; node2X = hostTmpVectorLocX[index2]; node2Y = hostTmpVectorLocY[index2]; IndexMap::iterator it = locIndexToAniIndexMap.find(index1); if (it == locIndexToAniIndexMap.end()) { locIndexToAniIndexMap.insert( std::pair<uint, uint>(index1, curIndex)); curIndex++; tmpPos = CVector(node1X, node1Y, 0); //aniVal = hostTmpVectorNodeType[index1]; aniVal = cellColors[i]; rawAniData.aniNodeF_MI_M_MagN_Int.push_back(tmpF_MI_M_MagN_Int[i]/cellsPerimeter[i]) ; //Ali added //aniVal2=dppLevels_Cell[i] ; aniVal2=cellsDppLevel[i] ; 
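    // each animation node also records its cell's Dpp level and the membrane force
    // magnitude normalized by that cell's perimeter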
rawAniData.dppLevel.push_back(aniVal2) ; //Ali Added rawAniData.aniNodePosArr.push_back(tmpPos); rawAniData.aniNodeVal.push_back(aniVal); } it = locIndexToAniIndexMap.find(index2); if (it == locIndexToAniIndexMap.end()) { locIndexToAniIndexMap.insert( std::pair<uint, uint>(index2, curIndex)); curIndex++; tmpPos = CVector(node2X, node2Y, 0); //aniVal = hostTmpVectorNodeType[index2]; aniVal = cellColors[i]; rawAniData.aniNodeF_MI_M_MagN_Int.push_back(tmpF_MI_M_MagN_Int[i]/cellsPerimeter[i]) ; //Ali Added //aniVal2=dppLevels_Cell[i]; aniVal2=cellsDppLevel[i]; rawAniData.dppLevel.push_back(aniVal2) ; //Ali Added rawAniData.aniNodePosArr.push_back(tmpPos); rawAniData.aniNodeVal.push_back(aniVal); } it = locIndexToAniIndexMap.find(index1); uint aniIndex1 = it->second; it = locIndexToAniIndexMap.find(index2); uint aniIndex2 = it->second; LinkAniData linkData; linkData.node1Index = aniIndex1; linkData.node2Index = aniIndex2; rawAniData.memLinks.push_back(linkData); } } } //loop on internal nodes for (uint i = 0; i < activeCellCount; i++) { // for (uint j = 0; j < allocPara_m.maxAllNodePerCell; j++) { for (uint j = 0; j < allocPara_m.maxIntnlNodePerCell; j++) { for (uint k = 0; k < allocPara_m.maxAllNodePerCell; k++) { //Ali //for (uint k = j + 1; k < allocPara_m.maxIntnlNodePerCell; k++) { //Ali comment index1 = i * maxNodePerCell + maxMemNodePerCell + j; index2 = i * maxNodePerCell + k; //Ali // index2 = i * maxNodePerCell + maxMemNodePerCell + k; //Ali comment // if (hostIsActiveVec[index1] && hostIsActiveVec[index2]) { if (hostIsActiveVec[index1] && hostIsActiveVec[index2]&& index1 !=index2 ) { node1X = hostTmpVectorLocX[index1]; node1Y = hostTmpVectorLocY[index1]; node2X = hostTmpVectorLocX[index2]; node2Y = hostTmpVectorLocY[index2]; if (aniCri.isPairQualify_M(node1X, node1Y, node2X, node2Y)) { IndexMap::iterator it = locIndexToAniIndexMap.find( index1); if (it == locIndexToAniIndexMap.end()) { locIndexToAniIndexMap.insert( std::pair<uint, uint>(index1, curIndex)); curIndex++; tmpPos = CVector(node1X, node1Y, 0); //aniVal = hostTmpVectorNodeType[index1]; aniVal = cellColors[i]; rawAniData.aniNodeF_MI_M_MagN_Int.push_back(tmpF_MI_M_MagN_Int[i]/cellsPerimeter[i]) ; //Ali Added //aniVal2=dppLevels_Cell[i]; aniVal2=cellsDppLevel[i]; rawAniData.dppLevel.push_back(aniVal2) ; //Ali Added rawAniData.aniNodePosArr.push_back(tmpPos); rawAniData.aniNodeVal.push_back(aniVal); } it = locIndexToAniIndexMap.find(index2); if (it == locIndexToAniIndexMap.end()) { locIndexToAniIndexMap.insert( std::pair<uint, uint>(index2, curIndex)); curIndex++; tmpPos = CVector(node2X, node2Y, 0); //aniVal = hostTmpVectorNodeType[index1]; aniVal = cellColors[i]; rawAniData.aniNodeF_MI_M_MagN_Int.push_back(tmpF_MI_M_MagN_Int[i]/cellsPerimeter[i]) ; //Ali Added //aniVal2=dppLevels_Cell[i]; aniVal2=cellsDppLevel[i]; rawAniData.dppLevel.push_back(aniVal2) ; //Ali Added rawAniData.aniNodePosArr.push_back(tmpPos); rawAniData.aniNodeVal.push_back(aniVal); } it = locIndexToAniIndexMap.find(index1); uint aniIndex1 = it->second; it = locIndexToAniIndexMap.find(index2); uint aniIndex2 = it->second; LinkAniData linkData; linkData.node1Index = aniIndex1; linkData.node2Index = aniIndex2; rawAniData.internalLinks.push_back(linkData); } } } } } cout << "I am in obtainAniRawDataGivenCellColor end"<<endl; return rawAniData; } void SceCells::copyInitActiveNodeCount_M( std::vector<uint>& initMembrActiveNodeCounts, std::vector<uint>& initIntnlActiveNodeCounts, std::vector<double> &initGrowProgVec) { assert( initMembrActiveNodeCounts.size() == 
initIntnlActiveNodeCounts.size()); totalNodeCountForActiveCells = initMembrActiveNodeCounts.size() * allocPara_m.maxAllNodePerCell; thrust::copy(initMembrActiveNodeCounts.begin(), initMembrActiveNodeCounts.end(), cellInfoVecs.activeMembrNodeCounts.begin()); thrust::copy(initIntnlActiveNodeCounts.begin(), initIntnlActiveNodeCounts.end(), cellInfoVecs.activeIntnlNodeCounts.begin()); thrust::copy(initGrowProgVec.begin(), initGrowProgVec.end(), cellInfoVecs.growthProgress.begin()); } void SceCells::myDebugFunction() { uint maxActiveNodeCount = allocPara_m.currentActiveCellCount * allocPara_m.maxAllNodePerCell; uint maxActiveCellCount = allocPara_m.currentActiveCellCount; std::cout << "totalNodeCountforActiveCells: " << totalNodeCountForActiveCells << std::endl; std::cout << "maxAllNodePerCell: " << allocPara_m.maxAllNodePerCell << std::endl; std::cout << "maxActiveCellCount: " << maxActiveCellCount << std::endl; std::cout << "bdryNodeCount: " << allocPara_m.bdryNodeCount << std::endl; std::cout << "grow threshold: " << miscPara.growThreshold << std::endl; std::cout << std::endl; for (uint i = 0; i < maxActiveCellCount; i++) { std::cout << cellInfoVecs.growthProgress[i] << " "; } std::cout << std::endl; for (uint i = 0; i < maxActiveCellCount; i++) { std::cout << cellInfoVecs.isScheduledToGrow[i] << " "; } std::cout << std::endl; for (uint i = 0; i < maxActiveCellCount; i++) { std::cout << cellInfoVecs.lastCheckPoint[i] << " "; } std::cout << std::endl; for (uint i = 0; i < maxActiveNodeCount; i++) { if (nodes->getInfoVecs().nodeIsActive[i] && nodes->getInfoVecs().nodeCellType[i] == CellIntnl) { std::cout << nodes->getInfoVecs().nodeVelX[i] << " "; } } std::cout << std::endl; for (uint i = 0; i < maxActiveCellCount; i++) { std::cout << cellInfoVecs.activeIntnlNodeCounts[i] << " "; } std::cout << std::endl; for (uint i = 0; i < maxActiveCellCount; i++) { std::cout << cellInfoVecs.expectedLength[i] << " "; } std::cout << std::endl; for (uint i = 0; i < maxActiveCellCount; i++) { std::cout << cellInfoVecs.smallestDistance[i] << " "; } std::cout << std::endl; for (uint i = 0; i < maxActiveCellCount; i++) { std::cout << cellInfoVecs.biggestDistance[i] << " "; } std::cout << std::endl; for (uint i = 0; i < maxActiveCellCount; i++) { std::cout << cellInfoVecs.lengthDifference[i] << " "; } std::cout << std::endl; for (uint i = 0; i < maxActiveCellCount; i++) { std::cout << cellInfoVecs.centerCoordX[i] << " "; } std::cout << std::endl; for (uint i = 0; i < maxActiveCellCount; i++) { std::cout << cellInfoVecs.centerCoordY[i] << " "; } std::cout << std::endl; for (uint i = 0; i < maxActiveCellCount; i++) { std::cout << cellInfoVecs.growthXDir[i] << " "; } std::cout << std::endl; for (uint i = 0; i < maxActiveCellCount; i++) { std::cout << cellInfoVecs.growthYDir[i] << " "; } std::cout << std::endl; int jj; std::cin >> jj; } void SceCells::divDebug() { std::cout << "tmpIsActive_M: "; for (uint i = 0; i < divAuxData.tmpIsActive_M.size(); i++) { std::cout << divAuxData.tmpIsActive_M[i] << " "; } std::cout << std::endl; std::cout << "tmpNodePosX_M: "; for (uint i = 0; i < divAuxData.tmpNodePosX_M.size(); i++) { std::cout << divAuxData.tmpNodePosX_M[i] << " "; } std::cout << std::endl; std::cout << "tmpNodePosY_M : "; for (uint i = 0; i < divAuxData.tmpNodePosY_M.size(); i++) { std::cout << divAuxData.tmpNodePosY_M[i] << " "; } std::cout << std::endl; std::cout << "tmpCellRank_M : "; for (uint i = 0; i < divAuxData.tmpCellRank_M.size(); i++) { std::cout << divAuxData.tmpCellRank_M[i] << " "; } std::cout << 
std::endl; std::cout << "tmpDivDirX_M : "; for (uint i = 0; i < divAuxData.tmpDivDirX_M.size(); i++) { std::cout << divAuxData.tmpDivDirX_M[i] << " "; } std::cout << std::endl; std::cout << "tmpDivDirY_M : "; for (uint i = 0; i < divAuxData.tmpDivDirY_M.size(); i++) { std::cout << divAuxData.tmpDivDirY_M[i] << " "; } std::cout << std::endl; std::cout << "tmpCenterPosX_M : "; for (uint i = 0; i < divAuxData.tmpCenterPosX_M.size(); i++) { std::cout << divAuxData.tmpCenterPosX_M[i] << " "; } std::cout << std::endl; std::cout << "tmpCenterPosY_M : "; for (uint i = 0; i < divAuxData.tmpCenterPosY_M.size(); i++) { std::cout << divAuxData.tmpCenterPosY_M[i] << " "; } std::cout << std::endl; std::cout << "tmpIsActive1_M : "; for (uint i = 0; i < divAuxData.tmpIsActive1_M.size(); i++) { std::cout << divAuxData.tmpIsActive1_M[i] << " "; } std::cout << std::endl; std::cout << "tmpXPos1_M : "; for (uint i = 0; i < divAuxData.tmpXPos1_M.size(); i++) { std::cout << divAuxData.tmpXPos1_M[i] << " "; if (i > 0 && i < allocPara_m.maxMembrNodePerCell && divAuxData.tmpIsActive1_M[i] && divAuxData.tmpIsActive1_M[i - 1] && fabs(divAuxData.tmpXPos1_M[i] - divAuxData.tmpXPos1_M[i - 1]) > 0.1) { std::cout << "11111111111111111111111, " << i << std::endl; int jj; cin >> jj; } } std::cout << std::endl; std::cout << "XPos1_onDevice : "; for (uint i = 0; i < divAuxData.tmpCellRank_M.size(); i++) { for (uint j = 0; j < allocPara_m.maxAllNodePerCell; j++) { uint index = divAuxData.tmpCellRank_M[i] * allocPara_m.maxAllNodePerCell + j; std::cout << nodes->getInfoVecs().nodeLocX[index] << " "; } } std::cout << std::endl; std::cout << "tmpYPos1_M : "; for (uint i = 0; i < divAuxData.tmpYPos1_M.size(); i++) { std::cout << divAuxData.tmpYPos1_M[i] << " "; } std::cout << std::endl; std::cout << "tmpIsActive2_M: "; for (uint i = 0; i < divAuxData.tmpIsActive2_M.size(); i++) { std::cout << divAuxData.tmpIsActive2_M[i] << " "; } std::cout << std::endl; std::cout << "tmpXPos2_M : "; for (uint i = 0; i < divAuxData.tmpXPos2_M.size(); i++) { std::cout << divAuxData.tmpXPos2_M[i] << " "; if (i > 0 && i < allocPara_m.maxMembrNodePerCell && divAuxData.tmpIsActive2_M[i] && divAuxData.tmpIsActive2_M[i - 1] && fabs(divAuxData.tmpXPos2_M[i] - divAuxData.tmpXPos2_M[i - 1]) > 0.1) { std::cout << "2222222222222222222, " << i << std::endl; int jj; cin >> jj; } } std::cout << std::endl; std::cout << "tmpYPos2_M : "; for (uint i = 0; i < divAuxData.tmpYPos2_M.size(); i++) { std::cout << divAuxData.tmpYPos2_M[i] << " "; } std::cout << std::endl; std::cout << "tmp1InternalActiveCounts: "; for (uint i = 0; i < divAuxData.tmp1InternalActiveCounts.size(); i++) { std::cout << divAuxData.tmp1InternalActiveCounts[i] << " "; } std::cout << std::endl; std::cout << "tmp2InternalActiveCounts: "; for (uint i = 0; i < divAuxData.tmp2InternalActiveCounts.size(); i++) { std::cout << divAuxData.tmp2InternalActiveCounts[i] << " "; } std::cout << std::endl; std::cout << "tmp1MemActiveCounts: "; for (uint i = 0; i < divAuxData.tmp1MemActiveCounts.size(); i++) { std::cout << divAuxData.tmp1MemActiveCounts[i] << " "; } std::cout << std::endl; std::cout << "tmp2MemActiveCounts: "; for (uint i = 0; i < divAuxData.tmp2MemActiveCounts.size(); i++) { std::cout << divAuxData.tmp2MemActiveCounts[i] << " "; } std::cout << std::endl; int jj; std::cin >> jj; } void SceCells::adjustGrowthInfo_M() { uint halfMax = allocPara_m.maxIntnlNodePerCell / 2; thrust::transform_if( thrust::make_zip_iterator( thrust::make_tuple( cellInfoVecs.activeIntnlNodeCounts.begin(), 
cellInfoVecs.growthProgress.begin(), cellInfoVecs.lastCheckPoint.begin())), thrust::make_zip_iterator( thrust::make_tuple( cellInfoVecs.activeIntnlNodeCounts.begin(), cellInfoVecs.growthProgress.begin(), cellInfoVecs.lastCheckPoint.begin())) + allocPara_m.currentActiveCellCount, cellInfoVecs.isScheduledToGrow.begin(), thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.isScheduledToGrow.begin(), cellInfoVecs.growthProgress.begin(), cellInfoVecs.lastCheckPoint.begin())), AdjustGrowth(halfMax), thrust::identity<bool>()); } VtkAnimationData SceCells::outputVtkData(AniRawData& rawAniData, AnimationCriteria& aniCri) { VtkAnimationData vtkData; for (uint i = 0; i < rawAniData.aniNodePosArr.size(); i++) { PointAniData ptAniData; ptAniData.pos = rawAniData.aniNodePosArr[i]; ptAniData.F_MI_M_MagN_Int= rawAniData.aniNodeF_MI_M_MagN_Int[i]; //AliE ptAniData.dppLevel1= rawAniData.dppLevel[i]; //AliE ptAniData.F_MI_M = rawAniData.aniNodeF_MI_M[i];//AAMIRI ptAniData.colorScale = rawAniData.aniNodeVal[i]; ptAniData.colorScale2 = rawAniData.aniNodeCurvature[i];//AAMIRI ptAniData.rankScale = rawAniData.aniNodeRank[i];//AAMIRI ptAniData.extForce = rawAniData.aniNodeExtForceArr[i];//AAMIRI vtkData.pointsAniData.push_back(ptAniData); } for (uint i = 0; i < rawAniData.internalLinks.size(); i++) { LinkAniData linkData = rawAniData.internalLinks[i]; vtkData.linksAniData.push_back(linkData); } for (uint i = 0; i < rawAniData.memLinks.size(); i++) { LinkAniData linkData = rawAniData.memLinks[i]; vtkData.linksAniData.push_back(linkData); } vtkData.isArrowIncluded = false; return vtkData; } void SceCells::copyToGPUConstMem() { double pI_CPU = acos(-1.0); double minLengthCPU = globalConfigVars.getConfigValue("MinLength").toDouble(); hipMemcpyToSymbol(minLength, &minLengthCPU, sizeof(double)); double minDivisorCPU = globalConfigVars.getConfigValue("MinDivisor").toDouble(); hipMemcpyToSymbol(minDivisor, &minDivisorCPU, sizeof(double)); hipMemcpyToSymbol(membrEquLen, &membrPara.membrEquLenCPU, sizeof(double)); hipMemcpyToSymbol(membrStiff, &membrPara.membrStiffCPU, sizeof(double)); hipMemcpyToSymbol(membrStiff_Mitotic, &membrPara.membrStiff_Mitotic, sizeof(double)); // Ali June 30 hipMemcpyToSymbol(pI, &pI_CPU, sizeof(double)); hipMemcpyToSymbol(bendCoeff, &membrPara.membrBendCoeff, sizeof(double)); hipMemcpyToSymbol(bendCoeff_Mitotic, &membrPara.membrBendCoeff_Mitotic, sizeof(double));//AAMIRI hipMemcpyToSymbol(F_Ext_Incline_M2, &membrPara.F_Ext_Incline, sizeof(double)); //Ali uint maxAllNodePerCellCPU = globalConfigVars.getConfigValue( "MaxAllNodeCountPerCell").toInt(); uint maxMembrNodePerCellCPU = globalConfigVars.getConfigValue( "MaxMembrNodeCountPerCell").toInt(); uint maxIntnlNodePerCellCPU = globalConfigVars.getConfigValue( "MaxIntnlNodeCountPerCell").toInt(); hipMemcpyToSymbol(maxAllNodePerCell, &maxAllNodePerCellCPU, sizeof(uint)); hipMemcpyToSymbol(maxMembrPerCell, &maxMembrNodePerCellCPU, sizeof(uint)); hipMemcpyToSymbol(maxIntnlPerCell, &maxIntnlNodePerCellCPU, sizeof(uint)); double sceIntnlBParaCPU_M[5]; double sceIntraParaCPU_M[5]; double sceIntraParaDivCPU_M[5]; double U0_IntnlB = globalConfigVars.getConfigValue("SceIntnlB_U0").toDouble(); double V0_IntnlB = globalConfigVars.getConfigValue("SceIntnlB_V0").toDouble(); double k1_IntnlB = globalConfigVars.getConfigValue("SceIntnlB_k1").toDouble(); double k2_IntnlB = globalConfigVars.getConfigValue("SceIntnlB_k2").toDouble(); double intnlBEffectiveRange = globalConfigVars.getConfigValue( "IntnlBEffectRange").toDouble(); sceIntnlBParaCPU_M[0] = 
U0_IntnlB; sceIntnlBParaCPU_M[1] = V0_IntnlB; sceIntnlBParaCPU_M[2] = k1_IntnlB; sceIntnlBParaCPU_M[3] = k2_IntnlB; sceIntnlBParaCPU_M[4] = intnlBEffectiveRange; ////////////////////// //// Block 3 ///////// ////////////////////// double U0_Intra = globalConfigVars.getConfigValue("IntraCell_U0").toDouble(); double V0_Intra = globalConfigVars.getConfigValue("IntraCell_V0").toDouble(); double k1_Intra = globalConfigVars.getConfigValue("IntraCell_k1").toDouble(); double k2_Intra = globalConfigVars.getConfigValue("IntraCell_k2").toDouble(); double intraLinkEffectiveRange = globalConfigVars.getConfigValue( "IntraEffectRange").toDouble(); sceIntraParaCPU_M[0] = U0_Intra; sceIntraParaCPU_M[1] = V0_Intra; sceIntraParaCPU_M[2] = k1_Intra; sceIntraParaCPU_M[3] = k2_Intra; sceIntraParaCPU_M[4] = intraLinkEffectiveRange; ////////////////////// //// Block 4 ///////// ////////////////////// double U0_Intra_Div = globalConfigVars.getConfigValue("IntraCell_U0_Div").toDouble(); double V0_Intra_Div = globalConfigVars.getConfigValue("IntraCell_V0_Div").toDouble(); double k1_Intra_Div = globalConfigVars.getConfigValue("IntraCell_k1_Div").toDouble(); double k2_Intra_Div = globalConfigVars.getConfigValue("IntraCell_k2_Div").toDouble(); double intraDivEffectiveRange = globalConfigVars.getConfigValue( "IntraDivEffectRange").toDouble(); sceIntraParaDivCPU_M[0] = U0_Intra_Div; sceIntraParaDivCPU_M[1] = V0_Intra_Div; sceIntraParaDivCPU_M[2] = k1_Intra_Div; sceIntraParaDivCPU_M[3] = k2_Intra_Div; sceIntraParaDivCPU_M[4] = intraDivEffectiveRange; hipMemcpyToSymbol(grthPrgrCriEnd_M, &growthAuxData.grthProgrEndCPU, sizeof(double)); //hipMemcpyToSymbol(grthPrgrCriVal_M, &growthPrgrCriVal, sizeof(double)); hipMemcpyToSymbol(sceIB_M, sceIntnlBParaCPU_M, 5 * sizeof(double)); hipMemcpyToSymbol(sceII_M, sceIntraParaCPU_M, 5 * sizeof(double)); hipMemcpyToSymbol(sceIIDiv_M, sceIntraParaDivCPU_M, 5 * sizeof(double)); double IBDivHost[5]; IBDivHost[0] = globalConfigVars.getConfigValue("SceIntnlB_U0_Div").toDouble(); IBDivHost[1] = globalConfigVars.getConfigValue("SceIntnlB_V0_Div").toDouble(); IBDivHost[2] = globalConfigVars.getConfigValue("SceIntnlB_k1_Div").toDouble(); IBDivHost[3] = globalConfigVars.getConfigValue("SceIntnlB_k2_Div").toDouble(); IBDivHost[4] = globalConfigVars.getConfigValue("IntnlBDivEffectRange").toDouble(); hipMemcpyToSymbol(sceIBDiv_M, IBDivHost, 5 * sizeof(double)); } void SceCells::handleMembrGrowth_M() { // figure out membr growth speed calMembrGrowSpeed_M(); // figure out which cells will add new point adjustMembrGrowSpeed_M(); decideIfAddMembrNode_M(); // add membr nodes addMembrNodes_M(); //membrDebug(); } void SceCells::calMembrGrowSpeed_M() { membrPara.membrGrowCoeff = growthAuxData.prolifDecay * membrPara.membrGrowCoeff_Ori; membrPara.membrGrowLimit = growthAuxData.prolifDecay * membrPara.membrGrowLimit_Ori; // reduce_by_key, find value of max tension and their index thrust::counting_iterator<uint> iBegin(0); uint maxNPerCell = allocPara_m.maxAllNodePerCell; thrust::reduce_by_key( make_transform_iterator(iBegin, DivideFunctor(maxNPerCell)), make_transform_iterator(iBegin, DivideFunctor(maxNPerCell)) + totalNodeCountForActiveCells, thrust::make_zip_iterator( thrust::make_tuple( nodes->getInfoVecs().membrTenMagRi.begin(), make_transform_iterator(iBegin, ModuloFunctor(maxNPerCell)), nodes->getInfoVecs().membrLinkRiMidX.begin(), nodes->getInfoVecs().membrLinkRiMidY.begin(), nodes->getInfoVecs().membrDistToRi.begin())), cellInfoVecs.cellRanksTmpStorage.begin(), thrust::make_zip_iterator( 
thrust::make_tuple(cellInfoVecs.maxTenRiVec.begin(), cellInfoVecs.maxTenIndxVec.begin(), cellInfoVecs.maxTenRiMidXVec.begin(), cellInfoVecs.maxTenRiMidYVec.begin(), cellInfoVecs.maxDistToRiVec.begin())), thrust::equal_to<uint>(), MaxWInfo()); thrust::reduce_by_key( make_transform_iterator(iBegin, DivideFunctor(maxNPerCell)), make_transform_iterator(iBegin, DivideFunctor(maxNPerCell)) + totalNodeCountForActiveCells, nodes->getInfoVecs().membrTensionMag.begin(), cellInfoVecs.cellRanksTmpStorage.begin(), cellInfoVecs.aveTension.begin(), thrust::equal_to<uint>(), thrust::plus<double>()); thrust::transform(cellInfoVecs.aveTension.begin(), cellInfoVecs.aveTension.begin() + allocPara_m.currentActiveCellCount, cellInfoVecs.activeMembrNodeCounts.begin(), cellInfoVecs.aveTension.begin(), thrust::divides<double>()); // linear relationship with highest tension; capped by a given value thrust::transform(cellInfoVecs.aveTension.begin(), cellInfoVecs.aveTension.begin() + allocPara_m.currentActiveCellCount, cellInfoVecs.membrGrowSpeed.begin(), MultiWithLimit(membrPara.membrGrowCoeff, membrPara.membrGrowLimit)); } void SceCells::adjustMembrGrowSpeed_M() { thrust::transform( thrust::make_zip_iterator( thrust::make_tuple( cellInfoVecs.activeMembrNodeCounts.begin(), cellInfoVecs.activeIntnlNodeCounts.begin())), thrust::make_zip_iterator( thrust::make_tuple( cellInfoVecs.activeMembrNodeCounts.begin(), cellInfoVecs.activeIntnlNodeCounts.begin())) + allocPara_m.currentActiveCellCount, cellInfoVecs.membrGrowSpeed.begin(), AdjustMembrGrow(membrPara.growthConst_N, membrPara.initMembrCt_N, membrPara.initIntnlCt_N)); } void SceCells::decideIfAddMembrNode_M() { // decide if add membrane node given current active node count and // membr growth progress uint curActCellCt = allocPara_m.currentActiveCellCount; thrust::transform(cellInfoVecs.membrGrowSpeed.begin(), cellInfoVecs.membrGrowSpeed.begin() + curActCellCt, cellInfoVecs.membrGrowProgress.begin(), cellInfoVecs.membrGrowProgress.begin(), SaxpyFunctor(dt)); uint maxMembrNode = allocPara_m.maxMembrNodePerCell; /**Ali thrust::transform( thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.membrGrowProgress.begin(), cellInfoVecs.activeMembrNodeCounts.begin())), thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.membrGrowProgress.begin(), cellInfoVecs.activeMembrNodeCounts.begin())) + curActCellCt, thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.isMembrAddingNode.begin(), cellInfoVecs.membrGrowProgress.begin())), MemGrowFunc(maxMembrNode)); */ thrust::transform( thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.activeMembrNodeCounts.begin(), cellInfoVecs.membrGrowProgress.begin(),cellInfoVecs.maxDistToRiVec.begin())), thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.activeMembrNodeCounts.begin(), cellInfoVecs.membrGrowProgress.begin(),cellInfoVecs.maxDistToRiVec.begin())) + curActCellCt, thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.isMembrAddingNode.begin(), cellInfoVecs.membrGrowProgress.begin())), MemGrowFunc(maxMembrNode)); } /** * Add new membrane elements to cells. * This operation is relatively expensive because of memory rearrangement. 
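 * For each cell whose isMembrAddingNode flag is set, AddMemNode presumably inserts the new
 * node at the stored mid-point of the highest-tension edge (maxTenRiMidXVec/maxTenRiMidYVec)
 * and writes the updated count back into activeMembrNodeCounts.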
*/ void SceCells::addMembrNodes_M() { thrust::counting_iterator<uint> iBegin(0); uint curAcCCount = allocPara_m.currentActiveCellCount; uint maxNodePerCell = allocPara_m.maxAllNodePerCell; thrust::transform_if( thrust::make_zip_iterator( thrust::make_tuple(iBegin, cellInfoVecs.maxTenIndxVec.begin(), cellInfoVecs.activeMembrNodeCounts.begin(), cellInfoVecs.maxTenRiMidXVec.begin(), cellInfoVecs.maxTenRiMidYVec.begin())), thrust::make_zip_iterator( thrust::make_tuple(iBegin, cellInfoVecs.maxTenIndxVec.begin(), cellInfoVecs.activeMembrNodeCounts.begin(), cellInfoVecs.maxTenRiMidXVec.begin(), cellInfoVecs.maxTenRiMidYVec.begin())) + curAcCCount, cellInfoVecs.isMembrAddingNode.begin(), cellInfoVecs.activeMembrNodeCounts.begin(), AddMemNode(maxNodePerCell, growthAuxData.nodeIsActiveAddress, growthAuxData.nodeXPosAddress, growthAuxData.nodeYPosAddress, growthAuxData.adhIndxAddr), thrust::identity<bool>()); } void SceCells::membrDebug() { uint curAcCCount = allocPara_m.currentActiveCellCount; uint maxActiveNodeC = curAcCCount * allocPara_m.maxAllNodePerCell; uint maxNodePC = allocPara_m.maxAllNodePerCell; //uint tmp = 0; //for (uint i = 0; i < curAcCCount; i++) { // tmp += cellInfoVecs.isMembrAddingNode[i]; //} //if (tmp != 0) { // tmpDebug = true; //} //if (!tmpDebug) { // return; //} for (uint i = 0; i < maxActiveNodeC; i++) { if (i % maxNodePC == 0 || i % maxNodePC == 199 || i % maxNodePC == 200) { std::cout << nodes->getInfoVecs().membrTensionMag[i] << " "; } } std::cout << std::endl; for (uint i = 0; i < maxActiveNodeC; i++) { if (i % maxNodePC == 0 || i % maxNodePC == 199 || i % maxNodePC == 200) { std::cout << nodes->getInfoVecs().membrTenMagRi[i] << " "; } } std::cout << std::endl; for (uint i = 0; i < maxActiveNodeC; i++) { if (i % maxNodePC == 0 || i % maxNodePC == 199 || i % maxNodePC == 200) { std::cout << nodes->getInfoVecs().membrLinkRiMidX[i] << " "; } } std::cout << std::endl; for (uint i = 0; i < maxActiveNodeC; i++) { if (i % maxNodePC == 0 || i % maxNodePC == 199 || i % maxNodePC == 200) { std::cout << nodes->getInfoVecs().membrLinkRiMidY[i] << " "; } } std::cout << std::endl; for (uint i = 0; i < maxActiveNodeC; i++) { std::cout << nodes->getInfoVecs().membrBendLeftX[i] << " "; } std::cout << std::endl; for (uint i = 0; i < maxActiveNodeC; i++) { std::cout << nodes->getInfoVecs().membrBendLeftY[i] << " "; } std::cout << std::endl; for (uint i = 0; i < maxActiveNodeC; i++) { std::cout << nodes->getInfoVecs().membrBendRightX[i] << " "; } std::cout << std::endl; for (uint i = 0; i < maxActiveNodeC; i++) { std::cout << nodes->getInfoVecs().membrBendRightX[i] << " "; } std::cout << std::endl; for (uint i = 0; i < curAcCCount; i++) { std::cout << "(" << cellInfoVecs.maxTenIndxVec[i] << "," << cellInfoVecs.activeMembrNodeCounts[i] << "," << cellInfoVecs.maxTenRiMidXVec[i] << "," << cellInfoVecs.maxTenRiMidYVec[i] << ")" << std::endl; } int jj; std::cin >> jj; } void SceCells::assembleVecForTwoCells(uint i) { uint membThreshold = allocPara_m.maxMembrNodePerCell; uint maxAllNodePerCell = allocPara_m.maxAllNodePerCell; uint index; for (uint j = 0; j < membThreshold; j++) { index = i * maxAllNodePerCell + j; if (j < divAuxData.tmp1VecMem.size()) { divAuxData.tmpXPos1_M[index] = divAuxData.tmp1VecMem[j].x; divAuxData.tmpYPos1_M[index] = divAuxData.tmp1VecMem[j].y; divAuxData.tmpIsActive1_M[index] = true; } else { divAuxData.tmpIsActive1_M[index] = false; } } for (uint j = 0; j < membThreshold; j++) { index = i * maxAllNodePerCell + j; if (j < divAuxData.tmp2VecMem.size()) { 
divAuxData.tmpXPos2_M[index] = divAuxData.tmp2VecMem[j].x; divAuxData.tmpYPos2_M[index] = divAuxData.tmp2VecMem[j].y; divAuxData.tmpIsActive2_M[index] = true; } else { divAuxData.tmpIsActive2_M[index] = false; } } divAuxData.tmp1MemActiveCounts.push_back(divAuxData.tmp1VecMem.size()); divAuxData.tmp2MemActiveCounts.push_back(divAuxData.tmp2VecMem.size()); for (uint j = membThreshold; j < maxAllNodePerCell; j++) { index = i * maxAllNodePerCell + j; uint shift_j = j - membThreshold; if (shift_j < divAuxData.tmp1IntnlVec.size()) { divAuxData.tmpXPos1_M[index] = divAuxData.tmp1IntnlVec[shift_j].x; divAuxData.tmpYPos1_M[index] = divAuxData.tmp1IntnlVec[shift_j].y; divAuxData.tmpIsActive1_M[index] = true; } else { divAuxData.tmpIsActive1_M[index] = false; } if (shift_j < divAuxData.tmp2IntnlVec.size()) { divAuxData.tmpXPos2_M[index] = divAuxData.tmp2IntnlVec[shift_j].x; divAuxData.tmpYPos2_M[index] = divAuxData.tmp2IntnlVec[shift_j].y; divAuxData.tmpIsActive2_M[index] = true; } else { divAuxData.tmpIsActive2_M[index] = false; } } divAuxData.tmp1InternalActiveCounts.push_back( divAuxData.tmp1IntnlVec.size()); divAuxData.tmp2InternalActiveCounts.push_back( divAuxData.tmp2IntnlVec.size()); } void SceCells::shiftIntnlNodesByCellCenter(CVector cell1Center, CVector cell2Center) { CVector tmpCell1Center(0, 0, 0); for (uint j = 0; j < divAuxData.tmp1IntnlVec.size(); j++) { tmpCell1Center = tmpCell1Center + divAuxData.tmp1IntnlVec[j]; } tmpCell1Center = tmpCell1Center / divAuxData.tmp1IntnlVec.size(); CVector shiftVec1 = cell1Center - tmpCell1Center; for (uint j = 0; j < divAuxData.tmp1IntnlVec.size(); j++) { divAuxData.tmp1IntnlVec[j] = divAuxData.tmp1IntnlVec[j] + shiftVec1; } CVector tmpCell2Center(0, 0, 0); for (uint j = 0; j < divAuxData.tmp2IntnlVec.size(); j++) { tmpCell2Center = tmpCell2Center + divAuxData.tmp2IntnlVec[j]; } tmpCell2Center = tmpCell2Center / divAuxData.tmp2IntnlVec.size(); CVector shiftVec2 = cell2Center - tmpCell2Center; for (uint j = 0; j < divAuxData.tmp2IntnlVec.size(); j++) { divAuxData.tmp2IntnlVec[j] = divAuxData.tmp2IntnlVec[j] + shiftVec2; } } void SceCells::processMemVec(std::vector<VecVal>& tmp1, std::vector<VecVal>& tmp2) { divAuxData.tmp1VecMem.clear(); divAuxData.tmp2VecMem.clear(); uint membThreshold = allocPara_m.maxMembrNodePerCell; std::sort(tmp1.begin(), tmp1.end()); std::sort(tmp2.begin(), tmp2.end()); //assert(tmp1.size() < allocPara_m.maxMembrNodePerCell); //assert(tmp2.size() < allocPara_m.maxMembrNodePerCell); uint maxDivMembrNodeCount1 = allocPara_m.maxMembrNodePerCell - tmp1.size(); uint maxDivMembrNodeCount2 = allocPara_m.maxMembrNodePerCell - tmp2.size(); std::vector<CVector> ptsBetween1, ptsBetween2; // if size is less than 1, the situation would have already been very bad. // Just keep this statement so no seg fault would happen. if (tmp1.size() >= 1) { ptsBetween1 = obtainPtsBetween(tmp1[tmp1.size() - 1].vec, tmp1[0].vec, memNewSpacing, maxDivMembrNodeCount1); } // if size is less than 1, the situation would have already been very bad. // Just keep this statement so no seg fault would happen. 
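    // obtainPtsBetween presumably interpolates evenly spaced points (roughly memNewSpacing apart)
    // between the last and the first sorted membrane node, closing each daughter cell's membrane;
    // the cap keeps the total membrane node count within maxMembrNodePerCell.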
if (tmp2.size() >= 1) { ptsBetween2 = obtainPtsBetween(tmp2[tmp2.size() - 1].vec, tmp2[0].vec, memNewSpacing, maxDivMembrNodeCount2); } for (uint j = 0; j < tmp1.size(); j++) { divAuxData.tmp1VecMem.push_back(tmp1[j].vec); } for (uint j = 0; j < tmp2.size(); j++) { divAuxData.tmp2VecMem.push_back(tmp2[j].vec); } for (uint j = 0; j < ptsBetween1.size(); j++) { divAuxData.tmp1VecMem.push_back(ptsBetween1[j]); } for (uint j = 0; j < ptsBetween2.size(); j++) { divAuxData.tmp2VecMem.push_back(ptsBetween2[j]); } assert(divAuxData.tmp1VecMem.size() <= membThreshold); assert(divAuxData.tmp2VecMem.size() <= membThreshold); } void SceCells::obtainMembrAndIntnlNodes(uint i, vector<CVector>& membrNodes, vector<CVector>& intnlNodes) { membrNodes.clear(); intnlNodes.clear(); uint membThreshold = allocPara_m.maxMembrNodePerCell; uint maxAllNodePerCell = allocPara_m.maxAllNodePerCell; uint index; for (uint j = 0; j < maxAllNodePerCell; j++) { index = i * maxAllNodePerCell + j; if (divAuxData.tmpIsActive_M[index] != true) { continue; } double posX = divAuxData.tmpNodePosX_M[index]; double posY = divAuxData.tmpNodePosY_M[index]; if (j < membThreshold) { // means node type is membrane CVector memPos(posX, posY, 0); membrNodes.push_back(memPos); } else { CVector intnlPos(posX, posY, 0); intnlNodes.push_back(intnlPos); } } } CVector SceCells::obtainCenter(uint i) { double oldCenterX = divAuxData.tmpCenterPosX_M[i]; double oldCenterY = divAuxData.tmpCenterPosY_M[i]; CVector centerPos(oldCenterX, oldCenterY, 0); return centerPos; } CVector SceCells::calDivDir_MajorAxis(CVector center, vector<CVector>& membrNodes, double& lenAlongMajorAxis) { // not the optimal algorithm but easy to code double maxDiff = 0; CVector majorAxisDir; for (uint i = 0; i < membrNodes.size(); i++) { CVector tmpDir = membrNodes[i] - center; CVector tmpUnitDir = tmpDir.getUnitVector(); double min = 0, max = 0; for (uint j = 0; j < membrNodes.size(); j++) { CVector tmpDir2 = membrNodes[j] - center; double tmpVecProduct = tmpDir2 * tmpUnitDir; if (tmpVecProduct < min) { min = tmpVecProduct; } if (tmpVecProduct > max) { max = tmpVecProduct; } } double diff = max - min; if (diff > maxDiff) { maxDiff = diff; majorAxisDir = tmpUnitDir; } } lenAlongMajorAxis = maxDiff; return majorAxisDir; } //A&A double SceCells::calLengthAlongHertwigAxis(CVector divDir, CVector center, vector<CVector>& membrNodes) { CVector divDirUnit = divDir.getUnitVector(); double minUnit = 0, maxUnit = 0; double minOveral = 0, maxOveral = 0; for (uint i = 0; i < membrNodes.size(); i++) { CVector tmpDir = membrNodes[i] - center; CVector tmpUnitDir = tmpDir.getUnitVector(); double tmpVecProductUnit = divDirUnit * tmpUnitDir; double tmpVecProductOveral = divDirUnit * tmpDir; if (tmpVecProductUnit < minUnit) { minUnit = tmpVecProductUnit; minOveral = tmpVecProductOveral; } if (tmpVecProductUnit > maxUnit) { maxUnit = tmpVecProductUnit; maxOveral = tmpVecProductOveral; } } double lenAlongHertwigAxis = maxOveral - minOveral; return lenAlongHertwigAxis; } void SceCells::obtainTwoNewCenters(CVector& oldCenter, CVector& divDir, double len_MajorAxis, CVector& centerNew1, CVector& centerNew2) { CVector divDirUnit = divDir.getUnitVector(); double lenChange = len_MajorAxis / 2.0 * centerShiftRatio; centerNew1 = oldCenter + lenChange * divDirUnit; centerNew2 = oldCenter - lenChange * divDirUnit; } void SceCells::prepareTmpVec(uint i, CVector divDir, CVector oldCenter, std::vector<VecVal>& tmp1, std::vector<VecVal>& tmp2) { tmp1.clear(); tmp2.clear(); uint membThreshold = 
allocPara_m.maxMembrNodePerCell; uint maxAllNodePerCell = allocPara_m.maxAllNodePerCell; uint index; VecVal tmpData; CVector splitDir = divDir.rotateNintyDeg_XY_CC(); for (uint j = 0; j < maxAllNodePerCell; j++) { index = i * maxAllNodePerCell + j; if (j < membThreshold) { // means node type is membrane if (divAuxData.tmpIsActive_M[index] == true) { CVector memPos(divAuxData.tmpNodePosX_M[index], divAuxData.tmpNodePosY_M[index], 0); CVector centerToPosDir = memPos - oldCenter; CVector centerToPosUnit = centerToPosDir.getUnitVector(); CVector crossProduct = Cross(centerToPosDir, splitDir); double dotProduct = centerToPosUnit * splitDir; tmpData.val = dotProduct; tmpData.vec = memPos; if (crossProduct.z >= 0) { // counter-cloce wise tmp1.push_back(tmpData); } else { // cloce wise tmp2.push_back(tmpData); } } } else { if (divAuxData.tmpIsActive_M[index] == true) { CVector internalPos(divAuxData.tmpNodePosX_M[index], divAuxData.tmpNodePosY_M[index], 0); CVector centerToPosDir = internalPos - oldCenter; CVector shrinkedPos = centerToPosDir * shrinkRatio + oldCenter; double dotProduct = centerToPosDir * divDir; if (dotProduct > 0) { divAuxData.tmp1IntnlVec.push_back(shrinkedPos); } else { divAuxData.tmp2IntnlVec.push_back(shrinkedPos); } } } } } void SceCells::calCellArea() { thrust::counting_iterator<uint> iBegin(0), iBegin2(0); totalNodeCountForActiveCells = allocPara_m.currentActiveCellCount * allocPara_m.maxAllNodePerCell; uint maxAllNodePerCell = allocPara_m.maxAllNodePerCell; double* nodeLocXAddr = thrust::raw_pointer_cast( &(nodes->getInfoVecs().nodeLocX[0])); double* nodeLocYAddr = thrust::raw_pointer_cast( &(nodes->getInfoVecs().nodeLocY[0])); bool* nodeIsActiveAddr = thrust::raw_pointer_cast( &(nodes->getInfoVecs().nodeIsActive[0])); thrust::reduce_by_key( make_transform_iterator(iBegin, DivideFunctor(maxAllNodePerCell)), make_transform_iterator(iBegin, DivideFunctor(maxAllNodePerCell)) + totalNodeCountForActiveCells, thrust::make_transform_iterator( thrust::make_zip_iterator( thrust::make_tuple( thrust::make_permutation_iterator( cellInfoVecs.activeMembrNodeCounts.begin(), make_transform_iterator(iBegin, DivideFunctor( maxAllNodePerCell))), make_transform_iterator(iBegin, DivideFunctor(maxAllNodePerCell)), make_transform_iterator(iBegin, ModuloFunctor(maxAllNodePerCell)), make_permutation_iterator( cellInfoVecs.centerCoordX.begin(), make_transform_iterator(iBegin, DivideFunctor( maxAllNodePerCell))), make_permutation_iterator( cellInfoVecs.centerCoordY.begin(), make_transform_iterator(iBegin, DivideFunctor( maxAllNodePerCell))))), CalTriArea(maxAllNodePerCell, nodeIsActiveAddr, nodeLocXAddr, nodeLocYAddr)), cellInfoVecs.cellRanksTmpStorage.begin(), cellInfoVecs.cellAreaVec.begin(), thrust::equal_to<uint>(), thrust::plus<double>()); } //AAMIRI added to calculate Perimeter of each cell void SceCells::calCellPerim() { thrust::counting_iterator<uint> iBegin(0), iBegin2(0); totalNodeCountForActiveCells = allocPara_m.currentActiveCellCount * allocPara_m.maxAllNodePerCell; uint maxAllNodePerCell = allocPara_m.maxAllNodePerCell; double* nodeLocXAddr = thrust::raw_pointer_cast( &(nodes->getInfoVecs().nodeLocX[0])); double* nodeLocYAddr = thrust::raw_pointer_cast( &(nodes->getInfoVecs().nodeLocY[0])); bool* nodeIsActiveAddr = thrust::raw_pointer_cast( &(nodes->getInfoVecs().nodeIsActive[0])); thrust::reduce_by_key( make_transform_iterator(iBegin, DivideFunctor(maxAllNodePerCell)), make_transform_iterator(iBegin, DivideFunctor(maxAllNodePerCell)) + totalNodeCountForActiveCells, 
thrust::make_transform_iterator( thrust::make_zip_iterator( thrust::make_tuple( thrust::make_permutation_iterator( cellInfoVecs.activeMembrNodeCounts.begin(), make_transform_iterator(iBegin, DivideFunctor( maxAllNodePerCell))), make_transform_iterator(iBegin, DivideFunctor(maxAllNodePerCell)), make_transform_iterator(iBegin, ModuloFunctor(maxAllNodePerCell)), make_permutation_iterator( cellInfoVecs.centerCoordX.begin(), make_transform_iterator(iBegin, DivideFunctor( maxAllNodePerCell))), make_permutation_iterator( cellInfoVecs.centerCoordY.begin(), make_transform_iterator(iBegin, DivideFunctor( maxAllNodePerCell))))), CalPerim(maxAllNodePerCell, nodeIsActiveAddr, nodeLocXAddr, nodeLocYAddr)), cellInfoVecs.cellRanksTmpStorage.begin(), cellInfoVecs.cellPerimVec.begin(), thrust::equal_to<uint>(), thrust::plus<double>()); } CellsStatsData SceCells::outputPolyCountData() { cout << " I am at begining of outpolycount"<< std::flush ; std::cout.flush(); double sumX,sumY,cntr_X_Domain,cntr_Y_Domain ; int BdryApproach ; BdryApproach=1 ; totalNodeCountForActiveCells = allocPara_m.currentActiveCellCount * allocPara_m.maxAllNodePerCell; cout << " I am before cells area"<< endl ; calCellArea(); cout << " I am after cells area" << endl ; calCellPerim();//AAMIRI CellsStatsData result; cout << " I am after result" << endl ; uint bdryCriteria = globalConfigVars.getConfigValue("BdryCellCriteria").toInt(); // already on host; no need to call thrust::copy thrust::host_vector<int> adhIndxHost = nodes->getInfoVecs().nodeAdhIndxHostCopy; thrust::host_vector<double> growthProVecHost( allocPara_m.currentActiveCellCount); thrust::copy(cellInfoVecs.growthProgress.begin(), cellInfoVecs.growthProgress.begin() + allocPara_m.currentActiveCellCount, growthProVecHost.begin()); thrust::host_vector<double> growthProMembrVecHost( allocPara_m.currentActiveCellCount); thrust::copy(cellInfoVecs.membrGrowProgress.begin(), cellInfoVecs.membrGrowProgress.begin() + allocPara_m.currentActiveCellCount, growthProMembrVecHost.begin()); thrust::host_vector<uint> activeMembrNodeCountHost( allocPara_m.currentActiveCellCount); thrust::copy(cellInfoVecs.activeMembrNodeCounts.begin(), cellInfoVecs.activeMembrNodeCounts.begin() + allocPara_m.currentActiveCellCount, activeMembrNodeCountHost.begin()); thrust::host_vector<uint> activeIntnlNodeCountHost( allocPara_m.currentActiveCellCount); thrust::copy(cellInfoVecs.activeIntnlNodeCounts.begin(), cellInfoVecs.activeIntnlNodeCounts.begin() + allocPara_m.currentActiveCellCount, activeIntnlNodeCountHost.begin()); thrust::host_vector<double> centerCoordXHost( allocPara_m.currentActiveCellCount); thrust::host_vector<double> centerCoordYHost( allocPara_m.currentActiveCellCount); thrust::copy(cellInfoVecs.centerCoordX.begin(), cellInfoVecs.centerCoordX.begin() + allocPara_m.currentActiveCellCount, centerCoordXHost.begin()); thrust::copy(cellInfoVecs.centerCoordY.begin(), cellInfoVecs.centerCoordY.begin() + allocPara_m.currentActiveCellCount, centerCoordYHost.begin()); thrust::host_vector<double> cellAreaHost( allocPara_m.currentActiveCellCount); thrust::host_vector<double> cellPerimHost( allocPara_m.currentActiveCellCount);//AAMIRI thrust::host_vector<double> cellDppHost( allocPara_m.currentActiveCellCount);//Ali thrust::copy(cellInfoVecs.cellAreaVec.begin(), cellInfoVecs.cellAreaVec.begin() + allocPara_m.currentActiveCellCount, cellAreaHost.begin()); thrust::copy(cellInfoVecs.cellPerimVec.begin(), cellInfoVecs.cellPerimVec.begin() + allocPara_m.currentActiveCellCount, cellPerimHost.begin());//AAMIRI 
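	// cell_Dpp is copied to the host below as well, so that each CellStats entry assembled
	// later in this function can report its Dpp level along with area, perimeter and node counts.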
thrust::copy(cellInfoVecs.cell_Dpp.begin(), cellInfoVecs.cell_Dpp.begin() + allocPara_m.currentActiveCellCount, cellDppHost.begin());//Ali sumX=0 ; sumY=0 ; for (uint i = 0; i < allocPara_m.currentActiveCellCount; i++) { CellStats cellStatsData; cellStatsData.cellGrowthProgress = growthProVecHost[i]; cellStatsData.cellRank = i; bool isBdry = false; std::set<int> neighbors; std::vector<int> neighborsV; //Ali int neighborStrength[10]; //Ali int continousNoAdh = 0; map <int, int> cellAndNeighborRank ; //Ali //std::cout << "printing adhesion indicies "; //for (int ii=0 ; ii<neighborStrength.size() ; ii++){ for (int ii=0 ; ii< 10; ii++){ //Ali neighborStrength[ii]=0 ; } cellAndNeighborRank.clear(); //Ali for (uint j = 0; j < activeMembrNodeCountHost[i]; j++) { uint index = i * allocPara_m.maxAllNodePerCell + j; //std::cout << adhIndxHost[index] << ","; if (adhIndxHost[index] != -1) { uint adhCellRank = adhIndxHost[index] / allocPara_m.maxAllNodePerCell; //std::cout << adhCellRank << " "; neighbors.insert(adhCellRank); map <int, int>:: iterator iteratorMap=cellAndNeighborRank.find(adhCellRank); //Ali if (iteratorMap==cellAndNeighborRank.end()) { //Ali int NewneighborRank= neighbors.size()-1; //Ali cellAndNeighborRank[adhCellRank]=NewneighborRank; //Ali neighborStrength[NewneighborRank]=neighborStrength[NewneighborRank]+1 ; //Ali neighborsV.push_back(adhCellRank); //Ali } else { //Ali int oldNeighborRank=(*iteratorMap).second ; neighborStrength[oldNeighborRank]=neighborStrength[oldNeighborRank]+1 ; //Ali } continousNoAdh = 0; } else { continousNoAdh = continousNoAdh + 1; if (continousNoAdh > bdryCriteria) { isBdry = true; } } if (j == activeMembrNodeCountHost[i] - 1 && adhIndxHost[index] == -1) { int k = 0; uint indexNew; while (k < activeMembrNodeCountHost[i] - 1) { indexNew = i * allocPara_m.maxAllNodePerCell + k; if (adhIndxHost[indexNew] == -1) { continousNoAdh = continousNoAdh + 1; if (continousNoAdh > bdryCriteria) { isBdry = true; } k++; } else { break; } } } } cellStatsData.isBdryCell = isBdry; cellStatsData.numNeighbors = neighbors.size(); cellStatsData.currentActiveMembrNodes = activeMembrNodeCountHost[i]; cellStatsData.currentActiveIntnlNodes = activeIntnlNodeCountHost[i]; cellStatsData.neighborVec = neighbors; cellStatsData.neighborVecV = neighborsV; //Ali for (int iiii=0; iiii<10 ; iiii++){ cellStatsData.cellNeighborStrength[iiii] = neighborStrength[iiii]; } //Ali cellStatsData.membrGrowthProgress = growthProMembrVecHost[i]; cellStatsData.cellCenter = CVector(centerCoordXHost[i], centerCoordYHost[i], 0); cellStatsData.cellArea = cellAreaHost[i]; cellStatsData.cellPerim = cellPerimHost[i];//AAMIRI cellStatsData.cellDpp = cellDppHost[i];//Ali result.cellsStats.push_back(cellStatsData); sumX=sumX+cellStatsData.cellCenter.x ; sumY=sumY+cellStatsData.cellCenter.y ; } //Ali if (BdryApproach==2) { cout << "sumX=" << sumX << endl ; cout << "sumY=" << sumY << endl ; cntr_X_Domain=sumX/result.cellsStats.size() ; cntr_Y_Domain=sumY/result.cellsStats.size() ; cout << "cntr_X=" << cntr_X_Domain << endl ; cout << "cntr_Y=" << cntr_Y_Domain << endl ; double R_Max ; double Distance ; R_Max=0 ; for (uint i = 0; i < allocPara_m.currentActiveCellCount; i++) { Distance=sqrt( pow(centerCoordXHost[i]-cntr_X_Domain,2) +pow(centerCoordYHost[i]-cntr_Y_Domain,2) ) ; if (Distance > R_Max) { R_Max=Distance ; } } cout << "R_Max=" << R_Max << endl ; for (uint i = 0; i < allocPara_m.currentActiveCellCount; i++) { Distance=sqrt( pow(centerCoordXHost[i]-cntr_X_Domain,2) +pow(centerCoordYHost[i]-cntr_Y_Domain,2) ) ; 
if (Distance > 0.9* R_Max) { result.cellsStats[i].isBdryCell = true; cout << "isBdryCell"<< i<< endl ; } else { result.cellsStats[i].isBdryCell = false; cout << "isNormalCell"<< i << endl ; } } } //Ali cout << "I want to write data" << endl ; // ofstream Stress_Strain_Single ; //Stress_Strain_Single.open("Stress_Strain_Single.txt"); //Stress_Strain_Single.close() ; //Ali result.MaxDistanceX=abs(centerCoordXHost[1]-centerCoordXHost[0]); //Ali result.Cells_Extrem_Loc[0]=Tisu_MinX; result.Cells_Extrem_Loc[1]=Tisu_MaxX; result.Cells_Extrem_Loc[2]=Tisu_MinY; result.Cells_Extrem_Loc[3]=Tisu_MaxY ; result.F_Ext_Out=membrPara.F_Ext_Incline*curTime ; //if (dt==curTime) { //result.Init_Displace=MaxX-MinX ; // } //Ali return result; } __device__ bool bigEnough(double& num) { if (num > minDivisor) { return true; } else { return false; } } __device__ double cross_Z(double vecA_X, double vecA_Y, double vecB_X, double vecB_Y) { return vecA_X * vecB_Y - vecA_Y * vecB_X; } /* __device__ double calBendMulti(double& angle, uint activeMembrCt) { double equAngle = PI - PI / activeMembrCt; return bendCoeff * (angle - equAngle); } */ //AAMIRI __device__ double calBendMulti_Mitotic(double& angle, uint activeMembrCt, double& progress, double mitoticCri) { double equAngle = PI - PI / activeMembrCt; if (progress <= mitoticCri){ return bendCoeff * (angle - equAngle);} else{ return (angle - equAngle)*(bendCoeff + (bendCoeff_Mitotic - bendCoeff) * (progress - mitoticCri)/(1.0 - mitoticCri)); } } void SceCells::applySceCellDisc_M() { totalNodeCountForActiveCells = allocPara_m.currentActiveCellCount * allocPara_m.maxAllNodePerCell; uint maxAllNodePerCell = allocPara_m.maxAllNodePerCell; uint maxMemNodePerCell = allocPara_m.maxMembrNodePerCell; thrust::counting_iterator<uint> iBegin(0); double* nodeLocXAddr = thrust::raw_pointer_cast( &(nodes->getInfoVecs().nodeLocX[0])); double* nodeLocYAddr = thrust::raw_pointer_cast( &(nodes->getInfoVecs().nodeLocY[0])); bool* nodeIsActiveAddr = thrust::raw_pointer_cast( &(nodes->getInfoVecs().nodeIsActive[0])); double grthPrgrCriVal_M =growthAuxData.grthPrgrCriVal_M_Ori ; // for now constant growthAuxData.grthProgrEndCPU // - growthAuxData.prolifDecay // * (growthAuxData.grthProgrEndCPU // - growthAuxData.grthPrgrCriVal_M_Ori); thrust::transform( thrust::make_zip_iterator( thrust::make_tuple( thrust::make_permutation_iterator( cellInfoVecs.activeMembrNodeCounts.begin(), make_transform_iterator(iBegin, DivideFunctor(maxAllNodePerCell))), thrust::make_permutation_iterator( cellInfoVecs.activeIntnlNodeCounts.begin(), make_transform_iterator(iBegin, DivideFunctor(maxAllNodePerCell))), make_transform_iterator(iBegin, DivideFunctor(maxAllNodePerCell)), make_transform_iterator(iBegin, ModuloFunctor(maxAllNodePerCell)), thrust::make_permutation_iterator( cellInfoVecs.growthProgress.begin(), make_transform_iterator(iBegin, DivideFunctor(maxAllNodePerCell))), nodes->getInfoVecs().nodeVelX.begin(), nodes->getInfoVecs().nodeVelY.begin())), thrust::make_zip_iterator( thrust::make_tuple( thrust::make_permutation_iterator( cellInfoVecs.activeMembrNodeCounts.begin(), make_transform_iterator(iBegin, DivideFunctor(maxAllNodePerCell))), thrust::make_permutation_iterator( cellInfoVecs.activeIntnlNodeCounts.begin(), make_transform_iterator(iBegin, DivideFunctor(maxAllNodePerCell))), make_transform_iterator(iBegin, DivideFunctor(maxAllNodePerCell)), make_transform_iterator(iBegin, ModuloFunctor(maxAllNodePerCell)), thrust::make_permutation_iterator( cellInfoVecs.growthProgress.begin(), 
make_transform_iterator(iBegin, DivideFunctor(maxAllNodePerCell))), nodes->getInfoVecs().nodeVelX.begin(), nodes->getInfoVecs().nodeVelY.begin())) + totalNodeCountForActiveCells, thrust::make_zip_iterator( thrust::make_tuple(nodes->getInfoVecs().nodeVelX.begin(), nodes->getInfoVecs().nodeVelY.begin(), nodes->getInfoVecs().nodeF_MI_M_x.begin(), //Ali added for cell pressure calculation nodes->getInfoVecs().nodeF_MI_M_y.begin())),// ALi added for cell pressure calculation AddSceCellForce(maxAllNodePerCell, maxMemNodePerCell, nodeLocXAddr, nodeLocYAddr, nodeIsActiveAddr, grthPrgrCriVal_M)); } __device__ void calAndAddIB_M(double& xPos, double& yPos, double& xPos2, double& yPos2, double& growPro, double& xRes, double& yRes, double grthPrgrCriVal_M) { double linkLength = compDist2D(xPos, yPos, xPos2, yPos2); double forceValue = 0; if (growPro > grthPrgrCriEnd_M) { if (linkLength < sceIBDiv_M[4]) { forceValue = -sceIBDiv_M[0] / sceIBDiv_M[2] * exp(-linkLength / sceIBDiv_M[2]) + sceIBDiv_M[1] / sceIBDiv_M[3] * exp(-linkLength / sceIBDiv_M[3]); } } else if (growPro > grthPrgrCriVal_M) { double percent = (growPro - grthPrgrCriVal_M) / (grthPrgrCriEnd_M - grthPrgrCriVal_M); double lenLimit = percent * (sceIBDiv_M[4]) + (1.0 - percent) * sceIB_M[4]; if (linkLength < lenLimit) { double intnlBPara0 = percent * (sceIBDiv_M[0]) + (1.0 - percent) * sceIB_M[0]; double intnlBPara1 = percent * (sceIBDiv_M[1]) + (1.0 - percent) * sceIB_M[1]; double intnlBPara2 = percent * (sceIBDiv_M[2]) + (1.0 - percent) * sceIB_M[2]; double intnlBPara3 = percent * (sceIBDiv_M[3]) + (1.0 - percent) * sceIB_M[3]; forceValue = -intnlBPara0 / intnlBPara2 * exp(-linkLength / intnlBPara2) + intnlBPara1 / intnlBPara3 * exp(-linkLength / intnlBPara3); } } else { if (linkLength < sceIB_M[4]) { forceValue = -sceIB_M[0] / sceIB_M[2] * exp(-linkLength / sceIB_M[2]) + sceIB_M[1] / sceIB_M[3] * exp(-linkLength / sceIB_M[3]); } } xRes = xRes + forceValue * (xPos2 - xPos) / linkLength; yRes = yRes + forceValue * (yPos2 - yPos) / linkLength; } //Ali function added for eventually computing pressure for each cells __device__ void calAndAddIB_M2(double& xPos, double& yPos, double& xPos2, double& yPos2, double& growPro, double& xRes, double& yRes, double & F_MI_M_x, double & F_MI_M_y, double grthPrgrCriVal_M) { double linkLength = compDist2D(xPos, yPos, xPos2, yPos2); double forceValue = 0; if (growPro > grthPrgrCriEnd_M) { if (linkLength < sceIBDiv_M[4]) { forceValue = -sceIBDiv_M[0] / sceIBDiv_M[2] * exp(-linkLength / sceIBDiv_M[2]) + sceIBDiv_M[1] / sceIBDiv_M[3] * exp(-linkLength / sceIBDiv_M[3]); } } else if (growPro > grthPrgrCriVal_M) { double percent = (growPro - grthPrgrCriVal_M) / (grthPrgrCriEnd_M - grthPrgrCriVal_M); double lenLimit = percent * (sceIBDiv_M[4]) + (1.0 - percent) * sceIB_M[4]; if (linkLength < lenLimit) { double intnlBPara0 = percent * (sceIBDiv_M[0]) + (1.0 - percent) * sceIB_M[0]; double intnlBPara1 = percent * (sceIBDiv_M[1]) + (1.0 - percent) * sceIB_M[1]; double intnlBPara2 = percent * (sceIBDiv_M[2]) + (1.0 - percent) * sceIB_M[2]; double intnlBPara3 = percent * (sceIBDiv_M[3]) + (1.0 - percent) * sceIB_M[3]; forceValue = -intnlBPara0 / intnlBPara2 * exp(-linkLength / intnlBPara2) + intnlBPara1 / intnlBPara3 * exp(-linkLength / intnlBPara3); } } else { if (linkLength < sceIB_M[4]) { forceValue = -sceIB_M[0] / sceIB_M[2] * exp(-linkLength / sceIB_M[2]) + sceIB_M[1] / sceIB_M[3] * exp(-linkLength / sceIB_M[3]); } } F_MI_M_x=F_MI_M_x+forceValue * (xPos2 - xPos) / linkLength; F_MI_M_y=F_MI_M_y+forceValue * (yPos2 - 
yPos) / linkLength; xRes = xRes + forceValue * (xPos2 - xPos) / linkLength; yRes = yRes + forceValue * (yPos2 - yPos) / linkLength; } __device__ void calAndAddII_M(double& xPos, double& yPos, double& xPos2, double& yPos2, double& growPro, double& xRes, double& yRes, double grthPrgrCriVal_M) { double linkLength = compDist2D(xPos, yPos, xPos2, yPos2); double forceValue = 0; if (growPro > grthPrgrCriEnd_M) { if (linkLength < sceIIDiv_M[4]) { forceValue = -sceIIDiv_M[0] / sceIIDiv_M[2] * exp(-linkLength / sceIIDiv_M[2]) + sceIIDiv_M[1] / sceIIDiv_M[3] * exp(-linkLength / sceIIDiv_M[3]); } } else if (growPro > grthPrgrCriVal_M) { double percent = (growPro - grthPrgrCriVal_M) / (grthPrgrCriEnd_M - grthPrgrCriVal_M); double lenLimit = percent * (sceIIDiv_M[4]) + (1.0 - percent) * sceII_M[4]; if (linkLength < lenLimit) { double intraPara0 = percent * (sceIIDiv_M[0]) + (1.0 - percent) * sceII_M[0]; double intraPara1 = percent * (sceIIDiv_M[1]) + (1.0 - percent) * sceII_M[1]; double intraPara2 = percent * (sceIIDiv_M[2]) + (1.0 - percent) * sceII_M[2]; double intraPara3 = percent * (sceIIDiv_M[3]) + (1.0 - percent) * sceII_M[3]; forceValue = -intraPara0 / intraPara2 * exp(-linkLength / intraPara2) + intraPara1 / intraPara3 * exp(-linkLength / intraPara3); } } else { if (linkLength < sceII_M[4]) { forceValue = -sceII_M[0] / sceII_M[2] * exp(-linkLength / sceII_M[2]) + sceII_M[1] / sceII_M[3] * exp(-linkLength / sceII_M[3]); } } xRes = xRes + forceValue * (xPos2 - xPos) / linkLength; yRes = yRes + forceValue * (yPos2 - yPos) / linkLength; }
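// The three helpers above (calAndAddIB_M, calAndAddIB_M2 and calAndAddII_M) evaluate the same
// two-exponential force profile F(r) = -p0 / p2 * exp(-r / p2) + p1 / p3 * exp(-r / p3),
// applied only while the node-to-node distance r stays below the cutoff p4. Cells with
// growPro > grthPrgrCriEnd_M use the division parameter set (sceIBDiv_M / sceIIDiv_M);
// for grthPrgrCriVal_M < growPro <= grthPrgrCriEnd_M the parameters are interpolated linearly
// between the normal and division sets; otherwise the normal set (sceIB_M / sceII_M) is used.
// calAndAddIB_M2 additionally accumulates the same force into F_MI_M_x / F_MI_M_y so that the
// per-cell pressure can be computed later.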
f531f1120e603730e348fbd9e32f3b29d3c36e5f.cu
#include "SceCells.h" #include <cmath> double epsilon = 1.0e-12; __constant__ double membrEquLen; __constant__ double membrStiff; __constant__ double membrStiff_Mitotic; //Ali June 30 __constant__ double pI; __constant__ double minLength; __constant__ double minDivisor; __constant__ uint maxAllNodePerCell; __constant__ uint maxMembrPerCell; __constant__ uint maxIntnlPerCell; __constant__ double bendCoeff; __constant__ double bendCoeff_Mitotic;//AAMIRI __constant__ double sceIB_M[5]; __constant__ double sceIBDiv_M[5]; __constant__ double sceII_M[5]; __constant__ double sceIIDiv_M[5]; __constant__ double grthPrgrCriEnd_M; __constant__ double F_Ext_Incline_M2 ; //Ali //Ali & Abu June 30th __device__ double calMembrForce_Mitotic(double& length, double& progress, double mitoticCri) { if (progress <= mitoticCri) { return (length - membrEquLen) * membrStiff; } else { return (length - membrEquLen) *(membrStiff+ (membrStiff_Mitotic-membrStiff)* (progress-mitoticCri)/(1.0-mitoticCri)); } } // //Ali __device__ double calExtForce(double& curTime) { return curTime * F_Ext_Incline_M2; } //Ali __device__ double obtainRandAngle(uint& cellRank, uint& seed) { thrust::default_random_engine rng(seed); // discard n numbers to avoid correlation rng.discard(cellRank); thrust::uniform_real_distribution<double> u0Pi(0, 2.0 * pI); double randomAngle = u0Pi(rng); return randomAngle; } __device__ uint obtainNewIntnlNodeIndex(uint& cellRank, uint& curActiveCount) { return (cellRank * maxAllNodePerCell + maxMembrPerCell + curActiveCount); } //AAMIRI __device__ uint obtainLastIntnlNodeIndex(uint& cellRank, uint& curActiveCount) { return (cellRank * maxAllNodePerCell + maxMembrPerCell + curActiveCount ); } //AAMIRI __device__ uint obtainMembEndNode(uint& cellRank, uint& activeMembrNodeThis) { return (cellRank * maxAllNodePerCell + activeMembrNodeThis - 1 ); } __device__ bool isAllIntnlFilled(uint& currentIntnlCount) { if (currentIntnlCount < maxIntnlPerCell) { return false; } else { return true; } } //AAMIRI __device__ int obtainRemovingMembrNodeID(uint &cellRank, uint& activeMembrNodes, uint& seed) { thrust::default_random_engine rng(seed); // discard n numbers to avoid correlation rng.discard(activeMembrNodes); thrust::uniform_int_distribution<double> dist(0, activeMembrNodes-1); int randomNode = dist(rng); return (cellRank * maxAllNodePerCell + randomNode); } //AAMIRI __device__ bool isAllIntnlEmptied(uint& currentIntnlCount) { if (currentIntnlCount > 0) { return false; } else { return true; } } //AAMIRI __device__ bool isAllMembrEmptied(uint& currentMembrCount) { if (currentMembrCount > 0) { return false; } else { return true; } } __device__ bool longEnough(double& length) { if (length > minLength) { return true; } else { return false; } } __device__ double compDist2D(double &xPos, double &yPos, double &xPos2, double &yPos2) { return sqrt( (xPos - xPos2) * (xPos - xPos2) + (yPos - yPos2) * (yPos - yPos2)); } void SceCells::distributeBdryIsActiveInfo() { thrust::fill(nodes->getInfoVecs().nodeIsActive.begin(), nodes->getInfoVecs().nodeIsActive.begin() + allocPara.startPosProfile, true); } void SceCells::distributeProfileIsActiveInfo() { thrust::fill( nodes->getInfoVecs().nodeIsActive.begin() + allocPara.startPosProfile, nodes->getInfoVecs().nodeIsActive.begin() + allocPara.startPosProfile + nodes->getAllocPara().currentActiveProfileNodeCount, true); } void SceCells::distributeECMIsActiveInfo() { uint totalNodeCountForActiveECM = allocPara.currentActiveECM * allocPara.maxNodePerECM; thrust::counting_iterator<uint> 
countingBegin(0); thrust::counting_iterator<uint> countingEnd(totalNodeCountForActiveECM); thrust::fill( nodes->getInfoVecs().nodeIsActive.begin() + allocPara.startPosECM, nodes->getInfoVecs().nodeIsActive.begin() + totalNodeCountForActiveECM + allocPara.startPosECM, true); } void SceCells::distributeCellIsActiveInfo() { totalNodeCountForActiveCells = allocPara.currentActiveCellCount * allocPara.maxNodeOfOneCell; thrust::counting_iterator<uint> countingBegin(0); thrust::counting_iterator<uint> countingEnd(totalNodeCountForActiveCells); thrust::transform( thrust::make_transform_iterator(countingBegin, ModuloFunctor(allocPara.maxNodeOfOneCell)), thrust::make_transform_iterator(countingEnd, ModuloFunctor(allocPara.maxNodeOfOneCell)), thrust::make_permutation_iterator( cellInfoVecs.activeNodeCountOfThisCell.begin(), make_transform_iterator(countingBegin, DivideFunctor(allocPara.maxNodeOfOneCell))), nodes->getInfoVecs().nodeIsActive.begin() + allocPara.startPosCells, thrust::less<uint>()); } void SceCells::distributeCellGrowthProgress() { totalNodeCountForActiveCells = allocPara.currentActiveCellCount * allocPara.maxNodeOfOneCell; thrust::counting_iterator<uint> countingBegin(0); thrust::counting_iterator<uint> countingEnd(totalNodeCountForActiveCells); thrust::copy( thrust::make_permutation_iterator( cellInfoVecs.growthProgress.begin(), make_transform_iterator(countingBegin, DivideFunctor(allocPara.maxNodeOfOneCell))), thrust::make_permutation_iterator( cellInfoVecs.growthProgress.begin(), make_transform_iterator(countingEnd, DivideFunctor(allocPara.maxNodeOfOneCell))), nodes->getInfoVecs().nodeGrowPro.begin() + allocPara.startPosCells); } void MembrPara::initFromConfig() { membrEquLenCPU = globalConfigVars.getConfigValue("MembrEquLen").toDouble(); membrStiffCPU = globalConfigVars.getConfigValue("MembrStiff").toDouble(); membrStiff_Mitotic = globalConfigVars.getConfigValue("MembrStiff_Mitotic").toDouble(); //Ali June30 membrGrowCoeff_Ori = globalConfigVars.getConfigValue("MembrGrowCoeff").toDouble(); membrGrowLimit_Ori = globalConfigVars.getConfigValue("MembrGrowLimit").toDouble(); membrGrowCoeff = membrGrowCoeff_Ori; membrGrowLimit = membrGrowLimit_Ori; //Ali F_Ext_Incline = globalConfigVars.getConfigValue("FExtIncline").toDouble(); //Ali membrBendCoeff = globalConfigVars.getConfigValue("MembrBenCoeff").toDouble(); //AAMIRI membrBendCoeff_Mitotic = globalConfigVars.getConfigValue("MembrBenCoeff_Mitotic").toDouble(); adjustLimit = globalConfigVars.getConfigValue("MembrAdjustLimit").toDouble(); adjustCoeff = globalConfigVars.getConfigValue("MembrAdjustCoeff").toDouble(); growthConst_N = globalConfigVars.getConfigValue("MembrGrowthConst").toDouble(); initMembrCt_N = globalConfigVars.getConfigValue("InitMembrNodeCount").toInt(); initIntnlCt_N = globalConfigVars.getConfigValue("InitCellNodeCount").toInt(); } SceCells::SceCells() { //curTime = 0 + 55800.0;//AAMIRI // Ali I comment that our safely on 04/04/2017 std ::cout << "I am in SceCells constructor with zero element "<<InitTimeStage<<std::endl ; } void SceCells::growAtRandom(double d_t) { totalNodeCountForActiveCells = allocPara.currentActiveCellCount * allocPara.maxNodeOfOneCell; // randomly select growth direction and speed. 
randomizeGrowth(); //std::cout << "after copy grow info" << std::endl; updateGrowthProgress(); //std::cout << "after update growth progress" << std::endl; decideIsScheduleToGrow(); //std::cout << "after decode os schedule to grow" << std::endl; computeCellTargetLength(); //std::cout << "after compute cell target length" << std::endl; computeDistToCellCenter(); //std::cout << "after compute dist to center" << std::endl; findMinAndMaxDistToCenter(); //std::cout << "after find min and max dist" << std::endl; computeLenDiffExpCur(); //std::cout << "after compute diff " << std::endl; stretchCellGivenLenDiff(); //std::cout << "after apply stretch force" << std::endl; cellChemotaxis(); //std::cout << "after apply cell chemotaxis" << std::endl; addPointIfScheduledToGrow(); //std::cout << "after adding node" << std::endl; } /** * Use the growth magnitude and dt to update growthProgress. */ void SceCells::updateGrowthProgress() { thrust::transform(cellInfoVecs.growthSpeed.begin(), cellInfoVecs.growthSpeed.begin() + allocPara.currentActiveCellCount, cellInfoVecs.growthProgress.begin(), cellInfoVecs.growthProgress.begin(), SaxpyFunctorWithMaxOfOne(dt)); } /** * Decide if the cells are going to add a node or not. * Use lastCheckPoint and growthProgress to decide whether add point or not */ void SceCells::decideIsScheduleToGrow() { thrust::transform( thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.growthProgress.begin(), cellInfoVecs.lastCheckPoint.begin())), thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.growthProgress.begin(), cellInfoVecs.lastCheckPoint.begin())) + allocPara.currentActiveCellCount, cellInfoVecs.isScheduledToGrow.begin(), PtCondiOp(miscPara.growThreshold)); } /** * Calculate target length of cell given the cell growth progress. * length is along the growth direction. */ void SceCells::computeCellTargetLength() { thrust::transform(cellInfoVecs.growthProgress.begin(), cellInfoVecs.growthProgress.begin() + allocPara.currentActiveCellCount, cellInfoVecs.expectedLength.begin(), CompuTarLen(bioPara.cellInitLength, bioPara.cellFinalLength)); } /** * Compute distance of each node to its corresponding cell center. * The distantce could be either positive or negative, depending on the pre-defined * growth direction. 
*/ void SceCells::computeDistToCellCenter() { thrust::transform( thrust::make_zip_iterator( thrust::make_tuple( make_permutation_iterator( cellInfoVecs.centerCoordX.begin(), make_transform_iterator(countingBegin, DivideFunctor( allocPara.maxNodeOfOneCell))), make_permutation_iterator( cellInfoVecs.centerCoordY.begin(), make_transform_iterator(countingBegin, DivideFunctor( allocPara.maxNodeOfOneCell))), make_permutation_iterator( cellInfoVecs.growthXDir.begin(), make_transform_iterator(countingBegin, DivideFunctor( allocPara.maxNodeOfOneCell))), make_permutation_iterator( cellInfoVecs.growthYDir.begin(), make_transform_iterator(countingBegin, DivideFunctor( allocPara.maxNodeOfOneCell))), nodes->getInfoVecs().nodeLocX.begin() + allocPara.startPosCells, nodes->getInfoVecs().nodeLocY.begin() + allocPara.startPosCells, nodes->getInfoVecs().nodeIsActive.begin() + allocPara.startPosCells)), thrust::make_zip_iterator( thrust::make_tuple( make_permutation_iterator( cellInfoVecs.centerCoordX.begin(), make_transform_iterator(countingBegin, DivideFunctor( allocPara.maxNodeOfOneCell))), make_permutation_iterator( cellInfoVecs.centerCoordY.begin(), make_transform_iterator(countingBegin, DivideFunctor( allocPara.maxNodeOfOneCell))), make_permutation_iterator( cellInfoVecs.growthXDir.begin(), make_transform_iterator(countingBegin, DivideFunctor( allocPara.maxNodeOfOneCell))), make_permutation_iterator( cellInfoVecs.growthYDir.begin(), make_transform_iterator(countingBegin, DivideFunctor( allocPara.maxNodeOfOneCell))), nodes->getInfoVecs().nodeLocX.begin() + allocPara.startPosCells, nodes->getInfoVecs().nodeLocY.begin() + allocPara.startPosCells, nodes->getInfoVecs().nodeIsActive.begin() + allocPara.startPosCells)) + totalNodeCountForActiveCells, cellNodeInfoVecs.distToCenterAlongGrowDir.begin(), CompuDist()); } /** * For nodes of each cell, find the maximum and minimum distance to the center. * We will then calculate the current length of a cell along its growth direction * using max and min distance to the center. */ void SceCells::findMinAndMaxDistToCenter() { thrust::reduce_by_key( make_transform_iterator(countingBegin, DivideFunctor(allocPara.maxNodeOfOneCell)), make_transform_iterator(countingBegin, DivideFunctor(allocPara.maxNodeOfOneCell)) + totalNodeCountForActiveCells, cellNodeInfoVecs.distToCenterAlongGrowDir.begin(), cellInfoVecs.cellRanksTmpStorage.begin(), cellInfoVecs.smallestDistance.begin(), thrust::equal_to<uint>(), thrust::minimum<double>()); // for nodes of each cell, find the maximum distance from the node to the corresponding // cell center along the pre-defined growth direction. thrust::reduce_by_key( make_transform_iterator(countingBegin, DivideFunctor(allocPara.maxNodeOfOneCell)), make_transform_iterator(countingBegin, DivideFunctor(allocPara.maxNodeOfOneCell)) + totalNodeCountForActiveCells, cellNodeInfoVecs.distToCenterAlongGrowDir.begin(), cellInfoVecs.cellRanksTmpStorage.begin(), cellInfoVecs.biggestDistance.begin(), thrust::equal_to<uint>(), thrust::maximum<double>()); } /** * Compute the difference for cells between their expected length and current length. 
*/ void SceCells::computeLenDiffExpCur() { thrust::transform( thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.expectedLength.begin(), cellInfoVecs.smallestDistance.begin(), cellInfoVecs.biggestDistance.begin())), thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.expectedLength.begin(), cellInfoVecs.smallestDistance.begin(), cellInfoVecs.biggestDistance.begin())) + allocPara.currentActiveCellCount, cellInfoVecs.lengthDifference.begin(), CompuDiff()); } /** * Use the difference that just computed and growthXDir&growthYDir * to apply stretching force (velocity) on nodes of all cells */ void SceCells::stretchCellGivenLenDiff() { thrust::transform( thrust::make_zip_iterator( thrust::make_tuple( cellNodeInfoVecs.distToCenterAlongGrowDir.begin(), make_permutation_iterator( cellInfoVecs.lengthDifference.begin(), make_transform_iterator(countingBegin, DivideFunctor( allocPara.maxNodeOfOneCell))), make_permutation_iterator( cellInfoVecs.growthXDir.begin(), make_transform_iterator(countingBegin, DivideFunctor( allocPara.maxNodeOfOneCell))), make_permutation_iterator( cellInfoVecs.growthYDir.begin(), make_transform_iterator(countingBegin, DivideFunctor( allocPara.maxNodeOfOneCell))), nodes->getInfoVecs().nodeVelX.begin() + allocPara.startPosCells, nodes->getInfoVecs().nodeVelY.begin() + allocPara.startPosCells)), thrust::make_zip_iterator( thrust::make_tuple( cellNodeInfoVecs.distToCenterAlongGrowDir.begin(), make_permutation_iterator( cellInfoVecs.lengthDifference.begin(), make_transform_iterator(countingBegin, DivideFunctor( allocPara.maxNodeOfOneCell))), make_permutation_iterator( cellInfoVecs.growthXDir.begin(), make_transform_iterator(countingBegin, DivideFunctor( allocPara.maxNodeOfOneCell))), make_permutation_iterator( cellInfoVecs.growthYDir.begin(), make_transform_iterator(countingBegin, DivideFunctor( allocPara.maxNodeOfOneCell))), nodes->getInfoVecs().nodeVelX.begin() + allocPara.startPosCells, nodes->getInfoVecs().nodeVelY.begin() + allocPara.startPosCells)) + totalNodeCountForActiveCells, thrust::make_zip_iterator( thrust::make_tuple( nodes->getInfoVecs().nodeVelX.begin() + allocPara.startPosCells, nodes->getInfoVecs().nodeVelY.begin() + allocPara.startPosCells)), ApplyStretchForce(bioPara.elongationCoefficient)); } /** * This is just an attempt. Cells move according to chemicals. 
*/ void SceCells::cellChemotaxis() { thrust::transform( thrust::make_zip_iterator( thrust::make_tuple( make_permutation_iterator( cellInfoVecs.growthSpeed.begin(), make_transform_iterator(countingBegin, DivideFunctor( allocPara.maxNodeOfOneCell))), make_permutation_iterator( cellInfoVecs.growthXDir.begin(), make_transform_iterator(countingBegin, DivideFunctor( allocPara.maxNodeOfOneCell))), make_permutation_iterator( cellInfoVecs.growthYDir.begin(), make_transform_iterator(countingBegin, DivideFunctor( allocPara.maxNodeOfOneCell))), nodes->getInfoVecs().nodeVelX.begin() + allocPara.startPosCells, nodes->getInfoVecs().nodeVelY.begin() + allocPara.startPosCells)), thrust::make_zip_iterator( thrust::make_tuple( make_permutation_iterator( cellInfoVecs.growthSpeed.begin(), make_transform_iterator(countingBegin, DivideFunctor( allocPara.maxNodeOfOneCell))), make_permutation_iterator( cellInfoVecs.growthXDir.begin(), make_transform_iterator(countingBegin, DivideFunctor( allocPara.maxNodeOfOneCell))), make_permutation_iterator( cellInfoVecs.growthYDir.begin(), make_transform_iterator(countingBegin, DivideFunctor( allocPara.maxNodeOfOneCell))), nodes->getInfoVecs().nodeVelX.begin() + allocPara.startPosCells, nodes->getInfoVecs().nodeVelY.begin() + allocPara.startPosCells)) + totalNodeCountForActiveCells, thrust::make_zip_iterator( thrust::make_tuple( nodes->getInfoVecs().nodeVelX.begin() + allocPara.startPosCells, nodes->getInfoVecs().nodeVelY.begin() + allocPara.startPosCells)), ApplyChemoVel(bioPara.chemoCoefficient)); } /** * Adjust the velocities of nodes. * For example, velocity of boundary nodes must be zero. */ void SceCells::adjustNodeVel() { thrust::counting_iterator<uint> countingIterBegin(0); thrust::counting_iterator<uint> countingIterEnd( totalNodeCountForActiveCells + allocPara.startPosCells); thrust::transform( thrust::make_zip_iterator( thrust::make_tuple(nodes->getInfoVecs().nodeVelX.begin(), nodes->getInfoVecs().nodeVelY.begin(), nodes->getInfoVecs().nodeIsActive.begin(), nodes->getInfoVecs().nodeCellType.begin(), countingIterBegin)), thrust::make_zip_iterator( thrust::make_tuple(nodes->getInfoVecs().nodeVelX.begin(), nodes->getInfoVecs().nodeVelY.begin(), nodes->getInfoVecs().nodeIsActive.begin(), nodes->getInfoVecs().nodeCellType.begin(), countingIterBegin)) + totalNodeCountForActiveCells + allocPara.startPosCells, thrust::make_zip_iterator( thrust::make_tuple(nodes->getInfoVecs().nodeVelX.begin(), nodes->getInfoVecs().nodeVelY.begin())), VelocityModifier(allocPara.startPosProfile, allocPara.currentActiveProfileNodeCount)); } /** * Move nodes according to the velocity we just adjusted. */ void SceCells::moveNodes() { thrust::transform( thrust::make_zip_iterator( thrust::make_tuple(nodes->getInfoVecs().nodeVelX.begin(), nodes->getInfoVecs().nodeVelY.begin())), thrust::make_zip_iterator( thrust::make_tuple(nodes->getInfoVecs().nodeVelX.begin(), nodes->getInfoVecs().nodeVelY.begin())) + totalNodeCountForActiveCells + allocPara.startPosCells, thrust::make_zip_iterator( thrust::make_tuple(nodes->getInfoVecs().nodeLocX.begin(), nodes->getInfoVecs().nodeLocY.begin())), thrust::make_zip_iterator( thrust::make_tuple(nodes->getInfoVecs().nodeLocX.begin(), nodes->getInfoVecs().nodeLocY.begin())), SaxpyFunctorDim2(dt)); } /** * Add a point to a cell if it is scheduled to grow. 
* This step does not guarantee success ; If adding new point failed, it will not change * isScheduleToGrow and activeNodeCount; */ void SceCells::addPointIfScheduledToGrow() { thrust::transform( thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.isScheduledToGrow.begin(), cellInfoVecs.activeNodeCountOfThisCell.begin(), cellInfoVecs.centerCoordX.begin(), cellInfoVecs.centerCoordY.begin(), countingBegin, cellInfoVecs.lastCheckPoint.begin())), thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.isScheduledToGrow.begin(), cellInfoVecs.activeNodeCountOfThisCell.begin(), cellInfoVecs.centerCoordX.begin(), cellInfoVecs.centerCoordY.begin(), countingBegin, cellInfoVecs.lastCheckPoint.begin())) + allocPara.currentActiveCellCount, thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.isScheduledToGrow.begin(), cellInfoVecs.activeNodeCountOfThisCell.begin(), cellInfoVecs.lastCheckPoint.begin())), AddPtOp(allocPara.maxNodeOfOneCell, miscPara.addNodeDistance, miscPara.minDistanceToOtherNode, growthAuxData.nodeIsActiveAddress, growthAuxData.nodeXPosAddress, growthAuxData.nodeYPosAddress, time(NULL), miscPara.growThreshold)); } //Ali commented this constructor in 04/04/2017 SceCells::SceCells(SceNodes* nodesInput, std::vector<uint>& numOfInitActiveNodesOfCells, std::vector<SceNodeType>& cellTypes) : countingBegin(0), initIntnlNodeCount( nodesInput->getAllocPara().maxNodeOfOneCell / 2), initGrowthProgress( 0.0) { curTime = 0.0 + 55800.0;//AAMIRI std ::cout << "I am in SceCells constructor with polymorphism shape "<<InitTimeStage<<std::endl ; initialize(nodesInput); copyInitActiveNodeCount(numOfInitActiveNodesOfCells); thrust::device_vector<SceNodeType> cellTypesToPass = cellTypes; setCellTypes(cellTypesToPass); distributeIsActiveInfo(); } SceCells::SceCells(SceNodes* nodesInput, std::vector<uint>& initActiveMembrNodeCounts, std::vector<uint>& initActiveIntnlNodeCounts, std::vector<double> &initGrowProgVec, double InitTimeStage) { // curTime = 0.0 + 55800.0;//AAMIRIi curTime=InitTimeStage ; std ::cout << "I am in SceCells constructor with number of inputs "<<InitTimeStage<<std::endl ; lastTimeExchange=0 ; firstTimeReadDpp=true ; //currentActiveCellCountOld=1 ; // small number tmpDebug = false; aniDebug = false; membrPara.initFromConfig(); shrinkRatio = globalConfigVars.getConfigValue("ShrinkRatio").toDouble(); centerShiftRatio = globalConfigVars.getConfigValue("CenterShiftRatio").toDouble(); memNewSpacing = globalConfigVars.getConfigValue("MembrLenDiv").toDouble(); initialize_M(nodesInput); cout<< "size of dpp in constructor is "<< cellInfoVecs.cell_Dpp.size() << endl ; copyToGPUConstMem(); copyInitActiveNodeCount_M(initActiveMembrNodeCounts, initActiveIntnlNodeCounts, initGrowProgVec); } void SceCells::initCellInfoVecs() { cellInfoVecs.growthProgress.resize(allocPara.maxCellCount, 0.0); cellInfoVecs.expectedLength.resize(allocPara.maxCellCount, bioPara.cellInitLength); cellInfoVecs.lengthDifference.resize(allocPara.maxCellCount, 0.0); cellInfoVecs.smallestDistance.resize(allocPara.maxCellCount); cellInfoVecs.biggestDistance.resize(allocPara.maxCellCount); cellInfoVecs.activeNodeCountOfThisCell.resize(allocPara.maxCellCount); cellInfoVecs.lastCheckPoint.resize(allocPara.maxCellCount, 0.0); cellInfoVecs.isDividing.resize(allocPara.maxCellCount); cellInfoVecs.cellTypes.resize(allocPara.maxCellCount, MX); cellInfoVecs.isScheduledToGrow.resize(allocPara.maxCellCount, false); cellInfoVecs.centerCoordX.resize(allocPara.maxCellCount); cellInfoVecs.centerCoordY.resize(allocPara.maxCellCount); 
cellInfoVecs.centerCoordZ.resize(allocPara.maxCellCount); cellInfoVecs.cellRanksTmpStorage.resize(allocPara.maxCellCount); cellInfoVecs.growthSpeed.resize(allocPara.maxCellCount, 0.0); cellInfoVecs.growthXDir.resize(allocPara.maxCellCount); cellInfoVecs.growthYDir.resize(allocPara.maxCellCount); cellInfoVecs.isRandGrowInited.resize(allocPara.maxCellCount, false); } void SceCells::initCellInfoVecs_M() { //std::cout << "max cell count = " << allocPara_m.maxCellCount << std::endl; cellInfoVecs.Cell_Damp.resize(allocPara_m.maxCellCount, 36.0); //Ali cellInfoVecs.cell_Dpp.resize(allocPara_m.maxCellCount, 0.0); //Ali cellInfoVecs.cell_DppOld.resize(allocPara_m.maxCellCount, 0.0); //Ali //cout<< "size of dpp in init is "<< cellInfoVecs.cell_Dpp.size() << endl ; cellInfoVecs.growthProgress.resize(allocPara_m.maxCellCount, 0.0); //A&A cellInfoVecs.growthProgressOld.resize(allocPara_m.maxCellCount, 0.0);//Ali cellInfoVecs.Cell_Time.resize(allocPara_m.maxCellCount, 0.0); //Ali cellInfoVecs.expectedLength.resize(allocPara_m.maxCellCount, bioPara.cellInitLength); cellInfoVecs.lengthDifference.resize(allocPara_m.maxCellCount, 0.0); cellInfoVecs.smallestDistance.resize(allocPara_m.maxCellCount); cellInfoVecs.biggestDistance.resize(allocPara_m.maxCellCount); cellInfoVecs.activeMembrNodeCounts.resize(allocPara_m.maxCellCount); cellInfoVecs.activeIntnlNodeCounts.resize(allocPara_m.maxCellCount); cellInfoVecs.lastCheckPoint.resize(allocPara_m.maxCellCount, 0.0); cellInfoVecs.isDividing.resize(allocPara_m.maxCellCount); cellInfoVecs.isEnteringMitotic.resize(allocPara_m.maxCellCount, false); //A&A //cellInfoVecs.isRemoving.resize(allocPara.maxCellCount);//AAMIRI cellInfoVecs.isScheduledToGrow.resize(allocPara_m.maxCellCount, false); cellInfoVecs.isScheduledToShrink.resize(allocPara_m.maxCellCount, false);//AAMIRI cellInfoVecs.isCellActive.resize(allocPara_m.maxCellCount, false);//AAMIRI cellInfoVecs.centerCoordX.resize(allocPara_m.maxCellCount); cellInfoVecs.centerCoordY.resize(allocPara_m.maxCellCount); cellInfoVecs.centerCoordZ.resize(allocPara_m.maxCellCount); cellInfoVecs.HertwigXdir.resize(allocPara_m.maxCellCount,0.0); //A&A cellInfoVecs.HertwigYdir.resize(allocPara_m.maxCellCount,0.0); //A&A cellInfoVecs.cellRanksTmpStorage.resize(allocPara_m.maxCellCount); cellInfoVecs.growthSpeed.resize(allocPara_m.maxCellCount, 0.0); cellInfoVecs.growthXDir.resize(allocPara_m.maxCellCount); cellInfoVecs.growthYDir.resize(allocPara_m.maxCellCount); cellInfoVecs.isRandGrowInited.resize(allocPara_m.maxCellCount, false); cellInfoVecs.isMembrAddingNode.resize(allocPara_m.maxCellCount, false); cellInfoVecs.maxTenIndxVec.resize(allocPara_m.maxCellCount); cellInfoVecs.maxTenRiVec.resize(allocPara_m.maxCellCount); cellInfoVecs.maxDistToRiVec.resize(allocPara_m.maxCellCount); //Ali cellInfoVecs.maxTenRiMidXVec.resize(allocPara_m.maxCellCount); cellInfoVecs.maxTenRiMidYVec.resize(allocPara_m.maxCellCount); cellInfoVecs.aveTension.resize(allocPara_m.maxCellCount); cellInfoVecs.membrGrowProgress.resize(allocPara_m.maxCellCount, 0.0); cellInfoVecs.membrGrowSpeed.resize(allocPara_m.maxCellCount, 0.0); cellInfoVecs.cellAreaVec.resize(allocPara_m.maxCellCount, 0.0); cellInfoVecs.cellPerimVec.resize(allocPara_m.maxCellCount, 0.0);//AAMIRI std::cout << "finished " << std::endl; } void SceCells::initCellNodeInfoVecs() { cellNodeInfoVecs.cellRanks.resize(allocPara.maxTotalCellNodeCount); cellNodeInfoVecs.activeXPoss.resize(allocPara.maxTotalCellNodeCount); cellNodeInfoVecs.activeYPoss.resize(allocPara.maxTotalCellNodeCount); 
cellNodeInfoVecs.activeZPoss.resize(allocPara.maxTotalCellNodeCount); cellNodeInfoVecs.distToCenterAlongGrowDir.resize( allocPara.maxTotalCellNodeCount); } void SceCells::initCellNodeInfoVecs_M() { std::cout << "max total node count = " << allocPara_m.maxTotalNodeCount << std::endl; cellNodeInfoVecs.cellRanks.resize(allocPara_m.maxTotalNodeCount); cellNodeInfoVecs.activeXPoss.resize(allocPara_m.maxTotalNodeCount); cellNodeInfoVecs.activeYPoss.resize(allocPara_m.maxTotalNodeCount); cellNodeInfoVecs.activeZPoss.resize(allocPara_m.maxTotalNodeCount); cellNodeInfoVecs.distToCenterAlongGrowDir.resize( allocPara_m.maxTotalNodeCount); } void SceCells::initGrowthAuxData() { growthAuxData.nodeIsActiveAddress = thrust::raw_pointer_cast( &(nodes->getInfoVecs().nodeIsActive[allocPara.startPosCells])); growthAuxData.nodeXPosAddress = thrust::raw_pointer_cast( &(nodes->getInfoVecs().nodeLocX[allocPara.startPosCells])); growthAuxData.nodeYPosAddress = thrust::raw_pointer_cast( &(nodes->getInfoVecs().nodeLocY[allocPara.startPosCells])); growthAuxData.randomGrowthSpeedMin = globalConfigVars.getConfigValue( "RandomGrowthSpeedMin").toDouble(); growthAuxData.randomGrowthSpeedMax = globalConfigVars.getConfigValue( "RandomGrowthSpeedMax").toDouble(); growthAuxData.randGenAuxPara = globalConfigVars.getConfigValue( "RandomGenerationAuxPara").toDouble(); if (controlPara.simuType == SingleCellTest) { growthAuxData.fixedGrowthSpeed = globalConfigVars.getConfigValue( "FixedGrowthSpeed").toDouble(); } } void SceCells::initGrowthAuxData_M() { growthAuxData.nodeIsActiveAddress = thrust::raw_pointer_cast( &(nodes->getInfoVecs().nodeIsActive[allocPara_m.bdryNodeCount])); growthAuxData.nodeXPosAddress = thrust::raw_pointer_cast( &(nodes->getInfoVecs().nodeLocX[allocPara_m.bdryNodeCount])); growthAuxData.nodeYPosAddress = thrust::raw_pointer_cast( &(nodes->getInfoVecs().nodeLocY[allocPara_m.bdryNodeCount])); growthAuxData.adhIndxAddr = thrust::raw_pointer_cast( &(nodes->getInfoVecs().nodeAdhereIndex[allocPara_m.bdryNodeCount])); growthAuxData.randomGrowthSpeedMin_Ori = globalConfigVars.getConfigValue( "RandomGrowthSpeedMin").toDouble(); growthAuxData.randomGrowthSpeedMax_Ori = globalConfigVars.getConfigValue( "RandomGrowthSpeedMax").toDouble(); growthAuxData.randomGrowthSpeedMin = growthAuxData.randomGrowthSpeedMin_Ori; growthAuxData.randomGrowthSpeedMax = growthAuxData.randomGrowthSpeedMax_Ori; growthAuxData.grthPrgrCriVal_M_Ori = globalConfigVars.getConfigValue( "GrowthPrgrCriVal").toDouble(); growthAuxData.grthProgrEndCPU = globalConfigVars.getConfigValue( "GrowthPrgrValEnd").toDouble(); } void SceCells::initialize(SceNodes* nodesInput) { nodes = nodesInput; controlPara = nodes->getControlPara(); readMiscPara(); readBioPara(); allocPara = nodesInput->getAllocPara(); // max internal node count must be even number. assert(allocPara_m.maxIntnlNodePerCell % 2 == 0); initCellInfoVecs(); initCellNodeInfoVecs(); initGrowthAuxData(); distributeIsCellRank(); } void SceCells::initialize_M(SceNodes* nodesInput) { std::cout << "Initializing cells ...... " << std::endl; //std::cout.flush(); nodes = nodesInput; allocPara_m = nodesInput->getAllocParaM(); // max internal node count must be even number. 
	assert(allocPara_m.maxIntnlNodePerCell % 2 == 0);
	//std::cout << "break point 1 " << std::endl;
	//std::cout.flush();
	controlPara = nodes->getControlPara();
	//std::cout << "break point 2 " << std::endl;
	//std::cout.flush();
	readMiscPara_M();
	//std::cout << "break point 3 " << std::endl;
	//std::cout.flush();
	initCellInfoVecs_M();
	cout << "size of dpp in initialize is " << cellInfoVecs.cell_Dpp.size() << endl;
	//std::cout << "break point 4 " << std::endl;
	//std::cout.flush();
	readBioPara();
	//std::cout << "break point 5 " << std::endl;
	//std::cout.flush();
	//std::cout << "break point 6 " << std::endl;
	//std::cout.flush();
	initCellNodeInfoVecs_M();
	//std::cout << "break point 7 " << std::endl;
	//std::cout.flush();
	initGrowthAuxData_M();
	//std::cout << "break point 8 " << std::endl;
	//std::cout.flush();
}

void SceCells::copyInitActiveNodeCount(
		std::vector<uint>& numOfInitActiveNodesOfCells) {
	thrust::copy(numOfInitActiveNodesOfCells.begin(),
			numOfInitActiveNodesOfCells.end(),
			cellInfoVecs.activeNodeCountOfThisCell.begin());
}

void SceCells::allComponentsMove() {
	adjustNodeVel();
	moveNodes();
}

/**
 * Mark cell node as either active or inactive.
 * left part of the node array will be active and right part will be inactive.
 * the threshold is defined by array activeNodeCountOfThisCell.
 * e.g. activeNodeCountOfThisCell = {2,3} and maxNodeOfOneCell = 5
 */
void SceCells::distributeIsActiveInfo() {
	//std::cout << "before distribute bdry isActive" << std::endl;
	distributeBdryIsActiveInfo();
	//std::cout << "before distribute profile isActive" << std::endl;
	distributeProfileIsActiveInfo();
	//std::cout << "before distribute ecm isActive" << std::endl;
	distributeECMIsActiveInfo();
	//std::cout << "before distribute cells isActive" << std::endl;
	distributeCellIsActiveInfo();
}

void SceCells::distributeIsCellRank() {
	uint totalNodeCountForActiveCells = allocPara.currentActiveCellCount
			* allocPara.maxNodeOfOneCell;
	thrust::counting_iterator<uint> countingBegin(0);
	thrust::counting_iterator<uint> countingCellEnd(
			totalNodeCountForActiveCells);
	std::cerr << "totalNodeCount for active cells "
			<< totalNodeCountForActiveCells << std::endl;
	//thrust::counting_iterator<uint> countingECMEnd(countingECMEnd);
	// only computes the cell ranks of cells. the rest remain unchanged.
	thrust::transform(countingBegin, countingCellEnd,
			nodes->getInfoVecs().nodeCellRank.begin() + allocPara.startPosCells,
			DivideFunctor(allocPara.maxNodeOfOneCell));
	std::cerr << "finished cellRank transformation" << std::endl;
}

/**
 * This method computes the center of all cells.
 * more efficient than simply iterating over the cells because of parallel reduction.
*/ void SceCells::computeCenterPos() { uint totalNodeCountForActiveCells = allocPara.currentActiveCellCount * allocPara.maxNodeOfOneCell; thrust::counting_iterator<uint> countingBegin(0); thrust::counting_iterator<uint> countingEnd(totalNodeCountForActiveCells); uint totalNumberOfActiveNodes = thrust::reduce( cellInfoVecs.activeNodeCountOfThisCell.begin(), cellInfoVecs.activeNodeCountOfThisCell.begin() + allocPara.currentActiveCellCount); thrust::copy_if( thrust::make_zip_iterator( thrust::make_tuple( make_transform_iterator(countingBegin, DivideFunctor(allocPara.maxNodeOfOneCell)), nodes->getInfoVecs().nodeLocX.begin() + allocPara.startPosCells, nodes->getInfoVecs().nodeLocY.begin() + allocPara.startPosCells, nodes->getInfoVecs().nodeLocZ.begin() + allocPara.startPosCells)), thrust::make_zip_iterator( thrust::make_tuple( make_transform_iterator(countingBegin, DivideFunctor(allocPara.maxNodeOfOneCell)), nodes->getInfoVecs().nodeLocX.begin() + allocPara.startPosCells, nodes->getInfoVecs().nodeLocY.begin() + allocPara.startPosCells, nodes->getInfoVecs().nodeLocZ.begin() + allocPara.startPosCells)) + totalNodeCountForActiveCells, nodes->getInfoVecs().nodeIsActive.begin() + allocPara.startPosCells, thrust::make_zip_iterator( thrust::make_tuple(cellNodeInfoVecs.cellRanks.begin(), cellNodeInfoVecs.activeXPoss.begin(), cellNodeInfoVecs.activeYPoss.begin(), cellNodeInfoVecs.activeZPoss.begin())), isTrue()); thrust::reduce_by_key(cellNodeInfoVecs.cellRanks.begin(), cellNodeInfoVecs.cellRanks.begin() + totalNumberOfActiveNodes, thrust::make_zip_iterator( thrust::make_tuple(cellNodeInfoVecs.activeXPoss.begin(), cellNodeInfoVecs.activeYPoss.begin(), cellNodeInfoVecs.activeZPoss.begin())), cellInfoVecs.cellRanksTmpStorage.begin(), thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.centerCoordX.begin(), cellInfoVecs.centerCoordY.begin(), cellInfoVecs.centerCoordZ.begin())), thrust::equal_to<uint>(), CVec3Add()); thrust::transform( thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.centerCoordX.begin(), cellInfoVecs.centerCoordY.begin(), cellInfoVecs.centerCoordZ.begin())), thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.centerCoordX.begin(), cellInfoVecs.centerCoordY.begin(), cellInfoVecs.centerCoordZ.begin())) + allocPara.currentActiveCellCount, cellInfoVecs.activeNodeCountOfThisCell.begin(), thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.centerCoordX.begin(), cellInfoVecs.centerCoordY.begin(), cellInfoVecs.centerCoordZ.begin())), CVec3Divide()); } /** * 2D version of cell division. * Division process is done by creating two temporary vectors to hold the node information * that are going to divide. * * step 1: based on lengthDifference, expectedLength and growthProgress, * this process determines whether a certain cell is ready to divide and then assign * a boolean value to isDivided. * * step 2. copy those cells that will divide in to the temp vectors created * * step 3. For each cell in the temp vectors, we sort its nodes by its distance to the * corresponding cell center. * This step is not very effcient when the number of cells going to divide is big. * but this is unlikely to happen because cells will divide according to external chemical signaling * and each will have different divide progress. * * step 4. copy the right part of each cell of the sorted array (temp1) to left part of each cell of * another array * * step 5. transform isActive vector of both temp1 and temp2, making only left part of each cell active. * * step 6. 
insert temp2 to the end of the cell array * * step 7. copy temp1 to the previous position of the cell array. * * step 8. add activeCellCount of the system. * * step 9. mark isDivide of all cells to false. */ void SceCells::divide2DSimplified() { bool isDivisionPresent = decideIfGoingToDivide(); if (!isDivisionPresent) { return; } copyCellsPreDivision(); sortNodesAccordingToDist(); copyLeftAndRightToSeperateArrays(); transformIsActiveArrayOfBothArrays(); addSecondArrayToCellArray(); copyFirstArrayToPreviousPos(); updateActiveCellCount(); markIsDivideFalse(); } bool SceCells::decideIfGoingToDivide() { // step 1 thrust::transform( thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.lengthDifference.begin(), cellInfoVecs.expectedLength.begin(), cellInfoVecs.growthProgress.begin(), cellInfoVecs.activeNodeCountOfThisCell.begin())), thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.lengthDifference.begin(), cellInfoVecs.expectedLength.begin(), cellInfoVecs.growthProgress.begin(), cellInfoVecs.activeNodeCountOfThisCell.begin())) + allocPara.currentActiveCellCount, thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.isDividing.begin(), cellInfoVecs.growthProgress.begin())), CompuIsDivide(miscPara.isDivideCriticalRatio, allocPara.maxNodeOfOneCell)); // sum all bool values which indicate whether the cell is going to divide. // toBeDivideCount is the total number of cells going to divide. divAuxData.toBeDivideCount = thrust::reduce(cellInfoVecs.isDividing.begin(), cellInfoVecs.isDividing.begin() + allocPara.currentActiveCellCount, (uint) (0)); if (divAuxData.toBeDivideCount > 0) { return true; } else { return false; } } void SceCells::copyCellsPreDivision() { // step 2 : copy all cell rank and distance to its corresponding center with divide flag = 1 totalNodeCountForActiveCells = allocPara.currentActiveCellCount * allocPara.maxNodeOfOneCell; divAuxData.nodeStorageCount = divAuxData.toBeDivideCount * allocPara.maxNodeOfOneCell; divAuxData.tmpIsActiveHold1 = thrust::device_vector<bool>( divAuxData.nodeStorageCount, true); divAuxData.tmpDistToCenter1 = thrust::device_vector<double>( divAuxData.nodeStorageCount, 0.0); divAuxData.tmpCellRankHold1 = thrust::device_vector<uint>( divAuxData.nodeStorageCount, 0.0); divAuxData.tmpXValueHold1 = thrust::device_vector<double>( divAuxData.nodeStorageCount, 0.0); divAuxData.tmpYValueHold1 = thrust::device_vector<double>( divAuxData.nodeStorageCount, 0.0); divAuxData.tmpZValueHold1 = thrust::device_vector<double>( divAuxData.nodeStorageCount, 0.0); divAuxData.tmpCellTypes = thrust::device_vector<SceNodeType>( divAuxData.nodeStorageCount); divAuxData.tmpIsActiveHold2 = thrust::device_vector<bool>( divAuxData.nodeStorageCount, false); divAuxData.tmpDistToCenter2 = thrust::device_vector<double>( divAuxData.nodeStorageCount, 0.0); divAuxData.tmpXValueHold2 = thrust::device_vector<double>( divAuxData.nodeStorageCount, 0.0); divAuxData.tmpYValueHold2 = thrust::device_vector<double>( divAuxData.nodeStorageCount, 0.0); divAuxData.tmpZValueHold2 = thrust::device_vector<double>( divAuxData.nodeStorageCount, 0.0); // step 2 , continued thrust::copy_if( thrust::make_zip_iterator( thrust::make_tuple( make_transform_iterator(countingBegin, DivideFunctor(allocPara.maxNodeOfOneCell)), cellNodeInfoVecs.distToCenterAlongGrowDir.begin(), nodes->getInfoVecs().nodeLocX.begin() + allocPara.startPosCells, nodes->getInfoVecs().nodeLocY.begin() + allocPara.startPosCells, nodes->getInfoVecs().nodeLocZ.begin() + allocPara.startPosCells, 
nodes->getInfoVecs().nodeCellType.begin() + allocPara.startPosCells)), thrust::make_zip_iterator( thrust::make_tuple( make_transform_iterator(countingBegin, DivideFunctor(allocPara.maxNodeOfOneCell)), cellNodeInfoVecs.distToCenterAlongGrowDir.begin(), nodes->getInfoVecs().nodeLocX.begin() + allocPara.startPosCells, nodes->getInfoVecs().nodeLocY.begin() + allocPara.startPosCells, nodes->getInfoVecs().nodeLocZ.begin() + allocPara.startPosCells, nodes->getInfoVecs().nodeCellType.begin() + allocPara.startPosCells)) + totalNodeCountForActiveCells, thrust::make_permutation_iterator(cellInfoVecs.isDividing.begin(), make_transform_iterator(countingBegin, DivideFunctor(allocPara.maxNodeOfOneCell))), thrust::make_zip_iterator( thrust::make_tuple(divAuxData.tmpCellRankHold1.begin(), divAuxData.tmpDistToCenter1.begin(), divAuxData.tmpXValueHold1.begin(), divAuxData.tmpYValueHold1.begin(), divAuxData.tmpZValueHold1.begin(), divAuxData.tmpCellTypes.begin())), isTrue()); } /** * performance wise, this implementation is not the best because I can use only one sort_by_key * with speciialized comparision operator. However, This implementation is more robust and won't * compromise performance too much. */ void SceCells::sortNodesAccordingToDist() { //step 3 for (uint i = 0; i < divAuxData.toBeDivideCount; i++) { thrust::sort_by_key( divAuxData.tmpDistToCenter1.begin() + i * allocPara.maxNodeOfOneCell, divAuxData.tmpDistToCenter1.begin() + (i + 1) * allocPara.maxNodeOfOneCell, thrust::make_zip_iterator( thrust::make_tuple( divAuxData.tmpXValueHold1.begin() + i * allocPara.maxNodeOfOneCell, divAuxData.tmpYValueHold1.begin() + i * allocPara.maxNodeOfOneCell, divAuxData.tmpZValueHold1.begin() + i * allocPara.maxNodeOfOneCell))); } } /** * scatter_if() is a thrust function. * inputIter1 first, * inputIter1 last, * inputIter2 map, * inputIter3 stencil * randomAccessIter output */ void SceCells::copyLeftAndRightToSeperateArrays() { //step 4. thrust::scatter_if( thrust::make_zip_iterator( thrust::make_tuple(divAuxData.tmpXValueHold1.begin(), divAuxData.tmpYValueHold1.begin(), divAuxData.tmpZValueHold1.begin())), thrust::make_zip_iterator( thrust::make_tuple(divAuxData.tmpXValueHold1.end(), divAuxData.tmpYValueHold1.end(), divAuxData.tmpZValueHold1.end())), make_transform_iterator(countingBegin, LeftShiftFunctor(allocPara.maxNodeOfOneCell)), make_transform_iterator(countingBegin, IsRightSide(allocPara.maxNodeOfOneCell)), thrust::make_zip_iterator( thrust::make_tuple(divAuxData.tmpXValueHold2.begin(), divAuxData.tmpYValueHold2.begin(), divAuxData.tmpZValueHold2.begin()))); } void SceCells::transformIsActiveArrayOfBothArrays() { thrust::transform(countingBegin, countingBegin + divAuxData.nodeStorageCount, divAuxData.tmpIsActiveHold1.begin(), IsLeftSide(allocPara.maxNodeOfOneCell)); thrust::transform(countingBegin, countingBegin + divAuxData.nodeStorageCount, divAuxData.tmpIsActiveHold2.begin(), IsLeftSide(allocPara.maxNodeOfOneCell)); if (divAuxData.toBeDivideCount != 0) { std::cout << "before insert, active cell count in nodes:" << nodes->getAllocPara().currentActiveCellCount << std::endl; } } void SceCells::addSecondArrayToCellArray() { /// step 6. 
call SceNodes function to add newly divided cells nodes->addNewlyDividedCells(divAuxData.tmpXValueHold2, divAuxData.tmpYValueHold2, divAuxData.tmpZValueHold2, divAuxData.tmpIsActiveHold2, divAuxData.tmpCellTypes); } void SceCells::copyFirstArrayToPreviousPos() { thrust::scatter( thrust::make_zip_iterator( thrust::make_tuple(divAuxData.tmpIsActiveHold1.begin(), divAuxData.tmpXValueHold1.begin(), divAuxData.tmpYValueHold1.begin(), divAuxData.tmpZValueHold1.begin())), thrust::make_zip_iterator( thrust::make_tuple(divAuxData.tmpIsActiveHold1.end(), divAuxData.tmpXValueHold1.end(), divAuxData.tmpYValueHold1.end(), divAuxData.tmpZValueHold1.end())), thrust::make_transform_iterator( thrust::make_zip_iterator( thrust::make_tuple(countingBegin, divAuxData.tmpCellRankHold1.begin())), CompuPos(allocPara.maxNodeOfOneCell)), thrust::make_zip_iterator( thrust::make_tuple( nodes->getInfoVecs().nodeIsActive.begin() + allocPara.startPosCells, nodes->getInfoVecs().nodeLocX.begin() + allocPara.startPosCells, nodes->getInfoVecs().nodeLocY.begin() + allocPara.startPosCells, nodes->getInfoVecs().nodeLocZ.begin() + allocPara.startPosCells))); /** * after dividing, the cell should resume the initial * (1) node count, which defaults to be half size of max node count * (2) growth progress, which defaults to 0 * (3) last check point, which defaults to 0 */ thrust::scatter_if( thrust::make_zip_iterator( thrust::make_tuple(initIntnlNodeCount, initGrowthProgress, initGrowthProgress)), thrust::make_zip_iterator( thrust::make_tuple(initIntnlNodeCount, initGrowthProgress, initGrowthProgress)) + allocPara.currentActiveCellCount, countingBegin, cellInfoVecs.isDividing.begin(), thrust::make_zip_iterator( thrust::make_tuple( cellInfoVecs.activeNodeCountOfThisCell.begin(), cellInfoVecs.growthProgress.begin(), cellInfoVecs.lastCheckPoint.begin())), isTrue()); // TODO: combine this one with the previous scatter_if to improve efficiency. thrust::fill( cellInfoVecs.activeNodeCountOfThisCell.begin() + allocPara.currentActiveCellCount, cellInfoVecs.activeNodeCountOfThisCell.begin() + allocPara.currentActiveCellCount + divAuxData.toBeDivideCount, allocPara.maxNodeOfOneCell / 2); } void SceCells::updateActiveCellCount() { allocPara.currentActiveCellCount = allocPara.currentActiveCellCount + divAuxData.toBeDivideCount; NodeAllocPara para = nodes->getAllocPara(); para.currentActiveCellCount = allocPara.currentActiveCellCount; nodes->setAllocPara(para); } void SceCells::markIsDivideFalse() { thrust::fill(cellInfoVecs.isDividing.begin(), cellInfoVecs.isDividing.begin() + allocPara.currentActiveCellCount, false); } void SceCells::readMiscPara() { miscPara.addNodeDistance = globalConfigVars.getConfigValue( "DistanceForAddingNode").toDouble(); miscPara.minDistanceToOtherNode = globalConfigVars.getConfigValue( "MinDistanceToOtherNode").toDouble(); miscPara.isDivideCriticalRatio = globalConfigVars.getConfigValue( "IsDivideCrticalRatio").toDouble(); // reason for adding a small term here is to avoid scenario when checkpoint might add many times // up to 0.99999999 which is theoretically 1.0 but not in computer memory. If we don't include // this small term we might risk adding one more node. 
int maxNodeOfOneCell = globalConfigVars.getConfigValue("MaxNodePerCell").toInt(); miscPara.growThreshold = 1.0 / (maxNodeOfOneCell - maxNodeOfOneCell / 2) + epsilon; } void SceCells::readMiscPara_M() { miscPara.addNodeDistance = globalConfigVars.getConfigValue( "DistanceForAddingNode").toDouble(); miscPara.minDistanceToOtherNode = globalConfigVars.getConfigValue( "MinDistanceToOtherNode").toDouble(); miscPara.isDivideCriticalRatio = globalConfigVars.getConfigValue( "IsDivideCrticalRatio").toDouble(); // reason for adding a small term here is to avoid scenario when checkpoint might add many times // up to 0.99999999 which is theoretically 1.0 but not in computer memory. If we don't include // this small term we might risk adding one more node. int maxIntnlNodePerCell = globalConfigVars.getConfigValue( "MaxIntnlNodeCountPerCell").toInt(); miscPara.growThreshold = 1.0 / (maxIntnlNodePerCell - maxIntnlNodePerCell / 2) + epsilon; miscPara.prolifDecayCoeff = globalConfigVars.getConfigValue( "ProlifDecayCoeff").toDouble(); } void SceCells::readBioPara() { if (controlPara.simuType != Disc_M) { bioPara.cellInitLength = globalConfigVars.getConfigValue( "CellInitLength").toDouble(); std::cout << "break point 1 " << bioPara.cellInitLength << std::endl; std::cout.flush(); bioPara.cellFinalLength = globalConfigVars.getConfigValue( "CellFinalLength").toDouble(); std::cout << "break point 2 " << bioPara.cellFinalLength << std::endl; std::cout.flush(); bioPara.elongationCoefficient = globalConfigVars.getConfigValue( "ElongateCoefficient").toDouble(); std::cout << "break point 3 " << bioPara.elongationCoefficient << std::endl; std::cout.flush(); } if (controlPara.simuType == Beak) { std::cout << "break point 4 " << std::endl; std::cout.flush(); bioPara.chemoCoefficient = globalConfigVars.getConfigValue( "ChemoCoefficient").toDouble(); } //std::cin >> jj; } void SceCells::randomizeGrowth() { thrust::counting_iterator<uint> countingBegin(0); thrust::transform( thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.centerCoordX.begin(), cellInfoVecs.growthXDir.begin(), cellInfoVecs.growthYDir.begin(), cellInfoVecs.isRandGrowInited.begin(), countingBegin)), thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.centerCoordX.begin(), cellInfoVecs.growthXDir.begin(), cellInfoVecs.growthYDir.begin(), cellInfoVecs.isRandGrowInited.begin(), countingBegin)) + allocPara.currentActiveCellCount, thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.growthSpeed.begin(), cellInfoVecs.growthXDir.begin(), cellInfoVecs.growthYDir.begin(), cellInfoVecs.isRandGrowInited.begin())), AssignRandIfNotInit(growthAuxData.randomGrowthSpeedMin, growthAuxData.randomGrowthSpeedMax, allocPara.currentActiveCellCount, growthAuxData.randGenAuxPara)); } /** * To run all the cell level logics. * First step we got center positions of cells. * Grow. */ void SceCells::runAllCellLevelLogicsDisc(double dt) { this->dt = dt; //std::cerr << "enter run all cell level logics" << std::endl; computeCenterPos(); //std::cerr << "after compute center position." << std::endl; if (nodes->getControlPara().controlSwitchs.stab == OFF) { growAtRandom(dt); //grow2DTwoRegions(dt, region1, region2); //std::cerr << "after grow cells" << std::endl; //distributeIsActiveInfo(); //std::cerr << "after distribute is active info." << std::endl; divide2DSimplified(); //std::cerr << "after divide 2D simplified." << std::endl; distributeIsActiveInfo(); //std::cerr << "after distribute is active info." 
<< std::endl; distributeCellGrowthProgress(); } allComponentsMove(); //std::cerr << "after all components move." << std::endl; } //Ali void SceCells::runAllCellLogicsDisc_M(double dt) { void SceCells::runAllCellLogicsDisc_M(double dt, double Damp_Coef, double InitTimeStage) { //Ali std::cout << " *** 1 ***" << endl; std::cout.flush(); this->dt = dt; this->Damp_Coef=Damp_Coef ; //Ali this->InitTimeStage=InitTimeStage ; //A & A growthAuxData.prolifDecay =1.0 ; // no decay for right now exp(-curTime * miscPara.prolifDecayCoeff); cout<< "The important curTime used in simulation is here which is"<<curTime <<endl; growthAuxData.randomGrowthSpeedMin = growthAuxData.prolifDecay * growthAuxData.randomGrowthSpeedMin_Ori; growthAuxData.randomGrowthSpeedMax = growthAuxData.prolifDecay * growthAuxData.randomGrowthSpeedMax_Ori; curTime = curTime + dt; std::cout << " *** 2 ***" << endl; std::cout.flush(); applySceCellDisc_M(); std::cout << " *** 3 ***" << endl; std::cout.flush(); //Ali computeCenterPos_M(); exchSignal(); BC_Imp_M() ; std::cout << " *** 3.5 ***" << endl; std::cout.flush(); //Ali applyMemForce_M(); std::cout << " *** 4 ***" << endl; std::cout.flush(); //Ali cmment // // computeCenterPos_M(); std::cout << " *** 5 ***" << endl; std::cout.flush(); //Ali cmment // growAtRandom_M(dt); std::cout << " *** 6 ***" << endl; std::cout.flush(); //if (curTime<3300.0) divide2D_M(); std::cout << " *** 7 ***" << endl; std::cout.flush(); distributeCellGrowthProgress_M(); std::cout << " *** 8 ***" << endl; std::cout.flush(); findTangentAndNormal_M();//AAMIRI ADDED May29 allComponentsMove_M(); std::cout << " *** 9 ***" << endl; std::cout.flush(); handleMembrGrowth_M(); std::cout << " *** 10 ***" << endl; std::cout.flush(); } void SceCells::exchSignal(){ if (firstTimeReadDpp) { uint maxTotalNodes=nodes->getInfoVecs().nodeLocX.size() ; signal.Initialize(allocPara_m.maxAllNodePerCell,allocPara_m.maxMembrNodePerCell,maxTotalNodes, allocPara_m.maxCellCount) ; cout << " I passed the initializtion for signaling module" << endl ; } lastTimeExchange=lastTimeExchange+dt ; cout << "last time exchange is " << lastTimeExchange << endl ; cout << "dt is " << dt << endl ; double exchPeriod=1 ; if ( lastTimeExchange>exchPeriod) { lastTimeExchange=0 ; //vector<CVector> cellCentersHost ; //cellCentersHost=getAllCellCenters(); //Ali cout << "I entered the function to update dpp" << endl ; thrust::device_vector<double>::iterator MinX_Itr=thrust::min_element(cellInfoVecs.centerCoordX.begin(), cellInfoVecs.centerCoordX.begin()+allocPara_m.currentActiveCellCount) ; thrust::device_vector<double>::iterator MaxX_Itr=thrust::max_element(cellInfoVecs.centerCoordX.begin(), cellInfoVecs.centerCoordX.begin()+ allocPara_m.currentActiveCellCount) ; thrust::device_vector<double>::iterator MinY_Itr=thrust::min_element(cellInfoVecs.centerCoordY.begin(), cellInfoVecs.centerCoordY.begin()+ allocPara_m.currentActiveCellCount) ; thrust::device_vector<double>::iterator MaxY_Itr=thrust::max_element(cellInfoVecs.centerCoordY.begin(), cellInfoVecs.centerCoordY.begin()+ allocPara_m.currentActiveCellCount) ; Tisu_MinX= *MinX_Itr ; Tisu_MaxX= *MaxX_Itr ; Tisu_MinY= *MinY_Itr ; Tisu_MaxY= *MaxY_Itr ; Tisu_R=0.5*(0.5*(Tisu_MaxX-Tisu_MinX)+0.5*(Tisu_MaxY-Tisu_MinY)) ; totalNodeCountForActiveCells = allocPara_m.currentActiveCellCount * allocPara_m.maxAllNodePerCell; thrust:: copy (nodes->getInfoVecs().nodeIsActive.begin(),nodes->getInfoVecs().nodeIsActive.begin()+ totalNodeCountForActiveCells, signal.nodeIsActiveHost.begin()); thrust:: copy 
(nodes->getInfoVecs().nodeLocX.begin(),nodes->getInfoVecs().nodeLocX.begin()+ totalNodeCountForActiveCells, signal.nodeLocXHost.begin()); thrust:: copy (nodes->getInfoVecs().nodeLocY.begin(),nodes->getInfoVecs().nodeLocY.begin()+ totalNodeCountForActiveCells, signal.nodeLocYHost.begin()); thrust:: copy (cellInfoVecs.centerCoordX.begin(),cellInfoVecs.centerCoordX.begin()+allocPara_m.currentActiveCellCount, signal.cellCenterX.begin()); thrust:: copy (cellInfoVecs.centerCoordY.begin(),cellInfoVecs.centerCoordY.begin()+allocPara_m.currentActiveCellCount, signal.cellCenterY.begin()); signal.updateSignal(Tisu_MinX,Tisu_MaxX,Tisu_MinY,Tisu_MaxY,curTime,totalNodeCountForActiveCells,allocPara_m.currentActiveCellCount) ; //Ali assert(cellInfoVecs.cell_Dpp.size()==signal.dppLevel.size()); thrust::copy(signal.dppLevel.begin(),signal.dppLevel.end(),cellInfoVecs.cell_Dpp.begin()) ; //currentActiveCellCountOld=allocPara_m.currentActiveCellCount; } if (firstTimeReadDpp) { thrust::copy(signal.dppLevel.begin(),signal.dppLevel.end(),cellInfoVecs.cell_DppOld.begin()) ; firstTimeReadDpp=false ; } } void SceCells::runStretchTest(double dt) { this->dt = dt; computeCenterPos(); growAlongX(false, dt); moveNodes(); } void SceCells::growAlongX(bool isAddPt, double d_t) { totalNodeCountForActiveCells = allocPara.currentActiveCellCount * allocPara.maxNodeOfOneCell; setGrowthDirXAxis(); //std::cout << "after copy grow info" << std::endl; updateGrowthProgress(); //std::cout << "after update growth progress" << std::endl; decideIsScheduleToGrow(); //std::cout << "after decode os schedule to grow" << std::endl; computeCellTargetLength(); //std::cout << "after compute cell target length" << std::endl; computeDistToCellCenter(); //std::cout << "after compute dist to center" << std::endl; findMinAndMaxDistToCenter(); //std::cout << "after find min and max dist" << std::endl; computeLenDiffExpCur(); //std::cout << "after compute diff " << std::endl; stretchCellGivenLenDiff(); if (isAddPt) { addPointIfScheduledToGrow(); } } void SceCells::growWithStress(double d_t) { } std::vector<CVector> SceCells::getAllCellCenters() { //void SceCells::getAllCellCenters() { //thrust::host_vector<double> centerX = cellInfoVecs.centerCoordX; //thrust::host_vector<double> centerY = cellInfoVecs.centerCoordY; //thrust::host_vector<double> centerZ = cellInfoVecs.centerCoordZ; thrust::host_vector<double> centerX( allocPara_m.currentActiveCellCount); thrust::copy(cellInfoVecs.centerCoordX.begin(), cellInfoVecs.centerCoordX.begin() + allocPara_m.currentActiveCellCount, centerX.begin()); thrust::host_vector<double> centerY( allocPara_m.currentActiveCellCount); thrust::copy(cellInfoVecs.centerCoordY.begin(), cellInfoVecs.centerCoordY.begin() + allocPara_m.currentActiveCellCount, centerY.begin()); thrust::host_vector<double> centerZ( allocPara_m.currentActiveCellCount); thrust::copy(cellInfoVecs.centerCoordZ.begin(), cellInfoVecs.centerCoordZ.begin() + allocPara_m.currentActiveCellCount, centerZ.begin()); //infoForSignal.sCenterX=centerX[4] ; //infoForSignal.sCenterY=centerY[4] ; //infoForSignal.sCenterZ=centerZ[4] ; std::vector<CVector> result; for (uint i = 0; i < allocPara_m.currentActiveCellCount; i++) { CVector pos = CVector(centerX[i], centerY[i], centerZ[i]); //infoForSignal.sCenterX=centerX[i] ; //infoForSignal.sCenterY=centerY[i] ; //infoForSignal.sCenterZ=centerZ[i] ; result.push_back(pos); } return result; } void SceCells::setGrowthDirXAxis() { thrust::fill(cellInfoVecs.growthXDir.begin(), cellInfoVecs.growthXDir.begin() + 
allocPara.currentActiveCellCount, 1.0); thrust::fill(cellInfoVecs.growthYDir.begin(), cellInfoVecs.growthYDir.begin() + allocPara.currentActiveCellCount, 0.0); thrust::fill(cellInfoVecs.growthSpeed.begin(), cellInfoVecs.growthSpeed.begin() + allocPara.currentActiveCellCount, growthAuxData.fixedGrowthSpeed); } std::vector<double> SceCells::getGrowthProgressVec() { thrust::host_vector<double> growthProVec = cellInfoVecs.growthProgress; std::vector<double> result; for (uint i = 0; i < allocPara.currentActiveCellCount; i++) { result.push_back(growthProVec[i]); } return result; } void SceCells::copyCellsPreDivision_M() { totalNodeCountForActiveCells = allocPara_m.currentActiveCellCount * allocPara_m.maxAllNodePerCell; divAuxData.nodeStorageCount = divAuxData.toBeDivideCount * allocPara_m.maxAllNodePerCell; divAuxData.tmpIsActive_M = thrust::device_vector<bool>( divAuxData.nodeStorageCount, true); divAuxData.tmpNodePosX_M = thrust::device_vector<double>( divAuxData.nodeStorageCount, 0.0); divAuxData.tmpNodePosY_M = thrust::device_vector<double>( divAuxData.nodeStorageCount, 0.0); divAuxData.tmpCellRank_M = thrust::device_vector<uint>( divAuxData.toBeDivideCount, 0); divAuxData.tmpDivDirX_M = thrust::device_vector<double>( divAuxData.toBeDivideCount, 0); divAuxData.tmpDivDirY_M = thrust::device_vector<double>( divAuxData.toBeDivideCount, 0); divAuxData.tmpCenterPosX_M = thrust::device_vector<double>( divAuxData.toBeDivideCount, 0); divAuxData.tmpCenterPosY_M = thrust::device_vector<double>( divAuxData.toBeDivideCount, 0); divAuxData.tmpIsActive1_M = thrust::device_vector<bool>( divAuxData.nodeStorageCount, false); divAuxData.tmpXPos1_M = thrust::device_vector<double>( divAuxData.nodeStorageCount, 0.0); divAuxData.tmpYPos1_M = thrust::device_vector<double>( divAuxData.nodeStorageCount, 0.0); divAuxData.tmpIsActive2_M = thrust::device_vector<bool>( divAuxData.nodeStorageCount, false); divAuxData.tmpXPos2_M = thrust::device_vector<double>( divAuxData.nodeStorageCount, 0.0); divAuxData.tmpYPos2_M = thrust::device_vector<double>( divAuxData.nodeStorageCount, 0.0); //A&A divAuxData.tmpHertwigXdir = thrust::device_vector<double>( divAuxData.nodeStorageCount, 0.0); divAuxData.tmpHertwigYdir = thrust::device_vector<double>( divAuxData.nodeStorageCount, 0.0); //A&A // step 2 , continued // copy node info values ready for division /comment A&A thrust::counting_iterator<uint> iStart(0); thrust::copy_if( thrust::make_zip_iterator( thrust::make_tuple( nodes->getInfoVecs().nodeIsActive.begin() + allocPara_m.bdryNodeCount, nodes->getInfoVecs().nodeLocX.begin() + allocPara_m.bdryNodeCount, nodes->getInfoVecs().nodeLocY.begin() + allocPara_m.bdryNodeCount)), thrust::make_zip_iterator( thrust::make_tuple( nodes->getInfoVecs().nodeIsActive.begin() + allocPara_m.bdryNodeCount, nodes->getInfoVecs().nodeLocX.begin() + allocPara_m.bdryNodeCount, nodes->getInfoVecs().nodeLocY.begin() + allocPara_m.bdryNodeCount)) + totalNodeCountForActiveCells, thrust::make_permutation_iterator(cellInfoVecs.isDividing.begin(), make_transform_iterator(iStart, DivideFunctor(allocPara_m.maxAllNodePerCell))), thrust::make_zip_iterator( thrust::make_tuple(divAuxData.tmpIsActive_M.begin(), divAuxData.tmpNodePosX_M.begin(), divAuxData.tmpNodePosY_M.begin())), isTrue()); // step 3 , continued //copy cell info values ready for division /comment A&A thrust::counting_iterator<uint> iBegin(0); thrust::copy_if( thrust::make_zip_iterator( thrust::make_tuple(iBegin, cellInfoVecs.growthXDir.begin(), cellInfoVecs.growthYDir.begin(), 
cellInfoVecs.HertwigXdir.begin(), cellInfoVecs.HertwigYdir.begin(), cellInfoVecs.centerCoordX.begin(), cellInfoVecs.centerCoordY.begin())), thrust::make_zip_iterator( thrust::make_tuple(iBegin, cellInfoVecs.growthXDir.begin(), cellInfoVecs.growthYDir.begin(), cellInfoVecs.HertwigXdir.begin(), cellInfoVecs.HertwigYdir.begin(), cellInfoVecs.centerCoordX.begin(), cellInfoVecs.centerCoordY.begin())) + allocPara_m.currentActiveCellCount, cellInfoVecs.isDividing.begin(), thrust::make_zip_iterator( thrust::make_tuple(divAuxData.tmpCellRank_M.begin(), divAuxData.tmpDivDirX_M.begin(), divAuxData.tmpDivDirY_M.begin(), divAuxData.tmpHertwigXdir.begin(), divAuxData.tmpHertwigYdir.begin(), divAuxData.tmpCenterPosX_M.begin(), divAuxData.tmpCenterPosY_M.begin())), isTrue()); } void SceCells::copyCellsEnterMitotic() { totalNodeCountForActiveCells = allocPara_m.currentActiveCellCount * allocPara_m.maxAllNodePerCell; divAuxData.nodeStorageCount = divAuxData.toEnterMitoticCount * allocPara_m.maxAllNodePerCell; divAuxData.tmpIsActive_M = thrust::device_vector<bool>( divAuxData.nodeStorageCount, true); divAuxData.tmpNodePosX_M = thrust::device_vector<double>( divAuxData.nodeStorageCount, 0.0); divAuxData.tmpNodePosY_M = thrust::device_vector<double>( divAuxData.nodeStorageCount, 0.0); divAuxData.tmpCellRank_M = thrust::device_vector<uint>( divAuxData.toEnterMitoticCount, 0); divAuxData.tmpDivDirX_M = thrust::device_vector<double>( divAuxData.toEnterMitoticCount, 0); divAuxData.tmpDivDirY_M = thrust::device_vector<double>( divAuxData.toEnterMitoticCount, 0); divAuxData.tmpCenterPosX_M = thrust::device_vector<double>( divAuxData.toEnterMitoticCount, 0); divAuxData.tmpCenterPosY_M = thrust::device_vector<double>( divAuxData.toEnterMitoticCount, 0); divAuxData.tmpIsActive1_M = thrust::device_vector<bool>( divAuxData.nodeStorageCount, false); divAuxData.tmpXPos1_M = thrust::device_vector<double>( divAuxData.nodeStorageCount, 0.0); divAuxData.tmpYPos1_M = thrust::device_vector<double>( divAuxData.nodeStorageCount, 0.0); divAuxData.tmpIsActive2_M = thrust::device_vector<bool>( divAuxData.nodeStorageCount, false); divAuxData.tmpXPos2_M = thrust::device_vector<double>( divAuxData.nodeStorageCount, 0.0); divAuxData.tmpYPos2_M = thrust::device_vector<double>( divAuxData.nodeStorageCount, 0.0); // step 2 , continued // copy node info values ready for division /comment A&A thrust::counting_iterator<uint> iStart(0); thrust::copy_if( thrust::make_zip_iterator( thrust::make_tuple( nodes->getInfoVecs().nodeIsActive.begin() + allocPara_m.bdryNodeCount, nodes->getInfoVecs().nodeLocX.begin() + allocPara_m.bdryNodeCount, nodes->getInfoVecs().nodeLocY.begin() + allocPara_m.bdryNodeCount)), thrust::make_zip_iterator( thrust::make_tuple( nodes->getInfoVecs().nodeIsActive.begin() + allocPara_m.bdryNodeCount, nodes->getInfoVecs().nodeLocX.begin() + allocPara_m.bdryNodeCount, nodes->getInfoVecs().nodeLocY.begin() + allocPara_m.bdryNodeCount)) + totalNodeCountForActiveCells, thrust::make_permutation_iterator(cellInfoVecs.isEnteringMitotic.begin(), make_transform_iterator(iStart, DivideFunctor(allocPara_m.maxAllNodePerCell))), thrust::make_zip_iterator( thrust::make_tuple(divAuxData.tmpIsActive_M.begin(), divAuxData.tmpNodePosX_M.begin(), divAuxData.tmpNodePosY_M.begin())), isTrue()); // step 3 , continued //copy cell info values ready for division /comment A&A thrust::counting_iterator<uint> iBegin(0); thrust::copy_if( thrust::make_zip_iterator( thrust::make_tuple(iBegin, cellInfoVecs.growthXDir.begin(), cellInfoVecs.growthYDir.begin(), 
cellInfoVecs.centerCoordX.begin(), cellInfoVecs.centerCoordY.begin())), thrust::make_zip_iterator( thrust::make_tuple(iBegin, cellInfoVecs.growthXDir.begin(), cellInfoVecs.growthYDir.begin(), cellInfoVecs.centerCoordX.begin(), cellInfoVecs.centerCoordY.begin())) + allocPara_m.currentActiveCellCount, cellInfoVecs.isEnteringMitotic.begin(), thrust::make_zip_iterator( thrust::make_tuple(divAuxData.tmpCellRank_M.begin(), divAuxData.tmpDivDirX_M.begin(), divAuxData.tmpDivDirY_M.begin(), divAuxData.tmpCenterPosX_M.begin(), divAuxData.tmpCenterPosY_M.begin())), isTrue()); } void SceCells::createTwoNewCellArr_M() { divAuxData.tmp1MemActiveCounts.clear(); divAuxData.tmp1InternalActiveCounts.clear(); divAuxData.tmp2MemActiveCounts.clear(); divAuxData.tmp2InternalActiveCounts.clear(); //divDebug(); for (uint i = 0; i < divAuxData.toBeDivideCount; i++) { divAuxData.tmp1IntnlVec.clear(); divAuxData.tmp2IntnlVec.clear(); vector<CVector> membrNodes; vector<CVector> intnlNodes; obtainMembrAndIntnlNodes(i, membrNodes, intnlNodes); CVector oldCenter = obtainCenter(i); //A&A commented //CVector divDir = calDivDir_MajorAxis(oldCenter, membrNodes, // lenAlongMajorAxis); /*CVector divDir = calDivDir_MajorAxis(oldCenter, membrNodes, lenAlongMajorAxis);*/ CVector divDir; divDir.x = divAuxData.tmpHertwigXdir[i] ; //A&A divDir.y = divAuxData.tmpHertwigYdir[i] ; //A&A double lenAlongHertwigAxis = calLengthAlongHertwigAxis(divDir, oldCenter, membrNodes);//A&A added // std::vector<VecVal> tmp1Membr, tmp2Membr; CVector cell1Center, cell2Center; obtainTwoNewCenters(oldCenter, divDir, lenAlongHertwigAxis, cell1Center, cell2Center); prepareTmpVec(i, divDir, oldCenter, tmp1Membr, tmp2Membr); processMemVec(tmp1Membr, tmp2Membr); shiftIntnlNodesByCellCenter(cell1Center, cell2Center); assembleVecForTwoCells(i); } //divDebug(); } //A&A void SceCells::findHertwigAxis() { divAuxData.tmp1MemActiveCounts.clear(); divAuxData.tmp1InternalActiveCounts.clear(); divAuxData.tmp2MemActiveCounts.clear(); divAuxData.tmp2InternalActiveCounts.clear(); //divDebug(); for (uint i = 0; i < divAuxData.toEnterMitoticCount; i++) { uint cellRank = divAuxData.tmpCellRank_M[i]; vector<CVector> membrNodes; vector<CVector> intnlNodes; obtainMembrAndIntnlNodes(i, membrNodes, intnlNodes); CVector oldCenter = obtainCenter(i); double lenAlongMajorAxis; CVector divDir = calDivDir_MajorAxis(oldCenter, membrNodes, lenAlongMajorAxis); cellInfoVecs.HertwigXdir[cellRank]=divDir.x ; cellInfoVecs.HertwigYdir[cellRank]=divDir.y ; //std::cout<<cellInfoVecs.HertwigXdir[cellRank]<<"HertwigXdir Thrust" <<std::endl; //std::cout<<cellInfoVecs.HertwigYdir[cellRank]<<"HertwigYdir Thrust" <<std::endl; //std::cout<<divDir.x<<"HertwigXdir " <<std::endl; //std::cout<<divDir.y<<"HertwigYdir " <<std::endl; } //divDebug(); } void SceCells::copyFirstCellArr_M() { uint maxAllNodePerCell = allocPara_m.maxAllNodePerCell; for (uint i = 0; i < divAuxData.toBeDivideCount; i++) { uint cellRank = divAuxData.tmpCellRank_M[i]; uint nodeStartIndx = cellRank * maxAllNodePerCell + allocPara_m.bdryNodeCount; uint tmpStartIndx = i * maxAllNodePerCell; uint tmpEndIndx = (i + 1) * maxAllNodePerCell; thrust::constant_iterator<int> noAdhesion(-1), noAdhesion2(-1); thrust::copy( thrust::make_zip_iterator( thrust::make_tuple(divAuxData.tmpXPos1_M.begin(), divAuxData.tmpYPos1_M.begin(), divAuxData.tmpIsActive1_M.begin(), noAdhesion, noAdhesion2)) + tmpStartIndx, thrust::make_zip_iterator( thrust::make_tuple(divAuxData.tmpXPos1_M.begin(), divAuxData.tmpYPos1_M.begin(), divAuxData.tmpIsActive1_M.begin(), 
noAdhesion, noAdhesion2)) + tmpEndIndx, thrust::make_zip_iterator( thrust::make_tuple( nodes->getInfoVecs().nodeLocX.begin(), nodes->getInfoVecs().nodeLocY.begin(), nodes->getInfoVecs().nodeIsActive.begin(), nodes->getInfoVecs().nodeAdhereIndex.begin(), nodes->getInfoVecs().membrIntnlIndex.begin())) + nodeStartIndx); cellInfoVecs.activeIntnlNodeCounts[cellRank] = divAuxData.tmp1InternalActiveCounts[i]; cellInfoVecs.activeMembrNodeCounts[cellRank] = divAuxData.tmp1MemActiveCounts[i]; cellInfoVecs.growthProgress[cellRank] = 0; cellInfoVecs.membrGrowProgress[cellRank] = 0.0; cellInfoVecs.isRandGrowInited[cellRank] = false; cellInfoVecs.lastCheckPoint[cellRank] = 0; cellInfoVecs.cell_DppOld[cellRank] = cellInfoVecs.cell_Dpp[cellRank]; } } void SceCells::copySecondCellArr_M() { uint maxAllNodePerCell = allocPara_m.maxAllNodePerCell; for (uint i = 0; i < divAuxData.toBeDivideCount; i++) { uint cellRankMother = divAuxData.tmpCellRank_M[i]; uint cellRank = allocPara_m.currentActiveCellCount + i; uint nodeStartIndx = cellRank * maxAllNodePerCell + allocPara_m.bdryNodeCount; uint tmpStartIndx = i * maxAllNodePerCell; uint tmpEndIndx = (i + 1) * maxAllNodePerCell; thrust::constant_iterator<int> noAdhesion(-1), noAdhesion2(-1); thrust::copy( thrust::make_zip_iterator( thrust::make_tuple(divAuxData.tmpXPos2_M.begin(), divAuxData.tmpYPos2_M.begin(), divAuxData.tmpIsActive2_M.begin(), noAdhesion, noAdhesion2)) + tmpStartIndx, thrust::make_zip_iterator( thrust::make_tuple(divAuxData.tmpXPos2_M.begin(), divAuxData.tmpYPos2_M.begin(), divAuxData.tmpIsActive2_M.begin(), noAdhesion, noAdhesion2)) + tmpEndIndx, thrust::make_zip_iterator( thrust::make_tuple( nodes->getInfoVecs().nodeLocX.begin(), nodes->getInfoVecs().nodeLocY.begin(), nodes->getInfoVecs().nodeIsActive.begin(), nodes->getInfoVecs().nodeAdhereIndex.begin(), nodes->getInfoVecs().membrIntnlIndex.begin())) + nodeStartIndx); cellInfoVecs.activeIntnlNodeCounts[cellRank] = divAuxData.tmp2InternalActiveCounts[i]; cellInfoVecs.activeMembrNodeCounts[cellRank] = divAuxData.tmp2MemActiveCounts[i]; cellInfoVecs.growthProgress[cellRank] = 0; cellInfoVecs.membrGrowProgress[cellRank] = 0; cellInfoVecs.isRandGrowInited[cellRank] = false; cellInfoVecs.lastCheckPoint[cellRank] = 0; cellInfoVecs.cell_DppOld[cellRank] = cellInfoVecs.cell_Dpp[cellRankMother]; cellInfoVecs.cell_Dpp[cellRank] = cellInfoVecs.cell_Dpp[cellRankMother]; } } //AAMIRI /* void SceCells::removeCellArr_M() { uint maxAllNodePerCell = allocPara_m.maxAllNodePerCell; for (uint i = 0; i < divAuxData.toBeDivideCount; i++) { uint cellRank = divAuxData.tmpCellRank_M[i]; uint nodeStartIndx = cellRank * maxAllNodePerCell + allocPara_m.bdryNodeCount; uint tmpStartIndx = i * maxAllNodePerCell; uint tmpEndIndx = (i + 1) * maxAllNodePerCell; thrust::constant_iterator<int> noAdhesion(-1), noAdhesion2(-1); thrust::copy( thrust::make_zip_iterator( thrust::make_tuple(divAuxData.tmpXPos1_M.begin(), divAuxData.tmpYPos1_M.begin(), divAuxData.tmpIsActive1_M.begin(), noAdhesion, noAdhesion2)) + tmpStartIndx, thrust::make_zip_iterator( thrust::make_tuple(divAuxData.tmpXPos1_M.begin(), divAuxData.tmpYPos1_M.begin(), divAuxData.tmpIsActive1_M.begin(), noAdhesion, noAdhesion2)) + tmpEndIndx, thrust::make_zip_iterator( thrust::make_tuple( nodes->getInfoVecs().nodeLocX.begin(), nodes->getInfoVecs().nodeLocY.begin(), nodes->getInfoVecs().nodeIsActive.begin(), nodes->getInfoVecs().nodeAdhereIndex.begin(), nodes->getInfoVecs().membrIntnlIndex.begin())) + nodeStartIndx); cellInfoVecs.activeIntnlNodeCounts[cellRank] = 
divAuxData.tmp1InternalActiveCounts[i]; cellInfoVecs.activeMembrNodeCounts[cellRank] = divAuxData.tmp1MemActiveCounts[i]; cellInfoVecs.growthProgress[cellRank] = 0; cellInfoVecs.membrGrowProgress[cellRank] = 0.0; cellInfoVecs.isRandGrowInited[cellRank] = false; cellInfoVecs.lastCheckPoint[cellRank] = 0; } } */ void SceCells::updateActiveCellCount_M() { allocPara_m.currentActiveCellCount = allocPara_m.currentActiveCellCount + divAuxData.toBeDivideCount; nodes->setActiveCellCount(allocPara_m.currentActiveCellCount); } //AAMIRI /* void SceCells::updateActiveCellCountAfterRemoval_M() { allocPara_m.currentActiveCellCount = allocPara_m.currentActiveCellCount + divAuxData.toBeDivideCount; nodes->setActiveCellCount(allocPara_m.currentActiveCellCount); } */ void SceCells::markIsDivideFalse_M() { thrust::fill(cellInfoVecs.isDividing.begin(), cellInfoVecs.isDividing.begin() + allocPara_m.currentActiveCellCount, false); } void SceCells::adjustNodeVel_M() { thrust::transform( thrust::make_zip_iterator( thrust::make_tuple(nodes->getInfoVecs().nodeVelX.begin(), nodes->getInfoVecs().nodeVelY.begin())), thrust::make_zip_iterator( thrust::make_tuple(nodes->getInfoVecs().nodeVelX.begin(), nodes->getInfoVecs().nodeVelY.begin())) + allocPara_m.bdryNodeCount + totalNodeCountForActiveCells, thrust::make_zip_iterator( thrust::make_tuple(nodes->getInfoVecs().nodeVelX.begin(), nodes->getInfoVecs().nodeVelY.begin())), ForceZero()); } void SceCells::moveNodes_M() { thrust::transform( thrust::make_zip_iterator( thrust::make_tuple(nodes->getInfoVecs().nodeVelX.begin(), nodes->getInfoVecs().nodeVelY.begin())), thrust::make_zip_iterator( thrust::make_tuple(nodes->getInfoVecs().nodeVelX.begin(), nodes->getInfoVecs().nodeVelY.begin())) + totalNodeCountForActiveCells + allocPara_m.bdryNodeCount, thrust::make_zip_iterator( thrust::make_tuple(nodes->getInfoVecs().nodeLocX.begin(), nodes->getInfoVecs().nodeLocY.begin())), thrust::make_zip_iterator( thrust::make_tuple(nodes->getInfoVecs().nodeLocX.begin(), nodes->getInfoVecs().nodeLocY.begin())), //Ali SaxpyFunctorDim2(dt)); SaxpyFunctorDim2_Damp(dt,Damp_Coef)); //Ali } //Ali // This function is written to assigned different damping coefficients to cells, therefore the boundary cells can have more damping void SceCells::moveNodes_BC_M() { thrust::counting_iterator<uint> iBegin2(0); uint maxAllNodePerCell = allocPara_m.maxAllNodePerCell; thrust::transform( thrust::make_zip_iterator( thrust::make_tuple( thrust::make_permutation_iterator( cellInfoVecs.Cell_Damp.begin(), make_transform_iterator(iBegin2, DivideFunctor(maxAllNodePerCell))), nodes->getInfoVecs().nodeVelX.begin(), nodes->getInfoVecs().nodeVelY.begin())), thrust::make_zip_iterator( thrust::make_tuple( thrust::make_permutation_iterator( cellInfoVecs.Cell_Damp.begin(), make_transform_iterator(iBegin2, DivideFunctor(maxAllNodePerCell))), nodes->getInfoVecs().nodeVelX.begin(), nodes->getInfoVecs().nodeVelY.begin())) + totalNodeCountForActiveCells + allocPara_m.bdryNodeCount, thrust::make_zip_iterator( thrust::make_tuple(nodes->getInfoVecs().nodeLocX.begin(), nodes->getInfoVecs().nodeLocY.begin())), thrust::make_zip_iterator( thrust::make_tuple(nodes->getInfoVecs().nodeLocX.begin(), nodes->getInfoVecs().nodeLocY.begin())), SaxpyFunctorDim2_BC_Damp(dt)); } //Ali void SceCells::applyMemForce_M() { totalNodeCountForActiveCells = allocPara_m.currentActiveCellCount * allocPara_m.maxAllNodePerCell; uint maxAllNodePerCell = allocPara_m.maxAllNodePerCell; thrust::counting_iterator<uint> iBegin(0), iBegin1(0); //Ali 
thrust::fill(cellInfoVecs.Cell_Time.begin(),cellInfoVecs.Cell_Time.begin() +allocPara_m.currentActiveCellCount,curTime); //Ali /* thrust::device_vector<double>::iterator MinX_Itr=thrust::min_element(nodes->getInfoVecs().nodeLocX.begin()+ allocPara_m.bdryNodeCount, nodes->getInfoVecs().nodeLocX.begin()+ allocPara_m.bdryNodeCount+ totalNodeCountForActiveCells) ; thrust::device_vector<double>::iterator MaxX_Itr=thrust::max_element(nodes->getInfoVecs().nodeLocX.begin()+ allocPara_m.bdryNodeCount, nodes->getInfoVecs().nodeLocX.begin()+ allocPara_m.bdryNodeCount+ totalNodeCountForActiveCells) ; thrust::device_vector<double>::iterator MinY_Itr=thrust::min_element(nodes->getInfoVecs().nodeLocY.begin()+ allocPara_m.bdryNodeCount, nodes->getInfoVecs().nodeLocY.begin()+ allocPara_m.bdryNodeCount+ totalNodeCountForActiveCells) ; thrust::device_vector<double>::iterator MaxY_Itr=thrust::max_element(nodes->getInfoVecs().nodeLocY.begin()+ allocPara_m.bdryNodeCount, nodes->getInfoVecs().nodeLocY.begin()+ allocPara_m.bdryNodeCount+ totalNodeCountForActiveCells) ; Tisu_MinX= *MinX_Itr ; Tisu_MaxX= *MaxX_Itr ; Tisu_MinY= *MinY_Itr ; Tisu_MaxY= *MaxY_Itr ; */ //cout<< "# of boundary nodes"<< allocPara_m.bdryNodeCount<<endl ; //cout<< "# of total active nodes"<<totalNodeCountForActiveCells <<endl ; //cout<<"The minimum location in X in applyMemForce_M is="<<Tisu_MinX<< endl; //cout<<"The maximum location in X in applyMemForce_M is="<<Tisu_MaxX<< endl; //cout<<"The minimum location in Y in applyMemForce_M is="<<Tisu_MinY<< endl; //cout<<"The maximum location in Y in applyMemForce_M is="<<Tisu_MaxY<< endl; //Ali double* nodeLocXAddr = thrust::raw_pointer_cast( &(nodes->getInfoVecs().nodeLocX[0])); double* nodeLocYAddr = thrust::raw_pointer_cast( &(nodes->getInfoVecs().nodeLocY[0])); bool* nodeIsActiveAddr = thrust::raw_pointer_cast( &(nodes->getInfoVecs().nodeIsActive[0])); double grthPrgrCriVal_M = growthAuxData.grthPrgrCriVal_M_Ori; // for now constant //growthAuxData.grthProgrEndCPU // - growthAuxData.prolifDecay // * (growthAuxData.grthProgrEndCPU // - growthAuxData.grthPrgrCriVal_M_Ori); thrust::transform( thrust::make_zip_iterator( thrust::make_tuple( thrust::make_permutation_iterator( cellInfoVecs.growthProgress.begin(), make_transform_iterator(iBegin, DivideFunctor(maxAllNodePerCell))), thrust::make_permutation_iterator( cellInfoVecs.activeMembrNodeCounts.begin(), make_transform_iterator(iBegin, DivideFunctor(maxAllNodePerCell))), thrust::make_permutation_iterator( cellInfoVecs.centerCoordX.begin(), make_transform_iterator(iBegin, DivideFunctor(maxAllNodePerCell))), thrust::make_permutation_iterator( cellInfoVecs.Cell_Time.begin(), make_transform_iterator(iBegin, DivideFunctor(maxAllNodePerCell))), make_transform_iterator(iBegin, DivideFunctor(maxAllNodePerCell)), make_transform_iterator(iBegin, ModuloFunctor(maxAllNodePerCell)), nodes->getInfoVecs().nodeLocX.begin() + allocPara_m.bdryNodeCount, nodes->getInfoVecs().nodeLocY.begin() + allocPara_m.bdryNodeCount, nodes->getInfoVecs().nodeVelX.begin() + allocPara_m.bdryNodeCount, nodes->getInfoVecs().nodeVelY.begin() + allocPara_m.bdryNodeCount)), thrust::make_zip_iterator( thrust::make_tuple( thrust::make_permutation_iterator( cellInfoVecs.growthProgress.begin(), make_transform_iterator(iBegin, DivideFunctor(maxAllNodePerCell))), thrust::make_permutation_iterator( cellInfoVecs.activeMembrNodeCounts.begin(), make_transform_iterator(iBegin, DivideFunctor(maxAllNodePerCell))), thrust::make_permutation_iterator( cellInfoVecs.centerCoordX.begin(), 
make_transform_iterator(iBegin, DivideFunctor(maxAllNodePerCell))), thrust::make_permutation_iterator( cellInfoVecs.Cell_Time.begin(), make_transform_iterator(iBegin, DivideFunctor(maxAllNodePerCell))), make_transform_iterator(iBegin, DivideFunctor(maxAllNodePerCell)), make_transform_iterator(iBegin, ModuloFunctor(maxAllNodePerCell)), nodes->getInfoVecs().nodeLocX.begin() + allocPara_m.bdryNodeCount, nodes->getInfoVecs().nodeLocY.begin() + allocPara_m.bdryNodeCount, nodes->getInfoVecs().nodeVelX.begin() + allocPara_m.bdryNodeCount, nodes->getInfoVecs().nodeVelY.begin() + allocPara_m.bdryNodeCount)) + totalNodeCountForActiveCells, thrust::make_zip_iterator( thrust::make_tuple(nodes->getInfoVecs().nodeVelX.begin(), nodes->getInfoVecs().nodeVelY.begin(), nodes->getInfoVecs().membrTensionMag.begin(), nodes->getInfoVecs().membrTenMagRi.begin(), nodes->getInfoVecs().membrLinkRiMidX.begin(), nodes->getInfoVecs().membrLinkRiMidY.begin(), nodes->getInfoVecs().membrBendLeftX.begin(), nodes->getInfoVecs().membrBendLeftY.begin(), nodes->getInfoVecs().membrBendRightX.begin(), nodes->getInfoVecs().membrBendRightY.begin())) + allocPara_m.bdryNodeCount, AddMembrForce(allocPara_m.bdryNodeCount, maxAllNodePerCell, nodeLocXAddr, nodeLocYAddr, nodeIsActiveAddr, grthPrgrCriVal_M)); /**Ali Comment start thrust::transform( thrust::make_zip_iterator( thrust::make_tuple( thrust::make_permutation_iterator( cellInfoVecs.activeMembrNodeCounts.begin(), make_transform_iterator(iBegin, DivideFunctor(maxAllNodePerCell))), make_transform_iterator(iBegin, DivideFunctor(maxAllNodePerCell)), make_transform_iterator(iBegin, ModuloFunctor(maxAllNodePerCell)), nodes->getInfoVecs().nodeLocX.begin() + allocPara_m.bdryNodeCount, nodes->getInfoVecs().nodeLocY.begin() + allocPara_m.bdryNodeCount, nodes->getInfoVecs().nodeVelX.begin() + allocPara_m.bdryNodeCount, nodes->getInfoVecs().nodeVelY.begin() + allocPara_m.bdryNodeCount)), thrust::make_zip_iterator( thrust::make_tuple( thrust::make_permutation_iterator( cellInfoVecs.activeMembrNodeCounts.begin(), make_transform_iterator(iBegin, DivideFunctor(maxAllNodePerCell))), make_transform_iterator(iBegin, DivideFunctor(maxAllNodePerCell)), make_transform_iterator(iBegin, ModuloFunctor(maxAllNodePerCell)), nodes->getInfoVecs().nodeLocX.begin() + allocPara_m.bdryNodeCount, nodes->getInfoVecs().nodeLocY.begin() + allocPara_m.bdryNodeCount, nodes->getInfoVecs().nodeVelX.begin() + allocPara_m.bdryNodeCount, nodes->getInfoVecs().nodeVelY.begin() + allocPara_m.bdryNodeCount)) + totalNodeCountForActiveCells, thrust::make_zip_iterator( thrust::make_tuple(nodes->getInfoVecs().nodeVelX.begin(), nodes->getInfoVecs().nodeVelY.begin(), nodes->getInfoVecs().membrTensionMag.begin(), nodes->getInfoVecs().membrTenMagRi.begin(), nodes->getInfoVecs().membrLinkRiMidX.begin(), nodes->getInfoVecs().membrLinkRiMidY.begin(), nodes->getInfoVecs().membrBendLeftX.begin(), nodes->getInfoVecs().membrBendLeftY.begin(), nodes->getInfoVecs().membrBendRightX.begin(), nodes->getInfoVecs().membrBendRightY.begin())) + allocPara_m.bdryNodeCount, AddMembrForce(allocPara_m.bdryNodeCount, maxAllNodePerCell, nodeLocXAddr, nodeLocYAddr, nodeIsActiveAddr)); **/ // Ali comment end //Ali //Ali double* bendLeftXAddr = thrust::raw_pointer_cast( &(nodes->getInfoVecs().membrBendLeftX[0])); double* bendLeftYAddr = thrust::raw_pointer_cast( &(nodes->getInfoVecs().membrBendLeftY[0])); double* bendRightXAddr = thrust::raw_pointer_cast( &(nodes->getInfoVecs().membrBendRightX[0])); double* bendRightYAddr = thrust::raw_pointer_cast( 
&(nodes->getInfoVecs().membrBendRightY[0])); thrust::transform( thrust::make_zip_iterator( thrust::make_tuple( thrust::make_permutation_iterator( cellInfoVecs.activeMembrNodeCounts.begin(), make_transform_iterator(iBegin1, DivideFunctor(maxAllNodePerCell))), make_transform_iterator(iBegin1, DivideFunctor(maxAllNodePerCell)), make_transform_iterator(iBegin1, ModuloFunctor(maxAllNodePerCell)), nodes->getInfoVecs().nodeVelX.begin(), nodes->getInfoVecs().nodeVelY.begin())), thrust::make_zip_iterator( thrust::make_tuple( thrust::make_permutation_iterator( cellInfoVecs.activeMembrNodeCounts.begin(), make_transform_iterator(iBegin1, DivideFunctor(maxAllNodePerCell))), make_transform_iterator(iBegin1, DivideFunctor(maxAllNodePerCell)), make_transform_iterator(iBegin1, ModuloFunctor(maxAllNodePerCell)), nodes->getInfoVecs().nodeVelX.begin(), nodes->getInfoVecs().nodeVelY.begin())) + totalNodeCountForActiveCells, thrust::make_zip_iterator( thrust::make_tuple(nodes->getInfoVecs().nodeVelX.begin(), nodes->getInfoVecs().nodeVelY.begin())), AddMembrBend(maxAllNodePerCell, nodeIsActiveAddr, bendLeftXAddr, bendLeftYAddr, bendRightXAddr, bendRightYAddr)); } //AAMIRI void SceCells::findTangentAndNormal_M() { uint totalNodeCountForActiveCells = allocPara_m.currentActiveCellCount * allocPara_m.maxAllNodePerCell; uint maxAllNodePerCell = allocPara_m.maxAllNodePerCell; thrust::counting_iterator<uint> iBegin(0), iBegin1(0); double* nodeLocXAddr = thrust::raw_pointer_cast( &(nodes->getInfoVecs().nodeLocX[0])); double* nodeLocYAddr = thrust::raw_pointer_cast( &(nodes->getInfoVecs().nodeLocY[0])); bool* nodeIsActiveAddr = thrust::raw_pointer_cast( &(nodes->getInfoVecs().nodeIsActive[0])); thrust::transform( thrust::make_zip_iterator( thrust::make_tuple( thrust::make_permutation_iterator( cellInfoVecs.activeMembrNodeCounts.begin(), make_transform_iterator(iBegin, DivideFunctor(maxAllNodePerCell))), make_transform_iterator(iBegin, DivideFunctor(maxAllNodePerCell)), make_transform_iterator(iBegin1, ModuloFunctor(maxAllNodePerCell)), nodes->getInfoVecs().nodeF_MI_M_x.begin(), //AliE nodes->getInfoVecs().nodeF_MI_M_y.begin(), //AliE nodes->getInfoVecs().nodeF_MI_M_T.begin(), //AliE nodes->getInfoVecs().nodeF_MI_M_N.begin(), //AliE nodes->getInfoVecs().nodeCurvature.begin(), nodes->getInfoVecs().nodeExtForceX.begin(), nodes->getInfoVecs().nodeExtForceY.begin())), thrust::make_zip_iterator( thrust::make_tuple( thrust::make_permutation_iterator( cellInfoVecs.activeMembrNodeCounts.begin(), make_transform_iterator(iBegin1, DivideFunctor(maxAllNodePerCell))), make_transform_iterator(iBegin1, DivideFunctor(maxAllNodePerCell)), make_transform_iterator(iBegin1, ModuloFunctor(maxAllNodePerCell)), nodes->getInfoVecs().nodeF_MI_M_x.begin(), //AliE nodes->getInfoVecs().nodeF_MI_M_y.begin(), //AliE nodes->getInfoVecs().nodeF_MI_M_T.begin(), //AliE nodes->getInfoVecs().nodeF_MI_M_N.begin(), //AliE nodes->getInfoVecs().nodeCurvature.begin(), nodes->getInfoVecs().nodeExtForceX.begin(), nodes->getInfoVecs().nodeExtForceY.begin())) + totalNodeCountForActiveCells, thrust::make_zip_iterator( thrust::make_tuple(nodes->getInfoVecs().nodeF_MI_M_T.begin(), nodes->getInfoVecs().nodeF_MI_M_N.begin(), nodes->getInfoVecs().nodeCurvature.begin(), nodes->getInfoVecs().nodeExtForceTangent.begin(), nodes->getInfoVecs().nodeExtForceNormal.begin(), nodes->getInfoVecs().membrDistToRi.begin())), CalCurvatures(maxAllNodePerCell, nodeIsActiveAddr, nodeLocXAddr, nodeLocYAddr)); } void SceCells::runAblationTest(AblationEvent& ablEvent) { for (uint i = 0; i < 
ablEvent.ablationCells.size(); i++) { int cellRank = ablEvent.ablationCells[i].cellNum; std::vector<uint> removeSeq = ablEvent.ablationCells[i].nodeNums; cellInfoVecs.activeNodeCountOfThisCell[cellRank] = cellInfoVecs.activeNodeCountOfThisCell[cellRank] - removeSeq.size(); nodes->removeNodes(cellRank, removeSeq); } } void SceCells::computeCenterPos_M() { totalNodeCountForActiveCells = allocPara_m.currentActiveCellCount * allocPara_m.maxAllNodePerCell; thrust::counting_iterator<uint> iBegin(0); thrust::counting_iterator<uint> countingEnd(totalNodeCountForActiveCells); //uint totalMembrActiveNodeCount = thrust::reduce( // cellInfoVecs.activeMembrNodeCounts.begin(), // cellInfoVecs.activeMembrNodeCounts.begin() // + allocPara_m.currentActiveCellCount); uint totalIntnlActiveNodeCount = thrust::reduce( cellInfoVecs.activeIntnlNodeCounts.begin(), cellInfoVecs.activeIntnlNodeCounts.begin() + allocPara_m.currentActiveCellCount); thrust::copy_if( thrust::make_zip_iterator( thrust::make_tuple( make_transform_iterator(iBegin, DivideFunctor( allocPara_m.maxAllNodePerCell)), nodes->getInfoVecs().nodeLocX.begin() + allocPara_m.bdryNodeCount, nodes->getInfoVecs().nodeLocY.begin() + allocPara_m.bdryNodeCount)), thrust::make_zip_iterator( thrust::make_tuple( make_transform_iterator(iBegin, DivideFunctor( allocPara_m.maxAllNodePerCell)), nodes->getInfoVecs().nodeLocX.begin() + allocPara_m.bdryNodeCount, nodes->getInfoVecs().nodeLocY.begin() + allocPara_m.bdryNodeCount)) + totalNodeCountForActiveCells, thrust::make_zip_iterator( thrust::make_tuple( nodes->getInfoVecs().nodeIsActive.begin(), nodes->getInfoVecs().nodeCellType.begin())) + allocPara_m.bdryNodeCount, thrust::make_zip_iterator( thrust::make_tuple(cellNodeInfoVecs.cellRanks.begin(), cellNodeInfoVecs.activeXPoss.begin(), cellNodeInfoVecs.activeYPoss.begin())), ActiveAndIntnl()); thrust::reduce_by_key(cellNodeInfoVecs.cellRanks.begin(), cellNodeInfoVecs.cellRanks.begin() + totalIntnlActiveNodeCount, thrust::make_zip_iterator( thrust::make_tuple(cellNodeInfoVecs.activeXPoss.begin(), cellNodeInfoVecs.activeYPoss.begin())), cellInfoVecs.cellRanksTmpStorage.begin(), thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.centerCoordX.begin(), cellInfoVecs.centerCoordY.begin())), thrust::equal_to<uint>(), CVec2Add()); thrust::transform( thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.centerCoordX.begin(), cellInfoVecs.centerCoordY.begin())), thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.centerCoordX.begin(), cellInfoVecs.centerCoordY.begin())) + allocPara_m.currentActiveCellCount, cellInfoVecs.activeIntnlNodeCounts.begin(), thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.centerCoordX.begin(), cellInfoVecs.centerCoordY.begin())), CVec2Divide()); } void SceCells::BC_Imp_M() { /* thrust::device_vector<double>::iterator MinX_Itr=thrust::min_element( cellInfoVecs.centerCoordX.begin(), cellInfoVecs.centerCoordX.begin()+allocPara_m.currentActiveCellCount ) ; thrust::device_vector<double>::iterator MaxX_Itr=thrust::max_element( cellInfoVecs.centerCoordX.begin(), cellInfoVecs.centerCoordX.begin()+allocPara_m.currentActiveCellCount ) ; thrust::device_vector<double>::iterator MinY_Itr=thrust::min_element( cellInfoVecs.centerCoordY.begin(), cellInfoVecs.centerCoordY.begin()+allocPara_m.currentActiveCellCount ) ; thrust::device_vector<double>::iterator MaxY_Itr=thrust::max_element( cellInfoVecs.centerCoordY.begin(), cellInfoVecs.centerCoordY.begin()+allocPara_m.currentActiveCellCount ) ; MinX= *MinX_Itr ; MaxX= *MaxX_Itr ; MinY= 
*MinY_Itr ; MaxY= *MaxY_Itr ; */ //cout<<"The minimum location of cell cetners in Y in BC_Imp_M is="<<Tisu_MinX<< endl; //cout<<"The maximum location of cell centers in Y in BC_Imp_M is="<<Tisu_MaxX<< endl; //cout<<"The minimum location of cell centers in Y in BC_Imp_M is="<<Tisu_MinY<< endl; //cout<<"The maximum location of cell centers in Y in BC_Imp_M is="<<Tisu_MaxY<< endl; /** thrust::transform( thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.centerCoordX.begin(), cellInfoVecs.centerCoordY.begin()) ), thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.centerCoordX.begin(), cellInfoVecs.centerCoordY.begin())) + allocPara_m.currentActiveCellCount, thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.centerCoordX.begin(), cellInfoVecs.centerCoordY.begin())), BC_Tissue_Damp(Damp_Coef)) ; **/ int NumActCells=allocPara_m.currentActiveCellCount ; //Ali thrust::transform( thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.centerCoordX.begin(), cellInfoVecs.centerCoordY.begin(), cellInfoVecs.Cell_Damp.begin()) ), thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.centerCoordX.begin(), cellInfoVecs.centerCoordY.begin(), cellInfoVecs.Cell_Damp.begin())) + allocPara_m.currentActiveCellCount, thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.centerCoordX.begin(), cellInfoVecs.Cell_Damp.begin())), BC_Tissue_Damp(Tisu_MinX,Tisu_MaxX,Tisu_MinY,Tisu_MaxY,Damp_Coef,NumActCells)) ; /**void SceCells::randomizeGrowth() { thrust::counting_iterator<uint> countingBegin(0); thrust::transform( thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.centerCoordX.begin(), cellInfoVecs.growthXDir.begin(), cellInfoVecs.growthYDir.begin(), cellInfoVecs.isRandGrowInited.begin(), countingBegin)), thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.centerCoordX.begin(), cellInfoVecs.growthXDir.begin(), cellInfoVecs.growthYDir.begin(), cellInfoVecs.isRandGrowInited.begin(), countingBegin)) + allocPara.currentActiveCellCount, thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.growthSpeed.begin(), cellInfoVecs.growthXDir.begin(), cellInfoVecs.growthYDir.begin(), cellInfoVecs.isRandGrowInited.begin())), AssignRandIfNotInit(growthAuxData.randomGrowthSpeedMin, growthAuxData.randomGrowthSpeedMax, allocPara.currentActiveCellCount, growthAuxData.randGenAuxPara)); } **/ } void SceCells::growAtRandom_M(double dt) { totalNodeCountForActiveCells = allocPara_m.currentActiveCellCount * allocPara_m.maxAllNodePerCell; randomizeGrowth_M(); updateGrowthProgress_M(); decideIsScheduleToGrow_M(); //computeCellTargetLength_M(); //computeDistToCellCenter_M(); //findMinAndMaxDistToCenter_M(); //computeLenDiffExpCur_M(); //stretchCellGivenLenDiff_M(); addPointIfScheduledToGrow_M(); //decideIsScheduleToShrink_M();// AAMIRI May5 //delPointIfScheduledToGrow_M();//AAMIRI - commented out on June20 adjustGrowthInfo_M(); } void SceCells::divide2D_M() { bool isDivisionPresent = decideIfGoingToDivide_M(); bool isEnteringMitotic = decideIfAnyCellEnteringMitotic() ; //A&A //A&A if (isEnteringMitotic){ std::cout<< "I am in EnteringMitotic"<< std::endl; copyCellsEnterMitotic(); findHertwigAxis(); } //A&A if (!isDivisionPresent) { return; } //aniDebug = true; copyCellsPreDivision_M(); createTwoNewCellArr_M(); copyFirstCellArr_M(); copySecondCellArr_M(); updateActiveCellCount_M(); markIsDivideFalse_M(); //divDebug(); } void SceCells::distributeCellGrowthProgress_M() { totalNodeCountForActiveCells = allocPara_m.currentActiveCellCount * allocPara_m.maxAllNodePerCell; 
thrust::counting_iterator<uint> countingBegin(0); thrust::counting_iterator<uint> countingEnd(totalNodeCountForActiveCells); thrust::copy( thrust::make_permutation_iterator( cellInfoVecs.growthProgress.begin(), make_transform_iterator(countingBegin, DivideFunctor(allocPara_m.maxAllNodePerCell))), thrust::make_permutation_iterator( cellInfoVecs.growthProgress.begin(), make_transform_iterator(countingEnd, DivideFunctor(allocPara_m.maxAllNodePerCell))), nodes->getInfoVecs().nodeGrowPro.begin() + allocPara_m.bdryNodeCount); std::cout << "the vlaue of init time stage in distributeCellGrowthProgress_M is"<< InitTimeStage << std:: endl ; if (curTime <= InitTimeStage+dt)//AAMIRI /A & A thrust::copy( cellInfoVecs.growthProgress.begin(), cellInfoVecs.growthProgress.end(), cellInfoVecs.lastCheckPoint.begin() ); } void SceCells::allComponentsMove_M() { //moveNodes_M(); //Ali moveNodes_BC_M(); //Ali } //Ali modified this function to introduce differential proliferation rates void SceCells::randomizeGrowth_M() { double CntrTisuX=0.5*(Tisu_MaxX-Tisu_MinX) ; double CntrTisuY=0.5*(Tisu_MaxY-Tisu_MinY) ; //cout<<"The minimum location of cell cetners in Y in randomizeGrowth_M is="<<Tisu_MinX<< endl; //cout<<"The maximum location of cell centers in Y in randomizeGrowth_M is="<<Tisu_MaxX<< endl; //cout<<"The minimum location of cell centers in Y in randomizeGrowth_M is="<<Tisu_MinY<< endl; //cout<<"The maximum location of cell centers in Y in randomizeGrowth_M is="<<Tisu_MaxY<< endl; uint seed = time(NULL); thrust::counting_iterator<uint> countingBegin(0); thrust::transform( thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.growthSpeed.begin(), cellInfoVecs.centerCoordX.begin(), cellInfoVecs.centerCoordY.begin(), cellInfoVecs.isRandGrowInited.begin(), countingBegin)), thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.growthSpeed.begin(), cellInfoVecs.centerCoordX.begin(), cellInfoVecs.centerCoordY.begin(), cellInfoVecs.isRandGrowInited.begin(), countingBegin)) + allocPara_m.currentActiveCellCount, thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.growthSpeed.begin(), cellInfoVecs.centerCoordX.begin(), cellInfoVecs.centerCoordY.begin(), cellInfoVecs.isRandGrowInited.begin())), RandomizeGrow_M(CntrTisuX,CntrTisuY,Tisu_R,growthAuxData.randomGrowthSpeedMin, growthAuxData.randomGrowthSpeedMax, seed)); } void SceCells::updateGrowthProgress_M() { thrust::copy(cellInfoVecs.growthProgress.begin(), cellInfoVecs.growthProgress.begin() + allocPara_m.currentActiveCellCount, cellInfoVecs.growthProgressOld.begin()); /* thrust::transform(cellInfoVecs.growthSpeed.begin(), cellInfoVecs.growthSpeed.begin() + allocPara_m.currentActiveCellCount, cellInfoVecs.growthProgress.begin(), cellInfoVecs.growthProgress.begin(), SaxpyFunctorWithMaxOfOne(dt)); */ cout << " I am trying to update growth progress" << endl ; //double dummy=0 ; double mitoticCheckPoint=growthAuxData.grthPrgrCriVal_M_Ori ; thrust::transform( thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.cell_Dpp.begin(), cellInfoVecs.cell_DppOld.begin(), cellInfoVecs.growthProgress.begin(), cellInfoVecs.growthSpeed.begin())), thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.cell_Dpp.begin(), cellInfoVecs.cell_DppOld.begin(), cellInfoVecs.growthProgress.begin(), cellInfoVecs.growthSpeed.begin())) + allocPara_m.currentActiveCellCount, cellInfoVecs.growthProgress.begin(), DppGrowRegulator(dt,mitoticCheckPoint)); } void SceCells::decideIsScheduleToGrow_M() { thrust::transform( thrust::make_zip_iterator( 
thrust::make_tuple(cellInfoVecs.growthProgress.begin(), cellInfoVecs.lastCheckPoint.begin())), thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.growthProgress.begin(), cellInfoVecs.lastCheckPoint.begin())) + allocPara_m.currentActiveCellCount, cellInfoVecs.isScheduledToGrow.begin(), PtCondiOp(miscPara.growThreshold)); } //AAMIRI May5 void SceCells::decideIsScheduleToShrink_M() { double laserCenterY = 25.0; double laserCenterX = 25.0; double laserRadius = 4.0; thrust::counting_iterator<uint> iBegin(0); thrust::counting_iterator<uint> iEnd(allocPara_m.currentActiveCellCount); thrust::transform( thrust::make_zip_iterator( thrust::make_tuple(iBegin, cellInfoVecs.centerCoordX.begin(), cellInfoVecs.centerCoordY.begin(), cellInfoVecs.isScheduledToShrink.begin())), thrust::make_zip_iterator( thrust::make_tuple(iEnd, cellInfoVecs.centerCoordX.begin()+allocPara_m.currentActiveCellCount, cellInfoVecs.centerCoordY.begin()+allocPara_m.currentActiveCellCount, cellInfoVecs.isScheduledToShrink.begin()+allocPara_m.currentActiveCellCount)), cellInfoVecs.isScheduledToShrink.begin(), isDelOp(laserCenterX, laserCenterY, laserRadius)); } void SceCells::computeCellTargetLength_M() { thrust::transform(cellInfoVecs.growthProgress.begin(), cellInfoVecs.growthProgress.begin() + allocPara_m.currentActiveCellCount, cellInfoVecs.expectedLength.begin(), CompuTarLen(bioPara.cellInitLength, bioPara.cellFinalLength)); } void SceCells::computeDistToCellCenter_M() { thrust::counting_iterator<uint> iBegin(0); thrust::counting_iterator<uint> iEnd(totalNodeCountForActiveCells); uint endIndx = allocPara_m.bdryNodeCount + totalNodeCountForActiveCells; thrust::transform( thrust::make_zip_iterator( thrust::make_tuple( make_permutation_iterator( cellInfoVecs.centerCoordX.begin(), make_transform_iterator(iBegin, DivideFunctor( allocPara_m.maxAllNodePerCell))), make_permutation_iterator( cellInfoVecs.centerCoordY.begin(), make_transform_iterator(iBegin, DivideFunctor( allocPara_m.maxAllNodePerCell))), make_permutation_iterator( cellInfoVecs.growthXDir.begin(), make_transform_iterator(iBegin, DivideFunctor( allocPara_m.maxAllNodePerCell))), make_permutation_iterator( cellInfoVecs.growthYDir.begin(), make_transform_iterator(iBegin, DivideFunctor( allocPara_m.maxAllNodePerCell))), nodes->getInfoVecs().nodeLocX.begin() + allocPara_m.bdryNodeCount, nodes->getInfoVecs().nodeLocY.begin() + allocPara_m.bdryNodeCount, nodes->getInfoVecs().nodeIsActive.begin() + allocPara_m.bdryNodeCount)), thrust::make_zip_iterator( thrust::make_tuple( make_permutation_iterator( cellInfoVecs.centerCoordX.begin(), make_transform_iterator(iEnd, DivideFunctor( allocPara_m.maxAllNodePerCell))), make_permutation_iterator( cellInfoVecs.centerCoordY.begin(), make_transform_iterator(iEnd, DivideFunctor( allocPara_m.maxAllNodePerCell))), make_permutation_iterator( cellInfoVecs.growthXDir.begin(), make_transform_iterator(iEnd, DivideFunctor( allocPara_m.maxAllNodePerCell))), make_permutation_iterator( cellInfoVecs.growthYDir.begin(), make_transform_iterator(iEnd, DivideFunctor( allocPara_m.maxAllNodePerCell))), nodes->getInfoVecs().nodeLocX.begin() + endIndx, nodes->getInfoVecs().nodeLocY.begin() + endIndx, nodes->getInfoVecs().nodeIsActive.begin() + endIndx)), cellNodeInfoVecs.distToCenterAlongGrowDir.begin(), CompuDist()); } void SceCells::findMinAndMaxDistToCenter_M() { thrust::reduce_by_key( make_transform_iterator(countingBegin, DivideFunctor(allocPara_m.maxAllNodePerCell)), make_transform_iterator(countingBegin, 
DivideFunctor(allocPara_m.maxAllNodePerCell)) + totalNodeCountForActiveCells, cellNodeInfoVecs.distToCenterAlongGrowDir.begin(), cellInfoVecs.cellRanksTmpStorage.begin(), cellInfoVecs.smallestDistance.begin(), thrust::equal_to<uint>(), thrust::minimum<double>()); // for nodes of each cell, find the maximum distance from the node to the corresponding // cell center along the pre-defined growth direction. thrust::reduce_by_key( make_transform_iterator(countingBegin, DivideFunctor(allocPara_m.maxAllNodePerCell)), make_transform_iterator(countingBegin, DivideFunctor(allocPara_m.maxAllNodePerCell)) + totalNodeCountForActiveCells, cellNodeInfoVecs.distToCenterAlongGrowDir.begin(), cellInfoVecs.cellRanksTmpStorage.begin(), cellInfoVecs.biggestDistance.begin(), thrust::equal_to<uint>(), thrust::maximum<double>()); } void SceCells::computeLenDiffExpCur_M() { thrust::transform( thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.expectedLength.begin(), cellInfoVecs.smallestDistance.begin(), cellInfoVecs.biggestDistance.begin())), thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.expectedLength.begin(), cellInfoVecs.smallestDistance.begin(), cellInfoVecs.biggestDistance.begin())) + allocPara_m.currentActiveCellCount, cellInfoVecs.lengthDifference.begin(), CompuDiff()); } void SceCells::stretchCellGivenLenDiff_M() { uint count = allocPara_m.maxAllNodePerCell; uint bdry = allocPara_m.bdryNodeCount; uint actCount = totalNodeCountForActiveCells; uint all = bdry + actCount; thrust::counting_iterator<uint> iBegin(0); thrust::counting_iterator<uint> iEnd(actCount); thrust::transform( thrust::make_zip_iterator( thrust::make_tuple( cellNodeInfoVecs.distToCenterAlongGrowDir.begin(), make_permutation_iterator( cellInfoVecs.lengthDifference.begin(), make_transform_iterator(iBegin, DivideFunctor(count))), make_permutation_iterator( cellInfoVecs.growthXDir.begin(), make_transform_iterator(iBegin, DivideFunctor(count))), make_permutation_iterator( cellInfoVecs.growthYDir.begin(), make_transform_iterator(iBegin, DivideFunctor(count))), nodes->getInfoVecs().nodeVelX.begin() + bdry, nodes->getInfoVecs().nodeVelY.begin() + bdry, make_transform_iterator(iBegin, ModuloFunctor(count)))), thrust::make_zip_iterator( thrust::make_tuple( cellNodeInfoVecs.distToCenterAlongGrowDir.begin() + actCount, make_permutation_iterator( cellInfoVecs.lengthDifference.begin(), make_transform_iterator(iEnd, DivideFunctor(count))), make_permutation_iterator( cellInfoVecs.growthXDir.begin(), make_transform_iterator(iEnd, DivideFunctor(count))), make_permutation_iterator( cellInfoVecs.growthYDir.begin(), make_transform_iterator(iEnd, DivideFunctor(count))), nodes->getInfoVecs().nodeVelX.begin() + all, nodes->getInfoVecs().nodeVelY.begin() + all, make_transform_iterator(iEnd, ModuloFunctor(count)))), thrust::make_zip_iterator( thrust::make_tuple( nodes->getInfoVecs().nodeVelX.begin() + bdry, nodes->getInfoVecs().nodeVelY.begin() + bdry)), ApplyStretchForce_M(bioPara.elongationCoefficient, allocPara_m.maxMembrNodePerCell)); } void SceCells::addPointIfScheduledToGrow_M() { uint seed = time(NULL); uint activeCellCount = allocPara_m.currentActiveCellCount; thrust::counting_iterator<uint> iBegin(0); thrust::counting_iterator<uint> iEnd(activeCellCount); thrust::transform( thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.isScheduledToGrow.begin(), cellInfoVecs.activeIntnlNodeCounts.begin(), cellInfoVecs.centerCoordX.begin(), cellInfoVecs.centerCoordY.begin(), iBegin, cellInfoVecs.lastCheckPoint.begin())), 
thrust::make_zip_iterator( thrust::make_tuple( cellInfoVecs.isScheduledToGrow.begin() + activeCellCount, cellInfoVecs.activeIntnlNodeCounts.begin() + activeCellCount, cellInfoVecs.centerCoordX.begin() + activeCellCount, cellInfoVecs.centerCoordY.begin() + activeCellCount, iEnd, cellInfoVecs.lastCheckPoint.begin() + activeCellCount)), thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.lastCheckPoint.begin(), cellInfoVecs.activeIntnlNodeCounts.begin())), AddPtOp_M(seed, miscPara.addNodeDistance, miscPara.growThreshold, growthAuxData.nodeXPosAddress, growthAuxData.nodeYPosAddress, growthAuxData.nodeIsActiveAddress)); } //AAMIRI void SceCells::delPointIfScheduledToGrow_M() { uint seed = time(NULL); uint activeCellCount = allocPara_m.currentActiveCellCount; thrust::counting_iterator<uint> iBegin(0); thrust::counting_iterator<uint> iEnd(activeCellCount); int timeStep = curTime/dt; if (curTime>70000.0 && curTime<70000.1){ decideIsScheduleToShrink_M();// AAMIRI } if (curTime > 70000.0) thrust::transform( thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.isScheduledToShrink.begin(), cellInfoVecs.activeIntnlNodeCounts.begin(), cellInfoVecs.centerCoordX.begin(), cellInfoVecs.centerCoordY.begin(), iBegin, cellInfoVecs.activeMembrNodeCounts.begin(), cellInfoVecs.isCellActive.begin(), cellInfoVecs.growthSpeed.begin())), thrust::make_zip_iterator( thrust::make_tuple( cellInfoVecs.isScheduledToShrink.begin() + activeCellCount, cellInfoVecs.activeIntnlNodeCounts.begin() + activeCellCount, cellInfoVecs.centerCoordX.begin() + activeCellCount, cellInfoVecs.centerCoordY.begin() + activeCellCount, iEnd, cellInfoVecs.activeMembrNodeCounts.begin() + activeCellCount, cellInfoVecs.isCellActive.begin() + activeCellCount, cellInfoVecs.growthSpeed.begin() + activeCellCount)), thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.activeMembrNodeCounts.begin(), cellInfoVecs.activeIntnlNodeCounts.begin(), cellInfoVecs.isCellActive.begin(), cellInfoVecs.growthSpeed.begin())), DelPtOp_M(seed, timeStep, growthAuxData.adhIndxAddr, growthAuxData.nodeXPosAddress, growthAuxData.nodeYPosAddress, growthAuxData.nodeIsActiveAddress)); } bool SceCells::decideIfGoingToDivide_M() { thrust::transform( thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.growthProgress.begin(), cellInfoVecs.activeIntnlNodeCounts.begin())), thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.growthProgress.begin(), cellInfoVecs.activeIntnlNodeCounts.begin())) + allocPara_m.currentActiveCellCount, cellInfoVecs.isDividing.begin(), CompuIsDivide_M(allocPara_m.maxIntnlNodePerCell)); // sum all bool values which indicate whether the cell is going to divide. // toBeDivideCount is the total number of cells going to divide. 
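	// A minimal sketch of this count-true-flags idiom (illustrative only;
	// `flags` is a hypothetical device vector, not a member of this class):
	//   thrust::device_vector<bool> flags = ...;
	//   uint nTrue = thrust::reduce(flags.begin(), flags.end(), (uint) 0,
	//           thrust::plus<uint>());
	// Supplying a uint initial value makes the reduction accumulate in uint,
	// so each bool contributes 0 or 1 to the sum, which is how
	// toBeDivideCount is obtained in the next statement.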
divAuxData.toBeDivideCount = thrust::reduce(cellInfoVecs.isDividing.begin(), cellInfoVecs.isDividing.begin() + allocPara_m.currentActiveCellCount, (uint) (0)); if (divAuxData.toBeDivideCount > 0) { return true; } else { return false; } } //A&A bool SceCells::decideIfAnyCellEnteringMitotic() { double grthPrgrCriVal_M =growthAuxData.grthPrgrCriVal_M_Ori ; // for now constant growthAuxData.grthProgrEndCPU // - growthAuxData.prolifDecay // * (growthAuxData.grthProgrEndCPU // - growthAuxData.grthPrgrCriVal_M_Ori); thrust::transform( thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.growthProgress.begin(), cellInfoVecs.growthProgressOld.begin())), thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.growthProgress.begin(), cellInfoVecs.growthProgressOld.begin())) + allocPara_m.currentActiveCellCount, cellInfoVecs.isEnteringMitotic.begin(), CompuIsEnteringMitotic_M(grthPrgrCriVal_M)); // sum all bool values which indicate whether the cell is going to divide. // toBeDivideCount is the total number of cells going to divide. divAuxData.toEnterMitoticCount = thrust::reduce(cellInfoVecs.isEnteringMitotic.begin(), cellInfoVecs.isEnteringMitotic.begin() + allocPara_m.currentActiveCellCount, (uint) (0)); if (divAuxData.toEnterMitoticCount > 0) { return true; } else { return false; } } //AAMIRI /* bool SceCells::decideIfGoingToRemove_M() { thrust::transform( thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.growthProgress.begin(), cellInfoVecs.activeIntnlNodeCounts.begin())), thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.growthProgress.begin(), cellInfoVecs.activeIntnlNodeCounts.begin())) + allocPara_m.currentActiveCellCount, cellInfoVecs.isRemoving.begin(), CompuIsRemoving_M(allocPara_m.maxIntnlNodePerCell)); // sum all bool values which indicate whether the cell is going to divide. // toBeDivideCount is the total number of cells going to divide. divAuxData.toBeRemovingCount = thrust::reduce(cellInfoVecs.isRemoving.begin(), cellInfoVecs.isRemoving.begin() + allocPara_m.currentActiveCellCount, (uint) (0)); if (divAuxData.toBeRemovingCount > 0) { return true; } else { return false; } } */ AniRawData SceCells::obtainAniRawData(AnimationCriteria& aniCri) { uint activeCellCount = allocPara_m.currentActiveCellCount; uint maxNodePerCell = allocPara_m.maxAllNodePerCell; uint maxMemNodePerCell = allocPara_m.maxMembrNodePerCell; uint beginIndx = allocPara_m.bdryNodeCount; AniRawData rawAniData; //cout << "size of potential pairs = " << pairs.size() << endl; // unordered_map is more efficient than map, but it is a c++ 11 feature // and c++ 11 seems to be incompatible with Thrust. 
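	// The per-node fields needed for animation are first staged from the device
	// into host_vectors with one batched thrust::copy over zip_iterators (see
	// below), rather than element-by-element transfers. A minimal sketch of the
	// same pattern, assuming hypothetical vectors d_x/d_flag (device) and
	// h_x/h_flag (host) of length n:
	//   thrust::copy(
	//       thrust::make_zip_iterator(thrust::make_tuple(d_x.begin(), d_flag.begin())),
	//       thrust::make_zip_iterator(thrust::make_tuple(d_x.begin(), d_flag.begin())) + n,
	//       thrust::make_zip_iterator(thrust::make_tuple(h_x.begin(), h_flag.begin())));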
IndexMap locIndexToAniIndexMap; uint maxActiveNode = activeCellCount * maxNodePerCell; thrust::host_vector<double> hostTmpVectorLocX(maxActiveNode); thrust::host_vector<double> hostTmpVectorLocY(maxActiveNode); thrust::host_vector<bool> hostIsActiveVec(maxActiveNode); thrust::host_vector<int> hostBondVec(maxActiveNode); thrust::host_vector<double> hostTmpVectorTenMag(maxActiveNode); thrust::copy( thrust::make_zip_iterator( thrust::make_tuple(nodes->getInfoVecs().nodeLocX.begin(), nodes->getInfoVecs().nodeLocY.begin(), nodes->getInfoVecs().nodeIsActive.begin(), nodes->getInfoVecs().nodeAdhereIndex.begin(), nodes->getInfoVecs().membrTensionMag.begin())), thrust::make_zip_iterator( thrust::make_tuple(nodes->getInfoVecs().nodeLocX.begin(), nodes->getInfoVecs().nodeLocY.begin(), nodes->getInfoVecs().nodeIsActive.begin(), nodes->getInfoVecs().nodeAdhereIndex.begin(), nodes->getInfoVecs().membrTensionMag.begin())) + maxActiveNode, thrust::make_zip_iterator( thrust::make_tuple(hostTmpVectorLocX.begin(), hostTmpVectorLocY.begin(), hostIsActiveVec.begin(), hostBondVec.begin(), hostTmpVectorTenMag.begin()))); thrust::host_vector<uint> curActiveMemNodeCounts = cellInfoVecs.activeMembrNodeCounts; CVector tmpPos; uint index1; int index2; std::vector<BondInfo> bondInfoVec; double node1X, node1Y; double node2X, node2Y; double aniVal; for (uint i = 0; i < activeCellCount; i++) { for (uint j = 0; j < maxMemNodePerCell; j++) { index1 = beginIndx + i * maxNodePerCell + j; if (hostIsActiveVec[index1] == true) { index2 = hostBondVec[index1]; if (index2 > index1 && index2 != -1) { BondInfo bond; bond.cellRank1 = i; bond.pos1 = CVector(hostTmpVectorLocX[index1], hostTmpVectorLocY[index1], 0); bond.cellRank2 = (index2 - beginIndx) / maxNodePerCell; bond.pos2 = CVector(hostTmpVectorLocX[index2], hostTmpVectorLocY[index2], 0); bondInfoVec.push_back(bond); } } } } rawAniData.bondsArr = bondInfoVec; uint curIndex = 0; for (uint i = 0; i < activeCellCount; i++) { for (uint j = 0; j < curActiveMemNodeCounts[i]; j++) { index1 = beginIndx + i * maxNodePerCell + j; if (j == curActiveMemNodeCounts[i] - 1) { index2 = beginIndx + i * maxNodePerCell; } else { index2 = beginIndx + i * maxNodePerCell + j + 1; } if (hostIsActiveVec[index1] == true && hostIsActiveVec[index2] == true) { node1X = hostTmpVectorLocX[index1]; node1Y = hostTmpVectorLocY[index1]; node2X = hostTmpVectorLocX[index2]; node2Y = hostTmpVectorLocY[index2]; IndexMap::iterator it = locIndexToAniIndexMap.find(index1); if (it == locIndexToAniIndexMap.end()) { locIndexToAniIndexMap.insert( std::pair<uint, uint>(index1, curIndex)); curIndex++; tmpPos = CVector(node1X, node1Y, 0); //aniVal = hostTmpVectorNodeType[index1]; aniVal = hostTmpVectorTenMag[index1]; rawAniData.aniNodePosArr.push_back(tmpPos); rawAniData.aniNodeVal.push_back(aniVal); } it = locIndexToAniIndexMap.find(index2); if (it == locIndexToAniIndexMap.end()) { locIndexToAniIndexMap.insert( std::pair<uint, uint>(index2, curIndex)); curIndex++; tmpPos = CVector(node2X, node2Y, 0); //aniVal = hostTmpVectorNodeType[index2]; aniVal = hostTmpVectorTenMag[index2]; rawAniData.aniNodePosArr.push_back(tmpPos); rawAniData.aniNodeVal.push_back(aniVal); } it = locIndexToAniIndexMap.find(index1); uint aniIndex1 = it->second; it = locIndexToAniIndexMap.find(index2); uint aniIndex2 = it->second; LinkAniData linkData; linkData.node1Index = aniIndex1; linkData.node2Index = aniIndex2; rawAniData.memLinks.push_back(linkData); } } } for (uint i = 0; i < activeCellCount; i++) { for (uint j = 0; j < 
allocPara_m.maxIntnlNodePerCell; j++) { for (uint k = j + 1; k < allocPara_m.maxIntnlNodePerCell; k++) { index1 = i * maxNodePerCell + maxMemNodePerCell + j; index2 = i * maxNodePerCell + maxMemNodePerCell + k; if (hostIsActiveVec[index1] && hostIsActiveVec[index2]) { node1X = hostTmpVectorLocX[index1]; node1Y = hostTmpVectorLocY[index1]; node2X = hostTmpVectorLocX[index2]; node2Y = hostTmpVectorLocY[index2]; if (aniCri.isPairQualify_M(node1X, node1Y, node2X, node2Y)) { IndexMap::iterator it = locIndexToAniIndexMap.find( index1); if (it == locIndexToAniIndexMap.end()) { locIndexToAniIndexMap.insert( std::pair<uint, uint>(index1, curIndex)); curIndex++; tmpPos = CVector(node1X, node1Y, 0); //aniVal = hostTmpVectorNodeType[index1]; aniVal = -1; rawAniData.aniNodePosArr.push_back(tmpPos); rawAniData.aniNodeVal.push_back(aniVal); } it = locIndexToAniIndexMap.find(index2); if (it == locIndexToAniIndexMap.end()) { locIndexToAniIndexMap.insert( std::pair<uint, uint>(index2, curIndex)); curIndex++; tmpPos = CVector(node2X, node2Y, 0); //aniVal = hostTmpVectorNodeType[index1]; aniVal = -1; rawAniData.aniNodePosArr.push_back(tmpPos); rawAniData.aniNodeVal.push_back(aniVal); } it = locIndexToAniIndexMap.find(index1); uint aniIndex1 = it->second; it = locIndexToAniIndexMap.find(index2); uint aniIndex2 = it->second; LinkAniData linkData; linkData.node1Index = aniIndex1; linkData.node2Index = aniIndex2; rawAniData.internalLinks.push_back(linkData); } } } } } return rawAniData; } AniRawData SceCells::obtainAniRawDataGivenCellColor(vector<double>& cellColors, AnimationCriteria& aniCri, vector<double>& cellsPerimeter, vector <double> & cellsDppLevel) { //AliE cout << "I am in obtainAniRawDataGivenCellColor start"<<endl; uint activeCellCount = allocPara_m.currentActiveCellCount; uint maxNodePerCell = allocPara_m.maxAllNodePerCell; uint maxMemNodePerCell = allocPara_m.maxMembrNodePerCell; uint beginIndx = allocPara_m.bdryNodeCount; assert(cellColors.size() >= activeCellCount); assert(cellsPerimeter.size() == activeCellCount); //AliE AniRawData rawAniData; //cout << "size of potential pairs = " << pairs.size() << endl; // unordered_map is more efficient than map, but it is a c++ 11 feature // and c++ 11 seems to be incompatible with Thrust. 
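	// The map declared below assigns each node a compact animation index the
	// first time it is encountered, so nodes shared by several links are written
	// out only once. A minimal sketch of that insert-if-absent idiom (names are
	// illustrative, not members of this class):
	//   IndexMap::iterator it = aniMap.find(nodeIdx);
	//   if (it == aniMap.end()) {
	//       aniMap.insert(std::pair<uint, uint>(nodeIdx, nextAniIdx++));
	//       // position / color values are pushed only on this first encounter
	//   }
	//   uint aniIdx = aniMap.find(nodeIdx)->second;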
IndexMap locIndexToAniIndexMap; uint maxActiveNode = activeCellCount * maxNodePerCell; thrust::host_vector<double> hostTmpVectorLocX(maxActiveNode); thrust::host_vector<double> hostTmpVectorLocY(maxActiveNode); thrust::host_vector<bool> hostIsActiveVec(maxActiveNode); thrust::host_vector<int> hostBondVec(maxActiveNode); thrust::host_vector<double> hostTmpVectorTenMag(maxActiveNode); thrust::host_vector<double> hostTmpVectorF_MI_M_x(maxActiveNode);//AAMIRI //AliE thrust::host_vector<double> hostTmpVectorF_MI_M_y(maxActiveNode);//AAMIRI //AliE thrust::host_vector<double> hostTmpVectorF_MI_M_T(maxActiveNode); //AliE thrust::host_vector<double> hostTmpVectorF_MI_M_N(maxActiveNode);//AliE thrust::host_vector<double> hostTmpVectorNodeCurvature(maxActiveNode);//AAMIRI thrust::host_vector<double> hostTmpVectorExtForceTangent(maxActiveNode);//AAMIRI thrust::host_vector<double> hostTmpVectorExtForceNormal(maxActiveNode);//AAMIRI thrust::copy( thrust::make_zip_iterator( thrust::make_tuple(nodes->getInfoVecs().nodeLocX.begin(), nodes->getInfoVecs().nodeLocY.begin(), nodes->getInfoVecs().nodeF_MI_M_x.begin(),//AAMIRI //AliE nodes->getInfoVecs().nodeF_MI_M_y.begin(),//AAMIRI //AliE nodes->getInfoVecs().nodeCurvature.begin(),//AAMIRI nodes->getInfoVecs().nodeIsActive.begin(), nodes->getInfoVecs().nodeAdhereIndex.begin(), nodes->getInfoVecs().membrTensionMag.begin(), nodes->getInfoVecs().nodeExtForceTangent.begin(),//AAMIRI nodes->getInfoVecs().nodeExtForceNormal.begin())),//AAMIRI thrust::make_zip_iterator( thrust::make_tuple(nodes->getInfoVecs().nodeLocX.begin(), nodes->getInfoVecs().nodeLocY.begin(), nodes->getInfoVecs().nodeF_MI_M_x.begin(),//AAMIRI //AliE nodes->getInfoVecs().nodeF_MI_M_y.begin(),//AAMIRI //AliE nodes->getInfoVecs().nodeCurvature.begin(),//AAMIRI nodes->getInfoVecs().nodeIsActive.begin(), nodes->getInfoVecs().nodeAdhereIndex.begin(), nodes->getInfoVecs().membrTensionMag.begin(), nodes->getInfoVecs().nodeExtForceTangent.begin(),//AAMIRI nodes->getInfoVecs().nodeExtForceNormal.begin()))//AAMIRI + maxActiveNode, thrust::make_zip_iterator( thrust::make_tuple(hostTmpVectorLocX.begin(), hostTmpVectorLocY.begin(), hostTmpVectorF_MI_M_x.begin(), hostTmpVectorF_MI_M_y.begin(),//AAMIRI hostTmpVectorNodeCurvature.begin(), //AAMIRI hostIsActiveVec.begin(), hostBondVec.begin(), hostTmpVectorTenMag.begin(), hostTmpVectorExtForceTangent.begin(), hostTmpVectorExtForceNormal.begin())));//AAMIRI //Copy more than 10 elements is not allowed so, I separate it thrust::copy( thrust::make_zip_iterator( thrust::make_tuple( nodes->getInfoVecs().nodeF_MI_M_T.begin(), //Ali nodes->getInfoVecs().nodeF_MI_M_N.begin() //Ali )), thrust::make_zip_iterator( thrust::make_tuple( nodes->getInfoVecs().nodeF_MI_M_T.begin(),//AliE nodes->getInfoVecs().nodeF_MI_M_N.begin() //AliE )) + maxActiveNode, thrust::make_zip_iterator( thrust::make_tuple( hostTmpVectorF_MI_M_T.begin(), hostTmpVectorF_MI_M_N.begin() ))); thrust::host_vector<uint> curActiveMemNodeCounts = cellInfoVecs.activeMembrNodeCounts; thrust::host_vector<uint> curActiveIntnlNodeCounts = cellInfoVecs.activeIntnlNodeCounts; CVector tmpPos; CVector tmpF_MI_M ;//AAmiri CVector tmpExtForce;//AAMIRI double tmpCurv; uint index1; int index2; std::vector<BondInfo> bondInfoVec; double node1X, node1Y; double node2X, node2Y; double node1F_MI_M_x, node1F_MI_M_y;//AAMIRI //AliE double nodeExtForceT, nodeExtForceN;//AAMIRI double aniVal; double aniVal2; double tmpF_MI_M_MagN_Int[activeCellCount-1] ; //AliE //This is how the VTK file is intended to be written. 
First the memmbraen nodes are going to be written and then internal nodes. //loop on membrane nodes for (uint i = 0; i < activeCellCount; i++) { tmpF_MI_M_MagN_Int[i]=0.0 ; for (uint j = 0; j < curActiveMemNodeCounts[i]; j++) { index1 = beginIndx + i * maxNodePerCell + j; if ( hostIsActiveVec[index1]==true) { tmpCurv = hostTmpVectorNodeCurvature[index1];//AAMIRI rawAniData.aniNodeCurvature.push_back(tmpCurv);//AAMIRI node1F_MI_M_x= hostTmpVectorF_MI_M_x[index1]; //AliE node1F_MI_M_y= hostTmpVectorF_MI_M_y[index1]; //AliE tmpF_MI_M= CVector(node1F_MI_M_x, node1F_MI_M_y, 0.0); //AliE rawAniData.aniNodeF_MI_M.push_back(tmpF_MI_M); //AliE // tmpF_MI_M_MagN_Int[i]=tmpF_MI_M_MagN_Int[i]+sqrt(pow(hostTmpVectorF_MI_M_x[index1],2)+pow(hostTmpVectorF_MI_M_y[index1],2)) ; //AliE tmpF_MI_M_MagN_Int[i]=tmpF_MI_M_MagN_Int[i]+abs(hostTmpVectorF_MI_M_N[index1]) ; //AliE nodeExtForceT = hostTmpVectorExtForceTangent[index1];//AAMIRI nodeExtForceN = hostTmpVectorExtForceNormal[index1];//AAMIRI tmpExtForce = CVector(nodeExtForceT, nodeExtForceN, 0.0);//AAMIRI rawAniData.aniNodeExtForceArr.push_back(tmpExtForce); rawAniData.aniNodeRank.push_back(i);//AAMIRI } } } //loop on internal nodes for (uint i=0; i<activeCellCount; i++){ for (uint j = maxMemNodePerCell; j < maxNodePerCell; j++) { index1 = beginIndx + i * maxNodePerCell + j; if ( hostIsActiveVec[index1]==true ) { tmpCurv = hostTmpVectorNodeCurvature[index1];//AAMIRI rawAniData.aniNodeCurvature.push_back(tmpCurv);//AAMIRI node1F_MI_M_x= hostTmpVectorF_MI_M_x[index1]; //AliE node1F_MI_M_y= hostTmpVectorF_MI_M_y[index1]; //AliE tmpF_MI_M= CVector(node1F_MI_M_x, node1F_MI_M_y, 0.0); //AliE rawAniData.aniNodeF_MI_M.push_back(tmpF_MI_M); nodeExtForceT = hostTmpVectorExtForceTangent[index1];//AAMIRI nodeExtForceN = hostTmpVectorExtForceNormal[index1];//AAMIRI tmpExtForce = CVector(nodeExtForceT, nodeExtForceN, 0.0);//AAMIRI rawAniData.aniNodeExtForceArr.push_back(tmpExtForce); rawAniData.aniNodeRank.push_back(i);//AAMIRI } } } for (uint i = 0; i < activeCellCount; i++) { for (uint j = 0; j < maxMemNodePerCell; j++) { index1 = beginIndx + i * maxNodePerCell + j; if (hostIsActiveVec[index1] == true) { index2 = hostBondVec[index1]; if (index2 > index1 && index2 != -1) { BondInfo bond; bond.cellRank1 = i; bond.pos1 = CVector(hostTmpVectorLocX[index1], hostTmpVectorLocY[index1], 0); bond.cellRank2 = (index2 - beginIndx) / maxNodePerCell; bond.pos2 = CVector(hostTmpVectorLocX[index2], hostTmpVectorLocY[index2], 0); bondInfoVec.push_back(bond); } } } } rawAniData.bondsArr = bondInfoVec; uint curIndex = 0; //loop on membrane nodes for (uint i = 0; i < activeCellCount; i++) { for (uint j = 0; j < curActiveMemNodeCounts[i]; j++) { index1 = beginIndx + i * maxNodePerCell + j; if (j == curActiveMemNodeCounts[i] - 1) { index2 = beginIndx + i * maxNodePerCell; } else { index2 = beginIndx + i * maxNodePerCell + j + 1; } if (hostIsActiveVec[index1] == true && hostIsActiveVec[index2] == true) { node1X = hostTmpVectorLocX[index1]; node1Y = hostTmpVectorLocY[index1]; node2X = hostTmpVectorLocX[index2]; node2Y = hostTmpVectorLocY[index2]; IndexMap::iterator it = locIndexToAniIndexMap.find(index1); if (it == locIndexToAniIndexMap.end()) { locIndexToAniIndexMap.insert( std::pair<uint, uint>(index1, curIndex)); curIndex++; tmpPos = CVector(node1X, node1Y, 0); //aniVal = hostTmpVectorNodeType[index1]; aniVal = cellColors[i]; rawAniData.aniNodeF_MI_M_MagN_Int.push_back(tmpF_MI_M_MagN_Int[i]/cellsPerimeter[i]) ; //Ali added //aniVal2=dppLevels_Cell[i] ; aniVal2=cellsDppLevel[i] ; 
rawAniData.dppLevel.push_back(aniVal2) ; //Ali Added rawAniData.aniNodePosArr.push_back(tmpPos); rawAniData.aniNodeVal.push_back(aniVal); } it = locIndexToAniIndexMap.find(index2); if (it == locIndexToAniIndexMap.end()) { locIndexToAniIndexMap.insert( std::pair<uint, uint>(index2, curIndex)); curIndex++; tmpPos = CVector(node2X, node2Y, 0); //aniVal = hostTmpVectorNodeType[index2]; aniVal = cellColors[i]; rawAniData.aniNodeF_MI_M_MagN_Int.push_back(tmpF_MI_M_MagN_Int[i]/cellsPerimeter[i]) ; //Ali Added //aniVal2=dppLevels_Cell[i]; aniVal2=cellsDppLevel[i]; rawAniData.dppLevel.push_back(aniVal2) ; //Ali Added rawAniData.aniNodePosArr.push_back(tmpPos); rawAniData.aniNodeVal.push_back(aniVal); } it = locIndexToAniIndexMap.find(index1); uint aniIndex1 = it->second; it = locIndexToAniIndexMap.find(index2); uint aniIndex2 = it->second; LinkAniData linkData; linkData.node1Index = aniIndex1; linkData.node2Index = aniIndex2; rawAniData.memLinks.push_back(linkData); } } } //loop on internal nodes for (uint i = 0; i < activeCellCount; i++) { // for (uint j = 0; j < allocPara_m.maxAllNodePerCell; j++) { for (uint j = 0; j < allocPara_m.maxIntnlNodePerCell; j++) { for (uint k = 0; k < allocPara_m.maxAllNodePerCell; k++) { //Ali //for (uint k = j + 1; k < allocPara_m.maxIntnlNodePerCell; k++) { //Ali comment index1 = i * maxNodePerCell + maxMemNodePerCell + j; index2 = i * maxNodePerCell + k; //Ali // index2 = i * maxNodePerCell + maxMemNodePerCell + k; //Ali comment // if (hostIsActiveVec[index1] && hostIsActiveVec[index2]) { if (hostIsActiveVec[index1] && hostIsActiveVec[index2]&& index1 !=index2 ) { node1X = hostTmpVectorLocX[index1]; node1Y = hostTmpVectorLocY[index1]; node2X = hostTmpVectorLocX[index2]; node2Y = hostTmpVectorLocY[index2]; if (aniCri.isPairQualify_M(node1X, node1Y, node2X, node2Y)) { IndexMap::iterator it = locIndexToAniIndexMap.find( index1); if (it == locIndexToAniIndexMap.end()) { locIndexToAniIndexMap.insert( std::pair<uint, uint>(index1, curIndex)); curIndex++; tmpPos = CVector(node1X, node1Y, 0); //aniVal = hostTmpVectorNodeType[index1]; aniVal = cellColors[i]; rawAniData.aniNodeF_MI_M_MagN_Int.push_back(tmpF_MI_M_MagN_Int[i]/cellsPerimeter[i]) ; //Ali Added //aniVal2=dppLevels_Cell[i]; aniVal2=cellsDppLevel[i]; rawAniData.dppLevel.push_back(aniVal2) ; //Ali Added rawAniData.aniNodePosArr.push_back(tmpPos); rawAniData.aniNodeVal.push_back(aniVal); } it = locIndexToAniIndexMap.find(index2); if (it == locIndexToAniIndexMap.end()) { locIndexToAniIndexMap.insert( std::pair<uint, uint>(index2, curIndex)); curIndex++; tmpPos = CVector(node2X, node2Y, 0); //aniVal = hostTmpVectorNodeType[index1]; aniVal = cellColors[i]; rawAniData.aniNodeF_MI_M_MagN_Int.push_back(tmpF_MI_M_MagN_Int[i]/cellsPerimeter[i]) ; //Ali Added //aniVal2=dppLevels_Cell[i]; aniVal2=cellsDppLevel[i]; rawAniData.dppLevel.push_back(aniVal2) ; //Ali Added rawAniData.aniNodePosArr.push_back(tmpPos); rawAniData.aniNodeVal.push_back(aniVal); } it = locIndexToAniIndexMap.find(index1); uint aniIndex1 = it->second; it = locIndexToAniIndexMap.find(index2); uint aniIndex2 = it->second; LinkAniData linkData; linkData.node1Index = aniIndex1; linkData.node2Index = aniIndex2; rawAniData.internalLinks.push_back(linkData); } } } } } cout << "I am in obtainAniRawDataGivenCellColor end"<<endl; return rawAniData; } void SceCells::copyInitActiveNodeCount_M( std::vector<uint>& initMembrActiveNodeCounts, std::vector<uint>& initIntnlActiveNodeCounts, std::vector<double> &initGrowProgVec) { assert( initMembrActiveNodeCounts.size() == 
initIntnlActiveNodeCounts.size()); totalNodeCountForActiveCells = initMembrActiveNodeCounts.size() * allocPara_m.maxAllNodePerCell; thrust::copy(initMembrActiveNodeCounts.begin(), initMembrActiveNodeCounts.end(), cellInfoVecs.activeMembrNodeCounts.begin()); thrust::copy(initIntnlActiveNodeCounts.begin(), initIntnlActiveNodeCounts.end(), cellInfoVecs.activeIntnlNodeCounts.begin()); thrust::copy(initGrowProgVec.begin(), initGrowProgVec.end(), cellInfoVecs.growthProgress.begin()); } void SceCells::myDebugFunction() { uint maxActiveNodeCount = allocPara_m.currentActiveCellCount * allocPara_m.maxAllNodePerCell; uint maxActiveCellCount = allocPara_m.currentActiveCellCount; std::cout << "totalNodeCountforActiveCells: " << totalNodeCountForActiveCells << std::endl; std::cout << "maxAllNodePerCell: " << allocPara_m.maxAllNodePerCell << std::endl; std::cout << "maxActiveCellCount: " << maxActiveCellCount << std::endl; std::cout << "bdryNodeCount: " << allocPara_m.bdryNodeCount << std::endl; std::cout << "grow threshold: " << miscPara.growThreshold << std::endl; std::cout << std::endl; for (uint i = 0; i < maxActiveCellCount; i++) { std::cout << cellInfoVecs.growthProgress[i] << " "; } std::cout << std::endl; for (uint i = 0; i < maxActiveCellCount; i++) { std::cout << cellInfoVecs.isScheduledToGrow[i] << " "; } std::cout << std::endl; for (uint i = 0; i < maxActiveCellCount; i++) { std::cout << cellInfoVecs.lastCheckPoint[i] << " "; } std::cout << std::endl; for (uint i = 0; i < maxActiveNodeCount; i++) { if (nodes->getInfoVecs().nodeIsActive[i] && nodes->getInfoVecs().nodeCellType[i] == CellIntnl) { std::cout << nodes->getInfoVecs().nodeVelX[i] << " "; } } std::cout << std::endl; for (uint i = 0; i < maxActiveCellCount; i++) { std::cout << cellInfoVecs.activeIntnlNodeCounts[i] << " "; } std::cout << std::endl; for (uint i = 0; i < maxActiveCellCount; i++) { std::cout << cellInfoVecs.expectedLength[i] << " "; } std::cout << std::endl; for (uint i = 0; i < maxActiveCellCount; i++) { std::cout << cellInfoVecs.smallestDistance[i] << " "; } std::cout << std::endl; for (uint i = 0; i < maxActiveCellCount; i++) { std::cout << cellInfoVecs.biggestDistance[i] << " "; } std::cout << std::endl; for (uint i = 0; i < maxActiveCellCount; i++) { std::cout << cellInfoVecs.lengthDifference[i] << " "; } std::cout << std::endl; for (uint i = 0; i < maxActiveCellCount; i++) { std::cout << cellInfoVecs.centerCoordX[i] << " "; } std::cout << std::endl; for (uint i = 0; i < maxActiveCellCount; i++) { std::cout << cellInfoVecs.centerCoordY[i] << " "; } std::cout << std::endl; for (uint i = 0; i < maxActiveCellCount; i++) { std::cout << cellInfoVecs.growthXDir[i] << " "; } std::cout << std::endl; for (uint i = 0; i < maxActiveCellCount; i++) { std::cout << cellInfoVecs.growthYDir[i] << " "; } std::cout << std::endl; int jj; std::cin >> jj; } void SceCells::divDebug() { std::cout << "tmpIsActive_M: "; for (uint i = 0; i < divAuxData.tmpIsActive_M.size(); i++) { std::cout << divAuxData.tmpIsActive_M[i] << " "; } std::cout << std::endl; std::cout << "tmpNodePosX_M: "; for (uint i = 0; i < divAuxData.tmpNodePosX_M.size(); i++) { std::cout << divAuxData.tmpNodePosX_M[i] << " "; } std::cout << std::endl; std::cout << "tmpNodePosY_M : "; for (uint i = 0; i < divAuxData.tmpNodePosY_M.size(); i++) { std::cout << divAuxData.tmpNodePosY_M[i] << " "; } std::cout << std::endl; std::cout << "tmpCellRank_M : "; for (uint i = 0; i < divAuxData.tmpCellRank_M.size(); i++) { std::cout << divAuxData.tmpCellRank_M[i] << " "; } std::cout << 
std::endl; std::cout << "tmpDivDirX_M : "; for (uint i = 0; i < divAuxData.tmpDivDirX_M.size(); i++) { std::cout << divAuxData.tmpDivDirX_M[i] << " "; } std::cout << std::endl; std::cout << "tmpDivDirY_M : "; for (uint i = 0; i < divAuxData.tmpDivDirY_M.size(); i++) { std::cout << divAuxData.tmpDivDirY_M[i] << " "; } std::cout << std::endl; std::cout << "tmpCenterPosX_M : "; for (uint i = 0; i < divAuxData.tmpCenterPosX_M.size(); i++) { std::cout << divAuxData.tmpCenterPosX_M[i] << " "; } std::cout << std::endl; std::cout << "tmpCenterPosY_M : "; for (uint i = 0; i < divAuxData.tmpCenterPosY_M.size(); i++) { std::cout << divAuxData.tmpCenterPosY_M[i] << " "; } std::cout << std::endl; std::cout << "tmpIsActive1_M : "; for (uint i = 0; i < divAuxData.tmpIsActive1_M.size(); i++) { std::cout << divAuxData.tmpIsActive1_M[i] << " "; } std::cout << std::endl; std::cout << "tmpXPos1_M : "; for (uint i = 0; i < divAuxData.tmpXPos1_M.size(); i++) { std::cout << divAuxData.tmpXPos1_M[i] << " "; if (i > 0 && i < allocPara_m.maxMembrNodePerCell && divAuxData.tmpIsActive1_M[i] && divAuxData.tmpIsActive1_M[i - 1] && fabs(divAuxData.tmpXPos1_M[i] - divAuxData.tmpXPos1_M[i - 1]) > 0.1) { std::cout << "11111111111111111111111, " << i << std::endl; int jj; cin >> jj; } } std::cout << std::endl; std::cout << "XPos1_onDevice : "; for (uint i = 0; i < divAuxData.tmpCellRank_M.size(); i++) { for (uint j = 0; j < allocPara_m.maxAllNodePerCell; j++) { uint index = divAuxData.tmpCellRank_M[i] * allocPara_m.maxAllNodePerCell + j; std::cout << nodes->getInfoVecs().nodeLocX[index] << " "; } } std::cout << std::endl; std::cout << "tmpYPos1_M : "; for (uint i = 0; i < divAuxData.tmpYPos1_M.size(); i++) { std::cout << divAuxData.tmpYPos1_M[i] << " "; } std::cout << std::endl; std::cout << "tmpIsActive2_M: "; for (uint i = 0; i < divAuxData.tmpIsActive2_M.size(); i++) { std::cout << divAuxData.tmpIsActive2_M[i] << " "; } std::cout << std::endl; std::cout << "tmpXPos2_M : "; for (uint i = 0; i < divAuxData.tmpXPos2_M.size(); i++) { std::cout << divAuxData.tmpXPos2_M[i] << " "; if (i > 0 && i < allocPara_m.maxMembrNodePerCell && divAuxData.tmpIsActive2_M[i] && divAuxData.tmpIsActive2_M[i - 1] && fabs(divAuxData.tmpXPos2_M[i] - divAuxData.tmpXPos2_M[i - 1]) > 0.1) { std::cout << "2222222222222222222, " << i << std::endl; int jj; cin >> jj; } } std::cout << std::endl; std::cout << "tmpYPos2_M : "; for (uint i = 0; i < divAuxData.tmpYPos2_M.size(); i++) { std::cout << divAuxData.tmpYPos2_M[i] << " "; } std::cout << std::endl; std::cout << "tmp1InternalActiveCounts: "; for (uint i = 0; i < divAuxData.tmp1InternalActiveCounts.size(); i++) { std::cout << divAuxData.tmp1InternalActiveCounts[i] << " "; } std::cout << std::endl; std::cout << "tmp2InternalActiveCounts: "; for (uint i = 0; i < divAuxData.tmp2InternalActiveCounts.size(); i++) { std::cout << divAuxData.tmp2InternalActiveCounts[i] << " "; } std::cout << std::endl; std::cout << "tmp1MemActiveCounts: "; for (uint i = 0; i < divAuxData.tmp1MemActiveCounts.size(); i++) { std::cout << divAuxData.tmp1MemActiveCounts[i] << " "; } std::cout << std::endl; std::cout << "tmp2MemActiveCounts: "; for (uint i = 0; i < divAuxData.tmp2MemActiveCounts.size(); i++) { std::cout << divAuxData.tmp2MemActiveCounts[i] << " "; } std::cout << std::endl; int jj; std::cin >> jj; } void SceCells::adjustGrowthInfo_M() { uint halfMax = allocPara_m.maxIntnlNodePerCell / 2; thrust::transform_if( thrust::make_zip_iterator( thrust::make_tuple( cellInfoVecs.activeIntnlNodeCounts.begin(), 
cellInfoVecs.growthProgress.begin(), cellInfoVecs.lastCheckPoint.begin())), thrust::make_zip_iterator( thrust::make_tuple( cellInfoVecs.activeIntnlNodeCounts.begin(), cellInfoVecs.growthProgress.begin(), cellInfoVecs.lastCheckPoint.begin())) + allocPara_m.currentActiveCellCount, cellInfoVecs.isScheduledToGrow.begin(), thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.isScheduledToGrow.begin(), cellInfoVecs.growthProgress.begin(), cellInfoVecs.lastCheckPoint.begin())), AdjustGrowth(halfMax), thrust::identity<bool>()); } VtkAnimationData SceCells::outputVtkData(AniRawData& rawAniData, AnimationCriteria& aniCri) { VtkAnimationData vtkData; for (uint i = 0; i < rawAniData.aniNodePosArr.size(); i++) { PointAniData ptAniData; ptAniData.pos = rawAniData.aniNodePosArr[i]; ptAniData.F_MI_M_MagN_Int= rawAniData.aniNodeF_MI_M_MagN_Int[i]; //AliE ptAniData.dppLevel1= rawAniData.dppLevel[i]; //AliE ptAniData.F_MI_M = rawAniData.aniNodeF_MI_M[i];//AAMIRI ptAniData.colorScale = rawAniData.aniNodeVal[i]; ptAniData.colorScale2 = rawAniData.aniNodeCurvature[i];//AAMIRI ptAniData.rankScale = rawAniData.aniNodeRank[i];//AAMIRI ptAniData.extForce = rawAniData.aniNodeExtForceArr[i];//AAMIRI vtkData.pointsAniData.push_back(ptAniData); } for (uint i = 0; i < rawAniData.internalLinks.size(); i++) { LinkAniData linkData = rawAniData.internalLinks[i]; vtkData.linksAniData.push_back(linkData); } for (uint i = 0; i < rawAniData.memLinks.size(); i++) { LinkAniData linkData = rawAniData.memLinks[i]; vtkData.linksAniData.push_back(linkData); } vtkData.isArrowIncluded = false; return vtkData; } void SceCells::copyToGPUConstMem() { double pI_CPU = acos(-1.0); double minLengthCPU = globalConfigVars.getConfigValue("MinLength").toDouble(); cudaMemcpyToSymbol(minLength, &minLengthCPU, sizeof(double)); double minDivisorCPU = globalConfigVars.getConfigValue("MinDivisor").toDouble(); cudaMemcpyToSymbol(minDivisor, &minDivisorCPU, sizeof(double)); cudaMemcpyToSymbol(membrEquLen, &membrPara.membrEquLenCPU, sizeof(double)); cudaMemcpyToSymbol(membrStiff, &membrPara.membrStiffCPU, sizeof(double)); cudaMemcpyToSymbol(membrStiff_Mitotic, &membrPara.membrStiff_Mitotic, sizeof(double)); // Ali June 30 cudaMemcpyToSymbol(pI, &pI_CPU, sizeof(double)); cudaMemcpyToSymbol(bendCoeff, &membrPara.membrBendCoeff, sizeof(double)); cudaMemcpyToSymbol(bendCoeff_Mitotic, &membrPara.membrBendCoeff_Mitotic, sizeof(double));//AAMIRI cudaMemcpyToSymbol(F_Ext_Incline_M2, &membrPara.F_Ext_Incline, sizeof(double)); //Ali uint maxAllNodePerCellCPU = globalConfigVars.getConfigValue( "MaxAllNodeCountPerCell").toInt(); uint maxMembrNodePerCellCPU = globalConfigVars.getConfigValue( "MaxMembrNodeCountPerCell").toInt(); uint maxIntnlNodePerCellCPU = globalConfigVars.getConfigValue( "MaxIntnlNodeCountPerCell").toInt(); cudaMemcpyToSymbol(maxAllNodePerCell, &maxAllNodePerCellCPU, sizeof(uint)); cudaMemcpyToSymbol(maxMembrPerCell, &maxMembrNodePerCellCPU, sizeof(uint)); cudaMemcpyToSymbol(maxIntnlPerCell, &maxIntnlNodePerCellCPU, sizeof(uint)); double sceIntnlBParaCPU_M[5]; double sceIntraParaCPU_M[5]; double sceIntraParaDivCPU_M[5]; double U0_IntnlB = globalConfigVars.getConfigValue("SceIntnlB_U0").toDouble(); double V0_IntnlB = globalConfigVars.getConfigValue("SceIntnlB_V0").toDouble(); double k1_IntnlB = globalConfigVars.getConfigValue("SceIntnlB_k1").toDouble(); double k2_IntnlB = globalConfigVars.getConfigValue("SceIntnlB_k2").toDouble(); double intnlBEffectiveRange = globalConfigVars.getConfigValue( "IntnlBEffectRange").toDouble(); 
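	// The five values read above are packed into sceIntnlBParaCPU_M below and
	// then pushed to GPU constant memory. A minimal sketch of that pattern,
	// assuming a hypothetical symbol `__constant__ double d_params[5]` declared
	// at file scope:
	//   double h_params[5] = { u0, v0, k1, k2, effectiveRange };
	//   cudaMemcpyToSymbol(d_params, h_params, 5 * sizeof(double));
	// cudaMemcpyToSymbol copies host data into a __constant__/__device__ symbol,
	// which is how sceIB_M, sceII_M and sceIIDiv_M are filled further down.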
sceIntnlBParaCPU_M[0] = U0_IntnlB; sceIntnlBParaCPU_M[1] = V0_IntnlB; sceIntnlBParaCPU_M[2] = k1_IntnlB; sceIntnlBParaCPU_M[3] = k2_IntnlB; sceIntnlBParaCPU_M[4] = intnlBEffectiveRange; ////////////////////// //// Block 3 ///////// ////////////////////// double U0_Intra = globalConfigVars.getConfigValue("IntraCell_U0").toDouble(); double V0_Intra = globalConfigVars.getConfigValue("IntraCell_V0").toDouble(); double k1_Intra = globalConfigVars.getConfigValue("IntraCell_k1").toDouble(); double k2_Intra = globalConfigVars.getConfigValue("IntraCell_k2").toDouble(); double intraLinkEffectiveRange = globalConfigVars.getConfigValue( "IntraEffectRange").toDouble(); sceIntraParaCPU_M[0] = U0_Intra; sceIntraParaCPU_M[1] = V0_Intra; sceIntraParaCPU_M[2] = k1_Intra; sceIntraParaCPU_M[3] = k2_Intra; sceIntraParaCPU_M[4] = intraLinkEffectiveRange; ////////////////////// //// Block 4 ///////// ////////////////////// double U0_Intra_Div = globalConfigVars.getConfigValue("IntraCell_U0_Div").toDouble(); double V0_Intra_Div = globalConfigVars.getConfigValue("IntraCell_V0_Div").toDouble(); double k1_Intra_Div = globalConfigVars.getConfigValue("IntraCell_k1_Div").toDouble(); double k2_Intra_Div = globalConfigVars.getConfigValue("IntraCell_k2_Div").toDouble(); double intraDivEffectiveRange = globalConfigVars.getConfigValue( "IntraDivEffectRange").toDouble(); sceIntraParaDivCPU_M[0] = U0_Intra_Div; sceIntraParaDivCPU_M[1] = V0_Intra_Div; sceIntraParaDivCPU_M[2] = k1_Intra_Div; sceIntraParaDivCPU_M[3] = k2_Intra_Div; sceIntraParaDivCPU_M[4] = intraDivEffectiveRange; cudaMemcpyToSymbol(grthPrgrCriEnd_M, &growthAuxData.grthProgrEndCPU, sizeof(double)); //cudaMemcpyToSymbol(grthPrgrCriVal_M, &growthPrgrCriVal, sizeof(double)); cudaMemcpyToSymbol(sceIB_M, sceIntnlBParaCPU_M, 5 * sizeof(double)); cudaMemcpyToSymbol(sceII_M, sceIntraParaCPU_M, 5 * sizeof(double)); cudaMemcpyToSymbol(sceIIDiv_M, sceIntraParaDivCPU_M, 5 * sizeof(double)); double IBDivHost[5]; IBDivHost[0] = globalConfigVars.getConfigValue("SceIntnlB_U0_Div").toDouble(); IBDivHost[1] = globalConfigVars.getConfigValue("SceIntnlB_V0_Div").toDouble(); IBDivHost[2] = globalConfigVars.getConfigValue("SceIntnlB_k1_Div").toDouble(); IBDivHost[3] = globalConfigVars.getConfigValue("SceIntnlB_k2_Div").toDouble(); IBDivHost[4] = globalConfigVars.getConfigValue("IntnlBDivEffectRange").toDouble(); cudaMemcpyToSymbol(sceIBDiv_M, IBDivHost, 5 * sizeof(double)); } void SceCells::handleMembrGrowth_M() { // figure out membr growth speed calMembrGrowSpeed_M(); // figure out which cells will add new point adjustMembrGrowSpeed_M(); decideIfAddMembrNode_M(); // add membr nodes addMembrNodes_M(); //membrDebug(); } void SceCells::calMembrGrowSpeed_M() { membrPara.membrGrowCoeff = growthAuxData.prolifDecay * membrPara.membrGrowCoeff_Ori; membrPara.membrGrowLimit = growthAuxData.prolifDecay * membrPara.membrGrowLimit_Ori; // reduce_by_key, find value of max tension and their index thrust::counting_iterator<uint> iBegin(0); uint maxNPerCell = allocPara_m.maxAllNodePerCell; thrust::reduce_by_key( make_transform_iterator(iBegin, DivideFunctor(maxNPerCell)), make_transform_iterator(iBegin, DivideFunctor(maxNPerCell)) + totalNodeCountForActiveCells, thrust::make_zip_iterator( thrust::make_tuple( nodes->getInfoVecs().membrTenMagRi.begin(), make_transform_iterator(iBegin, ModuloFunctor(maxNPerCell)), nodes->getInfoVecs().membrLinkRiMidX.begin(), nodes->getInfoVecs().membrLinkRiMidY.begin(), nodes->getInfoVecs().membrDistToRi.begin())), cellInfoVecs.cellRanksTmpStorage.begin(), 
thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.maxTenRiVec.begin(), cellInfoVecs.maxTenIndxVec.begin(), cellInfoVecs.maxTenRiMidXVec.begin(), cellInfoVecs.maxTenRiMidYVec.begin(), cellInfoVecs.maxDistToRiVec.begin())), thrust::equal_to<uint>(), MaxWInfo()); thrust::reduce_by_key( make_transform_iterator(iBegin, DivideFunctor(maxNPerCell)), make_transform_iterator(iBegin, DivideFunctor(maxNPerCell)) + totalNodeCountForActiveCells, nodes->getInfoVecs().membrTensionMag.begin(), cellInfoVecs.cellRanksTmpStorage.begin(), cellInfoVecs.aveTension.begin(), thrust::equal_to<uint>(), thrust::plus<double>()); thrust::transform(cellInfoVecs.aveTension.begin(), cellInfoVecs.aveTension.begin() + allocPara_m.currentActiveCellCount, cellInfoVecs.activeMembrNodeCounts.begin(), cellInfoVecs.aveTension.begin(), thrust::divides<double>()); // linear relationship with highest tension; capped by a given value thrust::transform(cellInfoVecs.aveTension.begin(), cellInfoVecs.aveTension.begin() + allocPara_m.currentActiveCellCount, cellInfoVecs.membrGrowSpeed.begin(), MultiWithLimit(membrPara.membrGrowCoeff, membrPara.membrGrowLimit)); } void SceCells::adjustMembrGrowSpeed_M() { thrust::transform( thrust::make_zip_iterator( thrust::make_tuple( cellInfoVecs.activeMembrNodeCounts.begin(), cellInfoVecs.activeIntnlNodeCounts.begin())), thrust::make_zip_iterator( thrust::make_tuple( cellInfoVecs.activeMembrNodeCounts.begin(), cellInfoVecs.activeIntnlNodeCounts.begin())) + allocPara_m.currentActiveCellCount, cellInfoVecs.membrGrowSpeed.begin(), AdjustMembrGrow(membrPara.growthConst_N, membrPara.initMembrCt_N, membrPara.initIntnlCt_N)); } void SceCells::decideIfAddMembrNode_M() { // decide if add membrane node given current active node count and // membr growth progress uint curActCellCt = allocPara_m.currentActiveCellCount; thrust::transform(cellInfoVecs.membrGrowSpeed.begin(), cellInfoVecs.membrGrowSpeed.begin() + curActCellCt, cellInfoVecs.membrGrowProgress.begin(), cellInfoVecs.membrGrowProgress.begin(), SaxpyFunctor(dt)); uint maxMembrNode = allocPara_m.maxMembrNodePerCell; /**Ali thrust::transform( thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.membrGrowProgress.begin(), cellInfoVecs.activeMembrNodeCounts.begin())), thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.membrGrowProgress.begin(), cellInfoVecs.activeMembrNodeCounts.begin())) + curActCellCt, thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.isMembrAddingNode.begin(), cellInfoVecs.membrGrowProgress.begin())), MemGrowFunc(maxMembrNode)); */ thrust::transform( thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.activeMembrNodeCounts.begin(), cellInfoVecs.membrGrowProgress.begin(),cellInfoVecs.maxDistToRiVec.begin())), thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.activeMembrNodeCounts.begin(), cellInfoVecs.membrGrowProgress.begin(),cellInfoVecs.maxDistToRiVec.begin())) + curActCellCt, thrust::make_zip_iterator( thrust::make_tuple(cellInfoVecs.isMembrAddingNode.begin(), cellInfoVecs.membrGrowProgress.begin())), MemGrowFunc(maxMembrNode)); } /** * Add new membrane elements to cells. * This operation is relatively expensive because of memory rearrangement. 
*/ void SceCells::addMembrNodes_M() { thrust::counting_iterator<uint> iBegin(0); uint curAcCCount = allocPara_m.currentActiveCellCount; uint maxNodePerCell = allocPara_m.maxAllNodePerCell; thrust::transform_if( thrust::make_zip_iterator( thrust::make_tuple(iBegin, cellInfoVecs.maxTenIndxVec.begin(), cellInfoVecs.activeMembrNodeCounts.begin(), cellInfoVecs.maxTenRiMidXVec.begin(), cellInfoVecs.maxTenRiMidYVec.begin())), thrust::make_zip_iterator( thrust::make_tuple(iBegin, cellInfoVecs.maxTenIndxVec.begin(), cellInfoVecs.activeMembrNodeCounts.begin(), cellInfoVecs.maxTenRiMidXVec.begin(), cellInfoVecs.maxTenRiMidYVec.begin())) + curAcCCount, cellInfoVecs.isMembrAddingNode.begin(), cellInfoVecs.activeMembrNodeCounts.begin(), AddMemNode(maxNodePerCell, growthAuxData.nodeIsActiveAddress, growthAuxData.nodeXPosAddress, growthAuxData.nodeYPosAddress, growthAuxData.adhIndxAddr), thrust::identity<bool>()); } void SceCells::membrDebug() { uint curAcCCount = allocPara_m.currentActiveCellCount; uint maxActiveNodeC = curAcCCount * allocPara_m.maxAllNodePerCell; uint maxNodePC = allocPara_m.maxAllNodePerCell; //uint tmp = 0; //for (uint i = 0; i < curAcCCount; i++) { // tmp += cellInfoVecs.isMembrAddingNode[i]; //} //if (tmp != 0) { // tmpDebug = true; //} //if (!tmpDebug) { // return; //} for (uint i = 0; i < maxActiveNodeC; i++) { if (i % maxNodePC == 0 || i % maxNodePC == 199 || i % maxNodePC == 200) { std::cout << nodes->getInfoVecs().membrTensionMag[i] << " "; } } std::cout << std::endl; for (uint i = 0; i < maxActiveNodeC; i++) { if (i % maxNodePC == 0 || i % maxNodePC == 199 || i % maxNodePC == 200) { std::cout << nodes->getInfoVecs().membrTenMagRi[i] << " "; } } std::cout << std::endl; for (uint i = 0; i < maxActiveNodeC; i++) { if (i % maxNodePC == 0 || i % maxNodePC == 199 || i % maxNodePC == 200) { std::cout << nodes->getInfoVecs().membrLinkRiMidX[i] << " "; } } std::cout << std::endl; for (uint i = 0; i < maxActiveNodeC; i++) { if (i % maxNodePC == 0 || i % maxNodePC == 199 || i % maxNodePC == 200) { std::cout << nodes->getInfoVecs().membrLinkRiMidY[i] << " "; } } std::cout << std::endl; for (uint i = 0; i < maxActiveNodeC; i++) { std::cout << nodes->getInfoVecs().membrBendLeftX[i] << " "; } std::cout << std::endl; for (uint i = 0; i < maxActiveNodeC; i++) { std::cout << nodes->getInfoVecs().membrBendLeftY[i] << " "; } std::cout << std::endl; for (uint i = 0; i < maxActiveNodeC; i++) { std::cout << nodes->getInfoVecs().membrBendRightX[i] << " "; } std::cout << std::endl; for (uint i = 0; i < maxActiveNodeC; i++) { std::cout << nodes->getInfoVecs().membrBendRightX[i] << " "; } std::cout << std::endl; for (uint i = 0; i < curAcCCount; i++) { std::cout << "(" << cellInfoVecs.maxTenIndxVec[i] << "," << cellInfoVecs.activeMembrNodeCounts[i] << "," << cellInfoVecs.maxTenRiMidXVec[i] << "," << cellInfoVecs.maxTenRiMidYVec[i] << ")" << std::endl; } int jj; std::cin >> jj; } void SceCells::assembleVecForTwoCells(uint i) { uint membThreshold = allocPara_m.maxMembrNodePerCell; uint maxAllNodePerCell = allocPara_m.maxAllNodePerCell; uint index; for (uint j = 0; j < membThreshold; j++) { index = i * maxAllNodePerCell + j; if (j < divAuxData.tmp1VecMem.size()) { divAuxData.tmpXPos1_M[index] = divAuxData.tmp1VecMem[j].x; divAuxData.tmpYPos1_M[index] = divAuxData.tmp1VecMem[j].y; divAuxData.tmpIsActive1_M[index] = true; } else { divAuxData.tmpIsActive1_M[index] = false; } } for (uint j = 0; j < membThreshold; j++) { index = i * maxAllNodePerCell + j; if (j < divAuxData.tmp2VecMem.size()) { 
divAuxData.tmpXPos2_M[index] = divAuxData.tmp2VecMem[j].x; divAuxData.tmpYPos2_M[index] = divAuxData.tmp2VecMem[j].y; divAuxData.tmpIsActive2_M[index] = true; } else { divAuxData.tmpIsActive2_M[index] = false; } } divAuxData.tmp1MemActiveCounts.push_back(divAuxData.tmp1VecMem.size()); divAuxData.tmp2MemActiveCounts.push_back(divAuxData.tmp2VecMem.size()); for (uint j = membThreshold; j < maxAllNodePerCell; j++) { index = i * maxAllNodePerCell + j; uint shift_j = j - membThreshold; if (shift_j < divAuxData.tmp1IntnlVec.size()) { divAuxData.tmpXPos1_M[index] = divAuxData.tmp1IntnlVec[shift_j].x; divAuxData.tmpYPos1_M[index] = divAuxData.tmp1IntnlVec[shift_j].y; divAuxData.tmpIsActive1_M[index] = true; } else { divAuxData.tmpIsActive1_M[index] = false; } if (shift_j < divAuxData.tmp2IntnlVec.size()) { divAuxData.tmpXPos2_M[index] = divAuxData.tmp2IntnlVec[shift_j].x; divAuxData.tmpYPos2_M[index] = divAuxData.tmp2IntnlVec[shift_j].y; divAuxData.tmpIsActive2_M[index] = true; } else { divAuxData.tmpIsActive2_M[index] = false; } } divAuxData.tmp1InternalActiveCounts.push_back( divAuxData.tmp1IntnlVec.size()); divAuxData.tmp2InternalActiveCounts.push_back( divAuxData.tmp2IntnlVec.size()); } void SceCells::shiftIntnlNodesByCellCenter(CVector cell1Center, CVector cell2Center) { CVector tmpCell1Center(0, 0, 0); for (uint j = 0; j < divAuxData.tmp1IntnlVec.size(); j++) { tmpCell1Center = tmpCell1Center + divAuxData.tmp1IntnlVec[j]; } tmpCell1Center = tmpCell1Center / divAuxData.tmp1IntnlVec.size(); CVector shiftVec1 = cell1Center - tmpCell1Center; for (uint j = 0; j < divAuxData.tmp1IntnlVec.size(); j++) { divAuxData.tmp1IntnlVec[j] = divAuxData.tmp1IntnlVec[j] + shiftVec1; } CVector tmpCell2Center(0, 0, 0); for (uint j = 0; j < divAuxData.tmp2IntnlVec.size(); j++) { tmpCell2Center = tmpCell2Center + divAuxData.tmp2IntnlVec[j]; } tmpCell2Center = tmpCell2Center / divAuxData.tmp2IntnlVec.size(); CVector shiftVec2 = cell2Center - tmpCell2Center; for (uint j = 0; j < divAuxData.tmp2IntnlVec.size(); j++) { divAuxData.tmp2IntnlVec[j] = divAuxData.tmp2IntnlVec[j] + shiftVec2; } } void SceCells::processMemVec(std::vector<VecVal>& tmp1, std::vector<VecVal>& tmp2) { divAuxData.tmp1VecMem.clear(); divAuxData.tmp2VecMem.clear(); uint membThreshold = allocPara_m.maxMembrNodePerCell; std::sort(tmp1.begin(), tmp1.end()); std::sort(tmp2.begin(), tmp2.end()); //assert(tmp1.size() < allocPara_m.maxMembrNodePerCell); //assert(tmp2.size() < allocPara_m.maxMembrNodePerCell); uint maxDivMembrNodeCount1 = allocPara_m.maxMembrNodePerCell - tmp1.size(); uint maxDivMembrNodeCount2 = allocPara_m.maxMembrNodePerCell - tmp2.size(); std::vector<CVector> ptsBetween1, ptsBetween2; // if size is less than 1, the situation would have already been very bad. // Just keep this statement so no seg fault would happen. if (tmp1.size() >= 1) { ptsBetween1 = obtainPtsBetween(tmp1[tmp1.size() - 1].vec, tmp1[0].vec, memNewSpacing, maxDivMembrNodeCount1); } // if size is less than 1, the situation would have already been very bad. // Just keep this statement so no seg fault would happen. 
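	// obtainPtsBetween (not shown in this section) closes the membrane gap left
	// on each daughter cell after the split: it places points spaced roughly
	// memNewSpacing apart between the last and first retained membrane nodes,
	// capped by maxDivMembrNodeCount so the total stays within
	// allocPara_m.maxMembrNodePerCell (checked by the asserts at the end).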
if (tmp2.size() >= 1) { ptsBetween2 = obtainPtsBetween(tmp2[tmp2.size() - 1].vec, tmp2[0].vec, memNewSpacing, maxDivMembrNodeCount2); } for (uint j = 0; j < tmp1.size(); j++) { divAuxData.tmp1VecMem.push_back(tmp1[j].vec); } for (uint j = 0; j < tmp2.size(); j++) { divAuxData.tmp2VecMem.push_back(tmp2[j].vec); } for (uint j = 0; j < ptsBetween1.size(); j++) { divAuxData.tmp1VecMem.push_back(ptsBetween1[j]); } for (uint j = 0; j < ptsBetween2.size(); j++) { divAuxData.tmp2VecMem.push_back(ptsBetween2[j]); } assert(divAuxData.tmp1VecMem.size() <= membThreshold); assert(divAuxData.tmp2VecMem.size() <= membThreshold); } void SceCells::obtainMembrAndIntnlNodes(uint i, vector<CVector>& membrNodes, vector<CVector>& intnlNodes) { membrNodes.clear(); intnlNodes.clear(); uint membThreshold = allocPara_m.maxMembrNodePerCell; uint maxAllNodePerCell = allocPara_m.maxAllNodePerCell; uint index; for (uint j = 0; j < maxAllNodePerCell; j++) { index = i * maxAllNodePerCell + j; if (divAuxData.tmpIsActive_M[index] != true) { continue; } double posX = divAuxData.tmpNodePosX_M[index]; double posY = divAuxData.tmpNodePosY_M[index]; if (j < membThreshold) { // means node type is membrane CVector memPos(posX, posY, 0); membrNodes.push_back(memPos); } else { CVector intnlPos(posX, posY, 0); intnlNodes.push_back(intnlPos); } } } CVector SceCells::obtainCenter(uint i) { double oldCenterX = divAuxData.tmpCenterPosX_M[i]; double oldCenterY = divAuxData.tmpCenterPosY_M[i]; CVector centerPos(oldCenterX, oldCenterY, 0); return centerPos; } CVector SceCells::calDivDir_MajorAxis(CVector center, vector<CVector>& membrNodes, double& lenAlongMajorAxis) { // not the optimal algorithm but easy to code double maxDiff = 0; CVector majorAxisDir; for (uint i = 0; i < membrNodes.size(); i++) { CVector tmpDir = membrNodes[i] - center; CVector tmpUnitDir = tmpDir.getUnitVector(); double min = 0, max = 0; for (uint j = 0; j < membrNodes.size(); j++) { CVector tmpDir2 = membrNodes[j] - center; double tmpVecProduct = tmpDir2 * tmpUnitDir; if (tmpVecProduct < min) { min = tmpVecProduct; } if (tmpVecProduct > max) { max = tmpVecProduct; } } double diff = max - min; if (diff > maxDiff) { maxDiff = diff; majorAxisDir = tmpUnitDir; } } lenAlongMajorAxis = maxDiff; return majorAxisDir; } //A&A double SceCells::calLengthAlongHertwigAxis(CVector divDir, CVector center, vector<CVector>& membrNodes) { CVector divDirUnit = divDir.getUnitVector(); double minUnit = 0, maxUnit = 0; double minOveral = 0, maxOveral = 0; for (uint i = 0; i < membrNodes.size(); i++) { CVector tmpDir = membrNodes[i] - center; CVector tmpUnitDir = tmpDir.getUnitVector(); double tmpVecProductUnit = divDirUnit * tmpUnitDir; double tmpVecProductOveral = divDirUnit * tmpDir; if (tmpVecProductUnit < minUnit) { minUnit = tmpVecProductUnit; minOveral = tmpVecProductOveral; } if (tmpVecProductUnit > maxUnit) { maxUnit = tmpVecProductUnit; maxOveral = tmpVecProductOveral; } } double lenAlongHertwigAxis = maxOveral - minOveral; return lenAlongHertwigAxis; } void SceCells::obtainTwoNewCenters(CVector& oldCenter, CVector& divDir, double len_MajorAxis, CVector& centerNew1, CVector& centerNew2) { CVector divDirUnit = divDir.getUnitVector(); double lenChange = len_MajorAxis / 2.0 * centerShiftRatio; centerNew1 = oldCenter + lenChange * divDirUnit; centerNew2 = oldCenter - lenChange * divDirUnit; } void SceCells::prepareTmpVec(uint i, CVector divDir, CVector oldCenter, std::vector<VecVal>& tmp1, std::vector<VecVal>& tmp2) { tmp1.clear(); tmp2.clear(); uint membThreshold = 
allocPara_m.maxMembrNodePerCell; uint maxAllNodePerCell = allocPara_m.maxAllNodePerCell; uint index; VecVal tmpData; CVector splitDir = divDir.rotateNintyDeg_XY_CC(); for (uint j = 0; j < maxAllNodePerCell; j++) { index = i * maxAllNodePerCell + j; if (j < membThreshold) { // means node type is membrane if (divAuxData.tmpIsActive_M[index] == true) { CVector memPos(divAuxData.tmpNodePosX_M[index], divAuxData.tmpNodePosY_M[index], 0); CVector centerToPosDir = memPos - oldCenter; CVector centerToPosUnit = centerToPosDir.getUnitVector(); CVector crossProduct = Cross(centerToPosDir, splitDir); double dotProduct = centerToPosUnit * splitDir; tmpData.val = dotProduct; tmpData.vec = memPos; if (crossProduct.z >= 0) { // counter-cloce wise tmp1.push_back(tmpData); } else { // cloce wise tmp2.push_back(tmpData); } } } else { if (divAuxData.tmpIsActive_M[index] == true) { CVector internalPos(divAuxData.tmpNodePosX_M[index], divAuxData.tmpNodePosY_M[index], 0); CVector centerToPosDir = internalPos - oldCenter; CVector shrinkedPos = centerToPosDir * shrinkRatio + oldCenter; double dotProduct = centerToPosDir * divDir; if (dotProduct > 0) { divAuxData.tmp1IntnlVec.push_back(shrinkedPos); } else { divAuxData.tmp2IntnlVec.push_back(shrinkedPos); } } } } } void SceCells::calCellArea() { thrust::counting_iterator<uint> iBegin(0), iBegin2(0); totalNodeCountForActiveCells = allocPara_m.currentActiveCellCount * allocPara_m.maxAllNodePerCell; uint maxAllNodePerCell = allocPara_m.maxAllNodePerCell; double* nodeLocXAddr = thrust::raw_pointer_cast( &(nodes->getInfoVecs().nodeLocX[0])); double* nodeLocYAddr = thrust::raw_pointer_cast( &(nodes->getInfoVecs().nodeLocY[0])); bool* nodeIsActiveAddr = thrust::raw_pointer_cast( &(nodes->getInfoVecs().nodeIsActive[0])); thrust::reduce_by_key( make_transform_iterator(iBegin, DivideFunctor(maxAllNodePerCell)), make_transform_iterator(iBegin, DivideFunctor(maxAllNodePerCell)) + totalNodeCountForActiveCells, thrust::make_transform_iterator( thrust::make_zip_iterator( thrust::make_tuple( thrust::make_permutation_iterator( cellInfoVecs.activeMembrNodeCounts.begin(), make_transform_iterator(iBegin, DivideFunctor( maxAllNodePerCell))), make_transform_iterator(iBegin, DivideFunctor(maxAllNodePerCell)), make_transform_iterator(iBegin, ModuloFunctor(maxAllNodePerCell)), make_permutation_iterator( cellInfoVecs.centerCoordX.begin(), make_transform_iterator(iBegin, DivideFunctor( maxAllNodePerCell))), make_permutation_iterator( cellInfoVecs.centerCoordY.begin(), make_transform_iterator(iBegin, DivideFunctor( maxAllNodePerCell))))), CalTriArea(maxAllNodePerCell, nodeIsActiveAddr, nodeLocXAddr, nodeLocYAddr)), cellInfoVecs.cellRanksTmpStorage.begin(), cellInfoVecs.cellAreaVec.begin(), thrust::equal_to<uint>(), thrust::plus<double>()); } //AAMIRI added to calculate Perimeter of each cell void SceCells::calCellPerim() { thrust::counting_iterator<uint> iBegin(0), iBegin2(0); totalNodeCountForActiveCells = allocPara_m.currentActiveCellCount * allocPara_m.maxAllNodePerCell; uint maxAllNodePerCell = allocPara_m.maxAllNodePerCell; double* nodeLocXAddr = thrust::raw_pointer_cast( &(nodes->getInfoVecs().nodeLocX[0])); double* nodeLocYAddr = thrust::raw_pointer_cast( &(nodes->getInfoVecs().nodeLocY[0])); bool* nodeIsActiveAddr = thrust::raw_pointer_cast( &(nodes->getInfoVecs().nodeIsActive[0])); thrust::reduce_by_key( make_transform_iterator(iBegin, DivideFunctor(maxAllNodePerCell)), make_transform_iterator(iBegin, DivideFunctor(maxAllNodePerCell)) + totalNodeCountForActiveCells, 
thrust::make_transform_iterator( thrust::make_zip_iterator( thrust::make_tuple( thrust::make_permutation_iterator( cellInfoVecs.activeMembrNodeCounts.begin(), make_transform_iterator(iBegin, DivideFunctor( maxAllNodePerCell))), make_transform_iterator(iBegin, DivideFunctor(maxAllNodePerCell)), make_transform_iterator(iBegin, ModuloFunctor(maxAllNodePerCell)), make_permutation_iterator( cellInfoVecs.centerCoordX.begin(), make_transform_iterator(iBegin, DivideFunctor( maxAllNodePerCell))), make_permutation_iterator( cellInfoVecs.centerCoordY.begin(), make_transform_iterator(iBegin, DivideFunctor( maxAllNodePerCell))))), CalPerim(maxAllNodePerCell, nodeIsActiveAddr, nodeLocXAddr, nodeLocYAddr)), cellInfoVecs.cellRanksTmpStorage.begin(), cellInfoVecs.cellPerimVec.begin(), thrust::equal_to<uint>(), thrust::plus<double>()); } CellsStatsData SceCells::outputPolyCountData() { cout << " I am at begining of outpolycount"<< std::flush ; std::cout.flush(); double sumX,sumY,cntr_X_Domain,cntr_Y_Domain ; int BdryApproach ; BdryApproach=1 ; totalNodeCountForActiveCells = allocPara_m.currentActiveCellCount * allocPara_m.maxAllNodePerCell; cout << " I am before cells area"<< endl ; calCellArea(); cout << " I am after cells area" << endl ; calCellPerim();//AAMIRI CellsStatsData result; cout << " I am after result" << endl ; uint bdryCriteria = globalConfigVars.getConfigValue("BdryCellCriteria").toInt(); // already on host; no need to call thrust::copy thrust::host_vector<int> adhIndxHost = nodes->getInfoVecs().nodeAdhIndxHostCopy; thrust::host_vector<double> growthProVecHost( allocPara_m.currentActiveCellCount); thrust::copy(cellInfoVecs.growthProgress.begin(), cellInfoVecs.growthProgress.begin() + allocPara_m.currentActiveCellCount, growthProVecHost.begin()); thrust::host_vector<double> growthProMembrVecHost( allocPara_m.currentActiveCellCount); thrust::copy(cellInfoVecs.membrGrowProgress.begin(), cellInfoVecs.membrGrowProgress.begin() + allocPara_m.currentActiveCellCount, growthProMembrVecHost.begin()); thrust::host_vector<uint> activeMembrNodeCountHost( allocPara_m.currentActiveCellCount); thrust::copy(cellInfoVecs.activeMembrNodeCounts.begin(), cellInfoVecs.activeMembrNodeCounts.begin() + allocPara_m.currentActiveCellCount, activeMembrNodeCountHost.begin()); thrust::host_vector<uint> activeIntnlNodeCountHost( allocPara_m.currentActiveCellCount); thrust::copy(cellInfoVecs.activeIntnlNodeCounts.begin(), cellInfoVecs.activeIntnlNodeCounts.begin() + allocPara_m.currentActiveCellCount, activeIntnlNodeCountHost.begin()); thrust::host_vector<double> centerCoordXHost( allocPara_m.currentActiveCellCount); thrust::host_vector<double> centerCoordYHost( allocPara_m.currentActiveCellCount); thrust::copy(cellInfoVecs.centerCoordX.begin(), cellInfoVecs.centerCoordX.begin() + allocPara_m.currentActiveCellCount, centerCoordXHost.begin()); thrust::copy(cellInfoVecs.centerCoordY.begin(), cellInfoVecs.centerCoordY.begin() + allocPara_m.currentActiveCellCount, centerCoordYHost.begin()); thrust::host_vector<double> cellAreaHost( allocPara_m.currentActiveCellCount); thrust::host_vector<double> cellPerimHost( allocPara_m.currentActiveCellCount);//AAMIRI thrust::host_vector<double> cellDppHost( allocPara_m.currentActiveCellCount);//Ali thrust::copy(cellInfoVecs.cellAreaVec.begin(), cellInfoVecs.cellAreaVec.begin() + allocPara_m.currentActiveCellCount, cellAreaHost.begin()); thrust::copy(cellInfoVecs.cellPerimVec.begin(), cellInfoVecs.cellPerimVec.begin() + allocPara_m.currentActiveCellCount, cellPerimHost.begin());//AAMIRI 
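	// Each per-cell statistic (growth progress, node counts, center coordinates,
	// area, perimeter, Dpp) is staged into its own host_vector with a plain
	// thrust::copy; the device-to-host transfer is implied by the iterator types.
	// Sketch of the pattern, with hypothetical names:
	//   thrust::host_vector<double> h_area(nCells);
	//   thrust::copy(d_area.begin(), d_area.begin() + nCells, h_area.begin());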
thrust::copy(cellInfoVecs.cell_Dpp.begin(), cellInfoVecs.cell_Dpp.begin() + allocPara_m.currentActiveCellCount, cellDppHost.begin());//Ali sumX=0 ; sumY=0 ; for (uint i = 0; i < allocPara_m.currentActiveCellCount; i++) { CellStats cellStatsData; cellStatsData.cellGrowthProgress = growthProVecHost[i]; cellStatsData.cellRank = i; bool isBdry = false; std::set<int> neighbors; std::vector<int> neighborsV; //Ali int neighborStrength[10]; //Ali int continousNoAdh = 0; map <int, int> cellAndNeighborRank ; //Ali //std::cout << "printing adhesion indicies "; //for (int ii=0 ; ii<neighborStrength.size() ; ii++){ for (int ii=0 ; ii< 10; ii++){ //Ali neighborStrength[ii]=0 ; } cellAndNeighborRank.clear(); //Ali for (uint j = 0; j < activeMembrNodeCountHost[i]; j++) { uint index = i * allocPara_m.maxAllNodePerCell + j; //std::cout << adhIndxHost[index] << ","; if (adhIndxHost[index] != -1) { uint adhCellRank = adhIndxHost[index] / allocPara_m.maxAllNodePerCell; //std::cout << adhCellRank << " "; neighbors.insert(adhCellRank); map <int, int>:: iterator iteratorMap=cellAndNeighborRank.find(adhCellRank); //Ali if (iteratorMap==cellAndNeighborRank.end()) { //Ali int NewneighborRank= neighbors.size()-1; //Ali cellAndNeighborRank[adhCellRank]=NewneighborRank; //Ali neighborStrength[NewneighborRank]=neighborStrength[NewneighborRank]+1 ; //Ali neighborsV.push_back(adhCellRank); //Ali } else { //Ali int oldNeighborRank=(*iteratorMap).second ; neighborStrength[oldNeighborRank]=neighborStrength[oldNeighborRank]+1 ; //Ali } continousNoAdh = 0; } else { continousNoAdh = continousNoAdh + 1; if (continousNoAdh > bdryCriteria) { isBdry = true; } } if (j == activeMembrNodeCountHost[i] - 1 && adhIndxHost[index] == -1) { int k = 0; uint indexNew; while (k < activeMembrNodeCountHost[i] - 1) { indexNew = i * allocPara_m.maxAllNodePerCell + k; if (adhIndxHost[indexNew] == -1) { continousNoAdh = continousNoAdh + 1; if (continousNoAdh > bdryCriteria) { isBdry = true; } k++; } else { break; } } } } cellStatsData.isBdryCell = isBdry; cellStatsData.numNeighbors = neighbors.size(); cellStatsData.currentActiveMembrNodes = activeMembrNodeCountHost[i]; cellStatsData.currentActiveIntnlNodes = activeIntnlNodeCountHost[i]; cellStatsData.neighborVec = neighbors; cellStatsData.neighborVecV = neighborsV; //Ali for (int iiii=0; iiii<10 ; iiii++){ cellStatsData.cellNeighborStrength[iiii] = neighborStrength[iiii]; } //Ali cellStatsData.membrGrowthProgress = growthProMembrVecHost[i]; cellStatsData.cellCenter = CVector(centerCoordXHost[i], centerCoordYHost[i], 0); cellStatsData.cellArea = cellAreaHost[i]; cellStatsData.cellPerim = cellPerimHost[i];//AAMIRI cellStatsData.cellDpp = cellDppHost[i];//Ali result.cellsStats.push_back(cellStatsData); sumX=sumX+cellStatsData.cellCenter.x ; sumY=sumY+cellStatsData.cellCenter.y ; } //Ali if (BdryApproach==2) { cout << "sumX=" << sumX << endl ; cout << "sumY=" << sumY << endl ; cntr_X_Domain=sumX/result.cellsStats.size() ; cntr_Y_Domain=sumY/result.cellsStats.size() ; cout << "cntr_X=" << cntr_X_Domain << endl ; cout << "cntr_Y=" << cntr_Y_Domain << endl ; double R_Max ; double Distance ; R_Max=0 ; for (uint i = 0; i < allocPara_m.currentActiveCellCount; i++) { Distance=sqrt( pow(centerCoordXHost[i]-cntr_X_Domain,2) +pow(centerCoordYHost[i]-cntr_Y_Domain,2) ) ; if (Distance > R_Max) { R_Max=Distance ; } } cout << "R_Max=" << R_Max << endl ; for (uint i = 0; i < allocPara_m.currentActiveCellCount; i++) { Distance=sqrt( pow(centerCoordXHost[i]-cntr_X_Domain,2) +pow(centerCoordYHost[i]-cntr_Y_Domain,2) ) ; 
if (Distance > 0.9* R_Max) { result.cellsStats[i].isBdryCell = true; cout << "isBdryCell"<< i<< endl ; } else { result.cellsStats[i].isBdryCell = false; cout << "isNormalCell"<< i << endl ; } } } //Ali cout << "I want to write data" << endl ; // ofstream Stress_Strain_Single ; //Stress_Strain_Single.open("Stress_Strain_Single.txt"); //Stress_Strain_Single.close() ; //Ali result.MaxDistanceX=abs(centerCoordXHost[1]-centerCoordXHost[0]); //Ali result.Cells_Extrem_Loc[0]=Tisu_MinX; result.Cells_Extrem_Loc[1]=Tisu_MaxX; result.Cells_Extrem_Loc[2]=Tisu_MinY; result.Cells_Extrem_Loc[3]=Tisu_MaxY ; result.F_Ext_Out=membrPara.F_Ext_Incline*curTime ; //if (dt==curTime) { //result.Init_Displace=MaxX-MinX ; // } //Ali return result; } __device__ bool bigEnough(double& num) { if (num > minDivisor) { return true; } else { return false; } } __device__ double cross_Z(double vecA_X, double vecA_Y, double vecB_X, double vecB_Y) { return vecA_X * vecB_Y - vecA_Y * vecB_X; } /* __device__ double calBendMulti(double& angle, uint activeMembrCt) { double equAngle = PI - PI / activeMembrCt; return bendCoeff * (angle - equAngle); } */ //AAMIRI __device__ double calBendMulti_Mitotic(double& angle, uint activeMembrCt, double& progress, double mitoticCri) { double equAngle = PI - PI / activeMembrCt; if (progress <= mitoticCri){ return bendCoeff * (angle - equAngle);} else{ return (angle - equAngle)*(bendCoeff + (bendCoeff_Mitotic - bendCoeff) * (progress - mitoticCri)/(1.0 - mitoticCri)); } } void SceCells::applySceCellDisc_M() { totalNodeCountForActiveCells = allocPara_m.currentActiveCellCount * allocPara_m.maxAllNodePerCell; uint maxAllNodePerCell = allocPara_m.maxAllNodePerCell; uint maxMemNodePerCell = allocPara_m.maxMembrNodePerCell; thrust::counting_iterator<uint> iBegin(0); double* nodeLocXAddr = thrust::raw_pointer_cast( &(nodes->getInfoVecs().nodeLocX[0])); double* nodeLocYAddr = thrust::raw_pointer_cast( &(nodes->getInfoVecs().nodeLocY[0])); bool* nodeIsActiveAddr = thrust::raw_pointer_cast( &(nodes->getInfoVecs().nodeIsActive[0])); double grthPrgrCriVal_M =growthAuxData.grthPrgrCriVal_M_Ori ; // for now constant growthAuxData.grthProgrEndCPU // - growthAuxData.prolifDecay // * (growthAuxData.grthProgrEndCPU // - growthAuxData.grthPrgrCriVal_M_Ori); thrust::transform( thrust::make_zip_iterator( thrust::make_tuple( thrust::make_permutation_iterator( cellInfoVecs.activeMembrNodeCounts.begin(), make_transform_iterator(iBegin, DivideFunctor(maxAllNodePerCell))), thrust::make_permutation_iterator( cellInfoVecs.activeIntnlNodeCounts.begin(), make_transform_iterator(iBegin, DivideFunctor(maxAllNodePerCell))), make_transform_iterator(iBegin, DivideFunctor(maxAllNodePerCell)), make_transform_iterator(iBegin, ModuloFunctor(maxAllNodePerCell)), thrust::make_permutation_iterator( cellInfoVecs.growthProgress.begin(), make_transform_iterator(iBegin, DivideFunctor(maxAllNodePerCell))), nodes->getInfoVecs().nodeVelX.begin(), nodes->getInfoVecs().nodeVelY.begin())), thrust::make_zip_iterator( thrust::make_tuple( thrust::make_permutation_iterator( cellInfoVecs.activeMembrNodeCounts.begin(), make_transform_iterator(iBegin, DivideFunctor(maxAllNodePerCell))), thrust::make_permutation_iterator( cellInfoVecs.activeIntnlNodeCounts.begin(), make_transform_iterator(iBegin, DivideFunctor(maxAllNodePerCell))), make_transform_iterator(iBegin, DivideFunctor(maxAllNodePerCell)), make_transform_iterator(iBegin, ModuloFunctor(maxAllNodePerCell)), thrust::make_permutation_iterator( cellInfoVecs.growthProgress.begin(), 
make_transform_iterator(iBegin, DivideFunctor(maxAllNodePerCell))), nodes->getInfoVecs().nodeVelX.begin(), nodes->getInfoVecs().nodeVelY.begin())) + totalNodeCountForActiveCells, thrust::make_zip_iterator( thrust::make_tuple(nodes->getInfoVecs().nodeVelX.begin(), nodes->getInfoVecs().nodeVelY.begin(), nodes->getInfoVecs().nodeF_MI_M_x.begin(), //Ali added for cell pressure calculation nodes->getInfoVecs().nodeF_MI_M_y.begin())),// ALi added for cell pressure calculation AddSceCellForce(maxAllNodePerCell, maxMemNodePerCell, nodeLocXAddr, nodeLocYAddr, nodeIsActiveAddr, grthPrgrCriVal_M)); } __device__ void calAndAddIB_M(double& xPos, double& yPos, double& xPos2, double& yPos2, double& growPro, double& xRes, double& yRes, double grthPrgrCriVal_M) { double linkLength = compDist2D(xPos, yPos, xPos2, yPos2); double forceValue = 0; if (growPro > grthPrgrCriEnd_M) { if (linkLength < sceIBDiv_M[4]) { forceValue = -sceIBDiv_M[0] / sceIBDiv_M[2] * exp(-linkLength / sceIBDiv_M[2]) + sceIBDiv_M[1] / sceIBDiv_M[3] * exp(-linkLength / sceIBDiv_M[3]); } } else if (growPro > grthPrgrCriVal_M) { double percent = (growPro - grthPrgrCriVal_M) / (grthPrgrCriEnd_M - grthPrgrCriVal_M); double lenLimit = percent * (sceIBDiv_M[4]) + (1.0 - percent) * sceIB_M[4]; if (linkLength < lenLimit) { double intnlBPara0 = percent * (sceIBDiv_M[0]) + (1.0 - percent) * sceIB_M[0]; double intnlBPara1 = percent * (sceIBDiv_M[1]) + (1.0 - percent) * sceIB_M[1]; double intnlBPara2 = percent * (sceIBDiv_M[2]) + (1.0 - percent) * sceIB_M[2]; double intnlBPara3 = percent * (sceIBDiv_M[3]) + (1.0 - percent) * sceIB_M[3]; forceValue = -intnlBPara0 / intnlBPara2 * exp(-linkLength / intnlBPara2) + intnlBPara1 / intnlBPara3 * exp(-linkLength / intnlBPara3); } } else { if (linkLength < sceIB_M[4]) { forceValue = -sceIB_M[0] / sceIB_M[2] * exp(-linkLength / sceIB_M[2]) + sceIB_M[1] / sceIB_M[3] * exp(-linkLength / sceIB_M[3]); } } xRes = xRes + forceValue * (xPos2 - xPos) / linkLength; yRes = yRes + forceValue * (yPos2 - yPos) / linkLength; } //Ali function added for eventually computing pressure for each cells __device__ void calAndAddIB_M2(double& xPos, double& yPos, double& xPos2, double& yPos2, double& growPro, double& xRes, double& yRes, double & F_MI_M_x, double & F_MI_M_y, double grthPrgrCriVal_M) { double linkLength = compDist2D(xPos, yPos, xPos2, yPos2); double forceValue = 0; if (growPro > grthPrgrCriEnd_M) { if (linkLength < sceIBDiv_M[4]) { forceValue = -sceIBDiv_M[0] / sceIBDiv_M[2] * exp(-linkLength / sceIBDiv_M[2]) + sceIBDiv_M[1] / sceIBDiv_M[3] * exp(-linkLength / sceIBDiv_M[3]); } } else if (growPro > grthPrgrCriVal_M) { double percent = (growPro - grthPrgrCriVal_M) / (grthPrgrCriEnd_M - grthPrgrCriVal_M); double lenLimit = percent * (sceIBDiv_M[4]) + (1.0 - percent) * sceIB_M[4]; if (linkLength < lenLimit) { double intnlBPara0 = percent * (sceIBDiv_M[0]) + (1.0 - percent) * sceIB_M[0]; double intnlBPara1 = percent * (sceIBDiv_M[1]) + (1.0 - percent) * sceIB_M[1]; double intnlBPara2 = percent * (sceIBDiv_M[2]) + (1.0 - percent) * sceIB_M[2]; double intnlBPara3 = percent * (sceIBDiv_M[3]) + (1.0 - percent) * sceIB_M[3]; forceValue = -intnlBPara0 / intnlBPara2 * exp(-linkLength / intnlBPara2) + intnlBPara1 / intnlBPara3 * exp(-linkLength / intnlBPara3); } } else { if (linkLength < sceIB_M[4]) { forceValue = -sceIB_M[0] / sceIB_M[2] * exp(-linkLength / sceIB_M[2]) + sceIB_M[1] / sceIB_M[3] * exp(-linkLength / sceIB_M[3]); } } F_MI_M_x=F_MI_M_x+forceValue * (xPos2 - xPos) / linkLength; F_MI_M_y=F_MI_M_y+forceValue * (yPos2 - 
yPos) / linkLength; xRes = xRes + forceValue * (xPos2 - xPos) / linkLength; yRes = yRes + forceValue * (yPos2 - yPos) / linkLength; } __device__ void calAndAddII_M(double& xPos, double& yPos, double& xPos2, double& yPos2, double& growPro, double& xRes, double& yRes, double grthPrgrCriVal_M) { double linkLength = compDist2D(xPos, yPos, xPos2, yPos2); double forceValue = 0; if (growPro > grthPrgrCriEnd_M) { if (linkLength < sceIIDiv_M[4]) { forceValue = -sceIIDiv_M[0] / sceIIDiv_M[2] * exp(-linkLength / sceIIDiv_M[2]) + sceIIDiv_M[1] / sceIIDiv_M[3] * exp(-linkLength / sceIIDiv_M[3]); } } else if (growPro > grthPrgrCriVal_M) { double percent = (growPro - grthPrgrCriVal_M) / (grthPrgrCriEnd_M - grthPrgrCriVal_M); double lenLimit = percent * (sceIIDiv_M[4]) + (1.0 - percent) * sceII_M[4]; if (linkLength < lenLimit) { double intraPara0 = percent * (sceIIDiv_M[0]) + (1.0 - percent) * sceII_M[0]; double intraPara1 = percent * (sceIIDiv_M[1]) + (1.0 - percent) * sceII_M[1]; double intraPara2 = percent * (sceIIDiv_M[2]) + (1.0 - percent) * sceII_M[2]; double intraPara3 = percent * (sceIIDiv_M[3]) + (1.0 - percent) * sceII_M[3]; forceValue = -intraPara0 / intraPara2 * exp(-linkLength / intraPara2) + intraPara1 / intraPara3 * exp(-linkLength / intraPara3); } } else { if (linkLength < sceII_M[4]) { forceValue = -sceII_M[0] / sceII_M[2] * exp(-linkLength / sceII_M[2]) + sceII_M[1] / sceII_M[3] * exp(-linkLength / sceII_M[3]); } } xRes = xRes + forceValue * (xPos2 - xPos) / linkLength; yRes = yRes + forceValue * (yPos2 - yPos) / linkLength; }
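The calCellArea and calCellPerim routines above hinge on one pattern: each node index is keyed to its cell rank by integer division with maxAllNodePerCell, a transform iterator computes a per-node contribution (a signed triangle area or an edge length), and thrust::reduce_by_key sums those contributions per cell. Below is a minimal, self-contained sketch of that keyed reduction only; the functor name, the sizes, and the constant per-node contribution are illustrative stand-ins, and the permutation iterators that feed cell centers and active membrane counts into CalTriArea/CalPerim are omitted.

// Minimal sketch (not the SceCells code): sum a per-node quantity into one
// value per cell, keying each node by cell rank = nodeIndex / maxAllNodePerCell.
#include <thrust/device_vector.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/iterator/transform_iterator.h>
#include <thrust/reduce.h>
#include <thrust/functional.h>
#include <cstdio>

struct DivideBy {
  unsigned n;
  __host__ __device__ unsigned operator()(unsigned i) const { return i / n; }
};

int main() {
  const unsigned maxAllNodePerCell = 4, numCells = 3;
  const unsigned total = maxAllNodePerCell * numCells;

  // Per-node contribution (stand-in for a triangle area or an edge length).
  thrust::device_vector<double> contrib(total, 1.0);
  thrust::device_vector<unsigned> cellRank(numCells);
  thrust::device_vector<double> perCellSum(numCells);

  thrust::counting_iterator<unsigned> iBegin(0);
  auto keys = thrust::make_transform_iterator(iBegin, DivideBy{maxAllNodePerCell});

  // Consecutive equal keys form one segment per cell; plus<double> sums them.
  thrust::reduce_by_key(keys, keys + total, contrib.begin(),
                        cellRank.begin(), perCellSum.begin(),
                        thrust::equal_to<unsigned>(), thrust::plus<double>());

  for (unsigned c = 0; c < numCells; ++c)
    std::printf("cell %u sum = %f\n", c, (double)perCellSum[c]);
  return 0;
}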
b430fb51e36259873e2face5195585e4802be53a.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime.h> #include <chrono> #include <iostream> #include "adolc/adoublecuda.h" using namespace std; inline hipError_t checkCuda(hipError_t result) { if (result != hipSuccess) { printf("CUDA Runtime Error: %s:%d, ", __FILE__, __LINE__); printf("Code: %d, message: %s\n", result, hipGetErrorString(result)); } return result; } __global__ void kernel(double* inx, double* outy, double* outderiv) { // const int index = threadIdx.x; const int index1 = threadIdx.y; // const int index2 = blockIdx.x; // const int dim = blockDim.x * blockDim.y; // const int index3 = blockDim.x; int col = threadIdx.x + blockDim.x * blockIdx.x; // width int row = threadIdx.y + blockIdx.y * blockDim.y; // height int M = 8; if (col < 3 && row < M) { int global_index = row * 3 + col; // printf("Col: %d, Row:%d, global_index: %d\n", col, row, global_index); // Declare dependent and independent variables as adoubles adtlc::adouble y[3]; adtlc::adouble x[3]; // Read out point for function evaluation // for (int i = 0; i < 3; i++) x[i] = inx[index2 * dim + index * 3 + i]; x[global_index] = inx[global_index]; // Set direction for calculation of derivatives x[index1].setADValue(1); // Function evaluation y[0] = sqrt(x[0] * x[0] + x[1] * x[1] + x[2] * x[2]); y[1] = atan(sqrt(x[0] * x[0] + x[1] * x[1]) / x[2]); y[2] = atan(x[1] / x[0]); // for (int i = 0; i < 3; i++) // outy[(index2 * index3 + index) * 3 + i] = y[i].getValue(); outy[global_index] = y[global_index].getValue(); // for (int i = 0; i < 3; i++) // outderiv[(index2 * dim + index * 3 + index1) * 3 + i] = // y[i].getADValue(); outderiv[global_index] = y[global_index].getADValue(); } } hipError_t kernellaunch(double* inx, double* outy, double* outderiv, int n) { // Two dimensional (M/Blocks) x 3 blocks dim3 threadsPerBlock(8, 3); // Create 16 blocks dim3 Blocks(n / threadsPerBlock.x + 1, n / threadsPerBlock.y + 1); // Call kernel function with 16 blocks with (M/Blocks) x 3 threads per block // kernel<<<Blocks, threadsPerBlock>>>(inx, outy, outderiv); hipError_t cudaErr = hipGetLastError(); return cudaErr; } int main() { int M = 8; double* deriv = new double[9 * M]; double* y = new double[3 * M]; double* x = new double[3 * M]; // Initialize x_i for (int k = 0; k < M; k++) { for (int i = 0; i < 3; ++i) x[k * 3 + i] = i + 1 / (k + 1); } // for (int k = 0; k < M; k++) { // for (int i = 0; i < 3; ++i) { // printf("x[%d]: %f.3\n", k * 3 + i, x[k * 3 + i]); // } // } hipFree(0); auto t1 = std::chrono::steady_clock::now(); // Allocate array for independent and dependent variables and Jacobian // matrices on GPU // checkCuda(hipFree(0)); double* devx; checkCuda(hipMalloc((void**)&devx, 3 * M * sizeof(double))); double* devy; checkCuda(hipMalloc((void**)&devy, 3 * M * sizeof(double))); double* devderiv; checkCuda(hipMalloc((void**)&devderiv, 3 * 3 * M * sizeof(double))); // Copy values of independent variables from host to GPU hipMemcpy(devx, x, sizeof(double) * 3 * M, hipMemcpyHostToDevice); hipError_t error; auto t2 = std::chrono::steady_clock::now(); std::cout << "prepare time on CUDA(ms):" << double(std::chrono::duration<double>(t2 - t1).count() * 1000) << std::endl; // Call function to specify amount of blocks and threads to be used error = kernellaunch(devx, devy, devderiv, M); // hipDeviceSynchronize(); auto t3 = std::chrono::steady_clock::now(); std::cout << "kernel launch time on CUDA(ms):" << double(std::chrono::duration<double>(t3 - t2).count() * 1000) << std::endl; // error = hipGetLastError(); 
if (error != hipSuccess) { printf("Code: %d, Message: %s\n", error, hipGetErrorString(error)); } // Copy values of dependent variables and Jacobian matrices from GPU to host hipMemcpy(y, devy, sizeof(double) * 3 * M, hipMemcpyDeviceToHost); hipMemcpy(deriv, devderiv, sizeof(double) * M * 3 * 3, hipMemcpyDeviceToHost); hipFree(devx); hipFree(devy); hipFree(devderiv); auto t4 = std::chrono::steady_clock::now(); std::cout << "Total time on CUDA(ms):" << double(std::chrono::duration<double>(t4 - t1).count() * 1000) << std::endl; }
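In the hipified file above the launch inside kernellaunch() is commented out, so the timed region only calls hipGetLastError(). The sketch below is an assumption about how such a launch is usually re-enabled under HIP, shown on a toy kernel rather than the file's adouble-based one: hip-clang accepts the CUDA triple-chevron syntax directly, and hipify-perl typically rewrites it to the hipLaunchKernelGGL macro, whose extra arguments are the dynamic shared-memory byte count and the stream.

// Hedged HIP sketch: both launch forms on a toy kernel (not this file's kernel).
#include <hip/hip_runtime.h>
#include <cstdio>

__global__ void kernel(double* x) { x[threadIdx.x] *= 2.0; }

int main() {
  double h[8] = {0, 1, 2, 3, 4, 5, 6, 7};
  double* d = nullptr;
  hipMalloc(&d, sizeof(h));
  hipMemcpy(d, h, sizeof(h), hipMemcpyHostToDevice);

  dim3 blocks(1), threads(8);
  // Form 1: triple-chevron launch, accepted by hip-clang.
  kernel<<<blocks, threads>>>(d);
  // Form 2: equivalent portable macro as hipify-perl typically emits
  // (4th argument = dynamic shared-memory bytes, 5th = stream).
  hipLaunchKernelGGL(kernel, blocks, threads, 0, 0, d);

  hipMemcpy(h, d, sizeof(h), hipMemcpyDeviceToHost);
  std::printf("h[3] = %f\n", h[3]);  // 12.0 after both launches double it twice
  hipFree(d);
  return 0;
}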
b430fb51e36259873e2face5195585e4802be53a.cu
#include <cuda.h> #include <chrono> #include <iostream> #include "adolc/adoublecuda.h" using namespace std; inline cudaError_t checkCuda(cudaError_t result) { if (result != cudaSuccess) { printf("CUDA Runtime Error: %s:%d, ", __FILE__, __LINE__); printf("Code: %d, message: %s\n", result, cudaGetErrorString(result)); } return result; } __global__ void kernel(double* inx, double* outy, double* outderiv) { // const int index = threadIdx.x; const int index1 = threadIdx.y; // const int index2 = blockIdx.x; // const int dim = blockDim.x * blockDim.y; // const int index3 = blockDim.x; int col = threadIdx.x + blockDim.x * blockIdx.x; // width int row = threadIdx.y + blockIdx.y * blockDim.y; // height int M = 8; if (col < 3 && row < M) { int global_index = row * 3 + col; // printf("Col: %d, Row:%d, global_index: %d\n", col, row, global_index); // Declare dependent and independent variables as adoubles adtlc::adouble y[3]; adtlc::adouble x[3]; // Read out point for function evaluation // for (int i = 0; i < 3; i++) x[i] = inx[index2 * dim + index * 3 + i]; x[global_index] = inx[global_index]; // Set direction for calculation of derivatives x[index1].setADValue(1); // Function evaluation y[0] = sqrt(x[0] * x[0] + x[1] * x[1] + x[2] * x[2]); y[1] = atan(sqrt(x[0] * x[0] + x[1] * x[1]) / x[2]); y[2] = atan(x[1] / x[0]); // for (int i = 0; i < 3; i++) // outy[(index2 * index3 + index) * 3 + i] = y[i].getValue(); outy[global_index] = y[global_index].getValue(); // for (int i = 0; i < 3; i++) // outderiv[(index2 * dim + index * 3 + index1) * 3 + i] = // y[i].getADValue(); outderiv[global_index] = y[global_index].getADValue(); } } cudaError_t kernellaunch(double* inx, double* outy, double* outderiv, int n) { // Two dimensional (M/Blocks) x 3 blocks dim3 threadsPerBlock(8, 3); // Create 16 blocks dim3 Blocks(n / threadsPerBlock.x + 1, n / threadsPerBlock.y + 1); // Call kernel function with 16 blocks with (M/Blocks) x 3 threads per block // kernel<<<Blocks, threadsPerBlock>>>(inx, outy, outderiv); cudaError_t cudaErr = cudaGetLastError(); return cudaErr; } int main() { int M = 8; double* deriv = new double[9 * M]; double* y = new double[3 * M]; double* x = new double[3 * M]; // Initialize x_i for (int k = 0; k < M; k++) { for (int i = 0; i < 3; ++i) x[k * 3 + i] = i + 1 / (k + 1); } // for (int k = 0; k < M; k++) { // for (int i = 0; i < 3; ++i) { // printf("x[%d]: %f.3\n", k * 3 + i, x[k * 3 + i]); // } // } cudaFree(0); auto t1 = std::chrono::steady_clock::now(); // Allocate array for independent and dependent variables and Jacobian // matrices on GPU // checkCuda(cudaFree(0)); double* devx; checkCuda(cudaMalloc((void**)&devx, 3 * M * sizeof(double))); double* devy; checkCuda(cudaMalloc((void**)&devy, 3 * M * sizeof(double))); double* devderiv; checkCuda(cudaMalloc((void**)&devderiv, 3 * 3 * M * sizeof(double))); // Copy values of independent variables from host to GPU cudaMemcpy(devx, x, sizeof(double) * 3 * M, cudaMemcpyHostToDevice); cudaError_t error; auto t2 = std::chrono::steady_clock::now(); std::cout << "prepare time on CUDA(ms):" << double(std::chrono::duration<double>(t2 - t1).count() * 1000) << std::endl; // Call function to specify amount of blocks and threads to be used error = kernellaunch(devx, devy, devderiv, M); // cudaDeviceSynchronize(); auto t3 = std::chrono::steady_clock::now(); std::cout << "kernel launch time on CUDA(ms):" << double(std::chrono::duration<double>(t3 - t2).count() * 1000) << std::endl; // error = cudaGetLastError(); if (error != cudaSuccess) { printf("Code: %d, 
Message: %s\n", error, cudaGetErrorString(error)); } // Copy values of dependent variables and Jacobian matrices from GPU to host cudaMemcpy(y, devy, sizeof(double) * 3 * M, cudaMemcpyDeviceToHost); cudaMemcpy(deriv, devderiv, sizeof(double) * M * 3 * 3, cudaMemcpyDeviceToHost); cudaFree(devx); cudaFree(devy); cudaFree(devderiv); auto t4 = std::chrono::steady_clock::now(); std::cout << "Total time on CUDA(ms):" << double(std::chrono::duration<double>(t4 - t1).count() * 1000) << std::endl; }
a49293bced752453aa8d9b124cd95d18f12526ca.hip
// !!! This is a file automatically generated by hipify!!! #include "type.cuh" #include "utils.cuh" #include "hip/hip_runtime.h" #include "device_launch_parameters.h" //#include "app.cu" #include<cuda.h> __device__ double Eval(IPTR pj, int* chroms, int index); //parallelizes evaluation across many gpu threads __global__ //void EvalPopulation(IPTR pop, int start, int end, int fitnesses[2]) //, Population *p) void EvalPopulation(IPTR pop, int start, int end, int* chroms) { int i; IPTR pj; //printf("start: %d\n", start); //select index based on thread/block id int index = start + blockIdx.x * blockDim.x + threadIdx.x; //printf("index: %d\n", index); //fitnesses[0] = 2; //calculate stride based on thread/block id int stride = blockDim.x * gridDim.x; //printf("Got here 1\n"); //printf("Got here 1.5\n"); //if(p->maximize){ // printf("Got here 2\n"); // for(i = index; i < end; i += stride){ if (index < end) { pj = &pop[index]; //int* chrom = (int*)malloc(pj->chromLen); //printf("Got here 2.0, index: %d\n", index); //for (int i = 0; i < pj->chromLen; i++) // { // printf("chrom: %d\n", chroms[index*(pj->chromLen)]); // } // // printf("got here1\n"); //printf("got here3\n"); //printf("chrom2: %d\n", pj->chrom[0]); pj->objfunc = Eval(pj, chroms, index); // printf("Got here 2.1\n"); // printf("Got here 2.1, index: %d\n", index); //pj->objfunc = Eval(pj); //pj->fitness = Eval(pj); //fitnesses[index] = 789;//Eval(pj); //printf("Got here 2.2\n"); //pj->fitness = 80; // p->maxConst - pj->objfunc; //printf("\nreturned fitness of: %d\n", pj->fitness); } /* }else { printf("Got here 3\n"); for (i = index; i < end; i += stride) { pj = &pop[i]; pj->objfunc = Eval(pj); pj->fitness = p->maxConst - pj->objfunc; printf("\nreturned fitness of: %d", pj->fitness); } }*/ //printf("Got here 4\n"); return; }
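The kernel above computes a stride from blockDim.x * gridDim.x but then only guards a single element with if (index < end); the commented-out loop suggests the grid-stride form that stride was meant for. Below is a hedged, self-contained HIP sketch of that pattern with a stand-in objective function; IPTR, Population, and the real Eval() from type.cuh are not used.

// Grid-stride sketch: each thread starts at its global index and hops by the
// total thread count, so a fixed launch covers any population size.
#include <hip/hip_runtime.h>
#include <cstdio>

__global__ void evalAll(const int* chroms, double* objfunc, int start, int end) {
  int index  = start + blockIdx.x * blockDim.x + threadIdx.x;
  int stride = blockDim.x * gridDim.x;
  for (int i = index; i < end; i += stride) {
    objfunc[i] = static_cast<double>(chroms[i]) * 0.5;  // stand-in for Eval()
  }
}

int main() {
  const int n = 1000;
  int* dChroms = nullptr;
  double* dObj = nullptr;
  hipMalloc(&dChroms, n * sizeof(int));
  hipMalloc(&dObj, n * sizeof(double));
  hipMemset(dChroms, 0, n * sizeof(int));

  evalAll<<<4, 128>>>(dChroms, dObj, 0, n);  // 512 threads cover 1000 items
  hipDeviceSynchronize();

  double last = 0.0;
  hipMemcpy(&last, dObj + (n - 1), sizeof(double), hipMemcpyDeviceToHost);
  std::printf("objfunc[last] = %f\n", last);

  hipFree(dChroms);
  hipFree(dObj);
  return 0;
}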
a49293bced752453aa8d9b124cd95d18f12526ca.cu
#include "type.cuh" #include "utils.cuh" #include "cuda_runtime.h" #include "device_launch_parameters.h" //#include "app.cu" #include<cuda.h> __device__ double Eval(IPTR pj, int* chroms, int index); //parallelizes evaluation across many gpu threads __global__ //void EvalPopulation(IPTR pop, int start, int end, int fitnesses[2]) //, Population *p) void EvalPopulation(IPTR pop, int start, int end, int* chroms) { int i; IPTR pj; //printf("start: %d\n", start); //select index based on thread/block id int index = start + blockIdx.x * blockDim.x + threadIdx.x; //printf("index: %d\n", index); //fitnesses[0] = 2; //calculate stride based on thread/block id int stride = blockDim.x * gridDim.x; //printf("Got here 1\n"); //printf("Got here 1.5\n"); //if(p->maximize){ // printf("Got here 2\n"); // for(i = index; i < end; i += stride){ if (index < end) { pj = &pop[index]; //int* chrom = (int*)malloc(pj->chromLen); //printf("Got here 2.0, index: %d\n", index); //for (int i = 0; i < pj->chromLen; i++) // { // printf("chrom: %d\n", chroms[index*(pj->chromLen)]); // } // // printf("got here1\n"); //printf("got here3\n"); //printf("chrom2: %d\n", pj->chrom[0]); pj->objfunc = Eval(pj, chroms, index); // printf("Got here 2.1\n"); // printf("Got here 2.1, index: %d\n", index); //pj->objfunc = Eval(pj); //pj->fitness = Eval(pj); //fitnesses[index] = 789;//Eval(pj); //printf("Got here 2.2\n"); //pj->fitness = 80; // p->maxConst - pj->objfunc; //printf("\nreturned fitness of: %d\n", pj->fitness); } /* }else { printf("Got here 3\n"); for (i = index; i < end; i += stride) { pj = &pop[i]; pj->objfunc = Eval(pj); pj->fitness = p->maxConst - pj->objfunc; printf("\nreturned fitness of: %d", pj->fitness); } }*/ //printf("Got here 4\n"); return; }
e3376b2ccbc5335b04e465fc1617ef66e88bce9a.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2020-2022, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "avro.h" #include "avro_gpu.h" #include "thrust/iterator/transform_output_iterator.h" #include <io/comp/gpuinflate.h> #include <io/utilities/column_buffer.hpp> #include <io/utilities/hostdevice_vector.hpp> #include <cudf/detail/null_mask.hpp> #include <cudf/detail/utilities/vector_factories.hpp> #include <cudf/io/datasource.hpp> #include <cudf/io/detail/avro.hpp> #include <cudf/table/table.hpp> #include <cudf/utilities/error.hpp> #include <cudf/utilities/span.hpp> #include <cudf/utilities/traits.hpp> #include <numeric> #include <rmm/cuda_stream_view.hpp> #include <rmm/device_buffer.hpp> #include <rmm/device_uvector.hpp> #include <rmm/exec_policy.hpp> #include <nvcomp/snappy.h> #include <memory> #include <string> #include <utility> #include <vector> using cudf::device_span; namespace cudf { namespace io { namespace detail { namespace avro { // Import functionality that's independent of legacy code using namespace cudf::io::avro; using namespace cudf::io; namespace { /** * @brief Function that translates Avro data kind to cuDF type enum */ type_id to_type_id(avro::schema_entry const* col) { switch (col->kind) { case avro::type_boolean: return type_id::BOOL8; case avro::type_int: return type_id::INT32; case avro::type_long: return type_id::INT64; case avro::type_float: return type_id::FLOAT32; case avro::type_double: return type_id::FLOAT64; case avro::type_bytes: case avro::type_string: return type_id::STRING; case avro::type_enum: return (!col->symbols.empty()) ? type_id::STRING : type_id::INT32; default: return type_id::EMPTY; } } } // namespace /** * @brief A helper wrapper for Avro file metadata. 
Provides some additional * convenience methods for initializing and accessing the metadata and schema */ class metadata : public file_metadata { public: explicit metadata(datasource* const src) : source(src) {} /** * @brief Initializes the parser and filters down to a subset of rows * * @param[in,out] row_start Starting row of the selection * @param[in,out] row_count Total number of rows selected */ void init_and_select_rows(int& row_start, int& row_count) { auto const buffer = source->host_read(0, source->size()); avro::container pod(buffer->data(), buffer->size()); CUDF_EXPECTS(pod.parse(this, row_count, row_start), "Cannot parse metadata"); row_start = skip_rows; row_count = num_rows; } /** * @brief Filters and reduces down to a selection of columns * * @param[in] use_names List of column names to select * * @return List of column names */ auto select_columns(std::vector<std::string> use_names) { std::vector<std::pair<int, std::string>> selection; auto const num_avro_columns = static_cast<int>(columns.size()); if (!use_names.empty()) { int index = 0; for (auto const& use_name : use_names) { for (int i = 0; i < num_avro_columns; ++i, ++index) { if (index >= num_avro_columns) { index = 0; } if (columns[index].name == use_name && type_id::EMPTY != to_type_id(&schema[columns[index].schema_data_idx])) { selection.emplace_back(index, columns[index].name); index++; break; } } } CUDF_EXPECTS(selection.size() > 0, "Filtered out all columns"); } else { for (int i = 0; i < num_avro_columns; ++i) { // Exclude array columns (unsupported) bool column_in_array = false; for (int parent_idx = schema[columns[i].schema_data_idx].parent_idx; parent_idx > 0; parent_idx = schema[parent_idx].parent_idx) { if (schema[parent_idx].kind == avro::type_array) { column_in_array = true; break; } } if (!column_in_array) { auto col_type = to_type_id(&schema[columns[i].schema_data_idx]); CUDF_EXPECTS(col_type != type_id::EMPTY, "Unsupported data type"); selection.emplace_back(i, columns[i].name); } } } return selection; } private: datasource* const source; }; rmm::device_buffer decompress_data(datasource& source, metadata& meta, rmm::device_buffer const& comp_block_data, rmm::cuda_stream_view stream) { if (meta.codec == "deflate") { size_t uncompressed_data_size = 0; auto inflate_in = hostdevice_vector<gpu_inflate_input_s>(meta.block_list.size(), stream); auto inflate_out = hostdevice_vector<gpu_inflate_status_s>(meta.block_list.size(), stream); // Guess an initial maximum uncompressed block size uint32_t initial_blk_len = (meta.max_block_size * 2 + 0xfff) & ~0xfff; uncompressed_data_size = initial_blk_len * meta.block_list.size(); for (size_t i = 0; i < inflate_in.size(); ++i) { inflate_in[i].dstSize = initial_blk_len; } rmm::device_buffer decomp_block_data(uncompressed_data_size, stream); auto const base_offset = meta.block_list[0].offset; for (size_t i = 0, dst_pos = 0; i < meta.block_list.size(); i++) { auto const src_pos = meta.block_list[i].offset - base_offset; inflate_in[i].srcDevice = static_cast<uint8_t const*>(comp_block_data.data()) + src_pos; inflate_in[i].srcSize = meta.block_list[i].size; inflate_in[i].dstDevice = static_cast<uint8_t*>(decomp_block_data.data()) + dst_pos; // Update blocks offsets & sizes to refer to uncompressed data meta.block_list[i].offset = dst_pos; meta.block_list[i].size = static_cast<uint32_t>(inflate_in[i].dstSize); dst_pos += meta.block_list[i].size; } for (int loop_cnt = 0; loop_cnt < 2; loop_cnt++) { inflate_in.host_to_device(stream); CUDA_TRY( 
hipMemsetAsync(inflate_out.device_ptr(), 0, inflate_out.memory_size(), stream.value())); CUDA_TRY(gpuinflate( inflate_in.device_ptr(), inflate_out.device_ptr(), inflate_in.size(), 0, stream)); inflate_out.device_to_host(stream, true); // Check if larger output is required, as it's not known ahead of time if (loop_cnt == 0) { size_t actual_uncompressed_size = 0; for (size_t i = 0; i < meta.block_list.size(); i++) { // If error status is 1 (buffer too small), the `bytes_written` field // is actually contains the uncompressed data size if (inflate_out[i].status == 1 && inflate_out[i].bytes_written > inflate_in[i].dstSize) { inflate_in[i].dstSize = inflate_out[i].bytes_written; } actual_uncompressed_size += inflate_in[i].dstSize; } if (actual_uncompressed_size > uncompressed_data_size) { decomp_block_data.resize(actual_uncompressed_size, stream); for (size_t i = 0, dst_pos = 0; i < meta.block_list.size(); i++) { auto dst_base = static_cast<uint8_t*>(decomp_block_data.data()); inflate_in[i].dstDevice = dst_base + dst_pos; meta.block_list[i].offset = dst_pos; meta.block_list[i].size = static_cast<uint32_t>(inflate_in[i].dstSize); dst_pos += meta.block_list[i].size; } } else { break; } } } return decomp_block_data; } else if (meta.codec == "snappy") { size_t const num_blocks = meta.block_list.size(); // comp_block_data contains contents of the avro file starting from the first block, excluding // file header. meta.block_list[i].offset refers to offset of block i in the file, including // file header. // Find ptrs to each compressed block in comp_block_data by removing header offset. hostdevice_vector<void const*> compressed_data_ptrs(num_blocks, stream); std::transform(meta.block_list.begin(), meta.block_list.end(), compressed_data_ptrs.host_ptr(), [&](auto const& block) { return static_cast<std::byte const*>(comp_block_data.data()) + (block.offset - meta.block_list[0].offset); }); compressed_data_ptrs.host_to_device(stream); hostdevice_vector<size_t> compressed_data_sizes(num_blocks, stream); std::transform(meta.block_list.begin(), meta.block_list.end(), compressed_data_sizes.host_ptr(), [](auto const& block) { return block.size; }); compressed_data_sizes.host_to_device(stream); hostdevice_vector<size_t> uncompressed_data_sizes(num_blocks, stream); nvcompStatus_t status = nvcompBatchedSnappyGetDecompressSizeAsync(compressed_data_ptrs.device_ptr(), compressed_data_sizes.device_ptr(), uncompressed_data_sizes.device_ptr(), num_blocks, stream.value()); CUDF_EXPECTS(status == nvcompStatus_t::nvcompSuccess, "Unable to get uncompressed sizes for snappy compressed blocks"); uncompressed_data_sizes.device_to_host(stream, true); size_t const uncompressed_data_size = std::reduce(uncompressed_data_sizes.begin(), uncompressed_data_sizes.end()); size_t const max_uncomp_block_size = std::reduce( uncompressed_data_sizes.begin(), uncompressed_data_sizes.end(), 0, thrust::maximum<size_t>()); size_t temp_size; status = nvcompBatchedSnappyDecompressGetTempSize(num_blocks, max_uncomp_block_size, &temp_size); CUDF_EXPECTS(status == nvcompStatus_t::nvcompSuccess, "Unable to get scratch size for snappy decompression"); rmm::device_buffer scratch(temp_size, stream); rmm::device_buffer decomp_block_data(uncompressed_data_size, stream); rmm::device_uvector<void*> uncompressed_data_ptrs(num_blocks, stream); hostdevice_vector<size_t> uncompressed_data_offsets(num_blocks, stream); std::exclusive_scan(uncompressed_data_sizes.begin(), uncompressed_data_sizes.end(), uncompressed_data_offsets.begin(), 0); 
uncompressed_data_offsets.host_to_device(stream); thrust::tabulate(rmm::exec_policy(stream), uncompressed_data_ptrs.begin(), uncompressed_data_ptrs.end(), [off = uncompressed_data_offsets.device_ptr(), data = static_cast<std::byte*>(decomp_block_data.data())] __device__(int i) { return data + off[i]; }); rmm::device_uvector<size_t> actual_uncompressed_data_sizes(num_blocks, stream); rmm::device_uvector<nvcompStatus_t> statuses(num_blocks, stream); status = nvcompBatchedSnappyDecompressAsync(compressed_data_ptrs.device_ptr(), compressed_data_sizes.device_ptr(), uncompressed_data_sizes.device_ptr(), actual_uncompressed_data_sizes.data(), num_blocks, scratch.data(), scratch.size(), uncompressed_data_ptrs.data(), statuses.data(), stream); CUDF_EXPECTS(status == nvcompStatus_t::nvcompSuccess, "unable to perform snappy decompression"); CUDF_EXPECTS(thrust::equal(rmm::exec_policy(stream), uncompressed_data_sizes.d_begin(), uncompressed_data_sizes.d_end(), actual_uncompressed_data_sizes.begin()), "Mismatch in expected and actual decompressed size during snappy decompression"); CUDF_EXPECTS(thrust::equal(rmm::exec_policy(stream), statuses.begin(), statuses.end(), thrust::make_constant_iterator(nvcompStatus_t::nvcompSuccess)), "Error during snappy decompression"); // Update blocks offsets & sizes to refer to uncompressed data for (size_t i = 0; i < num_blocks; i++) { meta.block_list[i].offset = uncompressed_data_offsets[i]; meta.block_list[i].size = uncompressed_data_sizes[i]; } return decomp_block_data; } else { CUDF_FAIL("Unsupported compression codec\n"); } } std::vector<column_buffer> decode_data(metadata& meta, rmm::device_buffer const& block_data, std::vector<std::pair<uint32_t, uint32_t>> const& dict, device_span<string_index_pair const> global_dictionary, size_t num_rows, std::vector<std::pair<int, std::string>> const& selection, std::vector<data_type> const& column_types, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { auto out_buffers = std::vector<column_buffer>(); for (size_t i = 0; i < column_types.size(); ++i) { auto col_idx = selection[i].first; bool is_nullable = (meta.columns[col_idx].schema_null_idx >= 0); out_buffers.emplace_back(column_types[i], num_rows, is_nullable, stream, mr); } // Build gpu schema auto schema_desc = hostdevice_vector<gpu::schemadesc_s>(meta.schema.size(), stream); uint32_t min_row_data_size = 0; int skip_field_cnt = 0; for (size_t i = 0; i < meta.schema.size(); i++) { type_kind_e kind = meta.schema[i].kind; if (skip_field_cnt != 0) { // Exclude union and array members from min_row_data_size skip_field_cnt += meta.schema[i].num_children - 1; } else { switch (kind) { case type_union: case type_array: skip_field_cnt = meta.schema[i].num_children; // fall through case type_boolean: case type_int: case type_long: case type_bytes: case type_string: case type_enum: min_row_data_size += 1; break; case type_float: min_row_data_size += 4; break; case type_double: min_row_data_size += 8; break; default: break; } } if (kind == type_enum && !meta.schema[i].symbols.size()) { kind = type_int; } schema_desc[i].kind = kind; schema_desc[i].count = (kind == type_enum) ? 
0 : static_cast<uint32_t>(meta.schema[i].num_children); schema_desc[i].dataptr = nullptr; CUDF_EXPECTS(kind != type_union || meta.schema[i].num_children < 2 || (meta.schema[i].num_children == 2 && (meta.schema[i + 1].kind == type_null || meta.schema[i + 2].kind == type_null)), "Union with non-null type not currently supported"); } std::vector<void*> valid_alias(out_buffers.size(), nullptr); for (size_t i = 0; i < out_buffers.size(); i++) { auto const col_idx = selection[i].first; int schema_data_idx = meta.columns[col_idx].schema_data_idx; int schema_null_idx = meta.columns[col_idx].schema_null_idx; schema_desc[schema_data_idx].dataptr = out_buffers[i].data(); if (schema_null_idx >= 0) { if (!schema_desc[schema_null_idx].dataptr) { schema_desc[schema_null_idx].dataptr = out_buffers[i].null_mask(); } else { valid_alias[i] = schema_desc[schema_null_idx].dataptr; } } if (meta.schema[schema_data_idx].kind == type_enum) { schema_desc[schema_data_idx].count = dict[i].first; } if (out_buffers[i].null_mask_size()) { cudf::detail::set_null_mask(out_buffers[i].null_mask(), 0, num_rows, true, stream); } } auto block_list = cudf::detail::make_device_uvector_async(meta.block_list, stream); schema_desc.host_to_device(stream); gpu::DecodeAvroColumnData(block_list, schema_desc.device_ptr(), global_dictionary, static_cast<uint8_t const*>(block_data.data()), static_cast<uint32_t>(schema_desc.size()), meta.num_rows, meta.skip_rows, min_row_data_size, stream); // Copy valid bits that are shared between columns for (size_t i = 0; i < out_buffers.size(); i++) { if (valid_alias[i] != nullptr) { CUDA_TRY(hipMemcpyAsync(out_buffers[i].null_mask(), valid_alias[i], out_buffers[i].null_mask_size(), hipMemcpyHostToDevice, stream.value())); } } schema_desc.device_to_host(stream, true); for (size_t i = 0; i < out_buffers.size(); i++) { auto const col_idx = selection[i].first; auto const schema_null_idx = meta.columns[col_idx].schema_null_idx; out_buffers[i].null_count() = (schema_null_idx >= 0) ? schema_desc[schema_null_idx].count : 0; } return out_buffers; } table_with_metadata read_avro(std::unique_ptr<cudf::io::datasource>&& source, avro_reader_options const& options, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { auto skip_rows = options.get_skip_rows(); auto num_rows = options.get_num_rows(); num_rows = (num_rows != 0) ? 
num_rows : -1; std::vector<std::unique_ptr<column>> out_columns; table_metadata metadata_out; // Open the source Avro dataset metadata auto meta = metadata(source.get()); // Select and read partial metadata / schema within the subset of rows meta.init_and_select_rows(skip_rows, num_rows); // Select only columns required by the options auto selected_columns = meta.select_columns(options.get_columns()); if (selected_columns.size() != 0) { // Get a list of column data types std::vector<data_type> column_types; for (auto const& col : selected_columns) { auto& col_schema = meta.schema[meta.columns[col.first].schema_data_idx]; auto col_type = to_type_id(&col_schema); CUDF_EXPECTS(col_type != type_id::EMPTY, "Unknown type"); column_types.emplace_back(col_type); } if (meta.total_data_size > 0) { rmm::device_buffer block_data; if (source->is_device_read_preferred(meta.total_data_size)) { block_data = rmm::device_buffer{meta.total_data_size, stream}; auto read_bytes = source->device_read(meta.block_list[0].offset, meta.total_data_size, static_cast<uint8_t*>(block_data.data()), stream); block_data.resize(read_bytes, stream); } else { auto const buffer = source->host_read(meta.block_list[0].offset, meta.total_data_size); block_data = rmm::device_buffer{buffer->data(), buffer->size(), stream}; } if (meta.codec != "" && meta.codec != "null") { auto decomp_block_data = decompress_data(*source, meta, block_data, stream); block_data = std::move(decomp_block_data); } else { auto dst_ofs = meta.block_list[0].offset; for (size_t i = 0; i < meta.block_list.size(); i++) { meta.block_list[i].offset -= dst_ofs; } } size_t total_dictionary_entries = 0; size_t dictionary_data_size = 0; auto dict = std::vector<std::pair<uint32_t, uint32_t>>(column_types.size()); for (size_t i = 0; i < column_types.size(); ++i) { auto col_idx = selected_columns[i].first; auto& col_schema = meta.schema[meta.columns[col_idx].schema_data_idx]; dict[i].first = static_cast<uint32_t>(total_dictionary_entries); dict[i].second = static_cast<uint32_t>(col_schema.symbols.size()); total_dictionary_entries += dict[i].second; for (auto const& sym : col_schema.symbols) { dictionary_data_size += sym.length(); } } auto d_global_dict = rmm::device_uvector<string_index_pair>(0, stream); auto d_global_dict_data = rmm::device_uvector<char>(0, stream); if (total_dictionary_entries > 0) { auto h_global_dict = std::vector<string_index_pair>(total_dictionary_entries); auto h_global_dict_data = std::vector<char>(dictionary_data_size); size_t dict_pos = 0; for (size_t i = 0; i < column_types.size(); ++i) { auto const col_idx = selected_columns[i].first; auto const& col_schema = meta.schema[meta.columns[col_idx].schema_data_idx]; auto const col_dict_entries = &(h_global_dict[dict[i].first]); for (size_t j = 0; j < dict[i].second; j++) { auto const& symbols = col_schema.symbols[j]; auto const data_dst = h_global_dict_data.data() + dict_pos; auto const len = symbols.length(); col_dict_entries[j].first = data_dst; col_dict_entries[j].second = len; std::copy(symbols.c_str(), symbols.c_str() + len, data_dst); dict_pos += len; } } d_global_dict = cudf::detail::make_device_uvector_async(h_global_dict, stream); d_global_dict_data = cudf::detail::make_device_uvector_async(h_global_dict_data, stream); stream.synchronize(); } auto out_buffers = decode_data(meta, block_data, dict, d_global_dict, num_rows, selected_columns, column_types, stream, mr); for (size_t i = 0; i < column_types.size(); ++i) { out_columns.emplace_back(make_column(out_buffers[i], nullptr, stream, 
mr)); } } else { // Create empty columns for (size_t i = 0; i < column_types.size(); ++i) { out_columns.emplace_back(make_empty_column(column_types[i])); } } } // Return column names (must match order of returned columns) metadata_out.column_names.resize(selected_columns.size()); for (size_t i = 0; i < selected_columns.size(); i++) { metadata_out.column_names[i] = selected_columns[i].second; } // Return user metadata metadata_out.user_data = meta.user_data; return {std::make_unique<table>(std::move(out_columns)), std::move(metadata_out)}; } } // namespace avro } // namespace detail } // namespace io } // namespace cudf
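In the snappy branch above, per-block output placement is derived in two steps: std::exclusive_scan over the uncompressed block sizes yields each block's byte offset, and thrust::tabulate turns those offsets into destination pointers inside one contiguous buffer (the real code does this with a __device__ lambda over rmm buffers). The stripped-down sketch below shows the same idea with plain Thrust containers and a functor in place of the lambda; the sizes are toy values.

#include <thrust/device_vector.h>
#include <thrust/tabulate.h>
#include <cstdio>
#include <numeric>
#include <vector>

struct OffsetToPtr {
  char* base;
  const size_t* off;
  __host__ __device__ char* operator()(int i) const { return base + off[i]; }
};

int main() {
  std::vector<size_t> sizes{100, 250, 50};            // per-block uncompressed sizes
  std::vector<size_t> offsets(sizes.size());
  std::exclusive_scan(sizes.begin(), sizes.end(), offsets.begin(), size_t{0});  // {0, 100, 350}

  size_t total = offsets.back() + sizes.back();       // 400 output bytes in this toy case
  thrust::device_vector<char> out(total);             // one contiguous decompression buffer
  thrust::device_vector<size_t> d_offsets(offsets);
  thrust::device_vector<char*> d_ptrs(sizes.size());  // per-block destination pointers

  thrust::tabulate(d_ptrs.begin(), d_ptrs.end(),
                   OffsetToPtr{thrust::raw_pointer_cast(out.data()),
                               thrust::raw_pointer_cast(d_offsets.data())});

  std::printf("blocks: %zu, total output bytes: %zu\n", sizes.size(), total);
  return 0;
}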
e3376b2ccbc5335b04e465fc1617ef66e88bce9a.cu
/* * Copyright (c) 2020-2022, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "avro.h" #include "avro_gpu.h" #include "thrust/iterator/transform_output_iterator.h" #include <io/comp/gpuinflate.h> #include <io/utilities/column_buffer.hpp> #include <io/utilities/hostdevice_vector.hpp> #include <cudf/detail/null_mask.hpp> #include <cudf/detail/utilities/vector_factories.hpp> #include <cudf/io/datasource.hpp> #include <cudf/io/detail/avro.hpp> #include <cudf/table/table.hpp> #include <cudf/utilities/error.hpp> #include <cudf/utilities/span.hpp> #include <cudf/utilities/traits.hpp> #include <numeric> #include <rmm/cuda_stream_view.hpp> #include <rmm/device_buffer.hpp> #include <rmm/device_uvector.hpp> #include <rmm/exec_policy.hpp> #include <nvcomp/snappy.h> #include <memory> #include <string> #include <utility> #include <vector> using cudf::device_span; namespace cudf { namespace io { namespace detail { namespace avro { // Import functionality that's independent of legacy code using namespace cudf::io::avro; using namespace cudf::io; namespace { /** * @brief Function that translates Avro data kind to cuDF type enum */ type_id to_type_id(avro::schema_entry const* col) { switch (col->kind) { case avro::type_boolean: return type_id::BOOL8; case avro::type_int: return type_id::INT32; case avro::type_long: return type_id::INT64; case avro::type_float: return type_id::FLOAT32; case avro::type_double: return type_id::FLOAT64; case avro::type_bytes: case avro::type_string: return type_id::STRING; case avro::type_enum: return (!col->symbols.empty()) ? type_id::STRING : type_id::INT32; default: return type_id::EMPTY; } } } // namespace /** * @brief A helper wrapper for Avro file metadata. 
Provides some additional * convenience methods for initializing and accessing the metadata and schema */ class metadata : public file_metadata { public: explicit metadata(datasource* const src) : source(src) {} /** * @brief Initializes the parser and filters down to a subset of rows * * @param[in,out] row_start Starting row of the selection * @param[in,out] row_count Total number of rows selected */ void init_and_select_rows(int& row_start, int& row_count) { auto const buffer = source->host_read(0, source->size()); avro::container pod(buffer->data(), buffer->size()); CUDF_EXPECTS(pod.parse(this, row_count, row_start), "Cannot parse metadata"); row_start = skip_rows; row_count = num_rows; } /** * @brief Filters and reduces down to a selection of columns * * @param[in] use_names List of column names to select * * @return List of column names */ auto select_columns(std::vector<std::string> use_names) { std::vector<std::pair<int, std::string>> selection; auto const num_avro_columns = static_cast<int>(columns.size()); if (!use_names.empty()) { int index = 0; for (auto const& use_name : use_names) { for (int i = 0; i < num_avro_columns; ++i, ++index) { if (index >= num_avro_columns) { index = 0; } if (columns[index].name == use_name && type_id::EMPTY != to_type_id(&schema[columns[index].schema_data_idx])) { selection.emplace_back(index, columns[index].name); index++; break; } } } CUDF_EXPECTS(selection.size() > 0, "Filtered out all columns"); } else { for (int i = 0; i < num_avro_columns; ++i) { // Exclude array columns (unsupported) bool column_in_array = false; for (int parent_idx = schema[columns[i].schema_data_idx].parent_idx; parent_idx > 0; parent_idx = schema[parent_idx].parent_idx) { if (schema[parent_idx].kind == avro::type_array) { column_in_array = true; break; } } if (!column_in_array) { auto col_type = to_type_id(&schema[columns[i].schema_data_idx]); CUDF_EXPECTS(col_type != type_id::EMPTY, "Unsupported data type"); selection.emplace_back(i, columns[i].name); } } } return selection; } private: datasource* const source; }; rmm::device_buffer decompress_data(datasource& source, metadata& meta, rmm::device_buffer const& comp_block_data, rmm::cuda_stream_view stream) { if (meta.codec == "deflate") { size_t uncompressed_data_size = 0; auto inflate_in = hostdevice_vector<gpu_inflate_input_s>(meta.block_list.size(), stream); auto inflate_out = hostdevice_vector<gpu_inflate_status_s>(meta.block_list.size(), stream); // Guess an initial maximum uncompressed block size uint32_t initial_blk_len = (meta.max_block_size * 2 + 0xfff) & ~0xfff; uncompressed_data_size = initial_blk_len * meta.block_list.size(); for (size_t i = 0; i < inflate_in.size(); ++i) { inflate_in[i].dstSize = initial_blk_len; } rmm::device_buffer decomp_block_data(uncompressed_data_size, stream); auto const base_offset = meta.block_list[0].offset; for (size_t i = 0, dst_pos = 0; i < meta.block_list.size(); i++) { auto const src_pos = meta.block_list[i].offset - base_offset; inflate_in[i].srcDevice = static_cast<uint8_t const*>(comp_block_data.data()) + src_pos; inflate_in[i].srcSize = meta.block_list[i].size; inflate_in[i].dstDevice = static_cast<uint8_t*>(decomp_block_data.data()) + dst_pos; // Update blocks offsets & sizes to refer to uncompressed data meta.block_list[i].offset = dst_pos; meta.block_list[i].size = static_cast<uint32_t>(inflate_in[i].dstSize); dst_pos += meta.block_list[i].size; } for (int loop_cnt = 0; loop_cnt < 2; loop_cnt++) { inflate_in.host_to_device(stream); CUDA_TRY( 
cudaMemsetAsync(inflate_out.device_ptr(), 0, inflate_out.memory_size(), stream.value())); CUDA_TRY(gpuinflate( inflate_in.device_ptr(), inflate_out.device_ptr(), inflate_in.size(), 0, stream)); inflate_out.device_to_host(stream, true); // Check if larger output is required, as it's not known ahead of time if (loop_cnt == 0) { size_t actual_uncompressed_size = 0; for (size_t i = 0; i < meta.block_list.size(); i++) { // If error status is 1 (buffer too small), the `bytes_written` field // is actually contains the uncompressed data size if (inflate_out[i].status == 1 && inflate_out[i].bytes_written > inflate_in[i].dstSize) { inflate_in[i].dstSize = inflate_out[i].bytes_written; } actual_uncompressed_size += inflate_in[i].dstSize; } if (actual_uncompressed_size > uncompressed_data_size) { decomp_block_data.resize(actual_uncompressed_size, stream); for (size_t i = 0, dst_pos = 0; i < meta.block_list.size(); i++) { auto dst_base = static_cast<uint8_t*>(decomp_block_data.data()); inflate_in[i].dstDevice = dst_base + dst_pos; meta.block_list[i].offset = dst_pos; meta.block_list[i].size = static_cast<uint32_t>(inflate_in[i].dstSize); dst_pos += meta.block_list[i].size; } } else { break; } } } return decomp_block_data; } else if (meta.codec == "snappy") { size_t const num_blocks = meta.block_list.size(); // comp_block_data contains contents of the avro file starting from the first block, excluding // file header. meta.block_list[i].offset refers to offset of block i in the file, including // file header. // Find ptrs to each compressed block in comp_block_data by removing header offset. hostdevice_vector<void const*> compressed_data_ptrs(num_blocks, stream); std::transform(meta.block_list.begin(), meta.block_list.end(), compressed_data_ptrs.host_ptr(), [&](auto const& block) { return static_cast<std::byte const*>(comp_block_data.data()) + (block.offset - meta.block_list[0].offset); }); compressed_data_ptrs.host_to_device(stream); hostdevice_vector<size_t> compressed_data_sizes(num_blocks, stream); std::transform(meta.block_list.begin(), meta.block_list.end(), compressed_data_sizes.host_ptr(), [](auto const& block) { return block.size; }); compressed_data_sizes.host_to_device(stream); hostdevice_vector<size_t> uncompressed_data_sizes(num_blocks, stream); nvcompStatus_t status = nvcompBatchedSnappyGetDecompressSizeAsync(compressed_data_ptrs.device_ptr(), compressed_data_sizes.device_ptr(), uncompressed_data_sizes.device_ptr(), num_blocks, stream.value()); CUDF_EXPECTS(status == nvcompStatus_t::nvcompSuccess, "Unable to get uncompressed sizes for snappy compressed blocks"); uncompressed_data_sizes.device_to_host(stream, true); size_t const uncompressed_data_size = std::reduce(uncompressed_data_sizes.begin(), uncompressed_data_sizes.end()); size_t const max_uncomp_block_size = std::reduce( uncompressed_data_sizes.begin(), uncompressed_data_sizes.end(), 0, thrust::maximum<size_t>()); size_t temp_size; status = nvcompBatchedSnappyDecompressGetTempSize(num_blocks, max_uncomp_block_size, &temp_size); CUDF_EXPECTS(status == nvcompStatus_t::nvcompSuccess, "Unable to get scratch size for snappy decompression"); rmm::device_buffer scratch(temp_size, stream); rmm::device_buffer decomp_block_data(uncompressed_data_size, stream); rmm::device_uvector<void*> uncompressed_data_ptrs(num_blocks, stream); hostdevice_vector<size_t> uncompressed_data_offsets(num_blocks, stream); std::exclusive_scan(uncompressed_data_sizes.begin(), uncompressed_data_sizes.end(), uncompressed_data_offsets.begin(), 0); 
uncompressed_data_offsets.host_to_device(stream); thrust::tabulate(rmm::exec_policy(stream), uncompressed_data_ptrs.begin(), uncompressed_data_ptrs.end(), [off = uncompressed_data_offsets.device_ptr(), data = static_cast<std::byte*>(decomp_block_data.data())] __device__(int i) { return data + off[i]; }); rmm::device_uvector<size_t> actual_uncompressed_data_sizes(num_blocks, stream); rmm::device_uvector<nvcompStatus_t> statuses(num_blocks, stream); status = nvcompBatchedSnappyDecompressAsync(compressed_data_ptrs.device_ptr(), compressed_data_sizes.device_ptr(), uncompressed_data_sizes.device_ptr(), actual_uncompressed_data_sizes.data(), num_blocks, scratch.data(), scratch.size(), uncompressed_data_ptrs.data(), statuses.data(), stream); CUDF_EXPECTS(status == nvcompStatus_t::nvcompSuccess, "unable to perform snappy decompression"); CUDF_EXPECTS(thrust::equal(rmm::exec_policy(stream), uncompressed_data_sizes.d_begin(), uncompressed_data_sizes.d_end(), actual_uncompressed_data_sizes.begin()), "Mismatch in expected and actual decompressed size during snappy decompression"); CUDF_EXPECTS(thrust::equal(rmm::exec_policy(stream), statuses.begin(), statuses.end(), thrust::make_constant_iterator(nvcompStatus_t::nvcompSuccess)), "Error during snappy decompression"); // Update blocks offsets & sizes to refer to uncompressed data for (size_t i = 0; i < num_blocks; i++) { meta.block_list[i].offset = uncompressed_data_offsets[i]; meta.block_list[i].size = uncompressed_data_sizes[i]; } return decomp_block_data; } else { CUDF_FAIL("Unsupported compression codec\n"); } } std::vector<column_buffer> decode_data(metadata& meta, rmm::device_buffer const& block_data, std::vector<std::pair<uint32_t, uint32_t>> const& dict, device_span<string_index_pair const> global_dictionary, size_t num_rows, std::vector<std::pair<int, std::string>> const& selection, std::vector<data_type> const& column_types, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { auto out_buffers = std::vector<column_buffer>(); for (size_t i = 0; i < column_types.size(); ++i) { auto col_idx = selection[i].first; bool is_nullable = (meta.columns[col_idx].schema_null_idx >= 0); out_buffers.emplace_back(column_types[i], num_rows, is_nullable, stream, mr); } // Build gpu schema auto schema_desc = hostdevice_vector<gpu::schemadesc_s>(meta.schema.size(), stream); uint32_t min_row_data_size = 0; int skip_field_cnt = 0; for (size_t i = 0; i < meta.schema.size(); i++) { type_kind_e kind = meta.schema[i].kind; if (skip_field_cnt != 0) { // Exclude union and array members from min_row_data_size skip_field_cnt += meta.schema[i].num_children - 1; } else { switch (kind) { case type_union: case type_array: skip_field_cnt = meta.schema[i].num_children; // fall through case type_boolean: case type_int: case type_long: case type_bytes: case type_string: case type_enum: min_row_data_size += 1; break; case type_float: min_row_data_size += 4; break; case type_double: min_row_data_size += 8; break; default: break; } } if (kind == type_enum && !meta.schema[i].symbols.size()) { kind = type_int; } schema_desc[i].kind = kind; schema_desc[i].count = (kind == type_enum) ? 
0 : static_cast<uint32_t>(meta.schema[i].num_children); schema_desc[i].dataptr = nullptr; CUDF_EXPECTS(kind != type_union || meta.schema[i].num_children < 2 || (meta.schema[i].num_children == 2 && (meta.schema[i + 1].kind == type_null || meta.schema[i + 2].kind == type_null)), "Union with non-null type not currently supported"); } std::vector<void*> valid_alias(out_buffers.size(), nullptr); for (size_t i = 0; i < out_buffers.size(); i++) { auto const col_idx = selection[i].first; int schema_data_idx = meta.columns[col_idx].schema_data_idx; int schema_null_idx = meta.columns[col_idx].schema_null_idx; schema_desc[schema_data_idx].dataptr = out_buffers[i].data(); if (schema_null_idx >= 0) { if (!schema_desc[schema_null_idx].dataptr) { schema_desc[schema_null_idx].dataptr = out_buffers[i].null_mask(); } else { valid_alias[i] = schema_desc[schema_null_idx].dataptr; } } if (meta.schema[schema_data_idx].kind == type_enum) { schema_desc[schema_data_idx].count = dict[i].first; } if (out_buffers[i].null_mask_size()) { cudf::detail::set_null_mask(out_buffers[i].null_mask(), 0, num_rows, true, stream); } } auto block_list = cudf::detail::make_device_uvector_async(meta.block_list, stream); schema_desc.host_to_device(stream); gpu::DecodeAvroColumnData(block_list, schema_desc.device_ptr(), global_dictionary, static_cast<uint8_t const*>(block_data.data()), static_cast<uint32_t>(schema_desc.size()), meta.num_rows, meta.skip_rows, min_row_data_size, stream); // Copy valid bits that are shared between columns for (size_t i = 0; i < out_buffers.size(); i++) { if (valid_alias[i] != nullptr) { CUDA_TRY(cudaMemcpyAsync(out_buffers[i].null_mask(), valid_alias[i], out_buffers[i].null_mask_size(), cudaMemcpyHostToDevice, stream.value())); } } schema_desc.device_to_host(stream, true); for (size_t i = 0; i < out_buffers.size(); i++) { auto const col_idx = selection[i].first; auto const schema_null_idx = meta.columns[col_idx].schema_null_idx; out_buffers[i].null_count() = (schema_null_idx >= 0) ? schema_desc[schema_null_idx].count : 0; } return out_buffers; } table_with_metadata read_avro(std::unique_ptr<cudf::io::datasource>&& source, avro_reader_options const& options, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { auto skip_rows = options.get_skip_rows(); auto num_rows = options.get_num_rows(); num_rows = (num_rows != 0) ? 
num_rows : -1; std::vector<std::unique_ptr<column>> out_columns; table_metadata metadata_out; // Open the source Avro dataset metadata auto meta = metadata(source.get()); // Select and read partial metadata / schema within the subset of rows meta.init_and_select_rows(skip_rows, num_rows); // Select only columns required by the options auto selected_columns = meta.select_columns(options.get_columns()); if (selected_columns.size() != 0) { // Get a list of column data types std::vector<data_type> column_types; for (auto const& col : selected_columns) { auto& col_schema = meta.schema[meta.columns[col.first].schema_data_idx]; auto col_type = to_type_id(&col_schema); CUDF_EXPECTS(col_type != type_id::EMPTY, "Unknown type"); column_types.emplace_back(col_type); } if (meta.total_data_size > 0) { rmm::device_buffer block_data; if (source->is_device_read_preferred(meta.total_data_size)) { block_data = rmm::device_buffer{meta.total_data_size, stream}; auto read_bytes = source->device_read(meta.block_list[0].offset, meta.total_data_size, static_cast<uint8_t*>(block_data.data()), stream); block_data.resize(read_bytes, stream); } else { auto const buffer = source->host_read(meta.block_list[0].offset, meta.total_data_size); block_data = rmm::device_buffer{buffer->data(), buffer->size(), stream}; } if (meta.codec != "" && meta.codec != "null") { auto decomp_block_data = decompress_data(*source, meta, block_data, stream); block_data = std::move(decomp_block_data); } else { auto dst_ofs = meta.block_list[0].offset; for (size_t i = 0; i < meta.block_list.size(); i++) { meta.block_list[i].offset -= dst_ofs; } } size_t total_dictionary_entries = 0; size_t dictionary_data_size = 0; auto dict = std::vector<std::pair<uint32_t, uint32_t>>(column_types.size()); for (size_t i = 0; i < column_types.size(); ++i) { auto col_idx = selected_columns[i].first; auto& col_schema = meta.schema[meta.columns[col_idx].schema_data_idx]; dict[i].first = static_cast<uint32_t>(total_dictionary_entries); dict[i].second = static_cast<uint32_t>(col_schema.symbols.size()); total_dictionary_entries += dict[i].second; for (auto const& sym : col_schema.symbols) { dictionary_data_size += sym.length(); } } auto d_global_dict = rmm::device_uvector<string_index_pair>(0, stream); auto d_global_dict_data = rmm::device_uvector<char>(0, stream); if (total_dictionary_entries > 0) { auto h_global_dict = std::vector<string_index_pair>(total_dictionary_entries); auto h_global_dict_data = std::vector<char>(dictionary_data_size); size_t dict_pos = 0; for (size_t i = 0; i < column_types.size(); ++i) { auto const col_idx = selected_columns[i].first; auto const& col_schema = meta.schema[meta.columns[col_idx].schema_data_idx]; auto const col_dict_entries = &(h_global_dict[dict[i].first]); for (size_t j = 0; j < dict[i].second; j++) { auto const& symbols = col_schema.symbols[j]; auto const data_dst = h_global_dict_data.data() + dict_pos; auto const len = symbols.length(); col_dict_entries[j].first = data_dst; col_dict_entries[j].second = len; std::copy(symbols.c_str(), symbols.c_str() + len, data_dst); dict_pos += len; } } d_global_dict = cudf::detail::make_device_uvector_async(h_global_dict, stream); d_global_dict_data = cudf::detail::make_device_uvector_async(h_global_dict_data, stream); stream.synchronize(); } auto out_buffers = decode_data(meta, block_data, dict, d_global_dict, num_rows, selected_columns, column_types, stream, mr); for (size_t i = 0; i < column_types.size(); ++i) { out_columns.emplace_back(make_column(out_buffers[i], nullptr, stream, 
mr)); } } else { // Create empty columns for (size_t i = 0; i < column_types.size(); ++i) { out_columns.emplace_back(make_empty_column(column_types[i])); } } } // Return column names (must match order of returned columns) metadata_out.column_names.resize(selected_columns.size()); for (size_t i = 0; i < selected_columns.size(); i++) { metadata_out.column_names[i] = selected_columns[i].second; } // Return user metadata metadata_out.user_data = meta.user_data; return {std::make_unique<table>(std::move(out_columns)), std::move(metadata_out)}; } } // namespace avro } // namespace detail } // namespace io } // namespace cudf
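The enum-dictionary path above flattens every selected column's symbol list into one contiguous character buffer plus (pointer, length) entries before uploading both with make_device_uvector_async and synchronizing the stream. A minimal host-only sketch of that flattening step; string_index_pair here is a simplified stand-in for the type of the same name used above, and flatten_dictionaries is an illustrative helper, not part of the reader:

#include <algorithm>
#include <cstddef>
#include <string>
#include <utility>
#include <vector>

// Simplified stand-in for the (character pointer, length) pair stored per symbol.
using string_index_pair = std::pair<char const*, std::size_t>;

// Mirrors the h_global_dict / h_global_dict_data construction above: copy
// every symbol into one flat char buffer and record where each one landed.
void flatten_dictionaries(std::vector<std::vector<std::string>> const& per_column_symbols,
                          std::vector<string_index_pair>& dict_entries,
                          std::vector<char>& dict_data)
{
  std::size_t total_entries = 0, total_chars = 0;
  for (auto const& col : per_column_symbols) {
    total_entries += col.size();
    for (auto const& s : col) total_chars += s.size();
  }
  dict_entries.resize(total_entries);
  dict_data.resize(total_chars);  // sized once up front, so the recorded pointers stay valid

  std::size_t entry_pos = 0, char_pos = 0;
  for (auto const& col : per_column_symbols) {
    for (auto const& s : col) {
      char* dst = dict_data.data() + char_pos;
      std::copy(s.begin(), s.end(), dst);
      dict_entries[entry_pos++] = {dst, s.size()};
      char_pos += s.size();
    }
  }
}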
465a0a06a8f66297d224f7979ffda5b8d887d2b9.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright 2011-2013 Maxim Milakov * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "fully_connected_layer_updater_cuda.h" #include <hip/hip_runtime.h> #include "util_cuda.h" #include "neural_network_cublas_exception.h" #include "neural_network_cuda_exception.h" #include "../convolution_layer.h" extern __shared__ float arr_sh[]; template<bool different_input> __global__ void fully_connected_upd_kernel( float * output, const float * __restrict input, const float * weights, int input_neuron_count, int output_neuron_count, int min_iteration_count) { int thread_id = threadIdx.x; int output_neuron_id = blockIdx.y; int entry_id = blockIdx.z; int threadblock_size = blockDim.x; float sum = 0.0F; const float * current_input = input + (int)(different_input ? (entry_id * input_neuron_count): 0); const float * current_weights = weights + (int)((entry_id * output_neuron_count + output_neuron_id) * input_neuron_count); int current_input_neuron_id = thread_id; for(int i = 0; i < min_iteration_count; ++i) { sum += current_input[current_input_neuron_id] * current_weights[current_input_neuron_id]; current_input_neuron_id += threadblock_size; } if (current_input_neuron_id < input_neuron_count) sum += current_input[current_input_neuron_id] * current_weights[current_input_neuron_id]; int lane_id = thread_id & 31; #if __CUDA_ARCH__ >= 300 #pragma unroll for(int tx = 16; tx > 0; tx >>= 1) { sum += __shfl_down(sum, tx); } #else volatile float * arr = arr_sh; arr[thread_id] = sum; #pragma unroll for(int tx = 16; tx > 0; tx >>= 1) { if (lane_id < tx) arr[thread_id] += arr[thread_id + tx]; } sum = arr[thread_id]; #endif if (lane_id == 0) atomicAdd(output + entry_id * output_neuron_count + output_neuron_id, sum); } template<bool single_output_group_count> __global__ void fully_connected_deriviative_upd_kernel( float * __restrict input_errors, const float * __restrict output_errors, const float * __restrict weights, int input_neuron_count, int output_neuron_count, int output_group_count, int max_iteration_count, int entry_count) { int input_neuron_id = blockIdx.x * blockDim.x + threadIdx.x; int output_group_id = blockIdx.y * blockDim.y + threadIdx.y; int entry_id = blockIdx.z * blockDim.z + threadIdx.z; bool in_bounds = (input_neuron_id < input_neuron_count) && (output_group_id < output_group_count) && (entry_id < entry_count); if (in_bounds) { int output_offset = entry_id * output_neuron_count + output_group_id; int weights_offset = (entry_id * output_neuron_count + output_group_id) * input_neuron_count + input_neuron_id; int iteration_count = ((max_iteration_count - 1) * output_group_count + output_group_id < output_neuron_count) ? 
max_iteration_count : max_iteration_count - 1; float sum = 0.0F; #pragma unroll 4 for(int i = 0; i < iteration_count; ++i) { sum += output_errors[output_offset] * weights[weights_offset]; weights_offset += input_neuron_count * output_group_count; output_offset += output_group_count; } float * current_input = input_errors + entry_id * input_neuron_count + input_neuron_id; if (single_output_group_count) { *current_input = sum; } else { atomicAdd(current_input, sum); } } } __global__ void fully_connected_update_biases_upd_kernel( float * __restrict biases, const float * __restrict output_errors, const float * __restrict learning_rate, int output_neuron_count, int entry_count) { int output_neuron_id = blockIdx.x * blockDim.x + threadIdx.x; int entry_id = blockIdx.y * blockDim.y + threadIdx.y; bool in_bounds = (output_neuron_id < output_neuron_count) && (entry_id < entry_count); if (in_bounds) { int offset = entry_id * output_neuron_count + output_neuron_id; float upd_val = output_errors[offset] * learning_rate[offset] + biases[offset]; biases[offset] = upd_val; } } template<bool different_input> __global__ void fully_connected_update_weights_upd_kernel( float * __restrict weights, const float * __restrict input_neurons, const float * __restrict output_errors, const float * __restrict learning_rate, int input_neuron_count, int output_neuron_count, int entry_count) { int input_neuron_id = blockIdx.x * blockDim.x + threadIdx.x; int output_neuron_id = blockIdx.y * blockDim.y + threadIdx.y; int entry_id = blockIdx.z * blockDim.z + threadIdx.z; bool in_bounds = (input_neuron_id < input_neuron_count) && (output_neuron_id < output_neuron_count) && (entry_id < entry_count); if (in_bounds) { int input_offset = (different_input ? entry_id * input_neuron_count : 0) + input_neuron_id; int offset = (entry_id * output_neuron_count + output_neuron_id) * input_neuron_count + input_neuron_id; float upd_val = input_neurons[input_offset] * output_errors[entry_id * output_neuron_count + output_neuron_id] * learning_rate[offset] + weights[offset]; weights[offset] = upd_val; } } namespace nnforge { namespace cuda { fully_connected_layer_updater_cuda::fully_connected_layer_updater_cuda() { } fully_connected_layer_updater_cuda::~fully_connected_layer_updater_cuda() { } void fully_connected_layer_updater_cuda::enqueue_test( unsigned int offset_input_entry_id, hipStream_t stream_id, const std::vector<const_cuda_linear_buffer_device_smart_ptr>& schema_data, const std::vector<cuda_linear_buffer_device_smart_ptr>& data, const_cuda_linear_buffer_device_smart_ptr input_neurons_buffer, cuda_linear_buffer_device_smart_ptr output_neurons_buffer, const std::vector<cuda_linear_buffer_device_smart_ptr>& additional_buffers, std::vector<cuda_memobject_smart_ptr>& dynamic_memobjects, unsigned int entry_count) { cuda_util::copy_buffer( *cuda_config, *data[1], *output_neurons_buffer, output_elem_count_per_entry * entry_count, stream_id); int threadblock_size = get_threadblock_size_forward(input_elem_count_per_entry); dim3 grid_size(1, output_elem_count_per_entry, entry_count); dim3 block_size(threadblock_size, 1, 1); int smem_size = (cuda_config->get_compute_capability() >= 300) ? 
0 : (threadblock_size * sizeof(float)); int min_iteration_count = input_elem_count_per_entry / threadblock_size; if (different_input) { hipLaunchKernelGGL(( fully_connected_upd_kernel<true>), dim3(grid_size), dim3(block_size), smem_size, stream_id, *output_neurons_buffer, *input_neurons_buffer, *data[0], input_elem_count_per_entry, output_elem_count_per_entry, min_iteration_count); } else { hipLaunchKernelGGL(( fully_connected_upd_kernel<false>), dim3(grid_size), dim3(block_size), smem_size, stream_id, *output_neurons_buffer, (const float *)(*input_neurons_buffer) + (offset_input_entry_id * input_elem_count_per_entry), *data[0], input_elem_count_per_entry, output_elem_count_per_entry, min_iteration_count); } } void fully_connected_layer_updater_cuda::enqueue_backprop( hipStream_t stream_id, const std::vector<const_cuda_linear_buffer_device_smart_ptr>& schema_data, const std::vector<cuda_linear_buffer_device_smart_ptr>& data, const_cuda_linear_buffer_device_smart_ptr output_neurons_buffer, const_cuda_linear_buffer_device_smart_ptr input_neurons_buffer, cuda_linear_buffer_device_smart_ptr output_errors_buffer, cuda_linear_buffer_device_smart_ptr input_errors_buffer, const std::vector<cuda_linear_buffer_device_smart_ptr>& additional_buffers, std::vector<cuda_memobject_smart_ptr>& dynamic_memobjects, unsigned int entry_count) { if (!different_input) throw neural_network_exception("fully_connected_layer_updater_cuda is not able to backprop to the same input"); int output_group_count = cuda_util::get_group_count( *cuda_config, input_elem_count_per_entry * entry_count, output_elem_count_per_entry); int max_iteration_count = (output_elem_count_per_entry + output_group_count - 1) / output_group_count; if (output_group_count > 1) cuda_util::set_with_value( *cuda_config, *input_errors_buffer, 0.0F, input_elem_count_per_entry * entry_count, stream_id); std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_sequential_access( *cuda_config, input_elem_count_per_entry, output_group_count, entry_count); if (output_group_count == 1) hipLaunchKernelGGL(( fully_connected_deriviative_upd_kernel<true>), dim3(kernel_dims.first), dim3(kernel_dims.second), 0, stream_id, *input_errors_buffer, *output_errors_buffer, *data[0], input_elem_count_per_entry, output_elem_count_per_entry, output_group_count, max_iteration_count, entry_count); else hipLaunchKernelGGL(( fully_connected_deriviative_upd_kernel<false>), dim3(kernel_dims.first), dim3(kernel_dims.second), 0, stream_id, *input_errors_buffer, *output_errors_buffer, *data[0], input_elem_count_per_entry, output_elem_count_per_entry, output_group_count, max_iteration_count, entry_count); } void fully_connected_layer_updater_cuda::enqueue_update_weights( unsigned int offset_input_entry_id, hipStream_t stream_id, const std::vector<cuda_linear_buffer_device_smart_ptr>& data, const std::vector<const_cuda_linear_buffer_device_smart_ptr>& schema_data, const std::vector<const_cuda_linear_buffer_device_smart_ptr>& learning_rate, cuda_linear_buffer_device_smart_ptr output_errors_buffer, const_cuda_linear_buffer_device_smart_ptr input_neurons_buffer, const std::vector<cuda_linear_buffer_device_smart_ptr>& additional_buffers, std::vector<cuda_memobject_smart_ptr>& dynamic_memobjects, unsigned int entry_count) { // Update biases { std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_sequential_access( *cuda_config, output_elem_count_per_entry, entry_count, 1); hipLaunchKernelGGL(( fully_connected_update_biases_upd_kernel), 
dim3(kernel_dims.first), dim3(kernel_dims.second), 0, stream_id, *data[1], *output_errors_buffer, *learning_rate[1], output_elem_count_per_entry, entry_count); } std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_2d_access_x_aligned( *cuda_config, input_elem_count_per_entry, output_elem_count_per_entry, entry_count); if (different_input) { hipLaunchKernelGGL(( fully_connected_update_weights_upd_kernel<true>), dim3(kernel_dims.first), dim3(kernel_dims.second), 0, stream_id, *data[0], *input_neurons_buffer, *output_errors_buffer, *learning_rate[0], input_elem_count_per_entry, output_elem_count_per_entry, entry_count); } else { hipLaunchKernelGGL(( fully_connected_update_weights_upd_kernel<false>), dim3(kernel_dims.first), dim3(kernel_dims.second), 0, stream_id, *data[0], (const float *)(*input_neurons_buffer) + (offset_input_entry_id * input_elem_count_per_entry), *output_errors_buffer, *learning_rate[0], input_elem_count_per_entry, output_elem_count_per_entry, entry_count); } } bool fully_connected_layer_updater_cuda::is_in_place_backprop() const { return false; } int fully_connected_layer_updater_cuda::get_threadblock_size_forward(int input_neuron_count) { int threadblock_size; if (input_neuron_count < 128) { threadblock_size = (input_neuron_count + 32 - 1) / 32 * 32; } else { int threadblock_count = (input_neuron_count + 128 - 1) / 128; threadblock_size = (input_neuron_count + threadblock_count - 1) / threadblock_count; threadblock_size = (threadblock_size + 32 - 1) / 32 * 32; } return threadblock_size; } } }
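In fully_connected_upd_kernel each thread accumulates a strided partial dot product, the warp folds the partials together (via __shfl_down on compute capability >= 3.0, or the shared-memory tree in the #else branch otherwise), and lane 0 adds the warp total into the output neuron with one atomicAdd; the output buffer has already been seeded from data[1] by the copy_buffer call in enqueue_test. A self-contained sketch of the same warp-level pattern, written with the sync-qualified shuffle of current CUDA rather than the older __shfl_down used above:

#include <cuda_runtime.h>

// Each thread arrives with its own partial sum; the warp folds them together
// and lane 0 accumulates the warp total into *out. Assumes blockDim.x is a
// multiple of 32, which the updater's threadblock sizing guarantees.
__global__ void warp_reduce_sketch(const float* __restrict__ partial, float* out, int n)
{
    int tid   = blockIdx.x * blockDim.x + threadIdx.x;
    float sum = (tid < n) ? partial[tid] : 0.0f;

    // Fold the 32 lane values down to lane 0.
    for (int offset = 16; offset > 0; offset >>= 1)
        sum += __shfl_down_sync(0xffffffffu, sum, offset);

    // One atomic per warp instead of one per thread.
    if ((threadIdx.x & 31) == 0)
        atomicAdd(out, sum);
}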
465a0a06a8f66297d224f7979ffda5b8d887d2b9.cu
/* * Copyright 2011-2013 Maxim Milakov * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "fully_connected_layer_updater_cuda.h" #include <cuda_runtime.h> #include "util_cuda.h" #include "neural_network_cublas_exception.h" #include "neural_network_cuda_exception.h" #include "../convolution_layer.h" extern __shared__ float arr_sh[]; template<bool different_input> __global__ void fully_connected_upd_kernel( float * output, const float * __restrict input, const float * weights, int input_neuron_count, int output_neuron_count, int min_iteration_count) { int thread_id = threadIdx.x; int output_neuron_id = blockIdx.y; int entry_id = blockIdx.z; int threadblock_size = blockDim.x; float sum = 0.0F; const float * current_input = input + (int)(different_input ? (entry_id * input_neuron_count): 0); const float * current_weights = weights + (int)((entry_id * output_neuron_count + output_neuron_id) * input_neuron_count); int current_input_neuron_id = thread_id; for(int i = 0; i < min_iteration_count; ++i) { sum += current_input[current_input_neuron_id] * current_weights[current_input_neuron_id]; current_input_neuron_id += threadblock_size; } if (current_input_neuron_id < input_neuron_count) sum += current_input[current_input_neuron_id] * current_weights[current_input_neuron_id]; int lane_id = thread_id & 31; #if __CUDA_ARCH__ >= 300 #pragma unroll for(int tx = 16; tx > 0; tx >>= 1) { sum += __shfl_down(sum, tx); } #else volatile float * arr = arr_sh; arr[thread_id] = sum; #pragma unroll for(int tx = 16; tx > 0; tx >>= 1) { if (lane_id < tx) arr[thread_id] += arr[thread_id + tx]; } sum = arr[thread_id]; #endif if (lane_id == 0) atomicAdd(output + entry_id * output_neuron_count + output_neuron_id, sum); } template<bool single_output_group_count> __global__ void fully_connected_deriviative_upd_kernel( float * __restrict input_errors, const float * __restrict output_errors, const float * __restrict weights, int input_neuron_count, int output_neuron_count, int output_group_count, int max_iteration_count, int entry_count) { int input_neuron_id = blockIdx.x * blockDim.x + threadIdx.x; int output_group_id = blockIdx.y * blockDim.y + threadIdx.y; int entry_id = blockIdx.z * blockDim.z + threadIdx.z; bool in_bounds = (input_neuron_id < input_neuron_count) && (output_group_id < output_group_count) && (entry_id < entry_count); if (in_bounds) { int output_offset = entry_id * output_neuron_count + output_group_id; int weights_offset = (entry_id * output_neuron_count + output_group_id) * input_neuron_count + input_neuron_id; int iteration_count = ((max_iteration_count - 1) * output_group_count + output_group_id < output_neuron_count) ? 
max_iteration_count : max_iteration_count - 1; float sum = 0.0F; #pragma unroll 4 for(int i = 0; i < iteration_count; ++i) { sum += output_errors[output_offset] * weights[weights_offset]; weights_offset += input_neuron_count * output_group_count; output_offset += output_group_count; } float * current_input = input_errors + entry_id * input_neuron_count + input_neuron_id; if (single_output_group_count) { *current_input = sum; } else { atomicAdd(current_input, sum); } } } __global__ void fully_connected_update_biases_upd_kernel( float * __restrict biases, const float * __restrict output_errors, const float * __restrict learning_rate, int output_neuron_count, int entry_count) { int output_neuron_id = blockIdx.x * blockDim.x + threadIdx.x; int entry_id = blockIdx.y * blockDim.y + threadIdx.y; bool in_bounds = (output_neuron_id < output_neuron_count) && (entry_id < entry_count); if (in_bounds) { int offset = entry_id * output_neuron_count + output_neuron_id; float upd_val = output_errors[offset] * learning_rate[offset] + biases[offset]; biases[offset] = upd_val; } } template<bool different_input> __global__ void fully_connected_update_weights_upd_kernel( float * __restrict weights, const float * __restrict input_neurons, const float * __restrict output_errors, const float * __restrict learning_rate, int input_neuron_count, int output_neuron_count, int entry_count) { int input_neuron_id = blockIdx.x * blockDim.x + threadIdx.x; int output_neuron_id = blockIdx.y * blockDim.y + threadIdx.y; int entry_id = blockIdx.z * blockDim.z + threadIdx.z; bool in_bounds = (input_neuron_id < input_neuron_count) && (output_neuron_id < output_neuron_count) && (entry_id < entry_count); if (in_bounds) { int input_offset = (different_input ? entry_id * input_neuron_count : 0) + input_neuron_id; int offset = (entry_id * output_neuron_count + output_neuron_id) * input_neuron_count + input_neuron_id; float upd_val = input_neurons[input_offset] * output_errors[entry_id * output_neuron_count + output_neuron_id] * learning_rate[offset] + weights[offset]; weights[offset] = upd_val; } } namespace nnforge { namespace cuda { fully_connected_layer_updater_cuda::fully_connected_layer_updater_cuda() { } fully_connected_layer_updater_cuda::~fully_connected_layer_updater_cuda() { } void fully_connected_layer_updater_cuda::enqueue_test( unsigned int offset_input_entry_id, cudaStream_t stream_id, const std::vector<const_cuda_linear_buffer_device_smart_ptr>& schema_data, const std::vector<cuda_linear_buffer_device_smart_ptr>& data, const_cuda_linear_buffer_device_smart_ptr input_neurons_buffer, cuda_linear_buffer_device_smart_ptr output_neurons_buffer, const std::vector<cuda_linear_buffer_device_smart_ptr>& additional_buffers, std::vector<cuda_memobject_smart_ptr>& dynamic_memobjects, unsigned int entry_count) { cuda_util::copy_buffer( *cuda_config, *data[1], *output_neurons_buffer, output_elem_count_per_entry * entry_count, stream_id); int threadblock_size = get_threadblock_size_forward(input_elem_count_per_entry); dim3 grid_size(1, output_elem_count_per_entry, entry_count); dim3 block_size(threadblock_size, 1, 1); int smem_size = (cuda_config->get_compute_capability() >= 300) ? 
0 : (threadblock_size * sizeof(float)); int min_iteration_count = input_elem_count_per_entry / threadblock_size; if (different_input) { fully_connected_upd_kernel<true><<<grid_size, block_size, smem_size, stream_id>>>( *output_neurons_buffer, *input_neurons_buffer, *data[0], input_elem_count_per_entry, output_elem_count_per_entry, min_iteration_count); } else { fully_connected_upd_kernel<false><<<grid_size, block_size, smem_size, stream_id>>>( *output_neurons_buffer, (const float *)(*input_neurons_buffer) + (offset_input_entry_id * input_elem_count_per_entry), *data[0], input_elem_count_per_entry, output_elem_count_per_entry, min_iteration_count); } } void fully_connected_layer_updater_cuda::enqueue_backprop( cudaStream_t stream_id, const std::vector<const_cuda_linear_buffer_device_smart_ptr>& schema_data, const std::vector<cuda_linear_buffer_device_smart_ptr>& data, const_cuda_linear_buffer_device_smart_ptr output_neurons_buffer, const_cuda_linear_buffer_device_smart_ptr input_neurons_buffer, cuda_linear_buffer_device_smart_ptr output_errors_buffer, cuda_linear_buffer_device_smart_ptr input_errors_buffer, const std::vector<cuda_linear_buffer_device_smart_ptr>& additional_buffers, std::vector<cuda_memobject_smart_ptr>& dynamic_memobjects, unsigned int entry_count) { if (!different_input) throw neural_network_exception("fully_connected_layer_updater_cuda is not able to backprop to the same input"); int output_group_count = cuda_util::get_group_count( *cuda_config, input_elem_count_per_entry * entry_count, output_elem_count_per_entry); int max_iteration_count = (output_elem_count_per_entry + output_group_count - 1) / output_group_count; if (output_group_count > 1) cuda_util::set_with_value( *cuda_config, *input_errors_buffer, 0.0F, input_elem_count_per_entry * entry_count, stream_id); std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_sequential_access( *cuda_config, input_elem_count_per_entry, output_group_count, entry_count); if (output_group_count == 1) fully_connected_deriviative_upd_kernel<true><<<kernel_dims.first, kernel_dims.second, 0, stream_id>>>( *input_errors_buffer, *output_errors_buffer, *data[0], input_elem_count_per_entry, output_elem_count_per_entry, output_group_count, max_iteration_count, entry_count); else fully_connected_deriviative_upd_kernel<false><<<kernel_dims.first, kernel_dims.second, 0, stream_id>>>( *input_errors_buffer, *output_errors_buffer, *data[0], input_elem_count_per_entry, output_elem_count_per_entry, output_group_count, max_iteration_count, entry_count); } void fully_connected_layer_updater_cuda::enqueue_update_weights( unsigned int offset_input_entry_id, cudaStream_t stream_id, const std::vector<cuda_linear_buffer_device_smart_ptr>& data, const std::vector<const_cuda_linear_buffer_device_smart_ptr>& schema_data, const std::vector<const_cuda_linear_buffer_device_smart_ptr>& learning_rate, cuda_linear_buffer_device_smart_ptr output_errors_buffer, const_cuda_linear_buffer_device_smart_ptr input_neurons_buffer, const std::vector<cuda_linear_buffer_device_smart_ptr>& additional_buffers, std::vector<cuda_memobject_smart_ptr>& dynamic_memobjects, unsigned int entry_count) { // Update biases { std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_sequential_access( *cuda_config, output_elem_count_per_entry, entry_count, 1); fully_connected_update_biases_upd_kernel<<<kernel_dims.first, kernel_dims.second, 0, stream_id>>>( *data[1], *output_errors_buffer, *learning_rate[1], output_elem_count_per_entry, 
entry_count); } std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_2d_access_x_aligned( *cuda_config, input_elem_count_per_entry, output_elem_count_per_entry, entry_count); if (different_input) { fully_connected_update_weights_upd_kernel<true><<<kernel_dims.first, kernel_dims.second, 0, stream_id>>>( *data[0], *input_neurons_buffer, *output_errors_buffer, *learning_rate[0], input_elem_count_per_entry, output_elem_count_per_entry, entry_count); } else { fully_connected_update_weights_upd_kernel<false><<<kernel_dims.first, kernel_dims.second, 0, stream_id>>>( *data[0], (const float *)(*input_neurons_buffer) + (offset_input_entry_id * input_elem_count_per_entry), *output_errors_buffer, *learning_rate[0], input_elem_count_per_entry, output_elem_count_per_entry, entry_count); } } bool fully_connected_layer_updater_cuda::is_in_place_backprop() const { return false; } int fully_connected_layer_updater_cuda::get_threadblock_size_forward(int input_neuron_count) { int threadblock_size; if (input_neuron_count < 128) { threadblock_size = (input_neuron_count + 32 - 1) / 32 * 32; } else { int threadblock_count = (input_neuron_count + 128 - 1) / 128; threadblock_size = (input_neuron_count + threadblock_count - 1) / threadblock_count; threadblock_size = (threadblock_size + 32 - 1) / 32 * 32; } return threadblock_size; } } }
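get_threadblock_size_forward splits larger layers into roughly 128-wide chunks and rounds the resulting block size up to a multiple of 32, and enqueue_test then derives min_iteration_count from it. Worked through for a hypothetical layer of 300 input neurons (a size chosen only for illustration):

// Forward launch sizing for a hypothetical input_neuron_count of 300.
inline void forward_sizing_example()
{
    int input_neuron_count  = 300;
    int threadblock_count   = (input_neuron_count + 128 - 1) / 128;                             // = 3
    int threadblock_size    = (input_neuron_count + threadblock_count - 1) / threadblock_count; // = 100
    threadblock_size        = (threadblock_size + 32 - 1) / 32 * 32;                            // = 128
    int min_iteration_count = input_neuron_count / threadblock_size;                            // = 2
    (void)min_iteration_count;
    // Each thread performs 2 strided multiply-accumulates plus at most one
    // guarded tail element, matching the loop and trailing "if" in the kernel.
}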
6443ba35e1d7a98c6735221e528e5aea943c6029.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime.h> #include "VecSoa.cuh" #include "common_hip.cuh" #include "Some.cuh" #include "Collision.cuh" namespace collision { __device__ double d_weight(double distance) { int i = 0; double weight_ij = 0.0; //double re = r_e; if (distance >= _r_e){ weight_ij = 0.0; } else{ weight_ij = (_r_e / distance) - 1.0; //weight_ij = pow((1.0 - (distance / re)), 2); //weight_ij = pow((distance / _r_e) - 1.0, 2.0); } return weight_ij; } __global__ void calcCollision( double* pos_x, double* pos_y, double* pos_z, double* vel_x, double* vel_y, double* vel_z, double* acc_x, double* acc_y, double* acc_z, int* type, double* afterVelocity_x, double* afterVelocity_y, double* afterVelocity_z, unsigned int* neighborIndex, unsigned int* neighborNum, unsigned int totalParticle) { int j = 0; //qp double x_ij; double y_ij; double z_ij; int ix, iy, iz; int jx, jy, jz; int jBacket; double pre_distance; double distance; double w; double limit2 = pow(_coll_limit, 2); double ForceDt; double mi, mj; int threadId = blockIdx.x * blockDim.x + threadIdx.x; if (threadId < totalParticle){ if (type[threadId] == _fluid || type[threadId] == _air){ //6/21 //6/21 if (type[threadId] == _fluid){ mi = _density; } else if (type[threadId] == _air){ mi = _air_density; } //mi = _density; const double3 Position_i = make_double3(pos_x[threadId], pos_y[threadId], pos_z[threadId]); const double3 Velocity_i = make_double3(vel_x[threadId], vel_y[threadId], vel_z[threadId]); double3 Vector = make_double3(vel_x[threadId], vel_y[threadId], vel_z[threadId]); //for (j = 0; j < totalParticle; j++){ for (int l = 0; l < neighborNum[threadId]; l++){ const int j = neighborIndex[threadId * _max_neighbor + l]; if (j == threadId || type[j] == _ghost) continue; double3 Position_j = make_double3(pos_x[j], pos_y[j], pos_z[j]); double3 Velocity_j = make_double3(vel_x[j], vel_y[j], vel_z[j]); x_ij = Position_j.x - Position_i.x; y_ij = Position_j.y - Position_i.y; z_ij = Position_j.z - Position_i.z; pre_distance = (x_ij*x_ij) + (y_ij*y_ij) + (z_ij*z_ij); if (pre_distance < limit2){ distance = sqrt(pre_distance); if (distance == 0.0) printf("calcCollision::indexNumber = %d\n", threadId); ForceDt = (Velocity_i.x - Velocity_j.x)*(x_ij / distance) + (Velocity_i.y - Velocity_j.y)*(y_ij / distance) + (Velocity_i.z - Velocity_j.z)*(z_ij / distance); if (ForceDt > 0.0){ //6/21 if (type[threadId] == _fluid){ mj = _density; } else if (type[threadId] == _air){ mj = _air_density; } //mj = _density; ForceDt *= _col_rate * mi * mj / (mi + mj); Vector.x -= (ForceDt / mi)*(x_ij / distance); Vector.y -= (ForceDt / mi)*(y_ij / distance); Vector.z -= (ForceDt / mi)*(z_ij / distance); //printf("Collision\n"); } } } __syncthreads(); afterVelocity_x[threadId] = Vector.x; afterVelocity_y[threadId] = Vector.y; afterVelocity_z[threadId] = Vector.z; } } __syncthreads(); if (threadId < totalParticle){ if (type[threadId] == _fluid || type[threadId] == _air){ //6/21 pos_x[threadId] += (afterVelocity_x[threadId] - vel_x[threadId])*_dt; pos_y[threadId] += (afterVelocity_y[threadId] - vel_y[threadId])*_dt; pos_z[threadId] += (afterVelocity_z[threadId] - vel_z[threadId])*_dt; vel_x[threadId] = afterVelocity_x[threadId]; vel_y[threadId] = afterVelocity_y[threadId]; vel_z[threadId] = afterVelocity_z[threadId]; } } } __global__ void HighOrdercalcCollision( double* pos_x, double* pos_y, double* pos_z, double* vel_x, double* vel_y, double* vel_z, double* acc_x, double* acc_y, double* acc_z, int* type, double* 
afterVelocity_x, double* afterVelocity_y, double* afterVelocity_z, unsigned int* neighborIndex, unsigned int* neighborNum, unsigned int totalParticle) { int j = 0; //qp double x_ij; double y_ij; double z_ij; int ix, iy, iz; int jx, jy, jz; int jBacket; double pre_distance; double distance; double w; double limit2 = pow(_coll_limit, 2); double ForceDt; double mi, mj; int threadId = blockIdx.x * blockDim.x + threadIdx.x; if (threadId < totalParticle){ if (type[threadId] == _fluid || type[threadId] == _air){ //6/21 //printf("%d\n", threadId); //6/21 if (type[threadId] == _fluid){ mi = _density; } else if (type[threadId] == _air){ mi = _air_density; } //mi = _density; const double3 Position_i = make_double3(pos_x[threadId], pos_y[threadId], pos_z[threadId]); const double3 Velocity_i = make_double3(vel_x[threadId], vel_y[threadId], vel_z[threadId]); double3 Vector = make_double3(vel_x[threadId], vel_y[threadId], vel_z[threadId]); //for (j = 0; j < totalParticle; j++){ for (int l = 0; l < neighborNum[threadId]; l++){ const int j = neighborIndex[threadId * _max_neighbor + l]; if (j == threadId || type[j] == _ghost) continue; double3 Position_j = make_double3(pos_x[j], pos_y[j], pos_z[j]); double3 Velocity_j = make_double3(vel_x[j], vel_y[j], vel_z[j]); x_ij = Position_j.x - Position_i.x; y_ij = Position_j.y - Position_i.y; z_ij = Position_j.z - Position_i.z; pre_distance = (x_ij*x_ij) + (y_ij*y_ij) + (z_ij*z_ij); if (pre_distance < limit2){ distance = sqrt(pre_distance); if (distance == 0.0) printf("calcCollision::indexNumber = %d\n", threadId); ForceDt = (Velocity_i.x - Velocity_j.x)*(x_ij / distance) + (Velocity_i.y - Velocity_j.y)*(y_ij / distance) + (Velocity_i.z - Velocity_j.z)*(z_ij / distance); if (ForceDt > 0.0){ //6/21 if (type[threadId] == _fluid){ mj = _density; } else if (type[threadId] == _air){ mj = _air_density; } //mj = _density; ForceDt *= _col_rate * mi * mj / (mi + mj); Vector.x -= (ForceDt / mi)*(x_ij / distance); Vector.y -= (ForceDt / mi)*(y_ij / distance); Vector.z -= (ForceDt / mi)*(z_ij / distance); //printf("Collision\n"); } } } __syncthreads(); afterVelocity_x[threadId] = Vector.x; afterVelocity_y[threadId] = Vector.y; afterVelocity_z[threadId] = Vector.z; } } //__syncthreads(); // 6/9 if (threadId < totalParticle){ if (type[threadId] == _fluid || type[threadId] == _air){ //6/21 vel_x[threadId] = afterVelocity_x[threadId]; vel_y[threadId] = afterVelocity_y[threadId]; vel_z[threadId] = afterVelocity_z[threadId]; } } } }; Collision::Collision(const double re, const double coll_limit, const double density, const double air_density, const double d_t, //air_density 6/21 const int fluid, const int wall, const int dwall, const int air, const int ghost, //air 6/21 const int max_neighbor, const double col_rate) :_r_e(re), _coll_limit(coll_limit), _density(density), _air_density(air_density), _dt(d_t), _fluid(fluid), _wall(wall), _dwall(dwall), _air(air), _ghost(ghost), //air 6/21 _max_neighbor(max_neighbor), _col_rate(col_rate) { (hipMemcpyToSymbol(collision::_r_e, &_r_e, sizeof(double))); (hipMemcpyToSymbol(collision::_coll_limit, &_coll_limit, sizeof(double))); (hipMemcpyToSymbol(collision::_density, &_density, sizeof(double))); (hipMemcpyToSymbol(collision::_air_density, &_air_density, sizeof(double))); //6/21 (hipMemcpyToSymbol(collision::_dt, &_dt, sizeof(double))); (hipMemcpyToSymbol(collision::_fluid, &_fluid, sizeof(int))); (hipMemcpyToSymbol(collision::_wall, &_wall, sizeof(int))); (hipMemcpyToSymbol(collision::_dwall, &_dwall, sizeof(int))); 
(hipMemcpyToSymbol(collision::_air, &_air, sizeof(int))); //6/21 (hipMemcpyToSymbol(collision::_ghost, &_ghost, sizeof(int))); (hipMemcpyToSymbol(collision::_max_neighbor, &_max_neighbor, sizeof(int))); (hipMemcpyToSymbol(collision::_col_rate, &_col_rate, sizeof(double))); } Collision::~Collision(void) { } void Collision::calcCollisionTerm(Vec1dSoa& pos_x, Vec1dSoa& pos_y, Vec1dSoa& pos_z, Vec1dSoa& vel_x, Vec1dSoa& vel_y, Vec1dSoa& vel_z, Vec1dSoa& acc_x, Vec1dSoa& acc_y, Vec1dSoa& acc_z, Vec1iSoa& type, Vec1uiSoa& neighborIndex, Vec1uiSoa& neighborNum, unsigned int totalParticle) { int numThreadPerBlock = 256; int numBlock = (totalParticle + numThreadPerBlock - 1) / numThreadPerBlock; //vp hipEvent_t start, stop; float elapseTime; hipEventCreate(&start); hipEventCreate(&stop); Vec1dSoa afterVelocity_x; Vec1dSoa afterVelocity_y; Vec1dSoa afterVelocity_z; afterVelocity_x.resize(totalParticle, 0.0); afterVelocity_y.resize(totalParticle, 0.0); afterVelocity_z.resize(totalParticle, 0.0); collision::calcCollision << <numBlock, numThreadPerBlock >> >( pos_x._rawPointer, pos_y._rawPointer, pos_z._rawPointer, vel_x._rawPointer, vel_y._rawPointer, vel_z._rawPointer, acc_x._rawPointer, acc_y._rawPointer, acc_z._rawPointer, type._rawPointer, afterVelocity_x._rawPointer, afterVelocity_y._rawPointer, afterVelocity_z._rawPointer, neighborIndex._rawPointer, neighborNum._rawPointer, totalParticle); CHECK(hipDeviceSynchronize()); hipError_t err = hipGetLastError(); if (err != hipSuccess){ printf("Collision_error : %s\n", hipGetErrorString(err)); } afterVelocity_x.clear(); afterVelocity_y.clear(); afterVelocity_z.clear(); } void Collision::HighOrdercalcCollisionTerm(Vec1dSoa& pos_x, Vec1dSoa& pos_y, Vec1dSoa& pos_z, Vec1dSoa& vel_x, Vec1dSoa& vel_y, Vec1dSoa& vel_z, Vec1dSoa& acc_x, Vec1dSoa& acc_y, Vec1dSoa& acc_z, Vec1iSoa& type, Vec1uiSoa& neighborIndex, Vec1uiSoa& neighborNum, unsigned int totalParticle) { int numThreadPerBlock = 256; int numBlock = (totalParticle + numThreadPerBlock - 1) / numThreadPerBlock; //vp hipEvent_t start, stop; float elapseTime; hipEventCreate(&start); hipEventCreate(&stop); Vec1dSoa afterVelocity_x; Vec1dSoa afterVelocity_y; Vec1dSoa afterVelocity_z; afterVelocity_x.resize(totalParticle, 0.0); afterVelocity_y.resize(totalParticle, 0.0); afterVelocity_z.resize(totalParticle, 0.0); collision::HighOrdercalcCollision << <numBlock, numThreadPerBlock >> >( pos_x._rawPointer, pos_y._rawPointer, pos_z._rawPointer, vel_x._rawPointer, vel_y._rawPointer, vel_z._rawPointer, acc_x._rawPointer, acc_y._rawPointer, acc_z._rawPointer, type._rawPointer, afterVelocity_x._rawPointer, afterVelocity_y._rawPointer, afterVelocity_z._rawPointer, neighborIndex._rawPointer, neighborNum._rawPointer, totalParticle); CHECK(hipDeviceSynchronize()); hipError_t err = hipGetLastError(); if (err != hipSuccess){ printf("Collision_error : %s\n", hipGetErrorString(err)); } afterVelocity_x.clear(); afterVelocity_y.clear(); afterVelocity_z.clear(); }
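For each neighbour pair, calcCollision projects the relative velocity of particle i onto the unit vector towards particle j and, when the pair is closer than _coll_limit and approaching, subtracts an impulse scaled by _col_rate and the reduced mass m_i*m_j/(m_i+m_j); calcCollision additionally shifts the position by the velocity change times _dt, whereas HighOrdercalcCollision rewrites only the velocity. A scalar reference sketch of that pairwise response (Vec3 and collision_impulse are illustrative stand-ins for the inline double3 arithmetic, not names from this file):

// Illustrative stand-in for the double3 arithmetic done inline above.
struct Vec3 { double x, y, z; };

// Velocity correction applied to particle i for one neighbour j.
// r_hat points from i to j and must already be normalised.
Vec3 collision_impulse(Vec3 r_hat, Vec3 v_i, Vec3 v_j,
                       double m_i, double m_j, double col_rate)
{
    double v_rel = (v_i.x - v_j.x) * r_hat.x
                 + (v_i.y - v_j.y) * r_hat.y
                 + (v_i.z - v_j.z) * r_hat.z;          // approach speed along the line of centres
    if (v_rel <= 0.0) return {0.0, 0.0, 0.0};          // separating pair: no impulse
    double mu    = m_i * m_j / (m_i + m_j);            // reduced mass
    double scale = col_rate * mu * v_rel / m_i;
    return {-scale * r_hat.x, -scale * r_hat.y, -scale * r_hat.z};
}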
6443ba35e1d7a98c6735221e528e5aea943c6029.cu
#include <cuda_runtime.h> #include "VecSoa.cuh" #include "common.cuh" #include "Some.cuh" #include "Collision.cuh" namespace collision { __device__ double d_weight(double distance) { int i = 0; double weight_ij = 0.0; //double re = r_e; if (distance >= _r_e){ weight_ij = 0.0; } else{ weight_ij = (_r_e / distance) - 1.0; //weight_ij = pow((1.0 - (distance / re)), 2); //weight_ij = pow((distance / _r_e) - 1.0, 2.0); } return weight_ij; } __global__ void calcCollision( double* pos_x, double* pos_y, double* pos_z, double* vel_x, double* vel_y, double* vel_z, double* acc_x, double* acc_y, double* acc_z, int* type, double* afterVelocity_x, double* afterVelocity_y, double* afterVelocity_z, unsigned int* neighborIndex, unsigned int* neighborNum, unsigned int totalParticle) { int j = 0; //�Ώۗ��q�ԍ��p�ϐ� double x_ij; double y_ij; double z_ij; int ix, iy, iz; int jx, jy, jz; int jBacket; double pre_distance; double distance; double w; double limit2 = pow(_coll_limit, 2); double ForceDt; double mi, mj; int threadId = blockIdx.x * blockDim.x + threadIdx.x; if (threadId < totalParticle){ if (type[threadId] == _fluid || type[threadId] == _air){ //6/21 //変更 6/21 if (type[threadId] == _fluid){ mi = _density; } else if (type[threadId] == _air){ mi = _air_density; } //mi = _density; const double3 Position_i = make_double3(pos_x[threadId], pos_y[threadId], pos_z[threadId]); const double3 Velocity_i = make_double3(vel_x[threadId], vel_y[threadId], vel_z[threadId]); double3 Vector = make_double3(vel_x[threadId], vel_y[threadId], vel_z[threadId]); //for (j = 0; j < totalParticle; j++){ for (int l = 0; l < neighborNum[threadId]; l++){ const int j = neighborIndex[threadId * _max_neighbor + l]; if (j == threadId || type[j] == _ghost) continue; double3 Position_j = make_double3(pos_x[j], pos_y[j], pos_z[j]); double3 Velocity_j = make_double3(vel_x[j], vel_y[j], vel_z[j]); x_ij = Position_j.x - Position_i.x; y_ij = Position_j.y - Position_i.y; z_ij = Position_j.z - Position_i.z; pre_distance = (x_ij*x_ij) + (y_ij*y_ij) + (z_ij*z_ij); if (pre_distance < limit2){ distance = sqrt(pre_distance); if (distance == 0.0) printf("calcCollision::indexNumber = %d\n", threadId); ForceDt = (Velocity_i.x - Velocity_j.x)*(x_ij / distance) + (Velocity_i.y - Velocity_j.y)*(y_ij / distance) + (Velocity_i.z - Velocity_j.z)*(z_ij / distance); if (ForceDt > 0.0){ //変更 6/21 if (type[threadId] == _fluid){ mj = _density; } else if (type[threadId] == _air){ mj = _air_density; } //mj = _density; ForceDt *= _col_rate * mi * mj / (mi + mj); Vector.x -= (ForceDt / mi)*(x_ij / distance); Vector.y -= (ForceDt / mi)*(y_ij / distance); Vector.z -= (ForceDt / mi)*(z_ij / distance); //printf("Collision\n"); } } } __syncthreads(); afterVelocity_x[threadId] = Vector.x; afterVelocity_y[threadId] = Vector.y; afterVelocity_z[threadId] = Vector.z; } } __syncthreads(); if (threadId < totalParticle){ if (type[threadId] == _fluid || type[threadId] == _air){ //6/21 pos_x[threadId] += (afterVelocity_x[threadId] - vel_x[threadId])*_dt; pos_y[threadId] += (afterVelocity_y[threadId] - vel_y[threadId])*_dt; pos_z[threadId] += (afterVelocity_z[threadId] - vel_z[threadId])*_dt; vel_x[threadId] = afterVelocity_x[threadId]; vel_y[threadId] = afterVelocity_y[threadId]; vel_z[threadId] = afterVelocity_z[threadId]; } } } __global__ void HighOrdercalcCollision( double* pos_x, double* pos_y, double* pos_z, double* vel_x, double* vel_y, double* vel_z, double* acc_x, double* acc_y, double* acc_z, int* type, double* afterVelocity_x, double* afterVelocity_y, double* 
afterVelocity_z, unsigned int* neighborIndex, unsigned int* neighborNum, unsigned int totalParticle) { int j = 0; //�Ώۗ��q�ԍ��p�ϐ� double x_ij; double y_ij; double z_ij; int ix, iy, iz; int jx, jy, jz; int jBacket; double pre_distance; double distance; double w; double limit2 = pow(_coll_limit, 2); double ForceDt; double mi, mj; int threadId = blockIdx.x * blockDim.x + threadIdx.x; if (threadId < totalParticle){ if (type[threadId] == _fluid || type[threadId] == _air){ //6/21 //printf("%d\n", threadId); //変更 6/21 if (type[threadId] == _fluid){ mi = _density; } else if (type[threadId] == _air){ mi = _air_density; } //mi = _density; const double3 Position_i = make_double3(pos_x[threadId], pos_y[threadId], pos_z[threadId]); const double3 Velocity_i = make_double3(vel_x[threadId], vel_y[threadId], vel_z[threadId]); double3 Vector = make_double3(vel_x[threadId], vel_y[threadId], vel_z[threadId]); //for (j = 0; j < totalParticle; j++){ for (int l = 0; l < neighborNum[threadId]; l++){ const int j = neighborIndex[threadId * _max_neighbor + l]; if (j == threadId || type[j] == _ghost) continue; double3 Position_j = make_double3(pos_x[j], pos_y[j], pos_z[j]); double3 Velocity_j = make_double3(vel_x[j], vel_y[j], vel_z[j]); x_ij = Position_j.x - Position_i.x; y_ij = Position_j.y - Position_i.y; z_ij = Position_j.z - Position_i.z; pre_distance = (x_ij*x_ij) + (y_ij*y_ij) + (z_ij*z_ij); if (pre_distance < limit2){ distance = sqrt(pre_distance); if (distance == 0.0) printf("calcCollision::indexNumber = %d\n", threadId); ForceDt = (Velocity_i.x - Velocity_j.x)*(x_ij / distance) + (Velocity_i.y - Velocity_j.y)*(y_ij / distance) + (Velocity_i.z - Velocity_j.z)*(z_ij / distance); if (ForceDt > 0.0){ //変更 6/21 if (type[threadId] == _fluid){ mj = _density; } else if (type[threadId] == _air){ mj = _air_density; } //mj = _density; ForceDt *= _col_rate * mi * mj / (mi + mj); Vector.x -= (ForceDt / mi)*(x_ij / distance); Vector.y -= (ForceDt / mi)*(y_ij / distance); Vector.z -= (ForceDt / mi)*(z_ij / distance); //printf("Collision\n"); } } } __syncthreads(); afterVelocity_x[threadId] = Vector.x; afterVelocity_y[threadId] = Vector.y; afterVelocity_z[threadId] = Vector.z; } } //__syncthreads(); //変更 6/9 if (threadId < totalParticle){ if (type[threadId] == _fluid || type[threadId] == _air){ //6/21 vel_x[threadId] = afterVelocity_x[threadId]; vel_y[threadId] = afterVelocity_y[threadId]; vel_z[threadId] = afterVelocity_z[threadId]; } } } }; Collision::Collision(const double re, const double coll_limit, const double density, const double air_density, const double d_t, //air_density 6/21 const int fluid, const int wall, const int dwall, const int air, const int ghost, //air 6/21 const int max_neighbor, const double col_rate) :_r_e(re), _coll_limit(coll_limit), _density(density), _air_density(air_density), _dt(d_t), _fluid(fluid), _wall(wall), _dwall(dwall), _air(air), _ghost(ghost), //air 6/21 _max_neighbor(max_neighbor), _col_rate(col_rate) { (cudaMemcpyToSymbol(collision::_r_e, &_r_e, sizeof(double))); (cudaMemcpyToSymbol(collision::_coll_limit, &_coll_limit, sizeof(double))); (cudaMemcpyToSymbol(collision::_density, &_density, sizeof(double))); (cudaMemcpyToSymbol(collision::_air_density, &_air_density, sizeof(double))); //6/21 (cudaMemcpyToSymbol(collision::_dt, &_dt, sizeof(double))); (cudaMemcpyToSymbol(collision::_fluid, &_fluid, sizeof(int))); (cudaMemcpyToSymbol(collision::_wall, &_wall, sizeof(int))); (cudaMemcpyToSymbol(collision::_dwall, &_dwall, sizeof(int))); (cudaMemcpyToSymbol(collision::_air, &_air, 
sizeof(int))); //6/21 (cudaMemcpyToSymbol(collision::_ghost, &_ghost, sizeof(int))); (cudaMemcpyToSymbol(collision::_max_neighbor, &_max_neighbor, sizeof(int))); (cudaMemcpyToSymbol(collision::_col_rate, &_col_rate, sizeof(double))); } Collision::~Collision(void) { } void Collision::calcCollisionTerm(Vec1dSoa& pos_x, Vec1dSoa& pos_y, Vec1dSoa& pos_z, Vec1dSoa& vel_x, Vec1dSoa& vel_y, Vec1dSoa& vel_z, Vec1dSoa& acc_x, Vec1dSoa& acc_y, Vec1dSoa& acc_z, Vec1iSoa& type, Vec1uiSoa& neighborIndex, Vec1uiSoa& neighborNum, unsigned int totalParticle) { int numThreadPerBlock = 256; int numBlock = (totalParticle + numThreadPerBlock - 1) / numThreadPerBlock; //���Ԍv���p cudaEvent_t start, stop; float elapseTime; cudaEventCreate(&start); cudaEventCreate(&stop); Vec1dSoa afterVelocity_x; Vec1dSoa afterVelocity_y; Vec1dSoa afterVelocity_z; afterVelocity_x.resize(totalParticle, 0.0); afterVelocity_y.resize(totalParticle, 0.0); afterVelocity_z.resize(totalParticle, 0.0); collision::calcCollision << <numBlock, numThreadPerBlock >> >( pos_x._rawPointer, pos_y._rawPointer, pos_z._rawPointer, vel_x._rawPointer, vel_y._rawPointer, vel_z._rawPointer, acc_x._rawPointer, acc_y._rawPointer, acc_z._rawPointer, type._rawPointer, afterVelocity_x._rawPointer, afterVelocity_y._rawPointer, afterVelocity_z._rawPointer, neighborIndex._rawPointer, neighborNum._rawPointer, totalParticle); CHECK(cudaDeviceSynchronize()); cudaError_t err = cudaGetLastError(); if (err != cudaSuccess){ printf("Collision_error : %s\n", cudaGetErrorString(err)); } afterVelocity_x.clear(); afterVelocity_y.clear(); afterVelocity_z.clear(); } void Collision::HighOrdercalcCollisionTerm(Vec1dSoa& pos_x, Vec1dSoa& pos_y, Vec1dSoa& pos_z, Vec1dSoa& vel_x, Vec1dSoa& vel_y, Vec1dSoa& vel_z, Vec1dSoa& acc_x, Vec1dSoa& acc_y, Vec1dSoa& acc_z, Vec1iSoa& type, Vec1uiSoa& neighborIndex, Vec1uiSoa& neighborNum, unsigned int totalParticle) { int numThreadPerBlock = 256; int numBlock = (totalParticle + numThreadPerBlock - 1) / numThreadPerBlock; //���Ԍv���p cudaEvent_t start, stop; float elapseTime; cudaEventCreate(&start); cudaEventCreate(&stop); Vec1dSoa afterVelocity_x; Vec1dSoa afterVelocity_y; Vec1dSoa afterVelocity_z; afterVelocity_x.resize(totalParticle, 0.0); afterVelocity_y.resize(totalParticle, 0.0); afterVelocity_z.resize(totalParticle, 0.0); collision::HighOrdercalcCollision << <numBlock, numThreadPerBlock >> >( pos_x._rawPointer, pos_y._rawPointer, pos_z._rawPointer, vel_x._rawPointer, vel_y._rawPointer, vel_z._rawPointer, acc_x._rawPointer, acc_y._rawPointer, acc_z._rawPointer, type._rawPointer, afterVelocity_x._rawPointer, afterVelocity_y._rawPointer, afterVelocity_z._rawPointer, neighborIndex._rawPointer, neighborNum._rawPointer, totalParticle); CHECK(cudaDeviceSynchronize()); cudaError_t err = cudaGetLastError(); if (err != cudaSuccess){ printf("Collision_error : %s\n", cudaGetErrorString(err)); } afterVelocity_x.clear(); afterVelocity_y.clear(); afterVelocity_z.clear(); }
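The Collision constructor publishes its parameters to the device with cudaMemcpyToSymbol (hipMemcpyToSymbol in the converted file) into symbols inside the collision namespace, which the kernels then read as plain globals. Those symbols are presumably declared __constant__ or __device__ in one of the included headers (common.cuh / Some.cuh); that declaration is not visible in this file, so the sketch below is only an assumed reconstruction of the pattern, with illustrative names and values:

#include <cstdio>
#include <cuda_runtime.h>

namespace collision_sketch {
__constant__ double _r_e;      // effective radius, set once from the host
__constant__ int    _fluid;    // particle-type code

__global__ void use_constants()
{
    if (blockIdx.x == 0 && threadIdx.x == 0)
        printf("r_e = %f, fluid code = %d\n", _r_e, _fluid);
}
}  // namespace collision_sketch

int main()
{
    double r_e  = 0.003;  // illustrative values, not taken from the file
    int    fluid = 0;
    cudaMemcpyToSymbol(collision_sketch::_r_e,   &r_e,   sizeof(double));
    cudaMemcpyToSymbol(collision_sketch::_fluid, &fluid, sizeof(int));
    collision_sketch::use_constants<<<1, 1>>>();
    cudaDeviceSynchronize();
    return 0;
}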
37d546544c76eda20210a7520a04c9c3e0f0c580.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /******************************************************************************* * Copyright (c) 2015-2018 Skymind, Inc. * * This program and the accompanying materials are made available under the * terms of the Apache License, Version 2.0 which is available at * https://www.apache.org/licenses/LICENSE-2.0. * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. * * SPDX-License-Identifier: Apache-2.0 ******************************************************************************/ // // @author Yurii Shyrma, created on 25.02.2018 // #include<ops/declarable/helpers/batchnorm.h> #include <helpers/ShapeUtils.h> #include <OmpLaunchHelper.h> #include <ConstantTadHelper.h> #include <PointersManager.h> namespace nd4j { namespace ops { namespace helpers { ////////////////////////////////////////////////////////////////////////// template<typename T> __global__ static void batchnormCuda(const void* vx, const Nd4jLong* xShapeInfo, const void* vMean, const Nd4jLong* meanShapeInfo, const void* vVariance, const Nd4jLong* varianceShapeInfo, const void* vGamma, const Nd4jLong* gammaShapeInfo, const void* vBeta, const Nd4jLong* betaShapeInfo, void* vz, const Nd4jLong* zShapeInfo, const Nd4jLong* xTadShapeInfo, const Nd4jLong* xTadOffsets, const Nd4jLong* zTadShapeInfo, const Nd4jLong* zTadOffsets, const T epsilon) { const auto x = reinterpret_cast<const T*>(vx); auto z = reinterpret_cast<T*>(vz); const auto mean = reinterpret_cast<const T*>(vMean); const auto variance = reinterpret_cast<const T*>(vVariance); const auto gamma = reinterpret_cast<const T*>(vGamma); const auto beta = reinterpret_cast<const T*>(vBeta); // maxRank = xRank = zRank, minRank = meanRank = varianceRank = gammaRank = betaRank __shared__ Nd4jLong minLen, tadLen, totalThreads; if (threadIdx.x == 0) { totalThreads = gridDim.x * blockDim.x; minLen = shape::length(meanShapeInfo); tadLen = shape::length(xShapeInfo) / minLen; } __syncthreads(); const auto tid = blockIdx.x * blockDim.x + threadIdx.x; for (uint i = tid; i < minLen; i += totalThreads) { const auto meanOffset = shape::getIndexOffset(i, meanShapeInfo); const auto varianceOffset = shape::getIndexOffset(i, varianceShapeInfo); T sigmaInvGam = 1. 
/ nd4j::math::nd4j_sqrt<T, T>(variance[varianceOffset] + epsilon); if(gamma != nullptr) sigmaInvGam *= gamma[shape::getIndexOffset(i, gammaShapeInfo)]; auto betaOffset = 0; if(beta != nullptr) betaOffset = shape::getIndexOffset(i, betaShapeInfo); const auto xTad = x + xTadOffsets[i]; auto zTad = z + zTadOffsets[i]; for (uint j = 0; j < tadLen; ++j) { const auto xTadOffset = shape::getIndexOffset(j, xTadShapeInfo); const auto zTadOffset = shape::getIndexOffset(j, zTadShapeInfo); zTad[zTadOffset] = (xTad[xTadOffset] - mean[meanOffset]) * sigmaInvGam; if(beta != nullptr) zTad[zTadOffset] += beta[betaOffset]; } } } ////////////////////////////////////////////////////////////////////////// template<typename T> __global__ static void batchnormCuda2(const void* vx, const Nd4jLong* xShapeInfo, const void* vMean, const Nd4jLong* meanShapeInfo, const void* vVariance, const Nd4jLong* varianceShapeInfo, const void* vGamma, const Nd4jLong* gammaShapeInfo, const void* vBeta, const Nd4jLong* betaShapeInfo, void* vz, const Nd4jLong* zShapeInfo, const int numDims, const int* dims, const T epsilon) { const auto x = reinterpret_cast<const T*>(vx); auto z = reinterpret_cast<T*>(vz); const auto mean = reinterpret_cast<const T*>(vMean); const auto variance = reinterpret_cast<const T*>(vVariance); const auto gamma = reinterpret_cast<const T*>(vGamma); const auto beta = reinterpret_cast<const T*>(vBeta); __shared__ int xRank, minRank; // xRank == zRank. minRank = meanRank = varianceRank = gammaRank = betaRank __shared__ Nd4jLong xLen, totalThreads, *sharedMem; // xLen = zLen if (threadIdx.x == 0) { extern __shared__ unsigned char shmem[]; sharedMem = reinterpret_cast<Nd4jLong*>(shmem); totalThreads = gridDim.x * blockDim.x; xLen = shape::length(xShapeInfo); xRank = shape::rank(xShapeInfo); minRank = shape::rank(meanShapeInfo); } __syncthreads(); auto coords = sharedMem + threadIdx.x * xRank; const auto tid = blockIdx.x * blockDim.x + threadIdx.x; for (uint i = tid; i < xLen; i += totalThreads) { shape::index2coords(i, xShapeInfo, coords); const auto xOffset = shape::getOffset(xShapeInfo, coords); const auto zOffset = shape::getOffset(zShapeInfo, coords); if(minRank == xRank) { for (uint i = 0, j = 0; i < xRank; ++i) { if(j < numDims && i != dims[j]) coords[i] = 0; else ++j; } } else // minRank = numDims = 1 in this case coords[0] = coords[dims[0]]; const auto meanOffset = shape::getOffset(meanShapeInfo, coords); const auto varianceOffset = shape::getOffset(varianceShapeInfo, coords); T sigmaInvGam = 1. 
/ nd4j::math::nd4j_sqrt<T, T>(variance[varianceOffset] + epsilon); if(gamma != nullptr) { const auto gammaOffset = shape::getOffset(gammaShapeInfo, coords); sigmaInvGam *= gamma[gammaOffset]; } z[zOffset] = (x[xOffset] - mean[meanOffset]) * sigmaInvGam; if(beta != nullptr) { const auto betaOffset = shape::getOffset(betaShapeInfo, coords); z[zOffset] += beta[betaOffset]; } } } /////////////////////////////////////////////////////////////////// template<typename T> __host__ static void batchnormCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const hipStream_t *stream, const void* vx, const Nd4jLong* xShapeInfo, const void* vMean, const Nd4jLong* meanShapeInfo, const void* vVariance, const Nd4jLong* varianceShapeInfo, const void* vGamma, const Nd4jLong* gammaShapeInfo, const void* vBeta, const Nd4jLong* betaShapeInfo, void* vz, const Nd4jLong* zShapeInfo, const Nd4jLong* xTadShapeInfo, const Nd4jLong* xTadOffsets, const Nd4jLong* zTadShapeInfo, const Nd4jLong* zTadOffsets, const double epsilon) { hipLaunchKernelGGL(( batchnormCuda<T>), dim3(blocksPerGrid), dim3(threadsPerBlock), 1024, *stream, vx, xShapeInfo, vMean, meanShapeInfo, vVariance, varianceShapeInfo, vGamma, gammaShapeInfo, vBeta, betaShapeInfo, vz, zShapeInfo, xTadShapeInfo, xTadOffsets, zTadShapeInfo, zTadOffsets, static_cast<T>(epsilon)); } /////////////////////////////////////////////////////////////////// template<typename T> __host__ static void batchnormCudaLauncher2(const int blocksPerGrid, const int threadsPerBlock, const int sharedMem, const hipStream_t *stream, const void* vx, const Nd4jLong* xShapeInfo, const void* vMean, const Nd4jLong* meanShapeInfo, const void* vVariance, const Nd4jLong* varianceShapeInfo, const void* vGamma, const Nd4jLong* gammaShapeInfo, const void* vBeta, const Nd4jLong* betaShapeInfo, void* vz, const Nd4jLong* zShapeInfo, const int numDims, const int* dims, const double epsilon) { hipLaunchKernelGGL(( batchnormCuda2<T>), dim3(blocksPerGrid), dim3(threadsPerBlock), sharedMem, *stream, vx, xShapeInfo, vMean, meanShapeInfo, vVariance, varianceShapeInfo, vGamma, gammaShapeInfo, vBeta, betaShapeInfo, vz, zShapeInfo, numDims, dims, static_cast<T>(epsilon)); } ////////////////////////////////////////////////////////////////////////// void batchnorm(const NDArray* input, const NDArray* mean, const NDArray* variance, const NDArray* gamma, const NDArray* beta, NDArray* output, const std::vector<int>& axes, const double epsilon) { std::vector<int> dimsToExclude = ShapeUtils::evalDimsToExclude(input->rankOf(), axes); auto packX = nd4j::ConstantTadHelper::getInstance()->tadForDimensions(input->getShapeInfo(), dimsToExclude); auto packZ = nd4j::ConstantTadHelper::getInstance()->tadForDimensions(output->shapeInfo(), dimsToExclude); const int threadsPerBlock = MAX_NUM_THREADS / 2; const int blocksPerGrid = (mean->lengthOf() + threadsPerBlock - 1) / threadsPerBlock; PointersManager manager(input->getContext(), "batchnorm"); NDArray::prepareSpecialUse({output}, {input, mean, variance, gamma, beta}); BUILD_SINGLE_SELECTOR(input->dataType(), batchnormCudaLauncher, (blocksPerGrid, threadsPerBlock, input->getContext()->getCudaStream(), input->getSpecialBuffer(), input->getSpecialShapeInfo(), mean->getSpecialBuffer(), mean->getSpecialShapeInfo(), variance->getSpecialBuffer(), variance->getSpecialShapeInfo(), gamma ? gamma->getSpecialBuffer() : nullptr, gamma ? gamma->getSpecialShapeInfo() : nullptr, beta ? beta->getSpecialBuffer() : nullptr, beta ? 
beta->getSpecialShapeInfo() : nullptr, output->specialBuffer(), output->specialShapeInfo(), packX.platformShapeInfo(), packX.platformOffsets(), packZ.platformShapeInfo(), packZ.platformOffsets(), epsilon), FLOAT_TYPES); NDArray::registerSpecialUse({output}, {input, mean, variance, gamma, beta}); manager.synchronize(); // const int threadsPerBlock = MAX_NUM_THREADS / 4; // const int blocksPerGrid = (input->lengthOf() + threadsPerBlock - 1) / threadsPerBlock; // const int sharedMem = sizeof(Nd4jLong) * threadsPerBlock * input->rankOf() + 128; // PointersManager manager(input->getContext(), "batchnorm"); // const int* dims = reinterpret_cast<int*>(manager.replicatePointer(axes.data(), axes.size() * sizeof(int))); // NDArray::prepareSpecialUse({output}, {input, mean, variance, gamma, beta}); // BUILD_SINGLE_SELECTOR(input->dataType(), batchnormCudaLauncher2, (blocksPerGrid, threadsPerBlock, sharedMem, input->getContext()->getCudaStream(), input->getSpecialBuffer(), input->getSpecialShapeInfo(), mean->getSpecialBuffer(), mean->getSpecialShapeInfo(), variance->getSpecialBuffer(), variance->getSpecialShapeInfo(), gamma ? gamma->getSpecialBuffer() : nullptr, gamma ? gamma->getSpecialShapeInfo() : nullptr, beta ? beta->getSpecialBuffer() : nullptr, beta ? beta->getSpecialShapeInfo() : nullptr, output->specialBuffer(), output->specialShapeInfo(), axes.size(), dims, epsilon), FLOAT_TYPES); // NDArray::registerSpecialUse({output}, {input, mean, variance, gamma, beta}); // manager.synchronize(); } } } }
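Both kernels implement the standard inference-time batch-norm transform element-wise: batchnormCuda walks one TAD per mean/variance element, while the alternative batchnormCuda2 path (launched by the commented-out block in batchnorm above) recomputes the broadcast coordinates for every output element. Per element the result is

    z = \gamma \, (x - \mu) / \sqrt{\sigma^{2} + \epsilon} + \beta

i.e. z = gamma * (x - mean) / sqrt(variance + epsilon) + beta in the kernel's own names, with gamma folded into sigmaInvGam = gamma / sqrt(variance + epsilon) up front and beta added only when a beta array is supplied.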
37d546544c76eda20210a7520a04c9c3e0f0c580.cu
/******************************************************************************* * Copyright (c) 2015-2018 Skymind, Inc. * * This program and the accompanying materials are made available under the * terms of the Apache License, Version 2.0 which is available at * https://www.apache.org/licenses/LICENSE-2.0. * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. * * SPDX-License-Identifier: Apache-2.0 ******************************************************************************/ // // @author Yurii Shyrma, created on 25.02.2018 // #include<ops/declarable/helpers/batchnorm.h> #include <helpers/ShapeUtils.h> #include <OmpLaunchHelper.h> #include <ConstantTadHelper.h> #include <PointersManager.h> namespace nd4j { namespace ops { namespace helpers { ////////////////////////////////////////////////////////////////////////// template<typename T> __global__ static void batchnormCuda(const void* vx, const Nd4jLong* xShapeInfo, const void* vMean, const Nd4jLong* meanShapeInfo, const void* vVariance, const Nd4jLong* varianceShapeInfo, const void* vGamma, const Nd4jLong* gammaShapeInfo, const void* vBeta, const Nd4jLong* betaShapeInfo, void* vz, const Nd4jLong* zShapeInfo, const Nd4jLong* xTadShapeInfo, const Nd4jLong* xTadOffsets, const Nd4jLong* zTadShapeInfo, const Nd4jLong* zTadOffsets, const T epsilon) { const auto x = reinterpret_cast<const T*>(vx); auto z = reinterpret_cast<T*>(vz); const auto mean = reinterpret_cast<const T*>(vMean); const auto variance = reinterpret_cast<const T*>(vVariance); const auto gamma = reinterpret_cast<const T*>(vGamma); const auto beta = reinterpret_cast<const T*>(vBeta); // maxRank = xRank = zRank, minRank = meanRank = varianceRank = gammaRank = betaRank __shared__ Nd4jLong minLen, tadLen, totalThreads; if (threadIdx.x == 0) { totalThreads = gridDim.x * blockDim.x; minLen = shape::length(meanShapeInfo); tadLen = shape::length(xShapeInfo) / minLen; } __syncthreads(); const auto tid = blockIdx.x * blockDim.x + threadIdx.x; for (uint i = tid; i < minLen; i += totalThreads) { const auto meanOffset = shape::getIndexOffset(i, meanShapeInfo); const auto varianceOffset = shape::getIndexOffset(i, varianceShapeInfo); T sigmaInvGam = 1. 
/ nd4j::math::nd4j_sqrt<T, T>(variance[varianceOffset] + epsilon); if(gamma != nullptr) sigmaInvGam *= gamma[shape::getIndexOffset(i, gammaShapeInfo)]; auto betaOffset = 0; if(beta != nullptr) betaOffset = shape::getIndexOffset(i, betaShapeInfo); const auto xTad = x + xTadOffsets[i]; auto zTad = z + zTadOffsets[i]; for (uint j = 0; j < tadLen; ++j) { const auto xTadOffset = shape::getIndexOffset(j, xTadShapeInfo); const auto zTadOffset = shape::getIndexOffset(j, zTadShapeInfo); zTad[zTadOffset] = (xTad[xTadOffset] - mean[meanOffset]) * sigmaInvGam; if(beta != nullptr) zTad[zTadOffset] += beta[betaOffset]; } } } ////////////////////////////////////////////////////////////////////////// template<typename T> __global__ static void batchnormCuda2(const void* vx, const Nd4jLong* xShapeInfo, const void* vMean, const Nd4jLong* meanShapeInfo, const void* vVariance, const Nd4jLong* varianceShapeInfo, const void* vGamma, const Nd4jLong* gammaShapeInfo, const void* vBeta, const Nd4jLong* betaShapeInfo, void* vz, const Nd4jLong* zShapeInfo, const int numDims, const int* dims, const T epsilon) { const auto x = reinterpret_cast<const T*>(vx); auto z = reinterpret_cast<T*>(vz); const auto mean = reinterpret_cast<const T*>(vMean); const auto variance = reinterpret_cast<const T*>(vVariance); const auto gamma = reinterpret_cast<const T*>(vGamma); const auto beta = reinterpret_cast<const T*>(vBeta); __shared__ int xRank, minRank; // xRank == zRank. minRank = meanRank = varianceRank = gammaRank = betaRank __shared__ Nd4jLong xLen, totalThreads, *sharedMem; // xLen = zLen if (threadIdx.x == 0) { extern __shared__ unsigned char shmem[]; sharedMem = reinterpret_cast<Nd4jLong*>(shmem); totalThreads = gridDim.x * blockDim.x; xLen = shape::length(xShapeInfo); xRank = shape::rank(xShapeInfo); minRank = shape::rank(meanShapeInfo); } __syncthreads(); auto coords = sharedMem + threadIdx.x * xRank; const auto tid = blockIdx.x * blockDim.x + threadIdx.x; for (uint i = tid; i < xLen; i += totalThreads) { shape::index2coords(i, xShapeInfo, coords); const auto xOffset = shape::getOffset(xShapeInfo, coords); const auto zOffset = shape::getOffset(zShapeInfo, coords); if(minRank == xRank) { for (uint i = 0, j = 0; i < xRank; ++i) { if(j < numDims && i != dims[j]) coords[i] = 0; else ++j; } } else // minRank = numDims = 1 in this case coords[0] = coords[dims[0]]; const auto meanOffset = shape::getOffset(meanShapeInfo, coords); const auto varianceOffset = shape::getOffset(varianceShapeInfo, coords); T sigmaInvGam = 1. 
/ nd4j::math::nd4j_sqrt<T, T>(variance[varianceOffset] + epsilon); if(gamma != nullptr) { const auto gammaOffset = shape::getOffset(gammaShapeInfo, coords); sigmaInvGam *= gamma[gammaOffset]; } z[zOffset] = (x[xOffset] - mean[meanOffset]) * sigmaInvGam; if(beta != nullptr) { const auto betaOffset = shape::getOffset(betaShapeInfo, coords); z[zOffset] += beta[betaOffset]; } } } /////////////////////////////////////////////////////////////////// template<typename T> __host__ static void batchnormCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const cudaStream_t *stream, const void* vx, const Nd4jLong* xShapeInfo, const void* vMean, const Nd4jLong* meanShapeInfo, const void* vVariance, const Nd4jLong* varianceShapeInfo, const void* vGamma, const Nd4jLong* gammaShapeInfo, const void* vBeta, const Nd4jLong* betaShapeInfo, void* vz, const Nd4jLong* zShapeInfo, const Nd4jLong* xTadShapeInfo, const Nd4jLong* xTadOffsets, const Nd4jLong* zTadShapeInfo, const Nd4jLong* zTadOffsets, const double epsilon) { batchnormCuda<T><<<blocksPerGrid, threadsPerBlock, 1024, *stream>>>(vx, xShapeInfo, vMean, meanShapeInfo, vVariance, varianceShapeInfo, vGamma, gammaShapeInfo, vBeta, betaShapeInfo, vz, zShapeInfo, xTadShapeInfo, xTadOffsets, zTadShapeInfo, zTadOffsets, static_cast<T>(epsilon)); } /////////////////////////////////////////////////////////////////// template<typename T> __host__ static void batchnormCudaLauncher2(const int blocksPerGrid, const int threadsPerBlock, const int sharedMem, const cudaStream_t *stream, const void* vx, const Nd4jLong* xShapeInfo, const void* vMean, const Nd4jLong* meanShapeInfo, const void* vVariance, const Nd4jLong* varianceShapeInfo, const void* vGamma, const Nd4jLong* gammaShapeInfo, const void* vBeta, const Nd4jLong* betaShapeInfo, void* vz, const Nd4jLong* zShapeInfo, const int numDims, const int* dims, const double epsilon) { batchnormCuda2<T><<<blocksPerGrid, threadsPerBlock, sharedMem, *stream>>>(vx, xShapeInfo, vMean, meanShapeInfo, vVariance, varianceShapeInfo, vGamma, gammaShapeInfo, vBeta, betaShapeInfo, vz, zShapeInfo, numDims, dims, static_cast<T>(epsilon)); } ////////////////////////////////////////////////////////////////////////// void batchnorm(const NDArray* input, const NDArray* mean, const NDArray* variance, const NDArray* gamma, const NDArray* beta, NDArray* output, const std::vector<int>& axes, const double epsilon) { std::vector<int> dimsToExclude = ShapeUtils::evalDimsToExclude(input->rankOf(), axes); auto packX = nd4j::ConstantTadHelper::getInstance()->tadForDimensions(input->getShapeInfo(), dimsToExclude); auto packZ = nd4j::ConstantTadHelper::getInstance()->tadForDimensions(output->shapeInfo(), dimsToExclude); const int threadsPerBlock = MAX_NUM_THREADS / 2; const int blocksPerGrid = (mean->lengthOf() + threadsPerBlock - 1) / threadsPerBlock; PointersManager manager(input->getContext(), "batchnorm"); NDArray::prepareSpecialUse({output}, {input, mean, variance, gamma, beta}); BUILD_SINGLE_SELECTOR(input->dataType(), batchnormCudaLauncher, (blocksPerGrid, threadsPerBlock, input->getContext()->getCudaStream(), input->getSpecialBuffer(), input->getSpecialShapeInfo(), mean->getSpecialBuffer(), mean->getSpecialShapeInfo(), variance->getSpecialBuffer(), variance->getSpecialShapeInfo(), gamma ? gamma->getSpecialBuffer() : nullptr, gamma ? gamma->getSpecialShapeInfo() : nullptr, beta ? beta->getSpecialBuffer() : nullptr, beta ? 
beta->getSpecialShapeInfo() : nullptr, output->specialBuffer(), output->specialShapeInfo(), packX.platformShapeInfo(), packX.platformOffsets(), packZ.platformShapeInfo(), packZ.platformOffsets(), epsilon), FLOAT_TYPES); NDArray::registerSpecialUse({output}, {input, mean, variance, gamma, beta}); manager.synchronize(); // const int threadsPerBlock = MAX_NUM_THREADS / 4; // const int blocksPerGrid = (input->lengthOf() + threadsPerBlock - 1) / threadsPerBlock; // const int sharedMem = sizeof(Nd4jLong) * threadsPerBlock * input->rankOf() + 128; // PointersManager manager(input->getContext(), "batchnorm"); // const int* dims = reinterpret_cast<int*>(manager.replicatePointer(axes.data(), axes.size() * sizeof(int))); // NDArray::prepareSpecialUse({output}, {input, mean, variance, gamma, beta}); // BUILD_SINGLE_SELECTOR(input->dataType(), batchnormCudaLauncher2, (blocksPerGrid, threadsPerBlock, sharedMem, input->getContext()->getCudaStream(), input->getSpecialBuffer(), input->getSpecialShapeInfo(), mean->getSpecialBuffer(), mean->getSpecialShapeInfo(), variance->getSpecialBuffer(), variance->getSpecialShapeInfo(), gamma ? gamma->getSpecialBuffer() : nullptr, gamma ? gamma->getSpecialShapeInfo() : nullptr, beta ? beta->getSpecialBuffer() : nullptr, beta ? beta->getSpecialShapeInfo() : nullptr, output->specialBuffer(), output->specialShapeInfo(), axes.size(), dims, epsilon), FLOAT_TYPES); // NDArray::registerSpecialUse({output}, {input, mean, variance, gamma, beta}); // manager.synchronize(); } } } }
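The only systematic differences between the .hip and .cu versions of the batchnorm pair above are mechanical: the CUDA triple-chevron launches become hipLaunchKernelGGL calls and cudaStream_t becomes hipStream_t, while the kernel bodies stay identical. A minimal self-contained sketch of that mapping follows; the toy scaleKernel, the buffer names, and the sizes are made up for illustration and belong to neither library.

// CUDA form of a trivial launch; the hipify-generated equivalents are noted in comments.
#include <cuda_runtime.h>
#include <cstdio>

__global__ void scaleKernel(float* data, float alpha, int n) {
  const int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) data[i] *= alpha;
}

int main() {
  const int n = 1024;
  float* d = nullptr;
  cudaMalloc(&d, n * sizeof(float));      // hipify: hipMalloc(&d, n * sizeof(float));
  cudaMemset(d, 0, n * sizeof(float));    // hipify: hipMemset(d, 0, n * sizeof(float));
  cudaStream_t stream;                    // hipify: hipStream_t stream;
  cudaStreamCreate(&stream);              // hipify: hipStreamCreate(&stream);

  const int threads = 256;
  const int blocks = (n + threads - 1) / threads;
  scaleKernel<<<blocks, threads, 0, stream>>>(d, 2.0f, n);
  // hipify rewrites the launch above as:
  //   hipLaunchKernelGGL(( scaleKernel), dim3(blocks), dim3(threads), 0, stream, d, 2.0f, n);

  cudaStreamSynchronize(stream);          // hipify: hipStreamSynchronize(stream);
  cudaStreamDestroy(stream);              // hipify: hipStreamDestroy(stream);
  cudaFree(d);                            // hipify: hipFree(d);
  printf("done\n");
  return 0;
}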
762dfffcad0ed13a2438a968b30d5fc0e2402ede.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" //////////////////////////////////////////////////////////////////////////////// // Copyright (c) 2014-2019, Lawrence Livermore National Security, LLC. // Produced at the Lawrence Livermore National Laboratory. // Written by the LBANN Research Team (B. Van Essen, et al.) listed in // the CONTRIBUTORS file. <[email protected]> // // LLNL-CODE-697807. // All rights reserved. // // This file is part of LBANN: Livermore Big Artificial Neural Network // Toolkit. For details, see http://software.llnl.gov/LBANN or // https://github.com/LLNL/LBANN. // // Licensed under the Apache License, Version 2.0 (the "Licensee"); you // may not use this file except in compliance with the License. You may // obtain a copy of the License at: // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or // implied. See the License for the specific language governing // permissions and limitations under the license. //////////////////////////////////////////////////////////////////////////////// #define LBANN_BATCH_NORMALIZATION_LAYER_INSTANTIATE #include "lbann/layers/regularizers/batch_normalization.hpp" #include "lbann/weights/weights_helpers.hpp" #include "lbann/utils/cuda.hpp" namespace lbann { namespace { /** CUDA kernel to compute channel sums. * Sums and squares of sums are used to compute mean and variance. */ template <El::Int block_size, typename TensorDataType> __global__ void channel_sums_kernel( El::Int channel_height, El::Int width, const TensorDataType * __restrict__ data, El::Int data_ldim, TensorDataType * __restrict__ sums, TensorDataType * __restrict__ sqsums) { // Indices const El::Int tid = threadIdx.x; const El::Int gidx = threadIdx.x + blockIdx.x * blockDim.x; const El::Int bidy = blockIdx.y; // Initialize shared memory __shared__ TensorDataType shared_sums[block_size]; __shared__ TensorDataType shared_sqsums[block_size]; // Compute row sums in shared memory TensorDataType private_sum = 0; TensorDataType private_sqsum = 0; if (gidx < channel_height) { const auto& row = gidx + bidy * channel_height; for (El::Int col = 0; col < width; ++col) { const auto& x = data[row + col * data_ldim]; private_sum += x; private_sqsum += x * x; } } shared_sums[tid] = private_sum; shared_sqsums[tid] = private_sqsum; // Compute channel sum with shared memory reduction /// @todo unroll loops for (El::Int stride = block_size / 2; stride > 0; stride /= 2) { __syncthreads(); if(tid < stride) { shared_sums[tid] += shared_sums[tid + stride]; shared_sqsums[tid] += shared_sqsums[tid + stride]; } } // Output channel sum to global memory if (tid == 0) { cuda::atomic_add(&sums[bidy], shared_sums[0]); cuda::atomic_add(&sqsums[bidy], shared_sqsums[0]); } } /** CUDA kernel to compute statistics. * On input, global_mean and global_var are assumed to contain sums * and squares of sums, respectively. 
*/ template <typename TensorDataType> __global__ void compute_statistics_kernel( El::Int num_sums, El::Int num_per_sum, TensorDataType epsilon, TensorDataType decay, TensorDataType * __restrict__ global_mean, TensorDataType * __restrict__ global_var, TensorDataType * __restrict__ global_running_mean, TensorDataType * __restrict__ global_running_var) { const El::Int gid = threadIdx.x + blockIdx.x * blockDim.x; const El::Int num_threads = blockDim.x * gridDim.x; for (El::Int i = gid; i < num_sums; i += num_threads) { TensorDataType num_per_sum_dt = TensorDataType(num_per_sum); // Compute mean and variance const auto& mean = global_mean[i] / num_per_sum_dt; const auto& sqmean = global_var[i] / num_per_sum_dt; auto var = num_per_sum_dt * (sqmean - mean * mean) / TensorDataType(num_per_sum - 1); var = var > epsilon ? var : epsilon; global_mean[gid] = mean; global_var[gid] = var; // Compute running statistics auto& running_mean = global_running_mean[gid]; auto& running_var = global_running_var[gid]; running_mean = decay * running_mean + (TensorDataType(1.0) - decay) * mean; running_var = decay * running_var + (TensorDataType(1.0) - decay) * var; } } /** CUDA kernel to apply batch normalization. */ template <El::Int block_size, typename TensorDataType> __global__ void batch_normalization_kernel( El::Int channel_height, El::Int width, const TensorDataType * __restrict__ global_input, El::Int input_ldim, const TensorDataType * __restrict__ global_mean, const TensorDataType * __restrict__ global_var, TensorDataType epsilon, const TensorDataType * __restrict__ global_scale, const TensorDataType * __restrict__ global_bias, TensorDataType * __restrict__ global_output, El::Int output_ldim) { // Indices const El::Int gidx = threadIdx.x + blockIdx.x * blockDim.x; const El::Int bidy = blockIdx.y; // Copy batch normalization parameters to private memory const auto& mean = global_mean[bidy]; const auto& var = global_var[bidy]; const auto& scale = global_scale[bidy]; const auto& bias = global_bias[bidy]; // Get reciprocal of standard deviation const auto& inv_stdev = cuda::rsqrt(var + epsilon); // Apply batch normalization if (gidx < channel_height) { const auto& row = gidx + bidy * channel_height; for (El::Int col = 0; col < width; ++col) { const auto& x = global_input[row + col * input_ldim]; const auto& xhat = (x - mean) * inv_stdev; const auto& y = scale * xhat + bias; global_output[row + col * output_ldim] = y; } } } /** CUDA kernel to compute gradients w.r.t. batch norm parameters. 
*/ template <El::Int block_size, typename TensorDataType> __global__ void backprop1_kernel( El::Int channel_height, El::Int width, const TensorDataType * __restrict__ global_input, El::Int input_ldim, const TensorDataType * __restrict__ global_gradient_wrt_output, El::Int gradient_wrt_output_ldim, const TensorDataType * __restrict__ global_mean, const TensorDataType * __restrict__ global_var, TensorDataType epsilon, const TensorDataType * __restrict__ global_scale, TensorDataType * __restrict__ global_dscale, TensorDataType * __restrict__ global_dbias, TensorDataType * __restrict__ global_dmean, TensorDataType * __restrict__ global_dvar) { // Indices const El::Int tid = threadIdx.x; const El::Int gidx = threadIdx.x + blockIdx.x * blockDim.x; const El::Int bidy = blockIdx.y; // Initialize shared memory __shared__ TensorDataType shared_dscale[block_size]; __shared__ TensorDataType shared_dbias[block_size]; __shared__ TensorDataType shared_dmean[block_size]; __shared__ TensorDataType shared_dvar[block_size]; // Copy batch normalization parameters to private memory const auto& mean = global_mean[bidy]; const auto& var = global_var[bidy]; const auto& scale = global_scale[bidy]; // Compute useful constants const TensorDataType zero = TensorDataType(0); const auto& inv_stdev = cuda::rsqrt(var + epsilon); const auto& dvar_factor = inv_stdev * inv_stdev * inv_stdev / TensorDataType(2); // Compute row-wise gradient contributions in shared memory auto dscale = zero; auto dbias = zero; auto dmean = zero; auto dvar = zero; if (gidx < channel_height) { const auto& row = gidx + bidy * channel_height; for(El::Int col = 0; col < width; ++col) { const auto& x = global_input[row + col * input_ldim]; const auto& xhat = (x - mean) * inv_stdev; const auto& dy = global_gradient_wrt_output[row + col * gradient_wrt_output_ldim]; dscale += dy * xhat; dbias += dy; const auto& dxhat = dy * scale; dmean += - dxhat * inv_stdev; dvar += - dxhat * (x - mean) * dvar_factor; } } shared_dscale[tid] = dscale; shared_dbias[tid] = dbias; shared_dmean[tid] = dmean; shared_dvar[tid] = dvar; // Compute gradients with shared memory reduction // @todo unroll loops for (El::Int stride = block_size / 2; stride > 0; stride /= 2) { __syncthreads(); if (tid < stride) { shared_dscale[tid] += shared_dscale[tid + stride]; shared_dbias[tid] += shared_dbias[tid + stride]; shared_dmean[tid] += shared_dmean[tid + stride]; shared_dvar[tid] += shared_dvar[tid + stride]; } } // Output channel sum to global memory if (tid == 0) { cuda::atomic_add(&global_dscale[bidy], shared_dscale[0]); cuda::atomic_add(&global_dbias[bidy], shared_dbias[0]); cuda::atomic_add(&global_dmean[bidy], shared_dmean[0]); cuda::atomic_add(&global_dvar[bidy], shared_dvar[0]); } } /** CUDA kernel to compute gradients w.r.t. input. 
*/ template <El::Int block_size, typename TensorDataType> __global__ void backprop2_kernel( El::Int channel_height, El::Int local_width, El::Int num_per_sum, const TensorDataType * __restrict__ global_input, El::Int input_ldim, const TensorDataType * __restrict__ global_gradient_wrt_output, El::Int gradient_wrt_output_ldim, const TensorDataType * __restrict__ global_mean, const TensorDataType * __restrict__ global_var, TensorDataType epsilon, const TensorDataType * __restrict__ global_scale, const TensorDataType * __restrict__ global_dmean, const TensorDataType * __restrict__ global_dvar, TensorDataType * __restrict__ global_gradient_wrt_input, El::Int gradient_wrt_input_ldim) { // Indices const El::Int gidx = threadIdx.x + blockIdx.x * blockDim.x; const El::Int bidy = blockIdx.y; // Copy batch normalization parameters to private memory const auto& mean = global_mean[bidy]; const auto& var = global_var[bidy]; const auto& scale = global_scale[bidy]; const auto& dmean = global_dmean[bidy]; const auto& dvar = global_dvar[bidy]; // Compute useful constants const auto& inv_stdev = cuda::rsqrt(var + epsilon); const auto& dmean_term = dmean / TensorDataType(num_per_sum); const auto& dvar_term = dvar * TensorDataType(2) / TensorDataType(num_per_sum - 1); // Apply batch normalization if (gidx < channel_height) { const auto& row = gidx + bidy * channel_height; for (El::Int col = 0; col < local_width; ++col) { const auto& x = global_input[row + col * input_ldim]; const auto& dy = global_gradient_wrt_output[row + col * gradient_wrt_output_ldim]; const auto& dxhat = dy * scale; auto& dx = global_gradient_wrt_input[row + col * gradient_wrt_input_ldim]; dx = dxhat * inv_stdev + dmean_term + dvar_term * (x - mean); } } } } // namespace #ifdef LBANN_HAS_DISTCONV template <typename TensorDataType, data_layout T_layout, El::Device Dev> void batch_normalization_distconv_adapter<TensorDataType, T_layout, Dev>::fp_compute() { assert_always(Dev == El::Device::GPU); assert_always(T_layout == data_layout::DATA_PARALLEL); using ValuesGetter = weights_details::SafeWeightsAccessor<TensorDataType>; auto &l = dynamic_cast<batch_normalization_layer< TensorDataType, T_layout, Dev>&>(this->layer()); const bool is_training = l.m_model->get_execution_context().get_execution_mode() == execution_mode::training; auto& local_running_mean = ValuesGetter::mutable_values(this->get_weights(2)).Matrix(); auto& local_running_var = ValuesGetter::mutable_values(this->get_weights(3)).Matrix(); assert0(dc::tensor::View( m_scale, l.weights_values(0).LockedMatrix().LockedBuffer())); assert0(dc::tensor::View( m_bias, l.weights_values(1).LockedMatrix().LockedBuffer())); assert0(dc::tensor::View( m_running_mean, local_running_mean.Buffer())); assert0(dc::tensor::View( m_running_var, local_running_var.Buffer())); m_bn->forward_stage1(this->get_prev_activations(), m_mean, m_var, is_training); if (l.m_statistics_group_size == 0) { l.m_comm->allreduce(*l.m_mean_and_var, l.m_mean_and_var->RedundantComm(), El::mpi::SUM); } else if (l.m_statistics_group_size == 1) { // Local aggregation } else { LBANN_ERROR("statics_group_size must be either 0 or 1 for now."); } m_bn->forward_stage2(this->get_prev_activations(), m_mean, m_var, m_running_mean, m_running_var, m_scale, m_bias, this->get_activations(), is_training); } template <typename TensorDataType, data_layout T_layout, El::Device Dev> void batch_normalization_distconv_adapter<TensorDataType, T_layout, Dev>::bp_compute() { assert_always(Dev == El::Device::GPU); assert_always(T_layout == 
data_layout::DATA_PARALLEL); auto &l = dynamic_cast<batch_normalization_layer< TensorDataType, T_layout, Dev>&>(this->layer()); // Check execution mode const bool is_training = l.m_model->get_execution_context().get_execution_mode() == execution_mode::training; assert_always(is_training); assert0(dc::tensor::View( m_scale, l.weights_values(0).LockedMatrix().LockedBuffer())); m_bn->backward_stage1(this->get_prev_activations(), this->get_prev_error_signals(), m_mean, m_var, m_scale, m_scale_gradient, m_bias_gradient, m_mean_gradient, m_var_gradient); // Verbatim copy from bp_compute_gpu // Accumulate gradients if (is_training) { if (l.m_statistics_group_size == 0) { l.m_comm->allreduce(*l.m_mean_and_var_gradient, l.m_mean_and_var_gradient->RedundantComm(), El::mpi::SUM); } } else { Zero(*l.m_mean_and_var_gradient); } auto* scale_optimizer = l.get_weights(0).get_optimizer(); if (scale_optimizer != nullptr) { scale_optimizer->add_to_gradient(*l.m_scale_gradient, TensorDataType{1}, true); } auto* bias_optimizer = l.get_weights(1).get_optimizer(); if (bias_optimizer != nullptr) { bias_optimizer->add_to_gradient(*l.m_bias_gradient, TensorDataType{1}, true); } m_bn->backward_stage2(this->get_prev_activations(), this->get_prev_error_signals(), m_mean, m_var, m_scale, m_mean_gradient, m_var_gradient, this->get_error_signals()); } #endif // LBANN_HAS_DISTCONV template <typename TensorDataType, data_layout T_layout, El::Device Dev> void batch_normalization_layer<TensorDataType, T_layout, Dev>::fp_compute() { #ifdef LBANN_HAS_DISTCONV if (this->distconv_enabled()) { get_distconv_adapter().fp_compute(); return; } #endif // LBANN_HAS_DISTCONV const bool is_training = this->m_model->get_execution_context().get_execution_mode() == execution_mode::training; // CUDA objects CHECK_CUDA(hipSetDevice(El::GPUManager::Device())); auto&& stream = El::GPUManager::Stream(); // Matrices const auto& input = this->get_prev_activations(); const auto& local_input = input.LockedMatrix(); auto& local_output = this->get_local_activations(); // Matrix parameters const auto& width = input.Width(); const auto& local_width = local_input.Width(); const auto& output_dims = this->get_output_dims(); const auto& num_channels = output_dims[0]; const auto& channel_size = this->get_output_size() / num_channels; // Compute statistics if (is_training) { using ValuesGetter = weights_details::SafeWeightsAccessor<TensorDataType>; // Local matrices auto& local_mean = this->m_mean_v->Matrix(); auto& local_var = this->m_var_v->Matrix(); auto& local_running_mean = ValuesGetter::mutable_values(this->get_weights(2)).Matrix(); auto& local_running_var = ValuesGetter::mutable_values(this->get_weights(3)).Matrix(); // Compute sums and sums of squares El::Zero(local_mean); El::Zero(local_var); if (!local_input.IsEmpty()) { const El::Int block_size = 256; dim3 block_dims, grid_dims; block_dims.x = block_size; grid_dims.x = (channel_size + block_size - 1) / block_size; grid_dims.y = num_channels; hipLaunchKernelGGL(( channel_sums_kernel<block_size>) , dim3(grid_dims), dim3(block_dims), 0, stream, channel_size, local_width, local_input.LockedBuffer(), local_input.LDim(), local_mean.Buffer(), local_var.Buffer()); } El::Int num_per_sum; if (this->m_statistics_group_size == 0) { // Global statistics aggregation; allreduce on fused buffer. 
this->m_comm->allreduce(*this->m_mean_and_var, this->m_mean_and_var->RedundantComm(), El::mpi::SUM); num_per_sum = channel_size * width; } else if (this->m_statistics_group_size == 1) { // Local aggregation, no allreduce needed. num_per_sum = channel_size * local_width; } else { // Grouped batchnorm. Allreduce on fused buffer. this->m_comm->allreduce(*this->m_mean_and_var, this->m_comm->get_packed_group_comm(this->m_statistics_group_size), El::mpi::SUM); if (this->m_num_per_sum_cache.count(width) == 0) { num_per_sum = channel_size * local_width; num_per_sum = this->m_comm->allreduce( num_per_sum, this->m_comm->get_packed_group_comm(this->m_statistics_group_size)); this->m_num_per_sum_cache[width] = num_per_sum; } else { num_per_sum = this->m_num_per_sum_cache[width]; } } // Compute minibatch statistics if (num_per_sum <= 1) { El::Fill(local_var, TensorDataType(1.0)); } else if (num_channels > 0) { const El::Int block_dim = 256; const El::Int grid_dim = (num_channels + block_dim - 1) / block_dim; hipLaunchKernelGGL(( compute_statistics_kernel), dim3(grid_dim), dim3(block_dim), 0, stream, num_channels, num_per_sum, this->m_epsilon, this->m_decay, local_mean.Buffer(), local_var.Buffer(), local_running_mean.Buffer(), local_running_var.Buffer()); } } // Apply batch normalization const auto& local_scale = this->weights_values(0).LockedMatrix(); const auto& local_bias = this->weights_values(1).LockedMatrix(); const auto& local_mean = (is_training ? this->m_mean_v->LockedMatrix() : this->weights_values(2).LockedMatrix()); const auto& local_var = (is_training ? this->m_var_v->LockedMatrix() : this->weights_values(3).LockedMatrix()); if (!local_input.IsEmpty()) { const El::Int block_size = 256; dim3 block_dims, grid_dims; block_dims.x = block_size; grid_dims.x = (channel_size + block_size - 1) / block_size; grid_dims.y = num_channels; hipLaunchKernelGGL(( batch_normalization_kernel<block_size>) , dim3(grid_dims), dim3(block_dims), 0, stream, channel_size, local_width, local_input.LockedBuffer(), local_input.LDim(), local_mean.LockedBuffer(), local_var.LockedBuffer(), this->m_epsilon, local_scale.LockedBuffer(), local_bias.LockedBuffer(), local_output.Buffer(), local_output.LDim()); } } template <typename TensorDataType, data_layout T_layout, El::Device Dev> void batch_normalization_layer<TensorDataType, T_layout, Dev>::bp_compute() { #ifdef LBANN_HAS_DISTCONV if (this->distconv_enabled()) { get_distconv_adapter().bp_compute(); return; } #endif // LBANN_HAS_DISTCONV const bool is_training = this->m_model->get_execution_context().get_execution_mode() == execution_mode::training; // CUDA objects CHECK_CUDA(hipSetDevice(El::GPUManager::Device())); auto&& stream = El::GPUManager::Stream(); // Matrices const auto& local_scale = this->weights_values(0).LockedMatrix(); const auto& local_mean = (is_training ? this->m_mean_v->LockedMatrix() : this->weights_values(2).LockedMatrix()); const auto& local_var = (is_training ? 
this->m_var_v->LockedMatrix() : this->weights_values(3).LockedMatrix()); const auto& input = this->get_prev_activations(); const auto& local_input = input.LockedMatrix(); const auto& local_gradient_wrt_output = this->get_local_prev_error_signals(); auto& local_gradient_wrt_input = this->get_local_error_signals(); auto& local_mean_gradient = this->m_mean_gradient_v->Matrix(); auto& local_var_gradient = this->m_var_gradient_v->Matrix(); auto& local_scale_gradient = this->m_scale_gradient->Matrix(); auto& local_bias_gradient = this->m_bias_gradient->Matrix(); // Matrix parameters const auto& width = input.Width(); const auto& local_width = local_input.Width(); const auto& output_dims = this->get_output_dims(); const auto& num_channels = output_dims[0]; const auto& channel_size = this->get_output_size() / num_channels; // Compute local gradients // Compute gradients w.r.t. batch norm parameters El::Zero(local_scale_gradient); El::Zero(local_bias_gradient); El::Zero(local_mean_gradient); El::Zero(local_var_gradient); if (!local_input.IsEmpty()) { const El::Int block_size = 256; dim3 block_dims, grid_dims; block_dims.x = block_size; grid_dims.x = (channel_size + block_size - 1) / block_size; grid_dims.y = num_channels; hipLaunchKernelGGL(( backprop1_kernel<block_size>) , dim3(grid_dims), dim3(block_dims), 0, stream, channel_size, local_width, local_input.LockedBuffer(), local_input.LDim(), local_gradient_wrt_output.LockedBuffer(), local_gradient_wrt_output.LDim(), local_mean.LockedBuffer(), local_var.LockedBuffer(), this->m_epsilon, local_scale.LockedBuffer(), local_scale_gradient.Buffer(), local_bias_gradient.Buffer(), local_mean_gradient.Buffer(), local_var_gradient.Buffer()); } // Accumulate gradients if (is_training) { if (this->m_statistics_group_size == 0) { // Global aggregation; allreduce on fused buffer. this->m_comm->allreduce(*this->m_mean_and_var_gradient, this->m_mean_and_var_gradient->RedundantComm(), El::mpi::SUM); } else if (this->m_statistics_group_size > 1) { // Grouped batchnorm; allreduce on fused buffer. this->m_comm->allreduce(*this->m_mean_and_var_gradient, this->m_comm->get_packed_group_comm(this->m_statistics_group_size), El::mpi::SUM); } } else { // Zero fused buffer. El::Zero(*this->m_mean_and_var_gradient); } auto* scale_optimizer = this->get_weights(0).get_optimizer(); if (scale_optimizer != nullptr) { scale_optimizer->add_to_gradient(*this->m_scale_gradient, TensorDataType(1.0), true); } auto* bias_optimizer = this->get_weights(1).get_optimizer(); if (bias_optimizer != nullptr) { bias_optimizer->add_to_gradient(*this->m_bias_gradient, TensorDataType(1.0), true); } // Compute error signal El::Int num_per_sum; if (this->m_statistics_group_size == 0) { // Global statistics aggregation. num_per_sum = channel_size * width; } else if (this->m_statistics_group_size == 1) { // Local aggregation. num_per_sum = channel_size * local_width; } else { // Grouped batchnorm. num_per_sum = this->m_num_per_sum_cache[width]; // This was computed in FP. 
} if (num_per_sum <= 1) { El::Zero(local_gradient_wrt_input); } else if (!local_input.IsEmpty()) { const El::Int block_size = 256; dim3 block_dims, grid_dims; block_dims.x = block_size; grid_dims.x = (channel_size + block_size - 1) / block_size; grid_dims.y = num_channels; hipLaunchKernelGGL(( backprop2_kernel<block_size>) , dim3(grid_dims), dim3(block_dims), 0, stream, channel_size, local_width, num_per_sum, local_input.LockedBuffer(), local_input.LDim(), local_gradient_wrt_output.LockedBuffer(), local_gradient_wrt_output.LDim(), local_mean.LockedBuffer(), local_var.LockedBuffer(), this->m_epsilon, local_scale.LockedBuffer(), local_mean_gradient.LockedBuffer(), local_var_gradient.LockedBuffer(), local_gradient_wrt_input.Buffer(), local_gradient_wrt_input.LDim()); } } #define PROTO(T) \ template class batch_normalization_layer<T, data_layout::DATA_PARALLEL, El::Device::GPU> #define LBANN_INSTANTIATE_GPU_HALF #include "lbann/macros/instantiate.hpp" } // namespace lbann
762dfffcad0ed13a2438a968b30d5fc0e2402ede.cu
//////////////////////////////////////////////////////////////////////////////// // Copyright (c) 2014-2019, Lawrence Livermore National Security, LLC. // Produced at the Lawrence Livermore National Laboratory. // Written by the LBANN Research Team (B. Van Essen, et al.) listed in // the CONTRIBUTORS file. <[email protected]> // // LLNL-CODE-697807. // All rights reserved. // // This file is part of LBANN: Livermore Big Artificial Neural Network // Toolkit. For details, see http://software.llnl.gov/LBANN or // https://github.com/LLNL/LBANN. // // Licensed under the Apache License, Version 2.0 (the "Licensee"); you // may not use this file except in compliance with the License. You may // obtain a copy of the License at: // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or // implied. See the License for the specific language governing // permissions and limitations under the license. //////////////////////////////////////////////////////////////////////////////// #define LBANN_BATCH_NORMALIZATION_LAYER_INSTANTIATE #include "lbann/layers/regularizers/batch_normalization.hpp" #include "lbann/weights/weights_helpers.hpp" #include "lbann/utils/cuda.hpp" namespace lbann { namespace { /** CUDA kernel to compute channel sums. * Sums and squares of sums are used to compute mean and variance. */ template <El::Int block_size, typename TensorDataType> __global__ void channel_sums_kernel( El::Int channel_height, El::Int width, const TensorDataType * __restrict__ data, El::Int data_ldim, TensorDataType * __restrict__ sums, TensorDataType * __restrict__ sqsums) { // Indices const El::Int tid = threadIdx.x; const El::Int gidx = threadIdx.x + blockIdx.x * blockDim.x; const El::Int bidy = blockIdx.y; // Initialize shared memory __shared__ TensorDataType shared_sums[block_size]; __shared__ TensorDataType shared_sqsums[block_size]; // Compute row sums in shared memory TensorDataType private_sum = 0; TensorDataType private_sqsum = 0; if (gidx < channel_height) { const auto& row = gidx + bidy * channel_height; for (El::Int col = 0; col < width; ++col) { const auto& x = data[row + col * data_ldim]; private_sum += x; private_sqsum += x * x; } } shared_sums[tid] = private_sum; shared_sqsums[tid] = private_sqsum; // Compute channel sum with shared memory reduction /// @todo unroll loops for (El::Int stride = block_size / 2; stride > 0; stride /= 2) { __syncthreads(); if(tid < stride) { shared_sums[tid] += shared_sums[tid + stride]; shared_sqsums[tid] += shared_sqsums[tid + stride]; } } // Output channel sum to global memory if (tid == 0) { cuda::atomic_add(&sums[bidy], shared_sums[0]); cuda::atomic_add(&sqsums[bidy], shared_sqsums[0]); } } /** CUDA kernel to compute statistics. * On input, global_mean and global_var are assumed to contain sums * and squares of sums, respectively. 
*/ template <typename TensorDataType> __global__ void compute_statistics_kernel( El::Int num_sums, El::Int num_per_sum, TensorDataType epsilon, TensorDataType decay, TensorDataType * __restrict__ global_mean, TensorDataType * __restrict__ global_var, TensorDataType * __restrict__ global_running_mean, TensorDataType * __restrict__ global_running_var) { const El::Int gid = threadIdx.x + blockIdx.x * blockDim.x; const El::Int num_threads = blockDim.x * gridDim.x; for (El::Int i = gid; i < num_sums; i += num_threads) { TensorDataType num_per_sum_dt = TensorDataType(num_per_sum); // Compute mean and variance const auto& mean = global_mean[i] / num_per_sum_dt; const auto& sqmean = global_var[i] / num_per_sum_dt; auto var = num_per_sum_dt * (sqmean - mean * mean) / TensorDataType(num_per_sum - 1); var = var > epsilon ? var : epsilon; global_mean[gid] = mean; global_var[gid] = var; // Compute running statistics auto& running_mean = global_running_mean[gid]; auto& running_var = global_running_var[gid]; running_mean = decay * running_mean + (TensorDataType(1.0) - decay) * mean; running_var = decay * running_var + (TensorDataType(1.0) - decay) * var; } } /** CUDA kernel to apply batch normalization. */ template <El::Int block_size, typename TensorDataType> __global__ void batch_normalization_kernel( El::Int channel_height, El::Int width, const TensorDataType * __restrict__ global_input, El::Int input_ldim, const TensorDataType * __restrict__ global_mean, const TensorDataType * __restrict__ global_var, TensorDataType epsilon, const TensorDataType * __restrict__ global_scale, const TensorDataType * __restrict__ global_bias, TensorDataType * __restrict__ global_output, El::Int output_ldim) { // Indices const El::Int gidx = threadIdx.x + blockIdx.x * blockDim.x; const El::Int bidy = blockIdx.y; // Copy batch normalization parameters to private memory const auto& mean = global_mean[bidy]; const auto& var = global_var[bidy]; const auto& scale = global_scale[bidy]; const auto& bias = global_bias[bidy]; // Get reciprocal of standard deviation const auto& inv_stdev = cuda::rsqrt(var + epsilon); // Apply batch normalization if (gidx < channel_height) { const auto& row = gidx + bidy * channel_height; for (El::Int col = 0; col < width; ++col) { const auto& x = global_input[row + col * input_ldim]; const auto& xhat = (x - mean) * inv_stdev; const auto& y = scale * xhat + bias; global_output[row + col * output_ldim] = y; } } } /** CUDA kernel to compute gradients w.r.t. batch norm parameters. 
*/ template <El::Int block_size, typename TensorDataType> __global__ void backprop1_kernel( El::Int channel_height, El::Int width, const TensorDataType * __restrict__ global_input, El::Int input_ldim, const TensorDataType * __restrict__ global_gradient_wrt_output, El::Int gradient_wrt_output_ldim, const TensorDataType * __restrict__ global_mean, const TensorDataType * __restrict__ global_var, TensorDataType epsilon, const TensorDataType * __restrict__ global_scale, TensorDataType * __restrict__ global_dscale, TensorDataType * __restrict__ global_dbias, TensorDataType * __restrict__ global_dmean, TensorDataType * __restrict__ global_dvar) { // Indices const El::Int tid = threadIdx.x; const El::Int gidx = threadIdx.x + blockIdx.x * blockDim.x; const El::Int bidy = blockIdx.y; // Initialize shared memory __shared__ TensorDataType shared_dscale[block_size]; __shared__ TensorDataType shared_dbias[block_size]; __shared__ TensorDataType shared_dmean[block_size]; __shared__ TensorDataType shared_dvar[block_size]; // Copy batch normalization parameters to private memory const auto& mean = global_mean[bidy]; const auto& var = global_var[bidy]; const auto& scale = global_scale[bidy]; // Compute useful constants const TensorDataType zero = TensorDataType(0); const auto& inv_stdev = cuda::rsqrt(var + epsilon); const auto& dvar_factor = inv_stdev * inv_stdev * inv_stdev / TensorDataType(2); // Compute row-wise gradient contributions in shared memory auto dscale = zero; auto dbias = zero; auto dmean = zero; auto dvar = zero; if (gidx < channel_height) { const auto& row = gidx + bidy * channel_height; for(El::Int col = 0; col < width; ++col) { const auto& x = global_input[row + col * input_ldim]; const auto& xhat = (x - mean) * inv_stdev; const auto& dy = global_gradient_wrt_output[row + col * gradient_wrt_output_ldim]; dscale += dy * xhat; dbias += dy; const auto& dxhat = dy * scale; dmean += - dxhat * inv_stdev; dvar += - dxhat * (x - mean) * dvar_factor; } } shared_dscale[tid] = dscale; shared_dbias[tid] = dbias; shared_dmean[tid] = dmean; shared_dvar[tid] = dvar; // Compute gradients with shared memory reduction // @todo unroll loops for (El::Int stride = block_size / 2; stride > 0; stride /= 2) { __syncthreads(); if (tid < stride) { shared_dscale[tid] += shared_dscale[tid + stride]; shared_dbias[tid] += shared_dbias[tid + stride]; shared_dmean[tid] += shared_dmean[tid + stride]; shared_dvar[tid] += shared_dvar[tid + stride]; } } // Output channel sum to global memory if (tid == 0) { cuda::atomic_add(&global_dscale[bidy], shared_dscale[0]); cuda::atomic_add(&global_dbias[bidy], shared_dbias[0]); cuda::atomic_add(&global_dmean[bidy], shared_dmean[0]); cuda::atomic_add(&global_dvar[bidy], shared_dvar[0]); } } /** CUDA kernel to compute gradients w.r.t. input. 
*/ template <El::Int block_size, typename TensorDataType> __global__ void backprop2_kernel( El::Int channel_height, El::Int local_width, El::Int num_per_sum, const TensorDataType * __restrict__ global_input, El::Int input_ldim, const TensorDataType * __restrict__ global_gradient_wrt_output, El::Int gradient_wrt_output_ldim, const TensorDataType * __restrict__ global_mean, const TensorDataType * __restrict__ global_var, TensorDataType epsilon, const TensorDataType * __restrict__ global_scale, const TensorDataType * __restrict__ global_dmean, const TensorDataType * __restrict__ global_dvar, TensorDataType * __restrict__ global_gradient_wrt_input, El::Int gradient_wrt_input_ldim) { // Indices const El::Int gidx = threadIdx.x + blockIdx.x * blockDim.x; const El::Int bidy = blockIdx.y; // Copy batch normalization parameters to private memory const auto& mean = global_mean[bidy]; const auto& var = global_var[bidy]; const auto& scale = global_scale[bidy]; const auto& dmean = global_dmean[bidy]; const auto& dvar = global_dvar[bidy]; // Compute useful constants const auto& inv_stdev = cuda::rsqrt(var + epsilon); const auto& dmean_term = dmean / TensorDataType(num_per_sum); const auto& dvar_term = dvar * TensorDataType(2) / TensorDataType(num_per_sum - 1); // Apply batch normalization if (gidx < channel_height) { const auto& row = gidx + bidy * channel_height; for (El::Int col = 0; col < local_width; ++col) { const auto& x = global_input[row + col * input_ldim]; const auto& dy = global_gradient_wrt_output[row + col * gradient_wrt_output_ldim]; const auto& dxhat = dy * scale; auto& dx = global_gradient_wrt_input[row + col * gradient_wrt_input_ldim]; dx = dxhat * inv_stdev + dmean_term + dvar_term * (x - mean); } } } } // namespace #ifdef LBANN_HAS_DISTCONV template <typename TensorDataType, data_layout T_layout, El::Device Dev> void batch_normalization_distconv_adapter<TensorDataType, T_layout, Dev>::fp_compute() { assert_always(Dev == El::Device::GPU); assert_always(T_layout == data_layout::DATA_PARALLEL); using ValuesGetter = weights_details::SafeWeightsAccessor<TensorDataType>; auto &l = dynamic_cast<batch_normalization_layer< TensorDataType, T_layout, Dev>&>(this->layer()); const bool is_training = l.m_model->get_execution_context().get_execution_mode() == execution_mode::training; auto& local_running_mean = ValuesGetter::mutable_values(this->get_weights(2)).Matrix(); auto& local_running_var = ValuesGetter::mutable_values(this->get_weights(3)).Matrix(); assert0(dc::tensor::View( m_scale, l.weights_values(0).LockedMatrix().LockedBuffer())); assert0(dc::tensor::View( m_bias, l.weights_values(1).LockedMatrix().LockedBuffer())); assert0(dc::tensor::View( m_running_mean, local_running_mean.Buffer())); assert0(dc::tensor::View( m_running_var, local_running_var.Buffer())); m_bn->forward_stage1(this->get_prev_activations(), m_mean, m_var, is_training); if (l.m_statistics_group_size == 0) { l.m_comm->allreduce(*l.m_mean_and_var, l.m_mean_and_var->RedundantComm(), El::mpi::SUM); } else if (l.m_statistics_group_size == 1) { // Local aggregation } else { LBANN_ERROR("statics_group_size must be either 0 or 1 for now."); } m_bn->forward_stage2(this->get_prev_activations(), m_mean, m_var, m_running_mean, m_running_var, m_scale, m_bias, this->get_activations(), is_training); } template <typename TensorDataType, data_layout T_layout, El::Device Dev> void batch_normalization_distconv_adapter<TensorDataType, T_layout, Dev>::bp_compute() { assert_always(Dev == El::Device::GPU); assert_always(T_layout == 
data_layout::DATA_PARALLEL); auto &l = dynamic_cast<batch_normalization_layer< TensorDataType, T_layout, Dev>&>(this->layer()); // Check execution mode const bool is_training = l.m_model->get_execution_context().get_execution_mode() == execution_mode::training; assert_always(is_training); assert0(dc::tensor::View( m_scale, l.weights_values(0).LockedMatrix().LockedBuffer())); m_bn->backward_stage1(this->get_prev_activations(), this->get_prev_error_signals(), m_mean, m_var, m_scale, m_scale_gradient, m_bias_gradient, m_mean_gradient, m_var_gradient); // Verbatim copy from bp_compute_gpu // Accumulate gradients if (is_training) { if (l.m_statistics_group_size == 0) { l.m_comm->allreduce(*l.m_mean_and_var_gradient, l.m_mean_and_var_gradient->RedundantComm(), El::mpi::SUM); } } else { Zero(*l.m_mean_and_var_gradient); } auto* scale_optimizer = l.get_weights(0).get_optimizer(); if (scale_optimizer != nullptr) { scale_optimizer->add_to_gradient(*l.m_scale_gradient, TensorDataType{1}, true); } auto* bias_optimizer = l.get_weights(1).get_optimizer(); if (bias_optimizer != nullptr) { bias_optimizer->add_to_gradient(*l.m_bias_gradient, TensorDataType{1}, true); } m_bn->backward_stage2(this->get_prev_activations(), this->get_prev_error_signals(), m_mean, m_var, m_scale, m_mean_gradient, m_var_gradient, this->get_error_signals()); } #endif // LBANN_HAS_DISTCONV template <typename TensorDataType, data_layout T_layout, El::Device Dev> void batch_normalization_layer<TensorDataType, T_layout, Dev>::fp_compute() { #ifdef LBANN_HAS_DISTCONV if (this->distconv_enabled()) { get_distconv_adapter().fp_compute(); return; } #endif // LBANN_HAS_DISTCONV const bool is_training = this->m_model->get_execution_context().get_execution_mode() == execution_mode::training; // CUDA objects CHECK_CUDA(cudaSetDevice(El::GPUManager::Device())); auto&& stream = El::GPUManager::Stream(); // Matrices const auto& input = this->get_prev_activations(); const auto& local_input = input.LockedMatrix(); auto& local_output = this->get_local_activations(); // Matrix parameters const auto& width = input.Width(); const auto& local_width = local_input.Width(); const auto& output_dims = this->get_output_dims(); const auto& num_channels = output_dims[0]; const auto& channel_size = this->get_output_size() / num_channels; // Compute statistics if (is_training) { using ValuesGetter = weights_details::SafeWeightsAccessor<TensorDataType>; // Local matrices auto& local_mean = this->m_mean_v->Matrix(); auto& local_var = this->m_var_v->Matrix(); auto& local_running_mean = ValuesGetter::mutable_values(this->get_weights(2)).Matrix(); auto& local_running_var = ValuesGetter::mutable_values(this->get_weights(3)).Matrix(); // Compute sums and sums of squares El::Zero(local_mean); El::Zero(local_var); if (!local_input.IsEmpty()) { const El::Int block_size = 256; dim3 block_dims, grid_dims; block_dims.x = block_size; grid_dims.x = (channel_size + block_size - 1) / block_size; grid_dims.y = num_channels; channel_sums_kernel<block_size> <<<grid_dims, block_dims, 0, stream>>>( channel_size, local_width, local_input.LockedBuffer(), local_input.LDim(), local_mean.Buffer(), local_var.Buffer()); } El::Int num_per_sum; if (this->m_statistics_group_size == 0) { // Global statistics aggregation; allreduce on fused buffer. this->m_comm->allreduce(*this->m_mean_and_var, this->m_mean_and_var->RedundantComm(), El::mpi::SUM); num_per_sum = channel_size * width; } else if (this->m_statistics_group_size == 1) { // Local aggregation, no allreduce needed. 
num_per_sum = channel_size * local_width; } else { // Grouped batchnorm. Allreduce on fused buffer. this->m_comm->allreduce(*this->m_mean_and_var, this->m_comm->get_packed_group_comm(this->m_statistics_group_size), El::mpi::SUM); if (this->m_num_per_sum_cache.count(width) == 0) { num_per_sum = channel_size * local_width; num_per_sum = this->m_comm->allreduce( num_per_sum, this->m_comm->get_packed_group_comm(this->m_statistics_group_size)); this->m_num_per_sum_cache[width] = num_per_sum; } else { num_per_sum = this->m_num_per_sum_cache[width]; } } // Compute minibatch statistics if (num_per_sum <= 1) { El::Fill(local_var, TensorDataType(1.0)); } else if (num_channels > 0) { const El::Int block_dim = 256; const El::Int grid_dim = (num_channels + block_dim - 1) / block_dim; compute_statistics_kernel<<<grid_dim, block_dim, 0, stream>>>( num_channels, num_per_sum, this->m_epsilon, this->m_decay, local_mean.Buffer(), local_var.Buffer(), local_running_mean.Buffer(), local_running_var.Buffer()); } } // Apply batch normalization const auto& local_scale = this->weights_values(0).LockedMatrix(); const auto& local_bias = this->weights_values(1).LockedMatrix(); const auto& local_mean = (is_training ? this->m_mean_v->LockedMatrix() : this->weights_values(2).LockedMatrix()); const auto& local_var = (is_training ? this->m_var_v->LockedMatrix() : this->weights_values(3).LockedMatrix()); if (!local_input.IsEmpty()) { const El::Int block_size = 256; dim3 block_dims, grid_dims; block_dims.x = block_size; grid_dims.x = (channel_size + block_size - 1) / block_size; grid_dims.y = num_channels; batch_normalization_kernel<block_size> <<<grid_dims, block_dims, 0, stream>>>( channel_size, local_width, local_input.LockedBuffer(), local_input.LDim(), local_mean.LockedBuffer(), local_var.LockedBuffer(), this->m_epsilon, local_scale.LockedBuffer(), local_bias.LockedBuffer(), local_output.Buffer(), local_output.LDim()); } } template <typename TensorDataType, data_layout T_layout, El::Device Dev> void batch_normalization_layer<TensorDataType, T_layout, Dev>::bp_compute() { #ifdef LBANN_HAS_DISTCONV if (this->distconv_enabled()) { get_distconv_adapter().bp_compute(); return; } #endif // LBANN_HAS_DISTCONV const bool is_training = this->m_model->get_execution_context().get_execution_mode() == execution_mode::training; // CUDA objects CHECK_CUDA(cudaSetDevice(El::GPUManager::Device())); auto&& stream = El::GPUManager::Stream(); // Matrices const auto& local_scale = this->weights_values(0).LockedMatrix(); const auto& local_mean = (is_training ? this->m_mean_v->LockedMatrix() : this->weights_values(2).LockedMatrix()); const auto& local_var = (is_training ? this->m_var_v->LockedMatrix() : this->weights_values(3).LockedMatrix()); const auto& input = this->get_prev_activations(); const auto& local_input = input.LockedMatrix(); const auto& local_gradient_wrt_output = this->get_local_prev_error_signals(); auto& local_gradient_wrt_input = this->get_local_error_signals(); auto& local_mean_gradient = this->m_mean_gradient_v->Matrix(); auto& local_var_gradient = this->m_var_gradient_v->Matrix(); auto& local_scale_gradient = this->m_scale_gradient->Matrix(); auto& local_bias_gradient = this->m_bias_gradient->Matrix(); // Matrix parameters const auto& width = input.Width(); const auto& local_width = local_input.Width(); const auto& output_dims = this->get_output_dims(); const auto& num_channels = output_dims[0]; const auto& channel_size = this->get_output_size() / num_channels; // Compute local gradients // Compute gradients w.r.t. 
batch norm parameters El::Zero(local_scale_gradient); El::Zero(local_bias_gradient); El::Zero(local_mean_gradient); El::Zero(local_var_gradient); if (!local_input.IsEmpty()) { const El::Int block_size = 256; dim3 block_dims, grid_dims; block_dims.x = block_size; grid_dims.x = (channel_size + block_size - 1) / block_size; grid_dims.y = num_channels; backprop1_kernel<block_size> <<<grid_dims, block_dims, 0, stream>>>( channel_size, local_width, local_input.LockedBuffer(), local_input.LDim(), local_gradient_wrt_output.LockedBuffer(), local_gradient_wrt_output.LDim(), local_mean.LockedBuffer(), local_var.LockedBuffer(), this->m_epsilon, local_scale.LockedBuffer(), local_scale_gradient.Buffer(), local_bias_gradient.Buffer(), local_mean_gradient.Buffer(), local_var_gradient.Buffer()); } // Accumulate gradients if (is_training) { if (this->m_statistics_group_size == 0) { // Global aggregation; allreduce on fused buffer. this->m_comm->allreduce(*this->m_mean_and_var_gradient, this->m_mean_and_var_gradient->RedundantComm(), El::mpi::SUM); } else if (this->m_statistics_group_size > 1) { // Grouped batchnorm; allreduce on fused buffer. this->m_comm->allreduce(*this->m_mean_and_var_gradient, this->m_comm->get_packed_group_comm(this->m_statistics_group_size), El::mpi::SUM); } } else { // Zero fused buffer. El::Zero(*this->m_mean_and_var_gradient); } auto* scale_optimizer = this->get_weights(0).get_optimizer(); if (scale_optimizer != nullptr) { scale_optimizer->add_to_gradient(*this->m_scale_gradient, TensorDataType(1.0), true); } auto* bias_optimizer = this->get_weights(1).get_optimizer(); if (bias_optimizer != nullptr) { bias_optimizer->add_to_gradient(*this->m_bias_gradient, TensorDataType(1.0), true); } // Compute error signal El::Int num_per_sum; if (this->m_statistics_group_size == 0) { // Global statistics aggregation. num_per_sum = channel_size * width; } else if (this->m_statistics_group_size == 1) { // Local aggregation. num_per_sum = channel_size * local_width; } else { // Grouped batchnorm. num_per_sum = this->m_num_per_sum_cache[width]; // This was computed in FP. } if (num_per_sum <= 1) { El::Zero(local_gradient_wrt_input); } else if (!local_input.IsEmpty()) { const El::Int block_size = 256; dim3 block_dims, grid_dims; block_dims.x = block_size; grid_dims.x = (channel_size + block_size - 1) / block_size; grid_dims.y = num_channels; backprop2_kernel<block_size> <<<grid_dims, block_dims, 0, stream>>>( channel_size, local_width, num_per_sum, local_input.LockedBuffer(), local_input.LDim(), local_gradient_wrt_output.LockedBuffer(), local_gradient_wrt_output.LDim(), local_mean.LockedBuffer(), local_var.LockedBuffer(), this->m_epsilon, local_scale.LockedBuffer(), local_mean_gradient.LockedBuffer(), local_var_gradient.LockedBuffer(), local_gradient_wrt_input.Buffer(), local_gradient_wrt_input.LDim()); } } #define PROTO(T) \ template class batch_normalization_layer<T, data_layout::DATA_PARALLEL, El::Device::GPU> #define LBANN_INSTANTIATE_GPU_HALF #include "lbann/macros/instantiate.hpp" } // namespace lbann
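channel_sums_kernel and backprop1_kernel in the LBANN pair above both use the same in-block pattern: every thread accumulates a private partial value, the partials are combined by a shared-memory tree reduction over a power-of-two block_size, and thread 0 adds the block result to global memory atomically. The sketch below strips that pattern down to a single sum; the kernel name is hypothetical and plain atomicAdd stands in for LBANN's cuda::atomic_add wrapper.

#include <cuda_runtime.h>

// One float sum per grid: private accumulation -> shared memory -> tree reduction -> one atomicAdd per block.
template <int block_size>
__global__ void blockSumKernel(const float* __restrict__ in, int n, float* __restrict__ out) {
  __shared__ float shared[block_size];
  const int tid = threadIdx.x;
  const int gidx = blockIdx.x * blockDim.x + threadIdx.x;

  // Private accumulation with a grid-stride loop.
  float partial = 0.f;
  for (int i = gidx; i < n; i += gridDim.x * blockDim.x) partial += in[i];
  shared[tid] = partial;

  // Tree reduction; block_size must be a power of two, as in the kernels above.
  for (int stride = block_size / 2; stride > 0; stride /= 2) {
    __syncthreads();
    if (tid < stride) shared[tid] += shared[tid + stride];
  }

  // Thread 0 publishes the block's result, mirroring cuda::atomic_add(&sums[bidy], shared_sums[0]).
  if (tid == 0) atomicAdd(out, shared[0]);
}

// Launch with a matching block size, e.g. blockSumKernel<256><<<num_blocks, 256>>>(d_in, n, d_out);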
5bc4aaf33e16abd8e56e9b6964832f7059b0467f.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*!
 * Copyright (c) 2021 Microsoft Corporation. All rights reserved.
 * Licensed under the MIT License. See LICENSE file in the project root for license information.
 */

#include "cuda_score_updater.hpp"

#ifdef USE_CUDA_EXP

namespace LightGBM {

__global__ void AddScoreConstantKernel(
  const double val,
  const size_t offset,
  const data_size_t num_data,
  double* score) {
  const data_size_t data_index = static_cast<data_size_t>(threadIdx.x + blockIdx.x * blockDim.x);
  if (data_index < num_data) {
    score[data_index + offset] += val;
  }
}

void CUDAScoreUpdater::LaunchAddScoreConstantKernel(const double val, const size_t offset) {
  const int num_blocks = (num_data_ + num_threads_per_block_) / num_threads_per_block_;
  Log::Debug("Adding init score = %lf", val);
  hipLaunchKernelGGL(( AddScoreConstantKernel), dim3(num_blocks), dim3(num_threads_per_block_), 0, 0, val, offset, num_data_, cuda_score_);
}

__global__ void MultiplyScoreConstantKernel(
  const double val,
  const size_t offset,
  const data_size_t num_data,
  double* score) {
  const data_size_t data_index = static_cast<data_size_t>(threadIdx.x + blockIdx.x * blockDim.x);
  if (data_index < num_data) {
    score[data_index] *= val;
  }
}

void CUDAScoreUpdater::LaunchMultiplyScoreConstantKernel(const double val, const size_t offset) {
  const int num_blocks = (num_data_ + num_threads_per_block_) / num_threads_per_block_;
  hipLaunchKernelGGL(( MultiplyScoreConstantKernel), dim3(num_blocks), dim3(num_threads_per_block_), 0, 0, val, offset, num_data_, cuda_score_);
}

}  // namespace LightGBM

#endif  // USE_CUDA_EXP
5bc4aaf33e16abd8e56e9b6964832f7059b0467f.cu
/*!
 * Copyright (c) 2021 Microsoft Corporation. All rights reserved.
 * Licensed under the MIT License. See LICENSE file in the project root for license information.
 */

#include "cuda_score_updater.hpp"

#ifdef USE_CUDA_EXP

namespace LightGBM {

__global__ void AddScoreConstantKernel(
  const double val,
  const size_t offset,
  const data_size_t num_data,
  double* score) {
  const data_size_t data_index = static_cast<data_size_t>(threadIdx.x + blockIdx.x * blockDim.x);
  if (data_index < num_data) {
    score[data_index + offset] += val;
  }
}

void CUDAScoreUpdater::LaunchAddScoreConstantKernel(const double val, const size_t offset) {
  const int num_blocks = (num_data_ + num_threads_per_block_) / num_threads_per_block_;
  Log::Debug("Adding init score = %lf", val);
  AddScoreConstantKernel<<<num_blocks, num_threads_per_block_>>>(val, offset, num_data_, cuda_score_);
}

__global__ void MultiplyScoreConstantKernel(
  const double val,
  const size_t offset,
  const data_size_t num_data,
  double* score) {
  const data_size_t data_index = static_cast<data_size_t>(threadIdx.x + blockIdx.x * blockDim.x);
  if (data_index < num_data) {
    score[data_index] *= val;
  }
}

void CUDAScoreUpdater::LaunchMultiplyScoreConstantKernel(const double val, const size_t offset) {
  const int num_blocks = (num_data_ + num_threads_per_block_) / num_threads_per_block_;
  MultiplyScoreConstantKernel<<<num_blocks, num_threads_per_block_>>>(val, offset, num_data_, cuda_score_);
}

}  // namespace LightGBM

#endif  // USE_CUDA_EXP
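Both launch wrappers above size the grid as (num_data_ + num_threads_per_block_) / num_threads_per_block_. This rounds up, but it also allocates one extra block whenever num_data_ is an exact multiple of the block size; the data_index < num_data guard inside the kernels makes the surplus threads no-ops. A small standalone check of that arithmetic with hypothetical sizes:

#include <cstdio>

int main() {
  const int threads = 256;
  const int sizes[] = {1000, 1024};   // hypothetical data counts
  for (int num_data : sizes) {
    const int blocks_file = (num_data + threads) / threads;       // formula used in the file above
    const int blocks_ceil = (num_data + threads - 1) / threads;   // conventional ceiling division
    printf("num_data=%d: file formula -> %d blocks, ceil-div -> %d blocks\n",
           num_data, blocks_file, blocks_ceil);
  }
  return 0;  // prints 4/4 for 1000 and 5/4 for 1024
}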
9854cc6db1797dd13fdc97e12f9ebaf82f27cb42.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" __global__ void sigmoid(float *inout, float *bias, int rows, int cols) { int j = blockIdx.x * blockDim.x + threadIdx.x; int i = blockIdx.y * blockDim.y + threadIdx.y; if (j >= cols || i >= rows) return; float t = inout[i * cols + j]; inout[i * cols + j] = 1 / (1 + expf(-t)) + bias[i]; }
9854cc6db1797dd13fdc97e12f9ebaf82f27cb42.cu
#include "includes.h" __global__ void sigmoid(float *inout, float *bias, int rows, int cols) { int j = blockIdx.x * blockDim.x + threadIdx.x; int i = blockIdx.y * blockDim.y + threadIdx.y; if (j >= cols || i >= rows) return; float t = inout[i * cols + j]; inout[i * cols + j] = 1 / (1 + expf(-t)) + bias[i]; }
4ec1f4eba7dd8af244e69b92bf07f6be5dbd2a78.hip
// !!! This is a file automatically generated by hipify!!! /* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * Copyright (c) 2018 by Contributors * \file optimizer_op.cu * \brief Optimizer operators * \author Leonard Lausen */ #include "./optimizer_op-inl.h" #include <hipcub/hipcub.hpp> namespace mxnet { namespace op { NNVM_REGISTER_OP(_contrib_group_adagrad_update) .set_attr<FComputeEx>("FComputeEx<gpu>", GroupAdagradUpdateEx<gpu>); } // namespace op } // namespace mxnet
4ec1f4eba7dd8af244e69b92bf07f6be5dbd2a78.cu
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * Copyright (c) 2018 by Contributors * \file optimizer_op.cu * \brief Optimizer operators * \author Leonard Lausen */ #include "./optimizer_op-inl.h" #include <cub/cub.cuh> namespace mxnet { namespace op { NNVM_REGISTER_OP(_contrib_group_adagrad_update) .set_attr<FComputeEx>("FComputeEx<gpu>", GroupAdagradUpdateEx<gpu>); } // namespace op } // namespace mxnet
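// The MXNet pair above reduces to a header swap: <cub/cub.cuh> on the CUDA
// side becomes <hipcub/hipcub.hpp> on ROCm, with the cub:: namespace mirrored
// as hipcub::. A minimal sum-reduction sketch of that mapping using the usual
// two-phase CUB/hipCUB workflow (device_sum is a hypothetical helper; d_in,
// d_out and num_items are device pointers supplied by the caller):
#include <hip/hip_runtime.h>
#include <hipcub/hipcub.hpp>   // CUDA equivalent: #include <cub/cub.cuh>

void device_sum(const float* d_in, float* d_out, int num_items) {
  void* d_temp_storage = nullptr;
  size_t temp_storage_bytes = 0;
  // First call only reports the required scratch size...
  hipcub::DeviceReduce::Sum(d_temp_storage, temp_storage_bytes, d_in, d_out, num_items);
  hipMalloc(&d_temp_storage, temp_storage_bytes);
  // ...second call performs the reduction (cub::DeviceReduce::Sum on CUDA).
  hipcub::DeviceReduce::Sum(d_temp_storage, temp_storage_bytes, d_in, d_out, num_items);
  hipFree(d_temp_storage);
}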
30d0b981b0b75d7e06660638a0f3923dafede77a.hip
// !!! This is a file automatically generated by hipify!!! #include <torch/extension.h> #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include <vector> template <typename scalar_t> __device__ __forceinline__ scalar_t sigmoid(scalar_t z) { return 1.0 / (1.0 + exp(-z)); } template <typename scalar_t> __device__ __forceinline__ scalar_t d_sigmoid(scalar_t z) { const auto s = sigmoid(z); return (1.0 - s) * s; } template <typename scalar_t> __device__ __forceinline__ scalar_t d_tanh(scalar_t z) { const auto t = tanh(z); return 1 - (t * t); } template <typename scalar_t> __device__ __forceinline__ scalar_t elu(scalar_t z, scalar_t alpha = 1.0) { return fmax(0.0, z) + fmin(0.0, alpha * (exp(z) - 1.0)); } template <typename scalar_t> __device__ __forceinline__ scalar_t d_elu(scalar_t z, scalar_t alpha = 1.0) { const auto e = exp(z); const auto d_relu = z < 0.0 ? 0.0 : 1.0; return d_relu + (((alpha * (e - 1.0)) < 0.0) ? (alpha * e) : 0.0); } template <typename scalar_t> __global__ void lltm_cuda_forward_kernel( const torch::PackedTensorAccessor32<scalar_t,3,torch::RestrictPtrTraits> gates, const torch::PackedTensorAccessor32<scalar_t,2,torch::RestrictPtrTraits> old_cell, torch::PackedTensorAccessor32<scalar_t,2,torch::RestrictPtrTraits> new_h, torch::PackedTensorAccessor32<scalar_t,2,torch::RestrictPtrTraits> new_cell, torch::PackedTensorAccessor32<scalar_t,2,torch::RestrictPtrTraits> input_gate, torch::PackedTensorAccessor32<scalar_t,2,torch::RestrictPtrTraits> output_gate, torch::PackedTensorAccessor32<scalar_t,2,torch::RestrictPtrTraits> candidate_cell) { //batch index const int n = blockIdx.y; // column index const int c = blockIdx.x * blockDim.x + threadIdx.x; if (c < gates.size(2)){ input_gate[n][c] = sigmoid(gates[n][0][c]); output_gate[n][c] = sigmoid(gates[n][1][c]); candidate_cell[n][c] = elu(gates[n][2][c]); new_cell[n][c] = old_cell[n][c] + candidate_cell[n][c] * input_gate[n][c]; new_h[n][c] = tanh(new_cell[n][c]) * output_gate[n][c]; } } std::vector<torch::Tensor> lltm_cuda_forward( torch::Tensor input, torch::Tensor weights, torch::Tensor bias, torch::Tensor old_h, torch::Tensor old_cell) { auto X = torch::cat({old_h, input}, /*dim=*/1); auto gate_weights = torch::addmm(bias, X, weights.transpose(0, 1)); const auto batch_size = old_cell.size(0); const auto state_size = old_cell.size(1); auto gates = gate_weights.reshape({batch_size, 3, state_size}); auto new_h = torch::zeros_like(old_cell); auto new_cell = torch::zeros_like(old_cell); auto input_gate = torch::zeros_like(old_cell); auto output_gate = torch::zeros_like(old_cell); auto candidate_cell = torch::zeros_like(old_cell); const int threads = 1024; const dim3 blocks((state_size + threads - 1) / threads, batch_size); AT_DISPATCH_FLOATING_TYPES(gates.type(), "lltm_forward_cuda", ([&] { hipLaunchKernelGGL(( lltm_cuda_forward_kernel<scalar_t>), dim3(blocks), dim3(threads), 0, 0, gates.packed_accessor32<scalar_t,3,torch::RestrictPtrTraits>(), old_cell.packed_accessor32<scalar_t,2,torch::RestrictPtrTraits>(), new_h.packed_accessor32<scalar_t,2,torch::RestrictPtrTraits>(), new_cell.packed_accessor32<scalar_t,2,torch::RestrictPtrTraits>(), input_gate.packed_accessor32<scalar_t,2,torch::RestrictPtrTraits>(), output_gate.packed_accessor32<scalar_t,2,torch::RestrictPtrTraits>(), candidate_cell.packed_accessor32<scalar_t,2,torch::RestrictPtrTraits>()); })); return {new_h, new_cell, input_gate, output_gate, candidate_cell, X, gates}; } template <typename scalar_t> __global__ void lltm_cuda_backward_kernel( 
torch::PackedTensorAccessor32<scalar_t,2,torch::RestrictPtrTraits> d_old_cell, torch::PackedTensorAccessor32<scalar_t,3,torch::RestrictPtrTraits> d_gates, const torch::PackedTensorAccessor32<scalar_t,2,torch::RestrictPtrTraits> grad_h, const torch::PackedTensorAccessor32<scalar_t,2,torch::RestrictPtrTraits> grad_cell, const torch::PackedTensorAccessor32<scalar_t,2,torch::RestrictPtrTraits> new_cell, const torch::PackedTensorAccessor32<scalar_t,2,torch::RestrictPtrTraits> input_gate, const torch::PackedTensorAccessor32<scalar_t,2,torch::RestrictPtrTraits> output_gate, const torch::PackedTensorAccessor32<scalar_t,2,torch::RestrictPtrTraits> candidate_cell, const torch::PackedTensorAccessor32<scalar_t,3,torch::RestrictPtrTraits> gate_weights) { //batch index const int n = blockIdx.y; // column index const int c = blockIdx.x * blockDim.x + threadIdx.x; if (c < d_gates.size(2)){ const auto d_output_gate = tanh(new_cell[n][c]) * grad_h[n][c]; const auto d_tanh_new_cell = output_gate[n][c] * grad_h[n][c]; const auto d_new_cell = d_tanh(new_cell[n][c]) * d_tanh_new_cell + grad_cell[n][c]; d_old_cell[n][c] = d_new_cell; const auto d_candidate_cell = input_gate[n][c] * d_new_cell; const auto d_input_gate = candidate_cell[n][c] * d_new_cell; d_gates[n][0][c] = d_input_gate * d_sigmoid(gate_weights[n][0][c]); d_gates[n][1][c] = d_output_gate * d_sigmoid(gate_weights[n][1][c]); d_gates[n][2][c] = d_candidate_cell * d_elu(gate_weights[n][2][c]); } } std::vector<torch::Tensor> lltm_cuda_backward( torch::Tensor grad_h, torch::Tensor grad_cell, torch::Tensor new_cell, torch::Tensor input_gate, torch::Tensor output_gate, torch::Tensor candidate_cell, torch::Tensor X, torch::Tensor gates, torch::Tensor weights) { auto d_old_cell = torch::zeros_like(new_cell); auto d_gates = torch::zeros_like(gates); const auto batch_size = new_cell.size(0); const auto state_size = new_cell.size(1); const int threads = 1024; const dim3 blocks((state_size + threads - 1) / threads, batch_size); AT_DISPATCH_FLOATING_TYPES(X.type(), "lltm_forward_cuda", ([&] { hipLaunchKernelGGL(( lltm_cuda_backward_kernel<scalar_t>), dim3(blocks), dim3(threads), 0, 0, d_old_cell.packed_accessor32<scalar_t,2,torch::RestrictPtrTraits>(), d_gates.packed_accessor32<scalar_t,3,torch::RestrictPtrTraits>(), grad_h.packed_accessor32<scalar_t,2,torch::RestrictPtrTraits>(), grad_cell.packed_accessor32<scalar_t,2,torch::RestrictPtrTraits>(), new_cell.packed_accessor32<scalar_t,2,torch::RestrictPtrTraits>(), input_gate.packed_accessor32<scalar_t,2,torch::RestrictPtrTraits>(), output_gate.packed_accessor32<scalar_t,2,torch::RestrictPtrTraits>(), candidate_cell.packed_accessor32<scalar_t,2,torch::RestrictPtrTraits>(), gates.packed_accessor32<scalar_t,3,torch::RestrictPtrTraits>()); })); auto d_gate_weights = d_gates.reshape({batch_size, 3*state_size}); auto d_weights = d_gate_weights.t().mm(X); auto d_bias = d_gate_weights.sum(/*dim=*/0, /*keepdim=*/true); auto d_X = d_gate_weights.mm(weights); auto d_old_h = d_X.slice(/*dim=*/1, 0, state_size); auto d_input = d_X.slice(/*dim=*/1, state_size); return {d_old_h, d_input, d_weights, d_bias, d_old_cell, d_gates}; }
30d0b981b0b75d7e06660638a0f3923dafede77a.cu
#include <torch/extension.h> #include <cuda.h> #include <cuda_runtime.h> #include <vector> template <typename scalar_t> __device__ __forceinline__ scalar_t sigmoid(scalar_t z) { return 1.0 / (1.0 + exp(-z)); } template <typename scalar_t> __device__ __forceinline__ scalar_t d_sigmoid(scalar_t z) { const auto s = sigmoid(z); return (1.0 - s) * s; } template <typename scalar_t> __device__ __forceinline__ scalar_t d_tanh(scalar_t z) { const auto t = tanh(z); return 1 - (t * t); } template <typename scalar_t> __device__ __forceinline__ scalar_t elu(scalar_t z, scalar_t alpha = 1.0) { return fmax(0.0, z) + fmin(0.0, alpha * (exp(z) - 1.0)); } template <typename scalar_t> __device__ __forceinline__ scalar_t d_elu(scalar_t z, scalar_t alpha = 1.0) { const auto e = exp(z); const auto d_relu = z < 0.0 ? 0.0 : 1.0; return d_relu + (((alpha * (e - 1.0)) < 0.0) ? (alpha * e) : 0.0); } template <typename scalar_t> __global__ void lltm_cuda_forward_kernel( const torch::PackedTensorAccessor32<scalar_t,3,torch::RestrictPtrTraits> gates, const torch::PackedTensorAccessor32<scalar_t,2,torch::RestrictPtrTraits> old_cell, torch::PackedTensorAccessor32<scalar_t,2,torch::RestrictPtrTraits> new_h, torch::PackedTensorAccessor32<scalar_t,2,torch::RestrictPtrTraits> new_cell, torch::PackedTensorAccessor32<scalar_t,2,torch::RestrictPtrTraits> input_gate, torch::PackedTensorAccessor32<scalar_t,2,torch::RestrictPtrTraits> output_gate, torch::PackedTensorAccessor32<scalar_t,2,torch::RestrictPtrTraits> candidate_cell) { //batch index const int n = blockIdx.y; // column index const int c = blockIdx.x * blockDim.x + threadIdx.x; if (c < gates.size(2)){ input_gate[n][c] = sigmoid(gates[n][0][c]); output_gate[n][c] = sigmoid(gates[n][1][c]); candidate_cell[n][c] = elu(gates[n][2][c]); new_cell[n][c] = old_cell[n][c] + candidate_cell[n][c] * input_gate[n][c]; new_h[n][c] = tanh(new_cell[n][c]) * output_gate[n][c]; } } std::vector<torch::Tensor> lltm_cuda_forward( torch::Tensor input, torch::Tensor weights, torch::Tensor bias, torch::Tensor old_h, torch::Tensor old_cell) { auto X = torch::cat({old_h, input}, /*dim=*/1); auto gate_weights = torch::addmm(bias, X, weights.transpose(0, 1)); const auto batch_size = old_cell.size(0); const auto state_size = old_cell.size(1); auto gates = gate_weights.reshape({batch_size, 3, state_size}); auto new_h = torch::zeros_like(old_cell); auto new_cell = torch::zeros_like(old_cell); auto input_gate = torch::zeros_like(old_cell); auto output_gate = torch::zeros_like(old_cell); auto candidate_cell = torch::zeros_like(old_cell); const int threads = 1024; const dim3 blocks((state_size + threads - 1) / threads, batch_size); AT_DISPATCH_FLOATING_TYPES(gates.type(), "lltm_forward_cuda", ([&] { lltm_cuda_forward_kernel<scalar_t><<<blocks, threads>>>( gates.packed_accessor32<scalar_t,3,torch::RestrictPtrTraits>(), old_cell.packed_accessor32<scalar_t,2,torch::RestrictPtrTraits>(), new_h.packed_accessor32<scalar_t,2,torch::RestrictPtrTraits>(), new_cell.packed_accessor32<scalar_t,2,torch::RestrictPtrTraits>(), input_gate.packed_accessor32<scalar_t,2,torch::RestrictPtrTraits>(), output_gate.packed_accessor32<scalar_t,2,torch::RestrictPtrTraits>(), candidate_cell.packed_accessor32<scalar_t,2,torch::RestrictPtrTraits>()); })); return {new_h, new_cell, input_gate, output_gate, candidate_cell, X, gates}; } template <typename scalar_t> __global__ void lltm_cuda_backward_kernel( torch::PackedTensorAccessor32<scalar_t,2,torch::RestrictPtrTraits> d_old_cell, 
torch::PackedTensorAccessor32<scalar_t,3,torch::RestrictPtrTraits> d_gates, const torch::PackedTensorAccessor32<scalar_t,2,torch::RestrictPtrTraits> grad_h, const torch::PackedTensorAccessor32<scalar_t,2,torch::RestrictPtrTraits> grad_cell, const torch::PackedTensorAccessor32<scalar_t,2,torch::RestrictPtrTraits> new_cell, const torch::PackedTensorAccessor32<scalar_t,2,torch::RestrictPtrTraits> input_gate, const torch::PackedTensorAccessor32<scalar_t,2,torch::RestrictPtrTraits> output_gate, const torch::PackedTensorAccessor32<scalar_t,2,torch::RestrictPtrTraits> candidate_cell, const torch::PackedTensorAccessor32<scalar_t,3,torch::RestrictPtrTraits> gate_weights) { //batch index const int n = blockIdx.y; // column index const int c = blockIdx.x * blockDim.x + threadIdx.x; if (c < d_gates.size(2)){ const auto d_output_gate = tanh(new_cell[n][c]) * grad_h[n][c]; const auto d_tanh_new_cell = output_gate[n][c] * grad_h[n][c]; const auto d_new_cell = d_tanh(new_cell[n][c]) * d_tanh_new_cell + grad_cell[n][c]; d_old_cell[n][c] = d_new_cell; const auto d_candidate_cell = input_gate[n][c] * d_new_cell; const auto d_input_gate = candidate_cell[n][c] * d_new_cell; d_gates[n][0][c] = d_input_gate * d_sigmoid(gate_weights[n][0][c]); d_gates[n][1][c] = d_output_gate * d_sigmoid(gate_weights[n][1][c]); d_gates[n][2][c] = d_candidate_cell * d_elu(gate_weights[n][2][c]); } } std::vector<torch::Tensor> lltm_cuda_backward( torch::Tensor grad_h, torch::Tensor grad_cell, torch::Tensor new_cell, torch::Tensor input_gate, torch::Tensor output_gate, torch::Tensor candidate_cell, torch::Tensor X, torch::Tensor gates, torch::Tensor weights) { auto d_old_cell = torch::zeros_like(new_cell); auto d_gates = torch::zeros_like(gates); const auto batch_size = new_cell.size(0); const auto state_size = new_cell.size(1); const int threads = 1024; const dim3 blocks((state_size + threads - 1) / threads, batch_size); AT_DISPATCH_FLOATING_TYPES(X.type(), "lltm_forward_cuda", ([&] { lltm_cuda_backward_kernel<scalar_t><<<blocks, threads>>>( d_old_cell.packed_accessor32<scalar_t,2,torch::RestrictPtrTraits>(), d_gates.packed_accessor32<scalar_t,3,torch::RestrictPtrTraits>(), grad_h.packed_accessor32<scalar_t,2,torch::RestrictPtrTraits>(), grad_cell.packed_accessor32<scalar_t,2,torch::RestrictPtrTraits>(), new_cell.packed_accessor32<scalar_t,2,torch::RestrictPtrTraits>(), input_gate.packed_accessor32<scalar_t,2,torch::RestrictPtrTraits>(), output_gate.packed_accessor32<scalar_t,2,torch::RestrictPtrTraits>(), candidate_cell.packed_accessor32<scalar_t,2,torch::RestrictPtrTraits>(), gates.packed_accessor32<scalar_t,3,torch::RestrictPtrTraits>()); })); auto d_gate_weights = d_gates.reshape({batch_size, 3*state_size}); auto d_weights = d_gate_weights.t().mm(X); auto d_bias = d_gate_weights.sum(/*dim=*/0, /*keepdim=*/true); auto d_X = d_gate_weights.mm(weights); auto d_old_h = d_X.slice(/*dim=*/1, 0, state_size); auto d_input = d_X.slice(/*dim=*/1, state_size); return {d_old_h, d_input, d_weights, d_bias, d_old_cell, d_gates}; }
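// In the LLTM extension pair above, the only mechanical change inside each
// AT_DISPATCH_FLOATING_TYPES block is the launch form:
// "kernel<scalar_t><<<blocks, threads>>>(...)" becomes
// "hipLaunchKernelGGL((kernel<scalar_t>), dim3(blocks), dim3(threads), 0, 0, ...)",
// with the template-id parenthesised so commas inside angle brackets cannot
// split macro arguments. A stripped-down sketch of the pattern (scale_kernel
// and launch_scale are hypothetical names):
#include <hip/hip_runtime.h>

template <typename scalar_t>
__global__ void scale_kernel(scalar_t* data, scalar_t factor, int n) {
  const int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) data[i] *= factor;
}

template <typename scalar_t>
void launch_scale(scalar_t* d_data, scalar_t factor, int n) {
  const int threads = 1024;
  const int blocks = (n + threads - 1) / threads;
  // CUDA: scale_kernel<scalar_t><<<blocks, threads>>>(d_data, factor, n);
  hipLaunchKernelGGL((scale_kernel<scalar_t>), dim3(blocks), dim3(threads), 0, 0,
                     d_data, factor, n);
}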
bf5080b06ec3a16ffadac90bfcdc2d42dd2a20d8.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <stdlib.h> #include <math.h> #include <rocblas.h> #include "hip/hip_runtime.h" #include <host_defines.h> #include <device_launch_parameters.h> #include <hip/hip_runtime.h> #define BLOCK_SIZE 16 #define N 256 float getnum() { return rand()/((float) RAND_MAX); } __global__ void gpu_matrix_multiply(float *a, float *b, float *c, int n) { __shared__ float tile_a[BLOCK_SIZE][BLOCK_SIZE]; __shared__ float tile_b[BLOCK_SIZE][BLOCK_SIZE]; int row = blockIdx.y * BLOCK_SIZE + threadIdx.y; int col = blockIdx.x * BLOCK_SIZE + threadIdx.x; float tmp = 0.0f; int idx; for (int sub = 0; sub < gridDim.x; ++sub) // gridDim.x { idx = row * n + sub * BLOCK_SIZE + threadIdx.x; if(idx >= n*n) { // n may not divisible by BLOCK_SIZE tile_a[threadIdx.y][threadIdx.x] = 0.0f; } else { tile_a[threadIdx.y][threadIdx.x] = a[idx]; } idx = (sub * BLOCK_SIZE + threadIdx.y) * n + col; if(idx >= n*n) { tile_b[threadIdx.y][threadIdx.x] = 0.0f; } else { tile_b[threadIdx.y][threadIdx.x] = b[idx]; } __syncthreads(); for (int k = 0; k < BLOCK_SIZE; ++k) { tmp += tile_a[threadIdx.y][k] * tile_b[k][threadIdx.x]; } __syncthreads(); } if(row < n && col < n) { c[row * n + col] = tmp; } } void make_identity(float* a, int startx, int starty, int length, int n) { // fill a with the identity from start to end int i, j; for (i=startx; i<length+startx; i++) { for (j=starty; j<length+starty; j++) { a[i*n+j] = (i-startx==j-starty) ? 1.0f : 0.0f; } } } void make_negidentity(float* a, int startx, int starty, int length, int n) { // fill a with the identity from start to end int i, j; for (i=startx; i<length+startx; i++) { for (j=starty; j<length+starty; j++) { a[i*n+j] = (i-startx==j-starty) ? -1.0f : 0.0f; } } } void make_x(float* x, int length) { int i, j; for (i=0; i<length; i++) { for (j=0; j<length; j++) { x[i*length+j] = getnum(); } } } void make_zero(float* a, int startx, int starty, int length, int n) { int i, j; for (i=startx; i<length+startx; i++) { for (j=starty; j<length+starty; j++) { a[i*n+j] = 0.0f; } } } void copy_x(float* a, float* x, int startx, int starty, int length, int n) { int i, j; for (i=startx; i<length+startx; i++) { for (j=starty; j<length+starty; j++) { a[i*n+j] = x[(i-startx)*length+(j-starty)]; } } } void copy_2x(float* a, float* x, int startx, int starty, int length, int n) { int i, j; for (i=startx; i<length+startx; i++) { for (j=starty; j<length+starty; j++) { a[i*n+j] = 2*x[(i-startx)*length+(j-starty)]; } } } void copy_negx(float* a, float* x, int startx, int starty, int length, int n) { int i, j; for (i=startx; i<length+startx; i++) { for (j=starty; j<length+starty; j++) { a[i*n+j] = (-1)*x[(i-startx)*length+(j-starty)]; } } } void make_result(float* a, int length) { int i, j; int half = length>>1; for (i=0; i<length; i++) { for (j=0; j<length; j++) { if (i == j) { if (i>=half) a[i*length+j] = -1.0f; else a[i*length+j] = 1.0f; } else a[i*length+j] = 0.0f; } } } float rothVerf(float* a, float* b, int n) { float sum = 0; int i, j; for (i=0; i<n; i++) { for (j=0; j<n; j++) { sum += (float) fabs(a[i*n+j] - b[i*n+j]); } } return sum; } void print_mat(float* a, int n) { int i, j; for (i=0; i<n; i++) { for (j=0; j<n; j++) { printf("%.1f\t", a[i*n+j]); } printf("\n"); } printf("\n"); } int main() { srand(100); int n = N; size_t totalsize = sizeof(float)*n*n; float *a, *b, *c, *id; hipHostMalloc((void**) &a, totalsize); hipHostMalloc((void**) &b, totalsize); hipHostMalloc((void**) &c, totalsize); hipHostMalloc((void**) &id, 
totalsize); make_identity(id, 0, 0, n, n); // construct first matrix make_identity(a, 0, 0, n, n); // second matrix make_identity(b, 0, 0, n, n); // allocate on device float *dev_a, *dev_b, *dev_c; hipMalloc((void**) &dev_a, totalsize); hipMalloc((void**) &dev_b, totalsize); hipMalloc((void**) &dev_c, totalsize); // copy to device hipMemcpy(dev_a, a, totalsize, hipMemcpyHostToDevice); hipMemcpy(dev_b, b, totalsize, hipMemcpyHostToDevice); // intermediate matrix product hipblasHandle_t handle; const float alpha = 1.0f; const float beta = 0.0f; hipblasSgemm(handle, HIPBLAS_OP_N, HIPBLAS_OP_N, n, n, n, &alpha, dev_a, n, dev_b, n, &beta, dev_c, n); printf("ok\n"); hipDeviceSynchronize(); // bring product back to cpu hipMemcpy(c, dev_c, totalsize, hipMemcpyDeviceToHost); hipFree(dev_a); hipFree(dev_b); hipFree(dev_c); hipHostFree(a); hipHostFree(b); // check a against the result d printf("Sum: %f\n", rothVerf(c, id, n)); // cleanup and exit hipHostFree(c); hipHostFree(id); return 0; }
bf5080b06ec3a16ffadac90bfcdc2d42dd2a20d8.cu
#include <stdio.h> #include <stdlib.h> #include <math.h> #include <cublas_v2.h> #include "cuda.h" #include <host_defines.h> #include <device_launch_parameters.h> #include <cuda_runtime.h> #define BLOCK_SIZE 16 #define N 256 float getnum() { return rand()/((float) RAND_MAX); } __global__ void gpu_matrix_multiply(float *a, float *b, float *c, int n) { __shared__ float tile_a[BLOCK_SIZE][BLOCK_SIZE]; __shared__ float tile_b[BLOCK_SIZE][BLOCK_SIZE]; int row = blockIdx.y * BLOCK_SIZE + threadIdx.y; int col = blockIdx.x * BLOCK_SIZE + threadIdx.x; float tmp = 0.0f; int idx; for (int sub = 0; sub < gridDim.x; ++sub) // gridDim.x { idx = row * n + sub * BLOCK_SIZE + threadIdx.x; if(idx >= n*n) { // n may not divisible by BLOCK_SIZE tile_a[threadIdx.y][threadIdx.x] = 0.0f; } else { tile_a[threadIdx.y][threadIdx.x] = a[idx]; } idx = (sub * BLOCK_SIZE + threadIdx.y) * n + col; if(idx >= n*n) { tile_b[threadIdx.y][threadIdx.x] = 0.0f; } else { tile_b[threadIdx.y][threadIdx.x] = b[idx]; } __syncthreads(); for (int k = 0; k < BLOCK_SIZE; ++k) { tmp += tile_a[threadIdx.y][k] * tile_b[k][threadIdx.x]; } __syncthreads(); } if(row < n && col < n) { c[row * n + col] = tmp; } } void make_identity(float* a, int startx, int starty, int length, int n) { // fill a with the identity from start to end int i, j; for (i=startx; i<length+startx; i++) { for (j=starty; j<length+starty; j++) { a[i*n+j] = (i-startx==j-starty) ? 1.0f : 0.0f; } } } void make_negidentity(float* a, int startx, int starty, int length, int n) { // fill a with the identity from start to end int i, j; for (i=startx; i<length+startx; i++) { for (j=starty; j<length+starty; j++) { a[i*n+j] = (i-startx==j-starty) ? -1.0f : 0.0f; } } } void make_x(float* x, int length) { int i, j; for (i=0; i<length; i++) { for (j=0; j<length; j++) { x[i*length+j] = getnum(); } } } void make_zero(float* a, int startx, int starty, int length, int n) { int i, j; for (i=startx; i<length+startx; i++) { for (j=starty; j<length+starty; j++) { a[i*n+j] = 0.0f; } } } void copy_x(float* a, float* x, int startx, int starty, int length, int n) { int i, j; for (i=startx; i<length+startx; i++) { for (j=starty; j<length+starty; j++) { a[i*n+j] = x[(i-startx)*length+(j-starty)]; } } } void copy_2x(float* a, float* x, int startx, int starty, int length, int n) { int i, j; for (i=startx; i<length+startx; i++) { for (j=starty; j<length+starty; j++) { a[i*n+j] = 2*x[(i-startx)*length+(j-starty)]; } } } void copy_negx(float* a, float* x, int startx, int starty, int length, int n) { int i, j; for (i=startx; i<length+startx; i++) { for (j=starty; j<length+starty; j++) { a[i*n+j] = (-1)*x[(i-startx)*length+(j-starty)]; } } } void make_result(float* a, int length) { int i, j; int half = length>>1; for (i=0; i<length; i++) { for (j=0; j<length; j++) { if (i == j) { if (i>=half) a[i*length+j] = -1.0f; else a[i*length+j] = 1.0f; } else a[i*length+j] = 0.0f; } } } float rothVerf(float* a, float* b, int n) { float sum = 0; int i, j; for (i=0; i<n; i++) { for (j=0; j<n; j++) { sum += (float) fabs(a[i*n+j] - b[i*n+j]); } } return sum; } void print_mat(float* a, int n) { int i, j; for (i=0; i<n; i++) { for (j=0; j<n; j++) { printf("%.1f\t", a[i*n+j]); } printf("\n"); } printf("\n"); } int main() { srand(100); int n = N; size_t totalsize = sizeof(float)*n*n; float *a, *b, *c, *id; cudaMallocHost((void**) &a, totalsize); cudaMallocHost((void**) &b, totalsize); cudaMallocHost((void**) &c, totalsize); cudaMallocHost((void**) &id, totalsize); make_identity(id, 0, 0, n, n); // construct first matrix 
make_identity(a, 0, 0, n, n); // second matrix make_identity(b, 0, 0, n, n); // allocate on device float *dev_a, *dev_b, *dev_c; cudaMalloc((void**) &dev_a, totalsize); cudaMalloc((void**) &dev_b, totalsize); cudaMalloc((void**) &dev_c, totalsize); // copy to device cudaMemcpy(dev_a, a, totalsize, cudaMemcpyHostToDevice); cudaMemcpy(dev_b, b, totalsize, cudaMemcpyHostToDevice); // intermediate matrix product cublasHandle_t handle; const float alpha = 1.0f; const float beta = 0.0f; cublasSgemm_v2(handle, CUBLAS_OP_N, CUBLAS_OP_N, n, n, n, &alpha, dev_a, n, dev_b, n, &beta, dev_c, n); printf("ok\n"); cudaThreadSynchronize(); // bring product back to cpu cudaMemcpy(c, dev_c, totalsize, cudaMemcpyDeviceToHost); cudaFree(dev_a); cudaFree(dev_b); cudaFree(dev_c); cudaFreeHost(a); cudaFreeHost(b); // check a against the result d printf("Sum: %f\n", rothVerf(c, id, n)); // cleanup and exit cudaFreeHost(c); cudaFreeHost(id); return 0; }
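// The cuBLAS test pair above maps almost token-for-token: cublasHandle_t ->
// hipblasHandle_t, cublasSgemm_v2 -> hipblasSgemm, CUBLAS_OP_N -> HIPBLAS_OP_N,
// cudaMallocHost/cudaFreeHost -> hipHostMalloc/hipHostFree, and the deprecated
// cudaThreadSynchronize -> hipDeviceSynchronize. In both versions of the file
// the BLAS handle is used without ever being created. A corrected minimal GEMM
// sketch on the HIP side (gemm_square is a hypothetical helper; the hipBLAS
// header path varies by ROCm release, and some installs use <hipblas/hipblas.h>):
#include <hipblas.h>

void gemm_square(int n, const float* d_a, const float* d_b, float* d_c) {
  hipblasHandle_t handle;
  hipblasCreate(&handle);   // missing in both files of the pair above
  const float alpha = 1.0f;
  const float beta = 0.0f;
  hipblasSgemm(handle, HIPBLAS_OP_N, HIPBLAS_OP_N,
               n, n, n, &alpha, d_a, n, d_b, n, &beta, d_c, n);
  hipblasDestroy(handle);
}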
e2cd4ba1ef78dc3727c93cba84e064fc2de6546e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <algorithm> #include <cfloat> #include <vector> #include "caffe/layers/permute_layer.hpp" #include "caffe/util/math_functions.hpp" namespace caffe { #ifdef USE_ROCM template <typename Dtype> __global__ void PermuteKernel(const int nthreads, Dtype* const bottom_data, const bool forward, const int* permute_order, const int* old_steps, const int* new_steps, const int num_axes, Dtype* const top_data) { CUDA_KERNEL_LOOP(index, nthreads) { int temp_idx = index; int old_idx = 0; for (int i = 0; i < num_axes; ++i) { int order = permute_order[i]; old_idx += (temp_idx / new_steps[i]) * old_steps[order]; temp_idx %= new_steps[i]; } if (forward) { top_data[index] = bottom_data[old_idx]; } else { bottom_data[old_idx] = top_data[index]; } } } #endif template <typename Dtype> void PermuteLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { if (need_permute_) { Dtype* bottom_data = bottom[0]->mutable_gpu_data(); Dtype* top_data = top[0]->mutable_gpu_data(); int count = top[0]->count(); const int* permute_order = permute_order_.gpu_data(); const int* new_steps = new_steps_.gpu_data(); const int* old_steps = old_steps_.gpu_data(); bool forward = true; if (this->device_->backend() == BACKEND_CUDA) { #ifdef USE_ROCM // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( PermuteKernel<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, bottom_data, forward, permute_order, old_steps, new_steps, num_axes_, top_data); CUDA_POST_KERNEL_CHECK; #endif // USE_ROCM } else { #ifdef USE_GREENTEA viennacl::ocl::context &ctx = viennacl::ocl::get_context( this->device_->id()); viennacl::ocl::program &program = this->device_->program(); viennacl::ocl::kernel &oclk_permute = program.get_kernel( CL_KERNEL_SELECT("PermuteKernel")); viennacl::ocl::enqueue( oclk_permute(count, WrapHandle((cl_mem) bottom_data, &ctx), (const int)forward, WrapHandle((cl_mem)permute_order, &ctx), WrapHandle((cl_mem)old_steps, &ctx), WrapHandle((cl_mem)new_steps, &ctx), num_axes_, WrapHandle((cl_mem) top_data, &ctx)), ctx.get_queue()); #endif // USE_GREENTEA } } else { // If there is no need to permute, we share data to save memory. top[0]->ShareData(*bottom[0]); } } template <typename Dtype> void PermuteLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { #ifdef USE_ROCM if (need_permute_) { Dtype* top_diff = top[0]->mutable_gpu_diff(); Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); const int count = bottom[0]->count(); const int* permute_order = permute_order_.gpu_data(); const int* new_steps = new_steps_.gpu_data(); const int* old_steps = old_steps_.gpu_data(); bool foward = false; // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( PermuteKernel<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, bottom_diff, foward, permute_order, old_steps, new_steps, num_axes_, top_diff); CUDA_POST_KERNEL_CHECK; } else { // If there is no need to permute, we share diff to save memory. bottom[0]->ShareDiff(*top[0]); } #else this->Backward_cpu(top, propagate_down, bottom); // NOT_IMPLEMENTED; #endif // USE_ROCM } INSTANTIATE_LAYER_GPU_FUNCS(PermuteLayer); } // namespace caffe
e2cd4ba1ef78dc3727c93cba84e064fc2de6546e.cu
#include <algorithm> #include <cfloat> #include <vector> #include "caffe/layers/permute_layer.hpp" #include "caffe/util/math_functions.hpp" namespace caffe { #ifdef USE_CUDA template <typename Dtype> __global__ void PermuteKernel(const int nthreads, Dtype* const bottom_data, const bool forward, const int* permute_order, const int* old_steps, const int* new_steps, const int num_axes, Dtype* const top_data) { CUDA_KERNEL_LOOP(index, nthreads) { int temp_idx = index; int old_idx = 0; for (int i = 0; i < num_axes; ++i) { int order = permute_order[i]; old_idx += (temp_idx / new_steps[i]) * old_steps[order]; temp_idx %= new_steps[i]; } if (forward) { top_data[index] = bottom_data[old_idx]; } else { bottom_data[old_idx] = top_data[index]; } } } #endif template <typename Dtype> void PermuteLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { if (need_permute_) { Dtype* bottom_data = bottom[0]->mutable_gpu_data(); Dtype* top_data = top[0]->mutable_gpu_data(); int count = top[0]->count(); const int* permute_order = permute_order_.gpu_data(); const int* new_steps = new_steps_.gpu_data(); const int* old_steps = old_steps_.gpu_data(); bool forward = true; if (this->device_->backend() == BACKEND_CUDA) { #ifdef USE_CUDA // NOLINT_NEXT_LINE(whitespace/operators) PermuteKernel<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>( count, bottom_data, forward, permute_order, old_steps, new_steps, num_axes_, top_data); CUDA_POST_KERNEL_CHECK; #endif // USE_CUDA } else { #ifdef USE_GREENTEA viennacl::ocl::context &ctx = viennacl::ocl::get_context( this->device_->id()); viennacl::ocl::program &program = this->device_->program(); viennacl::ocl::kernel &oclk_permute = program.get_kernel( CL_KERNEL_SELECT("PermuteKernel")); viennacl::ocl::enqueue( oclk_permute(count, WrapHandle((cl_mem) bottom_data, &ctx), (const int)forward, WrapHandle((cl_mem)permute_order, &ctx), WrapHandle((cl_mem)old_steps, &ctx), WrapHandle((cl_mem)new_steps, &ctx), num_axes_, WrapHandle((cl_mem) top_data, &ctx)), ctx.get_queue()); #endif // USE_GREENTEA } } else { // If there is no need to permute, we share data to save memory. top[0]->ShareData(*bottom[0]); } } template <typename Dtype> void PermuteLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { #ifdef USE_CUDA if (need_permute_) { Dtype* top_diff = top[0]->mutable_gpu_diff(); Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); const int count = bottom[0]->count(); const int* permute_order = permute_order_.gpu_data(); const int* new_steps = new_steps_.gpu_data(); const int* old_steps = old_steps_.gpu_data(); bool foward = false; // NOLINT_NEXT_LINE(whitespace/operators) PermuteKernel<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>( count, bottom_diff, foward, permute_order, old_steps, new_steps, num_axes_, top_diff); CUDA_POST_KERNEL_CHECK; } else { // If there is no need to permute, we share diff to save memory. bottom[0]->ShareDiff(*top[0]); } #else this->Backward_cpu(top, propagate_down, bottom); // NOT_IMPLEMENTED; #endif // USE_CUDA } INSTANTIATE_LAYER_GPU_FUNCS(PermuteLayer); } // namespace caffe
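// Beyond the launch rewrite, the Caffe permute pair above shows hipify
// renaming the build guard: each "#ifdef USE_CUDA" block becomes
// "#ifdef USE_ROCM", while the OpenCL (USE_GREENTEA) branch is left untouched.
// A condensed sketch of that guard structure with a trivial stand-in kernel
// (copy_kernel and forward_copy are hypothetical names, not Caffe code):
#include <hip/hip_runtime.h>

__global__ void copy_kernel(const float* in, float* out, int n) {
  const int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) out[i] = in[i];
}

void forward_copy(const float* d_in, float* d_out, int n) {
#ifdef USE_ROCM   // hipify's rename of the CUDA-only guard (#ifdef USE_CUDA)
  const int threads = 512;
  hipLaunchKernelGGL(copy_kernel, dim3((n + threads - 1) / threads), dim3(threads),
                     0, 0, d_in, d_out, n);
#else
  // e.g. the OpenCL/GreenTea path of the original layer would go here
  (void)d_in; (void)d_out; (void)n;
#endif
}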
a602748e01bebcbcc1d5fd6a1a3cd15106693895.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <stdlib.h> #include <string.h> #include <math.h> #include <time.h> #include <hip/hip_runtime.h> #include "gpu_err_check.h" #include <float.h> const unsigned long int MAX_OPS = 10000000000; const int threads_per_block = 512; const int n_blocks = 1000; __global__ void baseline(unsigned long int n_ops) { unsigned long int num_iterations = n_ops / threads_per_block / n_blocks / 2; float res; for (unsigned long int i = 0; i < num_iterations; i++) { res = 3.0 * 4.0 + 6.0; } } __global__ void global(unsigned long int n_ops, float* a, float* b, float* c, float* res) { unsigned long int num_iterations = n_ops / threads_per_block / n_blocks / 2; int tid = threadIdx.x + blockDim.x * blockIdx.x; for (unsigned long int i = 0; i < num_iterations; i++) { res[tid] += a[tid] * b[tid] + c[tid]; } } __global__ void local(unsigned long int n_ops, float* a, float* b, float* c, float* res) { unsigned long int num_iterations = n_ops / threads_per_block / n_blocks / 2; int tid = threadIdx.x + blockDim.x * blockIdx.x; float local_a = a[tid]; float local_b = b[tid]; float local_c = c[tid]; float local_res = 0.0; for (unsigned long int i = 0; i < num_iterations; i++) { local_res += local_a * local_b + local_c; } res[tid] = local_res; } int main(){ hipFree(0); // avoid spoofing profiler. hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); float* a = (float*) malloc(n_blocks * threads_per_block * sizeof(float)); float* b = (float*) malloc(n_blocks * threads_per_block * sizeof(float)); float* c = (float*) malloc(n_blocks * threads_per_block * sizeof(float)); for (int i = 0; i < n_blocks * threads_per_block; i++){ a[i] = 3.0; b[i] = 4.0; c[i] = 6.0; } float* d_a; float* d_b; float* d_c; float* d_res; printf("n_ops, global, local, baseline\n"); float milliseconds; for (unsigned long int i = 1; i <MAX_OPS; i*=2 ) { for (int k=0; k<10; k++) { printf("%lu,", i); hipEventRecord(start); hipLaunchKernelGGL(( baseline), dim3(n_blocks), dim3(threads_per_block), 0, 0, i); hipEventRecord(stop); hipEventSynchronize(stop); hipDeviceSynchronize(); milliseconds = 0; hipEventElapsedTime(&milliseconds, start, stop); printf("%.7g,", milliseconds / 1000.0); hipMalloc((void**) &d_a, n_blocks * threads_per_block * sizeof(float)); hipMalloc((void**) &d_b, n_blocks * threads_per_block * sizeof(float)); hipMalloc((void**) &d_c, n_blocks * threads_per_block * sizeof(float)); hipMalloc((void**) &d_res, n_blocks * threads_per_block * sizeof(float)); hipMemcpy(d_a, a, n_blocks * threads_per_block * sizeof(float), hipMemcpyHostToDevice); hipMemcpy(d_b, b, n_blocks * threads_per_block * sizeof(float), hipMemcpyHostToDevice); hipMemcpy(d_c, c, n_blocks * threads_per_block * sizeof(float), hipMemcpyHostToDevice); hipEventRecord(start); hipLaunchKernelGGL(( global), dim3(n_blocks), dim3(threads_per_block) , 0, 0, i, d_a, d_b, d_c, d_res); hipEventRecord(stop); hipEventSynchronize(stop); hipDeviceSynchronize(); milliseconds = 0; hipEventElapsedTime(&milliseconds, start, stop); printf("%.7g,", milliseconds / 1000.0); hipEventRecord(start); hipLaunchKernelGGL(( local), dim3(n_blocks), dim3(threads_per_block) , 0, 0, i, d_a, d_b, d_c, d_res); hipEventRecord(stop); hipEventSynchronize(stop); hipDeviceSynchronize(); milliseconds = 0; hipEventElapsedTime(&milliseconds, start, stop); printf("%.7g\n", milliseconds / 1000.0); hipFree(&d_a); hipFree(&d_b); hipFree(&d_c); } } return 0; }
a602748e01bebcbcc1d5fd6a1a3cd15106693895.cu
#include <stdio.h> #include <stdlib.h> #include <string.h> #include <math.h> #include <time.h> #include <cuda.h> #include "gpu_err_check.h" #include <float.h> const unsigned long int MAX_OPS = 10000000000; const int threads_per_block = 512; const int n_blocks = 1000; __global__ void baseline(unsigned long int n_ops) { unsigned long int num_iterations = n_ops / threads_per_block / n_blocks / 2; float res; for (unsigned long int i = 0; i < num_iterations; i++) { res = 3.0 * 4.0 + 6.0; } } __global__ void global(unsigned long int n_ops, float* a, float* b, float* c, float* res) { unsigned long int num_iterations = n_ops / threads_per_block / n_blocks / 2; int tid = threadIdx.x + blockDim.x * blockIdx.x; for (unsigned long int i = 0; i < num_iterations; i++) { res[tid] += a[tid] * b[tid] + c[tid]; } } __global__ void local(unsigned long int n_ops, float* a, float* b, float* c, float* res) { unsigned long int num_iterations = n_ops / threads_per_block / n_blocks / 2; int tid = threadIdx.x + blockDim.x * blockIdx.x; float local_a = a[tid]; float local_b = b[tid]; float local_c = c[tid]; float local_res = 0.0; for (unsigned long int i = 0; i < num_iterations; i++) { local_res += local_a * local_b + local_c; } res[tid] = local_res; } int main(){ cudaFree(0); // avoid spoofing profiler. cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); float* a = (float*) malloc(n_blocks * threads_per_block * sizeof(float)); float* b = (float*) malloc(n_blocks * threads_per_block * sizeof(float)); float* c = (float*) malloc(n_blocks * threads_per_block * sizeof(float)); for (int i = 0; i < n_blocks * threads_per_block; i++){ a[i] = 3.0; b[i] = 4.0; c[i] = 6.0; } float* d_a; float* d_b; float* d_c; float* d_res; printf("n_ops, global, local, baseline\n"); float milliseconds; for (unsigned long int i = 1; i <MAX_OPS; i*=2 ) { for (int k=0; k<10; k++) { printf("%lu,", i); cudaEventRecord(start); baseline<<<n_blocks, threads_per_block>>>(i); cudaEventRecord(stop); cudaEventSynchronize(stop); cudaDeviceSynchronize(); milliseconds = 0; cudaEventElapsedTime(&milliseconds, start, stop); printf("%.7g,", milliseconds / 1000.0); cudaMalloc((void**) &d_a, n_blocks * threads_per_block * sizeof(float)); cudaMalloc((void**) &d_b, n_blocks * threads_per_block * sizeof(float)); cudaMalloc((void**) &d_c, n_blocks * threads_per_block * sizeof(float)); cudaMalloc((void**) &d_res, n_blocks * threads_per_block * sizeof(float)); cudaMemcpy(d_a, a, n_blocks * threads_per_block * sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(d_b, b, n_blocks * threads_per_block * sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(d_c, c, n_blocks * threads_per_block * sizeof(float), cudaMemcpyHostToDevice); cudaEventRecord(start); global<<< n_blocks, threads_per_block >>> (i, d_a, d_b, d_c, d_res); cudaEventRecord(stop); cudaEventSynchronize(stop); cudaDeviceSynchronize(); milliseconds = 0; cudaEventElapsedTime(&milliseconds, start, stop); printf("%.7g,", milliseconds / 1000.0); cudaEventRecord(start); local<<< n_blocks, threads_per_block >>> (i, d_a, d_b, d_c, d_res); cudaEventRecord(stop); cudaEventSynchronize(stop); cudaDeviceSynchronize(); milliseconds = 0; cudaEventElapsedTime(&milliseconds, start, stop); printf("%.7g\n", milliseconds / 1000.0); cudaFree(&d_a); cudaFree(&d_b); cudaFree(&d_c); } } return 0; }
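// The memory-benchmark pair above maps the CUDA event-timing API one-for-one:
// cudaEvent_t / cudaEventCreate / cudaEventRecord / cudaEventSynchronize /
// cudaEventElapsedTime become the corresponding hipEvent* calls, and
// cudaDeviceSynchronize becomes hipDeviceSynchronize. (Note that in both
// versions the cleanup passes the address of the pointer, cudaFree(&d_a) /
// hipFree(&d_a), rather than the pointer itself.) A minimal event-timing
// sketch around an arbitrary kernel; busy_kernel and time_kernel are
// hypothetical names, and the loop is only something to time (the compiler may
// optimise it away):
#include <hip/hip_runtime.h>
#include <cstdio>

__global__ void busy_kernel(int iters) {
  float acc = 0.0f;
  for (int i = 0; i < iters; ++i) acc += 1.0f;
  (void)acc;
}

void time_kernel(int iters) {
  hipEvent_t start, stop;
  hipEventCreate(&start);
  hipEventCreate(&stop);

  hipEventRecord(start);
  hipLaunchKernelGGL(busy_kernel, dim3(1000), dim3(512), 0, 0, iters);
  hipEventRecord(stop);
  hipEventSynchronize(stop);

  float ms = 0.0f;
  hipEventElapsedTime(&ms, start, stop);
  printf("kernel time: %.3f ms\n", ms);

  hipEventDestroy(start);
  hipEventDestroy(stop);
}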
5bf8a0c4e6cd753d8e7abed66a475b9173e5e89d.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include <thrust/device_vector.h> #include <thrust/host_vector.h> #include <thrust/scan.h> #include "common.h" #include "thrust.h" namespace StreamCompaction { namespace Thrust { using StreamCompaction::Common::PerformanceTimer; PerformanceTimer& timer() { static PerformanceTimer timer; return timer; } /** * Performs prefix-sum (aka scan) on idata, storing the result into odata. */ void scan(int n, int *odata, const int *idata) { // TODO use `thrust::exclusive_scan` // example: for device_vectors dv_in and dv_out: // thrust::exclusive_scan(dv_in.begin(), dv_in.end(), dv_out.begin()); thrust::device_vector<int> dv_in(idata,idata+n); thrust::device_vector<int> dv_out(idata,idata+n); timer().startGpuTimer(); thrust::exclusive_scan(dv_in.begin(), dv_in.end(), dv_out.begin()); timer().endGpuTimer(); thrust::copy(dv_out.begin(),dv_out.end(),odata); } } }
5bf8a0c4e6cd753d8e7abed66a475b9173e5e89d.cu
#include <cuda.h> #include <cuda_runtime.h> #include <thrust/device_vector.h> #include <thrust/host_vector.h> #include <thrust/scan.h> #include "common.h" #include "thrust.h" namespace StreamCompaction { namespace Thrust { using StreamCompaction::Common::PerformanceTimer; PerformanceTimer& timer() { static PerformanceTimer timer; return timer; } /** * Performs prefix-sum (aka scan) on idata, storing the result into odata. */ void scan(int n, int *odata, const int *idata) { // TODO use `thrust::exclusive_scan` // example: for device_vectors dv_in and dv_out: // thrust::exclusive_scan(dv_in.begin(), dv_in.end(), dv_out.begin()); thrust::device_vector<int> dv_in(idata,idata+n); thrust::device_vector<int> dv_out(idata,idata+n); timer().startGpuTimer(); thrust::exclusive_scan(dv_in.begin(), dv_in.end(), dv_out.begin()); timer().endGpuTimer(); thrust::copy(dv_out.begin(),dv_out.end(),odata); } } }
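// The Thrust pair above is the most portable case in this set: the thrust::
// containers and algorithms compile unchanged on both backends (rocThrust on
// ROCm), so hipify only swaps the runtime headers. A minimal exclusive-scan
// sketch with its expected result (scan_demo is a hypothetical name):
#include <hip/hip_runtime.h>          // <cuda.h>/<cuda_runtime.h> on the CUDA side
#include <thrust/device_vector.h>
#include <thrust/scan.h>
#include <cstdio>

void scan_demo() {
  const int h_in[5] = {3, 1, 4, 1, 5};
  thrust::device_vector<int> d(h_in, h_in + 5);
  thrust::exclusive_scan(d.begin(), d.end(), d.begin());  // in-place is allowed
  // d now holds {0, 3, 4, 8, 9}: element i is the sum of inputs 0..i-1.
  for (int i = 0; i < 5; ++i) printf("%d ", static_cast<int>(d[i]));
  printf("\n");
}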
a5e4bfe790b3c523008a6ee295037bca25f2b860.hip
// !!! This is a file automatically generated by hipify!!! /*========================================================================== M A I N * Copyright (c) 2008, NetSysLab at the University of British Columbia * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of the University nor the * names of its contributors may be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY NetSysLab ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL NetSysLab BE LIABLE FOR ANY * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. DESCRIPTION Main entry. ==========================================================================*/ /*========================================================================== INCLUDES ==========================================================================*/ // system #include <stdlib.h> #include <stdio.h> #include <string.h> // project #include <cust.h> #include <storeGPU.h> #include <storeCPU.h> /*========================================================================== DATA DECLARATIONS ==========================================================================*/ /*-------------------------------------------------------------------------- TYPE DEFINITIONS --------------------------------------------------------------------------*/ /*-------------------------------------------------------------------------- FUNCTION PROTOTYPES --------------------------------------------------------------------------*/ /*-------------------------------------------------------------------------- CONSTANTS --------------------------------------------------------------------------*/ /*-------------------------------------------------------------------------- GLOBAL VARIABLES --------------------------------------------------------------------------*/ /*-------------------------------------------------------------------------- MACROS --------------------------------------------------------------------------*/ /*========================================================================== FUNCTIONS ==========================================================================*/ /*-------------------------------------------------------------------------- GLOBAL FUNCTIONS --------------------------------------------------------------------------*/ /*=========================================================================== FUNCTION SG_PRINT_TIME_BREAKDOWN DESCRIPTION Prints out the given time breakdown 
parameter DEPENDENCIES None RETURN VALUE None ===========================================================================*/ static void print_gpu_time_breakdown( sg_time_breakdown_type* time_breakdown, float input_buffer_alloc_time, float gpu_init_time ) { printf("\n== GPU Timing ==\n"); printf("GPU init : %f\n", gpu_init_time); printf("Host input buffer alloc : %f\n", input_buffer_alloc_time); printf("-----\n"); printf("Host output buffer alloc : %f\n", time_breakdown->host_output_buffer_alloc_time); printf("GPU memory alloc : %f\n", time_breakdown->device_mem_alloc_time); printf("Data copy in : %f\n", time_breakdown->copy_in_time); printf("Kernel execution : %f\n", time_breakdown->exec_time); printf("Data copy out : %f\n", time_breakdown->copy_out_time); printf("Last hasing stage : %f\n", time_breakdown->last_stage_time); } #ifdef FEATURE_RUN_OVERLAP_TEST /*=========================================================================== FUNCTION run_md5_overlap_test DESCRIPTION run the test DEPENDENCIES None RETURN VALUE None ===========================================================================*/ void run_md5_overlap_test( ) { //**** Variables ****// float host_input_buffer_alloc_time, gpu_init_time; sg_time_breakdown_type gpu_time_breakdown; unsigned char* sc_output; unsigned char* sg_output; unsigned char* buffer; unsigned int timer; int sg_output_size; int sc_output_size; printf( "MD5 Overlap Test\n\n" ); //**** create the timer ****// timer = 0; CUT_SAFE_CALL( cutCreateTimer( &timer)); //** GPU initialization timing **// CUT_SAFE_CALL(cutResetTimer(timer)); CUT_SAFE_CALL(cutStartTimer(timer)); /* GPU device initialization */ sg_init(); /* stop the timer */ CUT_SAFE_CALL(cutStopTimer(timer)); gpu_init_time = cutGetTimerValue(timer); //**** Host input buffer allocation timing ****// CUT_SAFE_CALL(cutResetTimer(timer)); CUT_SAFE_CALL(cutStartTimer(timer)); /* allocate test buffer */ buffer = (unsigned char*) sg_malloc(TEST_MEM_SIZE_OVERLAP); /* stop the timer */ CUT_SAFE_CALL(cutStopTimer(timer)); host_input_buffer_alloc_time = cutGetTimerValue(timer); //**** initialize test buffer with random data ****// for( unsigned int i = 0; i < TEST_MEM_SIZE_OVERLAP; ++i) { buffer[i] = i; } /***************/ /***** GPU *****/ /***************/ //** MD5 timing **// CUT_SAFE_CALL(cutResetTimer(timer)); CUT_SAFE_CALL(cutStartTimer(timer)); /* run GPU version */ sg_md5_overlap(buffer, TEST_MEM_SIZE_OVERLAP, CHUNK_SIZE, OFFSET, &sg_output, &sg_output_size, &gpu_time_breakdown); /* stop the timer */ CUT_SAFE_CALL(cutStopTimer(timer)); print_gpu_time_breakdown( &gpu_time_breakdown, host_input_buffer_alloc_time, gpu_init_time ); printf( "GPU Proc. 
Time (gpu init and input alloc are not included): %f \n", cutGetTimerValue(timer)); /***************/ /***** CPU *****/ /***************/ //**** start timer for cpu ****// CUT_SAFE_CALL( cutResetTimer( timer ) ); CUT_SAFE_CALL( cutStartTimer( timer ) ); //**** run CPU version ****// sc_md5_overlap(buffer, TEST_MEM_SIZE_OVERLAP, CHUNK_SIZE, OFFSET, &sc_output, &sc_output_size); //**** stop the timer ****// CUT_SAFE_CALL( cutStopTimer( timer)); printf( "CPU Processing time(ms): %f \n", cutGetTimerValue( timer)); if(sc_output_size != sg_output_size){ printf( "\nGPU and CPU didn't converse to the same output size:\n"); printf( "\nGPU output size: %d\n", sg_output_size); printf( "\nCPU output size: %d\n", sc_output_size); } else { printf( "\nOutput size: %d\n", sc_output_size); } //**** check if the results are equivalent ****// CUTBoolean res = cutCompareub( sg_output, sc_output, sg_output_size); //**** print the results ****// printf( "Test %s\n", (1 == res) ? "PASSED" : "FAILED"); printf("CPU GPU\n"); for ( int i = sc_output_size - 4; i < sc_output_size; i++) { printf("%X %X\n",sc_output[i], sg_output[i]); } sg_free(buffer); //sg_free(sg_output); hipHostFree(sg_output ); free(sc_output); } /*=========================================================================== FUNCTION run_sha1_overlap_test DESCRIPTION run the test DEPENDENCIES None RETURN VALUE None ===========================================================================*/ void run_sha1_overlap_test( ) { //**** Variables ****// float host_input_buffer_alloc_time, gpu_init_time; sg_time_breakdown_type gpu_time_breakdown; unsigned char* sc_output; unsigned char* sg_output; unsigned char* buffer; unsigned int timer; int sg_output_size; int sc_output_size; printf( "SHA1 Overlap Test\n\n" ); //**** create the timer ****// timer = 0; CUT_SAFE_CALL( cutCreateTimer( &timer)); //** GPU initialization timing **// CUT_SAFE_CALL(cutResetTimer(timer)); CUT_SAFE_CALL(cutStartTimer(timer)); /* GPU device initialization */ sg_init(); /* stop the timer */ CUT_SAFE_CALL(cutStopTimer(timer)); gpu_init_time = cutGetTimerValue(timer); //**** Host input buffer allocation timing ****// CUT_SAFE_CALL(cutResetTimer(timer)); CUT_SAFE_CALL(cutStartTimer(timer)); /* allocate test buffer */ buffer = (unsigned char*) sg_malloc(TEST_MEM_SIZE_OVERLAP); /* stop the timer */ CUT_SAFE_CALL(cutStopTimer(timer)); host_input_buffer_alloc_time = cutGetTimerValue(timer); //**** initialize test buffer with random data ****// for( unsigned int i = 0; i < TEST_MEM_SIZE_OVERLAP; ++i) { buffer[i] = i; } /***************/ /***** GPU *****/ /***************/ //** SHA1 timing **// CUT_SAFE_CALL(cutResetTimer(timer)); CUT_SAFE_CALL(cutStartTimer(timer)); /* run GPU version */ sg_sha1_overlap(buffer, TEST_MEM_SIZE_OVERLAP, CHUNK_SIZE, OFFSET, &sg_output, &sg_output_size, &gpu_time_breakdown); /* stop the timer */ CUT_SAFE_CALL(cutStopTimer(timer)); print_gpu_time_breakdown( &gpu_time_breakdown, host_input_buffer_alloc_time, gpu_init_time ); printf( "GPU Proc. 
Time (gpu init and input alloc are not included): %f \n", cutGetTimerValue(timer)); /***************/ /***** CPU *****/ /***************/ //**** start timer for cpu ****// CUT_SAFE_CALL( cutResetTimer( timer ) ); CUT_SAFE_CALL( cutStartTimer( timer ) ); //**** run CPU version ****// sc_sha1_overlap(buffer, TEST_MEM_SIZE_OVERLAP, CHUNK_SIZE, OFFSET, &sc_output, &sc_output_size); //**** stop the timer ****// CUT_SAFE_CALL( cutStopTimer( timer)); printf( "CPU Processing time(ms): %f \n", cutGetTimerValue( timer)); if(sc_output_size != sg_output_size){ printf( "\nGPU and CPU didn't converse to the same output size:\n"); printf( "\nGPU output size: %d\n", sg_output_size); printf( "\nCPU output size: %d\n", sc_output_size); } else { printf( "\nOutput size: %d\n", sc_output_size); } //**** check if the results are equivalent ****// CUTBoolean res = cutCompareub( sg_output, sc_output, sg_output_size); //**** print the results ****// printf( "Test %s\n", (1 == res) ? "PASSED" : "FAILED"); printf("CPU GPU\n"); for ( int i = sc_output_size - 4; i < sc_output_size; i++) { printf("%X %X\n",sc_output[i], sg_output[i]); } sg_free(buffer); //sg_free(sg_output); hipHostFree(sg_output ); free(sc_output); } #else /* FEATURE_RUN_OVERLAP_TEST */ /*=========================================================================== FUNCTION run_md5_test DESCRIPTION run the test DEPENDENCIES None RETURN VALUE None ===========================================================================*/ void run_md5_test( ) { //**** Variables ****// unsigned char *sc_output, *sc_single_output; unsigned char *sg_output; unsigned char *buffer; unsigned int timer; int sg_output_size; int sc_output_size; float host_input_buffer_alloc_time, gpu_init_time; sg_time_breakdown_type gpu_time_breakdown; printf( "MD5 Test\n\n" ); //**** create the timer ****// timer = 0; CUT_SAFE_CALL( cutCreateTimer( &timer)); //** GPU initialization timing **// CUT_SAFE_CALL(cutResetTimer(timer)); CUT_SAFE_CALL(cutStartTimer(timer)); /* GPU device initialization */ sg_init(); /* stop the timer */ CUT_SAFE_CALL(cutStopTimer(timer)); gpu_init_time = cutGetTimerValue(timer); //**** Host input buffer allocation timing ****// CUT_SAFE_CALL(cutResetTimer(timer)); CUT_SAFE_CALL(cutStartTimer(timer)); /* allocate test buffer */ buffer = (unsigned char*) sg_malloc(TEST_MEM_SIZE); /* stop the timer */ CUT_SAFE_CALL(cutStopTimer(timer)); host_input_buffer_alloc_time = cutGetTimerValue(timer); //**** initialize test buffer with random data ****// for( unsigned int i = 0; i < TEST_MEM_SIZE; ++i) { buffer[i] = i; } /***************/ /***** GPU *****/ /***************/ //** MD5 timing **// CUT_SAFE_CALL(cutResetTimer(timer)); CUT_SAFE_CALL(cutStartTimer(timer)); /* run GPU version */ sg_md5(buffer, TEST_MEM_SIZE, &sg_output, &sg_output_size, &gpu_time_breakdown); /* stop the timer */ CUT_SAFE_CALL(cutStopTimer(timer)); print_gpu_time_breakdown( &gpu_time_breakdown, host_input_buffer_alloc_time, gpu_init_time ); printf( "GPU Proc. 
Time (gpu init and input alloc are not included): %f \n", cutGetTimerValue(timer)); /***************/ /***** CPU *****/ /***************/ //**** start timer for cpu ****// CUT_SAFE_CALL( cutResetTimer( timer ) ); CUT_SAFE_CALL( cutStartTimer( timer ) ); //**** run CPU version ****// sc_md5(buffer, TEST_MEM_SIZE, &sc_output, &sc_output_size); //**** stop the timer ****// CUT_SAFE_CALL( cutStopTimer( timer)); printf( "CPU Processing time(ms): %f \n", cutGetTimerValue( timer)); /*****************************/ /***** CPU Single Thread *****/ /*****************************/ //**** start timer for single thread cpu ****// CUT_SAFE_CALL( cutResetTimer( timer ) ); CUT_SAFE_CALL( cutStartTimer( timer ) ); //**** run Single Thread CPU version ****// sc_md5_standard(buffer, TEST_MEM_SIZE, &sc_single_output); //**** stop the timer ****// CUT_SAFE_CALL( cutStopTimer( timer)); printf( "CPU Single Thread Processing time(ms): %f \n", cutGetTimerValue( timer)); if(sc_output_size != sg_output_size){ printf( "\nGPU and CPU didn't converse to the same output size:\n"); printf( "\nGPU output size: %d\n", sg_output_size); printf( "\nCPU output size: %d\n", sc_output_size); } else { printf( "\nOutput size: %d\n", sc_output_size); } //**** check if the results are equivalent ****// CUTBoolean res = cutCompareub( sg_output, sc_output, sg_output_size); //**** print the results ****// printf( "Test %s\n", (1 == res) ? "PASSED" : "FAILED"); printf("CPU GPU\n"); for ( int i = sc_output_size - 4; i < sc_output_size; i++) { printf("%X %X\n",sc_output[i], sg_output[i]); } sg_free(buffer); free(sg_output); /* We dont need to free this using sg_free, it will always be allocated using malloc. will try to come up with a cleaner way to make things more clear. */ free(sc_output); } /*=========================================================================== FUNCTION run_sha1_test DESCRIPTION run the sha1 test DEPENDENCIES None RETURN VALUE None ===========================================================================*/ void run_sha1_test( ) { //**** Variables ****// unsigned char *sc_output, *sc_single_output; unsigned char *sg_output; unsigned char *buffer; unsigned int timer; int sg_output_size; int sc_output_size; float host_input_buffer_alloc_time, gpu_init_time; sg_time_breakdown_type gpu_time_breakdown; printf( "SHA1 Test\n\n" ); //**** create the timer ****// timer = 0; CUT_SAFE_CALL( cutCreateTimer( &timer)); //** GPU initialization timing **// CUT_SAFE_CALL(cutResetTimer(timer)); CUT_SAFE_CALL(cutStartTimer(timer)); /* GPU device initialization */ sg_init(); /* stop the timer */ CUT_SAFE_CALL(cutStopTimer(timer)); gpu_init_time = cutGetTimerValue(timer); //**** Host input buffer allocation timing ****// CUT_SAFE_CALL(cutResetTimer(timer)); CUT_SAFE_CALL(cutStartTimer(timer)); /* allocate test buffer */ buffer = (unsigned char*) sg_malloc(TEST_MEM_SIZE); /* stop the timer */ CUT_SAFE_CALL(cutStopTimer(timer)); host_input_buffer_alloc_time = cutGetTimerValue(timer); //**** initialize test buffer with random data ****// for( unsigned int i = 0; i < TEST_MEM_SIZE; ++i) { buffer[i] = i; } /***************/ /***** GPU *****/ /***************/ //** SHA1 timing **// CUT_SAFE_CALL(cutResetTimer(timer)); CUT_SAFE_CALL(cutStartTimer(timer)); /* run GPU version */ sg_sha1(buffer, TEST_MEM_SIZE, &sg_output, &sg_output_size, &gpu_time_breakdown); /* stop the timer */ CUT_SAFE_CALL(cutStopTimer(timer)); print_gpu_time_breakdown( &gpu_time_breakdown, host_input_buffer_alloc_time, gpu_init_time ); printf( "GPU Proc. 
Time (gpu init and input alloc are not included): %f \n", cutGetTimerValue(timer)); /***************/ /***** CPU *****/ /***************/ //**** start timer for cpu ****// CUT_SAFE_CALL( cutResetTimer( timer ) ); CUT_SAFE_CALL( cutStartTimer( timer ) ); //**** run CPU version ****// sc_sha1(buffer, TEST_MEM_SIZE, &sc_output, &sc_output_size); //**** stop the timer ****// CUT_SAFE_CALL( cutStopTimer( timer)); printf( "CPU Processing time(ms): %f \n", cutGetTimerValue( timer)); /*****************************/ /***** CPU Single Thread *****/ /*****************************/ //**** start timer for single thread cpu ****// CUT_SAFE_CALL( cutResetTimer( timer ) ); CUT_SAFE_CALL( cutStartTimer( timer ) ); //**** run Single Thread CPU version ****// sc_sha1_standard(buffer, TEST_MEM_SIZE, &sc_single_output); //**** stop the timer ****// CUT_SAFE_CALL( cutStopTimer( timer)); printf( "CPU Single Thread Processing time(ms): %f \n", cutGetTimerValue( timer)); if(sc_output_size != sg_output_size){ printf( "\nGPU and CPU didn't converse to the same output size:\n"); printf( "\nGPU output size: %d\n", sg_output_size); printf( "\nCPU output size: %d\n", sc_output_size); } else { printf( "\nOutput size: %d\n", sc_output_size); } //**** check if the results are equivalent ****// CUTBoolean res = cutCompareub( sg_output, sc_output, sg_output_size); //**** print the results ****// printf( "Test %s\n", (1 == res) ? "PASSED" : "FAILED"); printf("CPU GPU\n"); for ( int i = sc_output_size - 4; i < sc_output_size; i++) { printf("%X %X\n",sc_output[i], sg_output[i]); } sg_free(buffer); free(sg_output); /* We dont need to free this using sg_free, it will always be allocated using malloc. will try to come up with a cleaner way to make things more clear. */ free(sc_output); } /* void run_sha1_test( ) { */ /* //\**** Variables ****\// */ /* unsigned int timer = 0; */ /* unsigned char* buffer; */ /* unsigned char* sg_output; */ /* int sg_output_size; */ /* unsigned char *sc_output, *sc_single_output; */ /* int sc_output_size; */ /* printf( "SHA1 Test\n\n" ); */ /* //\**** host memory management ****\// */ /* // allocate test buffer */ /* buffer = (unsigned char*) sg_malloc(TEST_MEM_SIZE); */ /* //\**** initialize test buffer with random data ****\// */ /* for( unsigned int i = 0; i < TEST_MEM_SIZE; ++i) { */ /* buffer[i] = i; */ /* } */ /* //\**** create the timer ****\// */ /* timer = 0; */ /* CUT_SAFE_CALL( cutCreateTimer( &timer)); */ /* /\***************\/ */ /* /\***** GPU *****\/ */ /* /\***************\/ */ /* //\**** start timer for GPU timing ****\// */ /* CUT_SAFE_CALL( cutResetTimer(timer) ); */ /* CUT_SAFE_CALL( cutStartTimer( timer)); */ /* //\**** run GPU version ****\// */ /* sg_sha1(buffer, TEST_MEM_SIZE, &sg_output, &sg_output_size); */ /* //\**** stop the timer ****\// */ /* CUT_SAFE_CALL( cutStopTimer( timer)); */ /* //\**** print results ****\// */ /* printf( "GPU Processing time(ms): %f \n", cutGetTimerValue( timer)); */ /* /\***************\/ */ /* /\***** CPU *****\/ */ /* /\***************\/ */ /* //\**** start timer for cpu ****\// */ /* CUT_SAFE_CALL( cutResetTimer( timer ) ); */ /* CUT_SAFE_CALL( cutStartTimer( timer ) ); */ /* //\**** run CPU version ****\// */ /* sc_sha1(buffer, TEST_MEM_SIZE, &sc_output, &sc_output_size); */ /* //\**** stop the timer ****\// */ /* CUT_SAFE_CALL( cutStopTimer( timer)); */ /* printf( "CPU Processing time(ms): %f \n", cutGetTimerValue( timer)); */ /* /\*****************************\/ */ /* /\***** CPU Single Thread *****\/ */ /* 
/\*****************************\/ */ /* //\**** start timer for single thread cpu ****\// */ /* CUT_SAFE_CALL( cutResetTimer( timer ) ); */ /* CUT_SAFE_CALL( cutStartTimer( timer ) ); */ /* //\**** run CPU version ****\// */ /* sc_sha1_standard(buffer, TEST_MEM_SIZE, &sc_single_output); */ /* //\**** stop the timer ****\// */ /* CUT_SAFE_CALL( cutStopTimer( timer)); */ /* printf( "CPU Single Thread Processing time(ms): %f \n", */ /* cutGetTimerValue( timer)); */ /* if(sc_output_size != sg_output_size){ */ /* printf( "\nGPU and CPU didn't converse to the same output size:\n"); */ /* printf( "\nGPU output size: %d\n", sg_output_size); */ /* printf( "\nCPU output size: %d\n", sc_output_size); */ /* } else { */ /* printf( "\nOutput size: %d\n", sc_output_size); */ /* } */ /* //\**** check if the results are equivalent ****\// */ /* CUTBoolean res = cutCompareub( sg_output, */ /* sc_output, */ /* sg_output_size); */ /* //\**** print the results ****\// */ /* printf( "Test %s\n", (1 == res) ? "PASSED" : "FAILED"); */ /* printf("CPU GPU\n"); */ /* for ( int i = sc_output_size - 4; i < sc_output_size; i++) { */ /* printf("%X %X\n",sc_output[i], sg_output[i]); */ /* } */ /* sg_free(buffer); */ /* free(sg_output); /\* We dont need to free this using sg_free, it will always be */ /* allocated using malloc. I will try to come up with a */ /* cleaner way to make things more clear. *\/ */ /* free(sc_output); */ /* } */ #endif /* FEATURE_RUN_OVERLAP_TEST */ /*-------------------------------------------------------------------------- GLOBAL FUNCTIONS --------------------------------------------------------------------------*/ /*=========================================================================== FUNCTION main DESCRIPTION main entry of the program DEPENDENCIES None RETURN VALUE None ===========================================================================*/ int main( int argc, char** argv) { #ifdef FEATURE_SHARED_MEMORY printf("Shared Memory Enabled\n"); #endif #ifdef FEATURE_PINNED_MODE printf("Pinned Memory Enabled\n"); #endif #ifdef FEATURE_REDUCED_HASH_SIZE printf("Reduced Hash Size Enabled\n"); #endif #ifdef FEATURE_RUN_OVERLAP_TEST #ifdef FEATURE_RUN_SHA1 printf("Running SHA1 Overlap Test..\n"); run_sha1_overlap_test( ); #else printf("Running MD5 Overlap Test..\n"); run_md5_overlap_test( ); #endif // FEATURE_RUN_SHA1 #else #ifdef FEATURE_RUN_SHA1 printf("Running SHA1 Non-Overlap Test..\n"); run_sha1_test( ); #else printf("Running MD5 Non-Overlap Test..\n"); run_md5_test( ); #endif // FEATURE_RUN_SHA1 #endif // FEATURE_RUN_OVERLAP_TEST }
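Between the hipified harness above and the CUDA original that follows, the overlap-test cleanup is the one spot where the translation is visible at a glance: the commented-out sg_free(sg_output) is replaced by hipHostFree(sg_output) here and by cudaFreeHost(sg_output) in the .cu file, presumably because sg_md5_overlap/sg_sha1_overlap hand back a pinned host buffer. A minimal, self-contained sketch of that pinned-allocation mapping; the buffer size and variable names are illustrative, not taken from storeGPU:

// Sketch only: pinned host allocation and release in the CUDA runtime API,
// with the hipify translation noted per call. Size and names are illustrative.
#include <cuda_runtime.h>
#include <stdio.h>

int main(void) {
  unsigned char *pinned = NULL;
  size_t nbytes = 1 << 20;

  // cudaMallocHost -> hipHostMalloc after hipify
  if (cudaMallocHost((void **)&pinned, nbytes) != cudaSuccess) {
    fprintf(stderr, "pinned allocation failed\n");
    return 1;
  }

  // fill the buffer the same way the test harness does
  for (size_t i = 0; i < nbytes; ++i) pinned[i] = (unsigned char)i;

  // cudaFreeHost -> hipHostFree after hipify (the call seen in the overlap tests)
  cudaFreeHost(pinned);
  return 0;
}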
a5e4bfe790b3c523008a6ee295037bca25f2b860.cu
/*========================================================================== M A I N * Copyright (c) 2008, NetSysLab at the University of British Columbia * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of the University nor the * names of its contributors may be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY NetSysLab ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL NetSysLab BE LIABLE FOR ANY * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. DESCRIPTION Main entry. ==========================================================================*/ /*========================================================================== INCLUDES ==========================================================================*/ // system #include <stdlib.h> #include <stdio.h> #include <string.h> // project #include <cust.h> #include <storeGPU.h> #include <storeCPU.h> /*========================================================================== DATA DECLARATIONS ==========================================================================*/ /*-------------------------------------------------------------------------- TYPE DEFINITIONS --------------------------------------------------------------------------*/ /*-------------------------------------------------------------------------- FUNCTION PROTOTYPES --------------------------------------------------------------------------*/ /*-------------------------------------------------------------------------- CONSTANTS --------------------------------------------------------------------------*/ /*-------------------------------------------------------------------------- GLOBAL VARIABLES --------------------------------------------------------------------------*/ /*-------------------------------------------------------------------------- MACROS --------------------------------------------------------------------------*/ /*========================================================================== FUNCTIONS ==========================================================================*/ /*-------------------------------------------------------------------------- GLOBAL FUNCTIONS --------------------------------------------------------------------------*/ /*=========================================================================== FUNCTION SG_PRINT_TIME_BREAKDOWN DESCRIPTION Prints out the given time breakdown parameter DEPENDENCIES None RETURN VALUE None 
===========================================================================*/ static void print_gpu_time_breakdown( sg_time_breakdown_type* time_breakdown, float input_buffer_alloc_time, float gpu_init_time ) { printf("\n== GPU Timing ==\n"); printf("GPU init : %f\n", gpu_init_time); printf("Host input buffer alloc : %f\n", input_buffer_alloc_time); printf("-----\n"); printf("Host output buffer alloc : %f\n", time_breakdown->host_output_buffer_alloc_time); printf("GPU memory alloc : %f\n", time_breakdown->device_mem_alloc_time); printf("Data copy in : %f\n", time_breakdown->copy_in_time); printf("Kernel execution : %f\n", time_breakdown->exec_time); printf("Data copy out : %f\n", time_breakdown->copy_out_time); printf("Last hasing stage : %f\n", time_breakdown->last_stage_time); } #ifdef FEATURE_RUN_OVERLAP_TEST /*=========================================================================== FUNCTION run_md5_overlap_test DESCRIPTION run the test DEPENDENCIES None RETURN VALUE None ===========================================================================*/ void run_md5_overlap_test( ) { //**** Variables ****// float host_input_buffer_alloc_time, gpu_init_time; sg_time_breakdown_type gpu_time_breakdown; unsigned char* sc_output; unsigned char* sg_output; unsigned char* buffer; unsigned int timer; int sg_output_size; int sc_output_size; printf( "MD5 Overlap Test\n\n" ); //**** create the timer ****// timer = 0; CUT_SAFE_CALL( cutCreateTimer( &timer)); //** GPU initialization timing **// CUT_SAFE_CALL(cutResetTimer(timer)); CUT_SAFE_CALL(cutStartTimer(timer)); /* GPU device initialization */ sg_init(); /* stop the timer */ CUT_SAFE_CALL(cutStopTimer(timer)); gpu_init_time = cutGetTimerValue(timer); //**** Host input buffer allocation timing ****// CUT_SAFE_CALL(cutResetTimer(timer)); CUT_SAFE_CALL(cutStartTimer(timer)); /* allocate test buffer */ buffer = (unsigned char*) sg_malloc(TEST_MEM_SIZE_OVERLAP); /* stop the timer */ CUT_SAFE_CALL(cutStopTimer(timer)); host_input_buffer_alloc_time = cutGetTimerValue(timer); //**** initialize test buffer with random data ****// for( unsigned int i = 0; i < TEST_MEM_SIZE_OVERLAP; ++i) { buffer[i] = i; } /***************/ /***** GPU *****/ /***************/ //** MD5 timing **// CUT_SAFE_CALL(cutResetTimer(timer)); CUT_SAFE_CALL(cutStartTimer(timer)); /* run GPU version */ sg_md5_overlap(buffer, TEST_MEM_SIZE_OVERLAP, CHUNK_SIZE, OFFSET, &sg_output, &sg_output_size, &gpu_time_breakdown); /* stop the timer */ CUT_SAFE_CALL(cutStopTimer(timer)); print_gpu_time_breakdown( &gpu_time_breakdown, host_input_buffer_alloc_time, gpu_init_time ); printf( "GPU Proc. 
Time (gpu init and input alloc are not included): %f \n", cutGetTimerValue(timer)); /***************/ /***** CPU *****/ /***************/ //**** start timer for cpu ****// CUT_SAFE_CALL( cutResetTimer( timer ) ); CUT_SAFE_CALL( cutStartTimer( timer ) ); //**** run CPU version ****// sc_md5_overlap(buffer, TEST_MEM_SIZE_OVERLAP, CHUNK_SIZE, OFFSET, &sc_output, &sc_output_size); //**** stop the timer ****// CUT_SAFE_CALL( cutStopTimer( timer)); printf( "CPU Processing time(ms): %f \n", cutGetTimerValue( timer)); if(sc_output_size != sg_output_size){ printf( "\nGPU and CPU didn't converse to the same output size:\n"); printf( "\nGPU output size: %d\n", sg_output_size); printf( "\nCPU output size: %d\n", sc_output_size); } else { printf( "\nOutput size: %d\n", sc_output_size); } //**** check if the results are equivalent ****// CUTBoolean res = cutCompareub( sg_output, sc_output, sg_output_size); //**** print the results ****// printf( "Test %s\n", (1 == res) ? "PASSED" : "FAILED"); printf("CPU GPU\n"); for ( int i = sc_output_size - 4; i < sc_output_size; i++) { printf("%X %X\n",sc_output[i], sg_output[i]); } sg_free(buffer); //sg_free(sg_output); cudaFreeHost(sg_output ); free(sc_output); } /*=========================================================================== FUNCTION run_sha1_overlap_test DESCRIPTION run the test DEPENDENCIES None RETURN VALUE None ===========================================================================*/ void run_sha1_overlap_test( ) { //**** Variables ****// float host_input_buffer_alloc_time, gpu_init_time; sg_time_breakdown_type gpu_time_breakdown; unsigned char* sc_output; unsigned char* sg_output; unsigned char* buffer; unsigned int timer; int sg_output_size; int sc_output_size; printf( "SHA1 Overlap Test\n\n" ); //**** create the timer ****// timer = 0; CUT_SAFE_CALL( cutCreateTimer( &timer)); //** GPU initialization timing **// CUT_SAFE_CALL(cutResetTimer(timer)); CUT_SAFE_CALL(cutStartTimer(timer)); /* GPU device initialization */ sg_init(); /* stop the timer */ CUT_SAFE_CALL(cutStopTimer(timer)); gpu_init_time = cutGetTimerValue(timer); //**** Host input buffer allocation timing ****// CUT_SAFE_CALL(cutResetTimer(timer)); CUT_SAFE_CALL(cutStartTimer(timer)); /* allocate test buffer */ buffer = (unsigned char*) sg_malloc(TEST_MEM_SIZE_OVERLAP); /* stop the timer */ CUT_SAFE_CALL(cutStopTimer(timer)); host_input_buffer_alloc_time = cutGetTimerValue(timer); //**** initialize test buffer with random data ****// for( unsigned int i = 0; i < TEST_MEM_SIZE_OVERLAP; ++i) { buffer[i] = i; } /***************/ /***** GPU *****/ /***************/ //** SHA1 timing **// CUT_SAFE_CALL(cutResetTimer(timer)); CUT_SAFE_CALL(cutStartTimer(timer)); /* run GPU version */ sg_sha1_overlap(buffer, TEST_MEM_SIZE_OVERLAP, CHUNK_SIZE, OFFSET, &sg_output, &sg_output_size, &gpu_time_breakdown); /* stop the timer */ CUT_SAFE_CALL(cutStopTimer(timer)); print_gpu_time_breakdown( &gpu_time_breakdown, host_input_buffer_alloc_time, gpu_init_time ); printf( "GPU Proc. 
Time (gpu init and input alloc are not included): %f \n", cutGetTimerValue(timer)); /***************/ /***** CPU *****/ /***************/ //**** start timer for cpu ****// CUT_SAFE_CALL( cutResetTimer( timer ) ); CUT_SAFE_CALL( cutStartTimer( timer ) ); //**** run CPU version ****// sc_sha1_overlap(buffer, TEST_MEM_SIZE_OVERLAP, CHUNK_SIZE, OFFSET, &sc_output, &sc_output_size); //**** stop the timer ****// CUT_SAFE_CALL( cutStopTimer( timer)); printf( "CPU Processing time(ms): %f \n", cutGetTimerValue( timer)); if(sc_output_size != sg_output_size){ printf( "\nGPU and CPU didn't converse to the same output size:\n"); printf( "\nGPU output size: %d\n", sg_output_size); printf( "\nCPU output size: %d\n", sc_output_size); } else { printf( "\nOutput size: %d\n", sc_output_size); } //**** check if the results are equivalent ****// CUTBoolean res = cutCompareub( sg_output, sc_output, sg_output_size); //**** print the results ****// printf( "Test %s\n", (1 == res) ? "PASSED" : "FAILED"); printf("CPU GPU\n"); for ( int i = sc_output_size - 4; i < sc_output_size; i++) { printf("%X %X\n",sc_output[i], sg_output[i]); } sg_free(buffer); //sg_free(sg_output); cudaFreeHost(sg_output ); free(sc_output); } #else /* FEATURE_RUN_OVERLAP_TEST */ /*=========================================================================== FUNCTION run_md5_test DESCRIPTION run the test DEPENDENCIES None RETURN VALUE None ===========================================================================*/ void run_md5_test( ) { //**** Variables ****// unsigned char *sc_output, *sc_single_output; unsigned char *sg_output; unsigned char *buffer; unsigned int timer; int sg_output_size; int sc_output_size; float host_input_buffer_alloc_time, gpu_init_time; sg_time_breakdown_type gpu_time_breakdown; printf( "MD5 Test\n\n" ); //**** create the timer ****// timer = 0; CUT_SAFE_CALL( cutCreateTimer( &timer)); //** GPU initialization timing **// CUT_SAFE_CALL(cutResetTimer(timer)); CUT_SAFE_CALL(cutStartTimer(timer)); /* GPU device initialization */ sg_init(); /* stop the timer */ CUT_SAFE_CALL(cutStopTimer(timer)); gpu_init_time = cutGetTimerValue(timer); //**** Host input buffer allocation timing ****// CUT_SAFE_CALL(cutResetTimer(timer)); CUT_SAFE_CALL(cutStartTimer(timer)); /* allocate test buffer */ buffer = (unsigned char*) sg_malloc(TEST_MEM_SIZE); /* stop the timer */ CUT_SAFE_CALL(cutStopTimer(timer)); host_input_buffer_alloc_time = cutGetTimerValue(timer); //**** initialize test buffer with random data ****// for( unsigned int i = 0; i < TEST_MEM_SIZE; ++i) { buffer[i] = i; } /***************/ /***** GPU *****/ /***************/ //** MD5 timing **// CUT_SAFE_CALL(cutResetTimer(timer)); CUT_SAFE_CALL(cutStartTimer(timer)); /* run GPU version */ sg_md5(buffer, TEST_MEM_SIZE, &sg_output, &sg_output_size, &gpu_time_breakdown); /* stop the timer */ CUT_SAFE_CALL(cutStopTimer(timer)); print_gpu_time_breakdown( &gpu_time_breakdown, host_input_buffer_alloc_time, gpu_init_time ); printf( "GPU Proc. 
Time (gpu init and input alloc are not included): %f \n", cutGetTimerValue(timer)); /***************/ /***** CPU *****/ /***************/ //**** start timer for cpu ****// CUT_SAFE_CALL( cutResetTimer( timer ) ); CUT_SAFE_CALL( cutStartTimer( timer ) ); //**** run CPU version ****// sc_md5(buffer, TEST_MEM_SIZE, &sc_output, &sc_output_size); //**** stop the timer ****// CUT_SAFE_CALL( cutStopTimer( timer)); printf( "CPU Processing time(ms): %f \n", cutGetTimerValue( timer)); /*****************************/ /***** CPU Single Thread *****/ /*****************************/ //**** start timer for single thread cpu ****// CUT_SAFE_CALL( cutResetTimer( timer ) ); CUT_SAFE_CALL( cutStartTimer( timer ) ); //**** run Single Thread CPU version ****// sc_md5_standard(buffer, TEST_MEM_SIZE, &sc_single_output); //**** stop the timer ****// CUT_SAFE_CALL( cutStopTimer( timer)); printf( "CPU Single Thread Processing time(ms): %f \n", cutGetTimerValue( timer)); if(sc_output_size != sg_output_size){ printf( "\nGPU and CPU didn't converse to the same output size:\n"); printf( "\nGPU output size: %d\n", sg_output_size); printf( "\nCPU output size: %d\n", sc_output_size); } else { printf( "\nOutput size: %d\n", sc_output_size); } //**** check if the results are equivalent ****// CUTBoolean res = cutCompareub( sg_output, sc_output, sg_output_size); //**** print the results ****// printf( "Test %s\n", (1 == res) ? "PASSED" : "FAILED"); printf("CPU GPU\n"); for ( int i = sc_output_size - 4; i < sc_output_size; i++) { printf("%X %X\n",sc_output[i], sg_output[i]); } sg_free(buffer); free(sg_output); /* We dont need to free this using sg_free, it will always be allocated using malloc. will try to come up with a cleaner way to make things more clear. */ free(sc_output); } /*=========================================================================== FUNCTION run_sha1_test DESCRIPTION run the sha1 test DEPENDENCIES None RETURN VALUE None ===========================================================================*/ void run_sha1_test( ) { //**** Variables ****// unsigned char *sc_output, *sc_single_output; unsigned char *sg_output; unsigned char *buffer; unsigned int timer; int sg_output_size; int sc_output_size; float host_input_buffer_alloc_time, gpu_init_time; sg_time_breakdown_type gpu_time_breakdown; printf( "SHA1 Test\n\n" ); //**** create the timer ****// timer = 0; CUT_SAFE_CALL( cutCreateTimer( &timer)); //** GPU initialization timing **// CUT_SAFE_CALL(cutResetTimer(timer)); CUT_SAFE_CALL(cutStartTimer(timer)); /* GPU device initialization */ sg_init(); /* stop the timer */ CUT_SAFE_CALL(cutStopTimer(timer)); gpu_init_time = cutGetTimerValue(timer); //**** Host input buffer allocation timing ****// CUT_SAFE_CALL(cutResetTimer(timer)); CUT_SAFE_CALL(cutStartTimer(timer)); /* allocate test buffer */ buffer = (unsigned char*) sg_malloc(TEST_MEM_SIZE); /* stop the timer */ CUT_SAFE_CALL(cutStopTimer(timer)); host_input_buffer_alloc_time = cutGetTimerValue(timer); //**** initialize test buffer with random data ****// for( unsigned int i = 0; i < TEST_MEM_SIZE; ++i) { buffer[i] = i; } /***************/ /***** GPU *****/ /***************/ //** SHA1 timing **// CUT_SAFE_CALL(cutResetTimer(timer)); CUT_SAFE_CALL(cutStartTimer(timer)); /* run GPU version */ sg_sha1(buffer, TEST_MEM_SIZE, &sg_output, &sg_output_size, &gpu_time_breakdown); /* stop the timer */ CUT_SAFE_CALL(cutStopTimer(timer)); print_gpu_time_breakdown( &gpu_time_breakdown, host_input_buffer_alloc_time, gpu_init_time ); printf( "GPU Proc. 
Time (gpu init and input alloc are not included): %f \n", cutGetTimerValue(timer)); /***************/ /***** CPU *****/ /***************/ //**** start timer for cpu ****// CUT_SAFE_CALL( cutResetTimer( timer ) ); CUT_SAFE_CALL( cutStartTimer( timer ) ); //**** run CPU version ****// sc_sha1(buffer, TEST_MEM_SIZE, &sc_output, &sc_output_size); //**** stop the timer ****// CUT_SAFE_CALL( cutStopTimer( timer)); printf( "CPU Processing time(ms): %f \n", cutGetTimerValue( timer)); /*****************************/ /***** CPU Single Thread *****/ /*****************************/ //**** start timer for single thread cpu ****// CUT_SAFE_CALL( cutResetTimer( timer ) ); CUT_SAFE_CALL( cutStartTimer( timer ) ); //**** run Single Thread CPU version ****// sc_sha1_standard(buffer, TEST_MEM_SIZE, &sc_single_output); //**** stop the timer ****// CUT_SAFE_CALL( cutStopTimer( timer)); printf( "CPU Single Thread Processing time(ms): %f \n", cutGetTimerValue( timer)); if(sc_output_size != sg_output_size){ printf( "\nGPU and CPU didn't converse to the same output size:\n"); printf( "\nGPU output size: %d\n", sg_output_size); printf( "\nCPU output size: %d\n", sc_output_size); } else { printf( "\nOutput size: %d\n", sc_output_size); } //**** check if the results are equivalent ****// CUTBoolean res = cutCompareub( sg_output, sc_output, sg_output_size); //**** print the results ****// printf( "Test %s\n", (1 == res) ? "PASSED" : "FAILED"); printf("CPU GPU\n"); for ( int i = sc_output_size - 4; i < sc_output_size; i++) { printf("%X %X\n",sc_output[i], sg_output[i]); } sg_free(buffer); free(sg_output); /* We dont need to free this using sg_free, it will always be allocated using malloc. will try to come up with a cleaner way to make things more clear. */ free(sc_output); } /* void run_sha1_test( ) { */ /* //\**** Variables ****\// */ /* unsigned int timer = 0; */ /* unsigned char* buffer; */ /* unsigned char* sg_output; */ /* int sg_output_size; */ /* unsigned char *sc_output, *sc_single_output; */ /* int sc_output_size; */ /* printf( "SHA1 Test\n\n" ); */ /* //\**** host memory management ****\// */ /* // allocate test buffer */ /* buffer = (unsigned char*) sg_malloc(TEST_MEM_SIZE); */ /* //\**** initialize test buffer with random data ****\// */ /* for( unsigned int i = 0; i < TEST_MEM_SIZE; ++i) { */ /* buffer[i] = i; */ /* } */ /* //\**** create the timer ****\// */ /* timer = 0; */ /* CUT_SAFE_CALL( cutCreateTimer( &timer)); */ /* /\***************\/ */ /* /\***** GPU *****\/ */ /* /\***************\/ */ /* //\**** start timer for GPU timing ****\// */ /* CUT_SAFE_CALL( cutResetTimer(timer) ); */ /* CUT_SAFE_CALL( cutStartTimer( timer)); */ /* //\**** run GPU version ****\// */ /* sg_sha1(buffer, TEST_MEM_SIZE, &sg_output, &sg_output_size); */ /* //\**** stop the timer ****\// */ /* CUT_SAFE_CALL( cutStopTimer( timer)); */ /* //\**** print results ****\// */ /* printf( "GPU Processing time(ms): %f \n", cutGetTimerValue( timer)); */ /* /\***************\/ */ /* /\***** CPU *****\/ */ /* /\***************\/ */ /* //\**** start timer for cpu ****\// */ /* CUT_SAFE_CALL( cutResetTimer( timer ) ); */ /* CUT_SAFE_CALL( cutStartTimer( timer ) ); */ /* //\**** run CPU version ****\// */ /* sc_sha1(buffer, TEST_MEM_SIZE, &sc_output, &sc_output_size); */ /* //\**** stop the timer ****\// */ /* CUT_SAFE_CALL( cutStopTimer( timer)); */ /* printf( "CPU Processing time(ms): %f \n", cutGetTimerValue( timer)); */ /* /\*****************************\/ */ /* /\***** CPU Single Thread *****\/ */ /* 
/\*****************************\/ */ /* //\**** start timer for single thread cpu ****\// */ /* CUT_SAFE_CALL( cutResetTimer( timer ) ); */ /* CUT_SAFE_CALL( cutStartTimer( timer ) ); */ /* //\**** run CPU version ****\// */ /* sc_sha1_standard(buffer, TEST_MEM_SIZE, &sc_single_output); */ /* //\**** stop the timer ****\// */ /* CUT_SAFE_CALL( cutStopTimer( timer)); */ /* printf( "CPU Single Thread Processing time(ms): %f \n", */ /* cutGetTimerValue( timer)); */ /* if(sc_output_size != sg_output_size){ */ /* printf( "\nGPU and CPU didn't converse to the same output size:\n"); */ /* printf( "\nGPU output size: %d\n", sg_output_size); */ /* printf( "\nCPU output size: %d\n", sc_output_size); */ /* } else { */ /* printf( "\nOutput size: %d\n", sc_output_size); */ /* } */ /* //\**** check if the results are equivalent ****\// */ /* CUTBoolean res = cutCompareub( sg_output, */ /* sc_output, */ /* sg_output_size); */ /* //\**** print the results ****\// */ /* printf( "Test %s\n", (1 == res) ? "PASSED" : "FAILED"); */ /* printf("CPU GPU\n"); */ /* for ( int i = sc_output_size - 4; i < sc_output_size; i++) { */ /* printf("%X %X\n",sc_output[i], sg_output[i]); */ /* } */ /* sg_free(buffer); */ /* free(sg_output); /\* We dont need to free this using sg_free, it will always be */ /* allocated using malloc. I will try to come up with a */ /* cleaner way to make things more clear. *\/ */ /* free(sc_output); */ /* } */ #endif /* FEATURE_RUN_OVERLAP_TEST */ /*-------------------------------------------------------------------------- GLOBAL FUNCTIONS --------------------------------------------------------------------------*/ /*=========================================================================== FUNCTION main DESCRIPTION main entry of the program DEPENDENCIES None RETURN VALUE None ===========================================================================*/ int main( int argc, char** argv) { #ifdef FEATURE_SHARED_MEMORY printf("Shared Memory Enabled\n"); #endif #ifdef FEATURE_PINNED_MODE printf("Pinned Memory Enabled\n"); #endif #ifdef FEATURE_REDUCED_HASH_SIZE printf("Reduced Hash Size Enabled\n"); #endif #ifdef FEATURE_RUN_OVERLAP_TEST #ifdef FEATURE_RUN_SHA1 printf("Running SHA1 Overlap Test..\n"); run_sha1_overlap_test( ); #else printf("Running MD5 Overlap Test..\n"); run_md5_overlap_test( ); #endif // FEATURE_RUN_SHA1 #else #ifdef FEATURE_RUN_SHA1 printf("Running SHA1 Non-Overlap Test..\n"); run_sha1_test( ); #else printf("Running MD5 Non-Overlap Test..\n"); run_md5_test( ); #endif // FEATURE_RUN_SHA1 #endif // FEATURE_RUN_OVERLAP_TEST }
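Both versions of the storeGPU harness time everything with the old SDK cutil helpers (cutCreateTimer, CUT_SAFE_CALL, cutGetTimerValue), which are not part of the CUDA or HIP runtime and no longer ship with current toolkits. As an alternative sketch only, not what storeGPU does, the same GPU-side measurement can be taken with runtime events, which hipify rewrites mechanically (cudaEvent_t to hipEvent_t and so on). The kernel below is a placeholder for the hashing work:

// Sketch only: event-based timing in place of the cutil timer pattern.
#include <cuda_runtime.h>
#include <stdio.h>

__global__ void busy_kernel(int *out) {
  int v = 0;
  for (int i = 0; i < 1000; ++i) v += i;      // stand-in workload, not storeGPU
  if (threadIdx.x == 0 && blockIdx.x == 0) *out = v;
}

int main(void) {
  int *d_out = NULL;
  cudaMalloc((void **)&d_out, sizeof(int));

  cudaEvent_t start, stop;                    // cudaEvent_t   -> hipEvent_t
  cudaEventCreate(&start);                    // cudaEventCreate -> hipEventCreate
  cudaEventCreate(&stop);

  cudaEventRecord(start, 0);                  // plays the role of cutStartTimer
  busy_kernel<<<64, 128>>>(d_out);
  cudaEventRecord(stop, 0);                   // plays the role of cutStopTimer
  cudaEventSynchronize(stop);

  float ms = 0.0f;
  cudaEventElapsedTime(&ms, start, stop);     // plays the role of cutGetTimerValue
  printf("GPU Processing time(ms): %f\n", ms);

  cudaEventDestroy(start);
  cudaEventDestroy(stop);
  cudaFree(d_out);
  return 0;
}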
f3852c42e9e7c541a9a0f02e8417f52faaa4e724.hip
// !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "addVector.hip"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;

int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};

int main(int argc, char **argv) {
  hipSetDevice(0);
  char* p;
  int matrix_len=strtol(argv[1], &p, 10);
  for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
    for(int block_looper=0;block_looper<20;block_looper++){
      int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
      int *d1_in = NULL;
      hipMalloc(&d1_in, XSIZE*YSIZE);
      int *d2_in = NULL;
      hipMalloc(&d2_in, XSIZE*YSIZE);
      int *d_out = NULL;
      hipMalloc(&d_out, XSIZE*YSIZE);
      int n = XSIZE*YSIZE;
      int iXSIZE= XSIZE;
      int iYSIZE= YSIZE;
      while(iXSIZE%BLOCKX!=0) { iXSIZE++; }
      while(iYSIZE%BLOCKY!=0) { iYSIZE++; }
      dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
      dim3 threadBlock(BLOCKX, BLOCKY);
      hipFree(0);
      hipLaunchKernelGGL(( addVector), dim3(gridBlock),dim3(threadBlock), 0, 0, d1_in,d2_in,d_out,n);
      hipDeviceSynchronize();
      for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
        hipLaunchKernelGGL(( addVector), dim3(gridBlock),dim3(threadBlock), 0, 0, d1_in,d2_in,d_out,n);
      }
      auto start = steady_clock::now();
      for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
        hipLaunchKernelGGL(( addVector), dim3(gridBlock),dim3(threadBlock), 0, 0, d1_in,d2_in,d_out,n);
      }
      auto end = steady_clock::now();
      auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
      cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
    }
  }
}
f3852c42e9e7c541a9a0f02e8417f52faaa4e724.cu
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "addVector.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;

int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};

int main(int argc, char **argv) {
  cudaSetDevice(0);
  char* p;
  int matrix_len=strtol(argv[1], &p, 10);
  for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
    for(int block_looper=0;block_looper<20;block_looper++){
      int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
      int *d1_in = NULL;
      cudaMalloc(&d1_in, XSIZE*YSIZE);
      int *d2_in = NULL;
      cudaMalloc(&d2_in, XSIZE*YSIZE);
      int *d_out = NULL;
      cudaMalloc(&d_out, XSIZE*YSIZE);
      int n = XSIZE*YSIZE;
      int iXSIZE= XSIZE;
      int iYSIZE= YSIZE;
      while(iXSIZE%BLOCKX!=0) { iXSIZE++; }
      while(iYSIZE%BLOCKY!=0) { iYSIZE++; }
      dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
      dim3 threadBlock(BLOCKX, BLOCKY);
      cudaFree(0);
      addVector<<<gridBlock,threadBlock>>>(d1_in,d2_in,d_out,n);
      cudaDeviceSynchronize();
      for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
        addVector<<<gridBlock,threadBlock>>>(d1_in,d2_in,d_out,n);
      }
      auto start = steady_clock::now();
      for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
        addVector<<<gridBlock,threadBlock>>>(d1_in,d2_in,d_out,n);
      }
      auto end = steady_clock::now();
      auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
      cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
    }
  }
}
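This pair is the cleanest before/after for the launch syntax itself: every addVector<<<gridBlock,threadBlock>>>(...) in the .cu file becomes hipLaunchKernelGGL(addVector, dim3(gridBlock), dim3(threadBlock), 0, 0, ...) in the .hip file, with the two extra arguments filling the dynamic-shared-memory and stream slots. addVector itself lives in the included addVector.cu / addVector.hip and is not part of this dump, so the sketch below substitutes a kernel with the obvious signature:

// Sketch: the same launch written both ways. Compiled as CUDA here; the
// commented line is the HIP spelling (cf. the .hip file above).
#include <cuda_runtime.h>
#include <stdio.h>

__global__ void add_vector(const int *a, const int *b, int *c, int n) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;   // stand-in for the real addVector
  if (i < n) c[i] = a[i] + b[i];
}

int main(void) {
  const int n = 1 << 16;
  int *a, *b, *c;
  cudaMalloc(&a, n * sizeof(int));
  cudaMalloc(&b, n * sizeof(int));
  cudaMalloc(&c, n * sizeof(int));

  dim3 grid((n + 255) / 256), block(256);

  // CUDA spelling (as in the .cu file):
  add_vector<<<grid, block>>>(a, b, c, n);

  // HIP spelling after hipify (shared-memory bytes = 0, default stream = 0):
  // hipLaunchKernelGGL(add_vector, dim3(grid), dim3(block), 0, 0, a, b, c, n);

  cudaDeviceSynchronize();
  printf("launched %d elements\n", n);
  cudaFree(a); cudaFree(b); cudaFree(c);
  return 0;
}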
785790baed4431cb51b750813d1f1d650128ffc4.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // // auto-generated by ops.py // __constant__ int xdim0_revert_kernel; int xdim0_revert_kernel_h = -1; __constant__ int ydim0_revert_kernel; int ydim0_revert_kernel_h = -1; __constant__ int xdim1_revert_kernel; int xdim1_revert_kernel_h = -1; __constant__ int ydim1_revert_kernel; int ydim1_revert_kernel_h = -1; __constant__ int xdim2_revert_kernel; int xdim2_revert_kernel_h = -1; __constant__ int ydim2_revert_kernel; int ydim2_revert_kernel_h = -1; __constant__ int xdim3_revert_kernel; int xdim3_revert_kernel_h = -1; __constant__ int ydim3_revert_kernel; int ydim3_revert_kernel_h = -1; #undef OPS_ACC0 #undef OPS_ACC1 #undef OPS_ACC2 #undef OPS_ACC3 #define OPS_ACC0(x, y, z) \ (x + xdim0_revert_kernel * (y) + \ xdim0_revert_kernel * ydim0_revert_kernel * (z)) #define OPS_ACC1(x, y, z) \ (x + xdim1_revert_kernel * (y) + \ xdim1_revert_kernel * ydim1_revert_kernel * (z)) #define OPS_ACC2(x, y, z) \ (x + xdim2_revert_kernel * (y) + \ xdim2_revert_kernel * ydim2_revert_kernel * (z)) #define OPS_ACC3(x, y, z) \ (x + xdim3_revert_kernel * (y) + \ xdim3_revert_kernel * ydim3_revert_kernel * (z)) // user function __device__ void revert_kernel_gpu(const double *density0, double *density1, const double *energy0, double *energy1) { density1[OPS_ACC1(0, 0, 0)] = density0[OPS_ACC0(0, 0, 0)]; energy1[OPS_ACC3(0, 0, 0)] = energy0[OPS_ACC2(0, 0, 0)]; } #undef OPS_ACC0 #undef OPS_ACC1 #undef OPS_ACC2 #undef OPS_ACC3 __global__ void ops_revert_kernel(const double *__restrict arg0, double *__restrict arg1, const double *__restrict arg2, double *__restrict arg3, int size0, int size1, int size2) { int idx_z = blockDim.z * blockIdx.z + threadIdx.z; int idx_y = blockDim.y * blockIdx.y + threadIdx.y; int idx_x = blockDim.x * blockIdx.x + threadIdx.x; arg0 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim0_revert_kernel + idx_z * 1 * 1 * xdim0_revert_kernel * ydim0_revert_kernel; arg1 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim1_revert_kernel + idx_z * 1 * 1 * xdim1_revert_kernel * ydim1_revert_kernel; arg2 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim2_revert_kernel + idx_z * 1 * 1 * xdim2_revert_kernel * ydim2_revert_kernel; arg3 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim3_revert_kernel + idx_z * 1 * 1 * xdim3_revert_kernel * ydim3_revert_kernel; if (idx_x < size0 && idx_y < size1 && idx_z < size2) { revert_kernel_gpu(arg0, arg1, arg2, arg3); } } // host stub function #ifndef OPS_LAZY void ops_par_loop_revert_kernel(char const *name, ops_block block, int dim, int *range, ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3) { #else void ops_par_loop_revert_kernel_execute(ops_kernel_descriptor *desc) { int dim = desc->dim; int *range = desc->range; ops_arg arg0 = desc->args[0]; ops_arg arg1 = desc->args[1]; ops_arg arg2 = desc->args[2]; ops_arg arg3 = desc->args[3]; #endif // Timing double t1, t2, c1, c2; ops_arg args[4] = {arg0, arg1, arg2, arg3}; #if CHECKPOINTING && !OPS_LAZY if (!ops_checkpointing_before(args, 4, range, 104)) return; #endif if (OPS_diags > 1) { ops_timing_realloc(104, "revert_kernel"); OPS_kernels[104].count++; ops_timers_core(&c1, &t1); } // compute locally allocated range for the sub-block int start[3]; int end[3]; #if OPS_MPI && !OPS_LAZY sub_block_list sb = OPS_sub_block_list[block->index]; if (!sb->owned) return; for (int n = 0; n < 3; n++) { start[n] = sb->decomp_disp[n]; end[n] = sb->decomp_disp[n] + sb->decomp_size[n]; if (start[n] >= range[2 * n]) { start[n] = 0; } else { start[n] = range[2 * n] - start[n]; } if 
(sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0) start[n] = range[2 * n]; if (end[n] >= range[2 * n + 1]) { end[n] = range[2 * n + 1] - sb->decomp_disp[n]; } else { end[n] = sb->decomp_size[n]; } if (sb->id_p[n] == MPI_PROC_NULL && (range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n])) end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]); } #else for (int n = 0; n < 3; n++) { start[n] = range[2 * n]; end[n] = range[2 * n + 1]; } #endif int x_size = MAX(0, end[0] - start[0]); int y_size = MAX(0, end[1] - start[1]); int z_size = MAX(0, end[2] - start[2]); int xdim0 = args[0].dat->size[0]; int ydim0 = args[0].dat->size[1]; int xdim1 = args[1].dat->size[0]; int ydim1 = args[1].dat->size[1]; int xdim2 = args[2].dat->size[0]; int ydim2 = args[2].dat->size[1]; int xdim3 = args[3].dat->size[0]; int ydim3 = args[3].dat->size[1]; if (xdim0 != xdim0_revert_kernel_h || ydim0 != ydim0_revert_kernel_h || xdim1 != xdim1_revert_kernel_h || ydim1 != ydim1_revert_kernel_h || xdim2 != xdim2_revert_kernel_h || ydim2 != ydim2_revert_kernel_h || xdim3 != xdim3_revert_kernel_h || ydim3 != ydim3_revert_kernel_h) { hipMemcpyToSymbol(xdim0_revert_kernel, &xdim0, sizeof(int)); xdim0_revert_kernel_h = xdim0; hipMemcpyToSymbol(ydim0_revert_kernel, &ydim0, sizeof(int)); ydim0_revert_kernel_h = ydim0; hipMemcpyToSymbol(xdim1_revert_kernel, &xdim1, sizeof(int)); xdim1_revert_kernel_h = xdim1; hipMemcpyToSymbol(ydim1_revert_kernel, &ydim1, sizeof(int)); ydim1_revert_kernel_h = ydim1; hipMemcpyToSymbol(xdim2_revert_kernel, &xdim2, sizeof(int)); xdim2_revert_kernel_h = xdim2; hipMemcpyToSymbol(ydim2_revert_kernel, &ydim2, sizeof(int)); ydim2_revert_kernel_h = ydim2; hipMemcpyToSymbol(xdim3_revert_kernel, &xdim3, sizeof(int)); xdim3_revert_kernel_h = xdim3; hipMemcpyToSymbol(ydim3_revert_kernel, &ydim3, sizeof(int)); ydim3_revert_kernel_h = ydim3; } dim3 grid((x_size - 1) / OPS_block_size_x + 1, (y_size - 1) / OPS_block_size_y + 1, z_size); dim3 tblock(OPS_block_size_x, OPS_block_size_y, 1); int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size); int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size); int dat2 = (OPS_soa ? args[2].dat->type_size : args[2].dat->elem_size); int dat3 = (OPS_soa ? 
args[3].dat->type_size : args[3].dat->elem_size); char *p_a[4]; // set up initial pointers int base0 = args[0].dat->base_offset + dat0 * 1 * (start[0] * args[0].stencil->stride[0]); base0 = base0 + dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1]); base0 = base0 + dat0 * args[0].dat->size[0] * args[0].dat->size[1] * (start[2] * args[0].stencil->stride[2]); p_a[0] = (char *)args[0].data_d + base0; int base1 = args[1].dat->base_offset + dat1 * 1 * (start[0] * args[1].stencil->stride[0]); base1 = base1 + dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1]); base1 = base1 + dat1 * args[1].dat->size[0] * args[1].dat->size[1] * (start[2] * args[1].stencil->stride[2]); p_a[1] = (char *)args[1].data_d + base1; int base2 = args[2].dat->base_offset + dat2 * 1 * (start[0] * args[2].stencil->stride[0]); base2 = base2 + dat2 * args[2].dat->size[0] * (start[1] * args[2].stencil->stride[1]); base2 = base2 + dat2 * args[2].dat->size[0] * args[2].dat->size[1] * (start[2] * args[2].stencil->stride[2]); p_a[2] = (char *)args[2].data_d + base2; int base3 = args[3].dat->base_offset + dat3 * 1 * (start[0] * args[3].stencil->stride[0]); base3 = base3 + dat3 * args[3].dat->size[0] * (start[1] * args[3].stencil->stride[1]); base3 = base3 + dat3 * args[3].dat->size[0] * args[3].dat->size[1] * (start[2] * args[3].stencil->stride[2]); p_a[3] = (char *)args[3].data_d + base3; #ifndef OPS_LAZY ops_H_D_exchanges_device(args, 4); ops_halo_exchanges(args, 4, range); #endif if (OPS_diags > 1) { ops_timers_core(&c2, &t2); OPS_kernels[104].mpi_time += t2 - t1; } // call kernel wrapper function, passing in pointers to data hipLaunchKernelGGL(( ops_revert_kernel), dim3(grid), dim3(tblock), 0, 0, (double *)p_a[0], (double *)p_a[1], (double *)p_a[2], (double *)p_a[3], x_size, y_size, z_size); cutilSafeCall(hipGetLastError()); if (OPS_diags > 1) { cutilSafeCall(hipDeviceSynchronize()); ops_timers_core(&c1, &t1); OPS_kernels[104].time += t1 - t2; } #ifndef OPS_LAZY ops_set_dirtybit_device(args, 4); ops_set_halo_dirtybit3(&args[1], range); ops_set_halo_dirtybit3(&args[3], range); #endif if (OPS_diags > 1) { // Update kernel record ops_timers_core(&c2, &t2); OPS_kernels[104].mpi_time += t2 - t1; OPS_kernels[104].transfer += ops_compute_transfer(dim, start, end, &arg0); OPS_kernels[104].transfer += ops_compute_transfer(dim, start, end, &arg1); OPS_kernels[104].transfer += ops_compute_transfer(dim, start, end, &arg2); OPS_kernels[104].transfer += ops_compute_transfer(dim, start, end, &arg3); } } #ifdef OPS_LAZY void ops_par_loop_revert_kernel(char const *name, ops_block block, int dim, int *range, ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3) { ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor)); desc->name = name; desc->block = block; desc->dim = dim; desc->device = 1; desc->index = 104; desc->hash = 5381; desc->hash = ((desc->hash << 5) + desc->hash) + 104; for (int i = 0; i < 6; i++) { desc->range[i] = range[i]; desc->orig_range[i] = range[i]; desc->hash = ((desc->hash << 5) + desc->hash) + range[i]; } desc->nargs = 4; desc->args = (ops_arg *)malloc(4 * sizeof(ops_arg)); desc->args[0] = arg0; desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index; desc->args[1] = arg1; desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index; desc->args[2] = arg2; desc->hash = ((desc->hash << 5) + desc->hash) + arg2.dat->index; desc->args[3] = arg3; desc->hash = ((desc->hash << 5) + desc->hash) + arg3.dat->index; desc->function = 
ops_par_loop_revert_kernel_execute; if (OPS_diags > 1) { ops_timing_realloc(104, "revert_kernel"); } ops_enqueue_kernel(desc); } #endif
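Besides the launch rewrite, the distinctive mapping in this OPS-generated loop is the __constant__ bookkeeping: each cudaMemcpyToSymbol(xdim*_revert_kernel, ...) in the original becomes hipMemcpyToSymbol with identical arguments, while the __constant__ declarations and the host-side *_h caches are untouched. A stripped-down sketch of the pattern, with one dimension instead of eight and a dummy kernel standing in for ops_revert_kernel:

// Sketch: updating a __constant__ leading dimension before a launch,
// mirroring the xdim*/ydim* handling in the generated loop above.
#include <cuda_runtime.h>
#include <stdio.h>

__constant__ int xdim0;          // device-side copy of the leading dimension
static int xdim0_h = -1;         // host-side cache, as in the generated code

__global__ void copy_plane(const double *src, double *dst, int nx, int ny) {
  int x = blockIdx.x * blockDim.x + threadIdx.x;
  int y = blockIdx.y * blockDim.y + threadIdx.y;
  if (x < nx && y < ny) dst[x + xdim0 * y] = src[x + xdim0 * y];
}

int main(void) {
  int nx = 64, ny = 64, xdim = 64;
  double *src, *dst;                    // contents are irrelevant for the sketch
  cudaMalloc(&src, sizeof(double) * xdim * ny);
  cudaMalloc(&dst, sizeof(double) * xdim * ny);

  if (xdim != xdim0_h) {
    // cudaMemcpyToSymbol -> hipMemcpyToSymbol after hipify, arguments unchanged
    cudaMemcpyToSymbol(xdim0, &xdim, sizeof(int));
    xdim0_h = xdim;
  }

  dim3 block(16, 16), grid((nx + 15) / 16, (ny + 15) / 16);
  copy_plane<<<grid, block>>>(src, dst, nx, ny);
  cudaDeviceSynchronize();
  printf("done\n");
  cudaFree(src); cudaFree(dst);
  return 0;
}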
785790baed4431cb51b750813d1f1d650128ffc4.cu
// // auto-generated by ops.py // __constant__ int xdim0_revert_kernel; int xdim0_revert_kernel_h = -1; __constant__ int ydim0_revert_kernel; int ydim0_revert_kernel_h = -1; __constant__ int xdim1_revert_kernel; int xdim1_revert_kernel_h = -1; __constant__ int ydim1_revert_kernel; int ydim1_revert_kernel_h = -1; __constant__ int xdim2_revert_kernel; int xdim2_revert_kernel_h = -1; __constant__ int ydim2_revert_kernel; int ydim2_revert_kernel_h = -1; __constant__ int xdim3_revert_kernel; int xdim3_revert_kernel_h = -1; __constant__ int ydim3_revert_kernel; int ydim3_revert_kernel_h = -1; #undef OPS_ACC0 #undef OPS_ACC1 #undef OPS_ACC2 #undef OPS_ACC3 #define OPS_ACC0(x, y, z) \ (x + xdim0_revert_kernel * (y) + \ xdim0_revert_kernel * ydim0_revert_kernel * (z)) #define OPS_ACC1(x, y, z) \ (x + xdim1_revert_kernel * (y) + \ xdim1_revert_kernel * ydim1_revert_kernel * (z)) #define OPS_ACC2(x, y, z) \ (x + xdim2_revert_kernel * (y) + \ xdim2_revert_kernel * ydim2_revert_kernel * (z)) #define OPS_ACC3(x, y, z) \ (x + xdim3_revert_kernel * (y) + \ xdim3_revert_kernel * ydim3_revert_kernel * (z)) // user function __device__ void revert_kernel_gpu(const double *density0, double *density1, const double *energy0, double *energy1) { density1[OPS_ACC1(0, 0, 0)] = density0[OPS_ACC0(0, 0, 0)]; energy1[OPS_ACC3(0, 0, 0)] = energy0[OPS_ACC2(0, 0, 0)]; } #undef OPS_ACC0 #undef OPS_ACC1 #undef OPS_ACC2 #undef OPS_ACC3 __global__ void ops_revert_kernel(const double *__restrict arg0, double *__restrict arg1, const double *__restrict arg2, double *__restrict arg3, int size0, int size1, int size2) { int idx_z = blockDim.z * blockIdx.z + threadIdx.z; int idx_y = blockDim.y * blockIdx.y + threadIdx.y; int idx_x = blockDim.x * blockIdx.x + threadIdx.x; arg0 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim0_revert_kernel + idx_z * 1 * 1 * xdim0_revert_kernel * ydim0_revert_kernel; arg1 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim1_revert_kernel + idx_z * 1 * 1 * xdim1_revert_kernel * ydim1_revert_kernel; arg2 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim2_revert_kernel + idx_z * 1 * 1 * xdim2_revert_kernel * ydim2_revert_kernel; arg3 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim3_revert_kernel + idx_z * 1 * 1 * xdim3_revert_kernel * ydim3_revert_kernel; if (idx_x < size0 && idx_y < size1 && idx_z < size2) { revert_kernel_gpu(arg0, arg1, arg2, arg3); } } // host stub function #ifndef OPS_LAZY void ops_par_loop_revert_kernel(char const *name, ops_block block, int dim, int *range, ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3) { #else void ops_par_loop_revert_kernel_execute(ops_kernel_descriptor *desc) { int dim = desc->dim; int *range = desc->range; ops_arg arg0 = desc->args[0]; ops_arg arg1 = desc->args[1]; ops_arg arg2 = desc->args[2]; ops_arg arg3 = desc->args[3]; #endif // Timing double t1, t2, c1, c2; ops_arg args[4] = {arg0, arg1, arg2, arg3}; #if CHECKPOINTING && !OPS_LAZY if (!ops_checkpointing_before(args, 4, range, 104)) return; #endif if (OPS_diags > 1) { ops_timing_realloc(104, "revert_kernel"); OPS_kernels[104].count++; ops_timers_core(&c1, &t1); } // compute locally allocated range for the sub-block int start[3]; int end[3]; #if OPS_MPI && !OPS_LAZY sub_block_list sb = OPS_sub_block_list[block->index]; if (!sb->owned) return; for (int n = 0; n < 3; n++) { start[n] = sb->decomp_disp[n]; end[n] = sb->decomp_disp[n] + sb->decomp_size[n]; if (start[n] >= range[2 * n]) { start[n] = 0; } else { start[n] = range[2 * n] - start[n]; } if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0) start[n] = range[2 * n]; if (end[n] >= 
range[2 * n + 1]) { end[n] = range[2 * n + 1] - sb->decomp_disp[n]; } else { end[n] = sb->decomp_size[n]; } if (sb->id_p[n] == MPI_PROC_NULL && (range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n])) end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]); } #else for (int n = 0; n < 3; n++) { start[n] = range[2 * n]; end[n] = range[2 * n + 1]; } #endif int x_size = MAX(0, end[0] - start[0]); int y_size = MAX(0, end[1] - start[1]); int z_size = MAX(0, end[2] - start[2]); int xdim0 = args[0].dat->size[0]; int ydim0 = args[0].dat->size[1]; int xdim1 = args[1].dat->size[0]; int ydim1 = args[1].dat->size[1]; int xdim2 = args[2].dat->size[0]; int ydim2 = args[2].dat->size[1]; int xdim3 = args[3].dat->size[0]; int ydim3 = args[3].dat->size[1]; if (xdim0 != xdim0_revert_kernel_h || ydim0 != ydim0_revert_kernel_h || xdim1 != xdim1_revert_kernel_h || ydim1 != ydim1_revert_kernel_h || xdim2 != xdim2_revert_kernel_h || ydim2 != ydim2_revert_kernel_h || xdim3 != xdim3_revert_kernel_h || ydim3 != ydim3_revert_kernel_h) { cudaMemcpyToSymbol(xdim0_revert_kernel, &xdim0, sizeof(int)); xdim0_revert_kernel_h = xdim0; cudaMemcpyToSymbol(ydim0_revert_kernel, &ydim0, sizeof(int)); ydim0_revert_kernel_h = ydim0; cudaMemcpyToSymbol(xdim1_revert_kernel, &xdim1, sizeof(int)); xdim1_revert_kernel_h = xdim1; cudaMemcpyToSymbol(ydim1_revert_kernel, &ydim1, sizeof(int)); ydim1_revert_kernel_h = ydim1; cudaMemcpyToSymbol(xdim2_revert_kernel, &xdim2, sizeof(int)); xdim2_revert_kernel_h = xdim2; cudaMemcpyToSymbol(ydim2_revert_kernel, &ydim2, sizeof(int)); ydim2_revert_kernel_h = ydim2; cudaMemcpyToSymbol(xdim3_revert_kernel, &xdim3, sizeof(int)); xdim3_revert_kernel_h = xdim3; cudaMemcpyToSymbol(ydim3_revert_kernel, &ydim3, sizeof(int)); ydim3_revert_kernel_h = ydim3; } dim3 grid((x_size - 1) / OPS_block_size_x + 1, (y_size - 1) / OPS_block_size_y + 1, z_size); dim3 tblock(OPS_block_size_x, OPS_block_size_y, 1); int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size); int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size); int dat2 = (OPS_soa ? args[2].dat->type_size : args[2].dat->elem_size); int dat3 = (OPS_soa ? 
args[3].dat->type_size : args[3].dat->elem_size); char *p_a[4]; // set up initial pointers int base0 = args[0].dat->base_offset + dat0 * 1 * (start[0] * args[0].stencil->stride[0]); base0 = base0 + dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1]); base0 = base0 + dat0 * args[0].dat->size[0] * args[0].dat->size[1] * (start[2] * args[0].stencil->stride[2]); p_a[0] = (char *)args[0].data_d + base0; int base1 = args[1].dat->base_offset + dat1 * 1 * (start[0] * args[1].stencil->stride[0]); base1 = base1 + dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1]); base1 = base1 + dat1 * args[1].dat->size[0] * args[1].dat->size[1] * (start[2] * args[1].stencil->stride[2]); p_a[1] = (char *)args[1].data_d + base1; int base2 = args[2].dat->base_offset + dat2 * 1 * (start[0] * args[2].stencil->stride[0]); base2 = base2 + dat2 * args[2].dat->size[0] * (start[1] * args[2].stencil->stride[1]); base2 = base2 + dat2 * args[2].dat->size[0] * args[2].dat->size[1] * (start[2] * args[2].stencil->stride[2]); p_a[2] = (char *)args[2].data_d + base2; int base3 = args[3].dat->base_offset + dat3 * 1 * (start[0] * args[3].stencil->stride[0]); base3 = base3 + dat3 * args[3].dat->size[0] * (start[1] * args[3].stencil->stride[1]); base3 = base3 + dat3 * args[3].dat->size[0] * args[3].dat->size[1] * (start[2] * args[3].stencil->stride[2]); p_a[3] = (char *)args[3].data_d + base3; #ifndef OPS_LAZY ops_H_D_exchanges_device(args, 4); ops_halo_exchanges(args, 4, range); #endif if (OPS_diags > 1) { ops_timers_core(&c2, &t2); OPS_kernels[104].mpi_time += t2 - t1; } // call kernel wrapper function, passing in pointers to data ops_revert_kernel<<<grid, tblock>>>((double *)p_a[0], (double *)p_a[1], (double *)p_a[2], (double *)p_a[3], x_size, y_size, z_size); cutilSafeCall(cudaGetLastError()); if (OPS_diags > 1) { cutilSafeCall(cudaDeviceSynchronize()); ops_timers_core(&c1, &t1); OPS_kernels[104].time += t1 - t2; } #ifndef OPS_LAZY ops_set_dirtybit_device(args, 4); ops_set_halo_dirtybit3(&args[1], range); ops_set_halo_dirtybit3(&args[3], range); #endif if (OPS_diags > 1) { // Update kernel record ops_timers_core(&c2, &t2); OPS_kernels[104].mpi_time += t2 - t1; OPS_kernels[104].transfer += ops_compute_transfer(dim, start, end, &arg0); OPS_kernels[104].transfer += ops_compute_transfer(dim, start, end, &arg1); OPS_kernels[104].transfer += ops_compute_transfer(dim, start, end, &arg2); OPS_kernels[104].transfer += ops_compute_transfer(dim, start, end, &arg3); } } #ifdef OPS_LAZY void ops_par_loop_revert_kernel(char const *name, ops_block block, int dim, int *range, ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3) { ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor)); desc->name = name; desc->block = block; desc->dim = dim; desc->device = 1; desc->index = 104; desc->hash = 5381; desc->hash = ((desc->hash << 5) + desc->hash) + 104; for (int i = 0; i < 6; i++) { desc->range[i] = range[i]; desc->orig_range[i] = range[i]; desc->hash = ((desc->hash << 5) + desc->hash) + range[i]; } desc->nargs = 4; desc->args = (ops_arg *)malloc(4 * sizeof(ops_arg)); desc->args[0] = arg0; desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index; desc->args[1] = arg1; desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index; desc->args[2] = arg2; desc->hash = ((desc->hash << 5) + desc->hash) + arg2.dat->index; desc->args[3] = arg3; desc->hash = ((desc->hash << 5) + desc->hash) + arg3.dat->index; desc->function = ops_par_loop_revert_kernel_execute; if (OPS_diags > 
1) { ops_timing_realloc(104, "revert_kernel"); } ops_enqueue_kernel(desc); } #endif
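Both sides of this pair surface launch failures through cutilSafeCall(cudaGetLastError()) and cutilSafeCall(cudaDeviceSynchronize()) (hipGetLastError / hipDeviceSynchronize after hipify). Since cutilSafeCall also comes from the retired cutil headers, here is a self-contained stand-in; the CHECK_CUDA macro name is mine, not OPS's, and the same macro works after hipify with the hip* spellings:

// Sketch: a stand-alone launch-error check in the spirit of
// cutilSafeCall(cudaGetLastError()) used by the generated loop above.
#include <cuda_runtime.h>
#include <stdio.h>
#include <stdlib.h>

#define CHECK_CUDA(call)                                              \
  do {                                                                \
    cudaError_t err_ = (call);                                        \
    if (err_ != cudaSuccess) {                                        \
      fprintf(stderr, "CUDA error %s at %s:%d\n",                     \
              cudaGetErrorString(err_), __FILE__, __LINE__);          \
      exit(EXIT_FAILURE);                                             \
    }                                                                 \
  } while (0)

__global__ void noop_kernel(void) {}

int main(void) {
  noop_kernel<<<1, 1>>>();
  CHECK_CUDA(cudaGetLastError());        // catches launch-configuration errors
  CHECK_CUDA(cudaDeviceSynchronize());   // catches errors raised during execution
  puts("kernel ran cleanly");
  return 0;
}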
b0b8285351ceeeb449794f856c5a2462d9139a7d.hip
// !!! This is a file automatically generated by hipify!!! /*************************************************************************** *cr *cr (C) Copyright 2007 The Board of Trustees of the *cr University of Illinois *cr All Rights Reserved *cr ***************************************************************************/ #include <hip/hip_runtime.h> #include <inttypes.h> #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #include "../FFT/parboil.h" #include "../benchmark_common.h" #include "file.h" #include "image.h" #include "largerBlocks.cu" #include "sad.h" #include "sad4.cu" #define CUDA_ERRCK \ { \ hipError_t err = hipGetLastError(); \ if (err) \ fprintf(stderr, "CUDA error: %s\n", hipGetErrorString(err)); \ } /*static unsigned short * load_sads(char *filename); static void write_sads(char *filename, int image_size_macroblocks, unsigned short *sads); static void write_sads_directly(char *filename, int width, int height, unsigned short *sads);*/ /* FILE I/O unsigned short * load_sads(char *filename) { FILE *infile; unsigned short *sads; int w; int h; int sads_per_block; infile = fopen(filename, "r"); if (!infile) { fprintf(stderr, "Cannot find file '%s'\n", filename); exit(-1); } Read image dimensions (measured in macroblocks) w = read16u(infile); h = read16u(infile); Read SAD values. Only interested in the 4x4 SAD values, which are * at the end of the file. sads_per_block = MAX_POS_PADDED * (w * h); fseek(infile, 25 * sads_per_block * sizeof(unsigned short), SEEK_CUR); sads = (unsigned short *)malloc(sads_per_block * 16 * sizeof(unsigned short)); fread(sads, sizeof(unsigned short), sads_per_block * 16, infile); fclose(infile); return sads; }*/ /* Compare the reference SADs to the expected SADs. */ void check_sads(unsigned short* sads_reference, unsigned short* sads_computed, int image_size_macroblocks) { int block; /* Check the 4x4 SAD values. These are in sads_reference. * Ignore the data at the beginning of sads_computed. */ sads_computed += 25 * MAX_POS_PADDED * image_size_macroblocks; for (block = 0; block < image_size_macroblocks; block++) { int subblock; for (subblock = 0; subblock < 16; subblock++) { int sad_index; for (sad_index = 0; sad_index < MAX_POS; sad_index++) { int index = (block * 16 + subblock) * MAX_POS_PADDED + sad_index; if (sads_reference[index] != sads_computed[index]) { #if 0 /* Print exactly where the mismatch was seen */ printf("M %3d %2d %4d (%d = %d)\n", block, subblock, sad_index, sads_reference[index], sads_computed[index]); #else goto mismatch; #endif } } } } printf("Success.\n"); return; mismatch: printf("Computed SADs do not match expected values.\n"); } /* Extract the SAD data for a particular block type for a particular * macroblock from the array of SADs of that block type. 
*/ static inline void write_subblocks(FILE* outfile, unsigned short* subblock_array, int macroblock, int count) { int block; int pos; for (block = 0; block < count; block++) { unsigned short* vec = subblock_array + (block + macroblock * count) * MAX_POS_PADDED; /* Write all SADs for this sub-block */ for (pos = 0; pos < MAX_POS; pos++) write16u(outfile, *vec++); } } void write_sads(char* filename, int image_size_macroblocks, unsigned short* sads) { FILE* outfile = fopen(filename, "w"); int block; if (outfile == NULL) { fprintf(stderr, "Cannot open output file\n"); exit(-1); } /* Write size in macroblocks */ write32u(outfile, image_size_macroblocks); /* Write zeros */ write32u(outfile, 0); /* Each macroblock */ for (block = 0; block < image_size_macroblocks; block++) { int blocktype; /* Write SADs for all sub-block types */ for (blocktype = 1; blocktype <= 7; blocktype++) write_subblocks(outfile, sads + SAD_TYPE_IX(blocktype, image_size_macroblocks), block, SAD_TYPE_CT(blocktype)); } fclose(outfile); } /* FILE I/O for debugging static void write_sads_directly(char *filename, int width, int height, unsigned short *sads) { FILE *f = fopen(filename, "w"); int n; write16u(f, width); write16u(f, height); for (n = 0; n < 41 * MAX_POS_PADDED * (width * height); n++) { write16u(f, sads[n]); } fclose(f); }*/ /*static void print_test_sad_vector(unsigned short *base, int macroblock, int count) { int n; int searchpos = 17*33+17; for (n = 0; n < count; n++) printf(" %d", base[(count * macroblock + n) * MAX_POS_PADDED + searchpos]); }*/ /*static void print_test_sads(unsigned short *sads_computed, int mbs) { int macroblock = 5; int blocktype; for (blocktype = 1; blocktype <= 7; blocktype++) { printf("%d:", blocktype); print_test_sad_vector(sads_computed + SAD_TYPE_IX(blocktype, mbs), macroblock, SAD_TYPE_CT(blocktype)); puts("\n"); } }*/ /* MAIN */ // int // main(int argc, char **argv) int main_sad(hipStream_t stream_app, pthread_mutex_t* mutexapp, bool flag) { struct image_i16* ref_image; struct image_i16* cur_image; unsigned short* sads_computed; /* SADs generated by the program */ int image_size_bytes; int image_width_macroblocks, image_height_macroblocks; int image_size_macroblocks; struct pb_TimerSet timers; // struct pb_Parameters *params; pb_InitializeTimerSet(&timers); // params = pb_ReadParameters(&argc, argv); /*if (pb_Parameters_CountInputs(params) != 2) { fprintf(stderr, "Expecting two input filenames\n"); exit(-1); }*/ /* Read input files */ pb_SwitchToTimer(&timers, pb_TimerID_IO); ref_image = load_image((char*)"SAD/reference.bin"); cur_image = load_image((char*)"SAD/frame.bin"); pb_SwitchToTimer(&timers, pb_TimerID_COMPUTE); if ((ref_image->width != cur_image->width) || (ref_image->height != cur_image->height)) { fprintf(stderr, "Input images must be the same size\n"); exit(-1); } if ((ref_image->width % 16) || (ref_image->height % 16)) { fprintf(stderr, "Input image size must be an integral multiple of 16\n"); exit(-1); } /* Compute parameters, allocate memory */ image_size_bytes = ref_image->width * ref_image->height * sizeof(short); image_width_macroblocks = ref_image->width >> 4; image_height_macroblocks = ref_image->height >> 4; image_size_macroblocks = image_width_macroblocks * image_height_macroblocks; sads_computed = (unsigned short*)malloc( 41 * MAX_POS_PADDED * image_size_macroblocks * sizeof(short)); /* Run the kernel code */ { struct hipArray* ref_ary; /* Reference image on the device */ short* d_cur_image; /* Current image on the device */ unsigned short* d_sads; /* SADs on the 
device */ dim3 macroblock_grid(image_width_macroblocks, image_height_macroblocks); pb_SwitchToTimer(&timers, pb_TimerID_COPY); hipMalloc((void**)&d_cur_image, image_size_bytes); CUDA_ERRCK hipMallocArray(&ref_ary, &get_ref().channelDesc, ref_image->width, ref_image->height); CUDA_ERRCK /* Transfer current image to device */ hipMemcpyAsync(d_cur_image, cur_image->data, image_size_bytes, hipMemcpyHostToDevice, stream_app); CUDA_ERRCK /* Transfer reference image to device */ hipMemcpy2DToArray(ref_ary, 0, 0, ref_image->data, ref_image->width * sizeof(unsigned short), ref_image->width * sizeof(unsigned short), ref_image->height, hipMemcpyHostToDevice); CUDA_ERRCK hipBindTextureToArray(get_ref(), ref_ary); CUDA_ERRCK /* Allocate SAD data on the device */ hipMalloc((void**)&d_sads, 41 * MAX_POS_PADDED * image_size_macroblocks * sizeof(unsigned short)); CUDA_ERRCK hipMemset(d_sads, 0, 41 * MAX_POS_PADDED * image_size_macroblocks * sizeof(unsigned short)); CUDA_ERRCK pb_SwitchToTimer(&timers, pb_TimerID_GPU); /* Run the 4x4 kernel */ hipLaunchKernelGGL(( mb_sad_calc), dim3(CEIL(ref_image->width / 4, THREADS_W), CEIL(ref_image->height / 4, THREADS_H)), dim3(dim3(CEIL(MAX_POS, POS_PER_THREAD) * THREADS_W * THREADS_H)), SAD_LOC_SIZE_BYTES, stream_app, d_sads, (unsigned short*)d_cur_image, image_width_macroblocks, image_height_macroblocks); CUDA_ERRCK /* Run the larger-blocks kernels */ hipLaunchKernelGGL(( larger_sad_calc_8), dim3(macroblock_grid), dim3(dim3(32, 4)), 0, stream_app, d_sads, image_width_macroblocks, image_height_macroblocks); CUDA_ERRCK hipLaunchKernelGGL(( larger_sad_calc_16), dim3(macroblock_grid), dim3(dim3(32, 1)), 0, stream_app, d_sads, image_width_macroblocks, image_height_macroblocks); CUDA_ERRCK pthread_mutex_unlock(mutexapp); if (flag) cutilSafeCall(hipStreamSynchronize(stream_app)); pb_SwitchToTimer(&timers, pb_TimerID_COPY); /* Transfer SAD data to the host */ hipMemcpyAsync( sads_computed, // + 25 * MAX_POS_PADDED * image_size_macroblocks, d_sads, // + 25 * MAX_POS_PADDED * image_size_macroblocks, 41 * MAX_POS_PADDED * image_size_macroblocks * sizeof(unsigned short), hipMemcpyDeviceToHost, stream_app); if (flag) cutilSafeCall(hipStreamSynchronize(stream_app)); CUDA_ERRCK /* Free GPU memory */ hipFree(d_sads); CUDA_ERRCK hipUnbindTexture(get_ref()); CUDA_ERRCK hipFreeArray(ref_ary); CUDA_ERRCK hipFree(d_cur_image); CUDA_ERRCK pb_SwitchToTimer(&timers, pb_TimerID_COMPUTE); } /* Print output */ // if (params->outFile) //{ pb_SwitchToTimer(&timers, pb_TimerID_IO); write_sads((char*)"SAD/out.bin", image_size_macroblocks, sads_computed); pb_SwitchToTimer(&timers, pb_TimerID_COMPUTE); //} #if 0 /* Debugging */ print_test_sads(sads_computed, image_size_macroblocks); write_sads_directly("sad-debug.bin", ref_image->width / 16, ref_image->height / 16, sads_computed); #endif /* Free memory */ free(sads_computed); free_image(ref_image); free_image(cur_image); pb_SwitchToTimer(&timers, pb_TimerID_NONE); pb_PrintTimerSet(&timers); // pb_FreeParameters(params); return 0; }
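The HIP version above was produced by rewriting each CUDA runtime call to its hip* counterpart and each triple-chevron launch to hipLaunchKernelGGL, while kernel bodies stay untouched; the .cu original follows below. A minimal sketch of that launch correspondence for a hypothetical one-dimensional kernel (scale_kernel and launch_scale are illustration names, not part of the benchmark):

// Hypothetical kernel shown with the CUDA launch syntax; the comments give the
// spelling hipify emits for the same calls.
#include <cuda_runtime.h>

__global__ void scale_kernel(float* data, float factor, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) data[i] *= factor;
}

void launch_scale(float* d_data, int n, cudaStream_t stream) {
    dim3 block(256);
    dim3 grid((n + block.x - 1) / block.x);
    // CUDA form:
    scale_kernel<<<grid, block, 0, stream>>>(d_data, 2.0f, n);
    // hipify rewrites the launch as:
    //   hipLaunchKernelGGL(scale_kernel, dim3(grid), dim3(block), 0, stream,
    //                      d_data, 2.0f, n);
    // and cudaStreamSynchronize(stream) becomes hipStreamSynchronize(stream).
    cudaStreamSynchronize(stream);
}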
b0b8285351ceeeb449794f856c5a2462d9139a7d.cu
/*************************************************************************** *cr *cr (C) Copyright 2007 The Board of Trustees of the *cr University of Illinois *cr All Rights Reserved *cr ***************************************************************************/ #include <cuda.h> #include <inttypes.h> #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #include "../FFT/parboil.h" #include "../benchmark_common.h" #include "file.h" #include "image.h" #include "largerBlocks.cu" #include "sad.h" #include "sad4.cu" #define CUDA_ERRCK \ { \ cudaError_t err = cudaGetLastError(); \ if (err) \ fprintf(stderr, "CUDA error: %s\n", cudaGetErrorString(err)); \ } /*static unsigned short * load_sads(char *filename); static void write_sads(char *filename, int image_size_macroblocks, unsigned short *sads); static void write_sads_directly(char *filename, int width, int height, unsigned short *sads);*/ /* FILE I/O unsigned short * load_sads(char *filename) { FILE *infile; unsigned short *sads; int w; int h; int sads_per_block; infile = fopen(filename, "r"); if (!infile) { fprintf(stderr, "Cannot find file '%s'\n", filename); exit(-1); } Read image dimensions (measured in macroblocks) w = read16u(infile); h = read16u(infile); Read SAD values. Only interested in the 4x4 SAD values, which are * at the end of the file. sads_per_block = MAX_POS_PADDED * (w * h); fseek(infile, 25 * sads_per_block * sizeof(unsigned short), SEEK_CUR); sads = (unsigned short *)malloc(sads_per_block * 16 * sizeof(unsigned short)); fread(sads, sizeof(unsigned short), sads_per_block * 16, infile); fclose(infile); return sads; }*/ /* Compare the reference SADs to the expected SADs. */ void check_sads(unsigned short* sads_reference, unsigned short* sads_computed, int image_size_macroblocks) { int block; /* Check the 4x4 SAD values. These are in sads_reference. * Ignore the data at the beginning of sads_computed. */ sads_computed += 25 * MAX_POS_PADDED * image_size_macroblocks; for (block = 0; block < image_size_macroblocks; block++) { int subblock; for (subblock = 0; subblock < 16; subblock++) { int sad_index; for (sad_index = 0; sad_index < MAX_POS; sad_index++) { int index = (block * 16 + subblock) * MAX_POS_PADDED + sad_index; if (sads_reference[index] != sads_computed[index]) { #if 0 /* Print exactly where the mismatch was seen */ printf("M %3d %2d %4d (%d = %d)\n", block, subblock, sad_index, sads_reference[index], sads_computed[index]); #else goto mismatch; #endif } } } } printf("Success.\n"); return; mismatch: printf("Computed SADs do not match expected values.\n"); } /* Extract the SAD data for a particular block type for a particular * macroblock from the array of SADs of that block type. 
*/ static inline void write_subblocks(FILE* outfile, unsigned short* subblock_array, int macroblock, int count) { int block; int pos; for (block = 0; block < count; block++) { unsigned short* vec = subblock_array + (block + macroblock * count) * MAX_POS_PADDED; /* Write all SADs for this sub-block */ for (pos = 0; pos < MAX_POS; pos++) write16u(outfile, *vec++); } } void write_sads(char* filename, int image_size_macroblocks, unsigned short* sads) { FILE* outfile = fopen(filename, "w"); int block; if (outfile == NULL) { fprintf(stderr, "Cannot open output file\n"); exit(-1); } /* Write size in macroblocks */ write32u(outfile, image_size_macroblocks); /* Write zeros */ write32u(outfile, 0); /* Each macroblock */ for (block = 0; block < image_size_macroblocks; block++) { int blocktype; /* Write SADs for all sub-block types */ for (blocktype = 1; blocktype <= 7; blocktype++) write_subblocks(outfile, sads + SAD_TYPE_IX(blocktype, image_size_macroblocks), block, SAD_TYPE_CT(blocktype)); } fclose(outfile); } /* FILE I/O for debugging static void write_sads_directly(char *filename, int width, int height, unsigned short *sads) { FILE *f = fopen(filename, "w"); int n; write16u(f, width); write16u(f, height); for (n = 0; n < 41 * MAX_POS_PADDED * (width * height); n++) { write16u(f, sads[n]); } fclose(f); }*/ /*static void print_test_sad_vector(unsigned short *base, int macroblock, int count) { int n; int searchpos = 17*33+17; for (n = 0; n < count; n++) printf(" %d", base[(count * macroblock + n) * MAX_POS_PADDED + searchpos]); }*/ /*static void print_test_sads(unsigned short *sads_computed, int mbs) { int macroblock = 5; int blocktype; for (blocktype = 1; blocktype <= 7; blocktype++) { printf("%d:", blocktype); print_test_sad_vector(sads_computed + SAD_TYPE_IX(blocktype, mbs), macroblock, SAD_TYPE_CT(blocktype)); puts("\n"); } }*/ /* MAIN */ // int // main(int argc, char **argv) int main_sad(cudaStream_t stream_app, pthread_mutex_t* mutexapp, bool flag) { struct image_i16* ref_image; struct image_i16* cur_image; unsigned short* sads_computed; /* SADs generated by the program */ int image_size_bytes; int image_width_macroblocks, image_height_macroblocks; int image_size_macroblocks; struct pb_TimerSet timers; // struct pb_Parameters *params; pb_InitializeTimerSet(&timers); // params = pb_ReadParameters(&argc, argv); /*if (pb_Parameters_CountInputs(params) != 2) { fprintf(stderr, "Expecting two input filenames\n"); exit(-1); }*/ /* Read input files */ pb_SwitchToTimer(&timers, pb_TimerID_IO); ref_image = load_image((char*)"SAD/reference.bin"); cur_image = load_image((char*)"SAD/frame.bin"); pb_SwitchToTimer(&timers, pb_TimerID_COMPUTE); if ((ref_image->width != cur_image->width) || (ref_image->height != cur_image->height)) { fprintf(stderr, "Input images must be the same size\n"); exit(-1); } if ((ref_image->width % 16) || (ref_image->height % 16)) { fprintf(stderr, "Input image size must be an integral multiple of 16\n"); exit(-1); } /* Compute parameters, allocate memory */ image_size_bytes = ref_image->width * ref_image->height * sizeof(short); image_width_macroblocks = ref_image->width >> 4; image_height_macroblocks = ref_image->height >> 4; image_size_macroblocks = image_width_macroblocks * image_height_macroblocks; sads_computed = (unsigned short*)malloc( 41 * MAX_POS_PADDED * image_size_macroblocks * sizeof(short)); /* Run the kernel code */ { struct cudaArray* ref_ary; /* Reference image on the device */ short* d_cur_image; /* Current image on the device */ unsigned short* d_sads; /* SADs on 
the device */ dim3 macroblock_grid(image_width_macroblocks, image_height_macroblocks); pb_SwitchToTimer(&timers, pb_TimerID_COPY); cudaMalloc((void**)&d_cur_image, image_size_bytes); CUDA_ERRCK cudaMallocArray(&ref_ary, &get_ref().channelDesc, ref_image->width, ref_image->height); CUDA_ERRCK /* Transfer current image to device */ cudaMemcpyAsync(d_cur_image, cur_image->data, image_size_bytes, cudaMemcpyHostToDevice, stream_app); CUDA_ERRCK /* Transfer reference image to device */ cudaMemcpy2DToArray(ref_ary, 0, 0, ref_image->data, ref_image->width * sizeof(unsigned short), ref_image->width * sizeof(unsigned short), ref_image->height, cudaMemcpyHostToDevice); CUDA_ERRCK cudaBindTextureToArray(get_ref(), ref_ary); CUDA_ERRCK /* Allocate SAD data on the device */ cudaMalloc((void**)&d_sads, 41 * MAX_POS_PADDED * image_size_macroblocks * sizeof(unsigned short)); CUDA_ERRCK cudaMemset(d_sads, 0, 41 * MAX_POS_PADDED * image_size_macroblocks * sizeof(unsigned short)); CUDA_ERRCK pb_SwitchToTimer(&timers, pb_TimerID_GPU); /* Run the 4x4 kernel */ mb_sad_calc<<<dim3(CEIL(ref_image->width / 4, THREADS_W), CEIL(ref_image->height / 4, THREADS_H)), dim3(CEIL(MAX_POS, POS_PER_THREAD) * THREADS_W * THREADS_H), SAD_LOC_SIZE_BYTES, stream_app>>>( d_sads, (unsigned short*)d_cur_image, image_width_macroblocks, image_height_macroblocks); CUDA_ERRCK /* Run the larger-blocks kernels */ larger_sad_calc_8<<<macroblock_grid, dim3(32, 4), 0, stream_app>>>( d_sads, image_width_macroblocks, image_height_macroblocks); CUDA_ERRCK larger_sad_calc_16<<<macroblock_grid, dim3(32, 1), 0, stream_app>>>( d_sads, image_width_macroblocks, image_height_macroblocks); CUDA_ERRCK pthread_mutex_unlock(mutexapp); if (flag) cutilSafeCall(cudaStreamSynchronize(stream_app)); pb_SwitchToTimer(&timers, pb_TimerID_COPY); /* Transfer SAD data to the host */ cudaMemcpyAsync( sads_computed, // + 25 * MAX_POS_PADDED * image_size_macroblocks, d_sads, // + 25 * MAX_POS_PADDED * image_size_macroblocks, 41 * MAX_POS_PADDED * image_size_macroblocks * sizeof(unsigned short), cudaMemcpyDeviceToHost, stream_app); if (flag) cutilSafeCall(cudaStreamSynchronize(stream_app)); CUDA_ERRCK /* Free GPU memory */ cudaFree(d_sads); CUDA_ERRCK cudaUnbindTexture(get_ref()); CUDA_ERRCK cudaFreeArray(ref_ary); CUDA_ERRCK cudaFree(d_cur_image); CUDA_ERRCK pb_SwitchToTimer(&timers, pb_TimerID_COMPUTE); } /* Print output */ // if (params->outFile) //{ pb_SwitchToTimer(&timers, pb_TimerID_IO); write_sads((char*)"SAD/out.bin", image_size_macroblocks, sads_computed); pb_SwitchToTimer(&timers, pb_TimerID_COMPUTE); //} #if 0 /* Debugging */ print_test_sads(sads_computed, image_size_macroblocks); write_sads_directly("sad-debug.bin", ref_image->width / 16, ref_image->height / 16, sads_computed); #endif /* Free memory */ free(sads_computed); free_image(ref_image); free_image(cur_image); pb_SwitchToTimer(&timers, pb_TimerID_NONE); pb_PrintTimerSet(&timers); // pb_FreeParameters(params); return 0; }
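Both versions bind the reference frame to a global texture reference (get_ref()) through cudaBindTextureToArray / hipBindTextureToArray; that texture-reference API is deprecated in recent CUDA toolkits in favour of texture objects. A hedged sketch of the texture-object equivalent, offered as a substitution rather than what the benchmark actually does (make_ref_texture and its parameters are hypothetical):

#include <cuda_runtime.h>

// Builds a texture object over a 2-D array of unsigned short pixels.
// Sketch only: error checking omitted.
cudaTextureObject_t make_ref_texture(const unsigned short* host_pixels,
                                     int width, int height,
                                     cudaArray_t* out_array) {
    cudaChannelFormatDesc desc = cudaCreateChannelDesc<unsigned short>();
    cudaMallocArray(out_array, &desc, width, height);
    cudaMemcpy2DToArray(*out_array, 0, 0, host_pixels,
                        width * sizeof(unsigned short),
                        width * sizeof(unsigned short), height,
                        cudaMemcpyHostToDevice);

    cudaResourceDesc res = {};
    res.resType = cudaResourceTypeArray;
    res.res.array.array = *out_array;

    cudaTextureDesc tex = {};
    tex.addressMode[0] = cudaAddressModeClamp;
    tex.addressMode[1] = cudaAddressModeClamp;
    tex.filterMode     = cudaFilterModePoint;
    tex.readMode       = cudaReadModeElementType;

    cudaTextureObject_t tex_obj = 0;
    cudaCreateTextureObject(&tex_obj, &res, &tex, nullptr);
    return tex_obj;
}

A kernel would take the cudaTextureObject_t as an argument and sample it with tex2D<unsigned short>(tex_obj, x, y); cleanup is cudaDestroyTextureObject plus cudaFreeArray, where the code above calls cudaUnbindTexture and cudaFreeArray.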
b786a098497a761922c3e96ed56d2f3904a44053.hip
// !!! This is a file automatically generated by hipify!!! #include "include/common/cuda_error_hadling.h" #include <hip/hip_runtime_api.h> #include <cfloat> #include <iostream> #include <fstream> using namespace std; ///////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // structure to perform union find on device template<class _T> class DeviceUnionFind { private: _T *parents, *ranks; inline __device__ void compress(int _x) { int y = parents[_x]; if (y != _x) { int z = parents[y]; if (parents[y] != y) { do { y = parents[y]; } while (y != parents[y]); parents[_x] = y; } } } public: DeviceUnionFind(int _size) { SAFE_CALL(hipMalloc(&parents, sizeof(_T) * _size)); SAFE_CALL(hipMalloc(&ranks, sizeof(_T) * _size)); } void free_memory() { SAFE_CALL(hipFree(parents)); SAFE_CALL(hipFree(ranks)); } // clean component inline __device__ void clear(int _x) { parents[_x] = _x; ranks[_x] = 0; } // return parent vertex to selected one inline __device__ int get_parent(int _x) { return parents[_x]; } // optimize component inline __device__ int flatten(int _p) { if (parents[_p] != _p) { compress(_p); } } // find parent component inline __device__ int find_fast(int _x) { int root = _x; while (root != parents[root]) root = parents[root]; while (_x != root) { int newp = parents[_x]; parents[_x] = root; _x = newp; } return root; } // merge two components inline __device__ bool merge(int _x, int _y) { _x = parents[_x]; _y = parents[_y]; while (_x != _y) { if (_y < _x) { int t = _x; _x = _y; _y = t; } int z = atomicCAS(&parents[_y], _y, _x); if (z == _y) { return true; } _x = parents[parents[_x]]; _y = parents[parents[z]]; // reuse value returned by atomicCAS } return false; } }; ///////////////////////////////////////////////////////////////////////////////////////////////////////////////////// __global__ void init_mst_data(bool *_in_mst, int _edges_count) { register const int idx = (blockIdx.x * blockDim.x + threadIdx.x) + blockIdx.y * blockDim.x * gridDim.x; if (idx < _edges_count) { _in_mst[idx] = false; } } ///////////////////////////////////////////////////////////////////////////////////////////////////////////////////// __global__ void init_components_data(int _vertices_count, DeviceUnionFind<int> _components) { register const int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx < _vertices_count) { _components.clear(idx); } } ///////////////////////////////////////////////////////////////////////////////////////////////////////////////////// __global__ void unset_cheapest_and_flatten(int *_cheapest, int _vertices_count, DeviceUnionFind<int> _components) { register const int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx < _vertices_count) { _cheapest[idx] = -1; _components.flatten(idx); } } ///////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // main computational kernel, requires the most part of execution time template <typename _TEdgeWeight> __global__ void find_minimum_edges_atomic(int *_src_ids, // source vertices ids int *_dst_ids, // destination vertices ids _TEdgeWeight *_weights, // weights int _edges_count, int *_cheapest, // cheapest indexes array int _vertices_count, DeviceUnionFind<int> _components) { register const int idx = (blockIdx.x * blockDim.x + threadIdx.x) + blockIdx.y * blockDim.x * gridDim.x; // create thread per edge if (idx < _edges_count) { register int set1 = _components.get_parent(_src_ids[idx]); // get parents for both incedent edges 
register int set2 = _components.get_parent(_dst_ids[idx]); if (set1 != set2) // if they belong to differnt components { register int cheapest_index = _cheapest[set1]; while (cheapest_index == -1 || _weights[idx] < _weights[cheapest_index]) // atomic update minimum index for src_id vertex { if (atomicCAS(&_cheapest[set1], cheapest_index, idx) == cheapest_index) { break; } cheapest_index = _cheapest[set1]; } cheapest_index = _cheapest[set2]; while (cheapest_index == -1 || _weights[idx] < _weights[cheapest_index]) // atomic update minimum index for dst_id vertex { if (atomicCAS(&_cheapest[set2], cheapest_index, idx) == cheapest_index) { break; } cheapest_index = _cheapest[set2]; } } } } ///////////////////////////////////////////////////////////////////////////////////////////////////////////////////// __global__ void merge_components(int *_src_ids, // source vertices ids int *_dst_ids, // destination vertices ids int *_cheapest, // cheapest indexes array int _vertices_count, bool *_in_mst, // result array int *_trees_num, DeviceUnionFind<int> _components) { register const int idx = blockIdx.x * blockDim.x + threadIdx.x; // for all vertices if (idx < _vertices_count) { if (_cheapest[idx] != -1) { // get parents int set1 = _components.get_parent(_src_ids[_cheapest[idx]]); int set2 = _components.get_parent(_dst_ids[_cheapest[idx]]); if (set1 != set2) { if (_components.merge(set1, set2)) // try to merge using best edge { _in_mst[_cheapest[idx]] = true; } else { atomicAdd(_trees_num, 1); // unsuccessful merge => increase active fragment count } atomicSub(_trees_num, 1); } } } } ///////////////////////////////////////////////////////////////////////////////////////////////////////////////////// template <typename _TEdgeWeight> void boruvka_wrapper(int *_src_ids, // source vertices ids int *_dst_ids, // destination vertices ids _TEdgeWeight *_weights, // weights bool *_in_mst, // result array int _vertices_count, int _edges_count) { // create grid threads dim3 threads(1024, 1, 1); dim3 grid_edges((_edges_count - 1) / threads.x + 1, 1, 1); dim3 grid_vertices((_vertices_count - 1) / threads.x + 1, 1, 1); #ifdef __USE_FERMI__ if(grid_edges.x > 65535) { grid_edges.y = (grid_edges.x - 1) / 65535 + 1; grid_edges.x = 65535; } #endif DeviceUnionFind<int> components(_vertices_count); int *cheapest; SAFE_CALL(hipMalloc(&cheapest, sizeof(int) * _vertices_count)); // init distances result array and components data SAFE_KERNEL_CALL((hipLaunchKernelGGL(( init_mst_data) , dim3(grid_edges), dim3(threads) , 0, 0, _in_mst, _edges_count) )); SAFE_KERNEL_CALL((hipLaunchKernelGGL(( init_components_data) , dim3(grid_vertices), dim3(threads) , 0, 0, _vertices_count, components) )); int *device_num_trees; SAFE_CALL(hipMalloc(&device_num_trees, sizeof(int))); int host_num_trees = _vertices_count, prev_num_trees = 0; SAFE_CALL(hipMemcpy(device_num_trees, &host_num_trees, sizeof(int), hipMemcpyHostToDevice)); while (host_num_trees != prev_num_trees) // update graph while number of trees changes { // update components for all vertices SAFE_KERNEL_CALL((hipLaunchKernelGGL(( unset_cheapest_and_flatten) , dim3(grid_vertices), dim3(threads) , 0, 0, cheapest, _vertices_count, components) )); // find cheapest edges to merge componets in the future SAFE_KERNEL_CALL((hipLaunchKernelGGL(( find_minimum_edges_atomic) , dim3(grid_edges), dim3(threads) , 0, 0, _src_ids, _dst_ids, _weights, _edges_count, cheapest, _vertices_count, components) )); prev_num_trees = host_num_trees; // merge components with edges, found on previous step 
SAFE_KERNEL_CALL((hipLaunchKernelGGL(( merge_components) , dim3(grid_vertices), dim3(threads) , 0, 0, _src_ids, _dst_ids, cheapest, _vertices_count, _in_mst, device_num_trees, components) )); SAFE_CALL(hipMemcpy(&host_num_trees, device_num_trees, sizeof(int), hipMemcpyDeviceToHost)); } SAFE_CALL(hipFree(device_num_trees)); SAFE_CALL(hipFree(cheapest)); components.free_memory(); } ///////////////////////////////////////////////////////////////////////////////////////////////////////////////////// template void boruvka_wrapper<float>(int *_edges_src_ids, int *_edges_dst_ids, float *_edges_weights, bool *_in_mst, int _vertices_count, int _edges_count); template void boruvka_wrapper<double>(int *_edges_src_ids, int *_edges_dst_ids, double *_edges_weights,bool *_in_mst, int _vertices_count, int _edges_count); /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
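find_minimum_edges_atomic above records, per component, the index of the lightest outgoing edge by retrying atomicCAS until either the write sticks or the stored edge is already lighter. The same retry loop in isolation, as a sketch with made-up names (update_cheapest, cheapest, weights), not an interface exposed by this file:

// atomicCAS retry loop that keeps the index of the minimum-weight edge seen
// so far for one component. Assumes cheapest[] was initialized to -1.
__device__ void update_cheapest(int* cheapest, const float* weights,
                                int component, int edge_idx) {
    int cur = cheapest[component];
    while (cur == -1 || weights[edge_idx] < weights[cur]) {
        int prev = atomicCAS(&cheapest[component], cur, edge_idx);
        if (prev == cur)
            break;   // our edge index is now stored
        cur = prev;  // another thread won the race; re-check against its edge
    }
}

Reusing the value returned by atomicCAS saves the extra global read the kernel above performs after a failed exchange; the result is the same.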
b786a098497a761922c3e96ed56d2f3904a44053.cu
#include "include/common/cuda_error_hadling.h" #include <cuda_runtime_api.h> #include <cfloat> #include <iostream> #include <fstream> using namespace std; ///////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // structure to perform union find on device template<class _T> class DeviceUnionFind { private: _T *parents, *ranks; inline __device__ void compress(int _x) { int y = parents[_x]; if (y != _x) { int z = parents[y]; if (parents[y] != y) { do { y = parents[y]; } while (y != parents[y]); parents[_x] = y; } } } public: DeviceUnionFind(int _size) { SAFE_CALL(cudaMalloc(&parents, sizeof(_T) * _size)); SAFE_CALL(cudaMalloc(&ranks, sizeof(_T) * _size)); } void free_memory() { SAFE_CALL(cudaFree(parents)); SAFE_CALL(cudaFree(ranks)); } // clean component inline __device__ void clear(int _x) { parents[_x] = _x; ranks[_x] = 0; } // return parent vertex to selected one inline __device__ int get_parent(int _x) { return parents[_x]; } // optimize component inline __device__ int flatten(int _p) { if (parents[_p] != _p) { compress(_p); } } // find parent component inline __device__ int find_fast(int _x) { int root = _x; while (root != parents[root]) root = parents[root]; while (_x != root) { int newp = parents[_x]; parents[_x] = root; _x = newp; } return root; } // merge two components inline __device__ bool merge(int _x, int _y) { _x = parents[_x]; _y = parents[_y]; while (_x != _y) { if (_y < _x) { int t = _x; _x = _y; _y = t; } int z = atomicCAS(&parents[_y], _y, _x); if (z == _y) { return true; } _x = parents[parents[_x]]; _y = parents[parents[z]]; // reuse value returned by atomicCAS } return false; } }; ///////////////////////////////////////////////////////////////////////////////////////////////////////////////////// __global__ void init_mst_data(bool *_in_mst, int _edges_count) { register const int idx = (blockIdx.x * blockDim.x + threadIdx.x) + blockIdx.y * blockDim.x * gridDim.x; if (idx < _edges_count) { _in_mst[idx] = false; } } ///////////////////////////////////////////////////////////////////////////////////////////////////////////////////// __global__ void init_components_data(int _vertices_count, DeviceUnionFind<int> _components) { register const int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx < _vertices_count) { _components.clear(idx); } } ///////////////////////////////////////////////////////////////////////////////////////////////////////////////////// __global__ void unset_cheapest_and_flatten(int *_cheapest, int _vertices_count, DeviceUnionFind<int> _components) { register const int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx < _vertices_count) { _cheapest[idx] = -1; _components.flatten(idx); } } ///////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // main computational kernel, requires the most part of execution time template <typename _TEdgeWeight> __global__ void find_minimum_edges_atomic(int *_src_ids, // source vertices ids int *_dst_ids, // destination vertices ids _TEdgeWeight *_weights, // weights int _edges_count, int *_cheapest, // cheapest indexes array int _vertices_count, DeviceUnionFind<int> _components) { register const int idx = (blockIdx.x * blockDim.x + threadIdx.x) + blockIdx.y * blockDim.x * gridDim.x; // create thread per edge if (idx < _edges_count) { register int set1 = _components.get_parent(_src_ids[idx]); // get parents for both incedent edges register int set2 = _components.get_parent(_dst_ids[idx]); if 
(set1 != set2) // if they belong to differnt components { register int cheapest_index = _cheapest[set1]; while (cheapest_index == -1 || _weights[idx] < _weights[cheapest_index]) // atomic update minimum index for src_id vertex { if (atomicCAS(&_cheapest[set1], cheapest_index, idx) == cheapest_index) { break; } cheapest_index = _cheapest[set1]; } cheapest_index = _cheapest[set2]; while (cheapest_index == -1 || _weights[idx] < _weights[cheapest_index]) // atomic update minimum index for dst_id vertex { if (atomicCAS(&_cheapest[set2], cheapest_index, idx) == cheapest_index) { break; } cheapest_index = _cheapest[set2]; } } } } ///////////////////////////////////////////////////////////////////////////////////////////////////////////////////// __global__ void merge_components(int *_src_ids, // source vertices ids int *_dst_ids, // destination vertices ids int *_cheapest, // cheapest indexes array int _vertices_count, bool *_in_mst, // result array int *_trees_num, DeviceUnionFind<int> _components) { register const int idx = blockIdx.x * blockDim.x + threadIdx.x; // for all vertices if (idx < _vertices_count) { if (_cheapest[idx] != -1) { // get parents int set1 = _components.get_parent(_src_ids[_cheapest[idx]]); int set2 = _components.get_parent(_dst_ids[_cheapest[idx]]); if (set1 != set2) { if (_components.merge(set1, set2)) // try to merge using best edge { _in_mst[_cheapest[idx]] = true; } else { atomicAdd(_trees_num, 1); // unsuccessful merge => increase active fragment count } atomicSub(_trees_num, 1); } } } } ///////////////////////////////////////////////////////////////////////////////////////////////////////////////////// template <typename _TEdgeWeight> void boruvka_wrapper(int *_src_ids, // source vertices ids int *_dst_ids, // destination vertices ids _TEdgeWeight *_weights, // weights bool *_in_mst, // result array int _vertices_count, int _edges_count) { // create grid threads dim3 threads(1024, 1, 1); dim3 grid_edges((_edges_count - 1) / threads.x + 1, 1, 1); dim3 grid_vertices((_vertices_count - 1) / threads.x + 1, 1, 1); #ifdef __USE_FERMI__ if(grid_edges.x > 65535) { grid_edges.y = (grid_edges.x - 1) / 65535 + 1; grid_edges.x = 65535; } #endif DeviceUnionFind<int> components(_vertices_count); int *cheapest; SAFE_CALL(cudaMalloc(&cheapest, sizeof(int) * _vertices_count)); // init distances result array and components data SAFE_KERNEL_CALL(( init_mst_data <<< grid_edges, threads >>> (_in_mst, _edges_count) )); SAFE_KERNEL_CALL(( init_components_data <<< grid_vertices, threads >>> (_vertices_count, components) )); int *device_num_trees; SAFE_CALL(cudaMalloc(&device_num_trees, sizeof(int))); int host_num_trees = _vertices_count, prev_num_trees = 0; SAFE_CALL(cudaMemcpy(device_num_trees, &host_num_trees, sizeof(int), cudaMemcpyHostToDevice)); while (host_num_trees != prev_num_trees) // update graph while number of trees changes { // update components for all vertices SAFE_KERNEL_CALL(( unset_cheapest_and_flatten <<< grid_vertices, threads >>> (cheapest, _vertices_count, components) )); // find cheapest edges to merge componets in the future SAFE_KERNEL_CALL(( find_minimum_edges_atomic <<< grid_edges, threads >>> (_src_ids, _dst_ids, _weights, _edges_count, cheapest, _vertices_count, components) )); prev_num_trees = host_num_trees; // merge components with edges, found on previous step SAFE_KERNEL_CALL(( merge_components <<< grid_vertices, threads >>> (_src_ids, _dst_ids, cheapest, _vertices_count, _in_mst, device_num_trees, components) )); SAFE_CALL(cudaMemcpy(&host_num_trees, 
device_num_trees, sizeof(int), cudaMemcpyDeviceToHost)); } SAFE_CALL(cudaFree(device_num_trees)); SAFE_CALL(cudaFree(cheapest)); components.free_memory(); } ///////////////////////////////////////////////////////////////////////////////////////////////////////////////////// template void boruvka_wrapper<float>(int *_edges_src_ids, int *_edges_dst_ids, float *_edges_weights, bool *_in_mst, int _vertices_count, int _edges_count); template void boruvka_wrapper<double>(int *_edges_src_ids, int *_edges_dst_ids, double *_edges_weights,bool *_in_mst, int _vertices_count, int _edges_count); /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
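boruvka_wrapper expects the edge list (source ids, destination ids, weights) and the output in-MST mask to already reside in device memory, and it allocates its own scratch arrays internally. A hypothetical caller on a made-up four-edge graph, kept to the signature visible above (error handling omitted):

#include <cuda_runtime.h>

// Declared and explicitly instantiated for float in the file above.
template <typename T>
void boruvka_wrapper(int* src, int* dst, T* weights, bool* in_mst,
                     int vertices_count, int edges_count);

// Illustration only: the graph, sizes and variable names are invented.
void run_boruvka_example() {
    int   h_src[4] = {0, 1, 2, 0};
    int   h_dst[4] = {1, 2, 3, 3};
    float h_w[4]   = {1.0f, 2.0f, 3.0f, 0.5f};
    const int vertices = 4, edges = 4;

    int *d_src, *d_dst; float *d_w; bool *d_in_mst;
    cudaMalloc(&d_src, edges * sizeof(int));
    cudaMalloc(&d_dst, edges * sizeof(int));
    cudaMalloc(&d_w,   edges * sizeof(float));
    cudaMalloc(&d_in_mst, edges * sizeof(bool));
    cudaMemcpy(d_src, h_src, edges * sizeof(int),   cudaMemcpyHostToDevice);
    cudaMemcpy(d_dst, h_dst, edges * sizeof(int),   cudaMemcpyHostToDevice);
    cudaMemcpy(d_w,   h_w,   edges * sizeof(float), cudaMemcpyHostToDevice);

    boruvka_wrapper<float>(d_src, d_dst, d_w, d_in_mst, vertices, edges);

    bool h_in_mst[4];
    cudaMemcpy(h_in_mst, d_in_mst, edges * sizeof(bool), cudaMemcpyDeviceToHost);
    // h_in_mst[e] is now true for every edge chosen for the spanning forest.

    cudaFree(d_src); cudaFree(d_dst); cudaFree(d_w); cudaFree(d_in_mst);
}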
d6027aa3e4d3b8106ed443b7645fa94a04943459.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdlib.h> #include <stdio.h> #include <stdint.h> #include <string.h> #define STB_IMAGE_IMPLEMENTATION #include <math.h> #include "stb_image.h" #define STB_IMAGE_WRITE_IMPLEMENTATION #include "stb_image_write.h" #include <time.h> #include <dirent.h> #define SIZE 32 void read_frames(uint8_t* frame, int ini, int size, int sizeFrame) { for (int i = ini; i < size; ++i) { char filename[300]; sprintf(filename, "pics/thumb%d.jpg",i+1); int width, height, bpp; uint8_t* rgb_image = stbi_load(filename, &width, &height, &bpp, 3); for(int j = 0; j < sizeFrame; ++j) frame[i*sizeFrame+j] = rgb_image[j]; } } //////////////////// | ///CODIGO CUDA////// | /////////////////// v __global__ void KernelByN (int Ncol, int Nfil, uint8_t *A, int Nframes, int SzFrame) { int row = blockIdx.y * blockDim.y + threadIdx.y; int col = blockIdx.x * blockDim.x + threadIdx.x; if(row < Nfil && col < Ncol){ for (int i = 0; i < Nframes; ++i) { int ind = (row * Ncol + col)*3 + i*SzFrame; A[ind] = A[ind+1] = A[ind+2] = (A[ind] + A[ind+1] + A[ind+2])/3; } } } void CheckCudaError(char sms[], int line); int main(int argc, char** argv) { if (argc < 2) { printf("Necesito la ruta del video en mp4!\n"); return -1; } int Nfil, Ncol; unsigned long numBytes; unsigned int nThreads; float TiempoTotal, TiempoKernel; hipEvent_t E0, E1, E2, E3; hipEvent_t X1, X2, X3; uint8_t *Host_I1; uint8_t *Host_I2; uint8_t *Host_I3; uint8_t *Host_I4; uint8_t *Dev_I1; uint8_t *Dev_I2; uint8_t *Dev_I3; uint8_t *Dev_I4; //Sacar los fotogramas del video usando FFMPEG char *filename = argv[1]; system("mkdir pics"); system("mkdir pics2"); char *auxCommand = "pics/thumb%d.jpg -hide_banner"; char comando[300]; sprintf(comando, "ffmpeg -i %s.mp4 %s",filename,auxCommand); system(comando); sprintf(comando,"ffmpeg -i %s.mp4 -vn -acodec copy audio.aac",filename); system(comando); //Contar el numero de fotogramas obtenidos DIR *d; struct dirent *dir; d = opendir("pics/"); unsigned long frames = 0; if (d) { while ((dir = readdir(d)) != NULL) { frames++; } closedir(d); } int bpp; stbi_load("pics/thumb1.jpg", &Ncol, &Nfil, &bpp, 3); printf("Leyendo %d fotogramas de %d x %d resolucion...\n",frames-2, Ncol, Nfil); numBytes = (frames-2) * (3 * Nfil * Ncol) * sizeof(uint8_t); //Guardamos 3 uint8_t (height, width i bpp) + un uint8_t por cada color (3*width*height) //Podemos cargarnos la struct y considerar que los 3 primeros valores son height, width y bpp, y los (3*width*height) siguientes el data, todo eso por cada frame. //Cada frame ocupa 3*Nfil*Ncol uint8_t. 
int count; hipGetDeviceCount(&count); if (count < 4) { printf("No hay suficientes GPUs\n"); exit(0); } // Obtener Memoria en el host printf("Numero de bytes: %lu\n", numBytes); hipHostMalloc((uint8_t**)&Host_I1, numBytes/4); hipHostMalloc((uint8_t**)&Host_I2, numBytes/4); hipHostMalloc((uint8_t**)&Host_I3, numBytes/4); hipHostMalloc((uint8_t**)&Host_I4, numBytes/4); read_frames(Host_I1, 0, (frames-2)/4, 3 * Nfil * Ncol); read_frames(Host_I2, (frames-2)/4, (frames-2)/2, 3 * Nfil * Ncol); read_frames(Host_I3, (frames-2)/2, 3*(frames-2)/4, 3 * Nfil * Ncol); read_frames(Host_I4, 3*(frames-2)/4, (frames-2), 3 * Nfil * Ncol); hipEventCreate(&E0); hipEventCreate(&E1); hipEventCreate(&E2); hipEventCreate(&E3); printf("Aplicando filtro...\n"); // // KERNEL ELEMENTO a ELEMENTO // // numero de Threads en cada dimension nThreads = SIZE; // numero de Blocks en cada dimension int nBlocksFil = (Nfil+nThreads-1)/nThreads; //tener en cuenta 3componentes RGB?? int nBlocksCol = (Ncol+nThreads-1)/nThreads; dim3 dimGridE(nBlocksFil, nBlocksCol, 1); dim3 dimBlockE(nThreads, nThreads, 1); hipEventRecord(E0, 0); hipEventSynchronize(E0); // Obtener Memoria en el devicecudaMallocHost((float**)&hA0, numBytesA); hipSetDevice(0); hipHostMalloc((uint8_t**)&Dev_I1, numBytes/4); hipSetDevice(1); hipHostMalloc((uint8_t**)&Dev_I2, numBytes/4); hipEventCreate(&X1); hipSetDevice(2); hipHostMalloc((uint8_t**)&Dev_I3, numBytes/4); hipEventCreate(&X2); hipSetDevice(3); hipHostMalloc((uint8_t**)&Dev_I4, numBytes/4); hipEventCreate(&X3); // Copiar datos desde el host en el device hipSetDevice(0); hipMemcpy(Dev_I1, Host_I1, numBytes/4, hipMemcpyHostToDevice); CheckCudaError((char *) "Copiar Datos Host --> Device", __LINE__); // Ejecutar el kernel elemento a elemento hipLaunchKernelGGL(( KernelByN), dim3(dimGridE), dim3(dimBlockE), 0, 0, Ncol, Nfil, Dev_I1, (frames-2)/4, 3 * Nfil * Ncol); CheckCudaError((char *) "Invocar Kernel", __LINE__); hipSetDevice(1); hipMemcpy(Dev_I2, Host_I2, numBytes/4, hipMemcpyHostToDevice); CheckCudaError((char *) "Copiar Datos Host --> Device", __LINE__); // Ejecutar el kernel elemento a elemento hipLaunchKernelGGL(( KernelByN), dim3(dimGridE), dim3(dimBlockE), 0, 0, Ncol, Nfil, Dev_I2, (frames-2)/4, 3 * Nfil * Ncol); CheckCudaError((char *) "Invocar Kernel", __LINE__); hipSetDevice(2); hipMemcpy(Dev_I3, Host_I3, numBytes/4, hipMemcpyHostToDevice); CheckCudaError((char *) "Copiar Datos Host --> Device", __LINE__); // Ejecutar el kernel elemento a elemento hipLaunchKernelGGL(( KernelByN), dim3(dimGridE), dim3(dimBlockE), 0, 0, Ncol, Nfil, Dev_I3, (frames-2)/4, 3 * Nfil * Ncol); CheckCudaError((char *) "Invocar Kernel", __LINE__); hipSetDevice(3); hipMemcpy(Dev_I4, Host_I4, numBytes/4, hipMemcpyHostToDevice); CheckCudaError((char *) "Copiar Datos Host --> Device", __LINE__); // Ejecutar el kernel elemento a elemento hipLaunchKernelGGL(( KernelByN), dim3(dimGridE), dim3(dimBlockE), 0, 0, Ncol, Nfil, Dev_I4, (frames-2)/4, 3 * Nfil * Ncol); CheckCudaError((char *) "Invocar Kernel", __LINE__); hipEventRecord(E2, 0); hipEventSynchronize(E2); // Obtener el resultado desde el host hipSetDevice(0); // Obtener el resultado desde el host hipMemcpyAsync(Host_I1, Dev_I1, numBytes/4, hipMemcpyDeviceToHost); CheckCudaError((char *) "Copiar Datos Device --> Host", __LINE__); hipSetDevice(1); // Obtener el resultado desde el host hipMemcpyAsync(Host_I2, Dev_I2, numBytes/4, hipMemcpyDeviceToHost); CheckCudaError((char *) "Copiar Datos Device --> Host", __LINE__); hipEventRecord(X1, 0); hipSetDevice(2); // Obtener el 
resultado desde el host hipMemcpyAsync(Host_I3, Dev_I3, numBytes/4, hipMemcpyDeviceToHost); CheckCudaError((char *) "Copiar Datos Device --> Host", __LINE__); hipEventRecord(X2, 0); hipSetDevice(3); // Obtener el resultado desde el host hipMemcpyAsync(Host_I4, Dev_I4, numBytes/4, hipMemcpyDeviceToHost); CheckCudaError((char *) "Copiar Datos Device --> Host", __LINE__); hipEventRecord(X3, 0); // Liberar Memoria del device hipEventRecord(E3, 0); hipEventSynchronize(E3); hipSetDevice(0); hipEventSynchronize(X1); hipEventSynchronize(X2); hipEventSynchronize(X3); hipSetDevice(0); hipFree(Dev_I1); hipSetDevice(1); hipFree(Dev_I2); hipSetDevice(2); hipFree(Dev_I3); hipSetDevice(3); hipFree(Dev_I4); hipEventElapsedTime(&TiempoTotal, E0, E3); printf("Tiempo Global: %4.6f milseg\n", TiempoTotal); printf("Bandwidth: %4.6f GB/s\n", (float)(((float)(numBytes/TiempoTotal))/1000000)); printf("Rendimiento Global: %4.2f GFLOPS\n", (4.0 * (float) Nfil/3 * (float) Ncol * (float) (frames-2)) / (1000000.0 * TiempoTotal)); hipEventDestroy(E0); hipEventDestroy(E1); hipEventDestroy(E2); hipEventDestroy(E3); printf("Writing...\n"); char picname[300]; for (int i = 0; i < frames-2; ++i) { printf("\rIn progress %d %", i*100/(frames-2)); ///'size' no definido (solucin: lo pongo en mayusculas, no se si es la variable a la que te querias referir) sprintf(picname, "thumb%d.jpg",i+1); char ruta [300]; sprintf(ruta, "pics2/%s",picname); if (i < (frames-2)/4) stbi_write_jpg(ruta, Nfil/3, Ncol, 3, &Host_I1[i*Nfil * Ncol], Nfil); if (i >= (frames-2)/4 && i < (frames-2)/2) stbi_write_jpg(ruta, Nfil/3, Ncol, 3, &Host_I2[(i-(frames-2)/4)*Nfil * Ncol], Nfil); if (i >= (frames-2)/2 && i < 3*(frames-2)/4) stbi_write_jpg(ruta, Nfil/3, Ncol, 3, &Host_I3[(i-(frames-2)/2)*Nfil * Ncol], Nfil); if (i >= 3*(frames-2)/4 && i < (frames-2)) stbi_write_jpg(ruta, Nfil/3, Ncol, 3, &Host_I4[(i-3*(frames-2)/4)*Nfil * Ncol], Nfil); } printf("\nRemoving residuals...\n"); auxCommand = "ffmpeg -framerate 25 -i pics2/thumb%d.jpg"; sprintf(comando, "%s -pattern_type glob -c:v libx264 -pix_fmt yuv420p %s_out_provisional.mp4",auxCommand, filename); system(comando); sprintf(comando,"ffmpeg -i %s_out_provisional.mp4 -i audio.aac -c:v copy -c:a aac -strict experimental %s_out.mp4",filename,filename); system(comando); sprintf(comando,"rm %s_out_provisional.mp4",filename); system(comando); system("rm audio.aac"); system("rm -rf pics2"); return 0; } void CheckCudaError(char sms[], int line) { hipError_t error; error = hipGetLastError(); if (error) { printf("(ERROR) %s - %s in %s at line %d\n", sms, hipGetErrorString(error), __FILE__, line); exit(EXIT_FAILURE); } //else printf("(OK) %s \n", sms); }
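The filter above brackets the whole pipeline with events E0..E3 and turns the elapsed milliseconds into bandwidth and GFLOPS figures. The bare event-timing pattern it relies on, shown in isolation with invented names and no error checking:

#include <cuda_runtime.h>

// Times an arbitrary stretch of GPU work with CUDA events, in milliseconds.
float time_gpu_work() {
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    cudaEventRecord(start, 0);
    // ... kernels and copies to be timed go here ...
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);   // wait until 'stop' has really been reached

    float elapsed_ms = 0.0f;
    cudaEventElapsedTime(&elapsed_ms, start, stop);

    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    return elapsed_ms;
}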
d6027aa3e4d3b8106ed443b7645fa94a04943459.cu
#include <stdlib.h> #include <stdio.h> #include <stdint.h> #include <string.h> #define STB_IMAGE_IMPLEMENTATION #include <math.h> #include "stb_image.h" #define STB_IMAGE_WRITE_IMPLEMENTATION #include "stb_image_write.h" #include <time.h> #include <dirent.h> #define SIZE 32 void read_frames(uint8_t* frame, int ini, int size, int sizeFrame) { for (int i = ini; i < size; ++i) { char filename[300]; sprintf(filename, "pics/thumb%d.jpg",i+1); int width, height, bpp; uint8_t* rgb_image = stbi_load(filename, &width, &height, &bpp, 3); for(int j = 0; j < sizeFrame; ++j) frame[i*sizeFrame+j] = rgb_image[j]; } } //////////////////// | ///CODIGO CUDA////// | /////////////////// v __global__ void KernelByN (int Ncol, int Nfil, uint8_t *A, int Nframes, int SzFrame) { int row = blockIdx.y * blockDim.y + threadIdx.y; int col = blockIdx.x * blockDim.x + threadIdx.x; if(row < Nfil && col < Ncol){ for (int i = 0; i < Nframes; ++i) { int ind = (row * Ncol + col)*3 + i*SzFrame; A[ind] = A[ind+1] = A[ind+2] = (A[ind] + A[ind+1] + A[ind+2])/3; } } } void CheckCudaError(char sms[], int line); int main(int argc, char** argv) { if (argc < 2) { printf("Necesito la ruta del video en mp4!\n"); return -1; } int Nfil, Ncol; unsigned long numBytes; unsigned int nThreads; float TiempoTotal, TiempoKernel; cudaEvent_t E0, E1, E2, E3; cudaEvent_t X1, X2, X3; uint8_t *Host_I1; uint8_t *Host_I2; uint8_t *Host_I3; uint8_t *Host_I4; uint8_t *Dev_I1; uint8_t *Dev_I2; uint8_t *Dev_I3; uint8_t *Dev_I4; //Sacar los fotogramas del video usando FFMPEG char *filename = argv[1]; system("mkdir pics"); system("mkdir pics2"); char *auxCommand = "pics/thumb%d.jpg -hide_banner"; char comando[300]; sprintf(comando, "ffmpeg -i %s.mp4 %s",filename,auxCommand); system(comando); sprintf(comando,"ffmpeg -i %s.mp4 -vn -acodec copy audio.aac",filename); system(comando); //Contar el numero de fotogramas obtenidos DIR *d; struct dirent *dir; d = opendir("pics/"); unsigned long frames = 0; if (d) { while ((dir = readdir(d)) != NULL) { frames++; } closedir(d); } int bpp; stbi_load("pics/thumb1.jpg", &Ncol, &Nfil, &bpp, 3); printf("Leyendo %d fotogramas de %d x %d resolucion...\n",frames-2, Ncol, Nfil); numBytes = (frames-2) * (3 * Nfil * Ncol) * sizeof(uint8_t); //Guardamos 3 uint8_t (height, width i bpp) + un uint8_t por cada color (3*width*height) //Podemos cargarnos la struct y considerar que los 3 primeros valores son height, width y bpp, y los (3*width*height) siguientes el data, todo eso por cada frame. //Cada frame ocupa 3*Nfil*Ncol uint8_t. int count; cudaGetDeviceCount(&count); if (count < 4) { printf("No hay suficientes GPUs\n"); exit(0); } // Obtener Memoria en el host printf("Numero de bytes: %lu\n", numBytes); cudaMallocHost((uint8_t**)&Host_I1, numBytes/4); cudaMallocHost((uint8_t**)&Host_I2, numBytes/4); cudaMallocHost((uint8_t**)&Host_I3, numBytes/4); cudaMallocHost((uint8_t**)&Host_I4, numBytes/4); read_frames(Host_I1, 0, (frames-2)/4, 3 * Nfil * Ncol); read_frames(Host_I2, (frames-2)/4, (frames-2)/2, 3 * Nfil * Ncol); read_frames(Host_I3, (frames-2)/2, 3*(frames-2)/4, 3 * Nfil * Ncol); read_frames(Host_I4, 3*(frames-2)/4, (frames-2), 3 * Nfil * Ncol); cudaEventCreate(&E0); cudaEventCreate(&E1); cudaEventCreate(&E2); cudaEventCreate(&E3); printf("Aplicando filtro...\n"); // // KERNEL ELEMENTO a ELEMENTO // // numero de Threads en cada dimension nThreads = SIZE; // numero de Blocks en cada dimension int nBlocksFil = (Nfil+nThreads-1)/nThreads; //tener en cuenta 3componentes RGB?? 
int nBlocksCol = (Ncol+nThreads-1)/nThreads; dim3 dimGridE(nBlocksFil, nBlocksCol, 1); dim3 dimBlockE(nThreads, nThreads, 1); cudaEventRecord(E0, 0); cudaEventSynchronize(E0); // Obtener Memoria en el devicecudaMallocHost((float**)&hA0, numBytesA); cudaSetDevice(0); cudaMallocHost((uint8_t**)&Dev_I1, numBytes/4); cudaSetDevice(1); cudaMallocHost((uint8_t**)&Dev_I2, numBytes/4); cudaEventCreate(&X1); cudaSetDevice(2); cudaMallocHost((uint8_t**)&Dev_I3, numBytes/4); cudaEventCreate(&X2); cudaSetDevice(3); cudaMallocHost((uint8_t**)&Dev_I4, numBytes/4); cudaEventCreate(&X3); // Copiar datos desde el host en el device cudaSetDevice(0); cudaMemcpy(Dev_I1, Host_I1, numBytes/4, cudaMemcpyHostToDevice); CheckCudaError((char *) "Copiar Datos Host --> Device", __LINE__); // Ejecutar el kernel elemento a elemento KernelByN<<<dimGridE, dimBlockE>>>(Ncol, Nfil, Dev_I1, (frames-2)/4, 3 * Nfil * Ncol); CheckCudaError((char *) "Invocar Kernel", __LINE__); cudaSetDevice(1); cudaMemcpy(Dev_I2, Host_I2, numBytes/4, cudaMemcpyHostToDevice); CheckCudaError((char *) "Copiar Datos Host --> Device", __LINE__); // Ejecutar el kernel elemento a elemento KernelByN<<<dimGridE, dimBlockE>>>(Ncol, Nfil, Dev_I2, (frames-2)/4, 3 * Nfil * Ncol); CheckCudaError((char *) "Invocar Kernel", __LINE__); cudaSetDevice(2); cudaMemcpy(Dev_I3, Host_I3, numBytes/4, cudaMemcpyHostToDevice); CheckCudaError((char *) "Copiar Datos Host --> Device", __LINE__); // Ejecutar el kernel elemento a elemento KernelByN<<<dimGridE, dimBlockE>>>(Ncol, Nfil, Dev_I3, (frames-2)/4, 3 * Nfil * Ncol); CheckCudaError((char *) "Invocar Kernel", __LINE__); cudaSetDevice(3); cudaMemcpy(Dev_I4, Host_I4, numBytes/4, cudaMemcpyHostToDevice); CheckCudaError((char *) "Copiar Datos Host --> Device", __LINE__); // Ejecutar el kernel elemento a elemento KernelByN<<<dimGridE, dimBlockE>>>(Ncol, Nfil, Dev_I4, (frames-2)/4, 3 * Nfil * Ncol); CheckCudaError((char *) "Invocar Kernel", __LINE__); cudaEventRecord(E2, 0); cudaEventSynchronize(E2); // Obtener el resultado desde el host cudaSetDevice(0); // Obtener el resultado desde el host cudaMemcpyAsync(Host_I1, Dev_I1, numBytes/4, cudaMemcpyDeviceToHost); CheckCudaError((char *) "Copiar Datos Device --> Host", __LINE__); cudaSetDevice(1); // Obtener el resultado desde el host cudaMemcpyAsync(Host_I2, Dev_I2, numBytes/4, cudaMemcpyDeviceToHost); CheckCudaError((char *) "Copiar Datos Device --> Host", __LINE__); cudaEventRecord(X1, 0); cudaSetDevice(2); // Obtener el resultado desde el host cudaMemcpyAsync(Host_I3, Dev_I3, numBytes/4, cudaMemcpyDeviceToHost); CheckCudaError((char *) "Copiar Datos Device --> Host", __LINE__); cudaEventRecord(X2, 0); cudaSetDevice(3); // Obtener el resultado desde el host cudaMemcpyAsync(Host_I4, Dev_I4, numBytes/4, cudaMemcpyDeviceToHost); CheckCudaError((char *) "Copiar Datos Device --> Host", __LINE__); cudaEventRecord(X3, 0); // Liberar Memoria del device cudaEventRecord(E3, 0); cudaEventSynchronize(E3); cudaSetDevice(0); cudaEventSynchronize(X1); cudaEventSynchronize(X2); cudaEventSynchronize(X3); cudaSetDevice(0); cudaFree(Dev_I1); cudaSetDevice(1); cudaFree(Dev_I2); cudaSetDevice(2); cudaFree(Dev_I3); cudaSetDevice(3); cudaFree(Dev_I4); cudaEventElapsedTime(&TiempoTotal, E0, E3); printf("Tiempo Global: %4.6f milseg\n", TiempoTotal); printf("Bandwidth: %4.6f GB/s\n", (float)(((float)(numBytes/TiempoTotal))/1000000)); printf("Rendimiento Global: %4.2f GFLOPS\n", (4.0 * (float) Nfil/3 * (float) Ncol * (float) (frames-2)) / (1000000.0 * TiempoTotal)); cudaEventDestroy(E0); 
cudaEventDestroy(E1); cudaEventDestroy(E2); cudaEventDestroy(E3); printf("Writing...\n"); char picname[300]; for (int i = 0; i < frames-2; ++i) { printf("\rIn progress %d %", i*100/(frames-2)); ///'size' no definido (solución: lo pongo en mayusculas, no se si es la variable a la que te querias referir) sprintf(picname, "thumb%d.jpg",i+1); char ruta [300]; sprintf(ruta, "pics2/%s",picname); if (i < (frames-2)/4) stbi_write_jpg(ruta, Nfil/3, Ncol, 3, &Host_I1[i*Nfil * Ncol], Nfil); if (i >= (frames-2)/4 && i < (frames-2)/2) stbi_write_jpg(ruta, Nfil/3, Ncol, 3, &Host_I2[(i-(frames-2)/4)*Nfil * Ncol], Nfil); if (i >= (frames-2)/2 && i < 3*(frames-2)/4) stbi_write_jpg(ruta, Nfil/3, Ncol, 3, &Host_I3[(i-(frames-2)/2)*Nfil * Ncol], Nfil); if (i >= 3*(frames-2)/4 && i < (frames-2)) stbi_write_jpg(ruta, Nfil/3, Ncol, 3, &Host_I4[(i-3*(frames-2)/4)*Nfil * Ncol], Nfil); } printf("\nRemoving residuals...\n"); auxCommand = "ffmpeg -framerate 25 -i pics2/thumb%d.jpg"; sprintf(comando, "%s -pattern_type glob -c:v libx264 -pix_fmt yuv420p %s_out_provisional.mp4",auxCommand, filename); system(comando); sprintf(comando,"ffmpeg -i %s_out_provisional.mp4 -i audio.aac -c:v copy -c:a aac -strict experimental %s_out.mp4",filename,filename); system(comando); sprintf(comando,"rm %s_out_provisional.mp4",filename); system(comando); system("rm audio.aac"); system("rm -rf pics2"); return 0; } void CheckCudaError(char sms[], int line) { cudaError_t error; error = cudaGetLastError(); if (error) { printf("(ERROR) %s - %s in %s at line %d\n", sms, cudaGetErrorString(error), __FILE__, line); exit(EXIT_FAILURE); } //else printf("(OK) %s \n", sms); }
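Both versions split the frame buffer into four quarters and drive one GPU per quarter through cudaSetDevice / hipSetDevice; note that the Dev_I* buffers are obtained with cudaMallocHost / hipHostMalloc, i.e. pinned host memory, even though the surrounding comments talk about device memory. A compact sketch of the same per-GPU split using plain device allocations instead; this is a substitution with invented names (to_gray, process_on_gpus), not the file's code:

#include <cuda_runtime.h>
#include <cstddef>
#include <cstdint>

// Averages R, G and B into a grey value for every pixel of one chunk.
__global__ void to_gray(uint8_t* px, size_t n_pixels) {
    size_t i = (size_t)blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n_pixels) {
        uint8_t g = (px[3 * i] + px[3 * i + 1] + px[3 * i + 2]) / 3;
        px[3 * i] = px[3 * i + 1] = px[3 * i + 2] = g;
    }
}

// One host chunk per GPU: copy in, filter, copy back. Synchronous for clarity.
void process_on_gpus(uint8_t* host_chunks[], size_t chunk_bytes, int n_gpus) {
    for (int dev = 0; dev < n_gpus; ++dev) {
        cudaSetDevice(dev);
        uint8_t* d_buf;
        cudaMalloc(&d_buf, chunk_bytes);
        cudaMemcpy(d_buf, host_chunks[dev], chunk_bytes, cudaMemcpyHostToDevice);
        size_t n_pixels = chunk_bytes / 3;
        unsigned int blocks = (unsigned int)((n_pixels + 255) / 256);
        to_gray<<<blocks, 256>>>(d_buf, n_pixels);
        cudaMemcpy(host_chunks[dev], d_buf, chunk_bytes, cudaMemcpyDeviceToHost);
        cudaFree(d_buf);
    }
}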
189e97a2c3677a69cf605d4d7aced22d47abd082.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /** * Copyright (c) Facebook, Inc. and its affiliates. * * This source code is licensed under the MIT license found in the * LICENSE file in the root directory of this source tree. */ #include <faiss/gpu/impl/IVFInterleaved.cuh> #include <faiss/gpu/impl/scan/IVFInterleavedImpl.cuh> namespace faiss { namespace gpu { constexpr uint32_t kMaxUInt32 = std::numeric_limits<uint32_t>::max(); // Second-pass kernel to further k-select the results from the first pass across // IVF lists and produce the final results template <int ThreadsPerBlock, int NumWarpQ, int NumThreadQ> __global__ void ivfInterleavedScan2(Tensor<float, 3, true> distanceIn, Tensor<int, 3, true> indicesIn, Tensor<int, 2, true> listIds, int k, void** listIndices, IndicesOptions opt, bool dir, Tensor<float, 2, true> distanceOut, Tensor<Index::idx_t, 2, true> indicesOut) { int queryId = blockIdx.x; constexpr int kNumWarps = ThreadsPerBlock / kWarpSize; __shared__ float smemK[kNumWarps * NumWarpQ]; __shared__ uint32_t smemV[kNumWarps * NumWarpQ]; // To avoid creating excessive specializations, we combine direction kernels, // selecting for the smallest element. If `dir` is true, we negate all values // being selected (so that we are selecting the largest element). BlockSelect<float, uint32_t, false, Comparator<float>, NumWarpQ, NumThreadQ, ThreadsPerBlock> heap(kFloatMax, kMaxUInt32, smemK, smemV, k); // nprobe x k int num = distanceIn.getSize(1) * distanceIn.getSize(2); auto distanceBase = distanceIn[queryId].data(); int limit = utils::roundDown(num, kWarpSize); // This will keep our negation factor float adj = dir ? -1 : 1; int i = threadIdx.x; for (; i < limit; i += blockDim.x) { // We represent the index as (probe id)(k) // Right now, both are limited to a maximum of 2048, but we will dedicate // each to the high and low words of a uint32_t static_assert(GPU_MAX_SELECTION_K <= 65536, ""); uint32_t curProbe = i / k; uint32_t curK = i % k; uint32_t index = (curProbe << 16) | (curK & (uint32_t) 0xffff); int listId = listIds[queryId][curProbe]; if (listId != -1) { // Adjust the value we are selecting based on the sorting order heap.addThreadQ(distanceBase[i] * adj, index); } heap.checkThreadQ(); } // Handle warp divergence separately if (i < num) { uint32_t curProbe = i / k; uint32_t curK = i % k; uint32_t index = (curProbe << 16) | (curK & (uint32_t) 0xffff); int listId = listIds[queryId][curProbe]; if (listId != -1) { heap.addThreadQ(distanceBase[i] * adj, index); } } // Merge all final results heap.reduce(); for (int i = threadIdx.x; i < k; i += blockDim.x) { // Re-adjust the value we are selecting based on the sorting order distanceOut[queryId][i] = smemK[i] * adj; auto packedIndex = smemV[i]; // We need to remap to the user-provided indices Index::idx_t index = -1; // We may not have at least k values to return; in this function, max uint32 // is our sentinel value if (packedIndex != kMaxUInt32) { uint32_t curProbe = packedIndex >> 16; uint32_t curK = packedIndex & 0xffff; int listId = listIds[queryId][curProbe]; int listOffset = indicesIn[queryId][curProbe][curK]; if (opt == INDICES_32_BIT) { index = (Index::idx_t) ((int*) listIndices[listId])[listOffset]; } else if (opt == INDICES_64_BIT) { index = ((Index::idx_t*) listIndices[listId])[listOffset]; } else { index = ((Index::idx_t) listId << 32 | (Index::idx_t) listOffset); } } indicesOut[queryId][i] = index; } } void runIVFInterleavedScan2(Tensor<float, 3, true>& distanceIn, Tensor<int, 3, 
true>& indicesIn, Tensor<int, 2, true>& listIds, int k, thrust::device_vector<void*>& listIndices, IndicesOptions indicesOptions, bool dir, Tensor<float, 2, true>& distanceOut, Tensor<Index::idx_t, 2, true>& indicesOut, hipStream_t stream) { #define IVF_SCAN_2(THREADS, NUM_WARP_Q, NUM_THREAD_Q) \ hipLaunchKernelGGL(( ivfInterleavedScan2<THREADS, NUM_WARP_Q, NUM_THREAD_Q>) \ , dim3(distanceIn.getSize(0)), dim3(THREADS), 0, stream, \ distanceIn, \ indicesIn, \ listIds, \ k, \ listIndices.data().get(), \ indicesOptions, \ dir, \ distanceOut, \ indicesOut) if (k == 1) { IVF_SCAN_2(128, 1, 1); } else if (k <= 32) { IVF_SCAN_2(128, 32, 2); } else if (k <= 64) { IVF_SCAN_2(128, 64, 3); } else if (k <= 128) { IVF_SCAN_2(128, 128, 3); } else if (k <= 256) { IVF_SCAN_2(128, 256, 4); } else if (k <= 512) { IVF_SCAN_2(128, 512, 8); } else if (k <= 1024) { IVF_SCAN_2(128, 1024, 8); } #if GPU_MAX_SELECTION_K >= 2048 else if (k <= 2048) { IVF_SCAN_2(64, 2048, 8); } #endif } void runIVFInterleavedScan(Tensor<float, 2, true>& queries, Tensor<int, 2, true>& listIds, thrust::device_vector<void*>& listData, thrust::device_vector<void*>& listIndices, IndicesOptions indicesOptions, thrust::device_vector<int>& listLengths, int k, faiss::MetricType metric, bool useResidual, Tensor<float, 3, true>& residualBase, GpuScalarQuantizer* scalarQ, // output Tensor<float, 2, true>& outDistances, // output Tensor<Index::idx_t, 2, true>& outIndices, GpuResources* res) { // caught for exceptions at a higher level FAISS_ASSERT(k <= GPU_MAX_SELECTION_K); if (k == 1) { IVF_INTERLEAVED_CALL(1); } else if (k <= 32) { IVF_INTERLEAVED_CALL(32); } else if (k <= 64) { IVF_INTERLEAVED_CALL(64); } else if (k <= 128) { IVF_INTERLEAVED_CALL(128); } else if (k <= 256) { IVF_INTERLEAVED_CALL(256); } else if (k <= 512) { IVF_INTERLEAVED_CALL(512); } else if (k <= 1024) { IVF_INTERLEAVED_CALL(1024); } #if GPU_MAX_SELECTION_K >= 2048 else if (k <= 2048) { IVF_INTERLEAVED_CALL(2048); } #endif } } } // namespace
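runIVFInterleavedScan2 above maps the runtime k onto fixed buckets (1, 32, 64, ..., 2048) so that the warp-queue sizes become compile-time template arguments of the kernel it launches. The dispatch idiom reduced to a toy; select_k and launch_select are invented names, not FAISS symbols:

#include <cuda_runtime.h>

// Toy kernel: NumWarpQ stands in for a compile-time queue size; the body is a
// placeholder that just copies the first k inputs.
template <int NumWarpQ>
__global__ void select_k(const float* in, float* out, int n, int k) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < k && i < n) out[i] = in[i];
}

// Runtime k picks one compile-time specialization, mirroring the IVF_SCAN_2
// macro above.
inline void launch_select(const float* in, float* out, int n, int k,
                          cudaStream_t stream) {
    if      (k <= 32)   select_k<32>  <<<(k + 127) / 128, 128, 0, stream>>>(in, out, n, k);
    else if (k <= 64)   select_k<64>  <<<(k + 127) / 128, 128, 0, stream>>>(in, out, n, k);
    else if (k <= 128)  select_k<128> <<<(k + 127) / 128, 128, 0, stream>>>(in, out, n, k);
    else if (k <= 1024) select_k<1024><<<(k + 127) / 128, 128, 0, stream>>>(in, out, n, k);
    // larger k would need a wider bucket, as FAISS guards with GPU_MAX_SELECTION_K.
}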
189e97a2c3677a69cf605d4d7aced22d47abd082.cu
/** * Copyright (c) Facebook, Inc. and its affiliates. * * This source code is licensed under the MIT license found in the * LICENSE file in the root directory of this source tree. */ #include <faiss/gpu/impl/IVFInterleaved.cuh> #include <faiss/gpu/impl/scan/IVFInterleavedImpl.cuh> namespace faiss { namespace gpu { constexpr uint32_t kMaxUInt32 = std::numeric_limits<uint32_t>::max(); // Second-pass kernel to further k-select the results from the first pass across // IVF lists and produce the final results template <int ThreadsPerBlock, int NumWarpQ, int NumThreadQ> __global__ void ivfInterleavedScan2(Tensor<float, 3, true> distanceIn, Tensor<int, 3, true> indicesIn, Tensor<int, 2, true> listIds, int k, void** listIndices, IndicesOptions opt, bool dir, Tensor<float, 2, true> distanceOut, Tensor<Index::idx_t, 2, true> indicesOut) { int queryId = blockIdx.x; constexpr int kNumWarps = ThreadsPerBlock / kWarpSize; __shared__ float smemK[kNumWarps * NumWarpQ]; __shared__ uint32_t smemV[kNumWarps * NumWarpQ]; // To avoid creating excessive specializations, we combine direction kernels, // selecting for the smallest element. If `dir` is true, we negate all values // being selected (so that we are selecting the largest element). BlockSelect<float, uint32_t, false, Comparator<float>, NumWarpQ, NumThreadQ, ThreadsPerBlock> heap(kFloatMax, kMaxUInt32, smemK, smemV, k); // nprobe x k int num = distanceIn.getSize(1) * distanceIn.getSize(2); auto distanceBase = distanceIn[queryId].data(); int limit = utils::roundDown(num, kWarpSize); // This will keep our negation factor float adj = dir ? -1 : 1; int i = threadIdx.x; for (; i < limit; i += blockDim.x) { // We represent the index as (probe id)(k) // Right now, both are limited to a maximum of 2048, but we will dedicate // each to the high and low words of a uint32_t static_assert(GPU_MAX_SELECTION_K <= 65536, ""); uint32_t curProbe = i / k; uint32_t curK = i % k; uint32_t index = (curProbe << 16) | (curK & (uint32_t) 0xffff); int listId = listIds[queryId][curProbe]; if (listId != -1) { // Adjust the value we are selecting based on the sorting order heap.addThreadQ(distanceBase[i] * adj, index); } heap.checkThreadQ(); } // Handle warp divergence separately if (i < num) { uint32_t curProbe = i / k; uint32_t curK = i % k; uint32_t index = (curProbe << 16) | (curK & (uint32_t) 0xffff); int listId = listIds[queryId][curProbe]; if (listId != -1) { heap.addThreadQ(distanceBase[i] * adj, index); } } // Merge all final results heap.reduce(); for (int i = threadIdx.x; i < k; i += blockDim.x) { // Re-adjust the value we are selecting based on the sorting order distanceOut[queryId][i] = smemK[i] * adj; auto packedIndex = smemV[i]; // We need to remap to the user-provided indices Index::idx_t index = -1; // We may not have at least k values to return; in this function, max uint32 // is our sentinel value if (packedIndex != kMaxUInt32) { uint32_t curProbe = packedIndex >> 16; uint32_t curK = packedIndex & 0xffff; int listId = listIds[queryId][curProbe]; int listOffset = indicesIn[queryId][curProbe][curK]; if (opt == INDICES_32_BIT) { index = (Index::idx_t) ((int*) listIndices[listId])[listOffset]; } else if (opt == INDICES_64_BIT) { index = ((Index::idx_t*) listIndices[listId])[listOffset]; } else { index = ((Index::idx_t) listId << 32 | (Index::idx_t) listOffset); } } indicesOut[queryId][i] = index; } } void runIVFInterleavedScan2(Tensor<float, 3, true>& distanceIn, Tensor<int, 3, true>& indicesIn, Tensor<int, 2, true>& listIds, int k, thrust::device_vector<void*>& 
listIndices, IndicesOptions indicesOptions, bool dir, Tensor<float, 2, true>& distanceOut, Tensor<Index::idx_t, 2, true>& indicesOut, cudaStream_t stream) { #define IVF_SCAN_2(THREADS, NUM_WARP_Q, NUM_THREAD_Q) \ ivfInterleavedScan2<THREADS, NUM_WARP_Q, NUM_THREAD_Q> \ <<<distanceIn.getSize(0), THREADS, 0, stream>>>( \ distanceIn, \ indicesIn, \ listIds, \ k, \ listIndices.data().get(), \ indicesOptions, \ dir, \ distanceOut, \ indicesOut) if (k == 1) { IVF_SCAN_2(128, 1, 1); } else if (k <= 32) { IVF_SCAN_2(128, 32, 2); } else if (k <= 64) { IVF_SCAN_2(128, 64, 3); } else if (k <= 128) { IVF_SCAN_2(128, 128, 3); } else if (k <= 256) { IVF_SCAN_2(128, 256, 4); } else if (k <= 512) { IVF_SCAN_2(128, 512, 8); } else if (k <= 1024) { IVF_SCAN_2(128, 1024, 8); } #if GPU_MAX_SELECTION_K >= 2048 else if (k <= 2048) { IVF_SCAN_2(64, 2048, 8); } #endif } void runIVFInterleavedScan(Tensor<float, 2, true>& queries, Tensor<int, 2, true>& listIds, thrust::device_vector<void*>& listData, thrust::device_vector<void*>& listIndices, IndicesOptions indicesOptions, thrust::device_vector<int>& listLengths, int k, faiss::MetricType metric, bool useResidual, Tensor<float, 3, true>& residualBase, GpuScalarQuantizer* scalarQ, // output Tensor<float, 2, true>& outDistances, // output Tensor<Index::idx_t, 2, true>& outIndices, GpuResources* res) { // caught for exceptions at a higher level FAISS_ASSERT(k <= GPU_MAX_SELECTION_K); if (k == 1) { IVF_INTERLEAVED_CALL(1); } else if (k <= 32) { IVF_INTERLEAVED_CALL(32); } else if (k <= 64) { IVF_INTERLEAVED_CALL(64); } else if (k <= 128) { IVF_INTERLEAVED_CALL(128); } else if (k <= 256) { IVF_INTERLEAVED_CALL(256); } else if (k <= 512) { IVF_INTERLEAVED_CALL(512); } else if (k <= 1024) { IVF_INTERLEAVED_CALL(1024); } #if GPU_MAX_SELECTION_K >= 2048 else if (k <= 2048) { IVF_INTERLEAVED_CALL(2048); } #endif } } } // namespace
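ivfInterleavedScan2 carries each candidate through the block-wide selection as a single uint32_t key: the probe id in the high 16 bits, the rank inside that probe's list in the low 16, with kMaxUInt32 as the "no result" sentinel, and the ascending/descending choice folded into a sign flip on the distances. Just the packing, written out as a hedged pair of helpers (the names are mine, not FAISS's):

#include <cuda_runtime.h>
#include <cstdint>

// Pack and unpack a (probe, rank) pair into one 32-bit key. Both fields must
// fit in 16 bits, mirroring the static_assert(GPU_MAX_SELECTION_K <= 65536)
// in the kernel above.
__host__ __device__ inline uint32_t pack_probe_rank(uint32_t probe, uint32_t rank) {
    return (probe << 16) | (rank & 0xffffu);
}

__host__ __device__ inline void unpack_probe_rank(uint32_t packed,
                                                  uint32_t& probe, uint32_t& rank) {
    probe = packed >> 16;
    rank  = packed & 0xffffu;
}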
1405a0c8516067b2bcf10b7596fc11a370f739e5.hip
// !!! This is a file automatically generated by hipify!!! #include <cudf/copying.hpp> #include <cudf/join.hpp> #include <cudf/table/table_view.hpp> #include <cudf/table/table.hpp> #include <cudf/utilities/error.hpp> #include <cudf/column/column_factories.hpp> #include <cudf/detail/gather.hpp> #include <hash/concurrent_unordered_map.cuh> #include <join/join_common_utils.hpp> #include <cudf/detail/gather.cuh> #include <join/hash_join.cuh> namespace cudf { namespace experimental { namespace detail { /** * @brief Performs a left semi or anti join on the specified columns of two * tables (left, right) * * The semi and anti joins only return data from the left table. A left semi join * returns rows that exist in the right table, a left anti join returns rows * that do not exist in the right table. * * The basic approach is to create a hash table containing the contents of the right * table and then select only rows that exist (or don't exist) to be included in * the return set. * * @throws cudf::logic_error if number of columns in either `left` or `right` table is 0 * @throws cudf::logic_error if number of returned columns is 0 * @throws cudf::logic_error if number of elements in `right_on` and `left_on` are not equal * * @param[in] left The left table * @param[in] right The right table * @param[in] left_on The column indices from `left` to join on. * The column from `left` indicated by `left_on[i]` * will be compared against the column from `right` * indicated by `right_on[i]`. * @param[in] right_on The column indices from `right` to join on. * The column from `right` indicated by `right_on[i]` * will be compared against the column from `left` * indicated by `left_on[i]`. * @param[in] return_columns A vector of column indices from `left` to * include in the returned table. * @param[in] mr Device memory resource to use for device memory allocation * @param[in] stream Cuda stream * @tparam join_kind Indicates whether to do LEFT_SEMI_JOIN or LEFT_ANTI_JOIN * * @returns Result of joining `left` and `right` tables on the columns * specified by `left_on` and `right_on`. The resulting table * will contain `return_columns` from `left` that match in right. 
*/ template <join_kind JoinKind> std::unique_ptr<cudf::experimental::table> left_semi_anti_join(cudf::table_view const& left, cudf::table_view const& right, std::vector<cudf::size_type> const& left_on, std::vector<cudf::size_type> const& right_on, std::vector<cudf::size_type> const& return_columns, rmm::mr::device_memory_resource* mr = rmm::mr::get_default_resource(), hipStream_t stream = 0) { CUDF_EXPECTS (0 != left.num_columns(), "Left table is empty"); CUDF_EXPECTS (0 != right.num_columns(), "Right table is empty"); CUDF_EXPECTS (left_on.size() == right_on.size(), "Mismatch in number of columns to be joined on"); if (0 == return_columns.size()) { return experimental::empty_like(left.select(return_columns)); } if (is_trivial_join(left, right, left_on, right_on, JoinKind)) { return experimental::empty_like(left.select(return_columns)); } if ((join_kind::LEFT_ANTI_JOIN == JoinKind) && (0 == right.num_rows())) { // Everything matches, just copy the proper columns from the left table return std::make_unique<experimental::table>(left.select(return_columns), stream, mr); } // Only care about existence, so we'll use an unordered map (other joins need a multimap) using hash_table_type = concurrent_unordered_map<cudf::size_type, bool8, row_hash, row_equality>; // Create hash table containing all keys found in right table auto right_rows_d = table_device_view::create(right.select(right_on), stream); size_t const hash_table_size = compute_hash_table_size(right.num_rows()); row_hash hash_build{*right_rows_d}; row_equality equality_build{*right_rows_d, *right_rows_d}; // Going to join it with left table auto left_rows_d = table_device_view::create(left.select(left_on), stream); row_hash hash_probe{*left_rows_d}; row_equality equality_probe{*left_rows_d, *right_rows_d}; auto hash_table_ptr = hash_table_type::create(hash_table_size, std::numeric_limits<bool8>::max(), std::numeric_limits<cudf::size_type>::max(), hash_build, equality_build); auto hash_table = *hash_table_ptr; thrust::for_each_n(rmm::exec_policy(stream)->on(stream), thrust::make_counting_iterator<size_type>(0), right.num_rows(), [hash_table] __device__ (size_type idx) mutable { hash_table.insert(thrust::make_pair(idx, true)); }); // // Now we have a hash table, we need to iterate over the rows of the left table // and check to see if they are contained in the hash table // // For semi join we want contains to be true, for anti join we want contains to be false bool join_type_boolean = (JoinKind == join_kind::LEFT_SEMI_JOIN); rmm::device_vector<size_type> gather_map(left.num_rows()); // gather_map_end will be the end of valid data in gather_map auto gather_map_end = thrust::copy_if(rmm::exec_policy(stream)->on(stream), thrust::make_counting_iterator<size_type>(0), thrust::make_counting_iterator<size_type>(left.num_rows()), gather_map.begin(), [hash_table, join_type_boolean, hash_probe, equality_probe] __device__ (size_type idx) { auto pos = hash_table.find(idx, hash_probe, equality_probe); return (pos != hash_table.end()) == join_type_boolean; }); return cudf::experimental::detail::gather(left.select(return_columns), gather_map.begin(), gather_map_end, false, mr); } } // detail std::unique_ptr<cudf::experimental::table> left_semi_join(cudf::table_view const& left, cudf::table_view const& right, std::vector<cudf::size_type> const& left_on, std::vector<cudf::size_type> const& right_on, std::vector<cudf::size_type> const& return_columns, rmm::mr::device_memory_resource* mr) { return 
detail::left_semi_anti_join<detail::join_kind::LEFT_SEMI_JOIN>(left, right, left_on, right_on, return_columns, mr, 0); } std::unique_ptr<cudf::experimental::table> left_anti_join(cudf::table_view const& left, cudf::table_view const& right, std::vector<cudf::size_type> const& left_on, std::vector<cudf::size_type> const& right_on, std::vector<cudf::size_type> const& return_columns, rmm::mr::device_memory_resource* mr) { return detail::left_semi_anti_join<detail::join_kind::LEFT_ANTI_JOIN>(left, right, left_on, right_on, return_columns, mr, 0); } } // experimental } // cudf
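// A minimal host-side sketch (illustrative names and data only) of the gather-map
// pattern used in left_semi_anti_join above: thrust::copy_if over a counting iterator
// keeps the indices of left rows whose lookup result matches the join kind, and those
// indices then drive a gather of the requested columns. The `keys[idx] == 5` check is a
// stand-in for the real hash_table.find() probe.
#include <thrust/copy.h>
#include <thrust/execution_policy.h>
#include <thrust/iterator/counting_iterator.h>
#include <cstdio>
#include <vector>

int main() {
    std::vector<int> left_keys = {0, 5, 7, 5, 9};   // stand-in for the probed left rows
    std::vector<int> gather_map(left_keys.size());
    const bool keep_matches = true;                 // true -> semi join, false -> anti join
    const int* keys = left_keys.data();
    auto gather_map_end = thrust::copy_if(
        thrust::host,
        thrust::make_counting_iterator<int>(0),
        thrust::make_counting_iterator<int>(static_cast<int>(left_keys.size())),
        gather_map.begin(),
        [keys, keep_matches](int idx) {
            const bool found = (keys[idx] == 5);    // stand-in for hash_table.find(...)
            return found == keep_matches;
        });
    for (auto it = gather_map.begin(); it != gather_map_end; ++it) {
        std::printf("keep left row %d\n", *it);
    }
    return 0;
}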
1405a0c8516067b2bcf10b7596fc11a370f739e5.cu
#include <cudf/copying.hpp> #include <cudf/join.hpp> #include <cudf/table/table_view.hpp> #include <cudf/table/table.hpp> #include <cudf/utilities/error.hpp> #include <cudf/column/column_factories.hpp> #include <cudf/detail/gather.hpp> #include <hash/concurrent_unordered_map.cuh> #include <join/join_common_utils.hpp> #include <cudf/detail/gather.cuh> #include <join/hash_join.cuh> namespace cudf { namespace experimental { namespace detail { /** * @brief Performs a left semi or anti join on the specified columns of two * tables (left, right) * * The semi and anti joins only return data from the left table. A left semi join * returns rows that exist in the right table, a left anti join returns rows * that do not exist in the right table. * * The basic approach is to create a hash table containing the contents of the right * table and then select only rows that exist (or don't exist) to be included in * the return set. * * @throws cudf::logic_error if number of columns in either `left` or `right` table is 0 * @throws cudf::logic_error if number of returned columns is 0 * @throws cudf::logic_error if number of elements in `right_on` and `left_on` are not equal * * @param[in] left The left table * @param[in] right The right table * @param[in] left_on The column indices from `left` to join on. * The column from `left` indicated by `left_on[i]` * will be compared against the column from `right` * indicated by `right_on[i]`. * @param[in] right_on The column indices from `right` to join on. * The column from `right` indicated by `right_on[i]` * will be compared against the column from `left` * indicated by `left_on[i]`. * @param[in] return_columns A vector of column indices from `left` to * include in the returned table. * @param[in] mr Device memory resource to use for device memory allocation * @param[in] stream Cuda stream * @tparam join_kind Indicates whether to do LEFT_SEMI_JOIN or LEFT_ANTI_JOIN * * @returns Result of joining `left` and `right` tables on the columns * specified by `left_on` and `right_on`. The resulting table * will contain `return_columns` from `left` that match in right. 
*/ template <join_kind JoinKind> std::unique_ptr<cudf::experimental::table> left_semi_anti_join(cudf::table_view const& left, cudf::table_view const& right, std::vector<cudf::size_type> const& left_on, std::vector<cudf::size_type> const& right_on, std::vector<cudf::size_type> const& return_columns, rmm::mr::device_memory_resource* mr = rmm::mr::get_default_resource(), cudaStream_t stream = 0) { CUDF_EXPECTS (0 != left.num_columns(), "Left table is empty"); CUDF_EXPECTS (0 != right.num_columns(), "Right table is empty"); CUDF_EXPECTS (left_on.size() == right_on.size(), "Mismatch in number of columns to be joined on"); if (0 == return_columns.size()) { return experimental::empty_like(left.select(return_columns)); } if (is_trivial_join(left, right, left_on, right_on, JoinKind)) { return experimental::empty_like(left.select(return_columns)); } if ((join_kind::LEFT_ANTI_JOIN == JoinKind) && (0 == right.num_rows())) { // Everything matches, just copy the proper columns from the left table return std::make_unique<experimental::table>(left.select(return_columns), stream, mr); } // Only care about existence, so we'll use an unordered map (other joins need a multimap) using hash_table_type = concurrent_unordered_map<cudf::size_type, bool8, row_hash, row_equality>; // Create hash table containing all keys found in right table auto right_rows_d = table_device_view::create(right.select(right_on), stream); size_t const hash_table_size = compute_hash_table_size(right.num_rows()); row_hash hash_build{*right_rows_d}; row_equality equality_build{*right_rows_d, *right_rows_d}; // Going to join it with left table auto left_rows_d = table_device_view::create(left.select(left_on), stream); row_hash hash_probe{*left_rows_d}; row_equality equality_probe{*left_rows_d, *right_rows_d}; auto hash_table_ptr = hash_table_type::create(hash_table_size, std::numeric_limits<bool8>::max(), std::numeric_limits<cudf::size_type>::max(), hash_build, equality_build); auto hash_table = *hash_table_ptr; thrust::for_each_n(rmm::exec_policy(stream)->on(stream), thrust::make_counting_iterator<size_type>(0), right.num_rows(), [hash_table] __device__ (size_type idx) mutable { hash_table.insert(thrust::make_pair(idx, true)); }); // // Now we have a hash table, we need to iterate over the rows of the left table // and check to see if they are contained in the hash table // // For semi join we want contains to be true, for anti join we want contains to be false bool join_type_boolean = (JoinKind == join_kind::LEFT_SEMI_JOIN); rmm::device_vector<size_type> gather_map(left.num_rows()); // gather_map_end will be the end of valid data in gather_map auto gather_map_end = thrust::copy_if(rmm::exec_policy(stream)->on(stream), thrust::make_counting_iterator<size_type>(0), thrust::make_counting_iterator<size_type>(left.num_rows()), gather_map.begin(), [hash_table, join_type_boolean, hash_probe, equality_probe] __device__ (size_type idx) { auto pos = hash_table.find(idx, hash_probe, equality_probe); return (pos != hash_table.end()) == join_type_boolean; }); return cudf::experimental::detail::gather(left.select(return_columns), gather_map.begin(), gather_map_end, false, mr); } } // detail std::unique_ptr<cudf::experimental::table> left_semi_join(cudf::table_view const& left, cudf::table_view const& right, std::vector<cudf::size_type> const& left_on, std::vector<cudf::size_type> const& right_on, std::vector<cudf::size_type> const& return_columns, rmm::mr::device_memory_resource* mr) { return 
detail::left_semi_anti_join<detail::join_kind::LEFT_SEMI_JOIN>(left, right, left_on, right_on, return_columns, mr, 0); } std::unique_ptr<cudf::experimental::table> left_anti_join(cudf::table_view const& left, cudf::table_view const& right, std::vector<cudf::size_type> const& left_on, std::vector<cudf::size_type> const& right_on, std::vector<cudf::size_type> const& return_columns, rmm::mr::device_memory_resource* mr) { return detail::left_semi_anti_join<detail::join_kind::LEFT_ANTI_JOIN>(left, right, left_on, right_on, return_columns, mr, 0); } } // experimental } // cudf
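// A minimal plain-C++ sketch of the selection logic described in the doc comment above:
// build a set of the right table's join keys, then keep a left row when its membership
// in that set matches the join kind (present for a semi join, absent for an anti join).
// The integer keys below are illustrative; the real implementation hashes whole rows.
#include <cstdio>
#include <unordered_set>
#include <vector>

int main() {
    std::vector<int> left  = {1, 2, 3, 4, 5};
    std::vector<int> right = {2, 4, 6};
    std::unordered_set<int> right_keys(right.begin(), right.end());
    const bool semi = true;  // true -> LEFT_SEMI_JOIN, false -> LEFT_ANTI_JOIN
    for (std::size_t i = 0; i < left.size(); ++i) {
        const bool found = right_keys.count(left[i]) != 0;
        if (found == semi) std::printf("keep left row %zu (key %d)\n", i, left[i]);
    }
    return 0;
}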
838593c938e323f75f678f60d556f470f3a23b9d.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "warmup.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; int *out = NULL; hipMalloc(&out, XSIZE*YSIZE); int N = XSIZE*YSIZE; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( warmup), dim3(gridBlock),dim3(threadBlock), 0, 0, out,N); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( warmup), dim3(gridBlock),dim3(threadBlock), 0, 0, out,N); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( warmup), dim3(gridBlock),dim3(threadBlock), 0, 0, out,N); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
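// A minimal sketch of the grid sizing performed above: the two while loops round each
// axis up to the next multiple of the block size, which is equivalent to a ceiling
// division per axis. blocksForAxis is an illustrative helper, not part of the file above.
constexpr int blocksForAxis(int size, int block) {
    return (size + block - 1) / block;  // number of blocks needed to cover `size`
}

static_assert(blocksForAxis(240, 16) == 15, "exact multiple: 240 = 15 * 16");
static_assert(blocksForAxis(1016, 24) == 43, "1016 rounds up to 1032 = 43 * 24");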
838593c938e323f75f678f60d556f470f3a23b9d.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "warmup.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; int *out = NULL; cudaMalloc(&out, XSIZE*YSIZE); int N = XSIZE*YSIZE; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); warmup<<<gridBlock,threadBlock>>>(out,N); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { warmup<<<gridBlock,threadBlock>>>(out,N); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { warmup<<<gridBlock,threadBlock>>>(out,N); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
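// A minimal sketch of an alternative timing approach (not what the generated benchmark
// above does): CUDA events recorded on the default stream bracket the launch loop, so a
// single cudaEventSynchronize suffices before reading the elapsed time. It assumes the
// `warmup(int*, int)` kernel from warmup.cu included above; timeWarmupWithEvents is an
// illustrative helper name.
#include <cuda_runtime.h>

float timeWarmupWithEvents(dim3 gridBlock, dim3 threadBlock, int* out, int N, int iters) {
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start);
    for (int i = 0; i < iters; ++i) {
        warmup<<<gridBlock, threadBlock>>>(out, N);
    }
    cudaEventRecord(stop);
    cudaEventSynchronize(stop);
    float ms = 0.0f;
    cudaEventElapsedTime(&ms, start, stop);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    return 1000.0f * ms / iters;  // microseconds per launch
}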
ebcb180e454c151ab2549a40c38e7e95cfe11bd4.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" extern "C" #define n (3) #define qPoints (58) #define qPolygons (96) __global__ void obtainPolygonsDOUBLE(int* dev_S,int* dev_polygonToFillX,int* dev_polygonToFillY,int* dev_shipLocationX, int* dev_shipLocationZ,double* dev_matrixC,int* dev_points,int* dev_polygons,int* dev_normals,const int N) { int j = threadIdx.x; if (j<N){ double observerCoord[3*qPoints]; for (int i = 0; i < qPoints; i++) { observerCoord[3*i] = (dev_points[3*i]+dev_shipLocationX[j]) * dev_matrixC[0] + dev_points[3*i+1] * dev_matrixC[1] + (dev_points[3*i+2]+dev_shipLocationZ[j]) * dev_matrixC[2] +dev_matrixC[3]; observerCoord[3*i+1] = (dev_points[3*i]+dev_shipLocationX[j]) * dev_matrixC[4] + dev_points[3*i+1] * dev_matrixC[5] + (dev_points[3*i+2]+dev_shipLocationZ[j]) * dev_matrixC[6] + dev_matrixC[7]; observerCoord[3*i+2] = (dev_points[3*i]+dev_shipLocationX[j]) * dev_matrixC[8] + dev_points[3*i+1] * dev_matrixC[9] + (dev_points[3*i+2]+dev_shipLocationZ[j]) * dev_matrixC[10] + dev_matrixC[11]; } int xp[qPoints]; int yp[qPoints]; for (int i = 0; i < qPoints; i++) { xp[i] = (int) (4.35f * 160 * 0.6f * observerCoord[3*i+2] / observerCoord[3*i]) + 350; yp[i] = -(int) (4.35f * 160 * 0.6f * observerCoord[3*i+1] / observerCoord[3*i]) + 350; } for (int i = 0; i < qPolygons; i++) { if ( ( (dev_normals[3*i] * dev_matrixC[0] + dev_normals[3*i+1] * dev_matrixC[1] + dev_normals[3*i+2] * dev_matrixC[2]) * observerCoord[3*dev_polygons[3*i]] + (dev_normals[3*i] * dev_matrixC[4] + dev_normals[3*i+1]* dev_matrixC[5] + dev_normals[3*i+2] * dev_matrixC[6]) * observerCoord[3*dev_polygons[3*i]+1] + (dev_normals[3*i] * dev_matrixC[8] + dev_normals[3*i+1] * dev_matrixC[9] + dev_normals[3*i+2] * dev_matrixC[10]) * observerCoord[3*dev_polygons[3*i]+2])<0){ dev_S[j*qPolygons+i]=1; } else { dev_S[j*qPolygons+i]=0; } } for (int i = 0; i < qPolygons; i++) { dev_polygonToFillX[j*qPolygons*n+3*i] = xp[dev_polygons[3*i]]; dev_polygonToFillX[j*qPolygons*n+3*i+1] = xp[dev_polygons[3*i+1]]; dev_polygonToFillX[j*qPolygons*n+3*i+2] = xp[dev_polygons[3*i+2]]; dev_polygonToFillY[j*qPolygons*n+3*i] = yp[dev_polygons[3*i]]; dev_polygonToFillY[j*qPolygons*n+3*i+1] = yp[dev_polygons[3*i+1]]; dev_polygonToFillY[j*qPolygons*n+3*i+2] = yp[dev_polygons[3*i+2]]; } } }
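// A minimal standalone sketch of the screen projection used in the kernel above:
// view-space y and z are divided by the depth component (x in this frame), scaled by the
// fixed factor 4.35f * 160 * 0.6f, and offset by 350 pixels toward the screen centre.
// The sample point below is an arbitrary illustration, not data from the kernel.
#include <cstdio>

int main() {
    const double p[3] = {10.0, 1.0, 2.0};    // observer-frame point, p[0] is the depth
    const double scale = 4.35 * 160 * 0.6;   // same constant as in the kernel
    const int xp = (int)(scale * p[2] / p[0]) + 350;
    const int yp = -(int)(scale * p[1] / p[0]) + 350;
    std::printf("projected to (%d, %d)\n", xp, yp);
    return 0;
}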
ebcb180e454c151ab2549a40c38e7e95cfe11bd4.cu
extern "C" #define n (3) #define qPoints (58) #define qPolygons (96) __global__ void obtainPolygonsDOUBLE(int* dev_S,int* dev_polygonToFillX,int* dev_polygonToFillY,int* dev_shipLocationX, int* dev_shipLocationZ,double* dev_matrixC,int* dev_points,int* dev_polygons,int* dev_normals,const int N) { int j = threadIdx.x; if (j<N){ double observerCoord[3*qPoints]; for (int i = 0; i < qPoints; i++) { observerCoord[3*i] = (dev_points[3*i]+dev_shipLocationX[j]) * dev_matrixC[0] + dev_points[3*i+1] * dev_matrixC[1] + (dev_points[3*i+2]+dev_shipLocationZ[j]) * dev_matrixC[2] +dev_matrixC[3]; observerCoord[3*i+1] = (dev_points[3*i]+dev_shipLocationX[j]) * dev_matrixC[4] + dev_points[3*i+1] * dev_matrixC[5] + (dev_points[3*i+2]+dev_shipLocationZ[j]) * dev_matrixC[6] + dev_matrixC[7]; observerCoord[3*i+2] = (dev_points[3*i]+dev_shipLocationX[j]) * dev_matrixC[8] + dev_points[3*i+1] * dev_matrixC[9] + (dev_points[3*i+2]+dev_shipLocationZ[j]) * dev_matrixC[10] + dev_matrixC[11]; } int xp[qPoints]; int yp[qPoints]; for (int i = 0; i < qPoints; i++) { xp[i] = (int) (4.35f * 160 * 0.6f * observerCoord[3*i+2] / observerCoord[3*i]) + 350; yp[i] = -(int) (4.35f * 160 * 0.6f * observerCoord[3*i+1] / observerCoord[3*i]) + 350; } for (int i = 0; i < qPolygons; i++) { if ( ( (dev_normals[3*i] * dev_matrixC[0] + dev_normals[3*i+1] * dev_matrixC[1] + dev_normals[3*i+2] * dev_matrixC[2]) * observerCoord[3*dev_polygons[3*i]] + (dev_normals[3*i] * dev_matrixC[4] + dev_normals[3*i+1]* dev_matrixC[5] + dev_normals[3*+2] * dev_matrixC[6]) * observerCoord[3*dev_polygons[3*i]+1] + (dev_normals[3*i] * dev_matrixC[8] + dev_normals[3*i+1] * dev_matrixC[9] + dev_normals[3*i+2] * dev_matrixC[10]) * observerCoord[3*dev_polygons[3*i]+2])<0){ dev_S[j*qPolygons+i]=1; } else { dev_S[j*qPolygons+i]=0; } } for (int i = 0; i < qPolygons; i++) { dev_polygonToFillX[j*qPolygons*n+3*i] = xp[dev_polygons[3*i]]; dev_polygonToFillX[j*qPolygons*n+3*i+1] = xp[dev_polygons[3*i+1]]; dev_polygonToFillX[j*qPolygons*n+3*i+2] = xp[dev_polygons[3*i+2]]; dev_polygonToFillY[j*qPolygons*n+3*i] = yp[dev_polygons[3*i]]; dev_polygonToFillY[j*qPolygons*n+3*i+1] = yp[dev_polygons[3*i+1]]; dev_polygonToFillY[j*qPolygons*n+3*i+2] = yp[dev_polygons[3*i+2]]; } } }
5280268e242cb10169a02c2675e7375155a417f8.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime.h> #include <math_functions.h> #include "./inc/chealpix.h" #include "./inc/chealpix.cu" #include "./inc/rates.cu" //__device__ float energy[FREQ_BIN_NUM] = {16.74, 24.65, 34.49, 52.06}; //__device__ float energy[FREQ_BIN_NUM] = {13.60, 24.65, 34.49, 52.06}; //float gfn[FREQ_BIN_NUM] = {1.0, 0.0, 0.0, 0.0}; //float gfn[FREQ_BIN_NUM] = { 0.5, 0.5, 0.0, 0.0}; //float gfn[FREQ_BIN_NUM] = { 0.25, 0.25, 0.25, 0.25}; //float gfn[FREQ_BIN_NUM] = { 0.277, 0.335, 0.2, 0.188}; //__device__ float gfn[FREQ_BIN_NUM] = { 0.277, 0.335, 0.2, 0.188}; //__device__ float gfn[FREQ_BIN_NUM] = { 0.465, 0.335, 0.2, 0.0}; // RAMSES //__device__ float energy[FREQ_BIN_NUM] = {1.440E+01, 1.990E+01, 3.508E+01, 3.508E+01}; __device__ float energy[FREQ_BIN_NUM] = {13.60, 24.60, 35.08, 35.08}; float gfn[FREQ_BIN_NUM] = { 0.414, 0.586, 0.0, 0.0}; //__device__ float energy[FREQ_BIN_NUM] = {17.00, 17.00, 17.00, 17.00}; //float gfn[FREQ_BIN_NUM] = {1.0, 0.0, 0.0, 0.0}; //__device__ float gfn[FREQ_BIN_NUM] = {1.0, 0.0, 0.0, 0.0}; __device__ float time(float redshift) { float h = 0.6711; float h0 = h*3.246753e-18; float omegam = 0.3; float yrtos = 3.15569e7; float time = 2.*powf((1. + redshift), -3. / 2.) / (3.*h0*powf(omegam, 0.5)); time = time / (yrtos*1.e6); return time; } __device__ float redshift(float time) { float h = 0.6711; float h0 = h*3.246753e-18; float omegam = 0.3; float yrtos = 3.15569e7; time = time*yrtos*1.e6; float redshift = powf((3.*h0*powf(omegam, 0.5)*time / 2.), -2. / 3.) - 1.; return redshift; } /*inline __device__ void sigma(float sig[][FREQ_BIN_NUM]) { sig[0][0] = 3.4630495932136023e-18; // 17.0 eV sig[0][1] = 0.0; // 10.0 eV sig[0][2] = 0.0; // 10.0 eV sig[0][3] = 0.0; // 10.0 eV sig[1][0] = 0.0; // 17.0 eV sig[1][1] = 0.0; // 10.0 eV sig[1][2] = 0.0; // 10.0 eV sig[1][3] = 0.0; // 10.0 eV }*/ /*inline __device__ void sigma(float sig[][FREQ_BIN_NUM]) { sig[0][0] = 6.2998764713e-18; // 13.6 eV sig[0][1] = 1.23034961639e-18; // 24.65 eV sig[0][2] = 4.70442276047e-19; // 34.49 eV sig[0][3] = 1.40170632038e-19; // 52.06 eV sig[1][0] = 0; // 16.74 eV sig[1][1] = 7.77983571966e-18; // 24.65 eV sig[1][2] = 4.20352986507e-18; // 34.49 eV sig[1][3] = 1.90620549625e-18; // 52.06 eV //sig[1][1] = 0; // 24.65 eV //sig[1][2] = 0; // 34.49 eV }*/ // RAMSES inline __device__ void sigma(float sig[][FREQ_BIN_NUM]) { sig[0][0] = 3.007e-18; // 13.6 eV sig[0][1] = 5.687e-19; // 24.65 eV sig[0][2] = 7.889e-20; // 34.49 eV sig[0][3] = 1.0; // 52.06 eV sig[1][0] = 0; // 16.74 eV sig[1][1] = 4.478e-18; // 24.65 eV sig[1][2] = 1.197e-18; // 34.49 eV sig[1][3] = 1.0; // 52.06 eV //sig[1][1] = 0; // 24.65 eV //sig[1][2] = 0; // 34.49 eV } /*inline __device__ void sigma(float sig[][FREQ_BIN_NUM]) { sig[0][0] = 1.23034961639e-18; // 13.6 eV sig[0][1] = 1.23034961639e-18; // 24.65 eV sig[0][2] = 4.70442276047e-19; // 34.49 eV sig[0][3] = 4.70442276047e-19; // 52.06 eV sig[1][0] = 0; // 16.74 eV sig[1][1] = 7.77983571966e-18; // 24.65 eV sig[1][2] = 4.20352986507e-18; // 34.49 eV sig[1][3] = 1.90620549625e-18; // 52.06 eV //sig[1][1] = 0; // 24.65 eV //sig[1][2] = 0; // 34.49 eV }*/ __device__ static float atomicMax(float* address, float val) { int* address_as_i = (int*) address; int old = *address_as_i, assumed; do { assumed = old; old = ::atomicCAS(address_as_i, assumed, __float_as_int(::fmaxf(val, __int_as_float(assumed)))); } while (assumed != old); return __int_as_float(old); } __device__ void step_bdf(float* yout, float* y, float* dGdx, 
float n, float E, float dt) { float xe = (1.0-Y_P)*(1.0-y[0])+0.25*Y_P*(1.0-y[1]); float ne = n*xe; float T = E/((3./2.)*8.6173303e-5)/(1.0+xe); int CASE = 1; // Recombination rates (Black81) float al[2]; al[0] = rec_HII(T, CASE); al[1] = rec_HeII(T, CASE); // Collision Excitation float col[2]; col[0] = col_HI(T); col[1] = col_HeI(T); for(int nSpe=0; nSpe < SPECIES; nSpe++) { // Constant alpha //float source = al[nSpe]*(1.0-y[nSpe])*ne*dt + y[nSpe]; //float sink = 1.0 + (dGdx[nSpe] + col[nSpe]*ne)*dt; // Variable alpha float source = al[nSpe]*(1.0-y[nSpe])*ne*dt + y[nSpe]; float sink = 1.0 + (dGdx[nSpe] + al[nSpe]*ne + col[nSpe]*ne)*dt; // Basic, no alpha //float source = y[nSpe]; //float sink = 1.0 + (dGdx[nSpe])*dt; yout[nSpe] = source/sink; /*yout[nSpe] = y[nSpe]; yout[nSpe] += dt*(-gam[nSpe] - col[nSpe]*ne*y[nSpe]); yout[nSpe] += dt*al[nSpe]*(1.0-y[nSpe])*ne;*/ //yout[nSpe] = y[nSpe]; //yout[nSpe] += dt*(-dGdx[nSpe]*y[nSpe]); //yout[nSpe] += dt*al[nSpe]*(1.0-y[nSpe])*ne; // Quadratic formulation: /*float A = al[nSpe]*n; float B = -dGdx[nSpe] - 2.0*al[nSpe]*n; float C = al[nSpe]*n; float Anew = -A*dt; float Bnew = 1.0-dt*B; float Cnew = -y[nSpe]-C*dt; yout[nSpe] = (-Bnew + sqrt(Bnew*Bnew-4*Anew*Cnew))/(2*Anew); yout[nSpe] = MAX(yout[nSpe],1.0e-10); yout[nSpe] = MIN(yout[nSpe],1.0e0);*/ } } __device__ float dnHIdt(float* y, float* dGdx, float n, float E) { float xe = (1.0-Y_P)*(1.0-y[0])+0.25*Y_P*(1.0-y[1]); float ne = n*xe; float T = E/((3./2.)*8.6173303e-5)/(1.0+xe); int CASE = 1; // Recombination rates (Black81) float al[2]; al[0] = rec_HII(T, CASE); al[1] = rec_HeII(T, CASE); // Collision Excitation float col[2]; col[0] = col_HI(T); col[1] = col_HeI(T); int nBin=0; float x = -dGdx[nBin]*y[nBin] - col[nBin]*ne*y[nBin] + al[nBin]*(1.0-y[nBin])*ne; return x; } __device__ float lambda(float E, float* y, float n, float a) { int CASE = 1; float dEdt = 0.0; float xe = (1.0-Y_P)*(1.0-y[0])+0.25*Y_P*(1.0-y[1]); float ne = n*xe; float T = E/((3./2.)*8.6173303e-5)/(1.0+xe); float xHI = (1-Y_P)*y[0]; float xHII = (1-Y_P)*(1.0 - y[0]); float xHeI = 0.25*Y_P*y[1]; float xHeII = 0.0; //float xHeIII = 0.0; float colH = col_cool_HI(T)*ne*xHI; float colHe = col_cool_HeI(T)*ne*xHeI; float recH = rec_cool_HII(T, CASE)*ne*xHII; float recHe = rec_cool_HeII(T, CASE)*ne*xHeII; float colexH = colex_HI(T)*ne*xHI; float brem = 1.42e-27*powf(T,0.5)*ne*ne*6.242e11/n; dEdt += colH + recH + colexH; dEdt += colHe + recHe; dEdt += brem; // Adiabatic cooling: float H0 = 67.11*3.241e-20; dEdt += 3.0*H0*0.5477*powf(a, -1.5)*E; return dEdt; } __device__ float thin_source(float source, float fraction) { if(fraction < 1.e-30) { return 0.0; } else { return source/fraction; } } __global__ void timestep( double* rate, float dt, float* density, float* x_N, float* FluxArray, float* EArray, float* background, int dim, float L, float a) { int nelements=dim*dim*dim; int i0 = blockIdx.x*blockDim.x+threadIdx.x; int j0 = blockIdx.y*blockDim.y+threadIdx.y; int k0 = blockIdx.z; int ind=i0+dim*j0+dim*dim*k0; float xn[SPECIES]; float Gamma[SPECIES]; for(int nSpe=0; nSpe < SPECIES; nSpe++) { xn[nSpe] = x_N[ind + nSpe*nelements]; Gamma[nSpe] = FluxArray[ind + nSpe*nelements]; Gamma[nSpe] += background[nSpe]; // Ionization rate for each species (per Myr) } float dens = density[ind]/(a*a*a); // Baryonic number density float E = EArray[ind]; // Energy (temperature) per baryon // Subcycle loop to advance the chemistry // First we calculate the optically thin approximation of source terms: float dGdx[SPECIES]; for(int nSpe=0; nSpe<SPECIES; 
nSpe++) { dGdx[nSpe] = thin_source(Gamma[nSpe], xn[nSpe]); } // Find the max time step to advance hydrogen float dxdt; dxdt = abs(dnHIdt(xn, dGdx, dens, E)); float dx = (L/DIMX)*a; float sig = 1.111e7; // sig[0][0]*cm in a Mpc float tau = max(dens*sig*dx, 3.0); // dIdt //atomicMax(rate+1, dxdt*tau); // dnHdt //if(tau > 0.5) //atomicMax(rate, dxdt/xn[0]); // dt[ind] = dxdt/xn[0]; // Filter // Conservation calculation (rate[2]) float fDtMyr = dt*3.15e13; float xn_out[2]; float xe = (1.0-Y_P)*(1.0-xn[0])+0.25*Y_P*(1.0-xn[1]); float ne = dens*xe; float T = E/((3./2.)*8.6173303e-5)/(1.0+xe); int CASE = 1; // Recombination rates (Black81) float al[2]; al[0] = rec_HII(T, CASE); al[1] = rec_HeII(T, CASE); // Collision Excitation float col[2]; col[0] = col_HI(T); col[1] = col_HeI(T); float fRec[2], x_eq, delta_x[2]; for(int nSpe=0; nSpe < SPECIES; nSpe++) { fRec[nSpe] = al[nSpe]*(1.0-xn[nSpe])*ne;//*(a*a*a);// volume weighted // Tracking neutral float C = dGdx[nSpe] + col[nSpe]*ne; float D = al[nSpe]*(1.0-xn[nSpe])*ne; //xn_out[nSpe] = (xn[nSpe] + D*fDtMyr)/(1 + C*fDtMyr); // Tracking ionized float x_ion = 1.0 - xn[nSpe]; x_ion = (x_ion + (C-D)*fDtMyr)/(1+C*fDtMyr); xn_out[nSpe] = 1.0 - x_ion; //xn_out[nSpe] = xn[nSpe] - dGdx[nSpe]*fDtMyr; //fRec = 0.0; if(xn[nSpe] > -0.05) { xn_out[nSpe] = xn[nSpe] - Gamma[nSpe]*fDtMyr + fRec[nSpe]*fDtMyr; } //xn_out[nSpe] = xn[nSpe] - Gamma[nSpe]*fDtMyr + fRec*fDtMyr; xn_out[nSpe] = MAX(xn_out[nSpe], 1.0e-10); xn_out[nSpe] = MIN(xn_out[nSpe], 1.0); delta_x[nSpe] = xn[nSpe] - xn_out[nSpe]; } //float delta_x = xn[0] - xn_out[0]; //fRec = al[0]*(1.0-xn[0])*ne; float cell_ratio = 0.0; //cell_ratio = 1 - (delta_x + fRec*fDtMyr)/(Gamma[0]*fDtMyr); atomicAdd(rate+0, double(delta_x[0]*density[ind])); atomicAdd(rate+1, double(Gamma[0]*density[ind])); atomicAdd(rate+2, double(fRec[0]*density[ind])); atomicAdd(rate+3, double(delta_x[1]*density[ind])); atomicAdd(rate+4, double(Gamma[1]*density[ind])); atomicAdd(rate+5, double(fRec[1]*density[ind])); //atomicAdd(rate+3, count_out*density[ind]); __syncthreads(); } __global__ void ionization( float dt, float* error, float* density, float* x_N, float* FluxArray, float* EArray, float* dEArray, float* background, int dim, float a) { int nelements=dim*dim*dim; int i0 = blockIdx.x*blockDim.x+threadIdx.x; int j0 = blockIdx.y*blockDim.y+threadIdx.y; int k0 = blockIdx.z; int index=i0+dim*j0+dim*dim*k0; float fDtMyr = dt*3.15e13; // float t = time(1.0/a - 1.0); float xn[SPECIES]; float xn_out[SPECIES]; float Gamma[SPECIES]; float fCumFlux = 0; for(int nSpe=0; nSpe < SPECIES; nSpe++) { xn[nSpe] = x_N[index+nSpe*nelements]; Gamma[nSpe] = FluxArray[index+nSpe*nelements]; Gamma[nSpe] += background[nSpe]; // Ionization rate for each species (per Myr) fCumFlux += xn[3+nSpe]*fDtMyr; } float dens = density[index]/(a*a*a); // Baryonic number density float E = EArray[index]; // Energy (temperature) per baryon float dEdt = dEArray[index]; //float dEdt = dEArray[index]+background[2]+background[3]; // Subcycle loop to advance the chemistry // First we calculate the optically thin approximation of source terms: float dEdx, dGdx[SPECIES]; for(int nSpe=0; nSpe<SPECIES; nSpe++) { dGdx[nSpe] = thin_source(Gamma[nSpe], xn[nSpe]); } dEdx = thin_source(dEdt, xn[0]); E = E + dEdt*dt; //step_bdf(xn_out, xn, dGdx, dens, E, dtSub); //step_bdf(float* yout, float* y, float* dGdx, float n, float E, float dt) float xe = (1.0-Y_P)*(1.0-xn[0])+0.25*Y_P*(1.0-xn[1]); float ne = dens*xe; float T = E/((3./2.)*8.6173303e-5)/(1.0+xe); int CASE = 1; // Recombination rates 
(Black81) float al[2]; al[0] = rec_HII(T, CASE); al[1] = rec_HeII(T, CASE); // Collision Excitation float col[2]; col[0] = col_HI(T); col[1] = col_HeI(T); float fRec, x_eq; for(int nSpe=0; nSpe < SPECIES; nSpe++) { //fRec = al[nSpe]*(1.0-xn[nSpe])*ne;//*(a*a*a);// volume weighted fRec = al[nSpe]*(1.0-xn[nSpe])*ne;//*(a*a*a);// volume weighted //fRec = 0.0; /*float source = al[nSpe]*(1.0-xn[nSpe])*ne*fDtMyr + xn[nSpe]; float sink = 1.0 + (dGdx[nSpe] + col[nSpe]*ne)*fDtMyr; xn_out[nSpe] = source/sink;*/ // Tracking neutral float C = dGdx[nSpe] + col[nSpe]*ne; float D = al[nSpe]*(1.0-xn[nSpe])*ne; //xn_out[nSpe] = (xn[nSpe] + D*fDtMyr)/(1 + C*fDtMyr); // Tracking ionized float x_ion = 1.0 - xn[nSpe]; x_ion = (x_ion + (C-D)*fDtMyr)/(1+C*fDtMyr); xn_out[nSpe] = 1.0 - x_ion; //xn_out[nSpe] = xn[nSpe] - dGdx[nSpe]*fDtMyr; //fRec = 0.0; if(xn[nSpe] > 0.05) { xn_out[nSpe] = xn[nSpe] - Gamma[nSpe]*fDtMyr + fRec*fDtMyr; } //xn_out[nSpe] = xn[nSpe] - Gamma[nSpe]*fDtMyr + fRec*fDtMyr; //xn_out[nSpe] = atomicAdd(error, fRec*density[index]); //atomicAdd(error, 1); //xn_out[nBin] = xn[nBin]/(1+dGdx[nBin]*1*fDtMyr); //yout[nBin] += dt*al[nBin]*(1.0-y[nBin])*ne; } //x_eq = al[0]*(1.0-xn[0])*ne/(Gamma[0]+1.e-3); for(int nSpe=0; nSpe<SPECIES; nSpe++) { xn[nSpe] = xn_out[nSpe]; xn[nSpe] = MAX(xn[nSpe], 1.0e-10); //xn[nSpe] = MAX(xn[nSpe], x_eq); xn[nSpe] = MIN(xn[nSpe], 1.0); } __syncthreads(); for(int nSpe=0; nSpe<SPECIES; nSpe++) { x_N[index+nSpe*nelements] = xn[nSpe]; } //float xerr[SPECIES]; // Change the energy array if(E <= 0) EArray[index] = 0.0; //EArray[index] = E; else EArray[index] = E; //for(int i=0; i<SPECIES; i++) //{ // xerr[i] = xn_out[i]; //} // if(index == 1056832) // *error = xeq; // atomicMax(error, EArray[index]); // atomicAdd(error, al[nBin]*(1.0-y[nBin])*ne*dt); // __syncthreads(); } // This is here because derivs is inherently inline under CUDA architecture. 
//#include "./inc/rkck.cu" //#include "./inc/simpr.cu" __global__ void ionization1( float dt, float* error, float* density, float* x_N, float* FluxArray, float* EArray, float* dEArray, float* background, int dim, float a) { int nelements=dim*dim*dim; int i0 = blockIdx.x*blockDim.x+threadIdx.x; int j0 = blockIdx.y*blockDim.y+threadIdx.y; int k0 = blockIdx.z; int index=i0+dim*j0+dim*dim*k0; float fDtMyr = dt*3.15e13; // float t = time(1.0/a - 1.0); float xn[SPECIES]; float xn_out[SPECIES]; float Gamma[SPECIES]; float fCumFlux = 0; for(int nSpe=0; nSpe < SPECIES; nSpe++) { xn[nSpe] = x_N[index+nSpe*nelements]; Gamma[nSpe] = FluxArray[index+nSpe*nelements]; Gamma[nSpe] += background[nSpe]; // Ionization rate for each species (per Myr) fCumFlux += xn[3+nSpe]*fDtMyr; } float dens = density[index]/(a*a*a); // Baryonic number density float E = EArray[index]; // Energy (temperature) per baryon float dEdt = dEArray[index]; //float dEdt = dEArray[index]+background[2]+background[3]; float eps = 0.1; // Maximum fractional change during subcycle float fDtRem; // Remaining time in the subcycle loop // Subcycle loop to advance the chemistry // First we calculate the optically thin approximation of source terms: float dEdx, dGdx[SPECIES]; for(int nSpe=0; nSpe<SPECIES; nSpe++) { dGdx[nSpe] = thin_source(Gamma[nSpe], xn[nSpe]); } dEdx = thin_source(dEdt, xn[0]); // Recombination addition float xe = (1.0-Y_P)*(1.0-xn[0])+0.25*Y_P*(1.0-xn[1]); float ne = dens*xe; float T = E/((3./2.)*8.6173303e-5)/(1.0+xe); int CASE = 1; float al[2]; al[0] = rec_HII(T, CASE); al[1] = rec_HeII(T, CASE); float fRec; for(int nSpe=0; nSpe<SPECIES; nSpe++) { fRec = al[nSpe]*(1.0-xn[nSpe])*ne; atomicAdd(error, fRec); } // Subcycle: fDtRem = fDtMyr; while(fDtRem > 1.0) // One second { // Find the max time step to advance E float dtSubE; float Lam = lambda(E, xn, dens, a); float Heat = dEdx*xn[0]; float dEdt = Heat - Lam; float rate = abs(dEdt); if(rate < eps*E/fDtRem) { dtSubE = fDtRem; } else { dtSubE = eps*E/rate; } // Find the max time step to advance hydrogen float dtSubH; rate = abs(dnHIdt(xn, dGdx, dens, E)); if(rate < eps*0.1/fDtRem) { dtSubH = fDtRem; } else { dtSubH = eps*0.1/rate; } float dtSub = min(dtSubE, dtSubH); // Updating energy //float E1 = E + dEdt*dtSub; //float dEdt1 = dEdx*xn[0] - lambda(E1, xn, dens, a); //E = MIN(2.e4*1.29e-4, E + (dEdt+dEdt1)*dtSub/2); E = E + dEdt*dtSub; step_bdf(xn_out, xn, dGdx, dens, E, dtSub); for(int nSpe=0; nSpe<SPECIES; nSpe++) { if (xn_out[nSpe] < 0.0) { xn[nSpe] = 0.0; } else if (xn_out[nSpe] <= 1.0) { xn[nSpe] = xn_out[nSpe]; } else { xn[nSpe] = 1.0; } } fDtRem = fDtRem - dtSub; } __syncthreads(); for(int nSpe=0; nSpe<SPECIES; nSpe++) { x_N[index+nSpe*nelements] = xn[nSpe]; } //float xerr[SPECIES]; // Change the energy array if(E <= 0) EArray[index] = 0.0; //EArray[index] = E; else EArray[index] = E; //for(int i=0; i<SPECIES; i++) //{ // xerr[i] = xn_out[i]; //} // if(index == 1056832) // *error = xeq; // atomicMax(error, EArray[index]); //atomicAdd(error, fCumFlux); // __syncthreads(); } // Signum function __device__ int sign(float x) { return (x > 0) - (x < 0); } // Does the HEALPix math but gives a float __device__ void fpix2vec_nest(long n, long m, float* vec) { double temp[3]; pix2vec_nest(n, m, temp); vec[0] = (float) temp[0]; vec[1] = (float) temp[1]; vec[2] = (float) temp[2]; } // Takes position (x0) and direction(u) and takes a step along integer grid to x __device__ float raystep(float* x, int* ijk, float* x0, int* ijk0, float* u) { // Minimum projection, to prevent 
divide by 0 float eps = 1.e-10; // Length of step float dS; // Direction of movement along each axis int s[3]; for(int i=0;i<3;i++) s[i] = sign(u[i]); // Distance to nearest cell face along each axis float r[3]; for(int i=0;i<3;i++) { if(s[i] != 0) r[i] = fabsf((ijk0[i] + (s[i]+1.0)/2.0) - x0[i])/MAX(eps,fabsf(u[i])); else r[i] = 1.0/eps; } // Initialize next step for(int i=0;i<3;i++) ijk[i] = ijk0[i]; // Take the step if(r[0] <= r[1] && r[0] <= r[2]) { dS = r[0]; ijk[0] += s[0]; } if(r[1] <= r[0] && r[1] <= r[2]) { dS = r[1]; ijk[1] += s[1]; } if(r[2] <= r[0] && r[2] <= r[1]) { dS = r[2]; ijk[2] += s[2]; } for(int i=0;i<3;i++) x[i] = x0[i] + dS*u[i]; return dS; } __device__ int rayFinish(Ray *ray, int nDom, Domain domain) { if(nDom == domain.get_id()) { printf("Problem: attempting to send ray to self."); ray->set_dom(-1); return 1; } for(int dom=0; dom<8; dom++) { if(nDom == dom) { ray->set_dom(dom); return 1; } } ray->set_dom(-1); return 1; } __device__ void round_down(int * I, float * X) { I[0] = __double2int_rd(X[0]); I[1] = __double2int_rd(X[1]); I[2] = __double2int_rd(X[2]); } // Returns 1 if the ray is outside the domain __device__ int BoundaryCheck(float * X, int * I, int DIM) { for(int i=0; i<3; i++) { if( X[i] < 0 || I[i] < 0 || X[i] >= DIM || I[i] >= DIM) { return 1; } } return 0; } // For tracking rays // X and I are ray position and gridloc, vec is the direction of the ray // mode is for adjusting rays tracked by the tracer (0) or placed on the grid (1) __device__ void BoundaryAdjustOld(float * X, int * I, float* vec, int mode, int DIM) { for(int i=0; i<3; i++) { if(I[i] < 0) { X[i] += DIM; if(mode == 0) { I[i] += DIM; } else { I[i] = static_cast<int>(X[i]); } } if(I[i] >= DIM) { X[i] -= DIM; if(mode == 0) { I[i] -= DIM; } else { I[i] = static_cast<int>(X[i]); } } } } __device__ void BoundaryAdjust(float * X, int * I, float* vec, int mode, int DIM) { for(int i=0; i<3; i++) { if(I[i] < 0) { X[i] += DIM-1-1.e-3; I[i] += DIM-1; if(mode == 1) { I[i] = static_cast<int>(X[i]); } } if(I[i] >= DIM) { X[i] -= DIM-1-1.e-3; I[i] -= DIM-1; if(mode == 1) { I[i] = static_cast<int>(X[i]); } } } } /*// For new rays __device__ void BoundaryAdjust_new(float * X, int * I, int DIM) { for(int i=0; i<3; i++) { if(X[i] < 0) { X[i] += DIM; I[i] = static_cast<int>(X[i]); } if(X[i] >= DIM) { X[i] -= DIM; I[i] = static_cast<int>(X[i]); } } }*/ // This kernel traces rays until they split or end. // nGrid: number density of absorbers on the physical grid // xGrid: the neutral fraction of absorbers on the physical grid // Parts: Particles under consideration // GamGrid: rate of photon absorption on the physical grid // PixList: List of N pixels (in unique nested form) // RayDat: List of ray data in (R, tau_0, ..., tau_n) form // N0: Array of number of initial rays per particle // Nside: HEALPix parameter // L: Physical length of the side of the box // int is used because 2e9 is enough to get to HEALPix order 13 // dt: Length of the previous time step in Myr __global__ void rayTraceKernel( const float *nGrid, const float *xGrid, const source *Parts, float *GamGrid, float* dEArray, Ray *RayDat, int *N, int N0, float L, float a, float *nfSback, Domain domain, float dt) { // Determine properties of the ray to be traced: // 2+1D grid of 2D blocks. 
CHECKXXX // z dimension of grid is for particle ID // Blocks are 16x16 to fill the SM's in CC 3.5 int blockID = blockIdx.x + blockIdx.y * gridDim.x; int threadID = blockID * blockDim.x * blockDim.y + threadIdx.y * blockDim.x + threadIdx.x; // Only computing Npix rays CHECKXXX if(threadID >= N0) return; int dim = domain.get_dim(); int nElements = dim*dim*dim; int domID = domain.get_id(); int xLim0[3];//, xLim1[3]; domain.get_x0(xLim0); int d_ind = dim*dim*xLim0[2] + dim*xLim0[1] + xLim0[0]; //domain.get_x1(xLim1); Ray *ray = RayDat + threadID; int partID = ray->get_part(); int pixID, ord; ray->get_pix(&pixID, &ord); /*if(partID <0 || partID >1) printf("?!? %d\t%d\n", domID, partID);*/ int Nside = (1 << ord); int Npix = 12 * Nside * Nside; // Find direction of ray float vec[3]; fpix2vec_nest(Nside, pixID, vec); // Find position of the ray float * X; X = ray->position; int * I; I = ray->gridloc; int nDom = domain.loc(I); // Find distance to domanin wall int domID3[3]; domain.get_id3(domID3); float XR[3]; XR[0] = Parts[partID].x/dim; XR[1] = Parts[partID].y/dim; XR[2] = Parts[partID].z/dim; float r_dom = dim*raystep(XR, domID3, XR, domID3, vec); /*if(pixID < 10 && ord == 2) printf("%f for (%f, %f, %f)\n", r_dom, vec[0], vec[1], vec[2]);*/ // printf("%d\t%e\t%e\t%e\t%e\n", pID, ray[0], X[0], X[1], X[2]); // printf("%d\t%e\t%d\t%d\t%d\n", pID, ray[0], I[0], I[1], I[2]); // printf("%d\t%e\t%e\t%e\t%e\n", pID, ray[0], vec[0], vec[1], vec[2]); // Grab the cross sections float sig[SPECIES][FREQ_BIN_NUM]; sigma(sig); // Loop variables float X0[3], dR; int I0[3], ind; // Set the max distance to trace a ray float Rmax = 1.7320*DIMX; //float Rmax = 32; float Rsplit = sqrt(Npix/12.56636/OMEGA_RAY); float dcross = fabsf(Rsplit - r_dom); /*if( dcross < 2.0) Rsplit = Rsplit - 2.0;*/ while(ray->R < Rsplit) { /*if(abs(X[0]-I[0]) > 2)// This is for checking boundary conditions REMOVEXXX if(pixID == 89829) { printf("%d %d %d %f %f %f\n", I[0], I[1], I[2], X[0], X[1], X[2]); }*/ ind = I[0] + dim*I[1] + dim*dim*I[2] - d_ind; memcpy(I0, I, 3*sizeof(int)); memcpy(X0, X, 3*sizeof(float)); // Take a single step dR = raystep(X, I, X, I, vec); // Check if the ray is just outside the domain if(nDom != domID) { // Check if it come from the boundary if(PERIODIC == 1) { if(BoundaryCheck(X, I, DIMX)) { //printf("CCC %e %e %e\n", X[0], X[1], X[2]); //CHECKXXX BoundaryAdjust(X, I, vec, 0, DIMX); } } // Entered if(domain.loc(I) == domID) { ind = I[0] + dim*I[1] + dim*dim*I[2] - d_ind; memcpy(I0, I, 3*sizeof(int)); memcpy(X0, X, 3*sizeof(float)); dR += raystep(X0, I0, X0, I0, vec); } else { rayFinish(ray, nDom, domain); atomicSub(N, 1); return; } } ray->R += dR; for(int nBin=0;nBin<FREQ_BIN_NUM;nBin++) { if(isnan(ray->flux[nBin])) { int i_0 = ind/(dim*dim); int i_1 = (ind/dim) % dim; int i_2 = ind%dim; printf("Problem Back Trace! %d %d %d %e %e)\n", i_0, i_1, i_2, xGrid[ind+nElements], ray->R); } } /*if(ind < 0 || ind >= dim*dim*dim) printf("??? 
%d %d\n", domID, ind);*/ // Calculate the column densities: float dL = (dR/DIMX)*L*a; // Hydrogen float nH = nGrid[ind]*(1.0-Y_P)*1.00; float nHI = nH*MAX(1.e-10, xGrid[ind]); float NcolHI = 3.086e24*dL*pow(a,-3)*nHI; // Helium float nHe = nGrid[ind]*0.25*Y_P; float nHeI = nHe*MAX(1.e-10, xGrid[ind+nElements]); float NcolHeI = 3.086e24*dL*pow(a,-3)*nHeI; ///////// Adjacent pixel correction ////////// float fc = 1.0; float Lpix = sqrtf(12.566*ray->R*ray->R/Npix); float Dedge = Lpix/2.0; int ind_c = ind; int del=1; float D[3]; for(int i=0; i<3; i++) { float Dci; Dci = X0[i] + dR*vec[i]/2.0 - (I0[i]+0.5); Dci = Dci; if(abs(Dci) > 0.5) { //Dci = 1.0 - abs(Dci); } D[i] = Dci; float De = 0.5 - fabs(Dci); if(De < Dedge) { Dedge = De; if(Dci > 0 && I[i] + 1 < xLim0[i] + dim)//CHECKXXX { ind_c = ind + del; } else if(Dci < 0 && I[i] > xLim0[i]) { ind_c = ind - del; } } del *= dim; } if(Dedge < Lpix/2.0) fc = powf(0.5 + Dedge/Lpix, 1.0); else fc = 1.0; //if(pixID == 10096) if(Dedge < 0) { fc = 1.0; //printf("%d: %f (%d %d %d)(%f %f %f)(%f %f %f)(%f %f %f)\n", pixID, ray->R, I0[0], I0[1], I0[2], X0[0], X0[1], X0[2], vec[0], vec[1], vec[2], D[0], D[1], D[2]); } if(D[0] > 0.5 || D[1] > 0.5 || D[2] > 0.5) { fc = 1.0; } // Truncating the correction for testing purposes //fc = MAX(0.8, fc); ///////// Adjacent pixel correction ////////// float gamH = 0; float gamHe = 0; float dE = 0; ///////// Absorption limiting constants ////////// // Total flux along the ray /*double S_0=0.0; for(int nBin=0; nBin<FREQ_BIN_NUM; nBin++) { S_0 += ray->flux[nBin]; } // "density" of rays within a cell/cell face double C = OMEGA_RAY*pow(Rsplit/ray->R, 2); // Size of and number of atoms in the cell double L_cell = (1.0/DIMX)*L*a; double LANX = pow(L_cell*3.086e24, 3)*nH;//*xGrid[ind]; LANX = LANX*(dL/L_cell); // Correction for length of ray // Ionization time double T_ion = 1.0e0*LANX/C/((S_0+1.e-30)*9.5234e48); double Delta_t = dt*3.15e13; double R_ion = Delta_t/T_ion;*/ for(int nBin=0;nBin<FREQ_BIN_NUM;nBin++) { float dtau, dtauH, dtauHe, dampH, dampHe, transmit, absorb, A, B; // Hydrogen dtauH = sig[0][nBin]*NcolHI; dampH = exp(-dtauH); // Absorption limiting execution /*double dtauH_new; if(Delta_t > T_ion) { double w = ray->flux[nBin]/S_0; double dtauH_critical = -log(1.0 - MIN(0.999,w*T_ion/Delta_t) ); dtauH_new = MIN( dtauH, dtauH_critical ); } dtauH = dtauH_new;*/ /*if(T_ion < Delta_t) { dampH = MAX((1.0 - T_ion/Delta_t)/3.0, dampH); //dtauH = -log(dampH); //dampH *= (1.0 - (1.0-exp(-R_ion))/R_ion); }*/ // Helium dtauHe = sig[1][nBin]*NcolHeI; dampHe = exp(-dtauHe); // Number of absorbtions per second absorb = ray->flux[nBin]*(1.0 - dampH*dampHe); // Keep track of total flux ray->flux[nBin] *= dampH*dampHe; // Fraction absorbed by H, He dtau = dtauH + dtauHe; if(dtau < 1.e-10) { // simplify for dtau~0 float temp_H = sig[0][nBin]*nH; float temp_He = sig[1][nBin]*nHe; A = temp_H/(temp_H+temp_He); B = temp_He/(temp_H+temp_He); } else { A = dtauH/dtau; B = dtauHe/dtau; } // Add total photon counts absorb /= powf(L/DIMX,3)*3.086e24; // Unit correction gamH += fc*A*absorb/nH; gamHe += fc*B*absorb/nHe; // Add the energy up CHECKXXX dE += fc*A*MAX(energy[nBin]-13.6,0)*absorb/nH; dE += fc*B*MAX(energy[nBin]-24.6,0)*absorb/nHe; // dE += fc*(energy[nBin]-13.6)*absorb/nGrid[ind]; } dE = 0; // Update flux array atomicAdd(GamGrid + ind, gamH); atomicAdd(GamGrid + ind + nElements, gamHe); // Update Energy array atomicAdd(dEArray + ind, dE); ///////// Adjacent pixel correction ////////// float ratio = 1.0; //ratio *= 
xGrid[ind_c]/xGrid[ind]; ratio *= nGrid[ind]/nGrid[ind_c]; //float ratio = 1.0; float gamH_c = ratio*gamH*(1.0-fc)/fc; float gamHe_c = ratio*gamHe*(1.0-fc)/fc; float dE_c = ratio*dE*(1.0-fc)/fc; atomicAdd(GamGrid + ind_c, gamH_c); atomicAdd(GamGrid + ind_c + nElements, gamHe_c); atomicAdd(dEArray + ind_c, dE_c); ///////// Adjacent pixel correction ////////// // Apply boundary conditions, if required float checkX[3]; int checkI[3]; memcpy(checkX, ray->position, 3*sizeof(float)); memcpy(checkI, ray->gridloc, 3*sizeof(int)); float X2[3]; int I2[3]; memcpy(X2, ray->position, 3*sizeof(float)); memcpy(I2, ray->gridloc, 3*sizeof(int)); float f_temp[3]; int i_temp[3]; for(int i=0; i<3; i++) { f_temp[i] = X[i]; i_temp[i] = I[i]; } if(PERIODIC == 1) { if(BoundaryCheck(X, I, DIMX)) { //printf("AAA %e %e %e\n", X[0], X[1], X[2]); CHECKXXX BoundaryAdjust(X, I, vec, 0, DIMX); } } // Terminate the ray //if( ray->R > Rmax || BoundaryCheck(X, I, DIMX) || ray->flux[0] < 1.e-12) if( ray->R > Rmax || ray->flux[1] < 1.e-8) { ray->set_dom(-1); atomicSub(N, 1); for(int nBin=0; nBin<FREQ_BIN_NUM; nBin++) { atomicAdd(nfSback + nBin, ray->flux[nBin]); // CHECKXXX if(isnan(ray->flux[nBin])) { printf("Checking ray->R = %f", ray->R); printf("Problem! %d (%d %d %d) (%d %d %d)\n", nBin, i_temp[0], i_temp[1], i_temp[2], I[0], I[1], I[2]); printf("Problem! %d (%e %e %e) (%e %e %e)\n", nBin, f_temp[0], f_temp[1], f_temp[2], X[0], X[1], X[2]); } } return; } nDom = domain.loc(I); if( nDom != domID ) { rayFinish(ray, nDom, domain); atomicSub(N, 1); return; } } // Add up all the rays that don't terminate __syncthreads(); } // This kernel splits the rays into the next HEALPix level until they split or end. // PixList: List of N pixels (in unique nested form) // RayDat: List of ray data in (R, tau_0, ..., tau_n) form // N0: Number of rays // int is used because 2e9 is enough to get to HEALPix order 13 __global__ void raySplitKernel( Ray *RayDat_init, Ray *RayDat, int *nRays, int N0, Ray *RayBuf, int* nBufLoc, const source * source_dev, Domain domain) { // 2+1D grid of 2D blocks. 
// z dimension of grid is for particle ID // Blocks are 16x16 to fill the SM's in CC 3.5 int blockID = blockIdx.x + blockIdx.y * gridDim.x; int threadID = blockID * blockDim.x * blockDim.y + threadIdx.y * blockDim.x + threadIdx.x; // Only computing Npix rays CHECKXXX if(threadID >= N0) return; Ray *ray = RayDat_init + threadID; // Terminated rays if(ray->get_dom() == -1) { return; } // Split rays int rayDom = ray->get_dom(); if(rayDom == domain.get_id()) { // Get a unique ID for the (first) ray int rayID = atomicAdd(nRays, 4); int partID = ray->get_part(); int pixID, ord; ray->get_pix(&pixID, &ord); float origin[3]; origin[0] = source_dev[partID].x; origin[1] = source_dev[partID].y; origin[2] = source_dev[partID].z; // Splitting into 4 rays for(int nSplit=0; nSplit<4; nSplit++) { Ray *ray_split = RayDat + (rayID+nSplit); int new_ID = 4*pixID + nSplit; int new_ord = ord + 1; int Nside = (1 << new_ord); float direction[3]; fpix2vec_nest(Nside, new_ID, direction); ray_split->R = ray->R; ray_split->set_part(partID); ray_split->set_pix(new_ID, new_ord); float flux_init[FREQ_BIN_NUM]; for(int nBin=0; nBin<FREQ_BIN_NUM; nBin++) { flux_init[nBin] = ray->flux[nBin]/4; } ray_split->set_flux(flux_init); ray_split->set_position(origin, ray->R, direction); // Apply boundary float * rayX = ray_split->position; int * rayI = ray_split->gridloc; float * checkX = ray->position; int * checkI = ray->gridloc; int check = -1; if(new_ID == check) { printf("Placing at %d %d %d %f %f %f\n", rayI[0], rayI[1], rayI[2], rayX[0], rayX[1], rayX[2]); printf("From %d %d %d %f %f %f\n", checkI[0], checkI[1], checkI[2], checkX[0], checkX[1], checkX[2]); } if(PERIODIC == 1) { if(BoundaryCheck(rayX, rayI, DIMX)) { //printf("BBB %e %e %e\n", rayX[0], rayX[1], rayX[2]); //CHECKXXX BoundaryAdjust(rayX, rayI, direction, 1, DIMX); // mode = 1 originally } } if(new_ID == check) { printf("Now at %d %d %d %f %f %f\n", rayI[0], rayI[1], rayI[2], rayX[0], rayX[1], rayX[2]); } int splitDom = domain.loc(ray_split->gridloc); ray_split->set_dom(splitDom); // Move rays for different domains to the buffer if(splitDom != rayDom && splitDom >=0 && splitDom <8) { int nBufID = atomicAdd((nBufLoc + splitDom), 1); int pix, ord; ray_split->get_pix(&pix,&ord); RayBuf[splitDom*NUM_BUF + nBufID].copy_ray(*ray_split); int Nside2 = 1 << (2*ord); if(pix < 0 || pix > 12*Nside2) printf("SPLIT ADJUST Domain %d threadID %d N0 %d\n", domain.get_id(), threadID, N0); //printf("Split ray in wrong domain! %d: %d -> %d, (%d, %d, %d) %f\n", new_ID, rayDom, splitDom, rayI[0], rayI[1], rayI[2], ray->R); ray_split->set_dom(-1); } } // Terminate old ray ray->set_dom(-1); return; } // Buffer rays for(int dom=0; dom<8; dom++) { if(ray->get_dom() == domain.get_id()) continue; if(ray->get_dom() == dom) { // Conditional for testing REMOVEXXX int pixID, ord; ray->get_pix(&pixID, &ord); int Nside = (1 << ord); float direction[3]; fpix2vec_nest(Nside, pixID, direction); //if(direction[0]<-0.75 || direction[1]<-0.75 || direction[2]<-0.75) if(pixID >= 0) { // Copy ray into buffer int nBufID = atomicAdd((nBufLoc + dom), 1); //printf("A %d\t%d\t%d\t%d\n", domain.get_id(), dom, nBufLoc[dom], dom*NUM_BUF+nBufID); int pix, ord; ray->get_pix(&pix,&ord); int Nside2 = 1 << (2*ord); if(pix < 0 || pix > 12*Nside2) printf("NORMAL BUFFER Domain %d threadID %d N0 %d\n", domain.get_id(), threadID, N0); RayBuf[dom*NUM_BUF + nBufID].copy_ray(*ray); //ray->R = temp; } // Terminate old ray ray->set_dom(-1); return; } } printf("Couldn't find home for our ray! 
%d\n", ray->get_dom()); ray->set_dom(-1); }
5280268e242cb10169a02c2675e7375155a417f8.cu
#include <cuda.h> #include <math_functions.h> #include "./inc/chealpix.h" #include "./inc/chealpix.cu" #include "./inc/rates.cu" //__device__ float energy[FREQ_BIN_NUM] = {16.74, 24.65, 34.49, 52.06}; //__device__ float energy[FREQ_BIN_NUM] = {13.60, 24.65, 34.49, 52.06}; //float gfn[FREQ_BIN_NUM] = {1.0, 0.0, 0.0, 0.0}; //float gfn[FREQ_BIN_NUM] = { 0.5, 0.5, 0.0, 0.0}; //float gfn[FREQ_BIN_NUM] = { 0.25, 0.25, 0.25, 0.25}; //float gfn[FREQ_BIN_NUM] = { 0.277, 0.335, 0.2, 0.188}; //__device__ float gfn[FREQ_BIN_NUM] = { 0.277, 0.335, 0.2, 0.188}; //__device__ float gfn[FREQ_BIN_NUM] = { 0.465, 0.335, 0.2, 0.0}; // RAMSES //__device__ float energy[FREQ_BIN_NUM] = {1.440E+01, 1.990E+01, 3.508E+01, 3.508E+01}; __device__ float energy[FREQ_BIN_NUM] = {13.60, 24.60, 35.08, 35.08}; float gfn[FREQ_BIN_NUM] = { 0.414, 0.586, 0.0, 0.0}; //__device__ float energy[FREQ_BIN_NUM] = {17.00, 17.00, 17.00, 17.00}; //float gfn[FREQ_BIN_NUM] = {1.0, 0.0, 0.0, 0.0}; //__device__ float gfn[FREQ_BIN_NUM] = {1.0, 0.0, 0.0, 0.0}; __device__ float time(float redshift) { float h = 0.6711; float h0 = h*3.246753e-18; float omegam = 0.3; float yrtos = 3.15569e7; float time = 2.*powf((1. + redshift), -3. / 2.) / (3.*h0*powf(omegam, 0.5)); time = time / (yrtos*1.e6); return time; } __device__ float redshift(float time) { float h = 0.6711; float h0 = h*3.246753e-18; float omegam = 0.3; float yrtos = 3.15569e7; time = time*yrtos*1.e6; float redshift = powf((3.*h0*powf(omegam, 0.5)*time / 2.), -2. / 3.) - 1.; return redshift; } /*inline __device__ void sigma(float sig[][FREQ_BIN_NUM]) { sig[0][0] = 3.4630495932136023e-18; // 17.0 eV sig[0][1] = 0.0; // 10.0 eV sig[0][2] = 0.0; // 10.0 eV sig[0][3] = 0.0; // 10.0 eV sig[1][0] = 0.0; // 17.0 eV sig[1][1] = 0.0; // 10.0 eV sig[1][2] = 0.0; // 10.0 eV sig[1][3] = 0.0; // 10.0 eV }*/ /*inline __device__ void sigma(float sig[][FREQ_BIN_NUM]) { sig[0][0] = 6.2998764713e-18; // 13.6 eV sig[0][1] = 1.23034961639e-18; // 24.65 eV sig[0][2] = 4.70442276047e-19; // 34.49 eV sig[0][3] = 1.40170632038e-19; // 52.06 eV sig[1][0] = 0; // 16.74 eV sig[1][1] = 7.77983571966e-18; // 24.65 eV sig[1][2] = 4.20352986507e-18; // 34.49 eV sig[1][3] = 1.90620549625e-18; // 52.06 eV //sig[1][1] = 0; // 24.65 eV //sig[1][2] = 0; // 34.49 eV }*/ // RAMSES inline __device__ void sigma(float sig[][FREQ_BIN_NUM]) { sig[0][0] = 3.007e-18; // 13.6 eV sig[0][1] = 5.687e-19; // 24.65 eV sig[0][2] = 7.889e-20; // 34.49 eV sig[0][3] = 1.0; // 52.06 eV sig[1][0] = 0; // 16.74 eV sig[1][1] = 4.478e-18; // 24.65 eV sig[1][2] = 1.197e-18; // 34.49 eV sig[1][3] = 1.0; // 52.06 eV //sig[1][1] = 0; // 24.65 eV //sig[1][2] = 0; // 34.49 eV } /*inline __device__ void sigma(float sig[][FREQ_BIN_NUM]) { sig[0][0] = 1.23034961639e-18; // 13.6 eV sig[0][1] = 1.23034961639e-18; // 24.65 eV sig[0][2] = 4.70442276047e-19; // 34.49 eV sig[0][3] = 4.70442276047e-19; // 52.06 eV sig[1][0] = 0; // 16.74 eV sig[1][1] = 7.77983571966e-18; // 24.65 eV sig[1][2] = 4.20352986507e-18; // 34.49 eV sig[1][3] = 1.90620549625e-18; // 52.06 eV //sig[1][1] = 0; // 24.65 eV //sig[1][2] = 0; // 34.49 eV }*/ __device__ static float atomicMax(float* address, float val) { int* address_as_i = (int*) address; int old = *address_as_i, assumed; do { assumed = old; old = ::atomicCAS(address_as_i, assumed, __float_as_int(::fmaxf(val, __int_as_float(assumed)))); } while (assumed != old); return __int_as_float(old); } __device__ void step_bdf(float* yout, float* y, float* dGdx, float n, float E, float dt) { float xe = 
(1.0-Y_P)*(1.0-y[0])+0.25*Y_P*(1.0-y[1]); float ne = n*xe; float T = E/((3./2.)*8.6173303e-5)/(1.0+xe); int CASE = 1; // Recombination rates (Black81) float al[2]; al[0] = rec_HII(T, CASE); al[1] = rec_HeII(T, CASE); // Collision Excitation float col[2]; col[0] = col_HI(T); col[1] = col_HeI(T); for(int nSpe=0; nSpe < SPECIES; nSpe++) { // Constant alpha //float source = al[nSpe]*(1.0-y[nSpe])*ne*dt + y[nSpe]; //float sink = 1.0 + (dGdx[nSpe] + col[nSpe]*ne)*dt; // Variable alpha float source = al[nSpe]*(1.0-y[nSpe])*ne*dt + y[nSpe]; float sink = 1.0 + (dGdx[nSpe] + al[nSpe]*ne + col[nSpe]*ne)*dt; // Basic, no alpha //float source = y[nSpe]; //float sink = 1.0 + (dGdx[nSpe])*dt; yout[nSpe] = source/sink; /*yout[nSpe] = y[nSpe]; yout[nSpe] += dt*(-gam[nSpe] - col[nSpe]*ne*y[nSpe]); yout[nSpe] += dt*al[nSpe]*(1.0-y[nSpe])*ne;*/ //yout[nSpe] = y[nSpe]; //yout[nSpe] += dt*(-dGdx[nSpe]*y[nSpe]); //yout[nSpe] += dt*al[nSpe]*(1.0-y[nSpe])*ne; // Quadratic formulation: /*float A = al[nSpe]*n; float B = -dGdx[nSpe] - 2.0*al[nSpe]*n; float C = al[nSpe]*n; float Anew = -A*dt; float Bnew = 1.0-dt*B; float Cnew = -y[nSpe]-C*dt; yout[nSpe] = (-Bnew + sqrt(Bnew*Bnew-4*Anew*Cnew))/(2*Anew); yout[nSpe] = MAX(yout[nSpe],1.0e-10); yout[nSpe] = MIN(yout[nSpe],1.0e0);*/ } } __device__ float dnHIdt(float* y, float* dGdx, float n, float E) { float xe = (1.0-Y_P)*(1.0-y[0])+0.25*Y_P*(1.0-y[1]); float ne = n*xe; float T = E/((3./2.)*8.6173303e-5)/(1.0+xe); int CASE = 1; // Recombination rates (Black81) float al[2]; al[0] = rec_HII(T, CASE); al[1] = rec_HeII(T, CASE); // Collision Excitation float col[2]; col[0] = col_HI(T); col[1] = col_HeI(T); int nBin=0; float x = -dGdx[nBin]*y[nBin] - col[nBin]*ne*y[nBin] + al[nBin]*(1.0-y[nBin])*ne; return x; } __device__ float lambda(float E, float* y, float n, float a) { int CASE = 1; float dEdt = 0.0; float xe = (1.0-Y_P)*(1.0-y[0])+0.25*Y_P*(1.0-y[1]); float ne = n*xe; float T = E/((3./2.)*8.6173303e-5)/(1.0+xe); float xHI = (1-Y_P)*y[0]; float xHII = (1-Y_P)*(1.0 - y[0]); float xHeI = 0.25*Y_P*y[1]; float xHeII = 0.0; //float xHeIII = 0.0; float colH = col_cool_HI(T)*ne*xHI; float colHe = col_cool_HeI(T)*ne*xHeI; float recH = rec_cool_HII(T, CASE)*ne*xHII; float recHe = rec_cool_HeII(T, CASE)*ne*xHeII; float colexH = colex_HI(T)*ne*xHI; float brem = 1.42e-27*powf(T,0.5)*ne*ne*6.242e11/n; dEdt += colH + recH + colexH; dEdt += colHe + recHe; dEdt += brem; // Adiabatic cooling: float H0 = 67.11*3.241e-20; dEdt += 3.0*H0*0.5477*powf(a, -1.5)*E; return dEdt; } __device__ float thin_source(float source, float fraction) { if(fraction < 1.e-30) { return 0.0; } else { return source/fraction; } } __global__ void timestep( double* rate, float dt, float* density, float* x_N, float* FluxArray, float* EArray, float* background, int dim, float L, float a) { int nelements=dim*dim*dim; int i0 = blockIdx.x*blockDim.x+threadIdx.x; int j0 = blockIdx.y*blockDim.y+threadIdx.y; int k0 = blockIdx.z; int ind=i0+dim*j0+dim*dim*k0; float xn[SPECIES]; float Gamma[SPECIES]; for(int nSpe=0; nSpe < SPECIES; nSpe++) { xn[nSpe] = x_N[ind + nSpe*nelements]; Gamma[nSpe] = FluxArray[ind + nSpe*nelements]; Gamma[nSpe] += background[nSpe]; // Ionization rate for each species (per Myr) } float dens = density[ind]/(a*a*a); // Baryonic number density float E = EArray[ind]; // Energy (temperature) per baryon // Subcycle loop to advance the chemistry // First we calculate the optically thin approximation of source terms: float dGdx[SPECIES]; for(int nSpe=0; nSpe<SPECIES; nSpe++) { dGdx[nSpe] = 
thin_source(Gamma[nSpe], xn[nSpe]); } // Find the max time step to advance hydrogen float dxdt; dxdt = abs(dnHIdt(xn, dGdx, dens, E)); float dx = (L/DIMX)*a; float sig = 1.111e7; // sig[0][0]*cm in a Mpc float tau = max(dens*sig*dx, 3.0); // dIdt //atomicMax(rate+1, dxdt*tau); // dnHdt //if(tau > 0.5) //atomicMax(rate, dxdt/xn[0]); // dt[ind] = dxdt/xn[0]; // Filter // Conservation calculation (rate[2]) float fDtMyr = dt*3.15e13; float xn_out[2]; float xe = (1.0-Y_P)*(1.0-xn[0])+0.25*Y_P*(1.0-xn[1]); float ne = dens*xe; float T = E/((3./2.)*8.6173303e-5)/(1.0+xe); int CASE = 1; // Recombination rates (Black81) float al[2]; al[0] = rec_HII(T, CASE); al[1] = rec_HeII(T, CASE); // Collision Excitation float col[2]; col[0] = col_HI(T); col[1] = col_HeI(T); float fRec[2], x_eq, delta_x[2]; for(int nSpe=0; nSpe < SPECIES; nSpe++) { fRec[nSpe] = al[nSpe]*(1.0-xn[nSpe])*ne;//*(a*a*a);// volume weighted // Tracking neutral float C = dGdx[nSpe] + col[nSpe]*ne; float D = al[nSpe]*(1.0-xn[nSpe])*ne; //xn_out[nSpe] = (xn[nSpe] + D*fDtMyr)/(1 + C*fDtMyr); // Tracking ionized float x_ion = 1.0 - xn[nSpe]; x_ion = (x_ion + (C-D)*fDtMyr)/(1+C*fDtMyr); xn_out[nSpe] = 1.0 - x_ion; //xn_out[nSpe] = xn[nSpe] - dGdx[nSpe]*fDtMyr; //fRec = 0.0; if(xn[nSpe] > -0.05) { xn_out[nSpe] = xn[nSpe] - Gamma[nSpe]*fDtMyr + fRec[nSpe]*fDtMyr; } //xn_out[nSpe] = xn[nSpe] - Gamma[nSpe]*fDtMyr + fRec*fDtMyr; xn_out[nSpe] = MAX(xn_out[nSpe], 1.0e-10); xn_out[nSpe] = MIN(xn_out[nSpe], 1.0); delta_x[nSpe] = xn[nSpe] - xn_out[nSpe]; } //float delta_x = xn[0] - xn_out[0]; //fRec = al[0]*(1.0-xn[0])*ne; float cell_ratio = 0.0; //cell_ratio = 1 - (delta_x + fRec*fDtMyr)/(Gamma[0]*fDtMyr); atomicAdd(rate+0, double(delta_x[0]*density[ind])); atomicAdd(rate+1, double(Gamma[0]*density[ind])); atomicAdd(rate+2, double(fRec[0]*density[ind])); atomicAdd(rate+3, double(delta_x[1]*density[ind])); atomicAdd(rate+4, double(Gamma[1]*density[ind])); atomicAdd(rate+5, double(fRec[1]*density[ind])); //atomicAdd(rate+3, count_out*density[ind]); __syncthreads(); } __global__ void ionization( float dt, float* error, float* density, float* x_N, float* FluxArray, float* EArray, float* dEArray, float* background, int dim, float a) { int nelements=dim*dim*dim; int i0 = blockIdx.x*blockDim.x+threadIdx.x; int j0 = blockIdx.y*blockDim.y+threadIdx.y; int k0 = blockIdx.z; int index=i0+dim*j0+dim*dim*k0; float fDtMyr = dt*3.15e13; // float t = time(1.0/a - 1.0); float xn[SPECIES]; float xn_out[SPECIES]; float Gamma[SPECIES]; float fCumFlux = 0; for(int nSpe=0; nSpe < SPECIES; nSpe++) { xn[nSpe] = x_N[index+nSpe*nelements]; Gamma[nSpe] = FluxArray[index+nSpe*nelements]; Gamma[nSpe] += background[nSpe]; // Ionization rate for each species (per Myr) fCumFlux += xn[3+nSpe]*fDtMyr; } float dens = density[index]/(a*a*a); // Baryonic number density float E = EArray[index]; // Energy (temperature) per baryon float dEdt = dEArray[index]; //float dEdt = dEArray[index]+background[2]+background[3]; // Subcycle loop to advance the chemistry // First we calculate the optically thin approximation of source terms: float dEdx, dGdx[SPECIES]; for(int nSpe=0; nSpe<SPECIES; nSpe++) { dGdx[nSpe] = thin_source(Gamma[nSpe], xn[nSpe]); } dEdx = thin_source(dEdt, xn[0]); E = E + dEdt*dt; //step_bdf(xn_out, xn, dGdx, dens, E, dtSub); //step_bdf(float* yout, float* y, float* dGdx, float n, float E, float dt) float xe = (1.0-Y_P)*(1.0-xn[0])+0.25*Y_P*(1.0-xn[1]); float ne = dens*xe; float T = E/((3./2.)*8.6173303e-5)/(1.0+xe); int CASE = 1; // Recombination rates (Black81) float al[2]; 
al[0] = rec_HII(T, CASE); al[1] = rec_HeII(T, CASE); // Collision Excitation float col[2]; col[0] = col_HI(T); col[1] = col_HeI(T); float fRec, x_eq; for(int nSpe=0; nSpe < SPECIES; nSpe++) { //fRec = al[nSpe]*(1.0-xn[nSpe])*ne;//*(a*a*a);// volume weighted fRec = al[nSpe]*(1.0-xn[nSpe])*ne;//*(a*a*a);// volume weighted //fRec = 0.0; /*float source = al[nSpe]*(1.0-xn[nSpe])*ne*fDtMyr + xn[nSpe]; float sink = 1.0 + (dGdx[nSpe] + col[nSpe]*ne)*fDtMyr; xn_out[nSpe] = source/sink;*/ // Tracking neutral float C = dGdx[nSpe] + col[nSpe]*ne; float D = al[nSpe]*(1.0-xn[nSpe])*ne; //xn_out[nSpe] = (xn[nSpe] + D*fDtMyr)/(1 + C*fDtMyr); // Tracking ionized float x_ion = 1.0 - xn[nSpe]; x_ion = (x_ion + (C-D)*fDtMyr)/(1+C*fDtMyr); xn_out[nSpe] = 1.0 - x_ion; //xn_out[nSpe] = xn[nSpe] - dGdx[nSpe]*fDtMyr; //fRec = 0.0; if(xn[nSpe] > 0.05) { xn_out[nSpe] = xn[nSpe] - Gamma[nSpe]*fDtMyr + fRec*fDtMyr; } //xn_out[nSpe] = xn[nSpe] - Gamma[nSpe]*fDtMyr + fRec*fDtMyr; //xn_out[nSpe] = atomicAdd(error, fRec*density[index]); //atomicAdd(error, 1); //xn_out[nBin] = xn[nBin]/(1+dGdx[nBin]*1*fDtMyr); //yout[nBin] += dt*al[nBin]*(1.0-y[nBin])*ne; } //x_eq = al[0]*(1.0-xn[0])*ne/(Gamma[0]+1.e-3); for(int nSpe=0; nSpe<SPECIES; nSpe++) { xn[nSpe] = xn_out[nSpe]; xn[nSpe] = MAX(xn[nSpe], 1.0e-10); //xn[nSpe] = MAX(xn[nSpe], x_eq); xn[nSpe] = MIN(xn[nSpe], 1.0); } __syncthreads(); for(int nSpe=0; nSpe<SPECIES; nSpe++) { x_N[index+nSpe*nelements] = xn[nSpe]; } //float xerr[SPECIES]; // Change the energy array if(E <= 0) EArray[index] = 0.0; //EArray[index] = E; else EArray[index] = E; //for(int i=0; i<SPECIES; i++) //{ // xerr[i] = xn_out[i]; //} // if(index == 1056832) // *error = xeq; // atomicMax(error, EArray[index]); // atomicAdd(error, al[nBin]*(1.0-y[nBin])*ne*dt); // __syncthreads(); } // This is here because derivs is inherently inline under CUDA architecture. 
//#include "./inc/rkck.cu" //#include "./inc/simpr.cu" __global__ void ionization1( float dt, float* error, float* density, float* x_N, float* FluxArray, float* EArray, float* dEArray, float* background, int dim, float a) { int nelements=dim*dim*dim; int i0 = blockIdx.x*blockDim.x+threadIdx.x; int j0 = blockIdx.y*blockDim.y+threadIdx.y; int k0 = blockIdx.z; int index=i0+dim*j0+dim*dim*k0; float fDtMyr = dt*3.15e13; // float t = time(1.0/a - 1.0); float xn[SPECIES]; float xn_out[SPECIES]; float Gamma[SPECIES]; float fCumFlux = 0; for(int nSpe=0; nSpe < SPECIES; nSpe++) { xn[nSpe] = x_N[index+nSpe*nelements]; Gamma[nSpe] = FluxArray[index+nSpe*nelements]; Gamma[nSpe] += background[nSpe]; // Ionization rate for each species (per Myr) fCumFlux += xn[3+nSpe]*fDtMyr; } float dens = density[index]/(a*a*a); // Baryonic number density float E = EArray[index]; // Energy (temperature) per baryon float dEdt = dEArray[index]; //float dEdt = dEArray[index]+background[2]+background[3]; float eps = 0.1; // Maximum fractional change during subcycle float fDtRem; // Remaining time in the subcycle loop // Subcycle loop to advance the chemistry // First we calculate the optically thin approximation of source terms: float dEdx, dGdx[SPECIES]; for(int nSpe=0; nSpe<SPECIES; nSpe++) { dGdx[nSpe] = thin_source(Gamma[nSpe], xn[nSpe]); } dEdx = thin_source(dEdt, xn[0]); // Recombination addition float xe = (1.0-Y_P)*(1.0-xn[0])+0.25*Y_P*(1.0-xn[1]); float ne = dens*xe; float T = E/((3./2.)*8.6173303e-5)/(1.0+xe); int CASE = 1; float al[2]; al[0] = rec_HII(T, CASE); al[1] = rec_HeII(T, CASE); float fRec; for(int nSpe=0; nSpe<SPECIES; nSpe++) { fRec = al[nSpe]*(1.0-xn[nSpe])*ne; atomicAdd(error, fRec); } // Subcycle: fDtRem = fDtMyr; while(fDtRem > 1.0) // One second { // Find the max time step to advance E float dtSubE; float Lam = lambda(E, xn, dens, a); float Heat = dEdx*xn[0]; float dEdt = Heat - Lam; float rate = abs(dEdt); if(rate < eps*E/fDtRem) { dtSubE = fDtRem; } else { dtSubE = eps*E/rate; } // Find the max time step to advance hydrogen float dtSubH; rate = abs(dnHIdt(xn, dGdx, dens, E)); if(rate < eps*0.1/fDtRem) { dtSubH = fDtRem; } else { dtSubH = eps*0.1/rate; } float dtSub = min(dtSubE, dtSubH); // Updating energy //float E1 = E + dEdt*dtSub; //float dEdt1 = dEdx*xn[0] - lambda(E1, xn, dens, a); //E = MIN(2.e4*1.29e-4, E + (dEdt+dEdt1)*dtSub/2); E = E + dEdt*dtSub; step_bdf(xn_out, xn, dGdx, dens, E, dtSub); for(int nSpe=0; nSpe<SPECIES; nSpe++) { if (xn_out[nSpe] < 0.0) { xn[nSpe] = 0.0; } else if (xn_out[nSpe] <= 1.0) { xn[nSpe] = xn_out[nSpe]; } else { xn[nSpe] = 1.0; } } fDtRem = fDtRem - dtSub; } __syncthreads(); for(int nSpe=0; nSpe<SPECIES; nSpe++) { x_N[index+nSpe*nelements] = xn[nSpe]; } //float xerr[SPECIES]; // Change the energy array if(E <= 0) EArray[index] = 0.0; //EArray[index] = E; else EArray[index] = E; //for(int i=0; i<SPECIES; i++) //{ // xerr[i] = xn_out[i]; //} // if(index == 1056832) // *error = xeq; // atomicMax(error, EArray[index]); //atomicAdd(error, fCumFlux); // __syncthreads(); } // Signum function __device__ int sign(float x) { return (x > 0) - (x < 0); } // Does the HEALPix math but gives a float __device__ void fpix2vec_nest(long n, long m, float* vec) { double temp[3]; pix2vec_nest(n, m, temp); vec[0] = (float) temp[0]; vec[1] = (float) temp[1]; vec[2] = (float) temp[2]; } // Takes position (x0) and direction(u) and takes a step along integer grid to x __device__ float raystep(float* x, int* ijk, float* x0, int* ijk0, float* u) { // Minimum projection, to prevent 
divide by 0 float eps = 1.e-10; // Length of step float dS; // Direction of movement along each axis int s[3]; for(int i=0;i<3;i++) s[i] = sign(u[i]); // Distance to nearest cell face along each axis float r[3]; for(int i=0;i<3;i++) { if(s[i] != 0) r[i] = fabsf((ijk0[i] + (s[i]+1.0)/2.0) - x0[i])/MAX(eps,fabsf(u[i])); else r[i] = 1.0/eps; } // Initialize next step for(int i=0;i<3;i++) ijk[i] = ijk0[i]; // Take the step if(r[0] <= r[1] && r[0] <= r[2]) { dS = r[0]; ijk[0] += s[0]; } if(r[1] <= r[0] && r[1] <= r[2]) { dS = r[1]; ijk[1] += s[1]; } if(r[2] <= r[0] && r[2] <= r[1]) { dS = r[2]; ijk[2] += s[2]; } for(int i=0;i<3;i++) x[i] = x0[i] + dS*u[i]; return dS; } __device__ int rayFinish(Ray *ray, int nDom, Domain domain) { if(nDom == domain.get_id()) { printf("Problem: attempting to send ray to self."); ray->set_dom(-1); return 1; } for(int dom=0; dom<8; dom++) { if(nDom == dom) { ray->set_dom(dom); return 1; } } ray->set_dom(-1); return 1; } __device__ void round_down(int * I, float * X) { I[0] = __double2int_rd(X[0]); I[1] = __double2int_rd(X[1]); I[2] = __double2int_rd(X[2]); } // Returns 1 if the ray is outside the domain __device__ int BoundaryCheck(float * X, int * I, int DIM) { for(int i=0; i<3; i++) { if( X[i] < 0 || I[i] < 0 || X[i] >= DIM || I[i] >= DIM) { return 1; } } return 0; } // For tracking rays // X and I are ray position and gridloc, vec is the direction of the ray // mode is for adjusting rays tracked by the tracer (0) or placed on the grid (1) __device__ void BoundaryAdjustOld(float * X, int * I, float* vec, int mode, int DIM) { for(int i=0; i<3; i++) { if(I[i] < 0) { X[i] += DIM; if(mode == 0) { I[i] += DIM; } else { I[i] = static_cast<int>(X[i]); } } if(I[i] >= DIM) { X[i] -= DIM; if(mode == 0) { I[i] -= DIM; } else { I[i] = static_cast<int>(X[i]); } } } } __device__ void BoundaryAdjust(float * X, int * I, float* vec, int mode, int DIM) { for(int i=0; i<3; i++) { if(I[i] < 0) { X[i] += DIM-1-1.e-3; I[i] += DIM-1; if(mode == 1) { I[i] = static_cast<int>(X[i]); } } if(I[i] >= DIM) { X[i] -= DIM-1-1.e-3; I[i] -= DIM-1; if(mode == 1) { I[i] = static_cast<int>(X[i]); } } } } /*// For new rays __device__ void BoundaryAdjust_new(float * X, int * I, int DIM) { for(int i=0; i<3; i++) { if(X[i] < 0) { X[i] += DIM; I[i] = static_cast<int>(X[i]); } if(X[i] >= DIM) { X[i] -= DIM; I[i] = static_cast<int>(X[i]); } } }*/ // This kernel traces rays until they split or end. // nGrid: number density of absorbers on the physical grid // xGrid: the neutral fraction of absorbers on the physical grid // Parts: Particles under consideration // GamGrid: rate of photon absorption on the physical grid // PixList: List of N pixels (in unique nested form) // RayDat: List of ray data in (R, tau_0, ..., tau_n) form // N0: Array of number of initial rays per particle // Nside: HEALPix parameter // L: Physical length of the side of the box // int is used because 2e9 is enough to get to HEALPix order 13 // dt: Length of the previous time step in Myr __global__ void rayTraceKernel( const float *nGrid, const float *xGrid, const source *Parts, float *GamGrid, float* dEArray, Ray *RayDat, int *N, int N0, float L, float a, float *nfSback, Domain domain, float dt) { // Determine properties of the ray to be traced: // 2+1D grid of 2D blocks. 
CHECKXXX // z dimension of grid is for particle ID // Blocks are 16x16 to fill the SM's in CC 3.5 int blockID = blockIdx.x + blockIdx.y * gridDim.x; int threadID = blockID * blockDim.x * blockDim.y + threadIdx.y * blockDim.x + threadIdx.x; // Only computing Npix rays CHECKXXX if(threadID >= N0) return; int dim = domain.get_dim(); int nElements = dim*dim*dim; int domID = domain.get_id(); int xLim0[3];//, xLim1[3]; domain.get_x0(xLim0); int d_ind = dim*dim*xLim0[2] + dim*xLim0[1] + xLim0[0]; //domain.get_x1(xLim1); Ray *ray = RayDat + threadID; int partID = ray->get_part(); int pixID, ord; ray->get_pix(&pixID, &ord); /*if(partID <0 || partID >1) printf("?!? %d\t%d\n", domID, partID);*/ int Nside = (1 << ord); int Npix = 12 * Nside * Nside; // Find direction of ray float vec[3]; fpix2vec_nest(Nside, pixID, vec); // Find position of the ray float * X; X = ray->position; int * I; I = ray->gridloc; int nDom = domain.loc(I); // Find distance to domanin wall int domID3[3]; domain.get_id3(domID3); float XR[3]; XR[0] = Parts[partID].x/dim; XR[1] = Parts[partID].y/dim; XR[2] = Parts[partID].z/dim; float r_dom = dim*raystep(XR, domID3, XR, domID3, vec); /*if(pixID < 10 && ord == 2) printf("%f for (%f, %f, %f)\n", r_dom, vec[0], vec[1], vec[2]);*/ // printf("%d\t%e\t%e\t%e\t%e\n", pID, ray[0], X[0], X[1], X[2]); // printf("%d\t%e\t%d\t%d\t%d\n", pID, ray[0], I[0], I[1], I[2]); // printf("%d\t%e\t%e\t%e\t%e\n", pID, ray[0], vec[0], vec[1], vec[2]); // Grab the cross sections float sig[SPECIES][FREQ_BIN_NUM]; sigma(sig); // Loop variables float X0[3], dR; int I0[3], ind; // Set the max distance to trace a ray float Rmax = 1.7320*DIMX; //float Rmax = 32; float Rsplit = sqrt(Npix/12.56636/OMEGA_RAY); float dcross = fabsf(Rsplit - r_dom); /*if( dcross < 2.0) Rsplit = Rsplit - 2.0;*/ while(ray->R < Rsplit) { /*if(abs(X[0]-I[0]) > 2)// This is for checking boundary conditions REMOVEXXX if(pixID == 89829) { printf("%d %d %d %f %f %f\n", I[0], I[1], I[2], X[0], X[1], X[2]); }*/ ind = I[0] + dim*I[1] + dim*dim*I[2] - d_ind; memcpy(I0, I, 3*sizeof(int)); memcpy(X0, X, 3*sizeof(float)); // Take a single step dR = raystep(X, I, X, I, vec); // Check if the ray is just outside the domain if(nDom != domID) { // Check if it come from the boundary if(PERIODIC == 1) { if(BoundaryCheck(X, I, DIMX)) { //printf("CCC %e %e %e\n", X[0], X[1], X[2]); //CHECKXXX BoundaryAdjust(X, I, vec, 0, DIMX); } } // Entered if(domain.loc(I) == domID) { ind = I[0] + dim*I[1] + dim*dim*I[2] - d_ind; memcpy(I0, I, 3*sizeof(int)); memcpy(X0, X, 3*sizeof(float)); dR += raystep(X0, I0, X0, I0, vec); } else { rayFinish(ray, nDom, domain); atomicSub(N, 1); return; } } ray->R += dR; for(int nBin=0;nBin<FREQ_BIN_NUM;nBin++) { if(isnan(ray->flux[nBin])) { int i_0 = ind/(dim*dim); int i_1 = (ind/dim) % dim; int i_2 = ind%dim; printf("Problem Back Trace! %d %d %d %e %e)\n", i_0, i_1, i_2, xGrid[ind+nElements], ray->R); } } /*if(ind < 0 || ind >= dim*dim*dim) printf("??? 
%d %d\n", domID, ind);*/ // Calculate the column densities: float dL = (dR/DIMX)*L*a; // Hydrogen float nH = nGrid[ind]*(1.0-Y_P)*1.00; float nHI = nH*MAX(1.e-10, xGrid[ind]); float NcolHI = 3.086e24*dL*pow(a,-3)*nHI; // Helium float nHe = nGrid[ind]*0.25*Y_P; float nHeI = nHe*MAX(1.e-10, xGrid[ind+nElements]); float NcolHeI = 3.086e24*dL*pow(a,-3)*nHeI; ///////// Adjacent pixel correction ////////// float fc = 1.0; float Lpix = sqrtf(12.566*ray->R*ray->R/Npix); float Dedge = Lpix/2.0; int ind_c = ind; int del=1; float D[3]; for(int i=0; i<3; i++) { float Dci; Dci = X0[i] + dR*vec[i]/2.0 - (I0[i]+0.5); Dci = Dci; if(abs(Dci) > 0.5) { //Dci = 1.0 - abs(Dci); } D[i] = Dci; float De = 0.5 - fabs(Dci); if(De < Dedge) { Dedge = De; if(Dci > 0 && I[i] + 1 < xLim0[i] + dim)//CHECKXXX { ind_c = ind + del; } else if(Dci < 0 && I[i] > xLim0[i]) { ind_c = ind - del; } } del *= dim; } if(Dedge < Lpix/2.0) fc = powf(0.5 + Dedge/Lpix, 1.0); else fc = 1.0; //if(pixID == 10096) if(Dedge < 0) { fc = 1.0; //printf("%d: %f (%d %d %d)(%f %f %f)(%f %f %f)(%f %f %f)\n", pixID, ray->R, I0[0], I0[1], I0[2], X0[0], X0[1], X0[2], vec[0], vec[1], vec[2], D[0], D[1], D[2]); } if(D[0] > 0.5 || D[1] > 0.5 || D[2] > 0.5) { fc = 1.0; } // Truncating the correction for testing purposes //fc = MAX(0.8, fc); ///////// Adjacent pixel correction ////////// float gamH = 0; float gamHe = 0; float dE = 0; ///////// Absorption limiting constants ////////// // Total flux along the ray /*double S_0=0.0; for(int nBin=0; nBin<FREQ_BIN_NUM; nBin++) { S_0 += ray->flux[nBin]; } // "density" of rays within a cell/cell face double C = OMEGA_RAY*pow(Rsplit/ray->R, 2); // Size of and number of atoms in the cell double L_cell = (1.0/DIMX)*L*a; double LANX = pow(L_cell*3.086e24, 3)*nH;//*xGrid[ind]; LANX = LANX*(dL/L_cell); // Correction for length of ray // Ionization time double T_ion = 1.0e0*LANX/C/((S_0+1.e-30)*9.5234e48); double Delta_t = dt*3.15e13; double R_ion = Delta_t/T_ion;*/ for(int nBin=0;nBin<FREQ_BIN_NUM;nBin++) { float dtau, dtauH, dtauHe, dampH, dampHe, transmit, absorb, A, B; // Hydrogen dtauH = sig[0][nBin]*NcolHI; dampH = exp(-dtauH); // Absorption limiting execution /*double dtauH_new; if(Delta_t > T_ion) { double w = ray->flux[nBin]/S_0; double dtauH_critical = -log(1.0 - MIN(0.999,w*T_ion/Delta_t) ); dtauH_new = MIN( dtauH, dtauH_critical ); } dtauH = dtauH_new;*/ /*if(T_ion < Delta_t) { dampH = MAX((1.0 - T_ion/Delta_t)/3.0, dampH); //dtauH = -log(dampH); //dampH *= (1.0 - (1.0-exp(-R_ion))/R_ion); }*/ // Helium dtauHe = sig[1][nBin]*NcolHeI; dampHe = exp(-dtauHe); // Number of absorbtions per second absorb = ray->flux[nBin]*(1.0 - dampH*dampHe); // Keep track of total flux ray->flux[nBin] *= dampH*dampHe; // Fraction absorbed by H, He dtau = dtauH + dtauHe; if(dtau < 1.e-10) { // simplify for dtau~0 float temp_H = sig[0][nBin]*nH; float temp_He = sig[1][nBin]*nHe; A = temp_H/(temp_H+temp_He); B = temp_He/(temp_H+temp_He); } else { A = dtauH/dtau; B = dtauHe/dtau; } // Add total photon counts absorb /= powf(L/DIMX,3)*3.086e24; // Unit correction gamH += fc*A*absorb/nH; gamHe += fc*B*absorb/nHe; // Add the energy up CHECKXXX dE += fc*A*MAX(energy[nBin]-13.6,0)*absorb/nH; dE += fc*B*MAX(energy[nBin]-24.6,0)*absorb/nHe; // dE += fc*(energy[nBin]-13.6)*absorb/nGrid[ind]; } dE = 0; // Update flux array atomicAdd(GamGrid + ind, gamH); atomicAdd(GamGrid + ind + nElements, gamHe); // Update Energy array atomicAdd(dEArray + ind, dE); ///////// Adjacent pixel correction ////////// float ratio = 1.0; //ratio *= 
xGrid[ind_c]/xGrid[ind]; ratio *= nGrid[ind]/nGrid[ind_c]; //float ratio = 1.0; float gamH_c = ratio*gamH*(1.0-fc)/fc; float gamHe_c = ratio*gamHe*(1.0-fc)/fc; float dE_c = ratio*dE*(1.0-fc)/fc; atomicAdd(GamGrid + ind_c, gamH_c); atomicAdd(GamGrid + ind_c + nElements, gamHe_c); atomicAdd(dEArray + ind_c, dE_c); ///////// Adjacent pixel correction ////////// // Apply boundary conditions, if required float checkX[3]; int checkI[3]; memcpy(checkX, ray->position, 3*sizeof(float)); memcpy(checkI, ray->gridloc, 3*sizeof(int)); float X2[3]; int I2[3]; memcpy(X2, ray->position, 3*sizeof(float)); memcpy(I2, ray->gridloc, 3*sizeof(int)); float f_temp[3]; int i_temp[3]; for(int i=0; i<3; i++) { f_temp[i] = X[i]; i_temp[i] = I[i]; } if(PERIODIC == 1) { if(BoundaryCheck(X, I, DIMX)) { //printf("AAA %e %e %e\n", X[0], X[1], X[2]); CHECKXXX BoundaryAdjust(X, I, vec, 0, DIMX); } } // Terminate the ray //if( ray->R > Rmax || BoundaryCheck(X, I, DIMX) || ray->flux[0] < 1.e-12) if( ray->R > Rmax || ray->flux[1] < 1.e-8) { ray->set_dom(-1); atomicSub(N, 1); for(int nBin=0; nBin<FREQ_BIN_NUM; nBin++) { atomicAdd(nfSback + nBin, ray->flux[nBin]); // CHECKXXX if(isnan(ray->flux[nBin])) { printf("Checking ray->R = %f", ray->R); printf("Problem! %d (%d %d %d) (%d %d %d)\n", nBin, i_temp[0], i_temp[1], i_temp[2], I[0], I[1], I[2]); printf("Problem! %d (%e %e %e) (%e %e %e)\n", nBin, f_temp[0], f_temp[1], f_temp[2], X[0], X[1], X[2]); } } return; } nDom = domain.loc(I); if( nDom != domID ) { rayFinish(ray, nDom, domain); atomicSub(N, 1); return; } } // Add up all the rays that don't terminate __syncthreads(); } // This kernel splits the rays into the next HEALPix level until they split or end. // PixList: List of N pixels (in unique nested form) // RayDat: List of ray data in (R, tau_0, ..., tau_n) form // N0: Number of rays // int is used because 2e9 is enough to get to HEALPix order 13 __global__ void raySplitKernel( Ray *RayDat_init, Ray *RayDat, int *nRays, int N0, Ray *RayBuf, int* nBufLoc, const source * source_dev, Domain domain) { // 2+1D grid of 2D blocks. 
// z dimension of grid is for particle ID // Blocks are 16x16 to fill the SM's in CC 3.5 int blockID = blockIdx.x + blockIdx.y * gridDim.x; int threadID = blockID * blockDim.x * blockDim.y + threadIdx.y * blockDim.x + threadIdx.x; // Only computing Npix rays CHECKXXX if(threadID >= N0) return; Ray *ray = RayDat_init + threadID; // Terminated rays if(ray->get_dom() == -1) { return; } // Split rays int rayDom = ray->get_dom(); if(rayDom == domain.get_id()) { // Get a unique ID for the (first) ray int rayID = atomicAdd(nRays, 4); int partID = ray->get_part(); int pixID, ord; ray->get_pix(&pixID, &ord); float origin[3]; origin[0] = source_dev[partID].x; origin[1] = source_dev[partID].y; origin[2] = source_dev[partID].z; // Splitting into 4 rays for(int nSplit=0; nSplit<4; nSplit++) { Ray *ray_split = RayDat + (rayID+nSplit); int new_ID = 4*pixID + nSplit; int new_ord = ord + 1; int Nside = (1 << new_ord); float direction[3]; fpix2vec_nest(Nside, new_ID, direction); ray_split->R = ray->R; ray_split->set_part(partID); ray_split->set_pix(new_ID, new_ord); float flux_init[FREQ_BIN_NUM]; for(int nBin=0; nBin<FREQ_BIN_NUM; nBin++) { flux_init[nBin] = ray->flux[nBin]/4; } ray_split->set_flux(flux_init); ray_split->set_position(origin, ray->R, direction); // Apply boundary float * rayX = ray_split->position; int * rayI = ray_split->gridloc; float * checkX = ray->position; int * checkI = ray->gridloc; int check = -1; if(new_ID == check) { printf("Placing at %d %d %d %f %f %f\n", rayI[0], rayI[1], rayI[2], rayX[0], rayX[1], rayX[2]); printf("From %d %d %d %f %f %f\n", checkI[0], checkI[1], checkI[2], checkX[0], checkX[1], checkX[2]); } if(PERIODIC == 1) { if(BoundaryCheck(rayX, rayI, DIMX)) { //printf("BBB %e %e %e\n", rayX[0], rayX[1], rayX[2]); //CHECKXXX BoundaryAdjust(rayX, rayI, direction, 1, DIMX); // mode = 1 originally } } if(new_ID == check) { printf("Now at %d %d %d %f %f %f\n", rayI[0], rayI[1], rayI[2], rayX[0], rayX[1], rayX[2]); } int splitDom = domain.loc(ray_split->gridloc); ray_split->set_dom(splitDom); // Move rays for different domains to the buffer if(splitDom != rayDom && splitDom >=0 && splitDom <8) { int nBufID = atomicAdd((nBufLoc + splitDom), 1); int pix, ord; ray_split->get_pix(&pix,&ord); RayBuf[splitDom*NUM_BUF + nBufID].copy_ray(*ray_split); int Nside2 = 1 << (2*ord); if(pix < 0 || pix > 12*Nside2) printf("SPLIT ADJUST Domain %d threadID %d N0 %d\n", domain.get_id(), threadID, N0); //printf("Split ray in wrong domain! %d: %d -> %d, (%d, %d, %d) %f\n", new_ID, rayDom, splitDom, rayI[0], rayI[1], rayI[2], ray->R); ray_split->set_dom(-1); } } // Terminate old ray ray->set_dom(-1); return; } // Buffer rays for(int dom=0; dom<8; dom++) { if(ray->get_dom() == domain.get_id()) continue; if(ray->get_dom() == dom) { // Conditional for testing REMOVEXXX int pixID, ord; ray->get_pix(&pixID, &ord); int Nside = (1 << ord); float direction[3]; fpix2vec_nest(Nside, pixID, direction); //if(direction[0]<-0.75 || direction[1]<-0.75 || direction[2]<-0.75) if(pixID >= 0) { // Copy ray into buffer int nBufID = atomicAdd((nBufLoc + dom), 1); //printf("A %d\t%d\t%d\t%d\n", domain.get_id(), dom, nBufLoc[dom], dom*NUM_BUF+nBufID); int pix, ord; ray->get_pix(&pix,&ord); int Nside2 = 1 << (2*ord); if(pix < 0 || pix > 12*Nside2) printf("NORMAL BUFFER Domain %d threadID %d N0 %d\n", domain.get_id(), threadID, N0); RayBuf[dom*NUM_BUF + nBufID].copy_ray(*ray); //ray->R = temp; } // Terminate old ray ray->set_dom(-1); return; } } printf("Couldn't find home for our ray! 
%d\n", ray->get_dom()); ray->set_dom(-1); }
684fd72433e514eb59e617a83187b3e32c978546.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /******************************************************************************/ /*! @file SierpinskiTriangle.cu @par Purpose: Implementation of SierpinskiTriangle CUDA kernel @par Language: C++ @par Platform: Visual Studio 2015, Windows 10 64-bit @author YongKiat @par Email: yongkiat.ong\@digipen.edu @date 07/12/2018 */ /******************************************************************************/ #include "Common.h" #define TriangleSize (1<< 5) __global__ void SierpinskiTriangleKernel(uchar* d_DataOut,uint limit) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y* blockDim.y; //if (x >= PIXELDIM || y >= PIXELDIM) // return; int i = 0; for (; y >= 0; --y) { // printing space till // the value of y for (; i < y; ++i) { //outFile << " "; d_DataOut[x + PIXELDIM * y] = 0XFF; // b d_DataOut[x + PIXELDIM * y + PIXELDIM2] = 0XFF; // g d_DataOut[x + PIXELDIM * y + PIXELDIM2 + PIXELDIM2] = 0XFF; // r } // printing '*' for (; x + y < PIXELDIM;++x) { // printing '*' at the appropriate position // is done by the and value of x and y // wherever value is 0 we have printed '*' if ((x&y)) { d_DataOut[x + PIXELDIM * y] = 0XFF; // b d_DataOut[x + PIXELDIM * y + PIXELDIM2] = 0XFF; // g d_DataOut[x + PIXELDIM * y + PIXELDIM2 + PIXELDIM2] = 0XFF; // r //outFile << " " << " "; //SetData(x, y, 255, data); //SetData(x, y, 255, data); } else { d_DataOut[x + PIXELDIM * y] = 0x00; // b d_DataOut[x + PIXELDIM * y + PIXELDIM2] = 0x00; // g d_DataOut[x + PIXELDIM * y + PIXELDIM2 + PIXELDIM2] = 0xff; // r //SetData(x, y, 0, data); //outFile << "* "; } } //outFile << endl; } } #define STriangleDefault void STriangle::TriangleGPU(uchar** gpuOutput) { #ifdef STriangleDefault dim3 Block(TriangleSize, TriangleSize, 1); dim3 Grid(ceil(((float)PIXELDIM) / TriangleSize), ceil(((float)PIXELDIM) / TriangleSize), 1); checkCudaErrors(hipMalloc((void **)&ptr1, PIXELDIM3 * sizeof(uchar))); SierpinskiTriangleKernel << <Grid, Block >> > (ptr1,PIXELDIM); hipDeviceSynchronize(); *gpuOutput = (uchar *)malloc(PIXELDIM3 * sizeof(uchar)); checkCudaErrors(hipMemcpy(*gpuOutput, ptr1, PIXELDIM3 * sizeof(uchar), hipMemcpyDeviceToHost)); #endif } void STriangle::ClearMemory(uchar**data) { #ifdef STriangleDefault hipFree(ptr1); free(*data); #endif }
684fd72433e514eb59e617a83187b3e32c978546.cu
/******************************************************************************/ /*! @file SierpinskiTriangle.cu @par Purpose: Implementation of SierpinskiTriangle CUDA kernel @par Language: C++ @par Platform: Visual Studio 2015, Windows 10 64-bit @author YongKiat @par Email: yongkiat.ong\@digipen.edu @date 07/12/2018 */ /******************************************************************************/ #include "Common.h" #define TriangleSize (1<< 5) __global__ void SierpinskiTriangleKernel(uchar* d_DataOut,uint limit) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y* blockDim.y; //if (x >= PIXELDIM || y >= PIXELDIM) // return; int i = 0; for (; y >= 0; --y) { // printing space till // the value of y for (; i < y; ++i) { //outFile << " "; d_DataOut[x + PIXELDIM * y] = 0XFF; // b d_DataOut[x + PIXELDIM * y + PIXELDIM2] = 0XFF; // g d_DataOut[x + PIXELDIM * y + PIXELDIM2 + PIXELDIM2] = 0XFF; // r } // printing '*' for (; x + y < PIXELDIM;++x) { // printing '*' at the appropriate position // is done by the and value of x and y // wherever value is 0 we have printed '*' if ((x&y)) { d_DataOut[x + PIXELDIM * y] = 0XFF; // b d_DataOut[x + PIXELDIM * y + PIXELDIM2] = 0XFF; // g d_DataOut[x + PIXELDIM * y + PIXELDIM2 + PIXELDIM2] = 0XFF; // r //outFile << " " << " "; //SetData(x, y, 255, data); //SetData(x, y, 255, data); } else { d_DataOut[x + PIXELDIM * y] = 0x00; // b d_DataOut[x + PIXELDIM * y + PIXELDIM2] = 0x00; // g d_DataOut[x + PIXELDIM * y + PIXELDIM2 + PIXELDIM2] = 0xff; // r //SetData(x, y, 0, data); //outFile << "* "; } } //outFile << endl; } } #define STriangleDefault void STriangle::TriangleGPU(uchar** gpuOutput) { #ifdef STriangleDefault dim3 Block(TriangleSize, TriangleSize, 1); dim3 Grid(ceil(((float)PIXELDIM) / TriangleSize), ceil(((float)PIXELDIM) / TriangleSize), 1); checkCudaErrors(cudaMalloc((void **)&ptr1, PIXELDIM3 * sizeof(uchar))); SierpinskiTriangleKernel << <Grid, Block >> > (ptr1,PIXELDIM); cudaDeviceSynchronize(); *gpuOutput = (uchar *)malloc(PIXELDIM3 * sizeof(uchar)); checkCudaErrors(cudaMemcpy(*gpuOutput, ptr1, PIXELDIM3 * sizeof(uchar), cudaMemcpyDeviceToHost)); #endif } void STriangle::ClearMemory(uchar**data) { #ifdef STriangleDefault cudaFree(ptr1); free(*data); #endif }
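This pair shows the usual hipify mapping for runtime API calls: the kernel body is left untouched, only the host-side calls are renamed (cudaMalloc to hipMalloc, cudaMemcpy to hipMemcpy, cudaDeviceSynchronize to hipDeviceSynchronize, cudaFree to hipFree), the memcpy direction enums are renamed the same way, and the triple-chevron launch is kept because hipcc accepts it. A minimal hypothetical pair, not taken from any file above, illustrating the same renames:

// Hypothetical example: each CUDA call carries its HIP rename in the trailing comment.
#include <cuda_runtime.h>                       // HIP: #include "hip/hip_runtime.h"

__global__ void fill(unsigned char* p, int n)   // kernel code is unchanged by hipify
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) p[i] = 0xFF;
}

int main()
{
    const int n = 1 << 20;
    unsigned char* d = NULL;
    cudaMalloc((void**)&d, n);                  // HIP: hipMalloc((void**)&d, n);
    fill<<<(n + 255) / 256, 256>>>(d, n);       // <<< >>> is accepted unchanged by hipcc
    cudaDeviceSynchronize();                    // HIP: hipDeviceSynchronize();
    cudaFree(d);                                // HIP: hipFree(d);
    return 0;
}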
f30f1d06929f696096e887e871a7f4042f1e2a90.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 1.7.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date September 2015 @generated from zgeadd.cu normal z -> s, Fri Sep 11 18:29:19 2015 @author Mark Gates */ #include "common_magma.h" #define BLK_X 64 #define BLK_Y 32 /* Divides matrix into ceil( m/BLK_X ) x ceil( n/BLK_Y ) blocks. Each block has BLK_X threads. Each thread loops across one row, updating BLK_Y entries. Code similar to slaset. */ __global__ void sgeadd_full( int m, int n, float alpha, const float *dA, int ldda, float *dB, int lddb ) { int ind = blockIdx.x*BLK_X + threadIdx.x; int iby = blockIdx.y*BLK_Y; /* check if full block-column */ bool full = (iby + BLK_Y <= n); /* do only rows inside matrix */ if ( ind < m ) { dA += ind + iby*ldda; dB += ind + iby*lddb; if ( full ) { // full block-column #pragma unroll for( int j=0; j < BLK_Y; ++j ) { dB[j*lddb] = alpha*dA[j*ldda] + dB[j*lddb]; } } else { // partial block-column for( int j=0; j < BLK_Y && iby+j < n; ++j ) { dB[j*lddb] = alpha*dA[j*ldda] + dB[j*lddb]; } } } } /** Purpose ------- ZGEADD adds two matrices, dB = alpha*dA + dB. Arguments --------- @param[in] m INTEGER The number of rows of the matrix dA. M >= 0. @param[in] n INTEGER The number of columns of the matrix dA. N >= 0. @param[in] alpha REAL The scalar alpha. @param[in] dA REAL array, dimension (LDDA,N) The m by n matrix dA. @param[in] ldda INTEGER The leading dimension of the array dA. LDDA >= max(1,M). @param[in,out] dB REAL array, dimension (LDDB,N) The m by n matrix dB. @param[in] lddb INTEGER The leading dimension of the array dB. LDDB >= max(1,M). @param[in] queue magma_queue_t Queue to execute in. @ingroup magma_saux2 ********************************************************************/ extern "C" void magmablas_sgeadd_q( magma_int_t m, magma_int_t n, float alpha, magmaFloat_const_ptr dA, magma_int_t ldda, magmaFloat_ptr dB, magma_int_t lddb, magma_queue_t queue ) { magma_int_t info = 0; if ( m < 0 ) info = -1; else if ( n < 0 ) info = -2; else if ( ldda < max(1,m)) info = -5; else if ( lddb < max(1,m)) info = -7; if ( info != 0 ) { magma_xerbla( __func__, -(info) ); return; } if ( m == 0 || n == 0 ) return; dim3 threads( BLK_X ); dim3 grid( magma_ceildiv( m, BLK_X ), magma_ceildiv( n, BLK_Y ) ); hipLaunchKernelGGL(( sgeadd_full), dim3(grid), dim3(threads), 0, queue , m, n, alpha, dA, ldda, dB, lddb ); } /** @see magmablas_sgeadd_q @ingroup magma_saux2 ********************************************************************/ extern "C" void magmablas_sgeadd( magma_int_t m, magma_int_t n, float alpha, magmaFloat_const_ptr dA, magma_int_t ldda, magmaFloat_ptr dB, magma_int_t lddb ) { magmablas_sgeadd_q( m, n, alpha, dA, ldda, dB, lddb, magma_stream ); }
f30f1d06929f696096e887e871a7f4042f1e2a90.cu
/* -- MAGMA (version 1.7.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date September 2015 @generated from zgeadd.cu normal z -> s, Fri Sep 11 18:29:19 2015 @author Mark Gates */ #include "common_magma.h" #define BLK_X 64 #define BLK_Y 32 /* Divides matrix into ceil( m/BLK_X ) x ceil( n/BLK_Y ) blocks. Each block has BLK_X threads. Each thread loops across one row, updating BLK_Y entries. Code similar to slaset. */ __global__ void sgeadd_full( int m, int n, float alpha, const float *dA, int ldda, float *dB, int lddb ) { int ind = blockIdx.x*BLK_X + threadIdx.x; int iby = blockIdx.y*BLK_Y; /* check if full block-column */ bool full = (iby + BLK_Y <= n); /* do only rows inside matrix */ if ( ind < m ) { dA += ind + iby*ldda; dB += ind + iby*lddb; if ( full ) { // full block-column #pragma unroll for( int j=0; j < BLK_Y; ++j ) { dB[j*lddb] = alpha*dA[j*ldda] + dB[j*lddb]; } } else { // partial block-column for( int j=0; j < BLK_Y && iby+j < n; ++j ) { dB[j*lddb] = alpha*dA[j*ldda] + dB[j*lddb]; } } } } /** Purpose ------- ZGEADD adds two matrices, dB = alpha*dA + dB. Arguments --------- @param[in] m INTEGER The number of rows of the matrix dA. M >= 0. @param[in] n INTEGER The number of columns of the matrix dA. N >= 0. @param[in] alpha REAL The scalar alpha. @param[in] dA REAL array, dimension (LDDA,N) The m by n matrix dA. @param[in] ldda INTEGER The leading dimension of the array dA. LDDA >= max(1,M). @param[in,out] dB REAL array, dimension (LDDB,N) The m by n matrix dB. @param[in] lddb INTEGER The leading dimension of the array dB. LDDB >= max(1,M). @param[in] queue magma_queue_t Queue to execute in. @ingroup magma_saux2 ********************************************************************/ extern "C" void magmablas_sgeadd_q( magma_int_t m, magma_int_t n, float alpha, magmaFloat_const_ptr dA, magma_int_t ldda, magmaFloat_ptr dB, magma_int_t lddb, magma_queue_t queue ) { magma_int_t info = 0; if ( m < 0 ) info = -1; else if ( n < 0 ) info = -2; else if ( ldda < max(1,m)) info = -5; else if ( lddb < max(1,m)) info = -7; if ( info != 0 ) { magma_xerbla( __func__, -(info) ); return; } if ( m == 0 || n == 0 ) return; dim3 threads( BLK_X ); dim3 grid( magma_ceildiv( m, BLK_X ), magma_ceildiv( n, BLK_Y ) ); sgeadd_full<<< grid, threads, 0, queue >>> ( m, n, alpha, dA, ldda, dB, lddb ); } /** @see magmablas_sgeadd_q @ingroup magma_saux2 ********************************************************************/ extern "C" void magmablas_sgeadd( magma_int_t m, magma_int_t n, float alpha, magmaFloat_const_ptr dA, magma_int_t ldda, magmaFloat_ptr dB, magma_int_t lddb ) { magmablas_sgeadd_q( m, n, alpha, dA, ldda, dB, lddb, magma_stream ); }
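In this pair the stream-qualified launch is what changes: hipify rewrites the <<< >>> form into hipLaunchKernelGGL, whose first five arguments are the kernel, the grid dimensions, the block dimensions, the dynamic shared-memory byte count, and the stream, followed by the kernel's own arguments. A minimal hypothetical kernel showing the correspondence (the kernel and names below are illustrative only):

// Hypothetical axpy used only to illustrate the launch-syntax mapping.
#include <cuda_runtime.h>

__global__ void axpy(int n, float a, const float* x, float* y)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) y[i] = a * x[i] + y[i];
}

void launch_axpy(int n, float a, const float* dx, float* dy, cudaStream_t stream)
{
    dim3 threads(128);
    dim3 grid((n + threads.x - 1) / threads.x);
    // CUDA form, as in the .cu file above:
    axpy<<<grid, threads, 0, stream>>>(n, a, dx, dy);
    // HIP form produced by hipify, as in the .hip file above:
    //   hipLaunchKernelGGL(axpy, grid, threads, 0, stream, n, a, dx, dy);
}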
2e134b390ae601b8b8cee64f62b38a0897585094.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include<stdio.h> #include "Utilities.cuh" #define BLOCKSIZE_NUDFT2_2D_X 16 #define BLOCKSIZE_NUDFT2_2D_Y 16 //#define DEBUG #define pi 3.141592653589793238463 /*************************/ /* KERNEL MATRIX FILLING */ /*************************/ __global__ void Kernel_Matrix_Filling(const double * __restrict__ d_X, const double * __restrict__ d_Y, const double * __restrict__ d_u, const double * __restrict__ d_v, double2 * __restrict__ d_Kernel_Matrix, const int Nu, const int Nv, const int M, const int N) { const int tidx = blockIdx.x * blockDim.x + threadIdx.x; const int tidy = blockIdx.y * blockDim.y + threadIdx.y; // --- Evaluates the matrix filling index const int tid = tidy * M + tidx; if (tidx < M && tidy < N) { d_Kernel_Matrix[tid].x = cos(-2. * pi * ((d_u[tidy] * d_X[tidx]) / static_cast<double>(Nu) + (d_v[tidy] * d_Y[tidx]) / static_cast<double>(Nv))); d_Kernel_Matrix[tid].y = sin(-2. * pi * ((d_u[tidy] * d_X[tidx]) / static_cast<double>(Nu) + (d_v[tidy] * d_Y[tidx]) / static_cast<double>(Nv))); //printf("%f %f\n", d_Kernel_Matrix[tid].x, d_Kernel_Matrix[tid].y); } } /************/ /* NDFT2 2D */ /************/ extern "C" { __declspec(dllexport) void NDFT2_2D_GPU(const double * __restrict__ d_X, const double * __restrict__ d_Y, const double * __restrict__ d_u, const double * __restrict__ d_v, double2 * __restrict__ d_in, double2 * __restrict__ d_out, const int Nu, const int Nv, const int M, const int N) { // --- N: length of d_u and d_v // --- M: length of d_X and d_Y hipblasHandle_t handle; cublasSafeCall(hipblasCreate(&handle)); double2 *d_Kernel_Matrix; gpuErrchk(hipMalloc(&d_Kernel_Matrix, M * N * sizeof(double2))); // --- Filling the kernel matrix dim3 dimBlock(BLOCKSIZE_NUDFT2_2D_X, BLOCKSIZE_NUDFT2_2D_Y); dim3 dimGrid(iDivUp(M, BLOCKSIZE_NUDFT2_2D_X), iDivUp(N, BLOCKSIZE_NUDFT2_2D_Y)); Kernel_Matrix_Filling << <dimGrid, dimBlock >> > (d_X, d_Y, d_u, d_v, d_Kernel_Matrix, Nu, Nv, M, N); #ifdef DEBUG gpuErrchk(hipPeekAtLastError()); gpuErrchk(hipDeviceSynchronize()); #endif // --- Matrix multiplication double2 alpha; alpha.x = 1.; alpha.y = 0.; double2 beta; beta.x = 0.; beta.y = 0.; cublasSafeCall(hipblasZgemv(handle, HIPBLAS_OP_T, M, N, &alpha, d_Kernel_Matrix, M, d_in, 1, &beta, d_out, 1)); // --- Freeing device memory gpuErrchk(hipFree(d_Kernel_Matrix)); } }
2e134b390ae601b8b8cee64f62b38a0897585094.cu
#include<stdio.h> #include "Utilities.cuh" #define BLOCKSIZE_NUDFT2_2D_X 16 #define BLOCKSIZE_NUDFT2_2D_Y 16 //#define DEBUG #define pi 3.141592653589793238463 /*************************/ /* KERNEL MATRIX FILLING */ /*************************/ __global__ void Kernel_Matrix_Filling(const double * __restrict__ d_X, const double * __restrict__ d_Y, const double * __restrict__ d_u, const double * __restrict__ d_v, double2 * __restrict__ d_Kernel_Matrix, const int Nu, const int Nv, const int M, const int N) { const int tidx = blockIdx.x * blockDim.x + threadIdx.x; const int tidy = blockIdx.y * blockDim.y + threadIdx.y; // --- Evaluates the matrix filling index const int tid = tidy * M + tidx; if (tidx < M && tidy < N) { d_Kernel_Matrix[tid].x = cos(-2. * pi * ((d_u[tidy] * d_X[tidx]) / static_cast<double>(Nu) + (d_v[tidy] * d_Y[tidx]) / static_cast<double>(Nv))); d_Kernel_Matrix[tid].y = sin(-2. * pi * ((d_u[tidy] * d_X[tidx]) / static_cast<double>(Nu) + (d_v[tidy] * d_Y[tidx]) / static_cast<double>(Nv))); //printf("%f %f\n", d_Kernel_Matrix[tid].x, d_Kernel_Matrix[tid].y); } } /************/ /* NDFT2 2D */ /************/ extern "C" { __declspec(dllexport) void NDFT2_2D_GPU(const double * __restrict__ d_X, const double * __restrict__ d_Y, const double * __restrict__ d_u, const double * __restrict__ d_v, double2 * __restrict__ d_in, double2 * __restrict__ d_out, const int Nu, const int Nv, const int M, const int N) { // --- N: length of d_u and d_v // --- M: length of d_X and d_Y cublasHandle_t handle; cublasSafeCall(cublasCreate(&handle)); double2 *d_Kernel_Matrix; gpuErrchk(cudaMalloc(&d_Kernel_Matrix, M * N * sizeof(double2))); // --- Filling the kernel matrix dim3 dimBlock(BLOCKSIZE_NUDFT2_2D_X, BLOCKSIZE_NUDFT2_2D_Y); dim3 dimGrid(iDivUp(M, BLOCKSIZE_NUDFT2_2D_X), iDivUp(N, BLOCKSIZE_NUDFT2_2D_Y)); Kernel_Matrix_Filling << <dimGrid, dimBlock >> > (d_X, d_Y, d_u, d_v, d_Kernel_Matrix, Nu, Nv, M, N); #ifdef DEBUG gpuErrchk(cudaPeekAtLastError()); gpuErrchk(cudaDeviceSynchronize()); #endif // --- Matrix multiplication double2 alpha; alpha.x = 1.; alpha.y = 0.; double2 beta; beta.x = 0.; beta.y = 0.; cublasSafeCall(cublasZgemv(handle, CUBLAS_OP_T, M, N, &alpha, d_Kernel_Matrix, M, d_in, 1, &beta, d_out, 1)); // --- Freeing device memory gpuErrchk(cudaFree(d_Kernel_Matrix)); } }
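This pair evaluates a type-2 non-uniform DFT by explicitly filling the M x N kernel matrix A[m + n*M] = exp(-2*pi*i*(u[n]*X[m]/Nu + v[n]*Y[m]/Nv)) and then computing out = A^T * in with a single Zgemv; the BLAS calls map one-to-one (cublasCreate to hipblasCreate, cublasZgemv to hipblasZgemv, CUBLAS_OP_T to HIPBLAS_OP_T). A minimal CPU reference for the same sum, which can be useful for validating the GPU result on small sizes; array names and sizes here are hypothetical, and complex values are kept as separate re/im arrays:

/* CPU reference: out[n] = sum_m in[m] * exp(-2*pi*i*(u[n]*X[m]/Nu + v[n]*Y[m]/Nv)) */
#include <math.h>

void ndft2_2d_cpu(const double* X, const double* Y, const double* u, const double* v,
                  const double* in_re, const double* in_im,
                  double* out_re, double* out_im,
                  int Nu, int Nv, int M, int N)
{
    const double pi = 3.141592653589793238463;
    for (int n = 0; n < N; ++n) {
        double acc_re = 0.0, acc_im = 0.0;
        for (int m = 0; m < M; ++m) {
            double phase = -2.0 * pi * (u[n] * X[m] / (double)Nu + v[n] * Y[m] / (double)Nv);
            double c = cos(phase), s = sin(phase);
            /* complex multiply (in_re + i*in_im) * (c + i*s) */
            acc_re += in_re[m] * c - in_im[m] * s;
            acc_im += in_re[m] * s + in_im[m] * c;
        }
        out_re[n] = acc_re;
        out_im[n] = acc_im;
    }
}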
5d091242a90bcc9c0516d27fbd219e27b31bf692.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <math.h> #include <stdint.h> //uint32_t is an unsigned int, fixed at 4 bytes #include <stdlib.h> //for memory management #include <hip/hip_runtime.h> /*Define the width and height as symbolic constants*/ #define width 1024 #define heigth 1024 /*Structures for the bmp format*/ #pragma pack(push,1) typedef struct tagBITMAPFILEHEADER{ //The BITMAPFILEHEADER structure comes at the head of the file; its size is 14 bytes unsigned short bfType; //bfType holds "BM" to mark the bmp format uint32_t bfSize; //bfSize is the size of the whole file in bytes unsigned short bfReserved1; //bfReserved1 and 2 are reserved fields and are set to 0 unsigned short bfReserved2; uint32_t bf0ffBits; //bf0ffBits is the number of bytes from the start of the file to the pixel data }BITMAPFILEHEADER; #pragma pack(pop) typedef struct tagBITMAPINFOHEADER{ //BITMAPINFOHEADER holds the image information of the bmp file; its size is 40 bytes uint32_t biSize; //size of the image uint32_t biWidth; //number of pixels horizontally uint32_t biHeight; //number of pixels vertically unsigned short biPlanes; //1 unsigned short biBitCount; //number of bits per pixel; 8 here uint32_t biCompression; //compression type; 0 because bmp is uncompressed uint32_t biSizeImage; //size of the bmp data; basically 0 when biCompression=0 uint32_t biXPelsPerMeter; //biXPelsPerMeter and biYPelsPerMeter are basically 0 uint32_t biYPelsPerMeter; uint32_t biCirUsed; //0 uint32_t biCirImportant; //0 }BITMAPINFOHEADER; typedef struct tagRGBQUAD{ unsigned char rgbBlue; unsigned char rgbGreen; unsigned char rgbRed; unsigned char rgbReserved; }RGBQUAD; /*Kernel function for the Fresnel approximation*/ __global__ void fresnel_gpu(int *x_d, int *y_d, float *z_d, float *lumi_intensity_d, int *points_d){ int i, j, k; j=blockDim.x*blockIdx.x+threadIdx.x; //replaces the loop over width i=blockDim.y*blockIdx.y+threadIdx.y; //replaces the loop over heigth /*Variables needed for the calculation*/ float wave_len=0.633F; //light wavelength float wave_num=M_PI/wave_len; //half of the wave number for(k=0; k<*points_d; k++){ lumi_intensity_d[i*width+j]=lumi_intensity_d[i*width+j]+cosf(wave_num*((j-x_d[k])*(j-x_d[k])+(i-y_d[k])*(i-y_d[k]))*z_d[k]); } } /*Arrays for generating the image*/ float lumi_intensity[width*heigth]; //array for the light intensity unsigned char img[width*heigth]; //array for the bmp image /*main function*/ int main(){ BITMAPFILEHEADER bmpFh; BITMAPINFOHEADER bmpIh; RGBQUAD rgbQ[256]; /*Device-side variables*/ char filename[16]; //array for the input file name int i, j; int points; //number of object points float min, max, mid; //used for binarization FILE *fp, *fp1; /*The BITMAPFILEHEADER structure*/ bmpFh.bfType =19778; //'B'=0x42,'M'=0x4d,'BM'=0x4d42=19778 bmpFh.bfSize =14+40+1024+(width*heigth); //1024 is the size of the color palette: 256 levels, 4 bytes each bmpFh.bfReserved1 =0; bmpFh.bfReserved2 =0; bmpFh.bf0ffBits =14+40+1024; /*The BITMAPINFOHEADER structure*/ bmpIh.biSize =40; bmpIh.biWidth =width; bmpIh.biHeight =heigth; bmpIh.biPlanes =1; bmpIh.biBitCount =8; bmpIh.biCompression =0; bmpIh.biSizeImage =0; bmpIh.biXPelsPerMeter =0; bmpIh.biYPelsPerMeter =0; bmpIh.biCirUsed =0; bmpIh.biCirImportant =0; /*The RGBQUAD structure*/ for(i=0; i<256; i++){ rgbQ[i].rgbBlue =i; rgbQ[i].rgbGreen =i; rgbQ[i].rgbRed =i; rgbQ[i].rgbReserved =0; } /*Read the 3D point file*/ printf("please input filename : "); scanf("%s", filename); fp=fopen(filename,"rb"); //open in binary read mode if(fp==NULL){ //handling when the file could not be opened printf("error!\n"); } fread(&points, sizeof(int), 1, fp); //specify the data address, element size, count, and file pointer printf("the number of points is %d\n", points); //arrays to hold the object points that are read in int x[points]; //~~these arrays can only be defined once the data has been read~~ int y[points]; float z[points]; int x_buf, y_buf, z_buf; //variables to hold the data temporarily /*Read the object-point coordinates into each buffer and store values adjusted for the positions of the hologram plane and the object points*/ for(i=0; i<points; i++){ fread(&x_buf, sizeof(int), 1, fp); fread(&y_buf, sizeof(int), 1, fp); fread(&z_buf, sizeof(int), 1, fp); x[i]=x_buf*40+width*0.5; //multiply the coordinates by 40 to spread the points out, then add the center offset y[i]=y_buf*40+heigth*0.5; z[i]=1.0F/(((float)z_buf)*40+10000.0F); } fclose(fp); /*Device-side variables*/ int *x_d, *y_d; float *z_d; float *lumi_intensity_d; int *points_d; dim3 block(32,32,1); //block size (number of threads) dim3 grid(ceil(width/block.x),ceil(heigth/block.y),1); //grid size (number of blocks) // dim3 grid((width+block.x-1)/block.x,(heigth+block.y-1)/block.y,1); /*Allocate device memory*/ hipMalloc((void**)&x_d, points*sizeof(int)); hipMalloc((void**)&y_d, points*sizeof(int)); hipMalloc((void**)&z_d, points*sizeof(float)); hipMalloc((void**)&lumi_intensity_d, width*heigth*sizeof(float)); hipMalloc((void**)&points_d, sizeof(int)); /*Transfer data from host to device*/ hipMemcpy(x_d, x, points*sizeof(int), hipMemcpyHostToDevice); hipMemcpy(y_d, y, points*sizeof(int), hipMemcpyHostToDevice); hipMemcpy(z_d, z, points*sizeof(float), hipMemcpyHostToDevice); hipMemcpy(lumi_intensity_d, lumi_intensity, width*heigth*sizeof(float), hipMemcpyHostToDevice); hipMemcpy(points_d, &points, sizeof(int), hipMemcpyHostToDevice); /*Launch the kernel*/ hipLaunchKernelGGL(( fresnel_gpu), dim3(grid), dim3(block) , 0, 0, x_d, y_d, z_d, lumi_intensity_d, points_d); /*Transfer data from device to host*/ hipMemcpy(lumi_intensity, lumi_intensity_d, width*heigth*sizeof(float), hipMemcpyDeviceToHost); /*Free device memory*/ hipFree(x_d); hipFree(y_d); hipFree(z_d); hipFree(lumi_intensity_d); hipFree(points_d); //put lumi~[0] into the min/max variables for now so they can be compared min=lumi_intensity[0]; max=lumi_intensity[0]; /*Find the maximum and minimum values*/ for(i=0; i<heigth; i++){ for(j=0; j<width; j++){ if(min>lumi_intensity[i*width+j]){ min=lumi_intensity[i*width+j]; } if(max<lumi_intensity[i*width+j]){ max=lumi_intensity[i*width+j]; } } } mid=(min+max)*0.5F; //compute the middle value (threshold) printf("min=%lf, max=%lf, mid=%lf\n", min, max, mid); /*Compare each light-intensity value with the middle value and binarize*/ for(i=0; i<width*heigth; i++){ if(lumi_intensity[i]<mid){ img[i]=0; } if(lumi_intensity[i]>mid){ img[i]=255; } } /*Set the declared fp, the file name to use, and its read/write mode: write (w) in binary (b)*/ fp1=fopen("fresnel-gpu.bmp","wb"); /*Specify the address of the data to write, the element size, the count, and the file pointer*/ fwrite(&bmpFh, sizeof(bmpFh), 1, fp1); //each member could also be written separately, e.g. (&bmpFh.bfType, sizeof(bmpFh.bfType), 1, fp); fwrite(&bmpIh, sizeof(bmpIh), 1, fp1); fwrite(&rgbQ[0], sizeof(rgbQ[0]), 256, fp1); fwrite(img, sizeof(unsigned char), width*heigth, fp1); //write to the bmp printf("'fresnel-gpu.bmp' was saved.\n\n"); fclose(fp1); return 0; }
5d091242a90bcc9c0516d27fbd219e27b31bf692.cu
#include <stdio.h> #include <math.h> #include <stdint.h> //uint32_t is an unsigned int, fixed at 4 bytes #include <stdlib.h> //for memory management #include <cuda.h> /*Define the width and height as symbolic constants*/ #define width 1024 #define heigth 1024 /*Structures for the bmp format*/ #pragma pack(push,1) typedef struct tagBITMAPFILEHEADER{ //The BITMAPFILEHEADER structure comes at the head of the file; its size is 14 bytes unsigned short bfType; //bfType holds "BM" to mark the bmp format uint32_t bfSize; //bfSize is the size of the whole file in bytes unsigned short bfReserved1; //bfReserved1 and 2 are reserved fields and are set to 0 unsigned short bfReserved2; uint32_t bf0ffBits; //bf0ffBits is the number of bytes from the start of the file to the pixel data }BITMAPFILEHEADER; #pragma pack(pop) typedef struct tagBITMAPINFOHEADER{ //BITMAPINFOHEADER holds the image information of the bmp file; its size is 40 bytes uint32_t biSize; //size of the image uint32_t biWidth; //number of pixels horizontally uint32_t biHeight; //number of pixels vertically unsigned short biPlanes; //1 unsigned short biBitCount; //number of bits per pixel; 8 here uint32_t biCompression; //compression type; 0 because bmp is uncompressed uint32_t biSizeImage; //size of the bmp data; basically 0 when biCompression=0 uint32_t biXPelsPerMeter; //biXPelsPerMeter and biYPelsPerMeter are basically 0 uint32_t biYPelsPerMeter; uint32_t biCirUsed; //0 uint32_t biCirImportant; //0 }BITMAPINFOHEADER; typedef struct tagRGBQUAD{ unsigned char rgbBlue; unsigned char rgbGreen; unsigned char rgbRed; unsigned char rgbReserved; }RGBQUAD; /*Kernel function for the Fresnel approximation*/ __global__ void fresnel_gpu(int *x_d, int *y_d, float *z_d, float *lumi_intensity_d, int *points_d){ int i, j, k; j=blockDim.x*blockIdx.x+threadIdx.x; //replaces the loop over width i=blockDim.y*blockIdx.y+threadIdx.y; //replaces the loop over heigth /*Variables needed for the calculation*/ float wave_len=0.633F; //light wavelength float wave_num=M_PI/wave_len; //half of the wave number for(k=0; k<*points_d; k++){ lumi_intensity_d[i*width+j]=lumi_intensity_d[i*width+j]+cosf(wave_num*((j-x_d[k])*(j-x_d[k])+(i-y_d[k])*(i-y_d[k]))*z_d[k]); } } /*Arrays for generating the image*/ float lumi_intensity[width*heigth]; //array for the light intensity unsigned char img[width*heigth]; //array for the bmp image /*main function*/ int main(){ BITMAPFILEHEADER bmpFh; BITMAPINFOHEADER bmpIh; RGBQUAD rgbQ[256]; /*Device-side variables*/ char filename[16]; //array for the input file name int i, j; int points; //number of object points float min, max, mid; //used for binarization FILE *fp, *fp1; /*The BITMAPFILEHEADER structure*/ bmpFh.bfType =19778; //'B'=0x42,'M'=0x4d,'BM'=0x4d42=19778 bmpFh.bfSize =14+40+1024+(width*heigth); //1024 is the size of the color palette: 256 levels, 4 bytes each bmpFh.bfReserved1 =0; bmpFh.bfReserved2 =0; bmpFh.bf0ffBits =14+40+1024; /*The BITMAPINFOHEADER structure*/ bmpIh.biSize =40; bmpIh.biWidth =width; bmpIh.biHeight =heigth; bmpIh.biPlanes =1; bmpIh.biBitCount =8; bmpIh.biCompression =0; bmpIh.biSizeImage =0; bmpIh.biXPelsPerMeter =0; bmpIh.biYPelsPerMeter =0; bmpIh.biCirUsed =0; bmpIh.biCirImportant =0; /*The RGBQUAD structure*/ for(i=0; i<256; i++){ rgbQ[i].rgbBlue =i; rgbQ[i].rgbGreen =i; rgbQ[i].rgbRed =i; rgbQ[i].rgbReserved =0; } /*Read the 3D point file*/ printf("please input filename : "); scanf("%s", filename); fp=fopen(filename,"rb"); //open in binary read mode if(fp==NULL){ //handling when the file could not be opened printf("error!\n"); } fread(&points, sizeof(int), 1, fp); //specify the data address, element size, count, and file pointer printf("the number of points is %d\n", points); //arrays to hold the object points that are read in int x[points]; //~~these arrays can only be defined once the data has been read~~ int y[points]; float z[points]; int x_buf, y_buf, z_buf; //variables to hold the data temporarily /*Read the object-point coordinates into each buffer and store values adjusted for the positions of the hologram plane and the object points*/ for(i=0; i<points; i++){ fread(&x_buf, sizeof(int), 1, fp); fread(&y_buf, sizeof(int), 1, fp); fread(&z_buf, sizeof(int), 1, fp); x[i]=x_buf*40+width*0.5; //multiply the coordinates by 40 to spread the points out, then add the center offset y[i]=y_buf*40+heigth*0.5; z[i]=1.0F/(((float)z_buf)*40+10000.0F); } fclose(fp); /*Device-side variables*/ int *x_d, *y_d; float *z_d; float *lumi_intensity_d; int *points_d; dim3 block(32,32,1); //block size (number of threads) dim3 grid(ceil(width/block.x),ceil(heigth/block.y),1); //grid size (number of blocks) // dim3 grid((width+block.x-1)/block.x,(heigth+block.y-1)/block.y,1); /*Allocate device memory*/ cudaMalloc((void**)&x_d, points*sizeof(int)); cudaMalloc((void**)&y_d, points*sizeof(int)); cudaMalloc((void**)&z_d, points*sizeof(float)); cudaMalloc((void**)&lumi_intensity_d, width*heigth*sizeof(float)); cudaMalloc((void**)&points_d, sizeof(int)); /*Transfer data from host to device*/ cudaMemcpy(x_d, x, points*sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(y_d, y, points*sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(z_d, z, points*sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(lumi_intensity_d, lumi_intensity, width*heigth*sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(points_d, &points, sizeof(int), cudaMemcpyHostToDevice); /*Launch the kernel*/ fresnel_gpu<<< grid, block >>>(x_d, y_d, z_d, lumi_intensity_d, points_d); /*Transfer data from device to host*/ cudaMemcpy(lumi_intensity, lumi_intensity_d, width*heigth*sizeof(float), cudaMemcpyDeviceToHost); /*Free device memory*/ cudaFree(x_d); cudaFree(y_d); cudaFree(z_d); cudaFree(lumi_intensity_d); cudaFree(points_d); //put lumi~[0] into the min/max variables for now so they can be compared min=lumi_intensity[0]; max=lumi_intensity[0]; /*Find the maximum and minimum values*/ for(i=0; i<heigth; i++){ for(j=0; j<width; j++){ if(min>lumi_intensity[i*width+j]){ min=lumi_intensity[i*width+j]; } if(max<lumi_intensity[i*width+j]){ max=lumi_intensity[i*width+j]; } } } mid=(min+max)*0.5F; //compute the middle value (threshold) printf("min=%lf, max=%lf, mid=%lf\n", min, max, mid); /*Compare each light-intensity value with the middle value and binarize*/ for(i=0; i<width*heigth; i++){ if(lumi_intensity[i]<mid){ img[i]=0; } if(lumi_intensity[i]>mid){ img[i]=255; } } /*Set the declared fp, the file name to use, and its read/write mode: write (w) in binary (b)*/ fp1=fopen("fresnel-gpu.bmp","wb"); /*Specify the address of the data to write, the element size, the count, and the file pointer*/ fwrite(&bmpFh, sizeof(bmpFh), 1, fp1); //each member could also be written separately, e.g. (&bmpFh.bfType, sizeof(bmpFh.bfType), 1, fp); fwrite(&bmpIh, sizeof(bmpIh), 1, fp1); fwrite(&rgbQ[0], sizeof(rgbQ[0]), 256, fp1); fwrite(img, sizeof(unsigned char), width*heigth, fp1); //write to the bmp printf("'fresnel-gpu.bmp' was saved.\n\n"); fclose(fp1); return 0; }
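For reference, the kernel in this pair accumulates, for every hologram pixel (j,i), the Fresnel-approximation sum I(j,i) = sum_k cos( (pi/lambda) * ((j-x_k)^2 + (i-y_k)^2) / z_k ); the host then binarizes I against the midpoint of its minimum and maximum. Note that z[i] already stores the reciprocal distance 1/z and that wave_num = pi/lambda. A minimal CPU sketch of the same accumulation, handy for checking the GPU output on small point counts; the array names and inputs here are hypothetical stand-ins for the data read from the 3D point file:

/* CPU reference for the binary Fresnel hologram sum computed by fresnel_gpu. */
#include <math.h>

void fresnel_cpu(const int* x, const int* y, const float* zinv, int points,
                 float* lumi, int w, int h)
{
    const float wave_num = 3.14159265358979f / 0.633f;   /* pi / wavelength, as in the kernel */
    for (int i = 0; i < h; ++i) {
        for (int j = 0; j < w; ++j) {
            float acc = 0.0f;
            for (int k = 0; k < points; ++k) {
                float dx = (float)(j - x[k]);
                float dy = (float)(i - y[k]);
                acc += cosf(wave_num * (dx * dx + dy * dy) * zinv[k]);
            }
            lumi[i * w + j] = acc;
        }
    }
}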
Atomic_Add_Kernel.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" //---------------------------------*-CUDA-*----------------------------------// /*! * \file CudaUtils/test/Atomic_Add_Kernel.cu * \author Seth R Johnson * \date Thu Aug 15 11:09:56 2013 * \brief Atomic_Add_Kernel kernel definitions. * \note Copyright (C) 2013 Oak Ridge National Laboratory, UT-Battelle, LLC. */ //---------------------------------------------------------------------------// #include "../cuda_utils/Kernel_Header.cuh" #include "Atomic_Add_Kernel.cuh" #include "../cuda_utils/Atomic_Add.cuh" #include "../cuda_utils/CudaDBC.hh" namespace cuda { //---------------------------------------------------------------------------// // KERNELS //---------------------------------------------------------------------------// template<typename Arch_Switch, typename Float_T> __global__ void atomic_add_test_kernel( Float_T* const __restrict__ out, unsigned int num_increments) { Atomic_Add<Arch_Switch, Float_T> atomic_add; unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; unsigned int stride = blockDim.x * gridDim.x; for (; i < num_increments; i += stride) atomic_add(&out[0], static_cast<Float_T>(1)); } //---------------------------------------------------------------------------// // HOST INTERFACES //---------------------------------------------------------------------------// template<typename Arch_Switch, typename Float_T> void atomic_add_test(Atomic_Add_Kernel_Data<Arch_Switch, Float_T>& kd) { REQUIRE(kd.output.size() == 1); REQUIRE(kd.num_increments > 0); // Check for prior launch failure CudaCall(hipGetLastError()); CUDA_LAUNCH((atomic_add_test_kernel<Arch_Switch, Float_T>), kd.launch_args)( kd.output.data(), kd.num_increments); // Wait until kernel is finished CudaInsist(hipDeviceSynchronize(), "Kernel execution error"); } //---------------------------------------------------------------------------// // INSTANTIATIONS //---------------------------------------------------------------------------// #ifdef __NVCC__ typedef ::cuda::arch::Device Arch_t; #else typedef ::cuda::arch::Host Arch_t; #endif template void atomic_add_test(Atomic_Add_Kernel_Data<Arch_t, float>& kd); template void atomic_add_test(Atomic_Add_Kernel_Data<Arch_t, double>& kd); //---------------------------------------------------------------------------// } // end namespace cuda //---------------------------------------------------------------------------// // end of cuda_utils/test/Atomic_Add_Kernel.cu //---------------------------------------------------------------------------//
Atomic_Add_Kernel.cu
//---------------------------------*-CUDA-*----------------------------------// /*! * \file CudaUtils/test/Atomic_Add_Kernel.cu * \author Seth R Johnson * \date Thu Aug 15 11:09:56 2013 * \brief Atomic_Add_Kernel kernel definitions. * \note Copyright (C) 2013 Oak Ridge National Laboratory, UT-Battelle, LLC. */ //---------------------------------------------------------------------------// #include "../cuda_utils/Kernel_Header.cuh" #include "Atomic_Add_Kernel.cuh" #include "../cuda_utils/Atomic_Add.cuh" #include "../cuda_utils/CudaDBC.hh" namespace cuda { //---------------------------------------------------------------------------// // KERNELS //---------------------------------------------------------------------------// template<typename Arch_Switch, typename Float_T> __global__ void atomic_add_test_kernel( Float_T* const __restrict__ out, unsigned int num_increments) { Atomic_Add<Arch_Switch, Float_T> atomic_add; unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; unsigned int stride = blockDim.x * gridDim.x; for (; i < num_increments; i += stride) atomic_add(&out[0], static_cast<Float_T>(1)); } //---------------------------------------------------------------------------// // HOST INTERFACES //---------------------------------------------------------------------------// template<typename Arch_Switch, typename Float_T> void atomic_add_test(Atomic_Add_Kernel_Data<Arch_Switch, Float_T>& kd) { REQUIRE(kd.output.size() == 1); REQUIRE(kd.num_increments > 0); // Check for prior launch failure CudaCall(cudaGetLastError()); CUDA_LAUNCH((atomic_add_test_kernel<Arch_Switch, Float_T>), kd.launch_args)( kd.output.data(), kd.num_increments); // Wait until kernel is finished CudaInsist(cudaDeviceSynchronize(), "Kernel execution error"); } //---------------------------------------------------------------------------// // INSTANTIATIONS //---------------------------------------------------------------------------// #ifdef __NVCC__ typedef ::cuda::arch::Device Arch_t; #else typedef ::cuda::arch::Host Arch_t; #endif template void atomic_add_test(Atomic_Add_Kernel_Data<Arch_t, float>& kd); template void atomic_add_test(Atomic_Add_Kernel_Data<Arch_t, double>& kd); //---------------------------------------------------------------------------// } // end namespace cuda //---------------------------------------------------------------------------// // end of cuda_utils/test/Atomic_Add_Kernel.cu //---------------------------------------------------------------------------//
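The test kernel in this pair uses a grid-stride loop, so one fixed launch configuration covers any num_increments: each thread starts at its global index and advances by the total number of threads in the grid. A minimal hypothetical kernel using the same pattern with the built-in single-precision atomicAdd (the test above instead goes through the library's templated Atomic_Add functor):

// Hypothetical kernel illustrating the grid-stride loop plus an atomic accumulation.
// Launch with any grid/block size, e.g. accumulate_ones<<<64, 256>>>(d_sum, n);
__global__ void accumulate_ones(float* sum, unsigned int n)
{
    unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
    unsigned int stride = blockDim.x * gridDim.x;
    for (; i < n; i += stride)
        atomicAdd(sum, 1.0f);   // hardware float atomicAdd
}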
c17924a807f691d8c65f9039bf71cf4e6676be6d.hip
// !!! This is a file automatically generated by hipify!!! /** * * bashCGPU/CUDA * https://suzukiiichiro.github.io/search/?keyword= $ nvcc -O3 -arch=sm_61 03CUDA_Symmetry_BitBoard.cu && ./a.out -c $ nvcc -O3 -arch=sm_61 03CUDA_Symmetry_BitBoard.cu && ./a.out -r GPU $ nvcc -O3 -arch=sm_61 03CUDA_Symmetry_BitBoard.cu && ./a.out -n GPU $ nvcc -O3 -arch=sm_61 03CUDA_Symmetry_BitBoard.cu && ./a.out -n * */ #include <iostream> #include <vector> #include <stdio.h> #include <stdlib.h> #include <stdbool.h> #include <math.h> #include <string.h> #include <time.h> #include <sys/time.h> #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include <device_launch_parameters.h> #define MAX 27 #define THREAD_NUM 96 // //#define UINT64_C(c) c ## ULL // // unsigned long TOTAL=0; unsigned long UNIQUE=0; //GPU typedef struct local { unsigned int BOUND1,BOUND2; unsigned int TOPBIT,ENDBIT,SIDEMASK,LASTMASK; unsigned long board[MAX]; unsigned long COUNT2,COUNT4,COUNT8,TOTAL,UNIQUE; unsigned int STEPS; }local; typedef struct cond { unsigned int DOWN; unsigned int LEFT; unsigned int RIGHT; }cond; // CPU / void symmetryOps(unsigned int size,struct local* l) { /** (1) 9090( 180)90(270) */ if(l->board[l->BOUND2]==1){ unsigned int ptn; unsigned int own; for(ptn=2,own=1;own<size;++own,ptn<<=1){ unsigned int bit; unsigned int you; for(bit=1,you=size-1;(l->board[you]!=ptn)&&l->board[own]>=bit;--you){ bit<<=1; } if(l->board[own]>bit){ return ; } if(l->board[own]<bit){ break; } }//end for // if(own>size-1){ l->COUNT2++; return ; }//end if }//end if /** (2) 90270 180 180 () */ // if(l->board[size-1]==l->ENDBIT){ unsigned int you; unsigned int own; for(you=size-1-1,own=1;own<=size-1;++own,--you){ unsigned int bit; unsigned int ptn; for(bit=1,ptn=l->TOPBIT;(ptn!=l->board[you])&&(l->board[own]>=bit);ptn>>=1){ bit<<=1; } if(l->board[own]>bit){ return ; } if(l->board[own]<bit){ break; } }//end for // if(own>size-1){ l->COUNT4++; return ; } }//end if /** (3)180() */ // if(l->board[l->BOUND1]==l->TOPBIT){ unsigned int ptn; unsigned int own; unsigned int you; unsigned int bit; for(ptn=l->TOPBIT>>1,own=1;own<=size-1;++own,ptn>>=1){ for(bit=1,you=0;(l->board[you]!=ptn)&&(l->board[own]>=bit);++you){ bit<<=1; } if(l->board[own]>bit){ return ; } if(l->board[own]<bit){ break; } }//end for }//end if l->COUNT8++; } /** CPU -c */ // Q void symmetry_backTrack_NR(unsigned int size,unsigned int row,unsigned int _left,unsigned int _down,unsigned int _right,struct local *l) { unsigned int mask=(1<<size)-1; unsigned int down[size]; unsigned int left[size]; unsigned int right[size]; unsigned int bitmap[size]; left[row]=_left; down[row]=_down; right[row]=_right; bitmap[row]=mask&~(left[row]|down[row]|right[row]); while(row>0){ if(bitmap[row]>0){ if(row<l->BOUND1){ // bitmap[row]|=l->SIDEMASK; bitmap[row]^=l->SIDEMASK; }else if(row==l->BOUND2){ // if((down[row]&l->SIDEMASK)==0){ row--; } if((down[row]&l->SIDEMASK)!=l->SIDEMASK){ bitmap[row]&=l->SIDEMASK; } } unsigned int save_bitmap=bitmap[row]; unsigned int bit=-bitmap[row]&bitmap[row]; bitmap[row]^=bit; l->board[row]=bit; //Q if((bit&mask)!=0){ if(row==(size-1)){ if( (save_bitmap&l->LASTMASK)==0){ symmetryOps(size,l); // } row--; }else{ unsigned int n=row++; left[row]=(left[n]|bit)<<1; down[row]=(down[n]|bit); right[row]=(right[n]|bit)>>1; bitmap[row]=mask&~(left[row]|down[row]|right[row]); } }else{ row--; } }else{ row--; } }//end while } // Q void symmetry_backTrack_corner_NR(unsigned int size,unsigned int row,unsigned int _left,unsigned int _down,unsigned int _right,struct local *l) { 
unsigned int mask=(1<<size)-1; unsigned int bit=0; unsigned int down[size]; unsigned int left[size]; unsigned int right[size]; unsigned int bitmap[size]; left[row]=_left; down[row]=_down; right[row]=_right; bitmap[row]=mask&~(left[row]|down[row]|right[row]); while(row>=2){ if(row<l->BOUND1){ // bitmap[row]=bitmap[row]|2; // bitmap[row]=bitmap[row]^2; bitmap[row]&=~2; } if(bitmap[row]>0){ bit=-bitmap[row]&bitmap[row]; bitmap[row]^=bit; if(row==(size-1)){ l->COUNT8++; row--; }else{ unsigned int n=row++; left[row]=(left[n]|bit)<<1; down[row]=(down[n]|bit); right[row]=(right[n]|bit)>>1; l->board[row]=bit; //Q // bitmap[row]=mask&~(left[row]|down[row]|right[row]); } }else{ row--; } }//end while } // void symmetry_NR(unsigned int size,struct local* l) { l->TOTAL=l->UNIQUE=l->COUNT2=l->COUNT4=l->COUNT8=0; unsigned int bit=0; l->TOPBIT=1<<(size-1); l->ENDBIT=l->SIDEMASK=l->LASTMASK=0; l->BOUND1=2; l->BOUND2=0; l->board[0]=1; while(l->BOUND1>1&&l->BOUND1<size-1){ if(l->BOUND1<size-1){ bit=1<<l->BOUND1; l->board[1]=bit; //Q //Q symmetry_backTrack_corner_NR(size,2,(2|bit)<<1,1|bit,(2|bit)>>1,l); } l->BOUND1++; } l->TOPBIT=1<<(size-1); l->ENDBIT=l->TOPBIT>>1; l->SIDEMASK=l->TOPBIT|1; l->LASTMASK=l->TOPBIT|1; l->BOUND1=1; l->BOUND2=size-2; while(l->BOUND1>0 && l->BOUND2<size-1 && l->BOUND1<l->BOUND2){ if(l->BOUND1<l->BOUND2){ bit=1<<l->BOUND1; l->board[0]=bit; //Q //Q symmetry_backTrack_NR(size,1,bit<<1,bit,bit>>1,l); } l->BOUND1++; l->BOUND2--; l->ENDBIT=l->ENDBIT>>1; l->LASTMASK=l->LASTMASK<<1|l->LASTMASK|l->LASTMASK>>1; }//ene while UNIQUE=l->COUNT2+l->COUNT4+l->COUNT8; TOTAL=l->COUNT2*2+l->COUNT4*4+l->COUNT8*8; } /** CPU -r */ // Q void symmetry_backTrack(unsigned int size,unsigned int row,unsigned int left,unsigned int down,unsigned int right,struct local* l) { unsigned int mask=(1<<size)-1; unsigned int bitmap=mask&~(left|down|right); if(row==(size-1)){ if(bitmap){ if( (bitmap&l->LASTMASK)==0){ l->board[row]=bitmap; //Q symmetryOps(size,l); // } } }else{ if(row<l->BOUND1){ bitmap=bitmap|l->SIDEMASK; bitmap=bitmap^l->SIDEMASK; }else{ if(row==l->BOUND2){ if((down&l->SIDEMASK)==0){ return; } if( (down&l->SIDEMASK)!=l->SIDEMASK){ bitmap=bitmap&l->SIDEMASK; } } } while(bitmap){ unsigned int bit=-bitmap&bitmap; bitmap=bitmap^bit; l->board[row]=bit; symmetry_backTrack(size,row+1,(left|bit)<<1,down|bit,(right|bit)>>1,l); } } } // Q void symmetry_backTrack_corner(unsigned int size,unsigned int row,unsigned int left,unsigned int down,unsigned int right,struct local* l) { unsigned int mask=(1<<size)-1; unsigned int bitmap=mask&~(left|down|right); unsigned int bit=0; if(row==(size-1)){ if(bitmap){ l->board[row]=bitmap; l->COUNT8++; } }else{ if(row<l->BOUND1){ // bitmap=bitmap|2; bitmap=bitmap^2; } while(bitmap){ bit=-bitmap&bitmap; bitmap=bitmap^bit; l->board[row]=bit; //Q symmetry_backTrack_corner(size,row+1,(left|bit)<<1,down|bit,(right|bit)>>1,l); } } } // void symmetry_R(unsigned int size,struct local* l) { l->TOTAL=l->UNIQUE=l->COUNT2=l->COUNT4=l->COUNT8=0; unsigned int bit=0; l->TOPBIT=1<<(size-1); l->ENDBIT=l->LASTMASK=l->SIDEMASK=0; l->BOUND1=2; l->BOUND2=0; l->board[0]=1; while(l->BOUND1>1 && l->BOUND1<size-1){ if(l->BOUND1<size-1){ bit=1<<l->BOUND1; l->board[1]=bit; //Q //Q symmetry_backTrack_corner(size,2,(2|bit)<<1,1|bit,(2|bit)>>1,l); } l->BOUND1++; }//end while l->TOPBIT=1<<(size-1); l->ENDBIT=l->TOPBIT>>1; l->SIDEMASK=l->TOPBIT|1; l->LASTMASK=l->TOPBIT|1; l->BOUND1=1; l->BOUND2=size-2; while(l->BOUND1>0 && l->BOUND2<size-1 && l->BOUND1<l->BOUND2){ if(l->BOUND1<l->BOUND2){ bit=1<<l->BOUND1; 
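// Second phase: the row-0 queen is placed at column BOUND1 (never a corner
// here) while BOUND1 walks in from one edge and BOUND2 from the other.
// ENDBIT and LASTMASK are re-derived after every step and, together with
// SIDEMASK, drive the edge pruning and the COUNT2/COUNT4/COUNT8
// classification performed in symmetryOps.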
l->board[0]=bit; //Q //Q symmetry_backTrack(size,1,bit<<1,bit,bit>>1,l); } l->BOUND1++; l->BOUND2--; l->ENDBIT=l->ENDBIT>>1; l->LASTMASK=l->LASTMASK<<1|l->LASTMASK|l->LASTMASK>>1; }//ene while UNIQUE=l->COUNT2+l->COUNT4+l->COUNT8; TOTAL=l->COUNT2*2+l->COUNT4*4+l->COUNT8*8; } /** GPU -g */ __device__ struct dlocal { unsigned int BOUND1,BOUND2; unsigned int TOPBIT,ENDBIT,SIDEMASK,LASTMASK; unsigned long board[MAX]; unsigned long COUNT2,COUNT4,COUNT8,TOTAL,UNIQUE; }dlocal; // GPU __host__ __device__ long GPU_symmetryOps(unsigned int size,struct dlocal* l) { /** (1) 9090( 180)90(270) */ if(l->board[l->BOUND2]==1){ unsigned int ptn; unsigned int own; for(ptn=2,own=1;own<size;++own,ptn<<=1){ unsigned int bit; unsigned int you; for(bit=1,you=size-1;(l->board[you]!=ptn)&& l->board[own]>=bit;--you){ bit<<=1; } if(l->board[own]>bit){ return 0; } if(l->board[own]<bit){ break; } }//end for // if(own>size-1){ l->COUNT2++; return 2; }//end if }//end if /** (2) 90270 180 180 () */ // if(l->board[size-1]==l->ENDBIT){ unsigned int you; unsigned int own; for(you=size-1-1,own=1;own<=size-1;++own,--you){ unsigned int bit; unsigned int ptn; for(bit=1,ptn=l->TOPBIT;(ptn!=l->board[you])&&(l->board[own]>=bit);ptn>>=1){ bit<<=1; } if(l->board[own]>bit){ return 0; } if(l->board[own]<bit){ break; } }//end for // if(own>size-1){ l->COUNT4++; return 4; } }//end if /** (3)180() */ // if(l->board[l->BOUND1]==l->TOPBIT){ unsigned int ptn; unsigned int own; unsigned int you; unsigned int bit; for(ptn=l->TOPBIT>>1,own=1;own<=size-1;++own,ptn>>=1){ for(bit=1,you=0;(l->board[you]!=ptn)&&(l->board[own]>=bit);++you){ bit<<=1; } if(l->board[own]>bit){ return 0; } if(l->board[own]<bit){ break; } }//end for }//end if l->COUNT8++; return 8; } // GPU Q __host__ __device__ long GPU_symmetry_backTrack(unsigned int size,unsigned int row,unsigned int left,unsigned int down,unsigned int right,struct dlocal* l) { unsigned long counter=0; unsigned int mask=(1<<size)-1; unsigned int bitmap=mask&~(left|down|right); if(row==(size-1)){ if(bitmap){ if( (bitmap& l->LASTMASK)==0){ l->board[row]=bitmap; //Q counter+=GPU_symmetryOps(size,l); // } } }else{ if(row<l->BOUND1){ bitmap=bitmap|l->SIDEMASK; bitmap=bitmap^l->SIDEMASK; }else{ if(row==l->BOUND2){ if((down&l->SIDEMASK)==0){ return 0; } if( (down&l->SIDEMASK)!=l->SIDEMASK){ bitmap=bitmap&l->SIDEMASK; } } } while(bitmap){ unsigned int bit=-bitmap&bitmap; bitmap=bitmap^bit; l->board[row]=bit; counter+=GPU_symmetry_backTrack(size,row+1,(left|bit)<<1,down|bit,(right|bit)>>1,l); } } return counter; } // GPU Q __host__ __device__ long GPU_symmetry_backTrack_corner(unsigned int size,unsigned int row,unsigned int left,unsigned int down,unsigned int right,struct dlocal* l) { unsigned long counter=0; unsigned int mask=(1<<size)-1; unsigned int bitmap=mask&~(left|down|right); unsigned int bit=0; if(row==(size-1)){ if(bitmap){ l->board[row]=bitmap; l->COUNT8++; counter+=8; } }else{ if(row<l->BOUND1){ // bitmap=bitmap|2; bitmap=bitmap^2; } while(bitmap){ bit=-bitmap&bitmap; bitmap=bitmap^bit; l->board[row]=bit; //Q counter+=GPU_symmetry_backTrack_corner(size,row+1,(left|bit)<<1,down|bit,(right|bit)>>1,l); } } return counter; } // GPU -g __host__ __device__ void GPU_symmetry_R(unsigned int size,struct local* hostLocal) { // GPU dlocal struct dlocal l; l.TOTAL=l.UNIQUE=l.COUNT2=l.COUNT4=l.COUNT8=0; unsigned int bit=0; l.TOPBIT=1<<(size-1); l.ENDBIT=l.LASTMASK=l.SIDEMASK=0; l.BOUND1=2; l.BOUND2=0; l.board[0]=1; while(l.BOUND1>1 && l.BOUND1<size-1){ if(l.BOUND1<size-1){ bit=1<<l.BOUND1; l.board[1]=bit; //Q 
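// Corner branch of GPU_symmetry_R: with the row-0 queen fixed in the corner
// (board[0]=1), row 1 sweeps columns 2..size-2.  A board with a queen in the
// corner can never coincide with any of its rotations, so every solution
// completed from here is tallied straight into COUNT8 without calling
// GPU_symmetryOps.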
//Q GPU_symmetry_backTrack_corner(size,2,(2|bit)<<1,1|bit,(2|bit)>>1,&l); } l.BOUND1++; }//end while l.TOPBIT=1<<(size-1); l.ENDBIT=l.TOPBIT>>1; l.SIDEMASK=l.TOPBIT|1; l.LASTMASK=l.TOPBIT|1; l.BOUND1=1; l.BOUND2=size-2; while(l.BOUND1>0 && l.BOUND2<size-1 && l.BOUND1<l.BOUND2){ if(l.BOUND1<l.BOUND2){ bit=1<<l.BOUND1; l.board[0]=bit; //Q //Q GPU_symmetry_backTrack(size,1,bit<<1,bit,bit>>1,&l); } l.BOUND1++; l.BOUND2--; l.ENDBIT=l.ENDBIT>>1; l.LASTMASK=l.LASTMASK<<1|l.LASTMASK|l.LASTMASK>>1; }//ene while // hostLocal hostLocal->UNIQUE=l.COUNT2+l.COUNT4+l.COUNT8; hostLocal->TOTAL=l.COUNT2*2+l.COUNT4*4+l.COUNT8*8; } /** CUDA13 */ // GPU -n __device__ int BitBoard_symmetryOps(const unsigned int size,const unsigned int* board,struct local* l) { unsigned int own,ptn,you,bit; //90 if(board[l->BOUND2]==1){ own=1; ptn=2; while(own<=size-1){ bit=1; you=size-1; while((board[you]!=ptn)&&(board[own]>=bit)){ bit<<=1; you--; } if(board[own]>bit){ return 0; } else if(board[own]<bit){ break; } own++; ptn<<=1; } /** 90180/270 */ if(own>size-1){ return 2; } } //180 if(board[size-1]==l->ENDBIT){ own=1; you=size-1-1; while(own<=size-1){ bit=1; ptn=l->TOPBIT; while((board[you]!=ptn)&&(board[own]>=bit)){ bit<<=1; ptn>>=1; } if(board[own]>bit){ return 0; } else if(board[own]<bit){ break; } own++; you--; } /** 90180 */ if(own>size-1){ return 4; } } //270 if(board[l->BOUND1]==l->TOPBIT){ own=1; ptn=l->TOPBIT>>1; while(own<=size-1){ bit=1; you=0; while((board[you]!=ptn)&&(board[own]>=bit)){ bit<<=1; you++; } if(board[own]>bit){ return 0; } else if(board[own]<bit){ break; } own++; ptn>>=1; } } return 8; } // GPU -n __global__ void BitBoard_cuda_kernel_b2(const unsigned int size,unsigned int mark,unsigned int* _total,unsigned int* _unique,unsigned long _cond,unsigned int* board,unsigned int _row,struct cond* c,struct local* l) { const unsigned int mask=(1<<size)-1; unsigned long total=0; unsigned int unique=0; int row=0; unsigned int bit; // // // //ID unsigned const int tid=threadIdx.x; //ID unsigned const int bid=blockIdx.x; //ID unsigned const int idx=bid*blockDim.x+tid; // // // //shared //10mask //GPU10 //THREAD_NUM __shared__ unsigned int down[THREAD_NUM][10]; //down[tid][row]=_down[idx]; down[tid][row]=c[idx].DOWN; __shared__ unsigned int left[THREAD_NUM][10]; //left[tid][row]=_left[idx]; left[tid][row]=c[idx].LEFT; __shared__ unsigned int right[THREAD_NUM][10]; //right[tid][row]=_right[idx]; right[tid][row]=c[idx].RIGHT; __shared__ unsigned int bitmap[THREAD_NUM][10]; //down,left,rightbitmap bitmap[tid][row]=mask&~(down[tid][row]|left[tid][row]|right[tid][row]); __shared__ unsigned int sum[THREAD_NUM]; unsigned int c_aBoard[MAX]; __shared__ unsigned int usum[THREAD_NUM]; // //GPUSTEPS_cond if(idx<_cond){ //_down,_left,_right //down,left,right //CPU t_STEPS // // idx // for(int i=0;i<_row;i++){ c_aBoard[i]=board[idx*_row+i]; //1 } unsigned int bitmap_tid_row; unsigned int down_tid_row; unsigned int left_tid_row; unsigned int right_tid_row; while(row>=0){ bitmap_tid_row=bitmap[tid][row]; down_tid_row=down[tid][row]; left_tid_row=left[tid][row]; right_tid_row=right[tid][row]; // //bitmap[tid][row]=00000000 //1 if(bitmap_tid_row==0){ row--; }else{ /**11 **********/ // if(row+_row<l->BOUND1){ bitmap_tid_row=bitmap[tid][row]&=~l->SIDEMASK; // }else if(row+_row==l->BOUND2) { if((down_tid_row&l->SIDEMASK)==0){ row--; continue; } if((down_tid_row&l->SIDEMASK)!=l->SIDEMASK){ bitmap_tid_row=bitmap[tid][row]&=l->SIDEMASK; } } int save_bitmap=bitmap[tid][row]; // // 
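// The next statement does three jobs at once: it isolates the lowest free
// column of this row (-x & x), records it in the thread-private board copy
// c_aBoard at absolute row row+_row, and clears that column from the
// shared-memory bitmap so the following iteration tries the next candidate.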
bitmap[tid][row]^=c_aBoard[row+_row]=bit=(-bitmap_tid_row&bitmap_tid_row); if((bit&mask)!=0){ //? // if(row+1==mark){ /***11 l->LASTMASK*********************/ if((save_bitmap&l->LASTMASK)==0){ /***12 symmetryOps l->BOUND1,l->BOUND2,l->TOPBIT,l->ENDBIT*****/ int s=BitBoard_symmetryOps(size,c_aBoard,l); if(s!=0){ //print(size); //print()TOTAL++ //TOTAL // unique++; total+=s; // } row--; } }else{ int rowP=row+1; down[tid][rowP]=down_tid_row|bit; left[tid][rowP]=(left_tid_row|bit)<<1; right[tid][rowP]=(right_tid_row|bit)>>1; bitmap[tid][rowP] =mask&~( down[tid][rowP] |left[tid][rowP] |right[tid][rowP]); row++; } }else{ // row--; } } } //sum[tid] sum[tid]=total; usum[tid]=unique; }else{ //_condtotal sum[tid]=0; usum[tid]=0; } //__syncthreads() //__syncthreads() __syncthreads();if(tid<64&&tid+64<THREAD_NUM){ sum[tid]+=sum[tid+64]; usum[tid]+=usum[tid+64]; } __syncwarp();if(tid<32){ sum[tid]+=sum[tid+32]; usum[tid]+=usum[tid+32]; } __syncwarp();if(tid<16){ sum[tid]+=sum[tid+16]; usum[tid]+=usum[tid+16]; } __syncwarp();if(tid<8){ sum[tid]+=sum[tid+8]; usum[tid]+=usum[tid+8]; } __syncwarp();if(tid<4){ sum[tid]+=sum[tid+4]; usum[tid]+=usum[tid+4]; } __syncwarp();if(tid<2){ sum[tid]+=sum[tid+2]; usum[tid]+=usum[tid+2]; } __syncwarp();if(tid<1){ sum[tid]+=sum[tid+1]; usum[tid]+=usum[tid+1]; } __syncwarp();if(tid==0){ _total[bid]=sum[0]; _unique[bid]=usum[0]; } } // GPU -n __global__ void BitBoard_cuda_kernel_b1(const unsigned int size,unsigned int mark,unsigned int* _total,unsigned int* _unique,unsigned long _cond,unsigned int _row,struct cond* c,struct local* l) { const unsigned int mask=(1<<size)-1; unsigned long total=0; unsigned int unique=0; int row=0; unsigned int bit; // // // //ID const unsigned int tid=threadIdx.x; //ID const unsigned int bid=blockIdx.x; //ID const unsigned int idx=bid*blockDim.x+tid; // // // //shared //10mask //GPU10 //THREAD_NUM __shared__ unsigned int down[THREAD_NUM][10]; down[tid][row]=c[idx].DOWN; __shared__ unsigned int left[THREAD_NUM][10]; left[tid][row]=c[idx].LEFT; __shared__ unsigned int right[THREAD_NUM][10]; right[tid][row]=c[idx].RIGHT; __shared__ unsigned int bitmap[THREAD_NUM][10]; bitmap[tid][row] =mask&~(down[tid][row]|left[tid][row]|right[tid][row]); __shared__ unsigned int sum[THREAD_NUM]; __shared__ unsigned int usum[THREAD_NUM]; // //GPUSTEPS_cond if(idx<_cond){ //_down,_left,_right //down,left,right //CPU t_STEPS // // idx // unsigned int bitmap_tid_row; unsigned int down_tid_row; unsigned int left_tid_row; unsigned int right_tid_row; while(row>=0){ bitmap_tid_row=bitmap[tid][row]; down_tid_row=down[tid][row]; left_tid_row=left[tid][row]; right_tid_row=right[tid][row]; if(bitmap_tid_row==0){ row--; }else{ /**11 **********/ if(row+_row<l->BOUND1) { bitmap_tid_row=bitmap[tid][row]&=~2; // bm|=2; bm^=2; (bm&=~2) } // // bitmap[tid][row] ^=bit =(-bitmap_tid_row&bitmap_tid_row); if((bit&mask)!=0){ //? 
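// Corner kernel: once row+1==mark every row delegated to the GPU has been
// filled, and each completed placement adds a fixed 8 to the running total
// (corner solutions need no per-board symmetry classification here).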
// if(row+1==mark){ //TOTAL // unique++; total+=8; // //} row--; }else{ int rowP=row+1; down[tid][rowP]=down_tid_row|bit; left[tid][rowP]=(left_tid_row|bit)<<1; right[tid][rowP]=(right_tid_row|bit)>>1; bitmap[tid][rowP]=mask&~(down[tid][rowP]|left[tid][rowP]|right[tid][rowP]); row++; } }else{ // row--; } } } //sum[tid] sum[tid]=total; usum[tid]=unique; }else{ //_condtotal sum[tid]=0; usum[tid]=0; } //__syncthreads() //__syncthreads() __syncthreads(); if(tid<64&&tid+64<THREAD_NUM){ sum[tid]+=sum[tid+64]; usum[tid]+=usum[tid+64]; } __syncwarp(); if(tid<32){ sum[tid]+=sum[tid+32]; usum[tid]+=usum[tid+32]; } __syncwarp(); if(tid<16){ sum[tid]+=sum[tid+16]; usum[tid]+=usum[tid+16]; } __syncwarp(); if(tid<8){ sum[tid]+=sum[tid+8]; usum[tid]+=usum[tid+8]; } __syncwarp(); if(tid<4){ sum[tid]+=sum[tid+4]; usum[tid]+=usum[tid+4]; } __syncwarp(); if(tid<2){ sum[tid]+=sum[tid+2]; usum[tid]+=usum[tid+2]; } __syncwarp(); if(tid<1){ sum[tid]+=sum[tid+1]; usum[tid]+=usum[tid+1]; } __syncwarp(); if(tid==0){ _total[bid]=sum[0]; _unique[bid]=usum[0]; } } // GPU -n void BitBoard_backTrack2G(const unsigned int size,unsigned int row,unsigned int _left,unsigned int _down,unsigned int _right,struct local* l) { //GPUGPU /***11 size<8mark2*********************/ unsigned int mark=size>12?size-10:3; //unsigned int mark=size>11?size-9:3; if(size<8){ mark=2; } const unsigned int h_mark=row; unsigned long totalCond=0; unsigned int mask=(1<<size)-1; bool matched=false; //host unsigned int down[32]; down[row]=_down; unsigned int right[32]; right[row]=_right; unsigned int left[32]; left[row]=_left; //bitmap //stack1 unsigned int bitmap[32]; bitmap[row]=mask&~(left[row]|down[row]|right[row]); unsigned int bit; struct local* hostLocal; hipHostMalloc((void**) &hostLocal,sizeof(struct local)*l->STEPS); struct local* deviceLocal; hipHostMalloc((void**) &deviceLocal,sizeof(struct local)*l->STEPS); struct cond* hostCond; hipHostMalloc((void**) &hostCond,sizeof(struct cond)*l->STEPS); struct cond* deviceCond; hipHostMalloc((void**) &deviceCond,sizeof(struct cond)*l->STEPS); unsigned int* hostTotal; hipHostMalloc((void**) &hostTotal,sizeof(long)*l->STEPS/THREAD_NUM); unsigned int* deviceTotal; hipMalloc((void**) &deviceTotal,sizeof(long)*l->STEPS/THREAD_NUM); unsigned int* hostUnique; hipHostMalloc((void**) &hostUnique,sizeof(long)*l->STEPS/THREAD_NUM); unsigned int* deviceUnique; hipMalloc((void**) &deviceUnique,sizeof(long)*l->STEPS/THREAD_NUM); // unsigned int* hostBoard; hipHostMalloc((void**) &hostBoard,sizeof(int)*l->STEPS*mark); unsigned int* deviceBoard; hipMalloc((void**) &deviceBoard,sizeof(int)*l->STEPS*mark); // hostLocal[0].BOUND1=l->BOUND1; hostLocal[0].BOUND2=l->BOUND2; hostLocal[0].TOPBIT=l->TOPBIT; hostLocal[0].ENDBIT=l->ENDBIT; hostLocal[0].SIDEMASK=l->SIDEMASK; hostLocal[0].LASTMASK=l->LASTMASK; hostLocal[0].STEPS=l->STEPS; //123CPU->row==mark 3 //down,left,righthostDown ,hostLeft,hostRight // //->3GPU //13CPU //n15row=5CPU //GPU(GPU10 //) unsigned int rowP=0; unsigned long total=0; unsigned long unique=0; while(row>=h_mark) { //bitmap[row]=00000000 //1 if(bitmap[row]==0){ row--; }else{ // if(row<l->BOUND1){ // bitmap[row]&=~l->SIDEMASK; }else if(row==l->BOUND2) { // if((down[row]&l->SIDEMASK)==0){ row--; } if((down[row]&l->SIDEMASK)!=l->SIDEMASK){ bitmap[row]&=l->SIDEMASK; } } bitmap[row]^=l->board[row]=bit=(-bitmap[row]&bitmap[row]); if((bit&mask)!=0){ // rowP=row+1; down[rowP]=down[row]|bit; left[rowP]=(left[row]|bit)<<1; right[rowP]=(right[row]|bit)>>1; bitmap[rowP]=mask&~(down[rowP]|left[rowP]|right[rowP]); 
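// Host-side enumeration of the first `mark` rows: whenever `row` reaches
// `mark`, the current down/left/right masks plus the placed board prefix are
// packed into hostCond/hostBoard as one work item for the next
// BitBoard_cuda_kernel_b2 launch.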
row++; if(row==mark){ //3(mark) //down,left,right // //GPU //totalCond threadId down,left,right //row=2(13n15row=5) //hostDown,hostLeft,hostRight hostCond[totalCond].DOWN=down[row]; hostCond[totalCond].LEFT=left[row]; hostCond[totalCond].RIGHT=right[row]; for(int i=0;i<mark;i++){ hostBoard[totalCond*mark+i]=l->board[i]; } // totalCond++; //GPUGPUSTEPSGPU // //ntotalCondSTEPSn // //totalCond==STEPS if(totalCond==l->STEPS){ //matched=trueCOUNT //GPUGPU //matched=true if(matched){ // hipMemcpy(hostTotal, deviceTotal, sizeof(long)*l->STEPS/THREAD_NUM,hipMemcpyDeviceToHost); hipMemcpy(hostUnique,deviceUnique,sizeof(long)*l->STEPS/THREAD_NUM,hipMemcpyDeviceToHost); // for(int col=0;col<l->STEPS/THREAD_NUM;col++){ total+=hostTotal[col]; unique+=hostUnique[col]; } matched=false; } // hipMemcpy(deviceBoard,hostBoard,sizeof(int)*totalCond*mark, hipMemcpyHostToDevice); hipMemcpy(deviceCond, hostCond, sizeof(struct cond)*l->STEPS, hipMemcpyHostToDevice); hipMemcpy(deviceLocal,hostLocal,sizeof(struct local)*l->STEPS, hipMemcpyHostToDevice); // CUDA hipLaunchKernelGGL(( BitBoard_cuda_kernel_b2) , dim3(l->STEPS/THREAD_NUM),dim3(THREAD_NUM) , 0, 0, size,size-mark,deviceTotal,deviceUnique,totalCond,deviceBoard,row,deviceCond,deviceLocal); //STEPS //totalCond //GPUGPUmatched=true matched=true; //totalCond==STEPSGPU0 //(STEPSGPU) totalCond=0; } //hostDown,hostLeft,hostRight1 // row=2 //hostDown,hostLeft,hostRight row--; } }else{ //row==markCPU //nqueen row--; } } } //matched=trueCOUNT //GPUGPU //matched=true if(matched){ // hipMemcpy(hostTotal, deviceTotal, sizeof(long)*l->STEPS/THREAD_NUM,hipMemcpyDeviceToHost); hipMemcpy(hostUnique,deviceUnique,sizeof(long)*l->STEPS/THREAD_NUM,hipMemcpyDeviceToHost); // for(int col=0;col<l->STEPS/THREAD_NUM;col++){ total+=hostTotal[col]; unique+=hostUnique[col]; } matched=false; } // hipMemcpy(deviceBoard, hostBoard,sizeof(int)*totalCond*mark, hipMemcpyHostToDevice); hipMemcpy(deviceCond, hostCond, sizeof(struct cond)*totalCond, hipMemcpyHostToDevice); hipMemcpy(deviceLocal, hostLocal,sizeof(int)*l->STEPS/THREAD_NUM,hipMemcpyHostToDevice); //size-mark GPU totalCond //STEPS //totalCond // CUDA hipLaunchKernelGGL(( BitBoard_cuda_kernel_b2) , dim3(l->STEPS/THREAD_NUM),dim3(THREAD_NUM) , 0, 0, size,size-mark,deviceTotal,deviceUnique,totalCond,deviceBoard,mark,deviceCond,deviceLocal); // hipMemcpy(hostTotal, deviceTotal, sizeof(long)*l->STEPS/THREAD_NUM,hipMemcpyDeviceToHost); hipMemcpy(hostUnique,deviceUnique,sizeof(long)*l->STEPS/THREAD_NUM,hipMemcpyDeviceToHost); // for(int col=0;col<l->STEPS/THREAD_NUM;col++){ total+=hostTotal[col]; unique+=hostUnique[col]; } TOTAL+=total; UNIQUE+=unique; hipFree(deviceTotal); hipFree(deviceUnique); hipFree(deviceBoard); hipFree(deviceCond); hipFree(deviceLocal); hipHostFree(hostTotal); hipHostFree(hostUnique); hipHostFree(hostBoard); hipHostFree(hostCond); hipHostFree(hostLocal); } // GPU -n void BitBoard_backTrack1G(const unsigned int size,unsigned int row,unsigned int _left,unsigned int _down,unsigned int _right,struct local* l) { //GPUGPU /***08 mark3*********************/ const unsigned int mark=size>12?size-10:3; const unsigned int h_mark=row; const unsigned int mask=(1<<size)-1; unsigned long totalCond=0; bool matched=false; //host unsigned int down[32]; down[row]=_down; unsigned int right[32]; right[row]=_right; unsigned int left[32]; left[row]=_left; //bitmap //stack1 unsigned int bitmap[32]; bitmap[row]=mask&~(left[row]|down[row]|right[row]); unsigned int bit; /** unsigned int* hostDown; hipHostMalloc((void**) 
&hostDown,sizeof(int)*l->STEPS); unsigned int* hostLeft; hipHostMalloc((void**) &hostLeft,sizeof(int)*l->STEPS); unsigned int* hostRight; hipHostMalloc((void**) &hostRight,sizeof(int)*l->STEPS); unsigned int* deviceDown; hipMalloc((void**) &deviceDown,sizeof(int)*l->STEPS); unsigned int* deviceLeft; hipMalloc((void**) &deviceLeft,sizeof(int)*l->STEPS); unsigned int* deviceRight; hipMalloc((void**) &deviceRight,sizeof(int)*l->STEPS); */ struct local* hostLocal; hipHostMalloc((void**) &hostLocal,sizeof(struct local)*l->STEPS); struct local* deviceLocal; hipHostMalloc((void**) &deviceLocal,sizeof(struct local)*l->STEPS); struct cond* hostCond; hipHostMalloc((void**) &hostCond,sizeof(struct cond)*l->STEPS); struct cond* deviceCond; hipHostMalloc((void**) &deviceCond,sizeof(struct cond)*l->STEPS); unsigned int* hostTotal; hipHostMalloc((void**) &hostTotal,sizeof(int)*l->STEPS/THREAD_NUM); unsigned int* hostUnique; hipHostMalloc((void**) &hostUnique,sizeof(int)*l->STEPS/THREAD_NUM); unsigned int* deviceTotal; hipMalloc((void**) &deviceTotal,sizeof(int)*l->STEPS/THREAD_NUM); unsigned int* deviceUnique; hipMalloc((void**) &deviceUnique,sizeof(int)*l->STEPS/THREAD_NUM); hostLocal[0].BOUND1=l->BOUND1; hostLocal[0].BOUND2=l->BOUND2; hostLocal[0].TOPBIT=l->TOPBIT; hostLocal[0].ENDBIT=l->ENDBIT; hostLocal[0].SIDEMASK=l->SIDEMASK; hostLocal[0].LASTMASK=l->LASTMASK; hostLocal[0].STEPS=l->STEPS; //123CPU->row==mark 3 //down,left,right hostDown,hostLeft,hostRight // //->3GPU //13CPU //n15row=5CPU //GPU(GPU10 //) //while(row>=0) { int rowP=0; unsigned long total=0; unsigned long unique=0; while(row>=h_mark) { //bitmap[row]=00000000 //1 //06GPU if(bitmap[row]==0){ row--; } else{// if(row<l->BOUND1) { /***11 *********************/ bitmap[row]&=~2; // bm|=2; bm^=2; (bm&=~2) } bitmap[row]^=bit=(-bitmap[row]&bitmap[row]); if((bit&mask)!=0){// rowP=row+1; down[rowP]=down[row]|bit; left[rowP]=(left[row]|bit)<<1; right[rowP]=(right[row]|bit)>>1; bitmap[rowP]=mask&~(down[rowP]|left[rowP]|right[rowP]); row++; if(row==mark){ //3(mark) //down,left,right // //GPU //totalCond threadId down,left,right //row=2(13n15row=5) //hostDown,hostLeft,hostRight /** hostDown[totalCond]=down[row]; hostLeft[totalCond]=left[row]; hostRight[totalCond]=right[row]; */ hostCond[totalCond].DOWN=down[row]; hostCond[totalCond].LEFT=left[row]; hostCond[totalCond].RIGHT=right[row]; // totalCond++; //GPUGPUSTEPSGPU // //ntotalCondSTEPSn // //totalCond==STEPS if(totalCond==l->STEPS){ //matched=trueCOUNT //GPUGPU //matched=true if(matched){ // hipMemcpy(hostTotal, deviceTotal, sizeof(int)*l->STEPS/THREAD_NUM,hipMemcpyDeviceToHost); hipMemcpy(hostUnique,deviceUnique,sizeof(int)*l->STEPS/THREAD_NUM,hipMemcpyDeviceToHost); // for(int col=0;col<l->STEPS/THREAD_NUM;col++){ total+=hostTotal[col]; unique+=hostUnique[col]; } matched=false; } // /** hipMemcpy(deviceDown, hostDown, sizeof(int)*totalCond, hipMemcpyHostToDevice); hipMemcpy(deviceLeft, hostLeft, sizeof(int)*totalCond, hipMemcpyHostToDevice); hipMemcpy(deviceRight,hostRight,sizeof(int)*totalCond, hipMemcpyHostToDevice); */ hipMemcpy(deviceCond, hostCond, sizeof(struct cond)*l->STEPS, hipMemcpyHostToDevice); hipMemcpy(deviceLocal,hostLocal,sizeof(struct local)*l->STEPS, hipMemcpyHostToDevice); // CUDA hipLaunchKernelGGL(( BitBoard_cuda_kernel_b1) , dim3(l->STEPS/THREAD_NUM),dim3(THREAD_NUM) , 0, 0, size,size-mark,deviceTotal,deviceUnique,totalCond,row,deviceCond,deviceLocal); //STEPS //totalCond //GPUGPUmatched=true matched=true; //totalCond==STEPSGPU0 //(STEPSGPU) totalCond=0; } 
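// A full batch of STEPS work items has just been dispatched, so totalCond is
// reset and the host keeps enumerating; the results of that launch are only
// read back at the start of the next full batch (via the `matched` flag) or
// in the final collection after the loop.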
//hostDown,hostLeft,hostRight1 // row=2 //hostDown,hostLeft,hostRight row--; } }else{ //row==markCPU //nqueen row--; } } } //matched=trueCOUNT //GPUGPU //matched=true if(matched){ // hipMemcpy(hostTotal, deviceTotal, sizeof(int)*l->STEPS/THREAD_NUM,hipMemcpyDeviceToHost); hipMemcpy(hostUnique,deviceUnique,sizeof(int)*l->STEPS/THREAD_NUM,hipMemcpyDeviceToHost); // for(int col=0;col<l->STEPS/THREAD_NUM;col++){ total+=hostTotal[col]; unique+=hostUnique[col]; } matched=false; } // /* hipMemcpy(deviceDown, hostDown, sizeof(int)*totalCond, hipMemcpyHostToDevice); hipMemcpy(deviceLeft, hostLeft, sizeof(int)*totalCond, hipMemcpyHostToDevice); hipMemcpy(deviceRight,hostRight,sizeof(int)*totalCond, hipMemcpyHostToDevice); */ hipMemcpy(deviceCond, hostCond, sizeof(struct cond)*totalCond, hipMemcpyHostToDevice); hipMemcpy(deviceLocal,hostLocal,sizeof(int)*l->STEPS/THREAD_NUM, hipMemcpyHostToDevice); // CUDA hipLaunchKernelGGL(( BitBoard_cuda_kernel_b1) , dim3(l->STEPS/THREAD_NUM),dim3(THREAD_NUM) , 0, 0, //size,size-mark,deviceDown,deviceLeft,deviceRight,deviceTotal,deviceUnique,totalCond,mark,deviceCond,deviceLocal); (size,size-mark,deviceTotal,deviceUnique,totalCond,mark,deviceCond,deviceLocal); /* // */ hipMemcpy(hostTotal, deviceTotal, sizeof(int)*l->STEPS/THREAD_NUM,hipMemcpyDeviceToHost); hipMemcpy(hostUnique,deviceUnique,sizeof(int)*l->STEPS/THREAD_NUM,hipMemcpyDeviceToHost); // for(int col=0;col<l->STEPS/THREAD_NUM;col++){ total+=hostTotal[col]; unique+=hostUnique[col]; } TOTAL+=total; UNIQUE+=unique; // /* hipFree(deviceDown); hipFree(deviceLeft); hipFree(deviceRight); */ hipFree(deviceTotal); hipFree(deviceUnique); hipFree(deviceCond); hipFree(deviceLocal); /* hipHostFree(hostDown); hipHostFree(hostLeft); hipHostFree(hostRight); */ hipHostFree(hostTotal); hipHostFree(hostUnique); hipHostFree(hostCond); hipHostFree(hostLocal); } // GPU -n void BitBoard_build(const unsigned int size,int STEPS) { if(size<=0||size>32){return;} /** int unsigned total: TOTAL */ struct local l; //GPU l.STEPS=STEPS; unsigned int bit=1; l.board[0]=1; unsigned int left=bit<<1,down=bit,right=bit>>1; /** 232 */ for(l.BOUND1=2;l.BOUND1<size-1;l.BOUND1++){ l.board[1]=bit=(1<<l.BOUND1); BitBoard_backTrack1G(size,2,(left|bit)<<1,(down|bit),(right|bit)>>1,&l); } l.TOPBIT=1<<(size-1); l.SIDEMASK=l.LASTMASK=(l.TOPBIT|1); l.ENDBIT=(l.TOPBIT>>1); /** 12 1/2 n=8 1,2,3 1/2+1 n=9 1,2,3,4 */ for(l.BOUND1=1,l.BOUND2=size-1-1;l.BOUND1<l.BOUND2;l.BOUND1++,l.BOUND2--){ l.board[0]=bit=(1<<l.BOUND1); BitBoard_backTrack2G(size,1,bit<<1,bit,bit>>1,&l); l.LASTMASK|=l.LASTMASK>>1|l.LASTMASK<<1; l.ENDBIT>>=1; } } // CUDA bool InitCUDA() { int count; hipGetDeviceCount(&count); if(count==0){fprintf(stderr,"There is no device.\n");return false;} unsigned int i; for(i=0;i<count;++i){ struct hipDeviceProp_t prop; if(hipGetDeviceProperties(&prop,i)==hipSuccess){if(prop.major>=1){break;} } } if(i==count){fprintf(stderr,"There is no device supporting CUDA 1.x.\n");return false;} hipSetDevice(i); return true; } // int main(int argc,char** argv) { bool cpu=false,cpur=false,gpu=false,gpuBitBoard=false; unsigned int argstart=2; if(argc>=2&&argv[1][0]=='-'){ if(argv[1][1]=='c'||argv[1][1]=='C'){cpu=true;} else if(argv[1][1]=='r'||argv[1][1]=='R'){cpur=true;} else if(argv[1][1]=='c'||argv[1][1]=='C'){cpu=true;} else if(argv[1][1]=='g'||argv[1][1]=='G'){gpu=true;} else if(argv[1][1]=='n'||argv[1][1]=='N'){gpuBitBoard=true;} else{ gpuBitBoard=true; } //gpu argstart=2; } if(argc<argstart){ printf("Usage: %s [-c|-g|-r|-s] n STEPS\n",argv[0]); printf(" -r: CPU 
\n"); printf(" -c: CPU \n"); printf(" -g: GPU \n"); printf(" -n: GPU \n"); } if(cpur){ printf("\n\n \n"); } else if(cpu){ printf("\n\n \n"); } else if(gpu){ printf("\n\n GPU\n"); } else if(gpuBitBoard){ printf("\n\n GPU \n"); } if(cpu||cpur) { unsigned int min=4; unsigned int targetN=17; struct timeval t0; struct timeval t1; printf("%s\n"," N: Total Unique dd:hh:mm:ss.ms"); for(unsigned int size=min;size<=targetN;size++){ local l; gettimeofday(&t0,NULL);// if(cpur){ // symmetry_R(size,&l); } if(cpu){ // symmetry_NR(size,&l); } // gettimeofday(&t1,NULL);// unsigned int ss; unsigned int ms; unsigned int dd; if(t1.tv_usec<t0.tv_usec) { dd=(t1.tv_sec-t0.tv_sec-1)/86400; ss=(t1.tv_sec-t0.tv_sec-1)%86400; ms=(1000000+t1.tv_usec-t0.tv_usec+500)/10000; }else { dd=(t1.tv_sec-t0.tv_sec)/86400; ss=(t1.tv_sec-t0.tv_sec)%86400; ms=(t1.tv_usec-t0.tv_usec+500)/10000; }//end if unsigned int hh=ss/3600; unsigned int mm=(ss-hh*3600)/60; ss%=60; printf("%2d:%13ld%12ld%8.2d:%02d:%02d:%02d.%02d\n",size,TOTAL,UNIQUE,dd,hh,mm,ss,ms); } //end for }//end if if(gpu||gpuBitBoard) { int STEPS=24576; if(!InitCUDA()){return 0;} unsigned int min=4; unsigned int targetN=21; struct timeval t0; struct timeval t1; printf("%s\n"," N: Total Unique dd:hh:mm:ss.ms"); for(unsigned int size=min;size<=targetN;size++){ gettimeofday(&t0,NULL); if(gpu){ TOTAL=UNIQUE=0; local l[MAX]; GPU_symmetry_R(size,&l[0]); TOTAL=l->TOTAL; UNIQUE=l->UNIQUE; }else if(gpuBitBoard){ TOTAL=UNIQUE=0; BitBoard_build(size,STEPS); } gettimeofday(&t1,NULL); unsigned int ss; unsigned int ms; unsigned int dd; if (t1.tv_usec<t0.tv_usec) { dd=(int)(t1.tv_sec-t0.tv_sec-1)/86400; ss=(t1.tv_sec-t0.tv_sec-1)%86400; ms=(1000000+t1.tv_usec-t0.tv_usec+500)/10000; } else { dd=(int)(t1.tv_sec-t0.tv_sec)/86400; ss=(t1.tv_sec-t0.tv_sec)%86400; ms=(t1.tv_usec-t0.tv_usec+500)/10000; }//end if unsigned int hh=ss/3600; unsigned int mm=(ss-hh*3600)/60; ss%=60; printf("%2d:%13ld%12ld%8.2d:%02d:%02d:%02d.%02d\n",size,TOTAL,UNIQUE,dd,hh,mm,ss,ms); } } return 0; }
c17924a807f691d8c65f9039bf71cf4e6676be6d.cu
/** * * bash版対称解除法のC言語版のGPU/CUDA移植版 * 詳しい説明はこちらをどうぞ https://suzukiiichiro.github.io/search/?keyword=Nクイーン問題 非再帰でのコンパイルと実行 $ nvcc -O3 -arch=sm_61 03CUDA_Symmetry_BitBoard.cu && ./a.out -c 再帰でのコンパイルと実行 $ nvcc -O3 -arch=sm_61 03CUDA_Symmetry_BitBoard.cu && ./a.out -r GPU で並列処理せずに実行 $ nvcc -O3 -arch=sm_61 03CUDA_Symmetry_BitBoard.cu && ./a.out -n GPU で並列処理で実行(ビットボード) $ nvcc -O3 -arch=sm_61 03CUDA_Symmetry_BitBoard.cu && ./a.out -n * */ #include <iostream> #include <vector> #include <stdio.h> #include <stdlib.h> #include <stdbool.h> #include <math.h> #include <string.h> #include <time.h> #include <sys/time.h> #include <cuda.h> #include <cuda_runtime.h> #include <device_launch_parameters.h> #define MAX 27 #define THREAD_NUM 96 // システムによって以下のマクロが必要であればコメントを外してください。 //#define UINT64_C(c) c ## ULL // // グローバル変数 unsigned long TOTAL=0; unsigned long UNIQUE=0; //GPU で使うローカル構造体 typedef struct local { unsigned int BOUND1,BOUND2; unsigned int TOPBIT,ENDBIT,SIDEMASK,LASTMASK; unsigned long board[MAX]; unsigned long COUNT2,COUNT4,COUNT8,TOTAL,UNIQUE; unsigned int STEPS; }local; typedef struct cond { unsigned int DOWN; unsigned int LEFT; unsigned int RIGHT; }cond; // CPU 再帰/非再帰共通 対称解除法 void symmetryOps(unsigned int size,struct local* l) { /** 2.クイーンが右上角以外にある場合、 (1) 90度回転させてオリジナルと同型になる場合、さらに90度回転(オリジナルか ら180度回転)させても、さらに90度回転(オリジナルから270度回転)させてもオリ ジナルと同型になる。 こちらに該当するユニーク解が属するグループの要素数は、左右反転させたパター ンを加えて2個しかありません。 */ if(l->board[l->BOUND2]==1){ unsigned int ptn; unsigned int own; for(ptn=2,own=1;own<size;++own,ptn<<=1){ unsigned int bit; unsigned int you; for(bit=1,you=size-1;(l->board[you]!=ptn)&&l->board[own]>=bit;--you){ bit<<=1; } if(l->board[own]>bit){ return ; } if(l->board[own]<bit){ break; } }//end for // 90度回転して同型なら180度回転しても270度回転しても同型である if(own>size-1){ l->COUNT2++; return ; }//end if }//end if /** 2.クイーンが右上角以外にある場合、 (2) 90度回転させてオリジナルと異なる場合は、270度回転させても必ずオリジナル とは異なる。ただし、180度回転させた場合はオリジナルと同型になることも有り得 る。こちらに該当するユニーク解が属するグループの要素数は、180度回転させて同 型になる場合は4個(左右反転×縦横回転) */ //180度回転 if(l->board[size-1]==l->ENDBIT){ unsigned int you; unsigned int own; for(you=size-1-1,own=1;own<=size-1;++own,--you){ unsigned int bit; unsigned int ptn; for(bit=1,ptn=l->TOPBIT;(ptn!=l->board[you])&&(l->board[own]>=bit);ptn>>=1){ bit<<=1; } if(l->board[own]>bit){ return ; } if(l->board[own]<bit){ break; } }//end for //90度回転が同型でなくても180度回転が同型であることもある if(own>size-1){ l->COUNT4++; return ; } }//end if /** 2.クイーンが右上角以外にある場合、 (3)180度回転させてもオリジナルと異なる場合は、8個(左右反転×縦横回転×上下反転) */ //270度回転 if(l->board[l->BOUND1]==l->TOPBIT){ unsigned int ptn; unsigned int own; unsigned int you; unsigned int bit; for(ptn=l->TOPBIT>>1,own=1;own<=size-1;++own,ptn>>=1){ for(bit=1,you=0;(l->board[you]!=ptn)&&(l->board[own]>=bit);++you){ bit<<=1; } if(l->board[own]>bit){ return ; } if(l->board[own]<bit){ break; } }//end for }//end if l->COUNT8++; } /** CPU -c */ // 非再帰 角にQがないときのバックトラック void symmetry_backTrack_NR(unsigned int size,unsigned int row,unsigned int _left,unsigned int _down,unsigned int _right,struct local *l) { unsigned int mask=(1<<size)-1; unsigned int down[size]; unsigned int left[size]; unsigned int right[size]; unsigned int bitmap[size]; left[row]=_left; down[row]=_down; right[row]=_right; bitmap[row]=mask&~(left[row]|down[row]|right[row]); while(row>0){ if(bitmap[row]>0){ if(row<l->BOUND1){ //上部サイド枝刈り bitmap[row]|=l->SIDEMASK; bitmap[row]^=l->SIDEMASK; }else if(row==l->BOUND2){ //下部サイド枝刈り if((down[row]&l->SIDEMASK)==0){ row--; } if((down[row]&l->SIDEMASK)!=l->SIDEMASK){ bitmap[row]&=l->SIDEMASK; } } unsigned int save_bitmap=bitmap[row]; unsigned int 
bit=-bitmap[row]&bitmap[row]; bitmap[row]^=bit; l->board[row]=bit; //Qを配置 if((bit&mask)!=0){ if(row==(size-1)){ if( (save_bitmap&l->LASTMASK)==0){ symmetryOps(size,l); //対称解除法 } row--; }else{ unsigned int n=row++; left[row]=(left[n]|bit)<<1; down[row]=(down[n]|bit); right[row]=(right[n]|bit)>>1; bitmap[row]=mask&~(left[row]|down[row]|right[row]); } }else{ row--; } }else{ row--; } }//end while } // 非再帰 角にQがあるときのバックトラック void symmetry_backTrack_corner_NR(unsigned int size,unsigned int row,unsigned int _left,unsigned int _down,unsigned int _right,struct local *l) { unsigned int mask=(1<<size)-1; unsigned int bit=0; unsigned int down[size]; unsigned int left[size]; unsigned int right[size]; unsigned int bitmap[size]; left[row]=_left; down[row]=_down; right[row]=_right; bitmap[row]=mask&~(left[row]|down[row]|right[row]); while(row>=2){ if(row<l->BOUND1){ // bitmap[row]=bitmap[row]|2; // bitmap[row]=bitmap[row]^2; bitmap[row]&=~2; } if(bitmap[row]>0){ bit=-bitmap[row]&bitmap[row]; bitmap[row]^=bit; if(row==(size-1)){ l->COUNT8++; row--; }else{ unsigned int n=row++; left[row]=(left[n]|bit)<<1; down[row]=(down[n]|bit); right[row]=(right[n]|bit)>>1; l->board[row]=bit; //Qを配置 //クイーンが配置可能な位置を表す bitmap[row]=mask&~(left[row]|down[row]|right[row]); } }else{ row--; } }//end while } // 非再帰 対称解除法 void symmetry_NR(unsigned int size,struct local* l) { l->TOTAL=l->UNIQUE=l->COUNT2=l->COUNT4=l->COUNT8=0; unsigned int bit=0; l->TOPBIT=1<<(size-1); l->ENDBIT=l->SIDEMASK=l->LASTMASK=0; l->BOUND1=2; l->BOUND2=0; l->board[0]=1; while(l->BOUND1>1&&l->BOUND1<size-1){ if(l->BOUND1<size-1){ bit=1<<l->BOUND1; l->board[1]=bit; //2行目にQを配置 //角にQがあるときのバックトラック symmetry_backTrack_corner_NR(size,2,(2|bit)<<1,1|bit,(2|bit)>>1,l); } l->BOUND1++; } l->TOPBIT=1<<(size-1); l->ENDBIT=l->TOPBIT>>1; l->SIDEMASK=l->TOPBIT|1; l->LASTMASK=l->TOPBIT|1; l->BOUND1=1; l->BOUND2=size-2; while(l->BOUND1>0 && l->BOUND2<size-1 && l->BOUND1<l->BOUND2){ if(l->BOUND1<l->BOUND2){ bit=1<<l->BOUND1; l->board[0]=bit; //Qを配置 //角にQがないときのバックトラック symmetry_backTrack_NR(size,1,bit<<1,bit,bit>>1,l); } l->BOUND1++; l->BOUND2--; l->ENDBIT=l->ENDBIT>>1; l->LASTMASK=l->LASTMASK<<1|l->LASTMASK|l->LASTMASK>>1; }//ene while UNIQUE=l->COUNT2+l->COUNT4+l->COUNT8; TOTAL=l->COUNT2*2+l->COUNT4*4+l->COUNT8*8; } /** CPU -r */ // 再帰 角にQがないときのバックトラック void symmetry_backTrack(unsigned int size,unsigned int row,unsigned int left,unsigned int down,unsigned int right,struct local* l) { unsigned int mask=(1<<size)-1; unsigned int bitmap=mask&~(left|down|right); if(row==(size-1)){ if(bitmap){ if( (bitmap&l->LASTMASK)==0){ l->board[row]=bitmap; //Qを配置 symmetryOps(size,l); //対称解除 } } }else{ if(row<l->BOUND1){ bitmap=bitmap|l->SIDEMASK; bitmap=bitmap^l->SIDEMASK; }else{ if(row==l->BOUND2){ if((down&l->SIDEMASK)==0){ return; } if( (down&l->SIDEMASK)!=l->SIDEMASK){ bitmap=bitmap&l->SIDEMASK; } } } while(bitmap){ unsigned int bit=-bitmap&bitmap; bitmap=bitmap^bit; l->board[row]=bit; symmetry_backTrack(size,row+1,(left|bit)<<1,down|bit,(right|bit)>>1,l); } } } // 再帰 角にQがあるときのバックトラック void symmetry_backTrack_corner(unsigned int size,unsigned int row,unsigned int left,unsigned int down,unsigned int right,struct local* l) { unsigned int mask=(1<<size)-1; unsigned int bitmap=mask&~(left|down|right); unsigned int bit=0; if(row==(size-1)){ if(bitmap){ l->board[row]=bitmap; l->COUNT8++; } }else{ if(row<l->BOUND1){ //枝刈り bitmap=bitmap|2; bitmap=bitmap^2; } while(bitmap){ bit=-bitmap&bitmap; bitmap=bitmap^bit; l->board[row]=bit; //Qを配置 
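// Recurse into the next row, propagating the attack masks: blocked columns
// accumulate in `down`, while the left- and right-diagonal masks are shifted
// by one position as the search moves down one row.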
symmetry_backTrack_corner(size,row+1,(left|bit)<<1,down|bit,(right|bit)>>1,l); } } } // 再帰 対称解除法 void symmetry_R(unsigned int size,struct local* l) { l->TOTAL=l->UNIQUE=l->COUNT2=l->COUNT4=l->COUNT8=0; unsigned int bit=0; l->TOPBIT=1<<(size-1); l->ENDBIT=l->LASTMASK=l->SIDEMASK=0; l->BOUND1=2; l->BOUND2=0; l->board[0]=1; while(l->BOUND1>1 && l->BOUND1<size-1){ if(l->BOUND1<size-1){ bit=1<<l->BOUND1; l->board[1]=bit; //2行目にQを配置 //角にQがあるときのバックトラック symmetry_backTrack_corner(size,2,(2|bit)<<1,1|bit,(2|bit)>>1,l); } l->BOUND1++; }//end while l->TOPBIT=1<<(size-1); l->ENDBIT=l->TOPBIT>>1; l->SIDEMASK=l->TOPBIT|1; l->LASTMASK=l->TOPBIT|1; l->BOUND1=1; l->BOUND2=size-2; while(l->BOUND1>0 && l->BOUND2<size-1 && l->BOUND1<l->BOUND2){ if(l->BOUND1<l->BOUND2){ bit=1<<l->BOUND1; l->board[0]=bit; //Qを配置 //角にQがないときのバックトラック symmetry_backTrack(size,1,bit<<1,bit,bit>>1,l); } l->BOUND1++; l->BOUND2--; l->ENDBIT=l->ENDBIT>>1; l->LASTMASK=l->LASTMASK<<1|l->LASTMASK|l->LASTMASK>>1; }//ene while UNIQUE=l->COUNT2+l->COUNT4+l->COUNT8; TOTAL=l->COUNT2*2+l->COUNT4*4+l->COUNT8*8; } /** GPU -g */ __device__ struct dlocal { unsigned int BOUND1,BOUND2; unsigned int TOPBIT,ENDBIT,SIDEMASK,LASTMASK; unsigned long board[MAX]; unsigned long COUNT2,COUNT4,COUNT8,TOTAL,UNIQUE; }dlocal; // GPU 対称解除法 __host__ __device__ long GPU_symmetryOps(unsigned int size,struct dlocal* l) { /** 2.クイーンが右上角以外にある場合、 (1) 90度回転させてオリジナルと同型になる場合、さらに90度回転(オリジナルか ら180度回転)させても、さらに90度回転(オリジナルから270度回転)させてもオリ ジナルと同型になる。 こちらに該当するユニーク解が属するグループの要素数は、左右反転させたパター ンを加えて2個しかありません。 */ if(l->board[l->BOUND2]==1){ unsigned int ptn; unsigned int own; for(ptn=2,own=1;own<size;++own,ptn<<=1){ unsigned int bit; unsigned int you; for(bit=1,you=size-1;(l->board[you]!=ptn)&& l->board[own]>=bit;--you){ bit<<=1; } if(l->board[own]>bit){ return 0; } if(l->board[own]<bit){ break; } }//end for // 90度回転して同型なら180度回転しても270度回転しても同型である if(own>size-1){ l->COUNT2++; return 2; }//end if }//end if /** 2.クイーンが右上角以外にある場合、 (2) 90度回転させてオリジナルと異なる場合は、270度回転させても必ずオリジナル とは異なる。ただし、180度回転させた場合はオリジナルと同型になることも有り得 る。こちらに該当するユニーク解が属するグループの要素数は、180度回転させて同 型になる場合は4個(左右反転×縦横回転) */ //180度回転 if(l->board[size-1]==l->ENDBIT){ unsigned int you; unsigned int own; for(you=size-1-1,own=1;own<=size-1;++own,--you){ unsigned int bit; unsigned int ptn; for(bit=1,ptn=l->TOPBIT;(ptn!=l->board[you])&&(l->board[own]>=bit);ptn>>=1){ bit<<=1; } if(l->board[own]>bit){ return 0; } if(l->board[own]<bit){ break; } }//end for //90度回転が同型でなくても180度回転が同型であることもある if(own>size-1){ l->COUNT4++; return 4; } }//end if /** 2.クイーンが右上角以外にある場合、 (3)180度回転させてもオリジナルと異なる場合は、8個(左右反転×縦横回転×上下反転) */ //270度回転 if(l->board[l->BOUND1]==l->TOPBIT){ unsigned int ptn; unsigned int own; unsigned int you; unsigned int bit; for(ptn=l->TOPBIT>>1,own=1;own<=size-1;++own,ptn>>=1){ for(bit=1,you=0;(l->board[you]!=ptn)&&(l->board[own]>=bit);++you){ bit<<=1; } if(l->board[own]>bit){ return 0; } if(l->board[own]<bit){ break; } }//end for }//end if l->COUNT8++; return 8; } // GPU 角にQがないときのバックトラック __host__ __device__ long GPU_symmetry_backTrack(unsigned int size,unsigned int row,unsigned int left,unsigned int down,unsigned int right,struct dlocal* l) { unsigned long counter=0; unsigned int mask=(1<<size)-1; unsigned int bitmap=mask&~(left|down|right); if(row==(size-1)){ if(bitmap){ if( (bitmap& l->LASTMASK)==0){ l->board[row]=bitmap; //Qを配置 counter+=GPU_symmetryOps(size,l); //対称解除 } } }else{ if(row<l->BOUND1){ bitmap=bitmap|l->SIDEMASK; bitmap=bitmap^l->SIDEMASK; }else{ if(row==l->BOUND2){ if((down&l->SIDEMASK)==0){ return 0; } if( 
(down&l->SIDEMASK)!=l->SIDEMASK){ bitmap=bitmap&l->SIDEMASK; } } } while(bitmap){ unsigned int bit=-bitmap&bitmap; bitmap=bitmap^bit; l->board[row]=bit; counter+=GPU_symmetry_backTrack(size,row+1,(left|bit)<<1,down|bit,(right|bit)>>1,l); } } return counter; } // GPU 角にQがあるときのバックトラック __host__ __device__ long GPU_symmetry_backTrack_corner(unsigned int size,unsigned int row,unsigned int left,unsigned int down,unsigned int right,struct dlocal* l) { unsigned long counter=0; unsigned int mask=(1<<size)-1; unsigned int bitmap=mask&~(left|down|right); unsigned int bit=0; if(row==(size-1)){ if(bitmap){ l->board[row]=bitmap; l->COUNT8++; counter+=8; } }else{ if(row<l->BOUND1){ //枝刈り bitmap=bitmap|2; bitmap=bitmap^2; } while(bitmap){ bit=-bitmap&bitmap; bitmap=bitmap^bit; l->board[row]=bit; //Qを配置 counter+=GPU_symmetry_backTrack_corner(size,row+1,(left|bit)<<1,down|bit,(right|bit)>>1,l); } } return counter; } // GPU 対称解除法 -g の実行時のみ呼び出されます __host__ __device__ void GPU_symmetry_R(unsigned int size,struct local* hostLocal) { // GPU内部で使うための dlocal構造体 struct dlocal l; l.TOTAL=l.UNIQUE=l.COUNT2=l.COUNT4=l.COUNT8=0; unsigned int bit=0; l.TOPBIT=1<<(size-1); l.ENDBIT=l.LASTMASK=l.SIDEMASK=0; l.BOUND1=2; l.BOUND2=0; l.board[0]=1; while(l.BOUND1>1 && l.BOUND1<size-1){ if(l.BOUND1<size-1){ bit=1<<l.BOUND1; l.board[1]=bit; //2行目にQを配置 //角にQがあるときのバックトラック GPU_symmetry_backTrack_corner(size,2,(2|bit)<<1,1|bit,(2|bit)>>1,&l); } l.BOUND1++; }//end while l.TOPBIT=1<<(size-1); l.ENDBIT=l.TOPBIT>>1; l.SIDEMASK=l.TOPBIT|1; l.LASTMASK=l.TOPBIT|1; l.BOUND1=1; l.BOUND2=size-2; while(l.BOUND1>0 && l.BOUND2<size-1 && l.BOUND1<l.BOUND2){ if(l.BOUND1<l.BOUND2){ bit=1<<l.BOUND1; l.board[0]=bit; //Qを配置 //角にQがないときのバックトラック GPU_symmetry_backTrack(size,1,bit<<1,bit,bit>>1,&l); } l.BOUND1++; l.BOUND2--; l.ENDBIT=l.ENDBIT>>1; l.LASTMASK=l.LASTMASK<<1|l.LASTMASK|l.LASTMASK>>1; }//ene while // 集計値は hostLocalへ代入 hostLocal->UNIQUE=l.COUNT2+l.COUNT4+l.COUNT8; hostLocal->TOTAL=l.COUNT2*2+l.COUNT4*4+l.COUNT8*8; } /** CUDA13 */ // GPU -n 対称解除法 __device__ int BitBoard_symmetryOps(const unsigned int size,const unsigned int* board,struct local* l) { unsigned int own,ptn,you,bit; //90度回転 if(board[l->BOUND2]==1){ own=1; ptn=2; while(own<=size-1){ bit=1; you=size-1; while((board[you]!=ptn)&&(board[own]>=bit)){ bit<<=1; you--; } if(board[own]>bit){ return 0; } else if(board[own]<bit){ break; } own++; ptn<<=1; } /** 90度回転して同型なら180度/270度回転も同型である */ if(own>size-1){ return 2; } } //180度回転 if(board[size-1]==l->ENDBIT){ own=1; you=size-1-1; while(own<=size-1){ bit=1; ptn=l->TOPBIT; while((board[you]!=ptn)&&(board[own]>=bit)){ bit<<=1; ptn>>=1; } if(board[own]>bit){ return 0; } else if(board[own]<bit){ break; } own++; you--; } /** 90度回転が同型でなくても180度回転が同型である事もある */ if(own>size-1){ return 4; } } //270度回転 if(board[l->BOUND1]==l->TOPBIT){ own=1; ptn=l->TOPBIT>>1; while(own<=size-1){ bit=1; you=0; while((board[you]!=ptn)&&(board[own]>=bit)){ bit<<=1; you++; } if(board[own]>bit){ return 0; } else if(board[own]<bit){ break; } own++; ptn>>=1; } } return 8; } // GPU -n Qが角にない場合のバックトラック内の再帰処理をカーネルで行う __global__ void BitBoard_cuda_kernel_b2(const unsigned int size,unsigned int mark,unsigned int* _total,unsigned int* _unique,unsigned long _cond,unsigned int* board,unsigned int _row,struct cond* c,struct local* l) { const unsigned int mask=(1<<size)-1; unsigned long total=0; unsigned int unique=0; int row=0; unsigned int bit; // //スレッド // //ブロック内のスレッドID unsigned const int tid=threadIdx.x; //グリッド内のブロックID unsigned const int bid=blockIdx.x; //全体通してのID unsigned const int 
idx=bid*blockDim.x+tid; // //シェアードメモリ // //sharedメモリを使う ブロック内スレッドで共有 //10固定なのは現在のmask設定で //GPUで実行するのは最大10だから //THREAD_NUMはブロックあたりのスレッド数 __shared__ unsigned int down[THREAD_NUM][10]; //down[tid][row]=_down[idx]; down[tid][row]=c[idx].DOWN; __shared__ unsigned int left[THREAD_NUM][10]; //left[tid][row]=_left[idx]; left[tid][row]=c[idx].LEFT; __shared__ unsigned int right[THREAD_NUM][10]; //right[tid][row]=_right[idx]; right[tid][row]=c[idx].RIGHT; __shared__ unsigned int bitmap[THREAD_NUM][10]; //down,left,rightからbitmapを出す bitmap[tid][row]=mask&~(down[tid][row]|left[tid][row]|right[tid][row]); __shared__ unsigned int sum[THREAD_NUM]; unsigned int c_aBoard[MAX]; __shared__ unsigned int usum[THREAD_NUM]; //余分なスレッドは動かさない //GPUはSTEPS数起動するが_cond以上は空回しする if(idx<_cond){ //_down,_left,_rightの情報を //down,left,rightに詰め直す //CPU で詰め込んだ t_はSTEPS個あるが //ブロック内ではブロックあたりのスレッド数に限定 //されるので idxでよい // for(int i=0;i<_row;i++){ c_aBoard[i]=board[idx*_row+i]; //2次元配列だが1次元的に利用 } unsigned int bitmap_tid_row; unsigned int down_tid_row; unsigned int left_tid_row; unsigned int right_tid_row; while(row>=0){ bitmap_tid_row=bitmap[tid][row]; down_tid_row=down[tid][row]; left_tid_row=left[tid][row]; right_tid_row=right[tid][row]; // //bitmap[tid][row]=00000000 クイーンを //どこにも置けないので1行上に戻る if(bitmap_tid_row==0){ row--; }else{ /**11 枝刈り追加**********/ //【枝刈り】上部サイド枝刈り if(row+_row<l->BOUND1){ bitmap_tid_row=bitmap[tid][row]&=~l->SIDEMASK; //【枝刈り】下部サイド枝刈り }else if(row+_row==l->BOUND2) { if((down_tid_row&l->SIDEMASK)==0){ row--; continue; } if((down_tid_row&l->SIDEMASK)!=l->SIDEMASK){ bitmap_tid_row=bitmap[tid][row]&=l->SIDEMASK; } } int save_bitmap=bitmap[tid][row]; //クイーンを置く //置く場所があるかどうか bitmap[tid][row]^=c_aBoard[row+_row]=bit=(-bitmap_tid_row&bitmap_tid_row); if((bit&mask)!=0){ //最終行?最終行から1個前の行まで //無事到達したら 加算する if(row+1==mark){ /***11 l->LASTMASK枝刈り*********************/ if((save_bitmap&l->LASTMASK)==0){ /***12 symmetryOps 省力化のためl->BOUND1,l->BOUND2,l->TOPBIT,l->ENDBITを渡す*****/ int s=BitBoard_symmetryOps(size,c_aBoard,l); if(s!=0){ //print(size); //print()でTOTALを++しない //ホストに戻す配列にTOTALを入れる //スレッドが1つの場合は配列は1個 unique++; total+=s; //対称解除で得られた解数を加算 } row--; } }else{ int rowP=row+1; down[tid][rowP]=down_tid_row|bit; left[tid][rowP]=(left_tid_row|bit)<<1; right[tid][rowP]=(right_tid_row|bit)>>1; bitmap[tid][rowP] =mask&~( down[tid][rowP] |left[tid][rowP] |right[tid][rowP]); row++; } }else{ //置く場所がなければ1個上に row--; } } } //最後sum[tid]に加算する sum[tid]=total; usum[tid]=unique; }else{ //_cond未満は空回しするのでtotalは加算しない sum[tid]=0; usum[tid]=0; } //__syncthreads()でブロック内のスレッド間の同期 //全てのスレッドが__syncthreads()に辿り着くのを待つ __syncthreads();if(tid<64&&tid+64<THREAD_NUM){ sum[tid]+=sum[tid+64]; usum[tid]+=usum[tid+64]; } __syncwarp();if(tid<32){ sum[tid]+=sum[tid+32]; usum[tid]+=usum[tid+32]; } __syncwarp();if(tid<16){ sum[tid]+=sum[tid+16]; usum[tid]+=usum[tid+16]; } __syncwarp();if(tid<8){ sum[tid]+=sum[tid+8]; usum[tid]+=usum[tid+8]; } __syncwarp();if(tid<4){ sum[tid]+=sum[tid+4]; usum[tid]+=usum[tid+4]; } __syncwarp();if(tid<2){ sum[tid]+=sum[tid+2]; usum[tid]+=usum[tid+2]; } __syncwarp();if(tid<1){ sum[tid]+=sum[tid+1]; usum[tid]+=usum[tid+1]; } __syncwarp();if(tid==0){ _total[bid]=sum[0]; _unique[bid]=usum[0]; } } // GPU -n Qが角にある場合のバックトラック内の再帰処理をカーネルで行う __global__ void BitBoard_cuda_kernel_b1(const unsigned int size,unsigned int mark,unsigned int* _total,unsigned int* _unique,unsigned long _cond,unsigned int _row,struct cond* c,struct local* l) { const unsigned int mask=(1<<size)-1; unsigned long total=0; unsigned int unique=0; int row=0; unsigned int bit; // 
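// One GPU thread per pre-packed (DOWN,LEFT,RIGHT) work item: idx selects the
// item, and each thread keeps its own stack slice down/left/right/bitmap[tid][0..9]
// in shared memory (the depth of 10 matches the maximum number of rows handed
// to the GPU); threads with idx>=_cond only write zeros into the reduction
// buffers.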
//スレッド // //ブロック内のスレッドID const unsigned int tid=threadIdx.x; //グリッド内のブロックID const unsigned int bid=blockIdx.x; //全体通してのID const unsigned int idx=bid*blockDim.x+tid; // //シェアードメモリ // //sharedメモリを使う ブロック内スレッドで共有 //10固定なのは現在のmask設定で //GPUで実行するのは最大10だから //THREAD_NUMはブロックあたりのスレッド数 __shared__ unsigned int down[THREAD_NUM][10]; down[tid][row]=c[idx].DOWN; __shared__ unsigned int left[THREAD_NUM][10]; left[tid][row]=c[idx].LEFT; __shared__ unsigned int right[THREAD_NUM][10]; right[tid][row]=c[idx].RIGHT; __shared__ unsigned int bitmap[THREAD_NUM][10]; bitmap[tid][row] =mask&~(down[tid][row]|left[tid][row]|right[tid][row]); __shared__ unsigned int sum[THREAD_NUM]; __shared__ unsigned int usum[THREAD_NUM]; //余分なスレッドは動かさない //GPUはSTEPS数起動するが_cond以上は空回しする if(idx<_cond){ //_down,_left,_rightの情報を //down,left,rightに詰め直す //CPU で詰め込んだ t_はSTEPS個あるが //ブロック内ではブロックあたりのスレッド数に限定 //されるので idxでよい // unsigned int bitmap_tid_row; unsigned int down_tid_row; unsigned int left_tid_row; unsigned int right_tid_row; while(row>=0){ bitmap_tid_row=bitmap[tid][row]; down_tid_row=down[tid][row]; left_tid_row=left[tid][row]; right_tid_row=right[tid][row]; if(bitmap_tid_row==0){ row--; }else{ /**11 枝刈り**********/ if(row+_row<l->BOUND1) { bitmap_tid_row=bitmap[tid][row]&=~2; // bm|=2; bm^=2; (bm&=~2と同等) } //クイーンを置く //置く場所があるかどうか bitmap[tid][row] ^=bit =(-bitmap_tid_row&bitmap_tid_row); if((bit&mask)!=0){ //最終行?最終行から1個前の行まで //無事到達したら 加算する if(row+1==mark){ //ホストに戻す配列にTOTALを入れる //スレッドが1つの場合は配列は1個 unique++; total+=8; //対称解除で得られた解数を加算 //} row--; }else{ int rowP=row+1; down[tid][rowP]=down_tid_row|bit; left[tid][rowP]=(left_tid_row|bit)<<1; right[tid][rowP]=(right_tid_row|bit)>>1; bitmap[tid][rowP]=mask&~(down[tid][rowP]|left[tid][rowP]|right[tid][rowP]); row++; } }else{ //置く場所がなければ1個上に row--; } } } //最後sum[tid]に加算する sum[tid]=total; usum[tid]=unique; }else{ //_cond未満は空回しするのでtotalは加算しない sum[tid]=0; usum[tid]=0; } //__syncthreads()でブロック内のスレッド間の同期 //全てのスレッドが__syncthreads()に辿り着くのを待つ __syncthreads(); if(tid<64&&tid+64<THREAD_NUM){ sum[tid]+=sum[tid+64]; usum[tid]+=usum[tid+64]; } __syncwarp(); if(tid<32){ sum[tid]+=sum[tid+32]; usum[tid]+=usum[tid+32]; } __syncwarp(); if(tid<16){ sum[tid]+=sum[tid+16]; usum[tid]+=usum[tid+16]; } __syncwarp(); if(tid<8){ sum[tid]+=sum[tid+8]; usum[tid]+=usum[tid+8]; } __syncwarp(); if(tid<4){ sum[tid]+=sum[tid+4]; usum[tid]+=usum[tid+4]; } __syncwarp(); if(tid<2){ sum[tid]+=sum[tid+2]; usum[tid]+=usum[tid+2]; } __syncwarp(); if(tid<1){ sum[tid]+=sum[tid+1]; usum[tid]+=usum[tid+1]; } __syncwarp(); if(tid==0){ _total[bid]=sum[0]; _unique[bid]=usum[0]; } } // GPU -n Qが角にない void BitBoard_backTrack2G(const unsigned int size,unsigned int row,unsigned int _left,unsigned int _down,unsigned int _right,struct local* l) { //何行目からGPUで行くか。ここの設定は変更可能、設定値を多くするほどGPUで並行して動く /***11 size<8の時はmarkが2*********************/ unsigned int mark=size>12?size-10:3; //unsigned int mark=size>11?size-9:3; if(size<8){ mark=2; } const unsigned int h_mark=row; unsigned long totalCond=0; unsigned int mask=(1<<size)-1; bool matched=false; //host unsigned int down[32]; down[row]=_down; unsigned int right[32]; right[row]=_right; unsigned int left[32]; left[row]=_left; //bitmapを配列で持つことにより //stackを使わないで1行前に戻れる unsigned int bitmap[32]; bitmap[row]=mask&~(left[row]|down[row]|right[row]); unsigned int bit; struct local* hostLocal; cudaMallocHost((void**) &hostLocal,sizeof(struct local)*l->STEPS); struct local* deviceLocal; cudaMallocHost((void**) &deviceLocal,sizeof(struct local)*l->STEPS); struct cond* hostCond; cudaMallocHost((void**) 
&hostCond,sizeof(struct cond)*l->STEPS); struct cond* deviceCond; cudaMallocHost((void**) &deviceCond,sizeof(struct cond)*l->STEPS); unsigned int* hostTotal; cudaMallocHost((void**) &hostTotal,sizeof(long)*l->STEPS/THREAD_NUM); unsigned int* deviceTotal; cudaMalloc((void**) &deviceTotal,sizeof(long)*l->STEPS/THREAD_NUM); unsigned int* hostUnique; cudaMallocHost((void**) &hostUnique,sizeof(long)*l->STEPS/THREAD_NUM); unsigned int* deviceUnique; cudaMalloc((void**) &deviceUnique,sizeof(long)*l->STEPS/THREAD_NUM); // unsigned int* hostBoard; cudaMallocHost((void**) &hostBoard,sizeof(int)*l->STEPS*mark); unsigned int* deviceBoard; cudaMalloc((void**) &deviceBoard,sizeof(int)*l->STEPS*mark); // hostLocal[0].BOUND1=l->BOUND1; hostLocal[0].BOUND2=l->BOUND2; hostLocal[0].TOPBIT=l->TOPBIT; hostLocal[0].ENDBIT=l->ENDBIT; hostLocal[0].SIDEMASK=l->SIDEMASK; hostLocal[0].LASTMASK=l->LASTMASK; hostLocal[0].STEPS=l->STEPS; //12行目までは3行目までCPU->row==mark以下で 3行目までの //down,left,right情報をhostDown ,hostLeft,hostRight //に格納 //する->3行目以降をGPUマルチスレッドで実行し結果を取得 //13行目以降はCPUで実行する行数が1個ずつ増えて行く //例えばn15だとrow=5までCPUで実行し、 //それ以降はGPU(現在の設定だとGPUでは最大10行実行する //ようになっている) unsigned int rowP=0; unsigned long total=0; unsigned long unique=0; while(row>=h_mark) { //bitmap[row]=00000000 クイーンを //どこにも置けないので1行上に戻る if(bitmap[row]==0){ row--; }else{ //おける場所があれば進む if(row<l->BOUND1){ //【枝刈り】上部サイド枝刈り bitmap[row]&=~l->SIDEMASK; }else if(row==l->BOUND2) { //【枝刈り】下部サイド枝刈り if((down[row]&l->SIDEMASK)==0){ row--; } if((down[row]&l->SIDEMASK)!=l->SIDEMASK){ bitmap[row]&=l->SIDEMASK; } } bitmap[row]^=l->board[row]=bit=(-bitmap[row]&bitmap[row]); if((bit&mask)!=0){ //置く場所があれば先に進む rowP=row+1; down[rowP]=down[row]|bit; left[rowP]=(left[row]|bit)<<1; right[rowP]=(right[row]|bit)>>1; bitmap[rowP]=mask&~(down[rowP]|left[rowP]|right[rowP]); row++; if(row==mark){ //3行目(mark)にクイーンを1個ずつ置いていって、 //down,left,right情報を格納、 //その次の行へは進まない。その行で可能な場所にクイー //ン置き終わったらGPU並列実行 //totalCond がthreadIdになる 各スレッドに down,left,right情報を渡す //row=2(13行目以降は増えていく。例えばn15だとrow=5)の情報を //hostDown,hostLeft,hostRightに格納する hostCond[totalCond].DOWN=down[row]; hostCond[totalCond].LEFT=left[row]; hostCond[totalCond].RIGHT=right[row]; for(int i=0;i<mark;i++){ hostBoard[totalCond*mark+i]=l->board[i]; } //スレッド数をインクリメントする totalCond++; //最大GPU数に達してしまったら一旦ここでGPUを実行する。STEPSはGPUの同 //時並行稼働数を制御 //nの数が少ないうちはtotalCondがSTEPSを超えることはないがnの数が増え //て行くと超えるようになる。 //ここではtotalCond==STEPSの場合だけこの中へ if(totalCond==l->STEPS){ //matched=trueの時にCOUNT追加 //GPU内でカウントしているので、GPUか //ら出たらmatched=trueになってる if(matched){ // デバイスからホストへ転送 cudaMemcpy(hostTotal, deviceTotal, sizeof(long)*l->STEPS/THREAD_NUM,cudaMemcpyDeviceToHost); cudaMemcpy(hostUnique,deviceUnique,sizeof(long)*l->STEPS/THREAD_NUM,cudaMemcpyDeviceToHost); // 集計 for(int col=0;col<l->STEPS/THREAD_NUM;col++){ total+=hostTotal[col]; unique+=hostUnique[col]; } matched=false; } // ホストからデバイスへ転送 cudaMemcpy(deviceBoard,hostBoard,sizeof(int)*totalCond*mark, cudaMemcpyHostToDevice); cudaMemcpy(deviceCond, hostCond, sizeof(struct cond)*l->STEPS, cudaMemcpyHostToDevice); cudaMemcpy(deviceLocal,hostLocal,sizeof(struct local)*l->STEPS, cudaMemcpyHostToDevice); // CUDA起動 BitBoard_cuda_kernel_b2 <<<l->STEPS/THREAD_NUM,THREAD_NUM >>> (size,size-mark,deviceTotal,deviceUnique,totalCond,deviceBoard,row,deviceCond,deviceLocal); //STEPS数の数だけマルチスレッドで起動するのだが、実際に計算が行われ //るのはtotalCondの数だけでそれ以外は空回しになる //GPU内でカウントしているので、GPUから出たらmatched=trueになってる matched=true; //totalCond==STEPSルートでGPUを実行したらスレッドをまた0から開始す //る(これによりなんどもSTEPS数分だけGPUを起動できる) totalCond=0; } //hostDown,hostLeft,hostRightに情報を格納したら1行上に上がる 
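// (After queuing this prefix the host backs up one row, so every legal
// placement at row==mark is eventually packed into hostCond.)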
//これを繰り返すことにより row=2で可能な場所全てにクイーンを置いて //hostDown,hostLeft,hostRightに情報を格納する row--; } }else{ //置く場所がなければ上に上がる。row==mark行に達するまではCPU側で普通に //nqueenをやる row--; } } } //matched=trueの時にCOUNT追加 //GPU内でカウントしているので、GPUから出たら //matched=trueになってる if(matched){ // デバイスからホストへ転送 cudaMemcpy(hostTotal, deviceTotal, sizeof(long)*l->STEPS/THREAD_NUM,cudaMemcpyDeviceToHost); cudaMemcpy(hostUnique,deviceUnique,sizeof(long)*l->STEPS/THREAD_NUM,cudaMemcpyDeviceToHost); // 集計 for(int col=0;col<l->STEPS/THREAD_NUM;col++){ total+=hostTotal[col]; unique+=hostUnique[col]; } matched=false; } // ホストからデバイスへ転送 cudaMemcpy(deviceBoard, hostBoard,sizeof(int)*totalCond*mark, cudaMemcpyHostToDevice); cudaMemcpy(deviceCond, hostCond, sizeof(struct cond)*totalCond, cudaMemcpyHostToDevice); cudaMemcpy(deviceLocal, hostLocal,sizeof(int)*l->STEPS/THREAD_NUM,cudaMemcpyHostToDevice); //size-mark は何行GPUを実行するか totalCondはスレッド数 //STEPS数の数だけマルチスレッドで起動するのだが、実際に計算が行われるのは //totalCondの数だけでそれ以外は空回しになる // CUDA起動 BitBoard_cuda_kernel_b2 <<<l->STEPS/THREAD_NUM,THREAD_NUM >>> (size,size-mark,deviceTotal,deviceUnique,totalCond,deviceBoard,mark,deviceCond,deviceLocal); // デバイスからホストへ転送 cudaMemcpy(hostTotal, deviceTotal, sizeof(long)*l->STEPS/THREAD_NUM,cudaMemcpyDeviceToHost); cudaMemcpy(hostUnique,deviceUnique,sizeof(long)*l->STEPS/THREAD_NUM,cudaMemcpyDeviceToHost); // 集計 for(int col=0;col<l->STEPS/THREAD_NUM;col++){ total+=hostTotal[col]; unique+=hostUnique[col]; } TOTAL+=total; UNIQUE+=unique; cudaFree(deviceTotal); cudaFree(deviceUnique); cudaFree(deviceBoard); cudaFree(deviceCond); cudaFree(deviceLocal); cudaFreeHost(hostTotal); cudaFreeHost(hostUnique); cudaFreeHost(hostBoard); cudaFreeHost(hostCond); cudaFreeHost(hostLocal); } // GPU -n Qが角にある void BitBoard_backTrack1G(const unsigned int size,unsigned int row,unsigned int _left,unsigned int _down,unsigned int _right,struct local* l) { //何行目からGPUで行くか。ここの設定は変更可能、設定値を多くするほどGPUで並行して動く /***08 クイーンを2行目まで固定で置くためmarkが3以上必要*********************/ const unsigned int mark=size>12?size-10:3; const unsigned int h_mark=row; const unsigned int mask=(1<<size)-1; unsigned long totalCond=0; bool matched=false; //host unsigned int down[32]; down[row]=_down; unsigned int right[32]; right[row]=_right; unsigned int left[32]; left[row]=_left; //bitmapを配列で持つことにより //stackを使わないで1行前に戻れる unsigned int bitmap[32]; bitmap[row]=mask&~(left[row]|down[row]|right[row]); unsigned int bit; /** unsigned int* hostDown; cudaMallocHost((void**) &hostDown,sizeof(int)*l->STEPS); unsigned int* hostLeft; cudaMallocHost((void**) &hostLeft,sizeof(int)*l->STEPS); unsigned int* hostRight; cudaMallocHost((void**) &hostRight,sizeof(int)*l->STEPS); unsigned int* deviceDown; cudaMalloc((void**) &deviceDown,sizeof(int)*l->STEPS); unsigned int* deviceLeft; cudaMalloc((void**) &deviceLeft,sizeof(int)*l->STEPS); unsigned int* deviceRight; cudaMalloc((void**) &deviceRight,sizeof(int)*l->STEPS); */ struct local* hostLocal; cudaMallocHost((void**) &hostLocal,sizeof(struct local)*l->STEPS); struct local* deviceLocal; cudaMallocHost((void**) &deviceLocal,sizeof(struct local)*l->STEPS); struct cond* hostCond; cudaMallocHost((void**) &hostCond,sizeof(struct cond)*l->STEPS); struct cond* deviceCond; cudaMallocHost((void**) &deviceCond,sizeof(struct cond)*l->STEPS); unsigned int* hostTotal; cudaMallocHost((void**) &hostTotal,sizeof(int)*l->STEPS/THREAD_NUM); unsigned int* hostUnique; cudaMallocHost((void**) &hostUnique,sizeof(int)*l->STEPS/THREAD_NUM); unsigned int* deviceTotal; cudaMalloc((void**) &deviceTotal,sizeof(int)*l->STEPS/THREAD_NUM); unsigned int* 
deviceUnique; cudaMalloc((void**) &deviceUnique,sizeof(int)*l->STEPS/THREAD_NUM); hostLocal[0].BOUND1=l->BOUND1; hostLocal[0].BOUND2=l->BOUND2; hostLocal[0].TOPBIT=l->TOPBIT; hostLocal[0].ENDBIT=l->ENDBIT; hostLocal[0].SIDEMASK=l->SIDEMASK; hostLocal[0].LASTMASK=l->LASTMASK; hostLocal[0].STEPS=l->STEPS; //12行目までは3行目までCPU->row==mark以下で 3行目までの //down,left,right情報を hostDown,hostLeft,hostRight //に格納 //する->3行目以降をGPUマルチスレッドで実行し結果を取得 //13行目以降はCPUで実行する行数が1個ずつ増えて行く //例えばn15だとrow=5までCPUで実行し、 //それ以降はGPU(現在の設定だとGPUでは最大10行実行する //ようになっている) //while(row>=0) { int rowP=0; unsigned long total=0; unsigned long unique=0; while(row>=h_mark) { //bitmap[row]=00000000 クイーンを //どこにも置けないので1行上に戻る //06GPU こっちのほうが優秀 if(bitmap[row]==0){ row--; } else{//おける場所があれば進む if(row<l->BOUND1) { /***11 枝刈り*********************/ bitmap[row]&=~2; // bm|=2; bm^=2; (bm&=~2と同等) } bitmap[row]^=bit=(-bitmap[row]&bitmap[row]); if((bit&mask)!=0){//置く場所があれば先に進む rowP=row+1; down[rowP]=down[row]|bit; left[rowP]=(left[row]|bit)<<1; right[rowP]=(right[row]|bit)>>1; bitmap[rowP]=mask&~(down[rowP]|left[rowP]|right[rowP]); row++; if(row==mark){ //3行目(mark)にクイーンを1個ずつ置いていって、 //down,left,right情報を格納、 //その次の行へは進まない。その行で可能な場所にクイー //ン置き終わったらGPU並列実行 //totalCond がthreadIdになる 各スレッドに down,left,right情報を渡す //row=2(13行目以降は増えていく。例えばn15だとrow=5)の情報を //hostDown,hostLeft,hostRightに格納する /** hostDown[totalCond]=down[row]; hostLeft[totalCond]=left[row]; hostRight[totalCond]=right[row]; */ hostCond[totalCond].DOWN=down[row]; hostCond[totalCond].LEFT=left[row]; hostCond[totalCond].RIGHT=right[row]; //スレッド数をインクリメントする totalCond++; //最大GPU数に達してしまったら一旦ここでGPUを実行する。STEPSはGPUの同 //時並行稼働数を制御 //nの数が少ないうちはtotalCondがSTEPSを超えることはないがnの数が増え //て行くと超えるようになる。 //ここではtotalCond==STEPSの場合だけこの中へ if(totalCond==l->STEPS){ //matched=trueの時にCOUNT追加 //GPU内でカウントしているので、GPUか //ら出たらmatched=trueになってる if(matched){ // デバイスからホストへ転送 cudaMemcpy(hostTotal, deviceTotal, sizeof(int)*l->STEPS/THREAD_NUM,cudaMemcpyDeviceToHost); cudaMemcpy(hostUnique,deviceUnique,sizeof(int)*l->STEPS/THREAD_NUM,cudaMemcpyDeviceToHost); // 集計 for(int col=0;col<l->STEPS/THREAD_NUM;col++){ total+=hostTotal[col]; unique+=hostUnique[col]; } matched=false; } // ホストからデバイスへ転送 /** cudaMemcpy(deviceDown, hostDown, sizeof(int)*totalCond, cudaMemcpyHostToDevice); cudaMemcpy(deviceLeft, hostLeft, sizeof(int)*totalCond, cudaMemcpyHostToDevice); cudaMemcpy(deviceRight,hostRight,sizeof(int)*totalCond, cudaMemcpyHostToDevice); */ cudaMemcpy(deviceCond, hostCond, sizeof(struct cond)*l->STEPS, cudaMemcpyHostToDevice); cudaMemcpy(deviceLocal,hostLocal,sizeof(struct local)*l->STEPS, cudaMemcpyHostToDevice); // CUDA起動 BitBoard_cuda_kernel_b1 <<<l->STEPS/THREAD_NUM,THREAD_NUM >>> (size,size-mark,deviceTotal,deviceUnique,totalCond,row,deviceCond,deviceLocal); //STEPS数の数だけマルチスレッドで起動するのだが、実際に計算が行われ //るのはtotalCondの数だけでそれ以外は空回しになる //GPU内でカウントしているので、GPUから出たらmatched=trueになってる matched=true; //totalCond==STEPSルートでGPUを実行したらスレッドをまた0から開始す //る(これによりなんどもSTEPS数分だけGPUを起動できる) totalCond=0; } //hostDown,hostLeft,hostRightに情報を格納したら1行上に上がる //これを繰り返すことにより row=2で可能な場所全てにクイーンを置いて //hostDown,hostLeft,hostRightに情報を格納する row--; } }else{ //置く場所がなければ上に上がる。row==mark行に達するまではCPU側で普通に //nqueenをやる row--; } } } //matched=trueの時にCOUNT追加 //GPU内でカウントしているので、GPUから出たら //matched=trueになってる if(matched){ // デバイスからホストへ転送 cudaMemcpy(hostTotal, deviceTotal, sizeof(int)*l->STEPS/THREAD_NUM,cudaMemcpyDeviceToHost); cudaMemcpy(hostUnique,deviceUnique,sizeof(int)*l->STEPS/THREAD_NUM,cudaMemcpyDeviceToHost); // 集計 for(int col=0;col<l->STEPS/THREAD_NUM;col++){ total+=hostTotal[col]; unique+=hostUnique[col]; 
} matched=false; } // ホストからデバイスへ転送 /* cudaMemcpy(deviceDown, hostDown, sizeof(int)*totalCond, cudaMemcpyHostToDevice); cudaMemcpy(deviceLeft, hostLeft, sizeof(int)*totalCond, cudaMemcpyHostToDevice); cudaMemcpy(deviceRight,hostRight,sizeof(int)*totalCond, cudaMemcpyHostToDevice); */ cudaMemcpy(deviceCond, hostCond, sizeof(struct cond)*totalCond, cudaMemcpyHostToDevice); cudaMemcpy(deviceLocal,hostLocal,sizeof(int)*l->STEPS/THREAD_NUM, cudaMemcpyHostToDevice); // CUDA起動 BitBoard_cuda_kernel_b1 <<<l->STEPS/THREAD_NUM,THREAD_NUM >>> //(size,size-mark,deviceDown,deviceLeft,deviceRight,deviceTotal,deviceUnique,totalCond,mark,deviceCond,deviceLocal); (size,size-mark,deviceTotal,deviceUnique,totalCond,mark,deviceCond,deviceLocal); /* // デバイスからホストへ転送 */ cudaMemcpy(hostTotal, deviceTotal, sizeof(int)*l->STEPS/THREAD_NUM,cudaMemcpyDeviceToHost); cudaMemcpy(hostUnique,deviceUnique,sizeof(int)*l->STEPS/THREAD_NUM,cudaMemcpyDeviceToHost); // 集計 for(int col=0;col<l->STEPS/THREAD_NUM;col++){ total+=hostTotal[col]; unique+=hostUnique[col]; } TOTAL+=total; UNIQUE+=unique; //開放 /* cudaFree(deviceDown); cudaFree(deviceLeft); cudaFree(deviceRight); */ cudaFree(deviceTotal); cudaFree(deviceUnique); cudaFree(deviceCond); cudaFree(deviceLocal); /* cudaFreeHost(hostDown); cudaFreeHost(hostLeft); cudaFreeHost(hostRight); */ cudaFreeHost(hostTotal); cudaFreeHost(hostUnique); cudaFreeHost(hostCond); cudaFreeHost(hostLocal); } // GPU -n ビットボードの実行 角にQがある・ないの分岐を行う void BitBoard_build(const unsigned int size,int STEPS) { if(size<=0||size>32){return;} /** int型は unsigned とする total: グローバル変数TOTALへのアクセスを極小化する */ struct local l; //GPU で扱う構造体 l.STEPS=STEPS; unsigned int bit=1; l.board[0]=1; unsigned int left=bit<<1,down=bit,right=bit>>1; /** 2行目は右から3列目から左端から2列目まで */ for(l.BOUND1=2;l.BOUND1<size-1;l.BOUND1++){ l.board[1]=bit=(1<<l.BOUND1); BitBoard_backTrack1G(size,2,(left|bit)<<1,(down|bit),(right|bit)>>1,&l); } l.TOPBIT=1<<(size-1); l.SIDEMASK=l.LASTMASK=(l.TOPBIT|1); l.ENDBIT=(l.TOPBIT>>1); /** 1行目右から2列目から 偶数個は1/2 n=8 なら 1,2,3 奇数個は1/2+1 n=9 なら 1,2,3,4 */ for(l.BOUND1=1,l.BOUND2=size-1-1;l.BOUND1<l.BOUND2;l.BOUND1++,l.BOUND2--){ l.board[0]=bit=(1<<l.BOUND1); BitBoard_backTrack2G(size,1,bit<<1,bit,bit>>1,&l); l.LASTMASK|=l.LASTMASK>>1|l.LASTMASK<<1; l.ENDBIT>>=1; } } // CUDA 初期化 bool InitCUDA() { int count; cudaGetDeviceCount(&count); if(count==0){fprintf(stderr,"There is no device.\n");return false;} unsigned int i; for(i=0;i<count;++i){ struct cudaDeviceProp prop; if(cudaGetDeviceProperties(&prop,i)==cudaSuccess){if(prop.major>=1){break;} } } if(i==count){fprintf(stderr,"There is no device supporting CUDA 1.x.\n");return false;} cudaSetDevice(i); return true; } //メイン int main(int argc,char** argv) { bool cpu=false,cpur=false,gpu=false,gpuBitBoard=false; unsigned int argstart=2; if(argc>=2&&argv[1][0]=='-'){ if(argv[1][1]=='c'||argv[1][1]=='C'){cpu=true;} else if(argv[1][1]=='r'||argv[1][1]=='R'){cpur=true;} else if(argv[1][1]=='c'||argv[1][1]=='C'){cpu=true;} else if(argv[1][1]=='g'||argv[1][1]=='G'){gpu=true;} else if(argv[1][1]=='n'||argv[1][1]=='N'){gpuBitBoard=true;} else{ gpuBitBoard=true; } //デフォルトをgpuとする argstart=2; } if(argc<argstart){ printf("Usage: %s [-c|-g|-r|-s] n STEPS\n",argv[0]); printf(" -r: CPU 再帰\n"); printf(" -c: CPU 非再帰\n"); printf(" -g: GPU 再帰\n"); printf(" -n: GPU ビットボード\n"); } if(cpur){ printf("\n\n対称解除法 再帰 \n"); } else if(cpu){ printf("\n\n対称解除法 非再帰 \n"); } else if(gpu){ printf("\n\n対称解除法 GPU\n"); } else if(gpuBitBoard){ printf("\n\n対称解除法 GPUビットボード \n"); } if(cpu||cpur) { unsigned int min=4; unsigned 
int targetN=17; struct timeval t0; struct timeval t1; printf("%s\n"," N: Total Unique dd:hh:mm:ss.ms"); for(unsigned int size=min;size<=targetN;size++){ local l; gettimeofday(&t0,NULL);//計測開始 if(cpur){ //再帰 symmetry_R(size,&l); } if(cpu){ //非再帰 symmetry_NR(size,&l); } // gettimeofday(&t1,NULL);//計測終了 unsigned int ss; unsigned int ms; unsigned int dd; if(t1.tv_usec<t0.tv_usec) { dd=(t1.tv_sec-t0.tv_sec-1)/86400; ss=(t1.tv_sec-t0.tv_sec-1)%86400; ms=(1000000+t1.tv_usec-t0.tv_usec+500)/10000; }else { dd=(t1.tv_sec-t0.tv_sec)/86400; ss=(t1.tv_sec-t0.tv_sec)%86400; ms=(t1.tv_usec-t0.tv_usec+500)/10000; }//end if unsigned int hh=ss/3600; unsigned int mm=(ss-hh*3600)/60; ss%=60; printf("%2d:%13ld%12ld%8.2d:%02d:%02d:%02d.%02d\n",size,TOTAL,UNIQUE,dd,hh,mm,ss,ms); } //end for }//end if if(gpu||gpuBitBoard) { int STEPS=24576; if(!InitCUDA()){return 0;} unsigned int min=4; unsigned int targetN=21; struct timeval t0; struct timeval t1; printf("%s\n"," N: Total Unique dd:hh:mm:ss.ms"); for(unsigned int size=min;size<=targetN;size++){ gettimeofday(&t0,NULL); if(gpu){ TOTAL=UNIQUE=0; local l[MAX]; GPU_symmetry_R(size,&l[0]); TOTAL=l->TOTAL; UNIQUE=l->UNIQUE; }else if(gpuBitBoard){ TOTAL=UNIQUE=0; BitBoard_build(size,STEPS); } gettimeofday(&t1,NULL); unsigned int ss; unsigned int ms; unsigned int dd; if (t1.tv_usec<t0.tv_usec) { dd=(int)(t1.tv_sec-t0.tv_sec-1)/86400; ss=(t1.tv_sec-t0.tv_sec-1)%86400; ms=(1000000+t1.tv_usec-t0.tv_usec+500)/10000; } else { dd=(int)(t1.tv_sec-t0.tv_sec)/86400; ss=(t1.tv_sec-t0.tv_sec)%86400; ms=(t1.tv_usec-t0.tv_usec+500)/10000; }//end if unsigned int hh=ss/3600; unsigned int mm=(ss-hh*3600)/60; ss%=60; printf("%2d:%13ld%12ld%8.2d:%02d:%02d:%02d.%02d\n",size,TOTAL,UNIQUE,dd,hh,mm,ss,ms); } } return 0; }
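/*
 * Minimal sketch of the accumulate-and-flush batching pattern used by
 * BitBoard_backTrack1G/2G above: partial states found by the CPU backtracker
 * are collected into a pinned host buffer until STEPS of them exist, one
 * kernel launch then processes the whole batch, and a final launch flushes
 * the remainder. Everything here (Work, process_batch, the dummy work items
 * and the per-item computation) is illustrative only and does not appear in
 * the original program; only the STEPS/THREAD_NUM launch shape mirrors it,
 * and the grid here is sized to the pending count instead of always
 * STEPS/THREAD_NUM.
 */
#include <cstdio>
#include <cuda_runtime.h>

#define STEPS 4096
#define THREAD_NUM 96

struct Work { unsigned int down, left, right; };

__global__ void process_batch(const Work *w, unsigned long long *total, int n)
{
  int tid = blockIdx.x * blockDim.x + threadIdx.x;
  if (tid >= n) return;                 // extra threads idle, as in the kernels above
  // stand-in for the backtracking done by BitBoard_cuda_kernel_b1/b2
  atomicAdd(total, ((unsigned long long)(w[tid].down ^ w[tid].left ^ w[tid].right)) & 1u);
}

int main()
{
  Work *hostWork;               cudaMallocHost((void**)&hostWork, sizeof(Work) * STEPS);
  Work *devWork;                cudaMalloc((void**)&devWork, sizeof(Work) * STEPS);
  unsigned long long *devTotal; cudaMalloc((void**)&devTotal, sizeof(*devTotal));
  cudaMemset(devTotal, 0, sizeof(*devTotal));

  int pending = 0;                               // plays the role of totalCond
  for (unsigned int s = 0; s < 100000; ++s) {    // stand-in for the CPU backtracking loop
    hostWork[pending++] = Work{s, s << 1, s >> 1};
    if (pending == STEPS) {                      // buffer full: launch one batch
      cudaMemcpy(devWork, hostWork, sizeof(Work) * pending, cudaMemcpyHostToDevice);
      process_batch<<<(pending + THREAD_NUM - 1) / THREAD_NUM, THREAD_NUM>>>(devWork, devTotal, pending);
      pending = 0;
    }
  }
  if (pending) {                                 // final flush, mirroring the code after the while loop
    cudaMemcpy(devWork, hostWork, sizeof(Work) * pending, cudaMemcpyHostToDevice);
    process_batch<<<(pending + THREAD_NUM - 1) / THREAD_NUM, THREAD_NUM>>>(devWork, devTotal, pending);
  }
  unsigned long long total = 0;
  cudaMemcpy(&total, devTotal, sizeof(total), cudaMemcpyDeviceToHost);
  printf("dummy total: %llu\n", total);
  cudaFreeHost(hostWork); cudaFree(devWork); cudaFree(devTotal);
  return 0;
}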
a6a826e288626ca1be9c28481fa51d0a93f036b2.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"

__global__ void create_FF_full_HyPred (
    float *FF_table,
    float *V,
    float c2_F,
    float *c2_H,
    int *Ele,
    float *FF_full,
    int num_q,
    int num_ele,
    int num_atom,
    int num_atom2) {

    __shared__ float FF_pt[7];
    __shared__ float hydration[10];

    for (int ii = blockIdx.x; ii < num_q; ii += gridDim.x) {
        // Get form factor for this block (or q vector)
        if (ii < num_q) {
            for (int jj = threadIdx.x; jj < num_ele + 1; jj += blockDim.x) {
                FF_pt[jj] = FF_table[ii*(num_ele+1)+jj];
            }
        }
        __syncthreads();

        for (int jj = threadIdx.x; jj < 10; jj += blockDim.x) {
            hydration[jj] = c2_F * c2_H[jj] * FF_pt[num_ele];
        }
        __syncthreads();

        // Calculate atomic form factor for this q
        for (int jj = threadIdx.x; jj < num_atom; jj += blockDim.x) {
            int atomt = Ele[jj];
            if (atomt > 5) {
                // Which means this is a hydrogen
                FF_full[ii*num_atom2 + jj] = FF_pt[0];
                FF_full[ii*num_atom2 + jj] += hydration[atomt] * V[jj];
            } else {
                // Heavy atoms - do the same as before
                FF_full[ii*num_atom2 + jj] = FF_pt[atomt];
                FF_full[ii*num_atom2 + jj] += hydration[atomt] * V[jj];
            }
        }
    }
}
a6a826e288626ca1be9c28481fa51d0a93f036b2.cu
#include "includes.h"

__global__ void create_FF_full_HyPred (
    float *FF_table,
    float *V,
    float c2_F,
    float *c2_H,
    int *Ele,
    float *FF_full,
    int num_q,
    int num_ele,
    int num_atom,
    int num_atom2) {

    __shared__ float FF_pt[7];
    __shared__ float hydration[10];

    for (int ii = blockIdx.x; ii < num_q; ii += gridDim.x) {
        // Get form factor for this block (or q vector)
        if (ii < num_q) {
            for (int jj = threadIdx.x; jj < num_ele + 1; jj += blockDim.x) {
                FF_pt[jj] = FF_table[ii*(num_ele+1)+jj];
            }
        }
        __syncthreads();

        for (int jj = threadIdx.x; jj < 10; jj += blockDim.x) {
            hydration[jj] = c2_F * c2_H[jj] * FF_pt[num_ele];
        }
        __syncthreads();

        // Calculate atomic form factor for this q
        for (int jj = threadIdx.x; jj < num_atom; jj += blockDim.x) {
            int atomt = Ele[jj];
            if (atomt > 5) {
                // Which means this is a hydrogen
                FF_full[ii*num_atom2 + jj] = FF_pt[0];
                FF_full[ii*num_atom2 + jj] += hydration[atomt] * V[jj];
            } else {
                // Heavy atoms - do the same as before
                FF_full[ii*num_atom2 + jj] = FF_pt[atomt];
                FF_full[ii*num_atom2 + jj] += hydration[atomt] * V[jj];
            }
        }
    }
}
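/*
 * Host-side sketch of one way to invoke create_FF_full_HyPred. Only the kernel
 * signature comes from the file above; the sizes (num_q, num_ele, num_atom,
 * num_atom2), the constant-filled inputs and the <<<num_q, 256>>> launch shape
 * are assumptions for illustration, and the sketch assumes it is compiled and
 * linked together with the kernel definition.
 */
#include <cstdio>
#include <vector>
#include <cuda_runtime.h>

__global__ void create_FF_full_HyPred(float*, float*, float, float*, int*, float*,
                                      int, int, int, int);

int main() {
  const int num_q = 128, num_ele = 6, num_atom = 1000, num_atom2 = 1024;  // num_ele+1 must fit FF_pt[7]
  std::vector<float> ff_table(num_q * (num_ele + 1), 1.0f), V(num_atom, 0.5f), c2_H(10, 0.2f);
  std::vector<int> Ele(num_atom, 0);   // all heavy atoms in this dummy input

  float *d_ff_table, *d_V, *d_c2_H, *d_ff_full; int *d_Ele;
  cudaMalloc((void**)&d_ff_table, ff_table.size() * sizeof(float));
  cudaMalloc((void**)&d_V, V.size() * sizeof(float));
  cudaMalloc((void**)&d_c2_H, c2_H.size() * sizeof(float));
  cudaMalloc((void**)&d_Ele, Ele.size() * sizeof(int));
  cudaMalloc((void**)&d_ff_full, (size_t)num_q * num_atom2 * sizeof(float));
  cudaMemcpy(d_ff_table, ff_table.data(), ff_table.size() * sizeof(float), cudaMemcpyHostToDevice);
  cudaMemcpy(d_V, V.data(), V.size() * sizeof(float), cudaMemcpyHostToDevice);
  cudaMemcpy(d_c2_H, c2_H.data(), c2_H.size() * sizeof(float), cudaMemcpyHostToDevice);
  cudaMemcpy(d_Ele, Ele.data(), Ele.size() * sizeof(int), cudaMemcpyHostToDevice);

  // one block per q value (the kernel strides over q by gridDim.x), 256 threads per block
  create_FF_full_HyPred<<<num_q, 256>>>(d_ff_table, d_V, 1.0f, d_c2_H, d_Ele, d_ff_full,
                                        num_q, num_ele, num_atom, num_atom2);
  cudaDeviceSynchronize();

  std::vector<float> ff_full((size_t)num_q * num_atom2);
  cudaMemcpy(ff_full.data(), d_ff_full, ff_full.size() * sizeof(float), cudaMemcpyDeviceToHost);
  printf("FF_full[0] = %f\n", ff_full[0]);
  cudaFree(d_ff_table); cudaFree(d_V); cudaFree(d_c2_H); cudaFree(d_Ele); cudaFree(d_ff_full);
  return 0;
}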
5111a37e751b49f807a746e028a064529754ce61.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #define TILE_WIDTH 16 #include <stdio.h> #include "timer.h" //@@ Compute C = A * B __global__ void matrixMultiply(float * A, float * B, float * C, int numARows, int numAColumns, int numBRows, int numBColumns, int numCRows, int numCColumns) { } void constantInit(float *data, int size, float val) { for (int i = 0; i < size; ++i) { data[i] = val; } } int main(int argc, char ** argv) { GpuTimer timer; float * hostA; // The A matrix float * hostB; // The B matrix float * hostC; // The output C matrix float * deviceA; float * deviceB; float * deviceC; int numARows; // number of rows in the matrix A int numAColumns; // number of columns in the matrix A int numBRows; // number of rows in the matrix B int numBColumns; // number of columns in the matrix B int numCRows; // number of rows in the matrix C (you have to set this) int numCColumns; // number of columns in the matrix C (you have to set this) // Conseguir matrices de entrada. Random if (argc != 5){ fprintf(stderr,"%s numrowsA numcolumnsA numrowsB numcolumnsB\n", argv[0]); return 1; } numARows = atoi(argv[1]); numAColumns = atoi(argv[2]); numBRows = atoi(argv[3]); numBColumns = atoi(argv[4]); //@@ Set numCRows and numCColumns // Initialize host memory const float valB = 0.01f; hostA = (float *) malloc(numARows * numAColumns * sizeof(float)); hostB = (float *) malloc(numBRows * numBColumns * sizeof(float)); constantInit(hostA, numARows*numAColumns, 1.0f); constantInit(hostB, numBRows*numBColumns, valB); //@@ Allocate the hostC matrix //@@ Allocate GPU memory here //@@ Copy memory to the GPU here //@@ Initialize the grid and block dimensions here timer.Start(); //@@ Launch the GPU Kernel here hipDeviceSynchronize(); timer.Stop(); //@@ Copy the GPU memory back to the CPU here //@@ Free the GPU memory here printf("Checking computed result for correctness: "); bool correct = true; // test relative error by the formula // |<x, y>_cpu - <x,y>_gpu|/<|x|, |y|> < eps double eps = 1.e-6 ; // machine zero for (int i = 0; i < (int)(numCRows * numCColumns); i++) { double abs_err = fabs(hostC[i] - (numAColumns * valB)); double dot_length = numAColumns; double abs_val = fabs(hostC[i]); double rel_err = abs_err/abs_val/dot_length ; if (rel_err > eps) { printf("Error! Matrix[%05d]=%.8f, ref=%.8f error term is > %E\n", i, hostC[i], numAColumns*valB, eps); correct = false; } } printf("%s\n", correct ? "Result = PASS" : "Result = FAIL"); free(hostA); free(hostB); free(hostC); return 0; }
5111a37e751b49f807a746e028a064529754ce61.cu
#define TILE_WIDTH 16 #include <stdio.h> #include "timer.h" //@@ Compute C = A * B __global__ void matrixMultiply(float * A, float * B, float * C, int numARows, int numAColumns, int numBRows, int numBColumns, int numCRows, int numCColumns) { } void constantInit(float *data, int size, float val) { for (int i = 0; i < size; ++i) { data[i] = val; } } int main(int argc, char ** argv) { GpuTimer timer; float * hostA; // The A matrix float * hostB; // The B matrix float * hostC; // The output C matrix float * deviceA; float * deviceB; float * deviceC; int numARows; // number of rows in the matrix A int numAColumns; // number of columns in the matrix A int numBRows; // number of rows in the matrix B int numBColumns; // number of columns in the matrix B int numCRows; // number of rows in the matrix C (you have to set this) int numCColumns; // number of columns in the matrix C (you have to set this) // Conseguir matrices de entrada. Random if (argc != 5){ fprintf(stderr,"%s numrowsA numcolumnsA numrowsB numcolumnsB\n", argv[0]); return 1; } numARows = atoi(argv[1]); numAColumns = atoi(argv[2]); numBRows = atoi(argv[3]); numBColumns = atoi(argv[4]); //@@ Set numCRows and numCColumns // Initialize host memory const float valB = 0.01f; hostA = (float *) malloc(numARows * numAColumns * sizeof(float)); hostB = (float *) malloc(numBRows * numBColumns * sizeof(float)); constantInit(hostA, numARows*numAColumns, 1.0f); constantInit(hostB, numBRows*numBColumns, valB); //@@ Allocate the hostC matrix //@@ Allocate GPU memory here //@@ Copy memory to the GPU here //@@ Initialize the grid and block dimensions here timer.Start(); //@@ Launch the GPU Kernel here cudaThreadSynchronize(); timer.Stop(); //@@ Copy the GPU memory back to the CPU here //@@ Free the GPU memory here printf("Checking computed result for correctness: "); bool correct = true; // test relative error by the formula // |<x, y>_cpu - <x,y>_gpu|/<|x|, |y|> < eps double eps = 1.e-6 ; // machine zero for (int i = 0; i < (int)(numCRows * numCColumns); i++) { double abs_err = fabs(hostC[i] - (numAColumns * valB)); double dot_length = numAColumns; double abs_val = fabs(hostC[i]); double rel_err = abs_err/abs_val/dot_length ; if (rel_err > eps) { printf("Error! Matrix[%05d]=%.8f, ref=%.8f error term is > %E\n", i, hostC[i], numAColumns*valB, eps); correct = false; } } printf("%s\n", correct ? "Result = PASS" : "Result = FAIL"); free(hostA); free(hostB); free(hostC); return 0; }
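/*
 * One possible body for the empty matrixMultiply kernel above: a standard
 * TILE_WIDTH x TILE_WIDTH shared-memory tiling. This is a sketch rather than
 * the author's intended solution; it assumes row-major storage,
 * numAColumns == numBRows, and that it sits in the same file so the
 * TILE_WIDTH macro (16) is visible. It is named matrixMultiplyTiled so it does
 * not clash with the empty kernel above.
 */
__global__ void matrixMultiplyTiled(float *A, float *B, float *C,
                                    int numARows, int numAColumns,
                                    int numBRows, int numBColumns,
                                    int numCRows, int numCColumns) {
  __shared__ float tileA[TILE_WIDTH][TILE_WIDTH];
  __shared__ float tileB[TILE_WIDTH][TILE_WIDTH];
  int row = blockIdx.y * TILE_WIDTH + threadIdx.y;
  int col = blockIdx.x * TILE_WIDTH + threadIdx.x;
  float acc = 0.0f;
  for (int t = 0; t < (numAColumns + TILE_WIDTH - 1) / TILE_WIDTH; ++t) {
    int aCol = t * TILE_WIDTH + threadIdx.x;
    int bRow = t * TILE_WIDTH + threadIdx.y;
    // stage one tile of A and one tile of B, padding out-of-range elements with zero
    tileA[threadIdx.y][threadIdx.x] = (row < numARows && aCol < numAColumns)
                                          ? A[row * numAColumns + aCol] : 0.0f;
    tileB[threadIdx.y][threadIdx.x] = (bRow < numBRows && col < numBColumns)
                                          ? B[bRow * numBColumns + col] : 0.0f;
    __syncthreads();
    for (int k = 0; k < TILE_WIDTH; ++k)
      acc += tileA[threadIdx.y][k] * tileB[k][threadIdx.x];
    __syncthreads();
  }
  if (row < numCRows && col < numCColumns)
    C[row * numCColumns + col] = acc;
}
// A matching launch for the //@@ sections would be, for example:
//   dim3 block(TILE_WIDTH, TILE_WIDTH);
//   dim3 grid((numCColumns + TILE_WIDTH - 1) / TILE_WIDTH,
//             (numCRows + TILE_WIDTH - 1) / TILE_WIDTH);
//   matrixMultiplyTiled<<<grid, block>>>(deviceA, deviceB, deviceC,
//       numARows, numAColumns, numBRows, numBColumns, numCRows, numCColumns);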
1d253483e59cd19e5b45dc58e54b413bfbdea6e5.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "caffe2/core/context_gpu.h" #include "caffe2/operators/relu_op.h" namespace caffe2 { namespace { template <typename T> __global__ void ReluKernel(const int N, const T* X, T* Y) { CUDA_1D_KERNEL_LOOP(i, N) { Y[i] = X[i] > 0 ? X[i] : 0; } } template <typename T> __global__ void ReluGradientKernel(const int N, const T* Y, const T* dY, T* dX) { CUDA_1D_KERNEL_LOOP(i, N) { dX[i] = Y[i] > 0 ? dY[i] : 0; } } } // namespace template <> bool ReluOp<float, CUDAContext>::RunOnDevice() { auto& X = Input(0); auto* Y = Output(0); CAFFE_ENFORCE_GT(X.size(), 0); Y->ResizeLike(X); hipLaunchKernelGGL(( ReluKernel), dim3(CAFFE_GET_BLOCKS(X.size())), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), X.size(), X.data<float>(), Y->mutable_data<float>()); return true; } template <> bool ReluGradientOp<float, CUDAContext>::RunOnDevice() { auto& Y = Input(0); auto& dY = Input(1); auto* dX = Output(0); CAFFE_ENFORCE_GT(Y.size(), 0); CAFFE_ENFORCE_EQ(dY.size(), Y.size()); dX->ResizeLike(Y); hipLaunchKernelGGL(( ReluGradientKernel), dim3(CAFFE_GET_BLOCKS(Y.size())), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), Y.size(), Y.data<float>(), dY.data<float>(), dX->mutable_data<float>()); return true; } REGISTER_CUDA_OPERATOR(Relu, ReluOp<float, CUDAContext>); REGISTER_CUDA_OPERATOR(ReluGradient, ReluGradientOp<float, CUDAContext>); } // namespace caffe2
1d253483e59cd19e5b45dc58e54b413bfbdea6e5.cu
#include "caffe2/core/context_gpu.h" #include "caffe2/operators/relu_op.h" namespace caffe2 { namespace { template <typename T> __global__ void ReluKernel(const int N, const T* X, T* Y) { CUDA_1D_KERNEL_LOOP(i, N) { Y[i] = X[i] > 0 ? X[i] : 0; } } template <typename T> __global__ void ReluGradientKernel(const int N, const T* Y, const T* dY, T* dX) { CUDA_1D_KERNEL_LOOP(i, N) { dX[i] = Y[i] > 0 ? dY[i] : 0; } } } // namespace template <> bool ReluOp<float, CUDAContext>::RunOnDevice() { auto& X = Input(0); auto* Y = Output(0); CAFFE_ENFORCE_GT(X.size(), 0); Y->ResizeLike(X); ReluKernel<<<CAFFE_GET_BLOCKS(X.size()), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( X.size(), X.data<float>(), Y->mutable_data<float>()); return true; } template <> bool ReluGradientOp<float, CUDAContext>::RunOnDevice() { auto& Y = Input(0); auto& dY = Input(1); auto* dX = Output(0); CAFFE_ENFORCE_GT(Y.size(), 0); CAFFE_ENFORCE_EQ(dY.size(), Y.size()); dX->ResizeLike(Y); ReluGradientKernel<<<CAFFE_GET_BLOCKS(Y.size()), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( Y.size(), Y.data<float>(), dY.data<float>(), dX->mutable_data<float>()); return true; } REGISTER_CUDA_OPERATOR(Relu, ReluOp<float, CUDAContext>); REGISTER_CUDA_OPERATOR(ReluGradient, ReluGradientOp<float, CUDAContext>); } // namespace caffe2
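/*
 * Standalone sketch of what the two kernels above compute once caffe2's
 * CUDA_1D_KERNEL_LOOP macro (from caffe2/core/common_gpu.h) is expanded: a
 * plain grid-stride loop. The loop below is an approximation of that macro,
 * and the element count and 128-thread blocks are arbitrary choices.
 */
#include <cstdio>
#include <cuda_runtime.h>

__global__ void ReluForward(const int n, const float* x, float* y) {
  // grid-stride loop: each thread handles indices i, i + stride, i + 2*stride, ...
  for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n;
       i += blockDim.x * gridDim.x) {
    y[i] = x[i] > 0 ? x[i] : 0;
  }
}

__global__ void ReluBackward(const int n, const float* y, const float* dy, float* dx) {
  for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n;
       i += blockDim.x * gridDim.x) {
    dx[i] = y[i] > 0 ? dy[i] : 0;   // gradient passes through only where the output was positive
  }
}

int main() {
  const int n = 4096;
  float *x, *y;
  cudaMallocManaged(&x, n * sizeof(float));
  cudaMallocManaged(&y, n * sizeof(float));
  for (int i = 0; i < n; ++i) x[i] = i - n / 2.0f;
  ReluForward<<<(n + 127) / 128, 128>>>(n, x, y);
  cudaDeviceSynchronize();
  printf("y[0]=%f y[n-1]=%f\n", y[0], y[n - 1]);
  cudaFree(x); cudaFree(y);
  return 0;
}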
e5c97813322c1e34ff2cc7fa030e8b3198c99678.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <array> #include <iostream> #include "CudaUtils.h" #include <thrust/host_vector.h> #include <thrust/device_vector.h> #include "util/StringConcat.h" #define WORK_WIDTH 10 #define WORK_HEIGHT 1 #define BLOCK_WIDTH 3 #define BLOCK_HEIGHT 1 #define WORK_TOTAL WORK_WIDTH * WORK_HEIGHT dim3 dimBlock(BLOCK_WIDTH, BLOCK_HEIGHT); dim3 dimGrid(ceil(WORK_WIDTH / (float) dimBlock.x), ceil(WORK_HEIGHT / (float) dimBlock.y)); template <typename T> __global__ void compute(T* devSrc, T* devRes, size_t length, size_t offset) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx >= length) { printf("Thread %d returns prematurely\n", idx); return; } devRes[(idx + offset) % length] = devSrc[idx]; printf("Thread %d moving %d to %d\n", idx, idx, (idx + offset) % length); } int main() { printf("Kernel will be invoked with: Block(%d,%d), Grid(%d,%d)\n", dimBlock.x, dimBlock.y, dimGrid.x, dimGrid.y); std::array<float, WORK_TOTAL> src {1, 2, 3, 4, 5, 6, 7, 8, 9, 10}; std::array<float, WORK_TOTAL> res; runWithProfiler([&] { CudaBuffer<float> devSrc {src}; CudaBuffer<float> devRes {WORK_TOTAL}; hipLaunchKernelGGL(( compute<float>) , dim3(dimGrid), dim3(dimBlock), 0, 0, devSrc, devRes, WORK_TOTAL, 5); devRes.copyTo(res); }); // Print the results for (int col = 0; col < WORK_TOTAL; ++col) { std::cout << res[col] << std::endl; } }
e5c97813322c1e34ff2cc7fa030e8b3198c99678.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include <array> #include <iostream> #include "CudaUtils.h" #include <thrust/host_vector.h> #include <thrust/device_vector.h> #include "util/StringConcat.h" #define WORK_WIDTH 10 #define WORK_HEIGHT 1 #define BLOCK_WIDTH 3 #define BLOCK_HEIGHT 1 #define WORK_TOTAL WORK_WIDTH * WORK_HEIGHT dim3 dimBlock(BLOCK_WIDTH, BLOCK_HEIGHT); dim3 dimGrid(ceil(WORK_WIDTH / (float) dimBlock.x), ceil(WORK_HEIGHT / (float) dimBlock.y)); template <typename T> __global__ void compute(T* devSrc, T* devRes, size_t length, size_t offset) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx >= length) { printf("Thread %d returns prematurely\n", idx); return; } devRes[(idx + offset) % length] = devSrc[idx]; printf("Thread %d moving %d to %d\n", idx, idx, (idx + offset) % length); } int main() { printf("Kernel will be invoked with: Block(%d,%d), Grid(%d,%d)\n", dimBlock.x, dimBlock.y, dimGrid.x, dimGrid.y); std::array<float, WORK_TOTAL> src {1, 2, 3, 4, 5, 6, 7, 8, 9, 10}; std::array<float, WORK_TOTAL> res; runWithProfiler([&] { CudaBuffer<float> devSrc {src}; CudaBuffer<float> devRes {WORK_TOTAL}; compute<float> <<<dimGrid, dimBlock>>> (devSrc, devRes, WORK_TOTAL, 5); devRes.copyTo(res); }); // Print the results for (int col = 0; col < WORK_TOTAL; ++col) { std::cout << res[col] << std::endl; } }
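/*
 * Plain-CUDA sketch of the same rotate-by-offset example without the
 * CudaBuffer / runWithProfiler helpers (those live in CudaUtils.h, which is
 * not shown here, so explicit cudaMalloc/cudaMemcpy are used instead). With
 * offset 5 the expected output order is 6 7 8 9 10 1 2 3 4 5.
 */
#include <cstdio>
#include <cuda_runtime.h>

__global__ void rotate(const float* src, float* dst, size_t length, size_t offset) {
  size_t idx = blockIdx.x * blockDim.x + threadIdx.x;
  if (idx >= length) return;                    // guard for the over-provisioned grid
  dst[(idx + offset) % length] = src[idx];
}

int main() {
  const size_t n = 10;
  float h_src[n] = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10}, h_dst[n];
  float *d_src, *d_dst;
  cudaMalloc((void**)&d_src, n * sizeof(float));
  cudaMalloc((void**)&d_dst, n * sizeof(float));
  cudaMemcpy(d_src, h_src, n * sizeof(float), cudaMemcpyHostToDevice);
  rotate<<<(n + 2) / 3, 3>>>(d_src, d_dst, n, 5);   // same 3-thread blocks as above
  cudaMemcpy(h_dst, d_dst, n * sizeof(float), cudaMemcpyDeviceToHost);
  for (size_t i = 0; i < n; ++i) printf("%g\n", h_dst[i]);
  cudaFree(d_src); cudaFree(d_dst);
  return 0;
}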
d3d374f2508932c655b77aeaa78b1b85f4b3d669.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <stdint.h> #include <vector> #include <chrono> #include "linearprobing.h" // 32 bit Murmur3 hash __device__ uint32_t hash(uint32_t k) { k ^= k >> 16; k *= 0x85ebca6b; k ^= k >> 13; k *= 0xc2b2ae35; k ^= k >> 16; return k & (kHashTableCapacity-1); } // Insert the key/values in kvs into the hashtable __global__ void k_hashtable_insert(KeyValue*__restrict__ hashtable, const KeyValue*__restrict__ kvs, unsigned int numkvs) { unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x; if (tid < numkvs) { uint32_t key = kvs[tid].key; uint32_t value = kvs[tid].value; uint32_t slot = hash(key); while (true) { uint32_t prev = atomicCAS(&hashtable[slot].key, kEmpty, key); if (prev == kEmpty || prev == key) { hashtable[slot].value = value; return; } slot = (slot + 1) & (kHashTableCapacity-1); } } } double insert_hashtable(KeyValue* pHashTable, const KeyValue* kvs, uint32_t num_kvs) { // Insert all the keys into the hash table const int threadblocksize = 256; int gridsize = ((uint32_t)num_kvs + threadblocksize - 1) / threadblocksize; hipDeviceSynchronize(); auto start = std::chrono::steady_clock::now(); hipLaunchKernelGGL(( k_hashtable_insert), dim3(gridsize), dim3(threadblocksize), 0, 0, pHashTable, kvs, (uint32_t)num_kvs); hipDeviceSynchronize(); auto end = std::chrono::steady_clock::now(); auto time = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count(); return time; } // Delete each key in kvs from the hash table, if the key exists // A deleted key is left in the hash table, but its value is set to kEmpty // Deleted keys are not reused; once a key is assigned a slot, it never moves __global__ void k_hashtable_delete(KeyValue*__restrict__ hashtable, const KeyValue*__restrict__ kvs, unsigned int numkvs) { unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x; if (tid < numkvs) { uint32_t key = kvs[tid].key; uint32_t slot = hash(key); while (true) { if (hashtable[slot].key == key) { hashtable[slot].value = kEmpty; return; } if (hashtable[slot].key == kEmpty) { return; } slot = (slot + 1) & (kHashTableCapacity - 1); } } } double delete_hashtable(KeyValue* pHashTable, const KeyValue* kvs, uint32_t num_kvs) { // Insert all the keys into the hash table const int threadblocksize = 256; int gridsize = ((uint32_t)num_kvs + threadblocksize - 1) / threadblocksize; hipDeviceSynchronize(); auto start = std::chrono::steady_clock::now(); hipLaunchKernelGGL(( k_hashtable_delete), dim3(gridsize), dim3(threadblocksize), 0, 0, pHashTable, kvs, (uint32_t)num_kvs); hipDeviceSynchronize(); auto end = std::chrono::steady_clock::now(); auto time = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count(); return time; } // Iterate over every item in the hashtable; return non-empty key/values __global__ void k_iterate_hashtable(KeyValue*__restrict__ pHashTable, KeyValue*__restrict__ kvs, uint32_t* kvs_size) { unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x; if (tid < kHashTableCapacity) { if (pHashTable[tid].key != kEmpty) { uint32_t value = pHashTable[tid].value; if (value != kEmpty) { uint32_t size = atomicAdd(kvs_size, 1); kvs[size] = pHashTable[tid]; } } } } std::vector<KeyValue> iterate_hashtable(KeyValue* pHashTable) { uint32_t* device_num_kvs; hipMalloc((void**) &device_num_kvs, sizeof(uint32_t)); hipMemset(device_num_kvs, 0, sizeof(uint32_t)); KeyValue* device_kvs; hipMalloc((void**) &device_kvs, sizeof(KeyValue) * kNumKeyValues); const int threadblocksize 
= 256; int gridsize = (kHashTableCapacity + threadblocksize - 1) / threadblocksize; hipDeviceSynchronize(); auto start = std::chrono::steady_clock::now(); hipLaunchKernelGGL(( k_iterate_hashtable), dim3(gridsize), dim3(threadblocksize), 0, 0, pHashTable, device_kvs, device_num_kvs); hipDeviceSynchronize(); auto end = std::chrono::steady_clock::now(); auto time = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count(); printf("Kernel execution time (iterate): %f (s)\n", time * 1e-9f); uint32_t num_kvs; hipMemcpy(&num_kvs, device_num_kvs, sizeof(uint32_t), hipMemcpyDeviceToHost); std::vector<KeyValue> kvs; kvs.resize(num_kvs); hipMemcpy(kvs.data(), device_kvs, sizeof(KeyValue) * num_kvs, hipMemcpyDeviceToHost); hipFree(device_kvs); hipFree(device_num_kvs); return kvs; }
d3d374f2508932c655b77aeaa78b1b85f4b3d669.cu
#include <stdio.h> #include <stdint.h> #include <vector> #include <chrono> #include "linearprobing.h" // 32 bit Murmur3 hash __device__ uint32_t hash(uint32_t k) { k ^= k >> 16; k *= 0x85ebca6b; k ^= k >> 13; k *= 0xc2b2ae35; k ^= k >> 16; return k & (kHashTableCapacity-1); } // Insert the key/values in kvs into the hashtable __global__ void k_hashtable_insert(KeyValue*__restrict__ hashtable, const KeyValue*__restrict__ kvs, unsigned int numkvs) { unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x; if (tid < numkvs) { uint32_t key = kvs[tid].key; uint32_t value = kvs[tid].value; uint32_t slot = hash(key); while (true) { uint32_t prev = atomicCAS(&hashtable[slot].key, kEmpty, key); if (prev == kEmpty || prev == key) { hashtable[slot].value = value; return; } slot = (slot + 1) & (kHashTableCapacity-1); } } } double insert_hashtable(KeyValue* pHashTable, const KeyValue* kvs, uint32_t num_kvs) { // Insert all the keys into the hash table const int threadblocksize = 256; int gridsize = ((uint32_t)num_kvs + threadblocksize - 1) / threadblocksize; cudaDeviceSynchronize(); auto start = std::chrono::steady_clock::now(); k_hashtable_insert<<<gridsize, threadblocksize>>>(pHashTable, kvs, (uint32_t)num_kvs); cudaDeviceSynchronize(); auto end = std::chrono::steady_clock::now(); auto time = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count(); return time; } // Delete each key in kvs from the hash table, if the key exists // A deleted key is left in the hash table, but its value is set to kEmpty // Deleted keys are not reused; once a key is assigned a slot, it never moves __global__ void k_hashtable_delete(KeyValue*__restrict__ hashtable, const KeyValue*__restrict__ kvs, unsigned int numkvs) { unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x; if (tid < numkvs) { uint32_t key = kvs[tid].key; uint32_t slot = hash(key); while (true) { if (hashtable[slot].key == key) { hashtable[slot].value = kEmpty; return; } if (hashtable[slot].key == kEmpty) { return; } slot = (slot + 1) & (kHashTableCapacity - 1); } } } double delete_hashtable(KeyValue* pHashTable, const KeyValue* kvs, uint32_t num_kvs) { // Insert all the keys into the hash table const int threadblocksize = 256; int gridsize = ((uint32_t)num_kvs + threadblocksize - 1) / threadblocksize; cudaDeviceSynchronize(); auto start = std::chrono::steady_clock::now(); k_hashtable_delete<<<gridsize, threadblocksize>>>(pHashTable, kvs, (uint32_t)num_kvs); cudaDeviceSynchronize(); auto end = std::chrono::steady_clock::now(); auto time = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count(); return time; } // Iterate over every item in the hashtable; return non-empty key/values __global__ void k_iterate_hashtable(KeyValue*__restrict__ pHashTable, KeyValue*__restrict__ kvs, uint32_t* kvs_size) { unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x; if (tid < kHashTableCapacity) { if (pHashTable[tid].key != kEmpty) { uint32_t value = pHashTable[tid].value; if (value != kEmpty) { uint32_t size = atomicAdd(kvs_size, 1); kvs[size] = pHashTable[tid]; } } } } std::vector<KeyValue> iterate_hashtable(KeyValue* pHashTable) { uint32_t* device_num_kvs; cudaMalloc((void**) &device_num_kvs, sizeof(uint32_t)); cudaMemset(device_num_kvs, 0, sizeof(uint32_t)); KeyValue* device_kvs; cudaMalloc((void**) &device_kvs, sizeof(KeyValue) * kNumKeyValues); const int threadblocksize = 256; int gridsize = (kHashTableCapacity + threadblocksize - 1) / threadblocksize; cudaDeviceSynchronize(); auto start = 
std::chrono::steady_clock::now(); k_iterate_hashtable<<<gridsize, threadblocksize>>>(pHashTable, device_kvs, device_num_kvs); cudaDeviceSynchronize(); auto end = std::chrono::steady_clock::now(); auto time = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count(); printf("Kernel execution time (iterate): %f (s)\n", time * 1e-9f); uint32_t num_kvs; cudaMemcpy(&num_kvs, device_num_kvs, sizeof(uint32_t), cudaMemcpyDeviceToHost); std::vector<KeyValue> kvs; kvs.resize(num_kvs); cudaMemcpy(kvs.data(), device_kvs, sizeof(KeyValue) * num_kvs, cudaMemcpyDeviceToHost); cudaFree(device_kvs); cudaFree(device_num_kvs); return kvs; }
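/*
 * Sketch of a matching lookup kernel in the same linear-probing style as the
 * insert/delete kernels above. It is not part of the file above; KeyValue,
 * kEmpty and kHashTableCapacity are assumed to come from linearprobing.h, the
 * capacity is assumed to be a power of two (as the masking above implies), and
 * the sketch assumes it is compiled in the same translation unit so the
 * Murmur3-based hash() above is visible.
 */
__global__ void k_hashtable_lookup(const KeyValue* __restrict__ hashtable,
                                   KeyValue* __restrict__ kvs, unsigned int numkvs) {
  unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x;
  if (tid < numkvs) {
    uint32_t key = kvs[tid].key;
    uint32_t slot = hash(key);
    while (true) {
      if (hashtable[slot].key == key) {            // hit: report the stored value
        kvs[tid].value = hashtable[slot].value;
        return;
      }
      if (hashtable[slot].key == kEmpty) {         // empty slot ends the probe chain: miss
        kvs[tid].value = kEmpty;
        return;
      }
      slot = (slot + 1) & (kHashTableCapacity - 1);
    }
  }
}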
c2134b924917a085601c142791efec6a12001961.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // // auto-generated by ops.py // __constant__ int dims_advec_mom_kernel_z3 [4][2]; static int dims_advec_mom_kernel_z3_h [4][2] = {0}; //user function __device__ inline void advec_mom_kernel_z3_gpu(ACC<double> &pre_vol, ACC<double> &post_vol, const ACC<double> &volume, const ACC<double> &vol_flux_z) { post_vol(0,0,0) = volume(0,0,0); pre_vol(0,0,0) = post_vol(0,0,0) + vol_flux_z(0,0,1) - vol_flux_z(0,0,0); } __global__ void ops_advec_mom_kernel_z3( double* __restrict arg0, double* __restrict arg1, double* __restrict arg2, double* __restrict arg3, int size0, int size1, int size2 ){ int idx_z = blockDim.z * blockIdx.z + threadIdx.z; int idx_y = blockDim.y * blockIdx.y + threadIdx.y; int idx_x = blockDim.x * blockIdx.x + threadIdx.x; arg0 += idx_x * 1*1 + idx_y * 1*1 * dims_advec_mom_kernel_z3[0][0] + idx_z * 1*1 * dims_advec_mom_kernel_z3[0][0] * dims_advec_mom_kernel_z3[0][1]; arg1 += idx_x * 1*1 + idx_y * 1*1 * dims_advec_mom_kernel_z3[1][0] + idx_z * 1*1 * dims_advec_mom_kernel_z3[1][0] * dims_advec_mom_kernel_z3[1][1]; arg2 += idx_x * 1*1 + idx_y * 1*1 * dims_advec_mom_kernel_z3[2][0] + idx_z * 1*1 * dims_advec_mom_kernel_z3[2][0] * dims_advec_mom_kernel_z3[2][1]; arg3 += idx_x * 1*1 + idx_y * 1*1 * dims_advec_mom_kernel_z3[3][0] + idx_z * 1*1 * dims_advec_mom_kernel_z3[3][0] * dims_advec_mom_kernel_z3[3][1]; if (idx_x < size0 && idx_y < size1 && idx_z < size2) { ACC<double> argp0(dims_advec_mom_kernel_z3[0][0], dims_advec_mom_kernel_z3[0][1], arg0); ACC<double> argp1(dims_advec_mom_kernel_z3[1][0], dims_advec_mom_kernel_z3[1][1], arg1); const ACC<double> argp2(dims_advec_mom_kernel_z3[2][0], dims_advec_mom_kernel_z3[2][1], arg2); const ACC<double> argp3(dims_advec_mom_kernel_z3[3][0], dims_advec_mom_kernel_z3[3][1], arg3); advec_mom_kernel_z3_gpu(argp0, argp1, argp2, argp3); } } // host stub function #ifndef OPS_LAZY void ops_par_loop_advec_mom_kernel_z3(char const *name, ops_block block, int dim, int* range, ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3) { #else void ops_par_loop_advec_mom_kernel_z3_execute(ops_kernel_descriptor *desc) { int dim = desc->dim; #if OPS_MPI ops_block block = desc->block; #endif int *range = desc->range; ops_arg arg0 = desc->args[0]; ops_arg arg1 = desc->args[1]; ops_arg arg2 = desc->args[2]; ops_arg arg3 = desc->args[3]; #endif //Timing double t1,t2,c1,c2; ops_arg args[4] = { arg0, arg1, arg2, arg3}; #if CHECKPOINTING && !OPS_LAZY if (!ops_checkpointing_before(args,4,range,126)) return; #endif if (OPS_diags > 1) { ops_timing_realloc(126,"advec_mom_kernel_z3"); OPS_kernels[126].count++; ops_timers_core(&c1,&t1); } //compute locally allocated range for the sub-block int start[3]; int end[3]; #if OPS_MPI && !OPS_LAZY sub_block_list sb = OPS_sub_block_list[block->index]; #endif //OPS_MPI #ifdef OPS_MPI int arg_idx[3]; #endif #ifdef OPS_MPI if (compute_ranges(args, 4,block, range, start, end, arg_idx) < 0) return; #else //OPS_MPI for ( int n=0; n<3; n++ ){ start[n] = range[2*n];end[n] = range[2*n+1]; } #endif int xdim0 = args[0].dat->size[0]; int ydim0 = args[0].dat->size[1]; int xdim1 = args[1].dat->size[0]; int ydim1 = args[1].dat->size[1]; int xdim2 = args[2].dat->size[0]; int ydim2 = args[2].dat->size[1]; int xdim3 = args[3].dat->size[0]; int ydim3 = args[3].dat->size[1]; if (xdim0 != dims_advec_mom_kernel_z3_h[0][0] || ydim0 != dims_advec_mom_kernel_z3_h[0][1] || xdim1 != dims_advec_mom_kernel_z3_h[1][0] || ydim1 != dims_advec_mom_kernel_z3_h[1][1] || xdim2 != 
dims_advec_mom_kernel_z3_h[2][0] || ydim2 != dims_advec_mom_kernel_z3_h[2][1] || xdim3 != dims_advec_mom_kernel_z3_h[3][0] || ydim3 != dims_advec_mom_kernel_z3_h[3][1]) { dims_advec_mom_kernel_z3_h[0][0] = xdim0; dims_advec_mom_kernel_z3_h[0][1] = ydim0; dims_advec_mom_kernel_z3_h[1][0] = xdim1; dims_advec_mom_kernel_z3_h[1][1] = ydim1; dims_advec_mom_kernel_z3_h[2][0] = xdim2; dims_advec_mom_kernel_z3_h[2][1] = ydim2; dims_advec_mom_kernel_z3_h[3][0] = xdim3; dims_advec_mom_kernel_z3_h[3][1] = ydim3; cutilSafeCall(hipMemcpyToSymbol( dims_advec_mom_kernel_z3, dims_advec_mom_kernel_z3_h, sizeof(dims_advec_mom_kernel_z3))); } int x_size = MAX(0,end[0]-start[0]); int y_size = MAX(0,end[1]-start[1]); int z_size = MAX(0,end[2]-start[2]); dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, (z_size-1)/OPS_block_size_z +1); dim3 tblock(OPS_block_size_x,OPS_block_size_y,OPS_block_size_z); int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size); int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size); int dat2 = (OPS_soa ? args[2].dat->type_size : args[2].dat->elem_size); int dat3 = (OPS_soa ? args[3].dat->type_size : args[3].dat->elem_size); char *p_a[4]; //set up initial pointers int base0 = args[0].dat->base_offset + dat0 * 1 * (start[0] * args[0].stencil->stride[0]); base0 = base0+ dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1]); base0 = base0+ dat0 * args[0].dat->size[0] * args[0].dat->size[1] * (start[2] * args[0].stencil->stride[2]); p_a[0] = (char *)args[0].data_d + base0; int base1 = args[1].dat->base_offset + dat1 * 1 * (start[0] * args[1].stencil->stride[0]); base1 = base1+ dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1]); base1 = base1+ dat1 * args[1].dat->size[0] * args[1].dat->size[1] * (start[2] * args[1].stencil->stride[2]); p_a[1] = (char *)args[1].data_d + base1; int base2 = args[2].dat->base_offset + dat2 * 1 * (start[0] * args[2].stencil->stride[0]); base2 = base2+ dat2 * args[2].dat->size[0] * (start[1] * args[2].stencil->stride[1]); base2 = base2+ dat2 * args[2].dat->size[0] * args[2].dat->size[1] * (start[2] * args[2].stencil->stride[2]); p_a[2] = (char *)args[2].data_d + base2; int base3 = args[3].dat->base_offset + dat3 * 1 * (start[0] * args[3].stencil->stride[0]); base3 = base3+ dat3 * args[3].dat->size[0] * (start[1] * args[3].stencil->stride[1]); base3 = base3+ dat3 * args[3].dat->size[0] * args[3].dat->size[1] * (start[2] * args[3].stencil->stride[2]); p_a[3] = (char *)args[3].data_d + base3; #ifndef OPS_LAZY ops_H_D_exchanges_device(args, 4); ops_halo_exchanges(args,4,range); #endif if (OPS_diags > 1) { ops_timers_core(&c2,&t2); OPS_kernels[126].mpi_time += t2-t1; } //call kernel wrapper function, passing in pointers to data if (x_size > 0 && y_size > 0 && z_size > 0) hipLaunchKernelGGL(( ops_advec_mom_kernel_z3), dim3(grid), dim3(tblock) , 0, 0, (double *)p_a[0], (double *)p_a[1], (double *)p_a[2], (double *)p_a[3],x_size, y_size, z_size); cutilSafeCall(hipGetLastError()); if (OPS_diags>1) { cutilSafeCall(hipDeviceSynchronize()); ops_timers_core(&c1,&t1); OPS_kernels[126].time += t1-t2; } #ifndef OPS_LAZY ops_set_dirtybit_device(args, 4); ops_set_halo_dirtybit3(&args[0],range); ops_set_halo_dirtybit3(&args[1],range); #endif if (OPS_diags > 1) { //Update kernel record ops_timers_core(&c2,&t2); OPS_kernels[126].mpi_time += t2-t1; OPS_kernels[126].transfer += ops_compute_transfer(dim, start, end, &arg0); OPS_kernels[126].transfer += ops_compute_transfer(dim, start, end, 
&arg1); OPS_kernels[126].transfer += ops_compute_transfer(dim, start, end, &arg2); OPS_kernels[126].transfer += ops_compute_transfer(dim, start, end, &arg3); } } #ifdef OPS_LAZY void ops_par_loop_advec_mom_kernel_z3(char const *name, ops_block block, int dim, int* range, ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3) { ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor)); desc->name = name; desc->block = block; desc->dim = dim; desc->device = 1; desc->index = 126; desc->hash = 5381; desc->hash = ((desc->hash << 5) + desc->hash) + 126; for ( int i=0; i<6; i++ ){ desc->range[i] = range[i]; desc->orig_range[i] = range[i]; desc->hash = ((desc->hash << 5) + desc->hash) + range[i]; } desc->nargs = 4; desc->args = (ops_arg*)malloc(4*sizeof(ops_arg)); desc->args[0] = arg0; desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index; desc->args[1] = arg1; desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index; desc->args[2] = arg2; desc->hash = ((desc->hash << 5) + desc->hash) + arg2.dat->index; desc->args[3] = arg3; desc->hash = ((desc->hash << 5) + desc->hash) + arg3.dat->index; desc->function = ops_par_loop_advec_mom_kernel_z3_execute; if (OPS_diags > 1) { ops_timing_realloc(126,"advec_mom_kernel_z3"); } ops_enqueue_kernel(desc); } #endif
c2134b924917a085601c142791efec6a12001961.cu
// // auto-generated by ops.py // __constant__ int dims_advec_mom_kernel_z3 [4][2]; static int dims_advec_mom_kernel_z3_h [4][2] = {0}; //user function __device__ inline void advec_mom_kernel_z3_gpu(ACC<double> &pre_vol, ACC<double> &post_vol, const ACC<double> &volume, const ACC<double> &vol_flux_z) { post_vol(0,0,0) = volume(0,0,0); pre_vol(0,0,0) = post_vol(0,0,0) + vol_flux_z(0,0,1) - vol_flux_z(0,0,0); } __global__ void ops_advec_mom_kernel_z3( double* __restrict arg0, double* __restrict arg1, double* __restrict arg2, double* __restrict arg3, int size0, int size1, int size2 ){ int idx_z = blockDim.z * blockIdx.z + threadIdx.z; int idx_y = blockDim.y * blockIdx.y + threadIdx.y; int idx_x = blockDim.x * blockIdx.x + threadIdx.x; arg0 += idx_x * 1*1 + idx_y * 1*1 * dims_advec_mom_kernel_z3[0][0] + idx_z * 1*1 * dims_advec_mom_kernel_z3[0][0] * dims_advec_mom_kernel_z3[0][1]; arg1 += idx_x * 1*1 + idx_y * 1*1 * dims_advec_mom_kernel_z3[1][0] + idx_z * 1*1 * dims_advec_mom_kernel_z3[1][0] * dims_advec_mom_kernel_z3[1][1]; arg2 += idx_x * 1*1 + idx_y * 1*1 * dims_advec_mom_kernel_z3[2][0] + idx_z * 1*1 * dims_advec_mom_kernel_z3[2][0] * dims_advec_mom_kernel_z3[2][1]; arg3 += idx_x * 1*1 + idx_y * 1*1 * dims_advec_mom_kernel_z3[3][0] + idx_z * 1*1 * dims_advec_mom_kernel_z3[3][0] * dims_advec_mom_kernel_z3[3][1]; if (idx_x < size0 && idx_y < size1 && idx_z < size2) { ACC<double> argp0(dims_advec_mom_kernel_z3[0][0], dims_advec_mom_kernel_z3[0][1], arg0); ACC<double> argp1(dims_advec_mom_kernel_z3[1][0], dims_advec_mom_kernel_z3[1][1], arg1); const ACC<double> argp2(dims_advec_mom_kernel_z3[2][0], dims_advec_mom_kernel_z3[2][1], arg2); const ACC<double> argp3(dims_advec_mom_kernel_z3[3][0], dims_advec_mom_kernel_z3[3][1], arg3); advec_mom_kernel_z3_gpu(argp0, argp1, argp2, argp3); } } // host stub function #ifndef OPS_LAZY void ops_par_loop_advec_mom_kernel_z3(char const *name, ops_block block, int dim, int* range, ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3) { #else void ops_par_loop_advec_mom_kernel_z3_execute(ops_kernel_descriptor *desc) { int dim = desc->dim; #if OPS_MPI ops_block block = desc->block; #endif int *range = desc->range; ops_arg arg0 = desc->args[0]; ops_arg arg1 = desc->args[1]; ops_arg arg2 = desc->args[2]; ops_arg arg3 = desc->args[3]; #endif //Timing double t1,t2,c1,c2; ops_arg args[4] = { arg0, arg1, arg2, arg3}; #if CHECKPOINTING && !OPS_LAZY if (!ops_checkpointing_before(args,4,range,126)) return; #endif if (OPS_diags > 1) { ops_timing_realloc(126,"advec_mom_kernel_z3"); OPS_kernels[126].count++; ops_timers_core(&c1,&t1); } //compute locally allocated range for the sub-block int start[3]; int end[3]; #if OPS_MPI && !OPS_LAZY sub_block_list sb = OPS_sub_block_list[block->index]; #endif //OPS_MPI #ifdef OPS_MPI int arg_idx[3]; #endif #ifdef OPS_MPI if (compute_ranges(args, 4,block, range, start, end, arg_idx) < 0) return; #else //OPS_MPI for ( int n=0; n<3; n++ ){ start[n] = range[2*n];end[n] = range[2*n+1]; } #endif int xdim0 = args[0].dat->size[0]; int ydim0 = args[0].dat->size[1]; int xdim1 = args[1].dat->size[0]; int ydim1 = args[1].dat->size[1]; int xdim2 = args[2].dat->size[0]; int ydim2 = args[2].dat->size[1]; int xdim3 = args[3].dat->size[0]; int ydim3 = args[3].dat->size[1]; if (xdim0 != dims_advec_mom_kernel_z3_h[0][0] || ydim0 != dims_advec_mom_kernel_z3_h[0][1] || xdim1 != dims_advec_mom_kernel_z3_h[1][0] || ydim1 != dims_advec_mom_kernel_z3_h[1][1] || xdim2 != dims_advec_mom_kernel_z3_h[2][0] || ydim2 != dims_advec_mom_kernel_z3_h[2][1] || xdim3 
!= dims_advec_mom_kernel_z3_h[3][0] || ydim3 != dims_advec_mom_kernel_z3_h[3][1]) { dims_advec_mom_kernel_z3_h[0][0] = xdim0; dims_advec_mom_kernel_z3_h[0][1] = ydim0; dims_advec_mom_kernel_z3_h[1][0] = xdim1; dims_advec_mom_kernel_z3_h[1][1] = ydim1; dims_advec_mom_kernel_z3_h[2][0] = xdim2; dims_advec_mom_kernel_z3_h[2][1] = ydim2; dims_advec_mom_kernel_z3_h[3][0] = xdim3; dims_advec_mom_kernel_z3_h[3][1] = ydim3; cutilSafeCall(cudaMemcpyToSymbol( dims_advec_mom_kernel_z3, dims_advec_mom_kernel_z3_h, sizeof(dims_advec_mom_kernel_z3))); } int x_size = MAX(0,end[0]-start[0]); int y_size = MAX(0,end[1]-start[1]); int z_size = MAX(0,end[2]-start[2]); dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, (z_size-1)/OPS_block_size_z +1); dim3 tblock(OPS_block_size_x,OPS_block_size_y,OPS_block_size_z); int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size); int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size); int dat2 = (OPS_soa ? args[2].dat->type_size : args[2].dat->elem_size); int dat3 = (OPS_soa ? args[3].dat->type_size : args[3].dat->elem_size); char *p_a[4]; //set up initial pointers int base0 = args[0].dat->base_offset + dat0 * 1 * (start[0] * args[0].stencil->stride[0]); base0 = base0+ dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1]); base0 = base0+ dat0 * args[0].dat->size[0] * args[0].dat->size[1] * (start[2] * args[0].stencil->stride[2]); p_a[0] = (char *)args[0].data_d + base0; int base1 = args[1].dat->base_offset + dat1 * 1 * (start[0] * args[1].stencil->stride[0]); base1 = base1+ dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1]); base1 = base1+ dat1 * args[1].dat->size[0] * args[1].dat->size[1] * (start[2] * args[1].stencil->stride[2]); p_a[1] = (char *)args[1].data_d + base1; int base2 = args[2].dat->base_offset + dat2 * 1 * (start[0] * args[2].stencil->stride[0]); base2 = base2+ dat2 * args[2].dat->size[0] * (start[1] * args[2].stencil->stride[1]); base2 = base2+ dat2 * args[2].dat->size[0] * args[2].dat->size[1] * (start[2] * args[2].stencil->stride[2]); p_a[2] = (char *)args[2].data_d + base2; int base3 = args[3].dat->base_offset + dat3 * 1 * (start[0] * args[3].stencil->stride[0]); base3 = base3+ dat3 * args[3].dat->size[0] * (start[1] * args[3].stencil->stride[1]); base3 = base3+ dat3 * args[3].dat->size[0] * args[3].dat->size[1] * (start[2] * args[3].stencil->stride[2]); p_a[3] = (char *)args[3].data_d + base3; #ifndef OPS_LAZY ops_H_D_exchanges_device(args, 4); ops_halo_exchanges(args,4,range); #endif if (OPS_diags > 1) { ops_timers_core(&c2,&t2); OPS_kernels[126].mpi_time += t2-t1; } //call kernel wrapper function, passing in pointers to data if (x_size > 0 && y_size > 0 && z_size > 0) ops_advec_mom_kernel_z3<<<grid, tblock >>> ( (double *)p_a[0], (double *)p_a[1], (double *)p_a[2], (double *)p_a[3],x_size, y_size, z_size); cutilSafeCall(cudaGetLastError()); if (OPS_diags>1) { cutilSafeCall(cudaDeviceSynchronize()); ops_timers_core(&c1,&t1); OPS_kernels[126].time += t1-t2; } #ifndef OPS_LAZY ops_set_dirtybit_device(args, 4); ops_set_halo_dirtybit3(&args[0],range); ops_set_halo_dirtybit3(&args[1],range); #endif if (OPS_diags > 1) { //Update kernel record ops_timers_core(&c2,&t2); OPS_kernels[126].mpi_time += t2-t1; OPS_kernels[126].transfer += ops_compute_transfer(dim, start, end, &arg0); OPS_kernels[126].transfer += ops_compute_transfer(dim, start, end, &arg1); OPS_kernels[126].transfer += ops_compute_transfer(dim, start, end, &arg2); OPS_kernels[126].transfer += 
ops_compute_transfer(dim, start, end, &arg3); } } #ifdef OPS_LAZY void ops_par_loop_advec_mom_kernel_z3(char const *name, ops_block block, int dim, int* range, ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3) { ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor)); desc->name = name; desc->block = block; desc->dim = dim; desc->device = 1; desc->index = 126; desc->hash = 5381; desc->hash = ((desc->hash << 5) + desc->hash) + 126; for ( int i=0; i<6; i++ ){ desc->range[i] = range[i]; desc->orig_range[i] = range[i]; desc->hash = ((desc->hash << 5) + desc->hash) + range[i]; } desc->nargs = 4; desc->args = (ops_arg*)malloc(4*sizeof(ops_arg)); desc->args[0] = arg0; desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index; desc->args[1] = arg1; desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index; desc->args[2] = arg2; desc->hash = ((desc->hash << 5) + desc->hash) + arg2.dat->index; desc->args[3] = arg3; desc->hash = ((desc->hash << 5) + desc->hash) + arg3.dat->index; desc->function = ops_par_loop_advec_mom_kernel_z3_execute; if (OPS_diags > 1) { ops_timing_realloc(126,"advec_mom_kernel_z3"); } ops_enqueue_kernel(desc); } #endif
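/*
 * The generated wrapper above addresses each 3-D dat by flattening (x,y,z)
 * against that dat's padded extents (xdim, ydim) and adding a base offset
 * computed in the host stub. A scalar sketch of that flattening, with made-up
 * extents and an illustrative helper name:
 */
#include <cstdio>

static inline size_t ops_flat_index(int x, int y, int z, int xdim, int ydim) {
  // matches the kernel's arg += idx_x*1*1 + idx_y*1*1*xdim + idx_z*1*1*xdim*ydim
  return (size_t)x + (size_t)y * xdim + (size_t)z * xdim * ydim;
}

int main() {
  const int xdim = 8, ydim = 6;   // padded per-dat sizes, illustrative only
  // (2,3,1) -> 2 + 3*8 + 1*8*6 = 74
  printf("(2,3,1) -> %zu\n", ops_flat_index(2, 3, 1, xdim, ydim));
  return 0;
}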
6001449c49743c421b34927ab888779880ed15b8.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "n3ldg_cuda.h" #include <array> #include <cstdlib> #include <vector> #include <algorithm> #include <cmath> #include <cstdio> #include <rocblas.h> #include "cuPrintf_hip.cuh" #include "cuPrintf.hip" #include "memory_pool.h" #include <hiprand/hiprand.h> #include <hiprand/hiprand_kernel.h> #include "cnmem.h" #include <string> #include <cstring> #include <cstdint> #include <chrono> #include <thread> #include <numeric> #include <memory> #include "profiler.h" namespace n3ldg_cuda { using std::cout; using std::endl; #if USE_FLOAT #define cuda_sqrt(x) sqrtf(x) #define cuda_pow(x, y) powf(x, y) #define cuda_tanh(x) tanhf(x) #define cuda_exp(x) __expf(x) #else #define cuda_sqrt(x) sqrt(x) #define cuda_pow(x, y) pow(x, y) #define cuda_tanh(x) tanh(x) #define cuda_exp(x) exp(x) #endif #define KERNEL_LOG #ifdef KERNEL_LOG #define KernelPrintLine(format, ...)\ {\ cuPrintf("block:x=%d,y=%d thread:x=%d,y=%d "#format"\n", blockIdx.x,\ blockIdx.y, threadIdx.x, threadIdx.y,__VA_ARGS__);\ } #else #define KernelPrintLine(format, ...) #endif constexpr int TPB = 1024; constexpr int BLOCK_COUNT = 56; void CallCuda(hipError_t status) { if (status != hipSuccess) { cout << hipGetErrorString(status) << endl; abort(); } } void CallCnmem(cnmemStatus_t status) { assert(status == CNMEM_STATUS_SUCCESS); } void CallCublas(hipblasStatus_t status) { assert(status == HIPBLAS_STATUS_SUCCESS); } void CallCurand(hiprandStatus_t status) { assert(status == HIPRAND_STATUS_SUCCESS); } hipblasHandle_t& GetCublasHandle() { static hipblasHandle_t handle; static bool init; if (!init) { init = true; CallCublas(hipblasCreate(&handle)); } return handle; } hipError_t MyCudaMemcpy(void *dest, const void *src, size_t count, hipMemcpyKind kind) { hipError_t e = hipMemcpy(dest, src, count, kind); return e; } void NumberPointerArray::init(dtype **host_arr, int len) { if (value != NULL) { CallCuda(MemoryPool::Ins().Free(value)); value = NULL; } CallCuda(MemoryPool::Ins().Malloc((void**)&value, len * sizeof(dtype*))); CallCuda(MyCudaMemcpy(value, host_arr, len * sizeof(dtype*), hipMemcpyHostToDevice)); this->len = len; } NumberPointerArray::~NumberPointerArray() { if (value != NULL) { CallCuda(MemoryPool::Ins().Free(value)); } } void Memcpy(dtype *dest, dtype*src, int size, hipMemcpyKind kind) { CallCuda(MyCudaMemcpy(dest, src, size, kind)); } void NumberPointerPointerArray::init(dtype ***host_arr, int len) { if (value != NULL) { CallCuda(MemoryPool::Ins().Free(value)); value = NULL; } CallCuda(MemoryPool::Ins().Malloc((void**)&value, len * sizeof(dtype**))); CallCuda(MyCudaMemcpy(value, host_arr, len * sizeof(dtype*), hipMemcpyHostToDevice)); this->len = len; } NumberPointerPointerArray::~NumberPointerPointerArray() { if (value != NULL) { CallCuda(MemoryPool::Ins().Free(value)); } } void NumberArray::init(int len) { if (value != NULL) { CallCuda(MemoryPool::Ins().Free(value)); value = NULL; } CallCuda(MemoryPool::Ins().Malloc((void**)&value, len * sizeof(dtype))); this->len = len; } void NumberArray::init(dtype *host_arr, int len) { init(len); CallCuda(MyCudaMemcpy(value, host_arr, len * sizeof(dtype), hipMemcpyHostToDevice)); } NumberArray::~NumberArray() { if (value != NULL) { CallCuda(MemoryPool::Ins().Free(value)); } } void DeviceInt::init() { if (value != NULL) { CallCuda(MemoryPool::Ins().Free(value)); value = NULL; } CallCuda(MemoryPool::Ins().Malloc((void**)&value, sizeof(int))); } void DeviceInt::copyFromDeviceToHost() { 
CallCuda(MyCudaMemcpy(&v, value, sizeof(int), hipMemcpyDeviceToHost)); } void DeviceInt::copyFromHostToDevice() { CallCuda(MyCudaMemcpy(value, &v, sizeof(int), hipMemcpyHostToDevice)); } DeviceInt::~DeviceInt() { if (value != NULL) { CallCuda(MemoryPool::Ins().Free(value)); } } void DeviceNumber::init() { if (value != NULL) { CallCuda(MemoryPool::Ins().Free(value)); value = NULL; } CallCuda(MemoryPool::Ins().Malloc((void**)&value, sizeof(int))); } void DeviceNumber::copyFromDeviceToHost() { CallCuda(MyCudaMemcpy(&v, value, sizeof(dtype), hipMemcpyDeviceToHost)); } DeviceNumber::~DeviceNumber() { if (value != NULL) { CallCuda(MemoryPool::Ins().Free(value)); } } void IntPointerArray::init(int **host_arr, int len) { if (value != NULL) { CallCuda(MemoryPool::Ins().Free(value)); value = NULL; } CallCuda(MemoryPool::Ins().Malloc((void**)&value, len * sizeof(int*))); CallCuda(MyCudaMemcpy(value, host_arr, len * sizeof(int*), hipMemcpyHostToDevice)); this->len = len; } IntPointerArray::~IntPointerArray() { if (value != NULL) { CallCuda(MemoryPool::Ins().Free(value)); } } void IntArray::init(int *host_arr, int len) { if (value != NULL) { CallCuda(MemoryPool::Ins().Free(value)); value = NULL; } CallCuda(MemoryPool::Ins().Malloc((void**)&value, len * sizeof(int))); CallCuda(MyCudaMemcpy(value, host_arr, len * sizeof(int), hipMemcpyHostToDevice)); this->len = len; } void IntArray::init(int len) { if (value != NULL) { CallCuda(MemoryPool::Ins().Free(value)); value = NULL; } CallCuda(MemoryPool::Ins().Malloc((void**)&value, len * sizeof(int))); this->len = len; } IntArray::~IntArray() { if (value != NULL) { CallCuda(MemoryPool::Ins().Free(value)); } } void BoolArray::init(bool *host_arr, int len) { if (value != NULL) { CallCuda(MemoryPool::Ins().Free(value)); value = NULL; } CallCuda(MemoryPool::Ins().Malloc((void**)&value, len * sizeof(bool))); CallCuda(MyCudaMemcpy(value, host_arr, len * sizeof(bool), hipMemcpyHostToDevice)); this->len = len; } void BoolArray::copyFromHost(bool *host_arr) { CallCuda(MyCudaMemcpy(value, host_arr, len * sizeof(bool), hipMemcpyHostToDevice)); } void BoolArray::copyToHost(bool *host_arr) { CallCuda(MyCudaMemcpy(host_arr, value, len * sizeof(bool), hipMemcpyDeviceToHost)); } BoolArray::~BoolArray() { if (value != NULL) { CallCuda(MemoryPool::Ins().Free(value)); } } void Tensor1D::init(int dim) { initOnDevice(dim); #if TEST_CUDA v = new dtype[dim]; zero(); #endif } void Tensor1D::initOnDevice(int dim) { CallCuda(MemoryPool::Ins().Malloc((void**)&value, dim * sizeof(dtype))); this->dim = dim; } Tensor1D::Tensor1D(const Tensor1D &t) { dim = t.dim; memcpy(v, t.v, dim *sizeof(dtype)); CallCuda(MyCudaMemcpy(value, t.value, dim * sizeof(dtype), hipMemcpyDeviceToDevice)); } Tensor1D::~Tensor1D() { if (value != NULL) { CallCuda(MemoryPool::Ins().Free(value)); } if (v != NULL) { delete []v; } } void Tensor1D::copyFromHostToDevice() { assert(v != NULL); assert(value != NULL); CallCuda(MyCudaMemcpy(value, v, dim * sizeof(dtype), hipMemcpyHostToDevice)); } void Tensor1D::copyFromDeviceToHost() { CallCuda(MyCudaMemcpy(v, value, dim * sizeof(dtype), hipMemcpyDeviceToHost)); } void Tensor2D::initOnMemoryAndDevice(int row, int col) { initOnDevice(row, col); v = new dtype[row * col]; zero(); } void Tensor2D::init(int row, int col) { initOnDevice(row, col); #if TEST_CUDA v = new dtype[row * col]; zero(); #endif } void Tensor2D::initOnDevice(int row, int col) { CallCuda(MemoryPool::Ins().Malloc((void**)&value, row * col * sizeof(dtype))); this->row = row; this->col = col; this->size = row * 
col; } Tensor2D::Tensor2D(const Tensor2D &t) { row = t.row; col = t.col; memcpy(v, t.v, sizeof(dtype) * row * col); CallCuda(MyCudaMemcpy(value, t.value, sizeof(dtype) * row * col, hipMemcpyDeviceToDevice)); } Tensor2D::~Tensor2D() { if (value != NULL) { CallCuda(MemoryPool::Ins().Free(value)); } if (v != NULL) { delete [] v; } } void Tensor2D::copyFromHostToDevice() { CallCuda(MyCudaMemcpy(value, v, size * sizeof(dtype), hipMemcpyHostToDevice)); } void Tensor2D::copyFromDeviceToHost() { CallCuda(MyCudaMemcpy(v, value, size * sizeof(dtype), hipMemcpyDeviceToHost)); } void Assert(bool v) { #if TEST_CUDA if (!v) { abort(); } #endif } __device__ void DeviceAtomicAdd(float* address, float value) { float old = value; float new_old; do { new_old = atomicExch(address, 0.0f); new_old += old; } while ((old = atomicExch(address, new_old))!=0.0f); }; __device__ dtype cuda_dtanh(dtype y) { return 1.0f - y * y; } __device__ dtype cuda_sigmoid(dtype x) { return 1.0f / (1.0f + cuda_exp(-x)); } __device__ dtype cuda_dsigmoid(dtype y) { return y * (1.0f - y); } __device__ dtype cuda_relu(dtype x) { return x > 0.0f ? x : 0.0f; } __device__ dtype cuda_drelu(dtype x) { return x > 0.0f ? 1 : 0.0f; } __device__ dtype cuda_leaky_relu(dtype x) { return x > 0.0f ? x : -0.1f * x; } __device__ dtype cuda_dleaky_relu(dtype x) { return x > 0.0f ? 1.0f : -0.1f; } const dtype SELU_LAMBDA = 1.0507009873554804934193349852946; const dtype SELU_ALPHA = 1.6732632423543772848170429916717; __device__ dtype cuda_selu(dtype x) { return x <= 0.0f ? SELU_LAMBDA * SELU_ALPHA * (cuda_exp(x) - 1.0f) : SELU_LAMBDA * x; } __device__ dtype cuda_dselu(dtype x, dtype y) { return x <= 0.0f ? SELU_LAMBDA * SELU_ALPHA + y : SELU_LAMBDA; } void Random(dtype *v, int len, dtype bound) { dtype *mem = (dtype*)malloc(len * sizeof(dtype)); assert(mem != NULL); dtype min = -bound, max = bound; for (int i = 0; i < len; i++) { mem[i] = (dtype(rand()) / RAND_MAX) * (max - min) + min; } CallCuda(MyCudaMemcpy(v, mem, len * sizeof(dtype), hipMemcpyHostToDevice)); free(mem); } __device__ int DeviceDefaultIndex() { return blockIdx.x * blockDim.x + threadIdx.x; } __device__ int DeviceDefaultStep() { return gridDim.x * blockDim.x; } __device__ dtype DeviceAbs(dtype d) { return d > 0 ? 
d : -d; } int DefaultBlockCount(int len) { int block_count = (len - 1 + TPB) / TPB; return ::min(block_count, BLOCK_COUNT); } int DefaultBlockCountWithoutLimit(int len) { return (len - 1 + TPB) / TPB; } __global__ void KernelZero(dtype *v, int len) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= len) { return; } v[index] = 0; } void Zero(dtype *v, int len) { int block_count = (len - 1 + TPB) / TPB; hipLaunchKernelGGL(( KernelZero), dim3(block_count), dim3(TPB), 0, 0, v, len); } __global__ void PrintPointers(void **p, int len) { for (int i = 0; i < len; ++i) { printf("%p\n", p[i]); } } __global__ void KernelPrintNums(const dtype* p, int len) { for (int i = 0; i < len; ++i) { printf("%f\n", p[i]); } } void PrintNums(const dtype* p, int len) { hipLaunchKernelGGL(( KernelPrintNums), dim3(1), dim3(1), 0, 0, p, len); hipDeviceSynchronize(); } __global__ void KernelPrintInts(const int* p, int len) { for (int i = 0; i < len; ++i) { printf("%d\n", p[i]); } } void PrintInts(const int* p, int len) { hipLaunchKernelGGL(( KernelPrintInts), dim3(1), dim3(1), 0, 0, p, len); hipDeviceSynchronize(); } void InitCuda() { CallCuda(hipSetDeviceFlags(hipDeviceMapHost)); #if DEVICE_MEMORY == 0 cnmemDevice_t device; device.size = 2000000000; device.device = 0; cnmemInit(1, &device, CNMEM_FLAGS_DEFAULT); #else CallCuda(hipSetDevice(0)); #endif CallCuda(hipDeviceSetCacheConfig(hipFuncCachePreferL1)); CallCuda(cudaPrintfInit()); } void EndCuda() { cudaPrintfEnd(); Profiler::Ins().Print(); } __global__ void KernelCopyFromOneVectorToMultiVectors(const dtype *src, dtype **dest, int count, int len) { int index = DeviceDefaultIndex(); int step = DeviceDefaultStep(); for (int i = index; i < count * len; i += step) { int count_i = i % count; int len_i = i / count; dest[count_i][len_i] = src[i]; } } void CopyFromOneVectorToMultiVals(const dtype *src, std::vector<dtype*> &vals, int count, int len) { NumberPointerArray val_arr; val_arr.init((dtype**)vals.data(), vals.size()); int block_count = (len * count - 1 + TPB) / TPB; block_count = ::min(block_count, BLOCK_COUNT); hipLaunchKernelGGL(( KernelCopyFromOneVectorToMultiVectors), dim3(block_count), dim3(TPB), 0, 0, src, val_arr.value, count, len); } __global__ void KernelActivated(ActivatedEnum activated, const dtype *src, dtype**dest, dtype* dest2, int count, int len, bool is_being_trained, dtype drop_factor, const dtype *drop_mask) { int index = blockIdx.x * blockDim.x + threadIdx.x; int step = blockDim.x * gridDim.x; for (int i = index; i < len * count; i += step) { int count_i = i % count; int len_i = i / count; dtype result; if (activated == ActivatedEnum::TANH) { result = cuda_tanh(src[i]); } else if (activated == ActivatedEnum::SIGMOID) { result = cuda_sigmoid(src[i]); } else if (activated == ActivatedEnum::RELU) { result = cuda_relu(src[i]); } else if (activated == ActivatedEnum::LEAKY_RELU) { result = cuda_leaky_relu(src[i]); } else if (activated == ActivatedEnum::SELU) { result = cuda_selu(src[i]); } else { printf("KernelActivated error\n"); return; } if (is_being_trained) { if (drop_factor > 0 && drop_mask[i] <= drop_factor) { dest[count_i][len_i] = 0.0f; dest2[i] = result; } else { dest[count_i][len_i] = result; dest2[i] = result; } } else { dest[count_i][len_i] = result * (1 - drop_factor); dest2[i] = result; } } } void Activated(ActivatedEnum activated, const dtype *src, const std::vector<dtype*>& dest, dtype *dest2, int len, bool is_being_trained, dtype drop_factor, const dtype *drop_mask) { if (drop_factor < 0) { drop_factor = 0; } int count 
= dest.size(); NumberPointerArray dest_arr; dest_arr.init((dtype**)dest.data(), dest.size()); int block_count = ::min((len * count - 1 + TPB) / TPB, BLOCK_COUNT); hipLaunchKernelGGL(( KernelActivated), dim3(block_count), dim3(TPB), 0, 0, activated, src, dest_arr.value, dest2, count, len, is_being_trained, drop_factor, drop_mask); } __global__ void KernelTanhForward(ActivatedEnum activated, const dtype** xs, int count, int dim, const dtype* drop_mask, dtype drop_factor, dtype**ys) { int index = DeviceDefaultIndex(); int step = DeviceDefaultStep(); for (int i = index; i < dim * count; i += step) { int count_i = i / dim; int dim_i = i % dim; if (drop_factor > 0.0f && drop_mask[i] < drop_factor) { ys[count_i][dim_i] = 0.0f; } else { if (activated == ActivatedEnum::TANH) { ys[count_i][dim_i] = cuda_tanh(xs[count_i][dim_i]); } else if (activated == ActivatedEnum::SIGMOID) { ys[count_i][dim_i] = cuda_sigmoid(xs[count_i][dim_i]); } else { printf("error\n"); } } } } void TanhForward(ActivatedEnum activated, const std::vector<dtype*> &xs, int count, int dim, const dtype *drop_mask, dtype drop_factor, std::vector<dtype*> &ys) { if (drop_factor < 0) { drop_factor = 0.0f; } NumberPointerArray x_arr, y_arr; x_arr.init((dtype**)xs.data(), xs.size()); y_arr.init((dtype**)ys.data(), ys.size()); int block_count = DefaultBlockCount(count * dim); hipLaunchKernelGGL(( KernelTanhForward), dim3(block_count), dim3(TPB), 0, 0, activated, (const dtype**)x_arr.value, count, dim, drop_mask, drop_factor, y_arr.value); } __global__ void KernelTanhBackward(ActivatedEnum activated, const dtype **losses, const dtype **vals, int count, int dim, const dtype* drop_mask, dtype drop_factor, dtype** in_losses) { int index = DeviceDefaultIndex(); int step = DeviceDefaultStep(); for (int i = index; i < dim * count; i += step) { int count_i = i / dim; int dim_i = i % dim; if (drop_factor <= 0.0f || drop_mask[i] > drop_factor) { dtype v; if (activated == ActivatedEnum::TANH) { v = losses[count_i][dim_i] * (1 - vals[count_i][dim_i] * vals[count_i][dim_i]); } else if (activated == ActivatedEnum::SIGMOID) { v = losses[count_i][dim_i] * (1 - vals[count_i][dim_i]) * vals[count_i][dim_i]; } atomicAdd(in_losses[count_i] + dim_i, v); } } } void TanhBackward(ActivatedEnum activated, const std::vector<dtype*> &losses, const std::vector<dtype*> &vals, int count, int dim, const dtype *drop_mask, dtype drop_factor, std::vector<dtype*> &in_losses) { if (drop_factor < 0) { drop_factor = 0.0f; } NumberPointerArray loss_arr, val_arr, in_loss_arr; loss_arr.init((dtype**)losses.data(), losses.size()); val_arr.init((dtype**)vals.data(), vals.size()); in_loss_arr.init((dtype**)in_losses.data(), in_losses.size()); int block_count = DefaultBlockCount(count * dim); hipLaunchKernelGGL(( KernelTanhBackward), dim3(block_count), dim3(TPB), 0, 0, activated ,(const dtype**)loss_arr.value, (const dtype**)val_arr.value, count, dim, drop_mask, drop_factor, in_loss_arr.value); } __global__ void KernelDropoutForward(const dtype** xs, int count, int dim, const dtype* drop_mask, dtype drop_factor, dtype**ys) { int index = DeviceDefaultIndex(); int step = DeviceDefaultStep(); for (int i = index; i < dim * count; i += step) { int count_i = i / dim; int dim_i = i % dim; if (drop_factor > 0.0f && drop_mask[i] < drop_factor) { ys[count_i][dim_i] = 0.0f; } else { ys[count_i][dim_i] = xs[count_i][dim_i]; } } } void DropoutForward(const std::vector<dtype*> &xs, int count, int dim, const dtype *drop_mask, dtype drop_factor, std::vector<dtype*> &ys) { if (drop_factor < 0) { 
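// KernelTanhBackward above uses the activation derivatives expressed through the forward value y:
// d tanh(x)/dx = 1 - y*y and d sigmoid(x)/dx = y * (1 - y). A sketch of the per-element rule it
// accumulates (names illustrative; loss is the incoming gradient, val the cached forward output):
//   dtype dval = (activated == ActivatedEnum::TANH) ? (1 - val * val) : val * (1 - val);
//   atomicAdd(in_loss, loss * dval);
// For any other ActivatedEnum value the kernel leaves v unset, so only TANH and SIGMOID are valid here.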
drop_factor = 0.0f; } NumberPointerArray x_arr, y_arr; x_arr.init((dtype**)xs.data(), xs.size()); y_arr.init((dtype**)ys.data(), ys.size()); int block_count = DefaultBlockCount(count * dim); hipLaunchKernelGGL(( KernelDropoutForward), dim3(block_count), dim3(TPB), 0, 0, (const dtype**)x_arr.value, count, dim, drop_mask, drop_factor, y_arr.value); } __global__ void KernelDropoutBackward(const dtype **losses, const dtype **vals, int count, int dim, const dtype* drop_mask, dtype drop_factor, dtype** in_losses) { int index = DeviceDefaultIndex(); int step = DeviceDefaultStep(); for (int i = index; i < dim * count; i += step) { int count_i = i / dim; int dim_i = i % dim; if (drop_factor <= 0.0f || drop_mask[i] > drop_factor) { atomicAdd(in_losses[count_i] + dim_i, losses[count_i][dim_i]); } } } void DropoutBackward(const std::vector<dtype*> &losses, const std::vector<dtype*> &vals, int count, int dim, const dtype *drop_mask, dtype drop_factor, std::vector<dtype*> &in_losses) { if (drop_factor < 0) { drop_factor = 0.0f; } NumberPointerArray loss_arr, val_arr, in_loss_arr; loss_arr.init((dtype**)losses.data(), losses.size()); val_arr.init((dtype**)vals.data(), vals.size()); in_loss_arr.init((dtype**)in_losses.data(), in_losses.size()); int block_count = DefaultBlockCount(count * dim); hipLaunchKernelGGL(( KernelDropoutBackward), dim3(block_count), dim3(TPB), 0, 0, (const dtype**)loss_arr.value, (const dtype**)val_arr.value, count, dim, drop_mask, drop_factor, in_loss_arr.value); } __global__ void KernelCopyForUniNodeForward(const dtype** xs, const dtype* b, dtype* xs_dest, dtype* b_dest, int count, int x_len, int b_len, bool use_b) { int index = blockIdx.x * blockDim.x + threadIdx.x; int step = gridDim.x * blockDim.x; int x_total_len = count * x_len; int b_total_len = count * b_len; for (int i = index; i < x_total_len + b_total_len; i += step) { if (i < x_total_len) { int len_i = i / count; int count_i = i % count; xs_dest[i] = xs[count_i][len_i]; } else if (use_b) { int b_i = i - x_total_len; int len_i = b_i / count; b_dest[b_i] = b[len_i]; } } } void CopyForUniNodeForward(const std::vector<dtype*> &xs, const dtype* b, dtype* xs_dest, dtype* b_dest, int count, int x_len, int b_len, bool use_b) { NumberPointerArray x_arr; x_arr.init((dtype**)xs.data(), xs.size()); int len = x_len + b_len; int block_count = ::min((count * len - 1 + TPB) / TPB, 56); hipLaunchKernelGGL(( KernelCopyForUniNodeForward), dim3(block_count), dim3(TPB), 0, 0, (const dtype**)x_arr.value, (const dtype*)b, xs_dest, b_dest, count, x_len, b_len, use_b); } __global__ void KernelCopyForBiNodeForward(const dtype **x1s, const dtype **x2s, const dtype *b, dtype *x1s_dest, dtype *x2s_dest, dtype *b_dest, int count, int x1_len, int x2_len, int b_len) { int index = DeviceDefaultIndex(); int step = DeviceDefaultStep(); int x1_total_len = count * x1_len; int x2_total_len = count * x2_len; int b_total_len = count * b_len; int total_len = x1_total_len + x2_total_len + b_total_len; for (int i = index; i < total_len; i += step) { if (i < x2_total_len) { int len_i = i / count; int count_i = i % count; x2s_dest[i] = x2s[count_i][len_i]; } else if (i >= x2_total_len && i < x1_total_len + x2_total_len) { int len_i = (i - x2_total_len) / count; int count_i = (i - x2_total_len) % count; x1s_dest[i - x2_total_len] = x1s[count_i][len_i]; } else { int b_i = (i - x1_total_len - x2_total_len); int len_i = b_i / count; b_dest[b_i] = b[len_i]; } } } void CopyForBiNodeForward(const std::vector<dtype*>& x1s, const std::vector<dtype *>& x2s, const dtype *b, 
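// CopyForUniNodeForward above packs the per-node inputs and the bias into contiguous column-major
// buffers so one GEMM can process the whole batch: xs_dest[len_i * count + count_i] = xs[count_i][len_i],
// and when use_b the bias entry b[len_i] is replicated across all count columns of b_dest.
// Worked example (illustrative): with count = 2 and x_len = 3, xs_dest holds
//   x0[0], x1[0], x0[1], x1[1], x0[2], x1[2]
// i.e. the two batch elements are interleaved feature by feature.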
dtype *x1s_dest, dtype *x2s_dest, dtype *b_dest, int count, int x1_len, int x2_len, int b_len) { int len = x1_len + x2_len + b_len; int block_count = DefaultBlockCount(count * len); NumberPointerArray x1_arr, x2_arr; x1_arr.init((dtype**)x1s.data(), x1s.size()); x2_arr.init((dtype**)x2s.data(), x2s.size()); hipLaunchKernelGGL(( KernelCopyForBiNodeForward), dim3(block_count), dim3(TPB), 0, 0, (const dtype**)x1_arr.value, (const dtype**)x2_arr.value, b, x1s_dest, x2s_dest, b_dest, count, x1_len, x2_len, b_len); } void MatrixMultiplyMatrix(dtype *W, dtype *x, dtype *y, int row, int col, int count, bool useb, bool should_x_transpose, bool should_W_transpose) { hipblasHandle_t &handle = GetCublasHandle(); float alpha = 1; float beta = useb? 1 : 0; hipblasOperation_t x_op = should_x_transpose ? HIPBLAS_OP_T : HIPBLAS_OP_N; int ldx = should_x_transpose ? col : count; hipblasOperation_t W_op = should_W_transpose ? HIPBLAS_OP_T : HIPBLAS_OP_N; int ldw = should_W_transpose ? row : col; #if USE_FLOAT CallCublas(hipblasSgemm(handle, x_op, W_op, count, row, col, &alpha, x, ldx, W, ldw, &beta, y, count)); #else CallCublas(hipblasDgemm(handle, x_op, W_op, count, row, col, &alpha, x, ldx, W, ldw, &beta, y, count)); #endif } __global__ void KernelVerify(dtype *host, dtype *device, int len, const char *message, bool *success) { int index = DeviceDefaultIndex(); if (index < len) { dtype loss = host[index] - device[index]; if (DeviceAbs(loss) > 0.0001) { *success = false; printf("KernelVerify %s: host:%f device:%f loss:%f index:%d\n", message, host[index], device[index], loss, index); KernelPrintLine("KernelVerify: host:%f device:%f loss:%f", host[index], device[index], loss); } } } bool Verify(dtype *host, dtype *device, int len, const char* message) { NumberArray arr; arr.init(host, len); int block_count = (len + TPB - 1) / TPB; char *m = NULL; CallCuda(MemoryPool::Ins().Malloc((void**)&m, (strlen(message) + 1) * sizeof(char))); CallCuda(MyCudaMemcpy(m, message, (strlen(message) + 1) * sizeof(char), hipMemcpyHostToDevice)); bool success = true; bool *dev_success = NULL; CallCuda(MemoryPool::Ins().Malloc((void**)&dev_success, 8 * sizeof(bool))); CallCuda(MyCudaMemcpy(dev_success, &success, sizeof(bool), hipMemcpyHostToDevice)); hipLaunchKernelGGL(( KernelVerify), dim3(block_count), dim3(TPB), 0, 0, arr.value, device, len, m, dev_success); CallCuda(MyCudaMemcpy(&success, dev_success, sizeof(bool), hipMemcpyDeviceToHost)); MemoryPool::Ins().Free(dev_success); MemoryPool::Ins().Free(m); hipDeviceSynchronize(); cudaPrintfDisplay(stdout, true); return success; } __global__ void KernelVerify(bool *host, bool *device, int len, const char *message, bool *success) { int index = DeviceDefaultIndex(); if (index < len) { if (host[index] != device[index]) { *success = false; printf("KernelVerify %s: host:%d device:%d \n", message, host[index], device[index]); KernelPrintLine("KernelVerify: host:%d device:%d", host[index], device[index]); } } } bool Verify(bool *host, bool *device, int len, const char* message) { BoolArray arr; arr.init(host, len); int block_count = (len + TPB - 1) / TPB; char *m = NULL; CallCuda(MemoryPool::Ins().Malloc((void**)&m, (strlen(message) + 1) * sizeof(char))); CallCuda(MyCudaMemcpy(m, message, (strlen(message) + 1) * sizeof(char), hipMemcpyHostToDevice)); bool success = true; bool *dev_success = NULL; CallCuda(MemoryPool::Ins().Malloc((void**)&dev_success, 8 * sizeof(bool))); CallCuda(MyCudaMemcpy(dev_success, &success, sizeof(bool), hipMemcpyHostToDevice)); hipLaunchKernelGGL(( KernelVerify), 
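// MatrixMultiplyMatrix above drives hipblasSgemm/hipblasDgemm in cuBLAS column-major convention:
// it computes y(count x row) = x(count x col) * W(col x row) with ldc = count, and beta = 1 when useb
// so that a bias already broadcast into the output buffer (see CopyForUniNodeForward) is accumulated.
// A hypothetical call for a batch of 16 vectors through a 100 -> 50 linear layer (dimensions illustrative):
//   MatrixMultiplyMatrix(W, x, y, 50, 100, 16, true, false, false);  // row = 50, col = 100, count = 16
// with y pre-filled with the broadcast bias when useb is true.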
dim3(block_count), dim3(TPB), 0, 0, arr.value, device, len, m, dev_success); CallCuda(MyCudaMemcpy(&success, dev_success, sizeof(bool), hipMemcpyDeviceToHost)); MemoryPool::Ins().Free(dev_success); MemoryPool::Ins().Free(m); hipDeviceSynchronize(); cudaPrintfDisplay(stdout, true); return success; } __global__ void KernelVerify(int *host, int *device, int len, const char *message, bool *success) { int index = DeviceDefaultIndex(); if (index < len) { if (host[index] != device[index]) { *success = false; printf("KernelVerify %s: host:%d device:%d \n", message, host[index], device[index]); KernelPrintLine("KernelVerify: host:%d device:%d", host[index], device[index]); } } } bool Verify(int *host, int *device, int len, const char* message) { IntArray arr; arr.init(host, len); int block_count = (len + TPB - 1) / TPB; char *m = NULL; CallCuda(MemoryPool::Ins().Malloc((void**)&m, (strlen(message) + 1) * sizeof(char))); CallCuda(MyCudaMemcpy(m, message, (strlen(message) + 1) * sizeof(char), hipMemcpyHostToDevice)); bool success = true; bool *dev_success = NULL; CallCuda(MemoryPool::Ins().Malloc((void**)&dev_success, sizeof(bool))); CallCuda(MyCudaMemcpy(dev_success, &success, sizeof(bool), hipMemcpyHostToDevice)); hipLaunchKernelGGL(( KernelVerify), dim3(block_count), dim3(TPB), 0, 0, arr.value, device, len, m, dev_success); CallCuda(MyCudaMemcpy(&success, dev_success, sizeof(bool), hipMemcpyDeviceToHost)); MemoryPool::Ins().Free(dev_success); MemoryPool::Ins().Free(m); hipDeviceSynchronize(); cudaPrintfDisplay(stdout, true); return success; } hipError_t MemoryPool::Malloc(void **p, int size) { assert(*p == NULL); Profiler &profiler = Profiler::Ins(); profiler.BeginEvent("Malloc"); #if DEVICE_MEMORY == 0 CallCnmem(cnmemMalloc(p, size, NULL)); profiler.EndEvent(); return hipSuccess; #elif DEVICE_MEMORY == 1 hipError_t r = hipMalloc(p, size); profiler.EndEvent(); return r; #else int fit_size = 1; int n = 0; while (fit_size < size) { fit_size <<= 1; ++n; } hipError_t status = hipSuccess; if (free_blocks_.at(n).empty()) { status = hipMalloc(p, fit_size); CallCuda(status); MemoryBlock block(*p, fit_size); busy_blocks_.insert(std::make_pair(*p, block)); } else { int this_size = free_blocks_.at(n).size(); MemoryBlock &block = free_blocks_.at(n).at(this_size - 1); *p = block.p; busy_blocks_.insert(std::make_pair(block.p, block)); free_blocks_.at(n).resize(this_size - 1); } profiler.EndEvent(); return status; #endif } hipError_t MemoryPool::Free(void *p) { Profiler &profiler = Profiler::Ins(); profiler.BeginEvent("Free"); #if DEVICE_MEMORY == 0 CallCnmem(cnmemFree(p, NULL)); profiler.EndEvent(); #elif DEVICE_MEMORY == 1 hipError_t r = hipFree(p); profiler.EndEvent(); return r; #else auto it = busy_blocks_.find(p); if (it == busy_blocks_.end()) { abort(); } int size = it->second.size; int n = 0; while (size > 1) { size >>= 1; ++n; } free_blocks_.at(n).push_back(it->second); busy_blocks_.erase(it); profiler.EndEvent(); return hipSuccess; #endif } void Profiler::EndCudaEvent() { hipDeviceSynchronize(); EndEvent(); } __global__ void KernelCalculateLtyForUniBackward(ActivatedEnum activated, const dtype *const*ly, const dtype *ty, const dtype *y, const dtype *drop_mask, dtype drop_factor, dtype *lty, int count, int dim) { int index = DeviceDefaultIndex(); int step = DeviceDefaultStep(); int len = count * dim; for (int i = index; i < len; i += step) { int count_i = i % count; int dim_i = i / count; dtype yi = y[i]; if (drop_factor > 0.0f && drop_mask[i] < drop_factor) { lty[i] = 0.0f; } else { dtype lyv = 
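// The pooled MemoryPool build above (the default DEVICE_MEMORY branch) rounds every request up to the
// next power of two and keeps one free list per exponent, so Free() can recover the bucket purely from
// the stored block size. Worked example (illustrative): a 3000-byte Malloc scans fit_size = 1, 2, ..., 4096,
// giving n = 12, so the block is served from or returned to free_blocks_[12]; a later 3000-byte request
// reuses that 4096-byte block without another hipMalloc.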
ly[count_i][dim_i]; if (activated == ActivatedEnum::TANH) { lty[i] = lyv * cuda_dtanh(yi); } else if (activated == ActivatedEnum::SIGMOID) { lty[i] = lyv * cuda_dsigmoid(yi); } else if (activated == ActivatedEnum::RELU) { lty[i] = lyv * cuda_drelu(ty[i]); } else if (activated == ActivatedEnum::LEAKY_RELU) { lty[i] = lyv * cuda_dleaky_relu(ty[i]); } else if (activated == ActivatedEnum::SELU) { lty[i] = lyv * cuda_dselu(ty[i], yi); } else { printf("KernelCalculateLtyForUniBackward error\n"); } } } } void CalculateLtyForUniBackward(ActivatedEnum activated, const std::vector<dtype*> &ly, const dtype *ty, const dtype *y, const dtype *drop_mask, dtype drop_factor, dtype *lty, int count, int dim) { if (drop_factor < 0) { drop_factor = 0; } NumberPointerArray ly_arr; ly_arr.init((dtype**)ly.data(), ly.size()); int block_count = ::min(BLOCK_COUNT, (count * dim + TPB - 1) / TPB); hipLaunchKernelGGL(( KernelCalculateLtyForUniBackward), dim3(block_count), dim3(TPB), 0, 0, activated, ly_arr.value, ty, y, drop_mask, drop_factor, lty, count, dim); hipDeviceSynchronize(); } __global__ void KernelCalculateLyForLinearBackward(const dtype *const*ly_vec, dtype *ly, int count, int dim) { int index = DeviceDefaultIndex(); int step = DeviceDefaultStep(); int len = count * dim; for (int i = index; i < len; i += step) { int count_i = i % count; int dim_i = i / count; ly[i] = ly_vec[count_i][dim_i]; } } void CalculateLyForLinearBackward(const std::vector<dtype*> &ly_vec, dtype *ly, int count, int dim) { NumberPointerArray ly_arr; ly_arr.init((dtype**)ly_vec.data(), ly_vec.size()); int block_count = ::min(BLOCK_COUNT, (count * dim + TPB - 1) / TPB); hipLaunchKernelGGL(( KernelCalculateLyForLinearBackward), dim3(block_count), dim3(TPB), 0, 0, ly_arr.value, ly, count, dim); } __global__ void KernelAddLtyToParamBiasAndAddLxToInputLossesForUniBackward( const dtype *lty, const dtype *lx, dtype *b, dtype **losses, int count, int out_dim, int in_dim, dtype *block_sums, int *global_block_count, bool use_b) { __shared__ volatile dtype shared_arr[TPB]; int count_i = blockIdx.y * blockDim.x + threadIdx.x; int dim_i = blockIdx.x; if (dim_i < out_dim) { if (use_b) { if (threadIdx.x == 0 && blockIdx.y == 0) { global_block_count[dim_i] = 0; } int lty_index = dim_i * count + count_i; shared_arr[threadIdx.x] = count_i < count ? 
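// KernelAddLtyToParamBiasAndAddLxToInputLossesForUniBackward reduces lty over the batch to form the
// bias gradient: each TPB-thread block tree-reduces its slice in shared memory, stores a partial in
// block_sums, and the last block per dimension (detected with atomicAdd on global_block_count) adds the
// partials into b[dim_i]. Per output dimension j this computes (notation illustrative):
//   db[j] += sum over batch i of lty[j * count + i]
// while grid columns with dim_i >= out_dim scatter lx back into the per-node input losses instead.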
lty[lty_index] : 0.0f; __syncthreads(); for (int i = (TPB >> 1); i > 0; i>>=1) { if (threadIdx.x < i) { shared_arr[threadIdx.x] += shared_arr[threadIdx.x + i]; } __syncthreads(); } if (threadIdx.x == 0) { block_sums[gridDim.y * blockIdx.x + blockIdx.y] = shared_arr[0]; if (atomicAdd(global_block_count + dim_i, 1) == gridDim.y - 1) { dtype sum = 0.0; for (int i = 0; i < gridDim.y; ++i) { sum += block_sums[gridDim.y * blockIdx.x + i]; } DeviceAtomicAdd(b + dim_i, sum); } } } } else { if (count_i < count) { dim_i -= out_dim; int lx_index = dim_i * count + count_i; DeviceAtomicAdd(losses[count_i] + dim_i, lx[lx_index]); } } } void AddLtyToParamBiasAndAddLxToInputLossesForUniBackward(const dtype *lty, const dtype *lx, dtype *b, std::vector<dtype*> &losses, int count, int out_dim, int in_dim, bool use_b) { int block_y = (count - 1 + TPB) / TPB; dim3 block_dim(out_dim + in_dim, block_y, 1); NumberPointerArray loss_arr; loss_arr.init(losses.data(), count); Tensor1D block_sums; block_sums.init(block_y * out_dim); IntArray global_block_count_arr; global_block_count_arr.init(out_dim); hipLaunchKernelGGL(( KernelAddLtyToParamBiasAndAddLxToInputLossesForUniBackward), dim3(block_dim), dim3(TPB), 0, 0, lty, lx, b, loss_arr.value, count, out_dim, in_dim, block_sums.value, global_block_count_arr.value, use_b); } __global__ void KernelAddLtyToParamBiasAndAddLxToInputLossesForBiBackward( const dtype *lty, const dtype *lx1, const dtype *lx2, dtype *b, dtype **losses1, dtype **losses2, int count, int out_dim, int in_dim1, int in_dim2, dtype *block_sums, int *global_block_count) { __shared__ volatile dtype shared_arr[TPB]; int count_i = blockIdx.y * blockDim.x + threadIdx.x; int dim_i = blockIdx.x; if (dim_i < out_dim) { if (threadIdx.x == 0 && blockIdx.y == 0) { global_block_count[dim_i] = 0; } int lty_index = dim_i * count + count_i; shared_arr[threadIdx.x] = count_i < count ? 
lty[lty_index] : 0.0f; __syncthreads(); for (int i = (TPB >> 1); i > 0; i>>=1) { if (threadIdx.x < i) { shared_arr[threadIdx.x] += shared_arr[threadIdx.x + i]; } __syncthreads(); } if (threadIdx.x == 0) { block_sums[gridDim.y * blockIdx.x + blockIdx.y] = shared_arr[0]; if (atomicAdd(global_block_count + dim_i, 1) == gridDim.y - 1) { dtype sum = 0.0; for (int i = 0; i < gridDim.y; ++i) { sum += block_sums[gridDim.y * blockIdx.x + i]; } DeviceAtomicAdd(b + dim_i, sum); } } } else if (dim_i < out_dim + in_dim1) { if (count_i < count) { dim_i -= out_dim; int lx_index = dim_i * count + count_i; DeviceAtomicAdd(losses1[count_i] + dim_i, lx1[lx_index]); } } else { if (count_i < count) { dim_i -= (out_dim + in_dim1); int lx_index = dim_i * count + count_i; DeviceAtomicAdd(losses2[count_i] + dim_i, lx2[lx_index]); } } } void AddLtyToParamBiasAndAddLxToInputLossesForBiBackward(const dtype *lty, const dtype *lx1, const dtype *lx2, dtype *b, std::vector<dtype*> &losses1, std::vector<dtype*> &losses2, int count, int out_dim, int in_dim1, int in_dim2) { int block_y = (count - 1 + TPB) / TPB; dim3 block_dim(out_dim + in_dim1 + in_dim2, block_y, 1); NumberPointerArray loss1_arr; loss1_arr.init(losses1.data(), count); NumberPointerArray loss2_arr; loss2_arr.init(losses2.data(), count); Tensor1D block_sums; block_sums.init(block_y * out_dim); IntArray global_block_count_arr; global_block_count_arr.init(out_dim); hipLaunchKernelGGL(( KernelAddLtyToParamBiasAndAddLxToInputLossesForBiBackward), dim3(block_dim), dim3(TPB), 0, 0, lty, lx1, lx2, b, loss1_arr.value, loss2_arr.value, count, out_dim, in_dim1, in_dim2, block_sums.value, global_block_count_arr.value); } constexpr int MAX_BATCH_COUNT = 1000000; __global__ void KernelInitCurandStates(hiprandState_t *states) { int index = blockIdx.x * blockDim.x + threadIdx.x; int step = gridDim.x * blockDim.x; for (int i = index; i < MAX_BATCH_COUNT; i += step) { hiprand_init(0, i, 0, &states[i]); } } hiprandState_t *GetCurandStates() { static hiprandState_t *states; if (states == NULL) { MemoryPool &pool = MemoryPool::Ins(); CallCuda(pool.Malloc((void**)&states, sizeof(hiprandState_t) * MAX_BATCH_COUNT)); hipLaunchKernelGGL(( KernelInitCurandStates), dim3(BLOCK_COUNT), dim3(TPB), 0, 0, states); } return states; } hiprandGenerator_t &GetGenerator() { static hiprandGenerator_t gen; static bool init; if (!init) { CallCurand(hiprandCreateGenerator(&gen, HIPRAND_RNG_PSEUDO_DEFAULT)); CallCurand(hiprandSetPseudoRandomGeneratorSeed(gen, 0)); init = true; } return gen; } void CalculateDropoutMask(dtype drop_factor, int count, int dim, dtype* mask) { hiprandGenerator_t &gen = GetGenerator(); CallCurand(hiprandGenerateUniform(gen, mask, count * dim)); } __global__ void KernelConcatForward(dtype **ins, int *in_dims, dtype **outs, bool on_training, const dtype *drop_mask, dtype drop_factor, int count, int in_count, int out_dim) { int index = DeviceDefaultIndex(); int step = DeviceDefaultStep(); for (int i = index; i < out_dim * count; i += step) { int out_dim_i = i % out_dim; int count_i = i / out_dim; if (on_training) { if (drop_factor > 0.0f && drop_mask[out_dim_i * count + count_i] < drop_factor) { outs[count_i][out_dim_i] = 0.0f; } else { int in_dim_sum = 0; int last_in_dim_sum; int offset_j = 0; for (int j = 0; j < in_count; ++j) { last_in_dim_sum = in_dim_sum; in_dim_sum += in_dims[j]; offset_j = j; if (out_dim_i < in_dim_sum) { break; } } int in_dim_i = out_dim_i - last_in_dim_sum; dtype v = ins[count_i * in_count + offset_j][in_dim_i]; outs[count_i][out_dim_i] = v; } } 
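// CalculateDropoutMask above fills mask with count*dim uniform random samples; a unit counts as dropped
// when its sample is below drop_factor. Training paths zero dropped units and keep the rest unscaled,
// while inference paths multiply by (1 - drop_factor), i.e. classic non-inverted dropout.
// Hypothetical usage for a batch of 8 vectors of dim 128 (sizes illustrative):
//   NumberArray mask; mask.init(8 * 128);
//   CalculateDropoutMask(0.2, 8, 128, mask.value);  // then pass mask.value as drop_mask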
else { int in_dim_sum = 0; int last_in_dim_sum; int offset_j = 0; for (int j = 0; j < in_count; ++j) { last_in_dim_sum = in_dim_sum; in_dim_sum += in_dims[j]; offset_j = j; if (out_dim_i < in_dim_sum) { break; } } int in_dim_i = out_dim_i - last_in_dim_sum; dtype v = ins[count_i * in_count + offset_j][in_dim_i]; outs[count_i][out_dim_i] = v * (1 - drop_factor); } } } void ConcatForward(const std::vector<dtype*> &in_vals, const std::vector<int> &in_dims, std::vector<dtype*> &vals, bool on_training, const dtype *drop_mask, dtype drop_factor, int count, int in_count, int out_dim) { assert(drop_factor < 1); if (drop_factor < 0) { drop_factor = 0; } int len = count * out_dim; int block_count = ::min(BLOCK_COUNT, (len - 1 + TPB) / TPB); NumberPointerArray in_val_arr, val_arr; in_val_arr.init((dtype**)in_vals.data(), in_vals.size()); val_arr.init((dtype**)vals.data(), vals.size()); IntArray in_dim_arr; in_dim_arr.init((int*)in_dims.data(), in_dims.size()); hipLaunchKernelGGL(( KernelConcatForward), dim3(block_count), dim3(TPB), 0, 0, in_val_arr.value, in_dim_arr.value, val_arr.value, on_training, drop_mask, drop_factor, count, in_count, out_dim); } __global__ void KernelConcatBackward(dtype** in_losses, int *in_dims, dtype **out_losses, const dtype *drop_mask, dtype drop_factor, int count, int in_count, int out_dim) { int index = DeviceDefaultIndex(); int step = DeviceDefaultStep(); for (int i = index; i < out_dim * count; i += step) { int out_dim_i = i % out_dim; int count_i = i / out_dim; dtype dropout = drop_factor > 0 ? drop_mask[out_dim_i * count + count_i] : 1; if (dropout > drop_factor) { int in_dim_sum = 0; int last_in_dim_sum; int offset_j = 0; for (int j = 0; j < in_count; ++j) { last_in_dim_sum = in_dim_sum; in_dim_sum += in_dims[j]; offset_j = j; if (out_dim_i < in_dim_sum) { break; } } int in_dim_i = out_dim_i - last_in_dim_sum; DeviceAtomicAdd(in_losses[count_i * in_count + offset_j] + in_dim_i, out_losses[count_i][out_dim_i]); } } } void ConcatBackward(const std::vector<dtype*> &in_losses, const std::vector<int> &in_dims, std::vector<dtype*> &losses, const dtype *drop_mask, dtype drop_factor, int count, int in_count, int out_dim) { assert(drop_factor < 1); if (drop_factor < 0) { drop_factor = 0; } int len = count * out_dim; int block_count = ::min(BLOCK_COUNT, (len - 1 + TPB) / TPB); NumberPointerArray in_loss_arr, loss_arr; in_loss_arr.init((dtype**)in_losses.data(), in_losses.size()); loss_arr.init((dtype**)losses.data(), losses.size()); IntArray in_dim_arr; in_dim_arr.init((int*)in_dims.data(), in_dims.size()); hipLaunchKernelGGL(( KernelConcatBackward), dim3(block_count), dim3(TPB), 0, 0, in_loss_arr.value, in_dim_arr.value, loss_arr.value, drop_mask, drop_factor, count, in_count, out_dim); } __global__ void KernelMemset(dtype *p, int len, dtype value) { int index = DeviceDefaultIndex(); int step = DeviceDefaultStep(); for (int i = index; i < len; i+= step) { p[i] = value; } } void Memset(dtype *p, int len, dtype value) { int block_count = ::min(BLOCK_COUNT, (len - 1 + TPB) / TPB); hipLaunchKernelGGL(( KernelMemset), dim3(block_count), dim3(TPB), 0, 0, p, len, value); } __global__ void KernelMemset(bool *p, int len, bool value) { int index = DeviceDefaultIndex(); int step = DeviceDefaultStep(); for (int i = index; i < len; i+= step) { p[i] = value; } } void Memset(bool *p, int len, bool value) { int block_count = ::min(BLOCK_COUNT, (len - 1 + TPB) / TPB); hipLaunchKernelGGL(( KernelMemset), dim3(block_count), dim3(TPB), 0, 0, p, len, value); } void *Malloc(int size) { void *p; 
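// KernelConcatForward/KernelConcatBackward above locate, for each flat output position, the input
// segment it falls into by walking the in_dims prefix sums. Worked example (illustrative): with
// in_dims = {3, 2, 4} and out_dim_i = 4 the loop stops at j = 1 (prefix sums 3, then 5), so the value
// comes from input 1 at offset in_dim_i = 4 - 3 = 1; the backward kernel uses the same mapping to add
// out_losses[count_i][out_dim_i] into in_losses[count_i * in_count + j] + in_dim_i via DeviceAtomicAdd.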
CallCuda(hipMalloc(&p, size)); return p; } void Memcpy(void *dest, void *src, int size, hipMemcpyKind kind) { CallCuda(hipMemcpy(dest, src, size, kind)); } __global__ void KernelBatchMemset(dtype **p, int count, int dim, dtype value) { int index = DeviceDefaultIndex(); int step = DeviceDefaultStep(); for (int i = index; i < dim * count ; i += step) { int count_i = i / dim; int dim_i = i % dim; p[count_i][dim_i] = value; } } void BatchMemset(const std::vector<dtype*> &vec, int count, int dim, dtype value) { int block_count = (count * dim -1 + TPB) / TPB; block_count = ::min(block_count, BLOCK_COUNT); NumberPointerArray vec_arr; vec_arr.init((dtype**)vec.data(), vec.size()); hipLaunchKernelGGL(( KernelBatchMemset), dim3(block_count), dim3(TPB), 0, 0, vec_arr.value, count, dim, value); } __global__ void KernelLookupForward(const int *xids, const dtype *vocabulary, bool on_training, const dtype *drop_mask, dtype drop_factor, int count, int dim, dtype **vals) { int index = DeviceDefaultIndex(); int step = DeviceDefaultStep(); for (int i = index; i < count * dim; i += step) { int count_i = i / dim; int dim_i = i % dim; if (on_training) { if (drop_factor > 0 && drop_mask[dim_i * count + count_i] < drop_factor) { vals[count_i][dim_i] = 0.0f; } else { int xid = xids[count_i]; if (xid >= 0) { int voc_i = xid * dim + dim_i; vals[count_i][dim_i] = vocabulary[voc_i]; } else { vals[count_i][dim_i] = 0.0f; } } } else { int xid = xids[count_i]; if (xid >= 0) { int voc_i = xid * dim + dim_i; vals[count_i][dim_i] = vocabulary[voc_i] * (1 - drop_factor); } else { vals[count_i][dim_i] = 0.0f; } } } } void LookupForward(const std::vector<int> &xids, const dtype *vocabulary, bool on_training, const dtype *drop_mask, dtype drop_factor, int count, int dim, std::vector<dtype*> &vals) { if (drop_factor < 0) { drop_factor = 0; } int block_count = ::min(BLOCK_COUNT, (count * dim - 1 + TPB) / TPB); IntArray xid_arr; xid_arr.init((int*)xids.data(), xids.size()); NumberPointerArray val_arr; val_arr.init((dtype**)vals.data(), vals.size()); hipLaunchKernelGGL(( KernelLookupForward), dim3(block_count), dim3(TPB), 0, 0, xid_arr.value, vocabulary, on_training, drop_mask, drop_factor, count, dim, const_cast<dtype**>(val_arr.value)); } __global__ void KernelLookupBackward(const int *xids, int unknown_id, bool fine_tune, const dtype** losses, const dtype *drop_mask, dtype drop_factor, int count, int dim, dtype *grad, bool *indexers) { int index = DeviceDefaultIndex(); int step = DeviceDefaultStep(); for (int i = index; i < count * dim; i += step) { int count_i = i / dim; int dim_i = i % dim; int xid = xids[count_i]; if (xid == unknown_id || fine_tune) { assert(xid >= 0); if (dim_i == 0) { indexers[xid] = true; } dtype dropout = drop_factor > 0 ? 
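// LookupForward above gathers embedding rows: for xid >= 0, vals[count_i][dim_i] = vocabulary[xid * dim + dim_i]
// (scaled by (1 - drop_factor) at inference), while a negative xid acts as padding and yields zeros.
// The backward kernel here only accumulates gradients when fine_tune is set or the word is the unknown
// id, marking indexers[xid] so the optimizer can skip untouched rows. Per-element sketch (illustrative):
//   if (kept_by_dropout) DeviceAtomicAdd(grad + xid * dim + dim_i, losses[count_i][dim_i]);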
drop_mask[dim_i * count + count_i] : 1; if (drop_factor < dropout) { DeviceAtomicAdd(grad + xid * dim + dim_i, losses[count_i][dim_i]); } } } } void LookupBackward(const std::vector<int> &xids, int unknown_id, bool fine_tune, const std::vector<dtype*> &losses, const dtype *drop_mask, dtype drop_factor, int count, int dim, dtype *grad, bool *indexers) { int block_count = ::min((count * dim - 1 + TPB) / TPB, BLOCK_COUNT); IntArray pl_arr; pl_arr.init((int*)xids.data(), xids.size()); IntArray xid_arr; xid_arr.init((int*)pl_arr.value, xids.size()); NumberPointerArray loss_arr; loss_arr.init((dtype**)losses.data(), losses.size()); hipLaunchKernelGGL(( KernelLookupBackward), dim3(block_count), dim3(TPB), 0, 0, const_cast<const int *>(xid_arr.value), unknown_id, fine_tune, const_cast<const dtype**>(loss_arr.value), drop_mask, drop_factor, count, dim, grad, indexers); } __global__ void KernelPoolForward(PoolingEnum pooling, dtype **ins, int *in_counts, int max_in_count, dtype **outs, int count, int dim, int* hit_inputs) { __shared__ volatile extern dtype pool_shared_arr[]; volatile dtype* shared_indexers = pool_shared_arr + blockDim.x; int batch_i = blockIdx.y; int in_count = in_counts[batch_i]; int in_count_i = threadIdx.x; int dim_i = blockIdx.x; if (in_count_i < in_count) { pool_shared_arr[threadIdx.x] = ins[batch_i * max_in_count + in_count_i][dim_i]; } else { pool_shared_arr[threadIdx.x] = pooling == PoolingEnum::MAX ? -INFINITY : INFINITY; } shared_indexers[threadIdx.x] = threadIdx.x; __syncthreads(); for (int i = (blockDim.x >> 1); i > 0;i >>=1) { if (threadIdx.x < i) { int plus_i = threadIdx.x + i; if (pooling == PoolingEnum::MAX) { if (pool_shared_arr[threadIdx.x] < pool_shared_arr[plus_i]) { pool_shared_arr[threadIdx.x] = pool_shared_arr[plus_i]; shared_indexers[threadIdx.x] = shared_indexers[plus_i]; } } else { if (pool_shared_arr[threadIdx.x] > pool_shared_arr[plus_i]) { pool_shared_arr[threadIdx.x] = pool_shared_arr[plus_i]; shared_indexers[threadIdx.x] = shared_indexers[plus_i]; } } } __syncthreads(); } if (threadIdx.x == 0) { hit_inputs[batch_i * dim + dim_i] = shared_indexers[0]; outs[batch_i][dim_i] = pool_shared_arr[0]; } } void PoolForward(PoolingEnum pooling, const std::vector<dtype*> &in_vals, std::vector<dtype*> &vals, int count, const std::vector<int> &in_counts, int dim, int *hit_inputs) { int max_in_count = *std::max_element(in_counts.begin(), in_counts.end()); int thread_count = 8; while (max_in_count > thread_count) { thread_count <<= 1; } dim3 block_dim(dim, count, 1); NumberPointerArray in_val_arr; in_val_arr.init((dtype**)in_vals.data(), in_vals.size()); NumberPointerArray val_arr; val_arr.init((dtype**)vals.data(), vals.size()); IntArray in_count_arr; in_count_arr.init((int*)in_counts.data(), in_counts.size()); hipLaunchKernelGGL(( KernelPoolForward), dim3(block_dim), dim3(thread_count), thread_count * 2 * sizeof(dtype), 0, pooling, in_val_arr.value, in_count_arr.value, max_in_count, val_arr.value, count, dim, hit_inputs); } __global__ void KernelPoolBackward(const dtype ** losses, const int *hit_inputs, int max_in_count, int count, int dim, dtype **in_losses) { int index = DeviceDefaultIndex(); int step = DeviceDefaultStep(); for (int i = index; i < dim * count; i += step) { int count_i = i / dim; int dim_i = i % dim; int input_i = hit_inputs[i]; dtype loss = losses[count_i][dim_i]; DeviceAtomicAdd(in_losses[count_i * max_in_count + input_i] + dim_i, loss); } } void PoolBackward(const std::vector<dtype*> &losses, std::vector<dtype*> &in_losses, const 
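// KernelPoolForward above runs a shared-memory arg-reduction per (batch, dim) pair: pool_shared_arr
// carries candidate values, shared_indexers tracks which input produced each surviving value, and the
// winner's index is written to hit_inputs[batch_i * dim + dim_i] (max for MAX pooling, min otherwise).
// PoolBackward then routes each loss element straight to that winner:
//   in_losses[count_i * max_in_count + hit_inputs[i]][dim_i] += losses[count_i][dim_i]
// The launcher rounds the thread count up to a power of two >= max_in_count so the halving loop is valid.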
std::vector<int> &in_counts, const int *hit_inputs, int count, int dim) { NumberPointerArray loss_arr, in_loss_arr; loss_arr.init((dtype**)losses.data(), losses.size()); in_loss_arr.init((dtype**)in_losses.data(), in_losses.size()); int max_in_count = *std::max_element(in_counts.begin(), in_counts.end()); int block_count = (count * dim - 1 + TPB) / TPB; block_count = ::min(block_count, BLOCK_COUNT); hipLaunchKernelGGL(( KernelPoolBackward), dim3(block_count), dim3(TPB), 0, 0, (const dtype**)loss_arr.value, hit_inputs, max_in_count, count, dim, in_loss_arr.value); } __global__ void KernelSumPoolForward(PoolingEnum pooling, const dtype **in_vals, int count, int dim, const int *in_counts, int max_in_count, dtype **vals) { __shared__ volatile extern dtype pool_shared_arr[]; int batch_i = blockIdx.y; int in_count = in_counts[batch_i]; int in_count_i = threadIdx.x; int dim_i = blockIdx.x; if (in_count_i < in_count) { pool_shared_arr[threadIdx.x] = in_vals[batch_i * max_in_count + in_count_i][dim_i]; } else { pool_shared_arr[threadIdx.x] = 0.0f; } __syncthreads(); for (int i = (blockDim.x >> 1); i > 0;i >>=1) { if (threadIdx.x < i) { int plus_i = threadIdx.x + i; pool_shared_arr[threadIdx.x] += pool_shared_arr[plus_i]; } __syncthreads(); } if (threadIdx.x == 0) { vals[batch_i][dim_i] = pooling == PoolingEnum::SUM ? pool_shared_arr[0] : pool_shared_arr[0] / in_counts[batch_i]; } } void SumPoolForward(PoolingEnum pooling, const std::vector<dtype*> &in_vals, int count, int dim, const std::vector<int> &in_counts, std::vector<dtype*> &vals) { int max_in_count = *std::max_element(in_counts.begin(), in_counts.end()); int thread_count = 8; while (max_in_count > thread_count) { thread_count <<= 1; } dim3 block_dim(dim, count, 1); NumberPointerArray in_val_arr; in_val_arr.init((dtype**)in_vals.data(), in_vals.size()); IntArray in_count_arr; in_count_arr.init((int*)in_counts.data(), in_counts.size()); NumberPointerArray val_arr; val_arr.init((dtype**)vals.data(), vals.size()); hipLaunchKernelGGL(( KernelSumPoolForward), dim3(block_dim), dim3(thread_count), thread_count * sizeof(dtype), 0, pooling, (const dtype**)in_val_arr.value, count, dim, (const int*)in_count_arr.value, max_in_count, val_arr.value); } __global__ void KernelSumBackward(PoolingEnum pooling, const dtype **losses, const int *in_counts, int max_in_count, int count, int dim, dtype **in_losses) { int global_in_count_i = blockIdx.x * max_in_count + blockIdx.y; if (blockIdx.y < in_counts[blockIdx.x] && threadIdx.x < dim) { atomicAdd(in_losses[global_in_count_i] + threadIdx.x, pooling == PoolingEnum::SUM ? 
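// KernelSumPoolForward above and this backward kernel share one code path for SUM and AVG pooling:
// the forward divides the reduced sum by in_counts[batch_i] when pooling != SUM, and the backward
// symmetrically divides the incoming loss before scattering it to every input, i.e. for AVG pooling
//   d in_j = loss / in_count   versus   d in_j = loss   for SUM pooling.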
losses[blockIdx.x][threadIdx.x] : losses[blockIdx.x][threadIdx.x] / in_counts[blockIdx.x]); } } void SumPoolBackward(PoolingEnum pooling, const std::vector<dtype*> &losses, const std::vector<int> &in_counts, int count, int dim, std::vector<dtype*> &in_losses) { int thread_count = 8; while (thread_count < dim) { thread_count <<= 1; } int max_in_count = *std::max_element(in_counts.begin(), in_counts.end()); dim3 block_dim(count, max_in_count, 1); NumberPointerArray loss_arr; loss_arr.init((dtype**)losses.data(), losses.size()); IntArray in_count_arr; in_count_arr.init((int*)in_counts.data(), in_counts.size()); NumberPointerArray in_loss_arr; in_loss_arr.init((dtype**)in_losses.data(), in_losses.size()); hipLaunchKernelGGL(( KernelSumBackward), dim3(block_dim), dim3(thread_count), 0, 0, pooling, (const dtype**)loss_arr.value, (const int*)in_count_arr.value, max_in_count, count, dim, in_loss_arr.value); } __global__ void KernelScalarAttentionForward(const dtype** ins, const dtype **unnormeds, const int *in_counts, int max_in_count, int count, int dim, dtype **masks, dtype **vals) { __shared__ volatile extern dtype attention_shared_arr[]; volatile dtype *shared_unnormed_masks = attention_shared_arr + blockDim.x; int count_i = blockIdx.y; int in_count = in_counts[count_i]; int dim_i = blockIdx.x; int global_in_count_i = blockIdx.y * max_in_count + threadIdx.x; dtype unnormed_mask = threadIdx.x < in_count ? cuda_exp(unnormeds[global_in_count_i][0]) : 0.0f; attention_shared_arr[threadIdx.x] = unnormed_mask; shared_unnormed_masks[threadIdx.x] = unnormed_mask; __syncthreads(); for (int i = (blockDim.x >> 1); i > 0; i >>= 1) { if (threadIdx.x < i) { int plus_i = threadIdx.x + i; attention_shared_arr[threadIdx.x] += attention_shared_arr[plus_i]; } __syncthreads(); } dtype mask = threadIdx.x < in_count ? shared_unnormed_masks[threadIdx.x] / attention_shared_arr[0] : 0.0f; if (threadIdx.x < in_count) { masks[blockIdx.y][blockIdx.x * max_in_count + threadIdx.x] = mask; } dtype in = threadIdx.x < in_count ? ins[global_in_count_i][dim_i] : 0.0f; attention_shared_arr[threadIdx.x] = threadIdx.x < in_count ? 
mask * in : 0.0f; __syncthreads(); for (int i = (blockDim.x >> 1); i > 0; i >>= 1) { if (threadIdx.x < i) { int plus_i = threadIdx.x + i; attention_shared_arr[threadIdx.x] += attention_shared_arr[plus_i]; } __syncthreads(); } if (threadIdx.x == 0) { vals[blockIdx.y][blockIdx.x] = attention_shared_arr[0]; } } void ScalarAttentionForward(const std::vector<dtype*> &ins, const std::vector<dtype*> &unnormeds, const std::vector<int> &in_counts, int count, int dim, std::vector<dtype*> &masks, std::vector<dtype*> &vals) { int max_in_count = *std::max_element(in_counts.begin(), in_counts.end()); int thread_count = 8; while (max_in_count > thread_count) { thread_count <<= 1; } dim3 block_dim(dim, count, 1); NumberPointerArray in_arr; in_arr.init((dtype**)ins.data(), ins.size()); NumberPointerArray unnormed_arr; unnormed_arr.init((dtype**)unnormeds.data(), unnormeds.size()); NumberPointerArray mask_arr; mask_arr.init((dtype**)masks.data(), masks.size()); NumberPointerArray val_arr; val_arr.init((dtype**)vals.data(), vals.size()); IntArray in_count_arr; in_count_arr.init((int*)in_counts.data(), in_counts.size()); hipLaunchKernelGGL(( KernelScalarAttentionForward), dim3(block_dim), dim3(thread_count), 2 * thread_count * sizeof(dtype), 0, (const dtype**)in_arr.value, (const dtype**)unnormed_arr.value, (const int*)in_count_arr.value, max_in_count, count, dim, mask_arr.value, val_arr.value); } __global__ void KernelScalarAttentionMaskAndInLoss(const dtype **losses, const dtype **in_vals, const dtype **masks, const int *in_counts, int max_in_count, int count, int dim, dtype *mask_losses, dtype **in_losses) { // blockIdx.x : in_count_i // blockIdx.y : count_i // threadIdx.x : dim_i __shared__ extern volatile dtype att_mask_loss_shared_arr[]; int in_count = in_counts[blockIdx.y]; int global_in_count_i = blockIdx.y * max_in_count + blockIdx.x; if (in_count <= blockIdx.x) { return; } for (int i = threadIdx.x; i < dim; i += blockDim.x) { atomicAdd(in_losses[global_in_count_i] + i, losses[blockIdx.y][i] * masks[blockIdx.y][max_in_count * threadIdx.x + blockIdx.x]); } att_mask_loss_shared_arr[threadIdx.x] = 0.0f; for (int i = threadIdx.x; i < dim; i += blockDim.x) { att_mask_loss_shared_arr[threadIdx.x] += losses[blockIdx.y][i] * in_vals[global_in_count_i][i]; } __syncthreads(); for (int i = (blockDim.x >> 1); i > 0; i >>= 1) { if (threadIdx.x < i) { att_mask_loss_shared_arr[threadIdx.x] += att_mask_loss_shared_arr[threadIdx.x + i]; } __syncthreads(); } if (threadIdx.x == 0) { mask_losses[global_in_count_i] = att_mask_loss_shared_arr[0]; } } void ScalarAttentionMaskAndInLoss(const dtype** losses, const dtype** in_vals, const dtype **masks, const int *in_counts, int max_in_count, int count, int dim, dtype *mask_losses, dtype **in_losses) { dim3 block_dim(max_in_count, count, 1); int thread_count = 8; if (dim >= TPB) { thread_count = TPB; } else { while (dim > thread_count) { thread_count <<= 1; } } hipLaunchKernelGGL(( KernelScalarAttentionMaskAndInLoss), dim3(block_dim), dim3(thread_count), thread_count * sizeof(dtype), 0, losses, in_vals, masks, in_counts, max_in_count, count, dim, mask_losses, in_losses); } __global__ void KernelScalarAttentionBackward(const dtype** masks, const dtype *mask_losses, const int *in_counts, int max_in_count, int count, int dim, dtype **unnormed_losses) { __shared__ volatile extern dtype shared_att_bckwrd_arr[]; int global_in_count_i = max_in_count * blockIdx.x + threadIdx.x; int in_count = in_counts[blockIdx.x]; if (threadIdx.x < in_count && blockIdx.y == 0) { 
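// KernelScalarAttentionBackward backpropagates through the softmax that produced the masks. With
// mask_j = exp(u_j) / sum_k exp(u_k) and g_j = dL/d mask_j (the mask_losses computed by
// KernelScalarAttentionMaskAndInLoss as sum_d losses[d] * in_vals[j][d]), the softmax Jacobian gives
//   dL/du_j = mask_j * (g_j - sum_k mask_k * g_k)
// which the kernel realizes as one atomicAdd of mask_j * g_j plus a shared-memory reduction whose
// weighted sum is subtracted, scaled by mask_j, in a second atomicAdd.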
atomicAdd(unnormed_losses[global_in_count_i], masks[blockIdx.x][blockIdx.y * max_in_count + threadIdx.x] * mask_losses[global_in_count_i]); } shared_att_bckwrd_arr[threadIdx.x] = threadIdx.x < in_count ? masks[blockIdx.x][blockIdx.y * max_in_count + threadIdx.x] * mask_losses[global_in_count_i] : 0.0f; __syncthreads(); for (int i = (blockDim.x >> 1); i > 0; i >>= 1) { if (threadIdx.x < i) { shared_att_bckwrd_arr[threadIdx.x] += shared_att_bckwrd_arr[threadIdx.x + i]; } __syncthreads(); } if (threadIdx.x < in_count && blockIdx.y == 0) { atomicAdd(unnormed_losses[global_in_count_i], -shared_att_bckwrd_arr[0] * masks[blockIdx.x][threadIdx.x]); } } void ScalarAttentionBackward(const std::vector<dtype*> &losses, const std::vector<dtype*> &in_vals, const std::vector<dtype*> &masks, const std::vector<int> &in_counts, int count, int dim, std::vector<dtype*> &in_losses, std::vector<dtype*> &unnormed_losses) { NumberPointerArray loss_arr, mask_arr, in_loss_arr, unnormed_loss_arr, in_val_arr; loss_arr.init((dtype**)losses.data(), losses.size()); mask_arr.init((dtype**)masks.data(), masks.size()); in_loss_arr.init((dtype**)in_losses.data(), in_losses.size()); unnormed_loss_arr.init((dtype**)unnormed_losses.data(), unnormed_losses.size()); in_val_arr.init((dtype**)in_vals.data(), in_vals.size()); IntArray in_count_arr; in_count_arr.init((int*)in_counts.data(), in_counts.size()); int max_in_count = *std::max_element(in_counts.begin(), in_counts.end()); NumberArray mask_loss_arr; mask_loss_arr.init(count * max_in_count); ScalarAttentionMaskAndInLoss((const dtype**)loss_arr.value, (const dtype**)in_val_arr.value, (const dtype**)mask_arr.value, (const int*)in_count_arr.value, max_in_count, count, dim, mask_loss_arr.value, in_loss_arr.value); dim3 block_dim(count, dim, 1); int thread_count = 8; while (thread_count < max_in_count) { thread_count <<= 1; } hipLaunchKernelGGL(( KernelScalarAttentionBackward), dim3(block_dim), dim3(thread_count), thread_count * sizeof(dtype), 0, (const dtype**)mask_arr.value, (const dtype*)mask_loss_arr.value, (const int*)in_count_arr.value, max_in_count, count, dim, unnormed_loss_arr.value); } __global__ void KernelVectorAttentionForward(const dtype** ins, const dtype **unnormeds, const int *in_counts, int max_in_count, int count, int dim, dtype **masks, dtype **vals) { __shared__ volatile extern dtype attention_shared_arr[]; volatile dtype *shared_unnormed_masks = attention_shared_arr + blockDim.x; int count_i = blockIdx.y; int in_count = in_counts[count_i]; int dim_i = blockIdx.x; int global_in_count_i = blockIdx.y * max_in_count + threadIdx.x; dtype unnormed_mask = threadIdx.x < in_count ? cuda_exp(unnormeds[global_in_count_i][blockIdx.x]) : 0.0f; attention_shared_arr[threadIdx.x] = unnormed_mask; shared_unnormed_masks[threadIdx.x] = unnormed_mask; __syncthreads(); for (int i = (blockDim.x >> 1); i > 0; i >>= 1) { if (threadIdx.x < i) { int plus_i = threadIdx.x + i; attention_shared_arr[threadIdx.x] += attention_shared_arr[plus_i]; } __syncthreads(); } dtype mask = threadIdx.x < in_count ? shared_unnormed_masks[threadIdx.x] / attention_shared_arr[0] : 0.0f; if (threadIdx.x < in_count) { masks[blockIdx.y][blockIdx.x * max_in_count + threadIdx.x] = mask; } dtype in = threadIdx.x < in_count ? ins[global_in_count_i][dim_i] : 0.0f; attention_shared_arr[threadIdx.x] = threadIdx.x < in_count ? 
mask * in : 0.0f; __syncthreads(); for (int i = (blockDim.x >> 1); i > 0; i >>= 1) { if (threadIdx.x < i) { int plus_i = threadIdx.x + i; attention_shared_arr[threadIdx.x] += attention_shared_arr[plus_i]; } __syncthreads(); } if (threadIdx.x == 0) { vals[blockIdx.y][blockIdx.x] = attention_shared_arr[0]; } } void VectorAttentionForward(const std::vector<dtype*> &ins, const std::vector<dtype*> &unnormeds, const std::vector<int> &in_counts, int count, int dim, std::vector<dtype*> &masks, std::vector<dtype*> &vals) { int max_in_count = *std::max_element(in_counts.begin(), in_counts.end()); int thread_count = 8; while (max_in_count > thread_count) { thread_count <<= 1; } dim3 block_dim(dim, count, 1); NumberPointerArray in_arr; in_arr.init((dtype**)ins.data(), ins.size()); NumberPointerArray unnormed_arr; unnormed_arr.init((dtype**)unnormeds.data(), unnormeds.size()); NumberPointerArray mask_arr; mask_arr.init((dtype**)masks.data(), masks.size()); NumberPointerArray val_arr; val_arr.init((dtype**)vals.data(), vals.size()); IntArray in_count_arr; in_count_arr.init((int*)in_counts.data(), in_counts.size()); hipLaunchKernelGGL(( KernelVectorAttentionForward), dim3(block_dim), dim3(thread_count), 2 * thread_count * sizeof(dtype), 0, (const dtype**)in_arr.value, (const dtype**)unnormed_arr.value, (const int*)in_count_arr.value, max_in_count, count, dim, mask_arr.value, val_arr.value); } __global__ void KernelVectorAttentionMaskAndInLoss(const dtype **losses, const dtype **in_vals, const dtype **masks, const int *in_counts, int max_in_count, int count, int dim, dtype **mask_losses, dtype **in_losses) { // blockIdx.x : in_count_i // blockIdx.y : count_i // threadIdx.x : dim_i int in_count = in_counts[blockIdx.y]; int global_in_count_i = blockIdx.y * max_in_count + blockIdx.x; if (in_count <= blockIdx.x) { return; } for (int i = threadIdx.x; i < dim; i += blockDim.x) { atomicAdd(in_losses[global_in_count_i] + i, losses[blockIdx.y][i] * masks[blockIdx.y][max_in_count * i + blockIdx.x]); mask_losses[blockIdx.y][max_in_count * i + blockIdx.x] = losses[blockIdx.y][i] * in_vals[global_in_count_i][i]; } } void VectorAttentionMaskAndInLoss(const dtype** losses, const dtype** in_vals, const dtype** masks, const int *in_counts, int max_in_count, int count, int dim, dtype **mask_losses, dtype **in_losses) { dim3 block_dim(max_in_count, count, 1); int thread_count = 8; if (dim >= TPB) { thread_count = TPB; } else { while (dim > thread_count) { thread_count <<= 1; } } hipLaunchKernelGGL(( KernelVectorAttentionMaskAndInLoss), dim3(block_dim), dim3(thread_count), thread_count * sizeof(dtype), 0, losses, in_vals, masks, in_counts, max_in_count, count, dim, mask_losses, in_losses); } __global__ void KernelVectorAttentionBackward(const dtype** masks, const dtype **mask_losses, const int *in_counts, int max_in_count, int count, int dim, dtype **unnormed_losses) { __shared__ volatile extern dtype shared_att_bckwrd_arr[]; int global_in_count_i = max_in_count * blockIdx.x + threadIdx.x; int in_count = in_counts[blockIdx.x]; if (threadIdx.x < in_count) { atomicAdd(unnormed_losses[global_in_count_i] + blockIdx.y, masks[blockIdx.x][blockIdx.y * max_in_count + threadIdx.x] * mask_losses[blockIdx.x][blockIdx.y * max_in_count + threadIdx.x]); } shared_att_bckwrd_arr[threadIdx.x] = threadIdx.x < in_count ? 
masks[blockIdx.x][blockIdx.y * max_in_count + threadIdx.x] * mask_losses[blockIdx.x][blockIdx.y * max_in_count + threadIdx.x] : 0.0f; __syncthreads(); for (int i = (blockDim.x >> 1); i > 0; i >>= 1) { if (threadIdx.x < i) { shared_att_bckwrd_arr[threadIdx.x] += shared_att_bckwrd_arr[threadIdx.x + i]; } __syncthreads(); } if (threadIdx.x < in_count) { atomicAdd(unnormed_losses[global_in_count_i] + blockIdx.y, -shared_att_bckwrd_arr[0] * masks[blockIdx.x][blockIdx.y * max_in_count + threadIdx.x]); } } void VectorAttentionBackward(const std::vector<dtype*> &losses, const std::vector<dtype*> &in_vals, const std::vector<dtype*> &masks, const std::vector<int> &in_counts, int count, int dim, std::vector<dtype*> &in_losses, std::vector<dtype*> &unnormed_losses) { NumberPointerArray loss_arr, mask_arr, in_loss_arr, unnormed_loss_arr, in_val_arr; loss_arr.init((dtype**)losses.data(), losses.size()); mask_arr.init((dtype**)masks.data(), masks.size()); in_loss_arr.init((dtype**)in_losses.data(), in_losses.size()); unnormed_loss_arr.init((dtype**)unnormed_losses.data(), unnormed_losses.size()); in_val_arr.init((dtype**)in_vals.data(), in_vals.size()); IntArray in_count_arr; in_count_arr.init((int*)in_counts.data(), in_counts.size()); int max_in_count = *std::max_element(in_counts.begin(), in_counts.end()); std::vector<std::shared_ptr<NumberArray>> mask_losses; mask_losses.reserve(count); for (int i = 0; i < count; ++i) { std::shared_ptr<NumberArray> p = std::make_shared<NumberArray>(); p->init(max_in_count * dim); mask_losses.push_back(p); } std::vector<dtype*> raw_mask_losses; raw_mask_losses.reserve(count); for (auto &p : mask_losses) { raw_mask_losses.push_back(p->value); } NumberPointerArray mask_loss_arr; mask_loss_arr.init((dtype**)raw_mask_losses.data(), mask_losses.size()); VectorAttentionMaskAndInLoss((const dtype**)loss_arr.value, (const dtype**)in_val_arr.value, (const dtype**)mask_arr.value, (const int*)in_count_arr.value, max_in_count, count, dim, mask_loss_arr.value, in_loss_arr.value); dim3 block_dim(count, dim, 1); int thread_count = 8; while (thread_count < max_in_count) { thread_count <<= 1; } hipLaunchKernelGGL(( KernelVectorAttentionBackward), dim3(block_dim), dim3(thread_count), thread_count * sizeof(dtype), 0, (const dtype**)mask_arr.value, (const dtype**)mask_loss_arr.value, (const int*)in_count_arr.value, max_in_count, count, dim, unnormed_loss_arr.value); } __global__ void KernelPMultiForward(const dtype **ins1, const dtype **ins2, int count, int dim, bool on_training, const dtype* drop_mask, dtype drop_factor, dtype** vals) { int index = DeviceDefaultIndex(); int step = DeviceDefaultStep(); for (int i = index; i < count * dim; i += step) { int count_i = i / dim; int dim_i = i % dim; dtype dropout = drop_factor > 0 ? drop_mask[dim_i * count + count_i] : 1; vals[count_i][dim_i] = drop_factor < dropout ? 
ins1[count_i][dim_i] * ins2[count_i][dim_i] : 0.0f; if (on_training) { if (drop_factor > 0.0f && drop_mask[dim_i * count + count_i] < drop_factor) { vals[count_i][dim_i] = 0.0f; } else { vals[count_i][dim_i] = ins1[count_i][dim_i] * ins2[count_i][dim_i]; } } else { vals[count_i][dim_i] = (1 - drop_factor) * ins1[count_i][dim_i] * ins2[count_i][dim_i]; } } } void PMultiForward(const std::vector<dtype*> &ins1, const std::vector<dtype*> &ins2, int count, int dim, bool on_training, const dtype* drop_mask, dtype dropout, std::vector<dtype*> &vals) { int block_count = DefaultBlockCount(count * dim); NumberPointerArray ins1_arr, ins2_arr, vals_arr; ins1_arr.init((dtype**)ins1.data(), count); ins2_arr.init((dtype**)ins2.data(), count); vals_arr.init((dtype**)vals.data(), count); if (dropout < 0) { dropout = 0; } hipLaunchKernelGGL(( KernelPMultiForward), dim3(block_count), dim3(TPB), 0, 0, (const dtype**)ins1_arr.value, (const dtype**)ins2_arr.value, count, dim, on_training,drop_mask, dropout, vals_arr.value); } __global__ void KernelPMultiBackward(const dtype **losses, const dtype **in_vals1, const dtype **in_vals2, int count, int dim, const dtype *drop_mask, dtype drop_factor, dtype** in_losses1, dtype** in_losses2) { int index = DeviceDefaultIndex(); int step = DeviceDefaultStep(); for (int i = index; i < count * dim; i += step) { int count_i = i / dim; int dim_i = i % dim; dtype dropout = drop_factor > 0 ? drop_mask[dim_i * count + count_i] : 1; if (drop_factor < dropout) { DeviceAtomicAdd(in_losses1[count_i] + dim_i, losses[count_i][dim_i] * in_vals2[count_i][dim_i]); DeviceAtomicAdd(in_losses2[count_i] + dim_i, losses[count_i][dim_i] * in_vals1[count_i][dim_i]); } } } void PMultiBackward(const std::vector<dtype*> &losses, const std::vector<dtype*> &in_vals1, const std::vector<dtype*> &in_vals2, int count, int dim, const dtype* drop_mask, dtype drop_factor, std::vector<dtype*> &in_losses1, std::vector<dtype*> &in_losses2) { int block_count = DefaultBlockCount(count * dim); NumberPointerArray losses_arr, in_vals1_arr, in_vals2_arr, in_losses1_arr, in_losses2_arr; losses_arr.init((dtype**)losses.data(), losses.size()); in_vals1_arr.init((dtype**)in_vals1.data(), in_vals1.size()); in_vals2_arr.init((dtype**)in_vals2.data(), in_vals2.size()); in_losses1_arr.init((dtype**)in_losses1.data(), in_losses1.size()); in_losses2_arr.init((dtype**)in_losses2.data(), in_losses2.size()); hipLaunchKernelGGL(( KernelPMultiBackward), dim3(block_count), dim3(TPB), 0, 0, (const dtype**)losses_arr.value, (const dtype**)in_vals1_arr.value, (const dtype**)in_vals2_arr.value, count, dim, drop_mask, drop_factor, in_losses1_arr.value, in_losses2_arr.value); } __global__ void KernelPAddForward(const dtype*** ins, int count, int dim, int in_count, const dtype *drop_mask, dtype drop_factor, dtype **vals) { int index = DeviceDefaultIndex(); int step = DeviceDefaultStep(); for (int i = index; i < count * dim; i+= step) { int count_i = i / dim; int dim_i = i % dim; dtype dropout = drop_factor > 0 ? 
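// KernelPMultiBackward above applies the element-wise product rule for vals = in1 * in2:
//   d in1 = loss * in2   and   d in2 = loss * in1,
// accumulated with DeviceAtomicAdd and skipped for positions the dropout mask zeroed in the forward
// pass. KernelPAddForward (this kernel) sums its in_count input vectors per position under the same
// dropout gating, and KernelPAddBackward fans the loss back to every addend unchanged.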
drop_mask[dim_i * count + count_i] : 1; if (drop_factor < dropout) { dtype sum = ins[0][count_i][dim_i]; for (int j = 1; j < in_count; ++j) { sum += ins[j][count_i][dim_i]; } vals[count_i][dim_i] = sum; } else { vals[count_i][dim_i] = 0.0f; } } } void PAddForward(const std::vector<std::vector<dtype*>> &ins, int count, int dim, int in_count, const dtype *drop_mask, dtype drop_factor, std::vector<dtype*> &vals) { std::vector<std::shared_ptr<NumberPointerArray>> gpu_addr; gpu_addr.reserve(ins.size()); for (const std::vector<dtype*> &x : ins) { std::shared_ptr<NumberPointerArray> arr = std::make_shared<NumberPointerArray>(); arr->init((dtype**)x.data(), x.size()); gpu_addr.push_back(arr); } std::vector<dtype**> ins_gpu; ins_gpu.reserve(ins.size()); for (auto &ptr : gpu_addr) { ins_gpu.push_back(ptr->value); } NumberPointerPointerArray in_arr; in_arr.init(ins_gpu.data(), ins_gpu.size()); NumberPointerArray out_arr; out_arr.init(vals.data(), vals.size()); int block_count = DefaultBlockCount(count * dim); hipLaunchKernelGGL(( KernelPAddForward), dim3(block_count), dim3(TPB), 0, 0, (const dtype***)in_arr.value, count, dim, in_count, drop_mask, drop_factor, out_arr.value); } __global__ void KernelPAddBackward(const dtype **losses, int count, int dim, int in_count, const dtype *drop_mask, dtype drop_factor, dtype ***in_losses) { int index = DeviceDefaultIndex(); int step = DeviceDefaultStep(); int dim_mul_count = dim * count; for (int i = index; i < dim_mul_count * in_count; i += step) { int in_count_i = i / dim_mul_count; int dim_mul_count_i = i % dim_mul_count; int count_i = dim_mul_count_i / dim; int dim_i = dim_mul_count_i % dim; dtype dropout = drop_factor > 0 ? drop_mask[dim_i * count + count_i] : 1; if (drop_factor < dropout) { DeviceAtomicAdd(in_losses[in_count_i][count_i] + dim_i, losses[count_i][dim_i]); } } } void PAddBackward(const std::vector<dtype*> &losses, int count, int dim, int in_count, const dtype *drop_mask, dtype drop_factor, std::vector<std::vector<dtype*>> &in_losses) { std::vector<std::shared_ptr<NumberPointerArray>> gpu_addr; gpu_addr.reserve(in_losses.size()); for (const std::vector<dtype*> &x : in_losses) { std::shared_ptr<NumberPointerArray> arr = std::make_shared<NumberPointerArray>(); arr->init((dtype**)x.data(), x.size()); gpu_addr.push_back(arr); } std::vector<dtype**> in_losses_gpu; in_losses_gpu.reserve(in_losses.size()); for (auto &ptr : gpu_addr) { in_losses_gpu.push_back(ptr->value); } NumberPointerPointerArray in_loss_arr; in_loss_arr.init(in_losses_gpu.data(), in_losses_gpu.size()); NumberPointerArray out_loss_arr; out_loss_arr.init((dtype**)losses.data(), losses.size()); int block_count = DefaultBlockCount(in_count * count * dim); hipLaunchKernelGGL(( KernelPAddBackward), dim3(block_count), dim3(TPB), 0, 0, (const dtype**)out_loss_arr.value, count, dim, in_count, drop_mask, drop_factor, in_loss_arr.value); } int NextTwoIntegerPowerNumber(int number) { int result = 1; while (number > result) { result <<= 1; } return result; } __global__ void KernelSoftMaxLoss(const dtype **vals, dtype **losses, int *correct_count, int *answers, int batchsize, int count, int dim) { volatile __shared__ int opt_label; volatile __shared__ dtype shared_val[TPB]; volatile __shared__ int64_t max_indexes[TPB]; volatile __shared__ dtype scores_sum[TPB]; volatile __shared__ dtype scores[TPB]; int dim_i = threadIdx.x; int count_i = blockIdx.x; if (count_i == 0 && dim_i == 0) { *correct_count = 0; } shared_val[dim_i] = dim_i < dim ? 
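// KernelSoftMaxLoss fuses accuracy counting (argmax vs. answers, atomicAdd into correct_count) with the
// softmax cross-entropy gradient. After subtracting the row maximum for numerical stability, the
// gradient written back per class i is
//   losses[count_i][i] = (softmax_i - (i == answer ? 1 : 0)) / batchsize
// so gradients arrive already averaged over the batch. The block-wide reductions assume a full
// power-of-two block, which is why SoftMaxLoss aborts when dim > TPB and sizes the block with
// NextTwoIntegerPowerNumber(dim).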
vals[count_i][dim_i] : -INFINITY; max_indexes[dim_i] = dim_i; __syncthreads(); for (int i = (blockDim.x >> 1); i > 0; i >>= 1) { if (shared_val[threadIdx.x + i] > shared_val[threadIdx.x]) { // race shared_val[threadIdx.x] = shared_val[threadIdx.x + i]; // race max_indexes[threadIdx.x] = max_indexes[threadIdx.x + i]; // race } __syncthreads(); } if (threadIdx.x == 0) { opt_label = max_indexes[0]; if (answers[count_i] == opt_label) { atomicAdd(correct_count, 1); } } __syncthreads(); dtype max_score = vals[count_i][opt_label]; dtype score = dim_i < dim ? cuda_exp(vals[count_i][dim_i] - max_score) : 0.0f; scores[dim_i] = score; scores_sum[dim_i] = score; for (int i = (blockDim.x >> 1); i > 0; i >>= 1) { scores_sum[threadIdx.x] = scores_sum[threadIdx.x] + scores_sum[threadIdx.x + i]; // race __syncthreads(); } if (dim_i < dim) { losses[count_i][dim_i] = (scores[dim_i] / scores_sum[0] - (dim_i == answers[count_i] ? 1 : 0)) / batchsize; } } void SoftMaxLoss(const std::vector<dtype*> &vals, std::vector<dtype*> &losses, int *correct_count, const std::vector<int> &answers, int batchsize, int count, int dim) { if (dim > TPB) { abort(); } int thread_count = NextTwoIntegerPowerNumber(dim); NumberPointerArray val_arr; val_arr.init((dtype**)vals.data(), vals.size()); NumberPointerArray loss_arr; loss_arr.init((dtype**)losses.data(), losses.size()); IntArray answer_arr; answer_arr.init((int*)answers.data(), answers.size()); hipLaunchKernelGGL(( KernelSoftMaxLoss), dim3(count), dim3(thread_count), 0, 0, const_cast<const dtype **>(val_arr.value), const_cast<dtype **>(loss_arr.value), correct_count, answer_arr.value, batchsize, count, dim); } __global__ void Predict(const dtype *val, int dim, int *result) { __shared__ volatile dtype shared_vals[TPB]; __shared__ volatile dtype shared_indexes[TPB]; int index = DeviceDefaultIndex(); shared_indexes[threadIdx.x] = threadIdx.x; if (index < threadIdx.x) { shared_vals[threadIdx.x] = val[threadIdx.x]; } else { shared_vals[threadIdx.x] = -10000000.0f; } __syncthreads(); for (int i = (blockDim.x >> 1); i > 0; i >>= 1) { if (shared_vals[threadIdx.x] > shared_vals[threadIdx.x + i]) { shared_vals[threadIdx.x] = shared_vals[threadIdx.x + i]; shared_indexes[threadIdx.x] = threadIdx.x + i; } __syncthreads(); } if (threadIdx.x == 0) { *result = shared_indexes[0]; } } int Predict(const dtype* val, int dim) { if (dim > TPB) { abort(); } int thread_count = NextTwoIntegerPowerNumber(dim); DeviceInt result; result.init(); hipLaunchKernelGGL(( Predict), dim3(1), dim3(thread_count), 0, 0, val, dim, result.value); result.copyFromDeviceToHost(); return result.v; } __global__ void KernelSquareSum(const dtype *v, int len, dtype *global_sum, int *block_counter, dtype *result) { __shared__ volatile dtype shared_sum[TPB]; __shared__ volatile bool is_last_block; int index = DeviceDefaultIndex(); if (index == 0) { *block_counter = 0; } if (threadIdx.x == 0) { is_last_block = false; } shared_sum[threadIdx.x] = 0.0f; for (int i = index; i < len; i += blockDim.x * gridDim.x) { shared_sum[threadIdx.x] += v[i] * v[i]; } __syncthreads(); for (int i = (blockDim.x >> 1); i > 0; i >>= 1) { if (threadIdx.x < i) { shared_sum[threadIdx.x] += shared_sum[threadIdx.x + i]; } __syncthreads(); } if (threadIdx.x == 0) { global_sum[blockIdx.x] = shared_sum[0]; if (atomicAdd(block_counter, 1) == gridDim.x - 1) { is_last_block = true; } } __syncthreads(); if (is_last_block) { float sum = 0.0f; for (int i = threadIdx.x; i < gridDim.x; i += blockDim.x) { sum += global_sum[i]; } shared_sum[threadIdx.x] = sum; 
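// KernelSquareSum computes a grid-wide sum of v[i]*v[i] in two phases: every block tree-reduces into
// global_sum[blockIdx.x], and the last block to bump block_counter re-reduces those partials into
// *result. Combined with Rescale (defined below) this supports gradient-norm clipping; a hypothetical
// host-side use (max_norm is illustrative, not a library symbol):
//   dtype norm = std::sqrt(SquareSum(grad, len));
//   if (norm > max_norm) Rescale(grad, len, max_norm / norm);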
__syncthreads(); for (int i = (blockDim.x >> 1); i > 0; i >>= 1) { if (threadIdx.x < i) { shared_sum[threadIdx.x] += shared_sum[threadIdx.x + i]; } __syncthreads(); } if (threadIdx.x == 0) { *result = shared_sum[0]; } } } dtype SquareSum(const dtype *v, int len) { int block_count = DefaultBlockCount(len); NumberArray global_sum; global_sum.init(block_count); DeviceInt block_counter; block_counter.init(); DeviceNumber result; result.init(); hipLaunchKernelGGL(( KernelSquareSum), dim3(block_count), dim3(TPB), 0, 0, v, len, global_sum.value, block_counter.value, result.value); result.copyFromDeviceToHost(); return result.v; } __global__ void KernelSquareSum(const dtype *v, const bool *indexers, int count, int dim, dtype *global_sum, int *block_counter, dtype *result) { __shared__ volatile dtype shared_sum[TPB]; __shared__ volatile bool is_last_block; int index = DeviceDefaultIndex(); if (index == 0) { *block_counter = 0; } if (threadIdx.x == 0) { global_sum[blockIdx.x] = 0.0f; is_last_block = false; } int count_i = index / dim; if (index < count * dim && indexers[count_i]) { shared_sum[threadIdx.x] = v[index] * v[index]; } else { shared_sum[threadIdx.x] = 0.0f; } __syncthreads(); for (int i = (blockDim.x >> 1); i > 0; i >>= 1) { if (threadIdx.x < i) { shared_sum[threadIdx.x] += shared_sum[threadIdx.x + i]; } __syncthreads(); } if (threadIdx.x == 0) { global_sum[blockIdx.x] = shared_sum[0]; if (atomicAdd(block_counter, 1) == gridDim.x - 1) { is_last_block = true; } } __syncthreads(); if (is_last_block) { float sum = 0.0f; for (int i = threadIdx.x; i < gridDim.x; i += blockDim.x) { sum += global_sum[i]; } shared_sum[threadIdx.x] = sum; __syncthreads(); for (int i = (blockDim.x >> 1); i > 0; i >>= 1) { if (threadIdx.x < i) { shared_sum[threadIdx.x] += shared_sum[threadIdx.x + i]; } __syncthreads(); } if (threadIdx.x == 0) { *result = shared_sum[0]; } } } dtype SquareSum(const dtype *v, const bool *indexers, int count, int dim) { int block_count = DefaultBlockCountWithoutLimit(count * dim); NumberArray global_sum; global_sum.init(block_count); DeviceInt block_counter; block_counter.init(); DeviceNumber result; result.init(); hipLaunchKernelGGL(( KernelSquareSum), dim3(block_count), dim3(TPB), 0, 0, v, indexers, count, dim, global_sum.value, block_counter.value, result.value); result.copyFromDeviceToHost(); return result.v; } __global__ void KernelRescale(dtype *v, int len, dtype scale) { int index = DeviceDefaultIndex(); int step = DeviceDefaultStep(); for (int i = index; i < len; i += step) { v[i] *= scale; } } void Rescale(dtype *v, int len, dtype scale) { int block_count = DefaultBlockCount(len); hipLaunchKernelGGL(( KernelRescale), dim3(block_count), dim3(TPB), 0, 0, v, len, scale); } __global__ void KernelUpdateAdam(dtype *val, dtype *grad, int row, int col, dtype *aux_mean, dtype *aux_square, int iter, dtype belta1, dtype belta2, dtype alpha, dtype reg, dtype eps, dtype x) { int index = DeviceDefaultIndex(); int step = DeviceDefaultStep(); int len = row * col; for (int i = index; i < len; i += step) { if (row > 1 && col > 1) { grad[i] += val[i] * reg; } aux_mean[i] = belta1 * aux_mean[i] + (1 - belta1) * grad[i]; aux_square[i] = belta2 * aux_square[i] + (1 - belta2) * grad[i] * grad[i]; dtype lr_t = alpha * cuda_sqrt(1 - cuda_pow(belta2, iter + 1)) * x; dtype square_plus_eps = aux_square[i] + eps; val[i] = val[i] - aux_mean[i] * lr_t / cuda_sqrt(square_plus_eps); } } void UpdateAdam(dtype *val, dtype *grad, int row, int col, dtype *aux_mean, dtype *aux_square, int iter, dtype belta1, dtype 
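/* Adam step as implemented by KernelUpdateAdam above (belta1/belta2 play the
   role of beta1/beta2):
       m    = belta1 * m + (1 - belta1) * g
       v    = belta2 * v + (1 - belta2) * g * g
       lr_t = alpha * sqrt(1 - belta2^(iter+1)) / (1 - belta1^(iter+1))
       val -= lr_t * m / sqrt(v + eps)
   This wrapper precomputes x = 1 / (1 - belta1^(iter+1)) on the host and passes
   it in, so the kernel only multiplies by x. L2 regularization (grad += val * reg)
   is applied only when row > 1 && col > 1, presumably to skip bias and other
   vector-shaped parameters. */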
belta2, dtype alpha, dtype reg, dtype eps) { int block_count = DefaultBlockCount(row * col); dtype x = 1.0f / (1 - pow(belta1, iter + 1)); hipLaunchKernelGGL(( KernelUpdateAdam), dim3(block_count), dim3(TPB), 0, 0, val, grad, row, col, aux_mean, aux_square, iter, belta1, belta2, alpha, reg, eps, x); } __global__ void KernelUpdateAdam(dtype *val, dtype *grad, int row, int col, dtype *aux_mean, dtype *aux_square, const bool *indexers, int *iters, dtype belta1, dtype belta2, dtype alpha, dtype reg, dtype eps) { int index = DeviceDefaultIndex(); int step = DeviceDefaultStep(); int len = row * col; for (int i = index; i < len; i += step) { int count_i = i / col; if (indexers[count_i]) { if (row > 1 && col > 1) { grad[i] += val[i] * reg; } aux_mean[i] = belta1 * aux_mean[i] + (1 - belta1) * grad[i]; aux_square[i] = belta2 * aux_square[i] + (1 - belta2) * grad[i] * grad[i]; dtype lr_t = alpha * cuda_sqrt(1 - cuda_pow(belta2, iters[count_i] + 1)) / (1 - cuda_pow(belta1, iters[count_i] + 1)); dtype square_plus_eps = aux_square[i] + eps; val[i] = val[i] - aux_mean[i] * lr_t / cuda_sqrt(square_plus_eps); } } } __global__ void KernelSelfPlusIters(const bool *indexers, int *iters, int count) { int index = DeviceDefaultIndex(); int step = DeviceDefaultStep(); for (int i = index; i < count; i += step) { if (indexers[i]) { ++iters[i]; } } } void UpdateAdam(dtype *val, dtype *grad, int row, int col, dtype *aux_mean, dtype *aux_square, const bool *indexers, int *iters, dtype belta1, dtype belta2, dtype alpha, dtype reg, dtype eps) { int block_count = DefaultBlockCount(row * col); hipLaunchKernelGGL(( KernelUpdateAdam), dim3(block_count), dim3(TPB), 0, 0, val, grad, row, col, aux_mean, aux_square, indexers, iters, belta1, belta2, alpha, reg, eps); block_count = DefaultBlockCount(row); hipLaunchKernelGGL(( KernelSelfPlusIters), dim3(block_count), dim3(TPB), 0, 0, indexers, iters, row); } __global__ void KernelUpdateAdagrad(dtype *val, dtype *grad, int row, int col, dtype *aux_square, dtype alpha, dtype reg, dtype eps) { int index = DeviceDefaultIndex(); int step = DeviceDefaultStep(); int len = row * col; for (int i = index; i < len; i += step) { if (row > 1 && col > 1) { grad[i] += val[i] * reg; } aux_square[i] = aux_square[i] + grad[i] * grad[i]; val[i] = val[i] - grad[i] * alpha / cuda_sqrt(aux_square[i] + eps); } } void UpdateAdagrad(dtype *val, dtype *grad, int row, int col, dtype *aux_square, dtype alpha, dtype reg, dtype eps) { int block_count = DefaultBlockCount(row * col); hipLaunchKernelGGL(( KernelUpdateAdagrad), dim3(block_count), dim3(TPB), 0, 0, val, grad, row, col, aux_square, alpha, reg, eps); } __global__ void KernelUpdateAdagrad(dtype *val, dtype *grad, int row, int col, dtype *aux_square, const bool *indexers, dtype alpha, dtype reg, dtype eps) { int index = DeviceDefaultIndex(); int step = DeviceDefaultStep(); int len = row * col; for (int i = index; i < len; i += step) { int count_i = i / col; if (indexers[count_i]) { if (row > 1 && col > 1) { grad[i] += val[i] * reg; } aux_square[i] = aux_square[i] + grad[i] * grad[i]; val[i] = val[i] - grad[i] * alpha / cuda_sqrt(aux_square[i] + eps); } } } void UpdateAdagrad(dtype *val, dtype *grad, int row, int col, dtype *aux_square, const bool *indexers, dtype alpha, dtype reg, dtype eps) { int block_count = DefaultBlockCount(row * col); hipLaunchKernelGGL(( KernelUpdateAdagrad), dim3(block_count), dim3(TPB), 0, 0, val, grad, row, col, aux_square, indexers, alpha, reg, eps); } void *GraphHostAlloc() { void *m; CallCuda(hipHostMalloc(&m, 10000000, 
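/* roughly 10 MB of pinned, write-combined host memory: fast for the CPU to
   fill and stream to the device, slow to read back on the host; presumably
   scratch space for staging graph data before transfer */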
hipHostMallocWriteCombined)); if (m == NULL) { abort(); } return m; } }
6001449c49743c421b34927ab888779880ed15b8.cu
#include "n3ldg_cuda.h" #include <array> #include <cstdlib> #include <vector> #include <algorithm> #include <cmath> #include <cstdio> #include <cublas_v2.h> #include "cuPrintf.cuh" #include "cuPrintf.cu" #include "memory_pool.h" #include <curand.h> #include <curand_kernel.h> #include "cnmem.h" #include <string> #include <cstring> #include <cstdint> #include <chrono> #include <thread> #include <numeric> #include <memory> #include "profiler.h" namespace n3ldg_cuda { using std::cout; using std::endl; #if USE_FLOAT #define cuda_sqrt(x) sqrtf(x) #define cuda_pow(x, y) powf(x, y) #define cuda_tanh(x) tanhf(x) #define cuda_exp(x) __expf(x) #else #define cuda_sqrt(x) sqrt(x) #define cuda_pow(x, y) pow(x, y) #define cuda_tanh(x) tanh(x) #define cuda_exp(x) exp(x) #endif #define KERNEL_LOG #ifdef KERNEL_LOG #define KernelPrintLine(format, ...)\ {\ cuPrintf("block:x=%d,y=%d thread:x=%d,y=%d "#format"\n", blockIdx.x,\ blockIdx.y, threadIdx.x, threadIdx.y,__VA_ARGS__);\ } #else #define KernelPrintLine(format, ...) #endif constexpr int TPB = 1024; constexpr int BLOCK_COUNT = 56; void CallCuda(cudaError_t status) { if (status != cudaSuccess) { cout << cudaGetErrorString(status) << endl; abort(); } } void CallCnmem(cnmemStatus_t status) { assert(status == CNMEM_STATUS_SUCCESS); } void CallCublas(cublasStatus_t status) { assert(status == CUBLAS_STATUS_SUCCESS); } void CallCurand(curandStatus status) { assert(status == CURAND_STATUS_SUCCESS); } cublasHandle_t& GetCublasHandle() { static cublasHandle_t handle; static bool init; if (!init) { init = true; CallCublas(cublasCreate(&handle)); } return handle; } cudaError_t MyCudaMemcpy(void *dest, const void *src, size_t count, cudaMemcpyKind kind) { cudaError_t e = cudaMemcpy(dest, src, count, kind); return e; } void NumberPointerArray::init(dtype **host_arr, int len) { if (value != NULL) { CallCuda(MemoryPool::Ins().Free(value)); value = NULL; } CallCuda(MemoryPool::Ins().Malloc((void**)&value, len * sizeof(dtype*))); CallCuda(MyCudaMemcpy(value, host_arr, len * sizeof(dtype*), cudaMemcpyHostToDevice)); this->len = len; } NumberPointerArray::~NumberPointerArray() { if (value != NULL) { CallCuda(MemoryPool::Ins().Free(value)); } } void Memcpy(dtype *dest, dtype*src, int size, cudaMemcpyKind kind) { CallCuda(MyCudaMemcpy(dest, src, size, kind)); } void NumberPointerPointerArray::init(dtype ***host_arr, int len) { if (value != NULL) { CallCuda(MemoryPool::Ins().Free(value)); value = NULL; } CallCuda(MemoryPool::Ins().Malloc((void**)&value, len * sizeof(dtype**))); CallCuda(MyCudaMemcpy(value, host_arr, len * sizeof(dtype*), cudaMemcpyHostToDevice)); this->len = len; } NumberPointerPointerArray::~NumberPointerPointerArray() { if (value != NULL) { CallCuda(MemoryPool::Ins().Free(value)); } } void NumberArray::init(int len) { if (value != NULL) { CallCuda(MemoryPool::Ins().Free(value)); value = NULL; } CallCuda(MemoryPool::Ins().Malloc((void**)&value, len * sizeof(dtype))); this->len = len; } void NumberArray::init(dtype *host_arr, int len) { init(len); CallCuda(MyCudaMemcpy(value, host_arr, len * sizeof(dtype), cudaMemcpyHostToDevice)); } NumberArray::~NumberArray() { if (value != NULL) { CallCuda(MemoryPool::Ins().Free(value)); } } void DeviceInt::init() { if (value != NULL) { CallCuda(MemoryPool::Ins().Free(value)); value = NULL; } CallCuda(MemoryPool::Ins().Malloc((void**)&value, sizeof(int))); } void DeviceInt::copyFromDeviceToHost() { CallCuda(MyCudaMemcpy(&v, value, sizeof(int), cudaMemcpyDeviceToHost)); } void DeviceInt::copyFromHostToDevice() { 
CallCuda(MyCudaMemcpy(value, &v, sizeof(int), cudaMemcpyHostToDevice)); } DeviceInt::~DeviceInt() { if (value != NULL) { CallCuda(MemoryPool::Ins().Free(value)); } } void DeviceNumber::init() { if (value != NULL) { CallCuda(MemoryPool::Ins().Free(value)); value = NULL; } CallCuda(MemoryPool::Ins().Malloc((void**)&value, sizeof(int))); } void DeviceNumber::copyFromDeviceToHost() { CallCuda(MyCudaMemcpy(&v, value, sizeof(dtype), cudaMemcpyDeviceToHost)); } DeviceNumber::~DeviceNumber() { if (value != NULL) { CallCuda(MemoryPool::Ins().Free(value)); } } void IntPointerArray::init(int **host_arr, int len) { if (value != NULL) { CallCuda(MemoryPool::Ins().Free(value)); value = NULL; } CallCuda(MemoryPool::Ins().Malloc((void**)&value, len * sizeof(int*))); CallCuda(MyCudaMemcpy(value, host_arr, len * sizeof(int*), cudaMemcpyHostToDevice)); this->len = len; } IntPointerArray::~IntPointerArray() { if (value != NULL) { CallCuda(MemoryPool::Ins().Free(value)); } } void IntArray::init(int *host_arr, int len) { if (value != NULL) { CallCuda(MemoryPool::Ins().Free(value)); value = NULL; } CallCuda(MemoryPool::Ins().Malloc((void**)&value, len * sizeof(int))); CallCuda(MyCudaMemcpy(value, host_arr, len * sizeof(int), cudaMemcpyHostToDevice)); this->len = len; } void IntArray::init(int len) { if (value != NULL) { CallCuda(MemoryPool::Ins().Free(value)); value = NULL; } CallCuda(MemoryPool::Ins().Malloc((void**)&value, len * sizeof(int))); this->len = len; } IntArray::~IntArray() { if (value != NULL) { CallCuda(MemoryPool::Ins().Free(value)); } } void BoolArray::init(bool *host_arr, int len) { if (value != NULL) { CallCuda(MemoryPool::Ins().Free(value)); value = NULL; } CallCuda(MemoryPool::Ins().Malloc((void**)&value, len * sizeof(bool))); CallCuda(MyCudaMemcpy(value, host_arr, len * sizeof(bool), cudaMemcpyHostToDevice)); this->len = len; } void BoolArray::copyFromHost(bool *host_arr) { CallCuda(MyCudaMemcpy(value, host_arr, len * sizeof(bool), cudaMemcpyHostToDevice)); } void BoolArray::copyToHost(bool *host_arr) { CallCuda(MyCudaMemcpy(host_arr, value, len * sizeof(bool), cudaMemcpyDeviceToHost)); } BoolArray::~BoolArray() { if (value != NULL) { CallCuda(MemoryPool::Ins().Free(value)); } } void Tensor1D::init(int dim) { initOnDevice(dim); #if TEST_CUDA v = new dtype[dim]; zero(); #endif } void Tensor1D::initOnDevice(int dim) { CallCuda(MemoryPool::Ins().Malloc((void**)&value, dim * sizeof(dtype))); this->dim = dim; } Tensor1D::Tensor1D(const Tensor1D &t) { dim = t.dim; memcpy(v, t.v, dim *sizeof(dtype)); CallCuda(MyCudaMemcpy(value, t.value, dim * sizeof(dtype), cudaMemcpyDeviceToDevice)); } Tensor1D::~Tensor1D() { if (value != NULL) { CallCuda(MemoryPool::Ins().Free(value)); } if (v != NULL) { delete []v; } } void Tensor1D::copyFromHostToDevice() { assert(v != NULL); assert(value != NULL); CallCuda(MyCudaMemcpy(value, v, dim * sizeof(dtype), cudaMemcpyHostToDevice)); } void Tensor1D::copyFromDeviceToHost() { CallCuda(MyCudaMemcpy(v, value, dim * sizeof(dtype), cudaMemcpyDeviceToHost)); } void Tensor2D::initOnMemoryAndDevice(int row, int col) { initOnDevice(row, col); v = new dtype[row * col]; zero(); } void Tensor2D::init(int row, int col) { initOnDevice(row, col); #if TEST_CUDA v = new dtype[row * col]; zero(); #endif } void Tensor2D::initOnDevice(int row, int col) { CallCuda(MemoryPool::Ins().Malloc((void**)&value, row * col * sizeof(dtype))); this->row = row; this->col = col; this->size = row * col; } Tensor2D::Tensor2D(const Tensor2D &t) { row = t.row; col = t.col; memcpy(v, t.v, sizeof(dtype) * 
row * col); CallCuda(MyCudaMemcpy(value, t.value, sizeof(dtype) * row * col, cudaMemcpyDeviceToDevice)); } Tensor2D::~Tensor2D() { if (value != NULL) { CallCuda(MemoryPool::Ins().Free(value)); } if (v != NULL) { delete [] v; } } void Tensor2D::copyFromHostToDevice() { CallCuda(MyCudaMemcpy(value, v, size * sizeof(dtype), cudaMemcpyHostToDevice)); } void Tensor2D::copyFromDeviceToHost() { CallCuda(MyCudaMemcpy(v, value, size * sizeof(dtype), cudaMemcpyDeviceToHost)); } void Assert(bool v) { #if TEST_CUDA if (!v) { abort(); } #endif } __device__ void DeviceAtomicAdd(float* address, float value) { float old = value; float new_old; do { new_old = atomicExch(address, 0.0f); new_old += old; } while ((old = atomicExch(address, new_old))!=0.0f); }; __device__ dtype cuda_dtanh(dtype y) { return 1.0f - y * y; } __device__ dtype cuda_sigmoid(dtype x) { return 1.0f / (1.0f + cuda_exp(-x)); } __device__ dtype cuda_dsigmoid(dtype y) { return y * (1.0f - y); } __device__ dtype cuda_relu(dtype x) { return x > 0.0f ? x : 0.0f; } __device__ dtype cuda_drelu(dtype x) { return x > 0.0f ? 1 : 0.0f; } __device__ dtype cuda_leaky_relu(dtype x) { return x > 0.0f ? x : -0.1f * x; } __device__ dtype cuda_dleaky_relu(dtype x) { return x > 0.0f ? 1.0f : -0.1f; } const dtype SELU_LAMBDA = 1.0507009873554804934193349852946; const dtype SELU_ALPHA = 1.6732632423543772848170429916717; __device__ dtype cuda_selu(dtype x) { return x <= 0.0f ? SELU_LAMBDA * SELU_ALPHA * (cuda_exp(x) - 1.0f) : SELU_LAMBDA * x; } __device__ dtype cuda_dselu(dtype x, dtype y) { return x <= 0.0f ? SELU_LAMBDA * SELU_ALPHA + y : SELU_LAMBDA; } void Random(dtype *v, int len, dtype bound) { dtype *mem = (dtype*)malloc(len * sizeof(dtype)); assert(mem != NULL); dtype min = -bound, max = bound; for (int i = 0; i < len; i++) { mem[i] = (dtype(rand()) / RAND_MAX) * (max - min) + min; } CallCuda(MyCudaMemcpy(v, mem, len * sizeof(dtype), cudaMemcpyHostToDevice)); free(mem); } __device__ int DeviceDefaultIndex() { return blockIdx.x * blockDim.x + threadIdx.x; } __device__ int DeviceDefaultStep() { return gridDim.x * blockDim.x; } __device__ dtype DeviceAbs(dtype d) { return d > 0 ? 
d : -d; } int DefaultBlockCount(int len) { int block_count = (len - 1 + TPB) / TPB; return std::min(block_count, BLOCK_COUNT); } int DefaultBlockCountWithoutLimit(int len) { return (len - 1 + TPB) / TPB; } __global__ void KernelZero(dtype *v, int len) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= len) { return; } v[index] = 0; } void Zero(dtype *v, int len) { int block_count = (len - 1 + TPB) / TPB; KernelZero<<<block_count, TPB>>>(v, len); } __global__ void PrintPointers(void **p, int len) { for (int i = 0; i < len; ++i) { printf("%p\n", p[i]); } } __global__ void KernelPrintNums(const dtype* p, int len) { for (int i = 0; i < len; ++i) { printf("%f\n", p[i]); } } void PrintNums(const dtype* p, int len) { KernelPrintNums<<<1, 1>>>(p, len); cudaDeviceSynchronize(); } __global__ void KernelPrintInts(const int* p, int len) { for (int i = 0; i < len; ++i) { printf("%d\n", p[i]); } } void PrintInts(const int* p, int len) { KernelPrintInts<<<1, 1>>>(p, len); cudaDeviceSynchronize(); } void InitCuda() { CallCuda(cudaSetDeviceFlags(cudaDeviceMapHost)); #if DEVICE_MEMORY == 0 cnmemDevice_t device; device.size = 2000000000; device.device = 0; cnmemInit(1, &device, CNMEM_FLAGS_DEFAULT); #else CallCuda(cudaSetDevice(0)); #endif CallCuda(cudaDeviceSetCacheConfig(cudaFuncCachePreferL1)); CallCuda(cudaPrintfInit()); } void EndCuda() { cudaPrintfEnd(); Profiler::Ins().Print(); } __global__ void KernelCopyFromOneVectorToMultiVectors(const dtype *src, dtype **dest, int count, int len) { int index = DeviceDefaultIndex(); int step = DeviceDefaultStep(); for (int i = index; i < count * len; i += step) { int count_i = i % count; int len_i = i / count; dest[count_i][len_i] = src[i]; } } void CopyFromOneVectorToMultiVals(const dtype *src, std::vector<dtype*> &vals, int count, int len) { NumberPointerArray val_arr; val_arr.init((dtype**)vals.data(), vals.size()); int block_count = (len * count - 1 + TPB) / TPB; block_count = std::min(block_count, BLOCK_COUNT); KernelCopyFromOneVectorToMultiVectors<<<block_count, TPB>>>(src, val_arr.value, count, len); } __global__ void KernelActivated(ActivatedEnum activated, const dtype *src, dtype**dest, dtype* dest2, int count, int len, bool is_being_trained, dtype drop_factor, const dtype *drop_mask) { int index = blockIdx.x * blockDim.x + threadIdx.x; int step = blockDim.x * gridDim.x; for (int i = index; i < len * count; i += step) { int count_i = i % count; int len_i = i / count; dtype result; if (activated == ActivatedEnum::TANH) { result = cuda_tanh(src[i]); } else if (activated == ActivatedEnum::SIGMOID) { result = cuda_sigmoid(src[i]); } else if (activated == ActivatedEnum::RELU) { result = cuda_relu(src[i]); } else if (activated == ActivatedEnum::LEAKY_RELU) { result = cuda_leaky_relu(src[i]); } else if (activated == ActivatedEnum::SELU) { result = cuda_selu(src[i]); } else { printf("KernelActivated error\n"); return; } if (is_being_trained) { if (drop_factor > 0 && drop_mask[i] <= drop_factor) { dest[count_i][len_i] = 0.0f; dest2[i] = result; } else { dest[count_i][len_i] = result; dest2[i] = result; } } else { dest[count_i][len_i] = result * (1 - drop_factor); dest2[i] = result; } } } void Activated(ActivatedEnum activated, const dtype *src, const std::vector<dtype*>& dest, dtype *dest2, int len, bool is_being_trained, dtype drop_factor, const dtype *drop_mask) { if (drop_factor < 0) { drop_factor = 0; } int count = dest.size(); NumberPointerArray dest_arr; dest_arr.init((dtype**)dest.data(), dest.size()); int block_count = std::min((len * count - 
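/* grid size = ceil(len * count / TPB) capped at BLOCK_COUNT; the kernels walk
   the data with grid-stride loops (cf. DeviceDefaultIndex / DeviceDefaultStep),
   so a capped grid still covers every element */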
1 + TPB) / TPB, BLOCK_COUNT); KernelActivated<<<block_count, TPB>>>(activated, src, dest_arr.value, dest2, count, len, is_being_trained, drop_factor, drop_mask); } __global__ void KernelTanhForward(ActivatedEnum activated, const dtype** xs, int count, int dim, const dtype* drop_mask, dtype drop_factor, dtype**ys) { int index = DeviceDefaultIndex(); int step = DeviceDefaultStep(); for (int i = index; i < dim * count; i += step) { int count_i = i / dim; int dim_i = i % dim; if (drop_factor > 0.0f && drop_mask[i] < drop_factor) { ys[count_i][dim_i] = 0.0f; } else { if (activated == ActivatedEnum::TANH) { ys[count_i][dim_i] = cuda_tanh(xs[count_i][dim_i]); } else if (activated == ActivatedEnum::SIGMOID) { ys[count_i][dim_i] = cuda_sigmoid(xs[count_i][dim_i]); } else { printf("error\n"); } } } } void TanhForward(ActivatedEnum activated, const std::vector<dtype*> &xs, int count, int dim, const dtype *drop_mask, dtype drop_factor, std::vector<dtype*> &ys) { if (drop_factor < 0) { drop_factor = 0.0f; } NumberPointerArray x_arr, y_arr; x_arr.init((dtype**)xs.data(), xs.size()); y_arr.init((dtype**)ys.data(), ys.size()); int block_count = DefaultBlockCount(count * dim); KernelTanhForward<<<block_count, TPB>>>(activated, (const dtype**)x_arr.value, count, dim, drop_mask, drop_factor, y_arr.value); } __global__ void KernelTanhBackward(ActivatedEnum activated, const dtype **losses, const dtype **vals, int count, int dim, const dtype* drop_mask, dtype drop_factor, dtype** in_losses) { int index = DeviceDefaultIndex(); int step = DeviceDefaultStep(); for (int i = index; i < dim * count; i += step) { int count_i = i / dim; int dim_i = i % dim; if (drop_factor <= 0.0f || drop_mask[i] > drop_factor) { dtype v; if (activated == ActivatedEnum::TANH) { v = losses[count_i][dim_i] * (1 - vals[count_i][dim_i] * vals[count_i][dim_i]); } else if (activated == ActivatedEnum::SIGMOID) { v = losses[count_i][dim_i] * (1 - vals[count_i][dim_i]) * vals[count_i][dim_i]; } atomicAdd(in_losses[count_i] + dim_i, v); } } } void TanhBackward(ActivatedEnum activated, const std::vector<dtype*> &losses, const std::vector<dtype*> &vals, int count, int dim, const dtype *drop_mask, dtype drop_factor, std::vector<dtype*> &in_losses) { if (drop_factor < 0) { drop_factor = 0.0f; } NumberPointerArray loss_arr, val_arr, in_loss_arr; loss_arr.init((dtype**)losses.data(), losses.size()); val_arr.init((dtype**)vals.data(), vals.size()); in_loss_arr.init((dtype**)in_losses.data(), in_losses.size()); int block_count = DefaultBlockCount(count * dim); KernelTanhBackward<<<block_count, TPB>>>(activated ,(const dtype**)loss_arr.value, (const dtype**)val_arr.value, count, dim, drop_mask, drop_factor, in_loss_arr.value); } __global__ void KernelDropoutForward(const dtype** xs, int count, int dim, const dtype* drop_mask, dtype drop_factor, dtype**ys) { int index = DeviceDefaultIndex(); int step = DeviceDefaultStep(); for (int i = index; i < dim * count; i += step) { int count_i = i / dim; int dim_i = i % dim; if (drop_factor > 0.0f && drop_mask[i] < drop_factor) { ys[count_i][dim_i] = 0.0f; } else { ys[count_i][dim_i] = xs[count_i][dim_i]; } } } void DropoutForward(const std::vector<dtype*> &xs, int count, int dim, const dtype *drop_mask, dtype drop_factor, std::vector<dtype*> &ys) { if (drop_factor < 0) { drop_factor = 0.0f; } NumberPointerArray x_arr, y_arr; x_arr.init((dtype**)xs.data(), xs.size()); y_arr.init((dtype**)ys.data(), ys.size()); int block_count = DefaultBlockCount(count * dim); KernelDropoutForward<<<block_count, TPB>>>((const 
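/* drop_mask holds uniform random samples (see CalculateDropoutMask below); an
   element is zeroed when its mask value is below drop_factor, i.e. with
   probability drop_factor, and KernelDropoutBackward applies the same test so
   dropped positions receive no gradient */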
dtype**)x_arr.value, count, dim, drop_mask, drop_factor, y_arr.value); } __global__ void KernelDropoutBackward(const dtype **losses, const dtype **vals, int count, int dim, const dtype* drop_mask, dtype drop_factor, dtype** in_losses) { int index = DeviceDefaultIndex(); int step = DeviceDefaultStep(); for (int i = index; i < dim * count; i += step) { int count_i = i / dim; int dim_i = i % dim; if (drop_factor <= 0.0f || drop_mask[i] > drop_factor) { atomicAdd(in_losses[count_i] + dim_i, losses[count_i][dim_i]); } } } void DropoutBackward(const std::vector<dtype*> &losses, const std::vector<dtype*> &vals, int count, int dim, const dtype *drop_mask, dtype drop_factor, std::vector<dtype*> &in_losses) { if (drop_factor < 0) { drop_factor = 0.0f; } NumberPointerArray loss_arr, val_arr, in_loss_arr; loss_arr.init((dtype**)losses.data(), losses.size()); val_arr.init((dtype**)vals.data(), vals.size()); in_loss_arr.init((dtype**)in_losses.data(), in_losses.size()); int block_count = DefaultBlockCount(count * dim); KernelDropoutBackward<<<block_count, TPB>>>((const dtype**)loss_arr.value, (const dtype**)val_arr.value, count, dim, drop_mask, drop_factor, in_loss_arr.value); } __global__ void KernelCopyForUniNodeForward(const dtype** xs, const dtype* b, dtype* xs_dest, dtype* b_dest, int count, int x_len, int b_len, bool use_b) { int index = blockIdx.x * blockDim.x + threadIdx.x; int step = gridDim.x * blockDim.x; int x_total_len = count * x_len; int b_total_len = count * b_len; for (int i = index; i < x_total_len + b_total_len; i += step) { if (i < x_total_len) { int len_i = i / count; int count_i = i % count; xs_dest[i] = xs[count_i][len_i]; } else if (use_b) { int b_i = i - x_total_len; int len_i = b_i / count; b_dest[b_i] = b[len_i]; } } } void CopyForUniNodeForward(const std::vector<dtype*> &xs, const dtype* b, dtype* xs_dest, dtype* b_dest, int count, int x_len, int b_len, bool use_b) { NumberPointerArray x_arr; x_arr.init((dtype**)xs.data(), xs.size()); int len = x_len + b_len; int block_count = std::min((count * len - 1 + TPB) / TPB, 56); KernelCopyForUniNodeForward<<<block_count, TPB>>>( (const dtype**)x_arr.value, (const dtype*)b, xs_dest, b_dest, count, x_len, b_len, use_b); } __global__ void KernelCopyForBiNodeForward(const dtype **x1s, const dtype **x2s, const dtype *b, dtype *x1s_dest, dtype *x2s_dest, dtype *b_dest, int count, int x1_len, int x2_len, int b_len) { int index = DeviceDefaultIndex(); int step = DeviceDefaultStep(); int x1_total_len = count * x1_len; int x2_total_len = count * x2_len; int b_total_len = count * b_len; int total_len = x1_total_len + x2_total_len + b_total_len; for (int i = index; i < total_len; i += step) { if (i < x2_total_len) { int len_i = i / count; int count_i = i % count; x2s_dest[i] = x2s[count_i][len_i]; } else if (i >= x2_total_len && i < x1_total_len + x2_total_len) { int len_i = (i - x2_total_len) / count; int count_i = (i - x2_total_len) % count; x1s_dest[i - x2_total_len] = x1s[count_i][len_i]; } else { int b_i = (i - x1_total_len - x2_total_len); int len_i = b_i / count; b_dest[b_i] = b[len_i]; } } } void CopyForBiNodeForward(const std::vector<dtype*>& x1s, const std::vector<dtype *>& x2s, const dtype *b, dtype *x1s_dest, dtype *x2s_dest, dtype *b_dest, int count, int x1_len, int x2_len, int b_len) { int len = x1_len + x2_len + b_len; int block_count = DefaultBlockCount(count * len); NumberPointerArray x1_arr, x2_arr; x1_arr.init((dtype**)x1s.data(), x1s.size()); x2_arr.init((dtype**)x2s.data(), x2s.size()); 
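/* Packing layout: the destination buffers are laid out with the batch index
   varying fastest, x_dest[len_i * count + count_i] = x[count_i][len_i], with
   the x2 block first, then x1, then the bias broadcast once per example.
   For the uni variant with count = 2 and x_len = 3 this gives
   xs_dest = { x0[0], x1[0], x0[1], x1[1], x0[2], x1[2] }.
   MatrixMultiplyMatrix below then treats count as the leading dimension of the
   packed input. */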
KernelCopyForBiNodeForward<<<block_count, TPB>>>( (const dtype**)x1_arr.value, (const dtype**)x2_arr.value, b, x1s_dest, x2s_dest, b_dest, count, x1_len, x2_len, b_len); } void MatrixMultiplyMatrix(dtype *W, dtype *x, dtype *y, int row, int col, int count, bool useb, bool should_x_transpose, bool should_W_transpose) { cublasHandle_t &handle = GetCublasHandle(); float alpha = 1; float beta = useb? 1 : 0; cublasOperation_t x_op = should_x_transpose ? CUBLAS_OP_T : CUBLAS_OP_N; int ldx = should_x_transpose ? col : count; cublasOperation_t W_op = should_W_transpose ? CUBLAS_OP_T : CUBLAS_OP_N; int ldw = should_W_transpose ? row : col; #if USE_FLOAT CallCublas(cublasSgemm(handle, x_op, W_op, count, row, col, &alpha, x, ldx, W, ldw, &beta, y, count)); #else CallCublas(cublasDgemm(handle, x_op, W_op, count, row, col, &alpha, x, ldx, W, ldw, &beta, y, count)); #endif } __global__ void KernelVerify(dtype *host, dtype *device, int len, const char *message, bool *success) { int index = DeviceDefaultIndex(); if (index < len) { dtype loss = host[index] - device[index]; if (DeviceAbs(loss) > 0.0001) { *success = false; printf("KernelVerify %s: host:%f device:%f loss:%f index:%d\n", message, host[index], device[index], loss, index); KernelPrintLine("KernelVerify: host:%f device:%f loss:%f", host[index], device[index], loss); } } } bool Verify(dtype *host, dtype *device, int len, const char* message) { NumberArray arr; arr.init(host, len); int block_count = (len + TPB - 1) / TPB; char *m = NULL; CallCuda(MemoryPool::Ins().Malloc((void**)&m, (strlen(message) + 1) * sizeof(char))); CallCuda(MyCudaMemcpy(m, message, (strlen(message) + 1) * sizeof(char), cudaMemcpyHostToDevice)); bool success = true; bool *dev_success = NULL; CallCuda(MemoryPool::Ins().Malloc((void**)&dev_success, 8 * sizeof(bool))); CallCuda(MyCudaMemcpy(dev_success, &success, sizeof(bool), cudaMemcpyHostToDevice)); KernelVerify<<<block_count, TPB>>>(arr.value, device, len, m, dev_success); CallCuda(MyCudaMemcpy(&success, dev_success, sizeof(bool), cudaMemcpyDeviceToHost)); MemoryPool::Ins().Free(dev_success); MemoryPool::Ins().Free(m); cudaDeviceSynchronize(); cudaPrintfDisplay(stdout, true); return success; } __global__ void KernelVerify(bool *host, bool *device, int len, const char *message, bool *success) { int index = DeviceDefaultIndex(); if (index < len) { if (host[index] != device[index]) { *success = false; printf("KernelVerify %s: host:%d device:%d \n", message, host[index], device[index]); KernelPrintLine("KernelVerify: host:%d device:%d", host[index], device[index]); } } } bool Verify(bool *host, bool *device, int len, const char* message) { BoolArray arr; arr.init(host, len); int block_count = (len + TPB - 1) / TPB; char *m = NULL; CallCuda(MemoryPool::Ins().Malloc((void**)&m, (strlen(message) + 1) * sizeof(char))); CallCuda(MyCudaMemcpy(m, message, (strlen(message) + 1) * sizeof(char), cudaMemcpyHostToDevice)); bool success = true; bool *dev_success = NULL; CallCuda(MemoryPool::Ins().Malloc((void**)&dev_success, 8 * sizeof(bool))); CallCuda(MyCudaMemcpy(dev_success, &success, sizeof(bool), cudaMemcpyHostToDevice)); KernelVerify<<<block_count, TPB>>>(arr.value, device, len, m, dev_success); CallCuda(MyCudaMemcpy(&success, dev_success, sizeof(bool), cudaMemcpyDeviceToHost)); MemoryPool::Ins().Free(dev_success); MemoryPool::Ins().Free(m); cudaDeviceSynchronize(); cudaPrintfDisplay(stdout, true); return success; } __global__ void KernelVerify(int *host, int *device, int len, const char *message, bool *success) { int index = 
DeviceDefaultIndex(); if (index < len) { if (host[index] != device[index]) { *success = false; printf("KernelVerify %s: host:%d device:%d \n", message, host[index], device[index]); KernelPrintLine("KernelVerify: host:%d device:%d", host[index], device[index]); } } } bool Verify(int *host, int *device, int len, const char* message) { IntArray arr; arr.init(host, len); int block_count = (len + TPB - 1) / TPB; char *m = NULL; CallCuda(MemoryPool::Ins().Malloc((void**)&m, (strlen(message) + 1) * sizeof(char))); CallCuda(MyCudaMemcpy(m, message, (strlen(message) + 1) * sizeof(char), cudaMemcpyHostToDevice)); bool success = true; bool *dev_success = NULL; CallCuda(MemoryPool::Ins().Malloc((void**)&dev_success, sizeof(bool))); CallCuda(MyCudaMemcpy(dev_success, &success, sizeof(bool), cudaMemcpyHostToDevice)); KernelVerify<<<block_count, TPB>>>(arr.value, device, len, m, dev_success); CallCuda(MyCudaMemcpy(&success, dev_success, sizeof(bool), cudaMemcpyDeviceToHost)); MemoryPool::Ins().Free(dev_success); MemoryPool::Ins().Free(m); cudaDeviceSynchronize(); cudaPrintfDisplay(stdout, true); return success; } cudaError_t MemoryPool::Malloc(void **p, int size) { assert(*p == NULL); Profiler &profiler = Profiler::Ins(); profiler.BeginEvent("Malloc"); #if DEVICE_MEMORY == 0 CallCnmem(cnmemMalloc(p, size, NULL)); profiler.EndEvent(); return cudaSuccess; #elif DEVICE_MEMORY == 1 cudaError_t r = cudaMalloc(p, size); profiler.EndEvent(); return r; #else int fit_size = 1; int n = 0; while (fit_size < size) { fit_size <<= 1; ++n; } cudaError_t status = cudaSuccess; if (free_blocks_.at(n).empty()) { status = cudaMalloc(p, fit_size); CallCuda(status); MemoryBlock block(*p, fit_size); busy_blocks_.insert(std::make_pair(*p, block)); } else { int this_size = free_blocks_.at(n).size(); MemoryBlock &block = free_blocks_.at(n).at(this_size - 1); *p = block.p; busy_blocks_.insert(std::make_pair(block.p, block)); free_blocks_.at(n).resize(this_size - 1); } profiler.EndEvent(); return status; #endif } cudaError_t MemoryPool::Free(void *p) { Profiler &profiler = Profiler::Ins(); profiler.BeginEvent("Free"); #if DEVICE_MEMORY == 0 CallCnmem(cnmemFree(p, NULL)); profiler.EndEvent(); #elif DEVICE_MEMORY == 1 cudaError_t r = cudaFree(p); profiler.EndEvent(); return r; #else auto it = busy_blocks_.find(p); if (it == busy_blocks_.end()) { abort(); } int size = it->second.size; int n = 0; while (size > 1) { size >>= 1; ++n; } free_blocks_.at(n).push_back(it->second); busy_blocks_.erase(it); profiler.EndEvent(); return cudaSuccess; #endif } void Profiler::EndCudaEvent() { cudaDeviceSynchronize(); EndEvent(); } __global__ void KernelCalculateLtyForUniBackward(ActivatedEnum activated, const dtype *const*ly, const dtype *ty, const dtype *y, const dtype *drop_mask, dtype drop_factor, dtype *lty, int count, int dim) { int index = DeviceDefaultIndex(); int step = DeviceDefaultStep(); int len = count * dim; for (int i = index; i < len; i += step) { int count_i = i % count; int dim_i = i / count; dtype yi = y[i]; if (drop_factor > 0.0f && drop_mask[i] < drop_factor) { lty[i] = 0.0f; } else { dtype lyv = ly[count_i][dim_i]; if (activated == ActivatedEnum::TANH) { lty[i] = lyv * cuda_dtanh(yi); } else if (activated == ActivatedEnum::SIGMOID) { lty[i] = lyv * cuda_dsigmoid(yi); } else if (activated == ActivatedEnum::RELU) { lty[i] = lyv * cuda_drelu(ty[i]); } else if (activated == ActivatedEnum::LEAKY_RELU) { lty[i] = lyv * cuda_dleaky_relu(ty[i]); } else if (activated == ActivatedEnum::SELU) { lty[i] = lyv * cuda_dselu(ty[i], yi); } else 
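/* An unsupported activation falls through to this error branch. lty is the
   loss w.r.t. the packed pre-activation ty, with the batch index varying
   fastest (i = dim_i * count + count_i) to match CopyForUniNodeForward, and
   equals ly * f'(ty), written via y where that is cheaper (e.g.
   cuda_dtanh(y) = 1 - y * y). Positions dropped by drop_mask are forced to
   zero. */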
{ printf("KernelCalculateLtyForUniBackward error\n"); } } } } void CalculateLtyForUniBackward(ActivatedEnum activated, const std::vector<dtype*> &ly, const dtype *ty, const dtype *y, const dtype *drop_mask, dtype drop_factor, dtype *lty, int count, int dim) { if (drop_factor < 0) { drop_factor = 0; } NumberPointerArray ly_arr; ly_arr.init((dtype**)ly.data(), ly.size()); int block_count = std::min(BLOCK_COUNT, (count * dim + TPB - 1) / TPB); KernelCalculateLtyForUniBackward<<<block_count, TPB>>>(activated, ly_arr.value, ty, y, drop_mask, drop_factor, lty, count, dim); cudaDeviceSynchronize(); } __global__ void KernelCalculateLyForLinearBackward(const dtype *const*ly_vec, dtype *ly, int count, int dim) { int index = DeviceDefaultIndex(); int step = DeviceDefaultStep(); int len = count * dim; for (int i = index; i < len; i += step) { int count_i = i % count; int dim_i = i / count; ly[i] = ly_vec[count_i][dim_i]; } } void CalculateLyForLinearBackward(const std::vector<dtype*> &ly_vec, dtype *ly, int count, int dim) { NumberPointerArray ly_arr; ly_arr.init((dtype**)ly_vec.data(), ly_vec.size()); int block_count = std::min(BLOCK_COUNT, (count * dim + TPB - 1) / TPB); KernelCalculateLyForLinearBackward<<<block_count, TPB>>>(ly_arr.value, ly, count, dim); } __global__ void KernelAddLtyToParamBiasAndAddLxToInputLossesForUniBackward( const dtype *lty, const dtype *lx, dtype *b, dtype **losses, int count, int out_dim, int in_dim, dtype *block_sums, int *global_block_count, bool use_b) { __shared__ volatile dtype shared_arr[TPB]; int count_i = blockIdx.y * blockDim.x + threadIdx.x; int dim_i = blockIdx.x; if (dim_i < out_dim) { if (use_b) { if (threadIdx.x == 0 && blockIdx.y == 0) { global_block_count[dim_i] = 0; } int lty_index = dim_i * count + count_i; shared_arr[threadIdx.x] = count_i < count ? 
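/* Blocks with blockIdx.x < out_dim accumulate the bias gradient for one output
   dimension: each block reduces its slice of lty into block_sums, and the block
   whose atomicAdd on global_block_count arrives last adds the combined sum into
   b via DeviceAtomicAdd. Blocks with blockIdx.x >= out_dim instead scatter the
   packed lx back into the per-example input losses. */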
lty[lty_index] : 0.0f; __syncthreads(); for (int i = (TPB >> 1); i > 0; i>>=1) { if (threadIdx.x < i) { shared_arr[threadIdx.x] += shared_arr[threadIdx.x + i]; } __syncthreads(); } if (threadIdx.x == 0) { block_sums[gridDim.y * blockIdx.x + blockIdx.y] = shared_arr[0]; if (atomicAdd(global_block_count + dim_i, 1) == gridDim.y - 1) { dtype sum = 0.0; for (int i = 0; i < gridDim.y; ++i) { sum += block_sums[gridDim.y * blockIdx.x + i]; } DeviceAtomicAdd(b + dim_i, sum); } } } } else { if (count_i < count) { dim_i -= out_dim; int lx_index = dim_i * count + count_i; DeviceAtomicAdd(losses[count_i] + dim_i, lx[lx_index]); } } } void AddLtyToParamBiasAndAddLxToInputLossesForUniBackward(const dtype *lty, const dtype *lx, dtype *b, std::vector<dtype*> &losses, int count, int out_dim, int in_dim, bool use_b) { int block_y = (count - 1 + TPB) / TPB; dim3 block_dim(out_dim + in_dim, block_y, 1); NumberPointerArray loss_arr; loss_arr.init(losses.data(), count); Tensor1D block_sums; block_sums.init(block_y * out_dim); IntArray global_block_count_arr; global_block_count_arr.init(out_dim); KernelAddLtyToParamBiasAndAddLxToInputLossesForUniBackward<<<block_dim, TPB>>>(lty, lx, b, loss_arr.value, count, out_dim, in_dim, block_sums.value, global_block_count_arr.value, use_b); } __global__ void KernelAddLtyToParamBiasAndAddLxToInputLossesForBiBackward( const dtype *lty, const dtype *lx1, const dtype *lx2, dtype *b, dtype **losses1, dtype **losses2, int count, int out_dim, int in_dim1, int in_dim2, dtype *block_sums, int *global_block_count) { __shared__ volatile dtype shared_arr[TPB]; int count_i = blockIdx.y * blockDim.x + threadIdx.x; int dim_i = blockIdx.x; if (dim_i < out_dim) { if (threadIdx.x == 0 && blockIdx.y == 0) { global_block_count[dim_i] = 0; } int lty_index = dim_i * count + count_i; shared_arr[threadIdx.x] = count_i < count ? 
lty[lty_index] : 0.0f; __syncthreads(); for (int i = (TPB >> 1); i > 0; i>>=1) { if (threadIdx.x < i) { shared_arr[threadIdx.x] += shared_arr[threadIdx.x + i]; } __syncthreads(); } if (threadIdx.x == 0) { block_sums[gridDim.y * blockIdx.x + blockIdx.y] = shared_arr[0]; if (atomicAdd(global_block_count + dim_i, 1) == gridDim.y - 1) { dtype sum = 0.0; for (int i = 0; i < gridDim.y; ++i) { sum += block_sums[gridDim.y * blockIdx.x + i]; } DeviceAtomicAdd(b + dim_i, sum); } } } else if (dim_i < out_dim + in_dim1) { if (count_i < count) { dim_i -= out_dim; int lx_index = dim_i * count + count_i; DeviceAtomicAdd(losses1[count_i] + dim_i, lx1[lx_index]); } } else { if (count_i < count) { dim_i -= (out_dim + in_dim1); int lx_index = dim_i * count + count_i; DeviceAtomicAdd(losses2[count_i] + dim_i, lx2[lx_index]); } } } void AddLtyToParamBiasAndAddLxToInputLossesForBiBackward(const dtype *lty, const dtype *lx1, const dtype *lx2, dtype *b, std::vector<dtype*> &losses1, std::vector<dtype*> &losses2, int count, int out_dim, int in_dim1, int in_dim2) { int block_y = (count - 1 + TPB) / TPB; dim3 block_dim(out_dim + in_dim1 + in_dim2, block_y, 1); NumberPointerArray loss1_arr; loss1_arr.init(losses1.data(), count); NumberPointerArray loss2_arr; loss2_arr.init(losses2.data(), count); Tensor1D block_sums; block_sums.init(block_y * out_dim); IntArray global_block_count_arr; global_block_count_arr.init(out_dim); KernelAddLtyToParamBiasAndAddLxToInputLossesForBiBackward<<<block_dim, TPB>>>(lty, lx1, lx2, b, loss1_arr.value, loss2_arr.value, count, out_dim, in_dim1, in_dim2, block_sums.value, global_block_count_arr.value); } constexpr int MAX_BATCH_COUNT = 1000000; __global__ void KernelInitCurandStates(curandState_t *states) { int index = blockIdx.x * blockDim.x + threadIdx.x; int step = gridDim.x * blockDim.x; for (int i = index; i < MAX_BATCH_COUNT; i += step) { curand_init(0, i, 0, &states[i]); } } curandState_t *GetCurandStates() { static curandState_t *states; if (states == NULL) { MemoryPool &pool = MemoryPool::Ins(); CallCuda(pool.Malloc((void**)&states, sizeof(curandState_t) * MAX_BATCH_COUNT)); KernelInitCurandStates<<<BLOCK_COUNT, TPB>>>( states); } return states; } curandGenerator_t &GetGenerator() { static curandGenerator_t gen; static bool init; if (!init) { CallCurand(curandCreateGenerator(&gen, CURAND_RNG_PSEUDO_DEFAULT)); CallCurand(curandSetPseudoRandomGeneratorSeed(gen, 0)); init = true; } return gen; } void CalculateDropoutMask(dtype drop_factor, int count, int dim, dtype* mask) { curandGenerator_t &gen = GetGenerator(); CallCurand(curandGenerateUniform(gen, mask, count * dim)); } __global__ void KernelConcatForward(dtype **ins, int *in_dims, dtype **outs, bool on_training, const dtype *drop_mask, dtype drop_factor, int count, int in_count, int out_dim) { int index = DeviceDefaultIndex(); int step = DeviceDefaultStep(); for (int i = index; i < out_dim * count; i += step) { int out_dim_i = i % out_dim; int count_i = i / out_dim; if (on_training) { if (drop_factor > 0.0f && drop_mask[out_dim_i * count + count_i] < drop_factor) { outs[count_i][out_dim_i] = 0.0f; } else { int in_dim_sum = 0; int last_in_dim_sum; int offset_j = 0; for (int j = 0; j < in_count; ++j) { last_in_dim_sum = in_dim_sum; in_dim_sum += in_dims[j]; offset_j = j; if (out_dim_i < in_dim_sum) { break; } } int in_dim_i = out_dim_i - last_in_dim_sum; dtype v = ins[count_i * in_count + offset_j][in_dim_i]; outs[count_i][out_dim_i] = v; } } else { int in_dim_sum = 0; int last_in_dim_sum; int offset_j = 0; for (int j = 0; j < 
in_count; ++j) { last_in_dim_sum = in_dim_sum; in_dim_sum += in_dims[j]; offset_j = j; if (out_dim_i < in_dim_sum) { break; } } int in_dim_i = out_dim_i - last_in_dim_sum; dtype v = ins[count_i * in_count + offset_j][in_dim_i]; outs[count_i][out_dim_i] = v * (1 - drop_factor); } } } void ConcatForward(const std::vector<dtype*> &in_vals, const std::vector<int> &in_dims, std::vector<dtype*> &vals, bool on_training, const dtype *drop_mask, dtype drop_factor, int count, int in_count, int out_dim) { assert(drop_factor < 1); if (drop_factor < 0) { drop_factor = 0; } int len = count * out_dim; int block_count = std::min(BLOCK_COUNT, (len - 1 + TPB) / TPB); NumberPointerArray in_val_arr, val_arr; in_val_arr.init((dtype**)in_vals.data(), in_vals.size()); val_arr.init((dtype**)vals.data(), vals.size()); IntArray in_dim_arr; in_dim_arr.init((int*)in_dims.data(), in_dims.size()); KernelConcatForward<<<block_count, TPB>>>(in_val_arr.value, in_dim_arr.value, val_arr.value, on_training, drop_mask, drop_factor, count, in_count, out_dim); } __global__ void KernelConcatBackward(dtype** in_losses, int *in_dims, dtype **out_losses, const dtype *drop_mask, dtype drop_factor, int count, int in_count, int out_dim) { int index = DeviceDefaultIndex(); int step = DeviceDefaultStep(); for (int i = index; i < out_dim * count; i += step) { int out_dim_i = i % out_dim; int count_i = i / out_dim; dtype dropout = drop_factor > 0 ? drop_mask[out_dim_i * count + count_i] : 1; if (dropout > drop_factor) { int in_dim_sum = 0; int last_in_dim_sum; int offset_j = 0; for (int j = 0; j < in_count; ++j) { last_in_dim_sum = in_dim_sum; in_dim_sum += in_dims[j]; offset_j = j; if (out_dim_i < in_dim_sum) { break; } } int in_dim_i = out_dim_i - last_in_dim_sum; DeviceAtomicAdd(in_losses[count_i * in_count + offset_j] + in_dim_i, out_losses[count_i][out_dim_i]); } } } void ConcatBackward(const std::vector<dtype*> &in_losses, const std::vector<int> &in_dims, std::vector<dtype*> &losses, const dtype *drop_mask, dtype drop_factor, int count, int in_count, int out_dim) { assert(drop_factor < 1); if (drop_factor < 0) { drop_factor = 0; } int len = count * out_dim; int block_count = std::min(BLOCK_COUNT, (len - 1 + TPB) / TPB); NumberPointerArray in_loss_arr, loss_arr; in_loss_arr.init((dtype**)in_losses.data(), in_losses.size()); loss_arr.init((dtype**)losses.data(), losses.size()); IntArray in_dim_arr; in_dim_arr.init((int*)in_dims.data(), in_dims.size()); KernelConcatBackward<<<block_count, TPB>>>(in_loss_arr.value, in_dim_arr.value, loss_arr.value, drop_mask, drop_factor, count, in_count, out_dim); } __global__ void KernelMemset(dtype *p, int len, dtype value) { int index = DeviceDefaultIndex(); int step = DeviceDefaultStep(); for (int i = index; i < len; i+= step) { p[i] = value; } } void Memset(dtype *p, int len, dtype value) { int block_count = std::min(BLOCK_COUNT, (len - 1 + TPB) / TPB); KernelMemset<<<block_count, TPB>>>(p, len, value); } __global__ void KernelMemset(bool *p, int len, bool value) { int index = DeviceDefaultIndex(); int step = DeviceDefaultStep(); for (int i = index; i < len; i+= step) { p[i] = value; } } void Memset(bool *p, int len, bool value) { int block_count = std::min(BLOCK_COUNT, (len - 1 + TPB) / TPB); KernelMemset<<<block_count, TPB>>>(p, len, value); } void *Malloc(int size) { void *p; CallCuda(cudaMalloc(&p, size)); return p; } void Memcpy(void *dest, void *src, int size, cudaMemcpyKind kind) { CallCuda(cudaMemcpy(dest, src, size, kind)); } __global__ void KernelBatchMemset(dtype **p, int count, int 
dim, dtype value) { int index = DeviceDefaultIndex(); int step = DeviceDefaultStep(); for (int i = index; i < dim * count ; i += step) { int count_i = i / dim; int dim_i = i % dim; p[count_i][dim_i] = value; } } void BatchMemset(const std::vector<dtype*> &vec, int count, int dim, dtype value) { int block_count = (count * dim -1 + TPB) / TPB; block_count = std::min(block_count, BLOCK_COUNT); NumberPointerArray vec_arr; vec_arr.init((dtype**)vec.data(), vec.size()); KernelBatchMemset<<<block_count, TPB>>>(vec_arr.value, count, dim, value); } __global__ void KernelLookupForward(const int *xids, const dtype *vocabulary, bool on_training, const dtype *drop_mask, dtype drop_factor, int count, int dim, dtype **vals) { int index = DeviceDefaultIndex(); int step = DeviceDefaultStep(); for (int i = index; i < count * dim; i += step) { int count_i = i / dim; int dim_i = i % dim; if (on_training) { if (drop_factor > 0 && drop_mask[dim_i * count + count_i] < drop_factor) { vals[count_i][dim_i] = 0.0f; } else { int xid = xids[count_i]; if (xid >= 0) { int voc_i = xid * dim + dim_i; vals[count_i][dim_i] = vocabulary[voc_i]; } else { vals[count_i][dim_i] = 0.0f; } } } else { int xid = xids[count_i]; if (xid >= 0) { int voc_i = xid * dim + dim_i; vals[count_i][dim_i] = vocabulary[voc_i] * (1 - drop_factor); } else { vals[count_i][dim_i] = 0.0f; } } } } void LookupForward(const std::vector<int> &xids, const dtype *vocabulary, bool on_training, const dtype *drop_mask, dtype drop_factor, int count, int dim, std::vector<dtype*> &vals) { if (drop_factor < 0) { drop_factor = 0; } int block_count = std::min(BLOCK_COUNT, (count * dim - 1 + TPB) / TPB); IntArray xid_arr; xid_arr.init((int*)xids.data(), xids.size()); NumberPointerArray val_arr; val_arr.init((dtype**)vals.data(), vals.size()); KernelLookupForward<<<block_count, TPB>>>(xid_arr.value, vocabulary, on_training, drop_mask, drop_factor, count, dim, const_cast<dtype**>(val_arr.value)); } __global__ void KernelLookupBackward(const int *xids, int unknown_id, bool fine_tune, const dtype** losses, const dtype *drop_mask, dtype drop_factor, int count, int dim, dtype *grad, bool *indexers) { int index = DeviceDefaultIndex(); int step = DeviceDefaultStep(); for (int i = index; i < count * dim; i += step) { int count_i = i / dim; int dim_i = i % dim; int xid = xids[count_i]; if (xid == unknown_id || fine_tune) { assert(xid >= 0); if (dim_i == 0) { indexers[xid] = true; } dtype dropout = drop_factor > 0 ? 
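/* Embedding gradients are scattered back only for rows that are trainable
   (fine_tune) or for the unknown word id; indexers[xid] records which rows were
   touched so the UpdateAdam / UpdateAdagrad overloads that take indexers can
   skip untouched rows. The drop_mask test mirrors KernelLookupForward. */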
drop_mask[dim_i * count + count_i] : 1; if (drop_factor < dropout) { DeviceAtomicAdd(grad + xid * dim + dim_i, losses[count_i][dim_i]); } } } } void LookupBackward(const std::vector<int> &xids, int unknown_id, bool fine_tune, const std::vector<dtype*> &losses, const dtype *drop_mask, dtype drop_factor, int count, int dim, dtype *grad, bool *indexers) { int block_count = std::min((count * dim - 1 + TPB) / TPB, BLOCK_COUNT); IntArray pl_arr; pl_arr.init((int*)xids.data(), xids.size()); IntArray xid_arr; xid_arr.init((int*)pl_arr.value, xids.size()); NumberPointerArray loss_arr; loss_arr.init((dtype**)losses.data(), losses.size()); KernelLookupBackward<<<block_count, TPB>>>( const_cast<const int *>(xid_arr.value), unknown_id, fine_tune, const_cast<const dtype**>(loss_arr.value), drop_mask, drop_factor, count, dim, grad, indexers); } __global__ void KernelPoolForward(PoolingEnum pooling, dtype **ins, int *in_counts, int max_in_count, dtype **outs, int count, int dim, int* hit_inputs) { __shared__ volatile extern dtype pool_shared_arr[]; volatile dtype* shared_indexers = pool_shared_arr + blockDim.x; int batch_i = blockIdx.y; int in_count = in_counts[batch_i]; int in_count_i = threadIdx.x; int dim_i = blockIdx.x; if (in_count_i < in_count) { pool_shared_arr[threadIdx.x] = ins[batch_i * max_in_count + in_count_i][dim_i]; } else { pool_shared_arr[threadIdx.x] = pooling == PoolingEnum::MAX ? -INFINITY : INFINITY; } shared_indexers[threadIdx.x] = threadIdx.x; __syncthreads(); for (int i = (blockDim.x >> 1); i > 0;i >>=1) { if (threadIdx.x < i) { int plus_i = threadIdx.x + i; if (pooling == PoolingEnum::MAX) { if (pool_shared_arr[threadIdx.x] < pool_shared_arr[plus_i]) { pool_shared_arr[threadIdx.x] = pool_shared_arr[plus_i]; shared_indexers[threadIdx.x] = shared_indexers[plus_i]; } } else { if (pool_shared_arr[threadIdx.x] > pool_shared_arr[plus_i]) { pool_shared_arr[threadIdx.x] = pool_shared_arr[plus_i]; shared_indexers[threadIdx.x] = shared_indexers[plus_i]; } } } __syncthreads(); } if (threadIdx.x == 0) { hit_inputs[batch_i * dim + dim_i] = shared_indexers[0]; outs[batch_i][dim_i] = pool_shared_arr[0]; } } void PoolForward(PoolingEnum pooling, const std::vector<dtype*> &in_vals, std::vector<dtype*> &vals, int count, const std::vector<int> &in_counts, int dim, int *hit_inputs) { int max_in_count = *std::max_element(in_counts.begin(), in_counts.end()); int thread_count = 8; while (max_in_count > thread_count) { thread_count <<= 1; } dim3 block_dim(dim, count, 1); NumberPointerArray in_val_arr; in_val_arr.init((dtype**)in_vals.data(), in_vals.size()); NumberPointerArray val_arr; val_arr.init((dtype**)vals.data(), vals.size()); IntArray in_count_arr; in_count_arr.init((int*)in_counts.data(), in_counts.size()); KernelPoolForward<<<block_dim, thread_count, thread_count * 2 * sizeof(dtype)>>>(pooling, in_val_arr.value, in_count_arr.value, max_in_count, val_arr.value, count, dim, hit_inputs); } __global__ void KernelPoolBackward(const dtype ** losses, const int *hit_inputs, int max_in_count, int count, int dim, dtype **in_losses) { int index = DeviceDefaultIndex(); int step = DeviceDefaultStep(); for (int i = index; i < dim * count; i += step) { int count_i = i / dim; int dim_i = i % dim; int input_i = hit_inputs[i]; dtype loss = losses[count_i][dim_i]; DeviceAtomicAdd(in_losses[count_i * max_in_count + input_i] + dim_i, loss); } } void PoolBackward(const std::vector<dtype*> &losses, std::vector<dtype*> &in_losses, const std::vector<int> &in_counts, const int *hit_inputs, int count, int dim) { 
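/* Max/min pooling backward: hit_inputs[count_i * dim + dim_i] holds the index
   of the input chosen for that output element in KernelPoolForward, so the
   whole output gradient is routed to exactly that input via DeviceAtomicAdd. */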
NumberPointerArray loss_arr, in_loss_arr; loss_arr.init((dtype**)losses.data(), losses.size()); in_loss_arr.init((dtype**)in_losses.data(), in_losses.size()); int max_in_count = *std::max_element(in_counts.begin(), in_counts.end()); int block_count = (count * dim - 1 + TPB) / TPB; block_count = std::min(block_count, BLOCK_COUNT); KernelPoolBackward<<<block_count, TPB>>>((const dtype**)loss_arr.value, hit_inputs, max_in_count, count, dim, in_loss_arr.value); } __global__ void KernelSumPoolForward(PoolingEnum pooling, const dtype **in_vals, int count, int dim, const int *in_counts, int max_in_count, dtype **vals) { __shared__ volatile extern dtype pool_shared_arr[]; int batch_i = blockIdx.y; int in_count = in_counts[batch_i]; int in_count_i = threadIdx.x; int dim_i = blockIdx.x; if (in_count_i < in_count) { pool_shared_arr[threadIdx.x] = in_vals[batch_i * max_in_count + in_count_i][dim_i]; } else { pool_shared_arr[threadIdx.x] = 0.0f; } __syncthreads(); for (int i = (blockDim.x >> 1); i > 0;i >>=1) { if (threadIdx.x < i) { int plus_i = threadIdx.x + i; pool_shared_arr[threadIdx.x] += pool_shared_arr[plus_i]; } __syncthreads(); } if (threadIdx.x == 0) { vals[batch_i][dim_i] = pooling == PoolingEnum::SUM ? pool_shared_arr[0] : pool_shared_arr[0] / in_counts[batch_i]; } } void SumPoolForward(PoolingEnum pooling, const std::vector<dtype*> &in_vals, int count, int dim, const std::vector<int> &in_counts, std::vector<dtype*> &vals) { int max_in_count = *std::max_element(in_counts.begin(), in_counts.end()); int thread_count = 8; while (max_in_count > thread_count) { thread_count <<= 1; } dim3 block_dim(dim, count, 1); NumberPointerArray in_val_arr; in_val_arr.init((dtype**)in_vals.data(), in_vals.size()); IntArray in_count_arr; in_count_arr.init((int*)in_counts.data(), in_counts.size()); NumberPointerArray val_arr; val_arr.init((dtype**)vals.data(), vals.size()); KernelSumPoolForward<<<block_dim, thread_count, thread_count * sizeof(dtype)>>>(pooling, (const dtype**)in_val_arr.value, count, dim, (const int*)in_count_arr.value, max_in_count, val_arr.value); } __global__ void KernelSumBackward(PoolingEnum pooling, const dtype **losses, const int *in_counts, int max_in_count, int count, int dim, dtype **in_losses) { int global_in_count_i = blockIdx.x * max_in_count + blockIdx.y; if (blockIdx.y < in_counts[blockIdx.x] && threadIdx.x < dim) { atomicAdd(in_losses[global_in_count_i] + threadIdx.x, pooling == PoolingEnum::SUM ? 
losses[blockIdx.x][threadIdx.x] : losses[blockIdx.x][threadIdx.x] / in_counts[blockIdx.x]); } } void SumPoolBackward(PoolingEnum pooling, const std::vector<dtype*> &losses, const std::vector<int> &in_counts, int count, int dim, std::vector<dtype*> &in_losses) { int thread_count = 8; while (thread_count < dim) { thread_count <<= 1; } int max_in_count = *std::max_element(in_counts.begin(), in_counts.end()); dim3 block_dim(count, max_in_count, 1); NumberPointerArray loss_arr; loss_arr.init((dtype**)losses.data(), losses.size()); IntArray in_count_arr; in_count_arr.init((int*)in_counts.data(), in_counts.size()); NumberPointerArray in_loss_arr; in_loss_arr.init((dtype**)in_losses.data(), in_losses.size()); KernelSumBackward<<<block_dim, thread_count>>>(pooling, (const dtype**)loss_arr.value, (const int*)in_count_arr.value, max_in_count, count, dim, in_loss_arr.value); } __global__ void KernelScalarAttentionForward(const dtype** ins, const dtype **unnormeds, const int *in_counts, int max_in_count, int count, int dim, dtype **masks, dtype **vals) { __shared__ volatile extern dtype attention_shared_arr[]; volatile dtype *shared_unnormed_masks = attention_shared_arr + blockDim.x; int count_i = blockIdx.y; int in_count = in_counts[count_i]; int dim_i = blockIdx.x; int global_in_count_i = blockIdx.y * max_in_count + threadIdx.x; dtype unnormed_mask = threadIdx.x < in_count ? cuda_exp(unnormeds[global_in_count_i][0]) : 0.0f; attention_shared_arr[threadIdx.x] = unnormed_mask; shared_unnormed_masks[threadIdx.x] = unnormed_mask; __syncthreads(); for (int i = (blockDim.x >> 1); i > 0; i >>= 1) { if (threadIdx.x < i) { int plus_i = threadIdx.x + i; attention_shared_arr[threadIdx.x] += attention_shared_arr[plus_i]; } __syncthreads(); } dtype mask = threadIdx.x < in_count ? shared_unnormed_masks[threadIdx.x] / attention_shared_arr[0] : 0.0f; if (threadIdx.x < in_count) { masks[blockIdx.y][blockIdx.x * max_in_count + threadIdx.x] = mask; } dtype in = threadIdx.x < in_count ? ins[global_in_count_i][dim_i] : 0.0f; attention_shared_arr[threadIdx.x] = threadIdx.x < in_count ? 
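/* Scalar attention: each input contributes one unnormalized score
   (unnormeds[...][0]); this block (one per output dimension and example)
   computes softmax weights over the in_count inputs with a shared-memory sum
   reduction, stores them in masks, and the reduction that follows sums
   mask * in to produce the attended value for this dimension. The same weights
   are recomputed for every output dimension. */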
mask * in : 0.0f; __syncthreads(); for (int i = (blockDim.x >> 1); i > 0; i >>= 1) { if (threadIdx.x < i) { int plus_i = threadIdx.x + i; attention_shared_arr[threadIdx.x] += attention_shared_arr[plus_i]; } __syncthreads(); } if (threadIdx.x == 0) { vals[blockIdx.y][blockIdx.x] = attention_shared_arr[0]; } } void ScalarAttentionForward(const std::vector<dtype*> &ins, const std::vector<dtype*> &unnormeds, const std::vector<int> &in_counts, int count, int dim, std::vector<dtype*> &masks, std::vector<dtype*> &vals) { int max_in_count = *std::max_element(in_counts.begin(), in_counts.end()); int thread_count = 8; while (max_in_count > thread_count) { thread_count <<= 1; } dim3 block_dim(dim, count, 1); NumberPointerArray in_arr; in_arr.init((dtype**)ins.data(), ins.size()); NumberPointerArray unnormed_arr; unnormed_arr.init((dtype**)unnormeds.data(), unnormeds.size()); NumberPointerArray mask_arr; mask_arr.init((dtype**)masks.data(), masks.size()); NumberPointerArray val_arr; val_arr.init((dtype**)vals.data(), vals.size()); IntArray in_count_arr; in_count_arr.init((int*)in_counts.data(), in_counts.size()); KernelScalarAttentionForward<<<block_dim, thread_count, 2 * thread_count * sizeof(dtype)>>>((const dtype**)in_arr.value, (const dtype**)unnormed_arr.value, (const int*)in_count_arr.value, max_in_count, count, dim, mask_arr.value, val_arr.value); } __global__ void KernelScalarAttentionMaskAndInLoss(const dtype **losses, const dtype **in_vals, const dtype **masks, const int *in_counts, int max_in_count, int count, int dim, dtype *mask_losses, dtype **in_losses) { // blockIdx.x : in_count_i // blockIdx.y : count_i // threadIdx.x : dim_i __shared__ extern volatile dtype att_mask_loss_shared_arr[]; int in_count = in_counts[blockIdx.y]; int global_in_count_i = blockIdx.y * max_in_count + blockIdx.x; if (in_count <= blockIdx.x) { return; } for (int i = threadIdx.x; i < dim; i += blockDim.x) { atomicAdd(in_losses[global_in_count_i] + i, losses[blockIdx.y][i] * masks[blockIdx.y][max_in_count * threadIdx.x + blockIdx.x]); } att_mask_loss_shared_arr[threadIdx.x] = 0.0f; for (int i = threadIdx.x; i < dim; i += blockDim.x) { att_mask_loss_shared_arr[threadIdx.x] += losses[blockIdx.y][i] * in_vals[global_in_count_i][i]; } __syncthreads(); for (int i = (blockDim.x >> 1); i > 0; i >>= 1) { if (threadIdx.x < i) { att_mask_loss_shared_arr[threadIdx.x] += att_mask_loss_shared_arr[threadIdx.x + i]; } __syncthreads(); } if (threadIdx.x == 0) { mask_losses[global_in_count_i] = att_mask_loss_shared_arr[0]; } } void ScalarAttentionMaskAndInLoss(const dtype** losses, const dtype** in_vals, const dtype **masks, const int *in_counts, int max_in_count, int count, int dim, dtype *mask_losses, dtype **in_losses) { dim3 block_dim(max_in_count, count, 1); int thread_count = 8; if (dim >= TPB) { thread_count = TPB; } else { while (dim > thread_count) { thread_count <<= 1; } } KernelScalarAttentionMaskAndInLoss<<<block_dim, thread_count, thread_count * sizeof(dtype)>>>(losses, in_vals, masks, in_counts, max_in_count, count, dim, mask_losses, in_losses); } __global__ void KernelScalarAttentionBackward(const dtype** masks, const dtype *mask_losses, const int *in_counts, int max_in_count, int count, int dim, dtype **unnormed_losses) { __shared__ volatile extern dtype shared_att_bckwrd_arr[]; int global_in_count_i = max_in_count * blockIdx.x + threadIdx.x; int in_count = in_counts[blockIdx.x]; if (threadIdx.x < in_count && blockIdx.y == 0) { atomicAdd(unnormed_losses[global_in_count_i], masks[blockIdx.x][blockIdx.y * max_in_count 
+ threadIdx.x] * mask_losses[global_in_count_i]); } shared_att_bckwrd_arr[threadIdx.x] = threadIdx.x < in_count ? masks[blockIdx.x][blockIdx.y * max_in_count + threadIdx.x] * mask_losses[global_in_count_i] : 0.0f; __syncthreads(); for (int i = (blockDim.x >> 1); i > 0; i >>= 1) { if (threadIdx.x < i) { shared_att_bckwrd_arr[threadIdx.x] += shared_att_bckwrd_arr[threadIdx.x + i]; } __syncthreads(); } if (threadIdx.x < in_count && blockIdx.y == 0) { atomicAdd(unnormed_losses[global_in_count_i], -shared_att_bckwrd_arr[0] * masks[blockIdx.x][threadIdx.x]); } } void ScalarAttentionBackward(const std::vector<dtype*> &losses, const std::vector<dtype*> &in_vals, const std::vector<dtype*> &masks, const std::vector<int> &in_counts, int count, int dim, std::vector<dtype*> &in_losses, std::vector<dtype*> &unnormed_losses) { NumberPointerArray loss_arr, mask_arr, in_loss_arr, unnormed_loss_arr, in_val_arr; loss_arr.init((dtype**)losses.data(), losses.size()); mask_arr.init((dtype**)masks.data(), masks.size()); in_loss_arr.init((dtype**)in_losses.data(), in_losses.size()); unnormed_loss_arr.init((dtype**)unnormed_losses.data(), unnormed_losses.size()); in_val_arr.init((dtype**)in_vals.data(), in_vals.size()); IntArray in_count_arr; in_count_arr.init((int*)in_counts.data(), in_counts.size()); int max_in_count = *std::max_element(in_counts.begin(), in_counts.end()); NumberArray mask_loss_arr; mask_loss_arr.init(count * max_in_count); ScalarAttentionMaskAndInLoss((const dtype**)loss_arr.value, (const dtype**)in_val_arr.value, (const dtype**)mask_arr.value, (const int*)in_count_arr.value, max_in_count, count, dim, mask_loss_arr.value, in_loss_arr.value); dim3 block_dim(count, dim, 1); int thread_count = 8; while (thread_count < max_in_count) { thread_count <<= 1; } KernelScalarAttentionBackward<<<block_dim, thread_count, thread_count * sizeof(dtype)>>>((const dtype**)mask_arr.value, (const dtype*)mask_loss_arr.value, (const int*)in_count_arr.value, max_in_count, count, dim, unnormed_loss_arr.value); } __global__ void KernelVectorAttentionForward(const dtype** ins, const dtype **unnormeds, const int *in_counts, int max_in_count, int count, int dim, dtype **masks, dtype **vals) { __shared__ volatile extern dtype attention_shared_arr[]; volatile dtype *shared_unnormed_masks = attention_shared_arr + blockDim.x; int count_i = blockIdx.y; int in_count = in_counts[count_i]; int dim_i = blockIdx.x; int global_in_count_i = blockIdx.y * max_in_count + threadIdx.x; dtype unnormed_mask = threadIdx.x < in_count ? cuda_exp(unnormeds[global_in_count_i][blockIdx.x]) : 0.0f; attention_shared_arr[threadIdx.x] = unnormed_mask; shared_unnormed_masks[threadIdx.x] = unnormed_mask; __syncthreads(); for (int i = (blockDim.x >> 1); i > 0; i >>= 1) { if (threadIdx.x < i) { int plus_i = threadIdx.x + i; attention_shared_arr[threadIdx.x] += attention_shared_arr[plus_i]; } __syncthreads(); } dtype mask = threadIdx.x < in_count ? shared_unnormed_masks[threadIdx.x] / attention_shared_arr[0] : 0.0f; if (threadIdx.x < in_count) { masks[blockIdx.y][blockIdx.x * max_in_count + threadIdx.x] = mask; } dtype in = threadIdx.x < in_count ? ins[global_in_count_i][dim_i] : 0.0f; attention_shared_arr[threadIdx.x] = threadIdx.x < in_count ? 
mask * in : 0.0f; __syncthreads(); for (int i = (blockDim.x >> 1); i > 0; i >>= 1) { if (threadIdx.x < i) { int plus_i = threadIdx.x + i; attention_shared_arr[threadIdx.x] += attention_shared_arr[plus_i]; } __syncthreads(); } if (threadIdx.x == 0) { vals[blockIdx.y][blockIdx.x] = attention_shared_arr[0]; } } void VectorAttentionForward(const std::vector<dtype*> &ins, const std::vector<dtype*> &unnormeds, const std::vector<int> &in_counts, int count, int dim, std::vector<dtype*> &masks, std::vector<dtype*> &vals) { int max_in_count = *std::max_element(in_counts.begin(), in_counts.end()); int thread_count = 8; while (max_in_count > thread_count) { thread_count <<= 1; } dim3 block_dim(dim, count, 1); NumberPointerArray in_arr; in_arr.init((dtype**)ins.data(), ins.size()); NumberPointerArray unnormed_arr; unnormed_arr.init((dtype**)unnormeds.data(), unnormeds.size()); NumberPointerArray mask_arr; mask_arr.init((dtype**)masks.data(), masks.size()); NumberPointerArray val_arr; val_arr.init((dtype**)vals.data(), vals.size()); IntArray in_count_arr; in_count_arr.init((int*)in_counts.data(), in_counts.size()); KernelVectorAttentionForward<<<block_dim, thread_count, 2 * thread_count * sizeof(dtype)>>>((const dtype**)in_arr.value, (const dtype**)unnormed_arr.value, (const int*)in_count_arr.value, max_in_count, count, dim, mask_arr.value, val_arr.value); } __global__ void KernelVectorAttentionMaskAndInLoss(const dtype **losses, const dtype **in_vals, const dtype **masks, const int *in_counts, int max_in_count, int count, int dim, dtype **mask_losses, dtype **in_losses) { // blockIdx.x : in_count_i // blockIdx.y : count_i // threadIdx.x : dim_i int in_count = in_counts[blockIdx.y]; int global_in_count_i = blockIdx.y * max_in_count + blockIdx.x; if (in_count <= blockIdx.x) { return; } for (int i = threadIdx.x; i < dim; i += blockDim.x) { atomicAdd(in_losses[global_in_count_i] + i, losses[blockIdx.y][i] * masks[blockIdx.y][max_in_count * i + blockIdx.x]); mask_losses[blockIdx.y][max_in_count * i + blockIdx.x] = losses[blockIdx.y][i] * in_vals[global_in_count_i][i]; } } void VectorAttentionMaskAndInLoss(const dtype** losses, const dtype** in_vals, const dtype** masks, const int *in_counts, int max_in_count, int count, int dim, dtype **mask_losses, dtype **in_losses) { dim3 block_dim(max_in_count, count, 1); int thread_count = 8; if (dim >= TPB) { thread_count = TPB; } else { while (dim > thread_count) { thread_count <<= 1; } } KernelVectorAttentionMaskAndInLoss<<<block_dim, thread_count, thread_count * sizeof(dtype)>>>(losses, in_vals, masks, in_counts, max_in_count, count, dim, mask_losses, in_losses); } __global__ void KernelVectorAttentionBackward(const dtype** masks, const dtype **mask_losses, const int *in_counts, int max_in_count, int count, int dim, dtype **unnormed_losses) { __shared__ volatile extern dtype shared_att_bckwrd_arr[]; int global_in_count_i = max_in_count * blockIdx.x + threadIdx.x; int in_count = in_counts[blockIdx.x]; if (threadIdx.x < in_count) { atomicAdd(unnormed_losses[global_in_count_i] + blockIdx.y, masks[blockIdx.x][blockIdx.y * max_in_count + threadIdx.x] * mask_losses[blockIdx.x][blockIdx.y * max_in_count + threadIdx.x]); } shared_att_bckwrd_arr[threadIdx.x] = threadIdx.x < in_count ? 
masks[blockIdx.x][blockIdx.y * max_in_count + threadIdx.x] * mask_losses[blockIdx.x][blockIdx.y * max_in_count + threadIdx.x] : 0.0f; __syncthreads(); for (int i = (blockDim.x >> 1); i > 0; i >>= 1) { if (threadIdx.x < i) { shared_att_bckwrd_arr[threadIdx.x] += shared_att_bckwrd_arr[threadIdx.x + i]; } __syncthreads(); } if (threadIdx.x < in_count) { atomicAdd(unnormed_losses[global_in_count_i] + blockIdx.y, -shared_att_bckwrd_arr[0] * masks[blockIdx.x][blockIdx.y * max_in_count + threadIdx.x]); } } void VectorAttentionBackward(const std::vector<dtype*> &losses, const std::vector<dtype*> &in_vals, const std::vector<dtype*> &masks, const std::vector<int> &in_counts, int count, int dim, std::vector<dtype*> &in_losses, std::vector<dtype*> &unnormed_losses) { NumberPointerArray loss_arr, mask_arr, in_loss_arr, unnormed_loss_arr, in_val_arr; loss_arr.init((dtype**)losses.data(), losses.size()); mask_arr.init((dtype**)masks.data(), masks.size()); in_loss_arr.init((dtype**)in_losses.data(), in_losses.size()); unnormed_loss_arr.init((dtype**)unnormed_losses.data(), unnormed_losses.size()); in_val_arr.init((dtype**)in_vals.data(), in_vals.size()); IntArray in_count_arr; in_count_arr.init((int*)in_counts.data(), in_counts.size()); int max_in_count = *std::max_element(in_counts.begin(), in_counts.end()); std::vector<std::shared_ptr<NumberArray>> mask_losses; mask_losses.reserve(count); for (int i = 0; i < count; ++i) { std::shared_ptr<NumberArray> p = std::make_shared<NumberArray>(); p->init(max_in_count * dim); mask_losses.push_back(p); } std::vector<dtype*> raw_mask_losses; raw_mask_losses.reserve(count); for (auto &p : mask_losses) { raw_mask_losses.push_back(p->value); } NumberPointerArray mask_loss_arr; mask_loss_arr.init((dtype**)raw_mask_losses.data(), mask_losses.size()); VectorAttentionMaskAndInLoss((const dtype**)loss_arr.value, (const dtype**)in_val_arr.value, (const dtype**)mask_arr.value, (const int*)in_count_arr.value, max_in_count, count, dim, mask_loss_arr.value, in_loss_arr.value); dim3 block_dim(count, dim, 1); int thread_count = 8; while (thread_count < max_in_count) { thread_count <<= 1; } KernelVectorAttentionBackward<<<block_dim, thread_count, thread_count * sizeof(dtype)>>>((const dtype**)mask_arr.value, (const dtype**)mask_loss_arr.value, (const int*)in_count_arr.value, max_in_count, count, dim, unnormed_loss_arr.value); } __global__ void KernelPMultiForward(const dtype **ins1, const dtype **ins2, int count, int dim, bool on_training, const dtype* drop_mask, dtype drop_factor, dtype** vals) { int index = DeviceDefaultIndex(); int step = DeviceDefaultStep(); for (int i = index; i < count * dim; i += step) { int count_i = i / dim; int dim_i = i % dim; dtype dropout = drop_factor > 0 ? drop_mask[dim_i * count + count_i] : 1; vals[count_i][dim_i] = drop_factor < dropout ? 
ins1[count_i][dim_i] * ins2[count_i][dim_i] : 0.0f; if (on_training) { if (drop_factor > 0.0f && drop_mask[dim_i * count + count_i] < drop_factor) { vals[count_i][dim_i] = 0.0f; } else { vals[count_i][dim_i] = ins1[count_i][dim_i] * ins2[count_i][dim_i]; } } else { vals[count_i][dim_i] = (1 - drop_factor) * ins1[count_i][dim_i] * ins2[count_i][dim_i]; } } } void PMultiForward(const std::vector<dtype*> &ins1, const std::vector<dtype*> &ins2, int count, int dim, bool on_training, const dtype* drop_mask, dtype dropout, std::vector<dtype*> &vals) { int block_count = DefaultBlockCount(count * dim); NumberPointerArray ins1_arr, ins2_arr, vals_arr; ins1_arr.init((dtype**)ins1.data(), count); ins2_arr.init((dtype**)ins2.data(), count); vals_arr.init((dtype**)vals.data(), count); if (dropout < 0) { dropout = 0; } KernelPMultiForward<<<block_count, TPB>>>((const dtype**)ins1_arr.value, (const dtype**)ins2_arr.value, count, dim, on_training,drop_mask, dropout, vals_arr.value); } __global__ void KernelPMultiBackward(const dtype **losses, const dtype **in_vals1, const dtype **in_vals2, int count, int dim, const dtype *drop_mask, dtype drop_factor, dtype** in_losses1, dtype** in_losses2) { int index = DeviceDefaultIndex(); int step = DeviceDefaultStep(); for (int i = index; i < count * dim; i += step) { int count_i = i / dim; int dim_i = i % dim; dtype dropout = drop_factor > 0 ? drop_mask[dim_i * count + count_i] : 1; if (drop_factor < dropout) { DeviceAtomicAdd(in_losses1[count_i] + dim_i, losses[count_i][dim_i] * in_vals2[count_i][dim_i]); DeviceAtomicAdd(in_losses2[count_i] + dim_i, losses[count_i][dim_i] * in_vals1[count_i][dim_i]); } } } void PMultiBackward(const std::vector<dtype*> &losses, const std::vector<dtype*> &in_vals1, const std::vector<dtype*> &in_vals2, int count, int dim, const dtype* drop_mask, dtype drop_factor, std::vector<dtype*> &in_losses1, std::vector<dtype*> &in_losses2) { int block_count = DefaultBlockCount(count * dim); NumberPointerArray losses_arr, in_vals1_arr, in_vals2_arr, in_losses1_arr, in_losses2_arr; losses_arr.init((dtype**)losses.data(), losses.size()); in_vals1_arr.init((dtype**)in_vals1.data(), in_vals1.size()); in_vals2_arr.init((dtype**)in_vals2.data(), in_vals2.size()); in_losses1_arr.init((dtype**)in_losses1.data(), in_losses1.size()); in_losses2_arr.init((dtype**)in_losses2.data(), in_losses2.size()); KernelPMultiBackward<<<block_count, TPB>>>((const dtype**)losses_arr.value, (const dtype**)in_vals1_arr.value, (const dtype**)in_vals2_arr.value, count, dim, drop_mask, drop_factor, in_losses1_arr.value, in_losses2_arr.value); } __global__ void KernelPAddForward(const dtype*** ins, int count, int dim, int in_count, const dtype *drop_mask, dtype drop_factor, dtype **vals) { int index = DeviceDefaultIndex(); int step = DeviceDefaultStep(); for (int i = index; i < count * dim; i+= step) { int count_i = i / dim; int dim_i = i % dim; dtype dropout = drop_factor > 0 ? 
drop_mask[dim_i * count + count_i] : 1; if (drop_factor < dropout) { dtype sum = ins[0][count_i][dim_i]; for (int j = 1; j < in_count; ++j) { sum += ins[j][count_i][dim_i]; } vals[count_i][dim_i] = sum; } else { vals[count_i][dim_i] = 0.0f; } } } void PAddForward(const std::vector<std::vector<dtype*>> &ins, int count, int dim, int in_count, const dtype *drop_mask, dtype drop_factor, std::vector<dtype*> &vals) { std::vector<std::shared_ptr<NumberPointerArray>> gpu_addr; gpu_addr.reserve(ins.size()); for (const std::vector<dtype*> &x : ins) { std::shared_ptr<NumberPointerArray> arr = std::make_shared<NumberPointerArray>(); arr->init((dtype**)x.data(), x.size()); gpu_addr.push_back(arr); } std::vector<dtype**> ins_gpu; ins_gpu.reserve(ins.size()); for (auto &ptr : gpu_addr) { ins_gpu.push_back(ptr->value); } NumberPointerPointerArray in_arr; in_arr.init(ins_gpu.data(), ins_gpu.size()); NumberPointerArray out_arr; out_arr.init(vals.data(), vals.size()); int block_count = DefaultBlockCount(count * dim); KernelPAddForward<<<block_count, TPB>>>((const dtype***)in_arr.value, count, dim, in_count, drop_mask, drop_factor, out_arr.value); } __global__ void KernelPAddBackward(const dtype **losses, int count, int dim, int in_count, const dtype *drop_mask, dtype drop_factor, dtype ***in_losses) { int index = DeviceDefaultIndex(); int step = DeviceDefaultStep(); int dim_mul_count = dim * count; for (int i = index; i < dim_mul_count * in_count; i += step) { int in_count_i = i / dim_mul_count; int dim_mul_count_i = i % dim_mul_count; int count_i = dim_mul_count_i / dim; int dim_i = dim_mul_count_i % dim; dtype dropout = drop_factor > 0 ? drop_mask[dim_i * count + count_i] : 1; if (drop_factor < dropout) { DeviceAtomicAdd(in_losses[in_count_i][count_i] + dim_i, losses[count_i][dim_i]); } } } void PAddBackward(const std::vector<dtype*> &losses, int count, int dim, int in_count, const dtype *drop_mask, dtype drop_factor, std::vector<std::vector<dtype*>> &in_losses) { std::vector<std::shared_ptr<NumberPointerArray>> gpu_addr; gpu_addr.reserve(in_losses.size()); for (const std::vector<dtype*> &x : in_losses) { std::shared_ptr<NumberPointerArray> arr = std::make_shared<NumberPointerArray>(); arr->init((dtype**)x.data(), x.size()); gpu_addr.push_back(arr); } std::vector<dtype**> in_losses_gpu; in_losses_gpu.reserve(in_losses.size()); for (auto &ptr : gpu_addr) { in_losses_gpu.push_back(ptr->value); } NumberPointerPointerArray in_loss_arr; in_loss_arr.init(in_losses_gpu.data(), in_losses_gpu.size()); NumberPointerArray out_loss_arr; out_loss_arr.init((dtype**)losses.data(), losses.size()); int block_count = DefaultBlockCount(in_count * count * dim); KernelPAddBackward<<<block_count, TPB>>>((const dtype**)out_loss_arr.value, count, dim, in_count, drop_mask, drop_factor, in_loss_arr.value); } int NextTwoIntegerPowerNumber(int number) { int result = 1; while (number > result) { result <<= 1; } return result; } __global__ void KernelSoftMaxLoss(const dtype **vals, dtype **losses, int *correct_count, int *answers, int batchsize, int count, int dim) { volatile __shared__ int opt_label; volatile __shared__ dtype shared_val[TPB]; volatile __shared__ int64_t max_indexes[TPB]; volatile __shared__ dtype scores_sum[TPB]; volatile __shared__ dtype scores[TPB]; int dim_i = threadIdx.x; int count_i = blockIdx.x; if (count_i == 0 && dim_i == 0) { *correct_count = 0; } shared_val[dim_i] = dim_i < dim ? 
vals[count_i][dim_i] : -INFINITY; max_indexes[dim_i] = dim_i; __syncthreads(); for (int i = (blockDim.x >> 1); i > 0; i >>= 1) { if (shared_val[threadIdx.x + i] > shared_val[threadIdx.x]) { // race shared_val[threadIdx.x] = shared_val[threadIdx.x + i]; // race max_indexes[threadIdx.x] = max_indexes[threadIdx.x + i]; // race } __syncthreads(); } if (threadIdx.x == 0) { opt_label = max_indexes[0]; if (answers[count_i] == opt_label) { atomicAdd(correct_count, 1); } } __syncthreads(); dtype max_score = vals[count_i][opt_label]; dtype score = dim_i < dim ? cuda_exp(vals[count_i][dim_i] - max_score) : 0.0f; scores[dim_i] = score; scores_sum[dim_i] = score; for (int i = (blockDim.x >> 1); i > 0; i >>= 1) { scores_sum[threadIdx.x] = scores_sum[threadIdx.x] + scores_sum[threadIdx.x + i]; // race __syncthreads(); } if (dim_i < dim) { losses[count_i][dim_i] = (scores[dim_i] / scores_sum[0] - (dim_i == answers[count_i] ? 1 : 0)) / batchsize; } } void SoftMaxLoss(const std::vector<dtype*> &vals, std::vector<dtype*> &losses, int *correct_count, const std::vector<int> &answers, int batchsize, int count, int dim) { if (dim > TPB) { abort(); } int thread_count = NextTwoIntegerPowerNumber(dim); NumberPointerArray val_arr; val_arr.init((dtype**)vals.data(), vals.size()); NumberPointerArray loss_arr; loss_arr.init((dtype**)losses.data(), losses.size()); IntArray answer_arr; answer_arr.init((int*)answers.data(), answers.size()); KernelSoftMaxLoss<<<count, thread_count>>>( const_cast<const dtype **>(val_arr.value), const_cast<dtype **>(loss_arr.value), correct_count, answer_arr.value, batchsize, count, dim); } __global__ void Predict(const dtype *val, int dim, int *result) { __shared__ volatile dtype shared_vals[TPB]; __shared__ volatile dtype shared_indexes[TPB]; int index = DeviceDefaultIndex(); shared_indexes[threadIdx.x] = threadIdx.x; if (index < threadIdx.x) { shared_vals[threadIdx.x] = val[threadIdx.x]; } else { shared_vals[threadIdx.x] = -10000000.0f; } __syncthreads(); for (int i = (blockDim.x >> 1); i > 0; i >>= 1) { if (shared_vals[threadIdx.x] > shared_vals[threadIdx.x + i]) { shared_vals[threadIdx.x] = shared_vals[threadIdx.x + i]; shared_indexes[threadIdx.x] = threadIdx.x + i; } __syncthreads(); } if (threadIdx.x == 0) { *result = shared_indexes[0]; } } int Predict(const dtype* val, int dim) { if (dim > TPB) { abort(); } int thread_count = NextTwoIntegerPowerNumber(dim); DeviceInt result; result.init(); Predict<<<1, thread_count>>>(val, dim, result.value); result.copyFromDeviceToHost(); return result.v; } __global__ void KernelSquareSum(const dtype *v, int len, dtype *global_sum, int *block_counter, dtype *result) { __shared__ volatile dtype shared_sum[TPB]; __shared__ volatile bool is_last_block; int index = DeviceDefaultIndex(); if (index == 0) { *block_counter = 0; } if (threadIdx.x == 0) { is_last_block = false; } shared_sum[threadIdx.x] = 0.0f; for (int i = index; i < len; i += blockDim.x * gridDim.x) { shared_sum[threadIdx.x] += v[i] * v[i]; } __syncthreads(); for (int i = (blockDim.x >> 1); i > 0; i >>= 1) { if (threadIdx.x < i) { shared_sum[threadIdx.x] += shared_sum[threadIdx.x + i]; } __syncthreads(); } if (threadIdx.x == 0) { global_sum[blockIdx.x] = shared_sum[0]; if (atomicAdd(block_counter, 1) == gridDim.x - 1) { is_last_block = true; } } __syncthreads(); if (is_last_block) { float sum = 0.0f; for (int i = threadIdx.x; i < gridDim.x; i += blockDim.x) { sum += global_sum[i]; } shared_sum[threadIdx.x] = sum; __syncthreads(); for (int i = (blockDim.x >> 1); i > 0; i >>= 1) { if 
(threadIdx.x < i) { shared_sum[threadIdx.x] += shared_sum[threadIdx.x + i]; } __syncthreads(); } if (threadIdx.x == 0) { *result = shared_sum[0]; } } } dtype SquareSum(const dtype *v, int len) { int block_count = DefaultBlockCount(len); NumberArray global_sum; global_sum.init(block_count); DeviceInt block_counter; block_counter.init(); DeviceNumber result; result.init(); KernelSquareSum<<<block_count, TPB>>>(v, len, global_sum.value, block_counter.value, result.value); result.copyFromDeviceToHost(); return result.v; } __global__ void KernelSquareSum(const dtype *v, const bool *indexers, int count, int dim, dtype *global_sum, int *block_counter, dtype *result) { __shared__ volatile dtype shared_sum[TPB]; __shared__ volatile bool is_last_block; int index = DeviceDefaultIndex(); if (index == 0) { *block_counter = 0; } if (threadIdx.x == 0) { global_sum[blockIdx.x] = 0.0f; is_last_block = false; } int count_i = index / dim; if (index < count * dim && indexers[count_i]) { shared_sum[threadIdx.x] = v[index] * v[index]; } else { shared_sum[threadIdx.x] = 0.0f; } __syncthreads(); for (int i = (blockDim.x >> 1); i > 0; i >>= 1) { if (threadIdx.x < i) { shared_sum[threadIdx.x] += shared_sum[threadIdx.x + i]; } __syncthreads(); } if (threadIdx.x == 0) { global_sum[blockIdx.x] = shared_sum[0]; if (atomicAdd(block_counter, 1) == gridDim.x - 1) { is_last_block = true; } } __syncthreads(); if (is_last_block) { float sum = 0.0f; for (int i = threadIdx.x; i < gridDim.x; i += blockDim.x) { sum += global_sum[i]; } shared_sum[threadIdx.x] = sum; __syncthreads(); for (int i = (blockDim.x >> 1); i > 0; i >>= 1) { if (threadIdx.x < i) { shared_sum[threadIdx.x] += shared_sum[threadIdx.x + i]; } __syncthreads(); } if (threadIdx.x == 0) { *result = shared_sum[0]; } } } dtype SquareSum(const dtype *v, const bool *indexers, int count, int dim) { int block_count = DefaultBlockCountWithoutLimit(count * dim); NumberArray global_sum; global_sum.init(block_count); DeviceInt block_counter; block_counter.init(); DeviceNumber result; result.init(); KernelSquareSum<<<block_count, TPB>>>(v, indexers, count, dim, global_sum.value, block_counter.value, result.value); result.copyFromDeviceToHost(); return result.v; } __global__ void KernelRescale(dtype *v, int len, dtype scale) { int index = DeviceDefaultIndex(); int step = DeviceDefaultStep(); for (int i = index; i < len; i += step) { v[i] *= scale; } } void Rescale(dtype *v, int len, dtype scale) { int block_count = DefaultBlockCount(len); KernelRescale<<<block_count, TPB>>>(v, len, scale); } __global__ void KernelUpdateAdam(dtype *val, dtype *grad, int row, int col, dtype *aux_mean, dtype *aux_square, int iter, dtype belta1, dtype belta2, dtype alpha, dtype reg, dtype eps, dtype x) { int index = DeviceDefaultIndex(); int step = DeviceDefaultStep(); int len = row * col; for (int i = index; i < len; i += step) { if (row > 1 && col > 1) { grad[i] += val[i] * reg; } aux_mean[i] = belta1 * aux_mean[i] + (1 - belta1) * grad[i]; aux_square[i] = belta2 * aux_square[i] + (1 - belta2) * grad[i] * grad[i]; dtype lr_t = alpha * cuda_sqrt(1 - cuda_pow(belta2, iter + 1)) * x; dtype square_plus_eps = aux_square[i] + eps; val[i] = val[i] - aux_mean[i] * lr_t / cuda_sqrt(square_plus_eps); } } void UpdateAdam(dtype *val, dtype *grad, int row, int col, dtype *aux_mean, dtype *aux_square, int iter, dtype belta1, dtype belta2, dtype alpha, dtype reg, dtype eps) { int block_count = DefaultBlockCount(row * col); dtype x = 1.0f / (1 - pow(belta1, iter + 1)); KernelUpdateAdam<<<block_count, 
TPB>>>(val, grad, row, col, aux_mean, aux_square, iter, belta1, belta2, alpha, reg, eps, x); } __global__ void KernelUpdateAdam(dtype *val, dtype *grad, int row, int col, dtype *aux_mean, dtype *aux_square, const bool *indexers, int *iters, dtype belta1, dtype belta2, dtype alpha, dtype reg, dtype eps) { int index = DeviceDefaultIndex(); int step = DeviceDefaultStep(); int len = row * col; for (int i = index; i < len; i += step) { int count_i = i / col; if (indexers[count_i]) { if (row > 1 && col > 1) { grad[i] += val[i] * reg; } aux_mean[i] = belta1 * aux_mean[i] + (1 - belta1) * grad[i]; aux_square[i] = belta2 * aux_square[i] + (1 - belta2) * grad[i] * grad[i]; dtype lr_t = alpha * cuda_sqrt(1 - cuda_pow(belta2, iters[count_i] + 1)) / (1 - cuda_pow(belta1, iters[count_i] + 1)); dtype square_plus_eps = aux_square[i] + eps; val[i] = val[i] - aux_mean[i] * lr_t / cuda_sqrt(square_plus_eps); } } } __global__ void KernelSelfPlusIters(const bool *indexers, int *iters, int count) { int index = DeviceDefaultIndex(); int step = DeviceDefaultStep(); for (int i = index; i < count; i += step) { if (indexers[i]) { ++iters[i]; } } } void UpdateAdam(dtype *val, dtype *grad, int row, int col, dtype *aux_mean, dtype *aux_square, const bool *indexers, int *iters, dtype belta1, dtype belta2, dtype alpha, dtype reg, dtype eps) { int block_count = DefaultBlockCount(row * col); KernelUpdateAdam<<<block_count, TPB>>>(val, grad, row, col, aux_mean, aux_square, indexers, iters, belta1, belta2, alpha, reg, eps); block_count = DefaultBlockCount(row); KernelSelfPlusIters<<<block_count, TPB>>>(indexers, iters, row); } __global__ void KernelUpdateAdagrad(dtype *val, dtype *grad, int row, int col, dtype *aux_square, dtype alpha, dtype reg, dtype eps) { int index = DeviceDefaultIndex(); int step = DeviceDefaultStep(); int len = row * col; for (int i = index; i < len; i += step) { if (row > 1 && col > 1) { grad[i] += val[i] * reg; } aux_square[i] = aux_square[i] + grad[i] * grad[i]; val[i] = val[i] - grad[i] * alpha / cuda_sqrt(aux_square[i] + eps); } } void UpdateAdagrad(dtype *val, dtype *grad, int row, int col, dtype *aux_square, dtype alpha, dtype reg, dtype eps) { int block_count = DefaultBlockCount(row * col); KernelUpdateAdagrad<<<block_count, TPB>>>(val, grad, row, col, aux_square, alpha, reg, eps); } __global__ void KernelUpdateAdagrad(dtype *val, dtype *grad, int row, int col, dtype *aux_square, const bool *indexers, dtype alpha, dtype reg, dtype eps) { int index = DeviceDefaultIndex(); int step = DeviceDefaultStep(); int len = row * col; for (int i = index; i < len; i += step) { int count_i = i / col; if (indexers[count_i]) { if (row > 1 && col > 1) { grad[i] += val[i] * reg; } aux_square[i] = aux_square[i] + grad[i] * grad[i]; val[i] = val[i] - grad[i] * alpha / cuda_sqrt(aux_square[i] + eps); } } } void UpdateAdagrad(dtype *val, dtype *grad, int row, int col, dtype *aux_square, const bool *indexers, dtype alpha, dtype reg, dtype eps) { int block_count = DefaultBlockCount(row * col); KernelUpdateAdagrad<<<block_count, TPB>>>(val, grad, row, col, aux_square, indexers, alpha, reg, eps); } void *GraphHostAlloc() { void *m; CallCuda(cudaHostAlloc(&m, 10000000, cudaHostAllocWriteCombined)); if (m == NULL) { abort(); } return m; } }
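The KernelSoftMaxLoss reduction above flags several of its own shared-memory updates with "// race" comments: both the max search and the scores_sum loop read element threadIdx.x + i while every thread is also writing, without restricting the writers to threadIdx.x < i, and the Predict kernel's guard "if (index < threadIdx.x)" appears to compare the wrong variables (with a single block it is never true). As a point of comparison only, here is a minimal race-free argmax reduction sketch; the kernel name, the fixed 1024-element shared arrays, and the use of float for dtype are my own assumptions, not code from this project.

// Sketch only: a guarded shared-memory argmax reduction. Assumes blockDim.x
// is a power of two (as produced by NextTwoIntegerPowerNumber above) and at
// most 1024; dtype is taken to be float here so the sketch is self-contained.
__global__ void KernelArgMaxSketch(const float *vals, int dim, int *opt_index) {
    __shared__ float shared_val[1024];
    __shared__ int shared_idx[1024];

    int tid = threadIdx.x;
    // Threads beyond dim hold -INFINITY so they can never win the comparison.
    shared_val[tid] = tid < dim ? vals[tid] : -INFINITY;
    shared_idx[tid] = tid;
    __syncthreads();

    for (int stride = blockDim.x >> 1; stride > 0; stride >>= 1) {
        // Only the lower half writes, and the barrier stays outside the
        // branch, so every thread reaches it on every iteration.
        if (tid < stride && shared_val[tid + stride] > shared_val[tid]) {
            shared_val[tid] = shared_val[tid + stride];
            shared_idx[tid] = shared_idx[tid + stride];
        }
        __syncthreads();
    }

    if (tid == 0) {
        *opt_index = shared_idx[0];
    }
}

This is the same pattern KernelSquareSum above already uses correctly: only threads below the current stride write, and __syncthreads() sits outside the conditional so no thread skips it.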
b1c60b26dfec089128b14b3cb62c04a0c5a4aea0.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <cstdio> #include <iostream> //Kernel program for the device(GPU): compiled by NVCC __global__ void mulKernel(int*c, const int* a, const int*b,const int WIDTH){ int x = threadIdx.x; int y = threadIdx.y; int i = y * WIDTH + x; //[y][x] = y * WIDTH + x; int sum = 0; for(int k = 0; k < WIDTH; ++k){ sum += a[y * WIDTH + k] * b[k * WIDTH + x]; } c[i] = sum; } void cpuCode(){ //host - side data const int WIDTH = 5; int a[WIDTH][WIDTH]; int b[WIDTH][WIDTH]; int c[WIDTH][WIDTH] = {0}; //make matrices A,B for(int y = 0; y < WIDTH; ++y){ for(int x = 0; x < WIDTH; ++x){ a[y][x] = y + x; b[y][x] = y + x; } } //calculation code for(int y=0; y < WIDTH; ++y){ for(int x = 0; x < WIDTH; ++x){ int sum = 0; for (int k = 0; k < WIDTH ; ++k){ sum += a[y][k]*b[k][x]; } c[y][x] = sum; } } //print the result for(int y = 0; y < WIDTH ; ++y){ for(int x = 0 ; x < WIDTH ; ++x){ printf("%5d",c[y][x]); } printf("\n"); } } void cudaCode(){ //host-side data const int WIDTH = 5; int a[WIDTH][WIDTH]; int b[WIDTH][WIDTH]; int c[WIDTH][WIDTH] = { 0 }; //make a ,b matrices for(int y = 0; y < WIDTH; ++y){ for(int x = 0; x < WIDTH; ++x){ a[y][x] = y + x; b[y][x] = y + x; } } //allocate memory on the device //device-side data int *dev_a = 0; int *dev_b = 0; int *dev_c = 0; //allocate device memory hipMalloc((void**)&dev_a, WIDTH * WIDTH * sizeof(int)); hipMalloc((void**)&dev_b, WIDTH * WIDTH * sizeof(int)); hipMalloc((void**)&dev_c, WIDTH*WIDTH * sizeof(int)); //copy from host to device hipMemcpy(dev_a,a,WIDTH * WIDTH * sizeof(int),hipMemcpyHostToDevice); //dev_a = a; hipMemcpy(dev_b, b,WIDTH * WIDTH * sizeof(int),hipMemcpyHostToDevice); // dev_b = b; //launch a kernel on the GPU with one thread for each element dim3 dimBlock(WIDTH , WIDTH, 1 ); // x, y, z hipLaunchKernelGGL(( mulKernel), dim3(1) , dim3(dimBlock), 0, 0, dev_c, dev_a, dev_b, WIDTH); //CUDA_CHECK(hipPeekAtLastError()); //copy from device to host hipMemcpy(c,dev_c,WIDTH * WIDTH * sizeof(int),hipMemcpyDeviceToHost); //c = dev_c; //free device memory hipFree(dev_c); hipFree(dev_a); hipFree(dev_b); //print the result for(int y = 0; y < WIDTH ; ++y){ for(int x = 0 ; x < WIDTH ; ++x){ printf("%d ",c[y][x]); } printf("\n"); } } int main(void){ cpuCode(); std::cout << "+++++"<<std::endl; cudaCode(); return 0; }
b1c60b26dfec089128b14b3cb62c04a0c5a4aea0.cu
#include <cstdio> #include <iostream> //Kernel program for the device(GPU): compiled by NVCC __global__ void mulKernel(int*c, const int* a, const int*b,const int WIDTH){ int x = threadIdx.x; int y = threadIdx.y; int i = y * WIDTH + x; //[y][x] = y * WIDTH + x; int sum = 0; for(int k = 0; k < WIDTH; ++k){ sum += a[y * WIDTH + k] * b[k * WIDTH + x]; } c[i] = sum; } void cpuCode(){ //host - side data const int WIDTH = 5; int a[WIDTH][WIDTH]; int b[WIDTH][WIDTH]; int c[WIDTH][WIDTH] = {0}; //make matrices A,B for(int y = 0; y < WIDTH; ++y){ for(int x = 0; x < WIDTH; ++x){ a[y][x] = y + x; b[y][x] = y + x; } } //calculation code for(int y=0; y < WIDTH; ++y){ for(int x = 0; x < WIDTH; ++x){ int sum = 0; for (int k = 0; k < WIDTH ; ++k){ sum += a[y][k]*b[k][x]; } c[y][x] = sum; } } //print the result for(int y = 0; y < WIDTH ; ++y){ for(int x = 0 ; x < WIDTH ; ++x){ printf("%5d",c[y][x]); } printf("\n"); } } void cudaCode(){ //host-side data const int WIDTH = 5; int a[WIDTH][WIDTH]; int b[WIDTH][WIDTH]; int c[WIDTH][WIDTH] = { 0 }; //make a ,b matrices for(int y = 0; y < WIDTH; ++y){ for(int x = 0; x < WIDTH; ++x){ a[y][x] = y + x; b[y][x] = y + x; } } //allocate memory on the device //device-side data int *dev_a = 0; int *dev_b = 0; int *dev_c = 0; //allocate device memory cudaMalloc((void**)&dev_a, WIDTH * WIDTH * sizeof(int)); cudaMalloc((void**)&dev_b, WIDTH * WIDTH * sizeof(int)); cudaMalloc((void**)&dev_c, WIDTH*WIDTH * sizeof(int)); //copy from host to device cudaMemcpy(dev_a,a,WIDTH * WIDTH * sizeof(int),cudaMemcpyHostToDevice); //dev_a = a; cudaMemcpy(dev_b, b,WIDTH * WIDTH * sizeof(int),cudaMemcpyHostToDevice); // dev_b = b; //launch a kernel on the GPU with one thread for each element dim3 dimBlock(WIDTH , WIDTH, 1 ); // x, y, z mulKernel<<<1 , dimBlock>>> (dev_c, dev_a, dev_b, WIDTH); //CUDA_CHECK(cudaPeekAtLastError()); //copy from device to host cudaMemcpy(c,dev_c,WIDTH * WIDTH * sizeof(int),cudaMemcpyDeviceToHost); //c = dev_c; //free device memory cudaFree(dev_c); cudaFree(dev_a); cudaFree(dev_b); //print the result for(int y = 0; y < WIDTH ; ++y){ for(int x = 0 ; x < WIDTH ; ++x){ printf("%d ",c[y][x]); } printf("\n"); } } int main(void){ cpuCode(); std::cout << "+++++"<<std::endl; cudaCode(); return 0; }
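The launch in cudaCode() leaves CUDA_CHECK(cudaPeekAtLastError()) commented out, and none of the cudaMalloc/cudaMemcpy return codes are inspected, so a failed allocation or launch would go unnoticed. CUDA_CHECK is not defined anywhere in this file; a minimal definition along these lines would do the job, but treat it as a sketch rather than the project's own macro.

// Hypothetical definition of the CUDA_CHECK used in the commented-out line
// above; it is not part of this file or of the CUDA runtime API.
#include <cstdio>
#include <cstdlib>
#include <cuda_runtime.h>

#define CUDA_CHECK(expr)                                              \
    do {                                                              \
        cudaError_t err_ = (expr);                                    \
        if (err_ != cudaSuccess) {                                    \
            fprintf(stderr, "CUDA error '%s' at %s:%d\n",             \
                    cudaGetErrorString(err_), __FILE__, __LINE__);    \
            exit(EXIT_FAILURE);                                       \
        }                                                             \
    } while (0)

// Typical use around the kernel launch in cudaCode():
//   mulKernel<<<1, dimBlock>>>(dev_c, dev_a, dev_b, WIDTH);
//   CUDA_CHECK(cudaPeekAtLastError());      // catches launch-configuration errors
//   CUDA_CHECK(cudaDeviceSynchronize());    // surfaces errors raised inside the kernel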
30bea43fd373a9391b6623d2ca494e2171eb783c.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /** * @device matview_transpose * Created on: Apr 17 2018 * @author: haili * the size of tensor is m×n×k×l */ __global__ void d_batch_transpose(float* A,float* T,const int m, const int n,const int batch){ int tid=blockDim.x*blockIdx.x+threadIdx.x; int t_n=blockDim.x*gridDim.x; while(tid<m*n*batch){ A[(tid/(m*n))*n*m+(tid%(m*n))/n+((tid%(m*n))%n)*m]=T[tid]; tid=tid+t_n; __syncthreads(); } }
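One detail worth flagging in d_batch_transpose: the __syncthreads() call sits inside a loop whose trip count depends on tid, so threads that finish early (or never enter the loop) skip the barrier while others wait at it, which is undefined behavior in both HIP and CUDA. Since the kernel touches no shared memory, the barrier can simply be dropped; a grid-stride sketch with the same index math, under my own function name, could look like this.

// Sketch only: the same batched transpose index math as d_batch_transpose
// above, written as a plain grid-stride loop. No shared memory is used, so
// no barrier is needed.
__global__ void d_batch_transpose_nosync(float* A, const float* T,
                                         const int m, const int n,
                                         const int batch) {
    int stride = blockDim.x * gridDim.x;
    for (int tid = blockDim.x * blockIdx.x + threadIdx.x;
         tid < m * n * batch;
         tid += stride) {
        int b   = tid / (m * n);        // which matrix in the batch
        int row = (tid % (m * n)) / n;  // row in the m x n source
        int col = (tid % (m * n)) % n;  // column in the m x n source
        A[b * n * m + col * m + row] = T[tid];
    }
}

The output index b*n*m + col*m + row is unchanged from the original, i.e. each m×n matrix in the batch is written out as its n×m transpose.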
30bea43fd373a9391b6623d2ca494e2171eb783c.cu
/** * @device matview_transpose * Created on: Apr 17 2018 * @author: haili * the size of tensor is m×n×k×l */ __global__ void d_batch_transpose(float* A,float* T,const int m, const int n,const int batch){ int tid=blockDim.x*blockIdx.x+threadIdx.x; int t_n=blockDim.x*gridDim.x; while(tid<m*n*batch){ A[(tid/(m*n))*n*m+(tid%(m*n))/n+((tid%(m*n))%n)*m]=T[tid]; tid=tid+t_n; __syncthreads(); } }
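For completeness, a small host-side driver for the kernel above, assuming it is appended to the same .cu file; the tensor sizes and the one-thread-per-element launch are illustrative choices of mine. Launching with exactly m*n*batch threads keeps every thread's loop trip count identical, which sidesteps the divergent __syncthreads() issue noted after the hipified copy.

// Illustrative driver only; sizes, launch shape and the printed check are my
// own choices, not taken from the original project.
#include <cstdio>
#include <vector>
#include <cuda_runtime.h>

int main() {
    const int m = 3, n = 4, batch = 2;
    const int total = m * n * batch;

    std::vector<float> h_in(total), h_out(total);
    for (int i = 0; i < total; ++i) h_in[i] = (float)i;  // input laid out as batch x m x n

    float *d_in = nullptr, *d_out = nullptr;
    cudaMalloc(&d_in,  total * sizeof(float));
    cudaMalloc(&d_out, total * sizeof(float));
    cudaMemcpy(d_in, h_in.data(), total * sizeof(float), cudaMemcpyHostToDevice);

    // One thread per element: every thread runs the while loop exactly once,
    // so all of them reach the kernel's __syncthreads() together.
    d_batch_transpose<<<1, total>>>(d_out, d_in, m, n, batch);
    cudaMemcpy(h_out.data(), d_out, total * sizeof(float), cudaMemcpyDeviceToHost);

    // Element (row=1, col=2) of batch 0 should land at col*m + row in the output.
    printf("in[row1,col2]=%g  out[col2,row1]=%g\n", h_in[1 * n + 2], h_out[2 * m + 1]);

    cudaFree(d_in);
    cudaFree(d_out);
    return 0;
}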
a3fc9815aa087a23507a5c0387478a52eab9fade.hip
// !!! This is a file automatically generated by hipify!!! /* Copyright (c) 2011, T. Kroes <[email protected]> All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: - Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. - Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. - Neither the name of the <ORGANIZATION> nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include "Core.cuh" texture<short, hipTextureType3D, hipReadModeNormalizedFloat> gTexDensity; texture<short, hipTextureType3D, hipReadModeNormalizedFloat> gTexGradientMagnitude; texture<float, hipTextureType3D, hipReadModeElementType> gTexExtinction; texture<float, hipTextureType1D, hipReadModeElementType> gTexOpacity; texture<float4, hipTextureType1D, hipReadModeElementType> gTexDiffuse; texture<float4, hipTextureType1D, hipReadModeElementType> gTexSpecular; texture<float, hipTextureType1D, hipReadModeElementType> gTexRoughness; texture<float4, hipTextureType1D, hipReadModeElementType> gTexEmission; texture<uchar4, hipTextureType2D, hipReadModeNormalizedFloat> gTexRunningEstimateRgba; texture<float4, hipTextureType3D, hipReadModeElementType> gTexOpacityGradient; texture<float, hipTextureType3D, hipReadModeNormalizedFloat> gTexOpacityMagnitudeNormalized; texture<float, hipTextureType3D, hipReadModeElementType> gTexLightPaths; hipArray* gpDensityArray = NULL; hipArray* gpLightPathsArray = NULL; hipArray* gpGradientMagnitudeArray = NULL; hipArray* gpOpacityArray = NULL; hipArray* gpDiffuseArray = NULL; hipArray* gpSpecularArray = NULL; hipArray* gpRoughnessArray = NULL; hipArray* gpEmissionArray = NULL; CD float3 gAaBbMin; CD float3 gAaBbMax; CD float3 gInvAaBbMin; CD float3 gInvAaBbMax; CD float3 gWorldToTextureTransform; CD float3 gVoxelTextureOffset; CD float gIntensityMin; CD float gIntensityMax; CD float gIntensityRange; CD float gIntensityInvRange; CD float gStepSize; CD float gStepSizeShadow; CD float gDensityScale; CD float3 gVoxelSizeWorld; CD float3 gInvGradientDelta; CD float3 gVoxelSizeWorldX; CD float3 gVoxelSizeWorldY; CD float3 gVoxelSizeWorldZ; CD int gFilmWidth; CD int gFilmHeight; CD int gFilmNoPixels; CD int gFilterWidth; CD float gFilterWeights[10]; CD float gExposure; CD float gInvExposure; CD float gGamma; CD float gInvGamma; CD float gDenoiseEnabled; CD float gDenoiseWindowRadius; CD float gDenoiseInvWindowArea; CD float gDenoiseNoise; CD float 
gDenoiseWeightThreshold; CD float gDenoiseLerpThreshold; CD float gDenoiseLerpC; CD float gNoIterations; CD float gInvNoIterations; CD float gScatteringHeadstart; CD float gGradientPower; CD float3 gSpacings; #define TF_NO_SAMPLES 128 #define INV_TF_NO_SAMPLES (1.0f / (float)TF_NO_SAMPLES) #include "Model.cuh" #include "View.cuh" #include "Blur.cuh" #include "Denoise.cuh" #include "Estimate.cuh" #include "Utilities.cuh" #include "SingleScattering.cuh" #include "MultipleScattering.cuh" #include "NearestIntersection.cuh" #include "SpecularBloom.cuh" #include "ToneMap.cuh" #include "MultipleScatteringPropertyBased.cuh" #include "MCKernelUtilities.cuh" #include <iostream> CCudaModel gModel; CCudaView gRenderCanvasView; CCudaView gNavigatorView; void BindDensityBuffer(short* pBuffer, hipExtent Extent, hipTextureFilterMode filtermode) { hipChannelFormatDesc ChannelDesc = hipCreateChannelDesc<short>(); HandleCudaError(hipMalloc3DArray(&gpDensityArray, &ChannelDesc, Extent)); hipMemcpy3DParms CopyParams = {0}; CopyParams.srcPtr = make_hipPitchedPtr(pBuffer, Extent.width * sizeof(short), Extent.width, Extent.height); CopyParams.dstArray = gpDensityArray; CopyParams.extent = Extent; CopyParams.kind = hipMemcpyHostToDevice; HandleCudaError(hipMemcpy3D(&CopyParams)); gTexDensity.normalized = true; gTexDensity.filterMode = filtermode; gTexDensity.addressMode[0] = hipAddressModeClamp; gTexDensity.addressMode[1] = hipAddressModeClamp; gTexDensity.addressMode[2] = hipAddressModeClamp; HandleCudaError(hipBindTextureToArray(gTexDensity, gpDensityArray, ChannelDesc)); } void BindGradientMagnitudeBuffer(short* pBuffer, hipExtent Extent) { hipChannelFormatDesc ChannelDesc = hipCreateChannelDesc<short>(); HandleCudaError(hipMalloc3DArray(&gpGradientMagnitudeArray, &ChannelDesc, Extent)); hipMemcpy3DParms CopyParams = {0}; CopyParams.srcPtr = make_hipPitchedPtr(pBuffer, Extent.width * sizeof(short), Extent.width, Extent.height); CopyParams.dstArray = gpGradientMagnitudeArray; CopyParams.extent = Extent; CopyParams.kind = hipMemcpyHostToDevice; HandleCudaError(hipMemcpy3D(&CopyParams)); gTexGradientMagnitude.normalized = true; gTexGradientMagnitude.filterMode = hipFilterModeLinear; gTexGradientMagnitude.addressMode[0] = hipAddressModeClamp; gTexGradientMagnitude.addressMode[1] = hipAddressModeClamp; gTexGradientMagnitude.addressMode[2] = hipAddressModeClamp; HandleCudaError(hipBindTextureToArray(gTexGradientMagnitude, gpGradientMagnitudeArray, ChannelDesc)); } void UnbindDensityBuffer(void) { HandleCudaError(hipFreeArray(gpDensityArray)); gpDensityArray = NULL; HandleCudaError(hipUnbindTexture(gTexDensity)); } void UnbindGradientMagnitudeBuffer(void) { HandleCudaError(hipFreeArray(gpGradientMagnitudeArray)); gpGradientMagnitudeArray = NULL; HandleCudaError(hipUnbindTexture(gTexGradientMagnitude)); } void BindLightPathsBuffer(float* pBuffer, hipExtent Extent) { hipChannelFormatDesc ChannelDesc = hipCreateChannelDesc<float>(); HandleCudaError(hipMalloc3DArray(&gpLightPathsArray, &ChannelDesc, Extent)); hipMemcpy3DParms CopyParams = { 0 }; CopyParams.srcPtr = make_hipPitchedPtr(pBuffer, Extent.width * sizeof(float), Extent.width, Extent.height); CopyParams.dstArray = gpLightPathsArray; CopyParams.extent = Extent; CopyParams.kind = hipMemcpyHostToDevice; HandleCudaError(hipMemcpy3D(&CopyParams)); gTexLightPaths.normalized = true; gTexLightPaths.filterMode = hipFilterModePoint; gTexLightPaths.addressMode[0] = hipAddressModeClamp; gTexLightPaths.addressMode[1] = hipAddressModeClamp; gTexLightPaths.addressMode[2] = 
hipAddressModeClamp; HandleCudaError(hipBindTextureToArray(gTexLightPaths, gpLightPathsArray, ChannelDesc)); } void UnbindLightPathsBuffer(void) { HandleCudaError(hipFreeArray(gpLightPathsArray)); gpLightPathsArray = NULL; HandleCudaError(hipUnbindTexture(gTexLightPaths)); } void BindRenderCanvasView(const CResolution2D& Resolution) { gRenderCanvasView.Resize(Resolution); hipChannelFormatDesc Channel; Channel = hipCreateChannelDesc<uchar4>(); HandleCudaError(hipBindTexture2D(0, gTexRunningEstimateRgba, gRenderCanvasView.m_EstimateRgbaLdr.GetPtr(), Channel, gRenderCanvasView.GetWidth(), gRenderCanvasView.GetHeight(), gRenderCanvasView.m_EstimateRgbaLdr.GetPitch())); } void ResetRenderCanvasView(void) { gRenderCanvasView.Reset(); } void FreeRenderCanvasView(void) { gRenderCanvasView.Free(); } unsigned char* GetDisplayEstimate(void) { return (unsigned char*)gRenderCanvasView.m_DisplayEstimateRgbLdr.GetPtr(0, 0); } unsigned char* GetFrameDisplayEstimate(void) { return (unsigned char*)gRenderCanvasView.m_FrameEstimateXyza.GetPtr(0, 0); } void BindTransferFunctionOpacity(CTransferFunction& TransferFunctionOpacity) { gTexOpacity.normalized = true; gTexOpacity.filterMode = hipFilterModeLinear; gTexOpacity.addressMode[0] = hipAddressModeClamp; float Opacity[TF_NO_SAMPLES]; for (int i = 0; i < TF_NO_SAMPLES; i++) Opacity[i] = TransferFunctionOpacity.F((float)i * INV_TF_NO_SAMPLES).r; hipChannelFormatDesc ChannelDesc = hipCreateChannelDesc<float>(); if (gpOpacityArray == NULL) HandleCudaError(hipMallocArray(&gpOpacityArray, &ChannelDesc, TF_NO_SAMPLES, 1)); HandleCudaError(hipMemcpyToArray(gpOpacityArray, 0, 0, Opacity, TF_NO_SAMPLES * sizeof(float), hipMemcpyHostToDevice)); HandleCudaError(hipBindTextureToArray(gTexOpacity, gpOpacityArray, ChannelDesc)); } void UnbindTransferFunctionOpacity(void) { HandleCudaError(hipFreeArray(gpOpacityArray)); gpOpacityArray = NULL; HandleCudaError(hipUnbindTexture(gTexOpacity)); } void BindTransferFunctionDiffuse(CTransferFunction& TransferFunctionDiffuse) { gTexDiffuse.normalized = true; gTexDiffuse.filterMode = hipFilterModeLinear; gTexDiffuse.addressMode[0] = hipAddressModeClamp; float4 Diffuse[TF_NO_SAMPLES]; for (int i = 0; i < TF_NO_SAMPLES; i++) { Diffuse[i].x = TransferFunctionDiffuse.F((float)i * INV_TF_NO_SAMPLES).r; Diffuse[i].y = TransferFunctionDiffuse.F((float)i * INV_TF_NO_SAMPLES).g; Diffuse[i].z = TransferFunctionDiffuse.F((float)i * INV_TF_NO_SAMPLES).b; } hipChannelFormatDesc ChannelDesc = hipCreateChannelDesc<float4>(); if (gpDiffuseArray == NULL) HandleCudaError(hipMallocArray(&gpDiffuseArray, &ChannelDesc, TF_NO_SAMPLES, 1)); HandleCudaError(hipMemcpyToArray(gpDiffuseArray, 0, 0, Diffuse, TF_NO_SAMPLES * sizeof(float4), hipMemcpyHostToDevice)); HandleCudaError(hipBindTextureToArray(gTexDiffuse, gpDiffuseArray, ChannelDesc)); } void UnbindTransferFunctionDiffuse(void) { HandleCudaError(hipFreeArray(gpDiffuseArray)); gpDiffuseArray = NULL; HandleCudaError(hipUnbindTexture(gTexDiffuse)); } void BindTransferFunctionSpecular(CTransferFunction& TransferFunctionSpecular) { gTexSpecular.normalized = true; gTexSpecular.filterMode = hipFilterModeLinear; gTexSpecular.addressMode[0] = hipAddressModeClamp; float4 Specular[TF_NO_SAMPLES]; for (int i = 0; i < TF_NO_SAMPLES; i++) { Specular[i].x = TransferFunctionSpecular.F((float)i * INV_TF_NO_SAMPLES).r; Specular[i].y = TransferFunctionSpecular.F((float)i * INV_TF_NO_SAMPLES).g; Specular[i].z = TransferFunctionSpecular.F((float)i * INV_TF_NO_SAMPLES).b; } hipChannelFormatDesc ChannelDesc = 
hipCreateChannelDesc<float4>(); if (gpSpecularArray == NULL) HandleCudaError(hipMallocArray(&gpSpecularArray, &ChannelDesc, TF_NO_SAMPLES, 1)); HandleCudaError(hipMemcpyToArray(gpSpecularArray, 0, 0, Specular, TF_NO_SAMPLES * sizeof(float4), hipMemcpyHostToDevice)); HandleCudaError(hipBindTextureToArray(gTexSpecular, gpSpecularArray, ChannelDesc)); } void UnbindTransferFunctionSpecular(void) { HandleCudaError(hipFreeArray(gpSpecularArray)); gpSpecularArray = NULL; HandleCudaError(hipUnbindTexture(gTexSpecular)); } void BindTransferFunctionRoughness(CTransferFunction& TransferFunctionRoughness) { gTexRoughness.normalized = true; gTexRoughness.filterMode = hipFilterModeLinear; gTexRoughness.addressMode[0] = hipAddressModeClamp; float Roughness[TF_NO_SAMPLES]; for (int i = 0; i < TF_NO_SAMPLES; i++) Roughness[i] = TransferFunctionRoughness.F((float)i * INV_TF_NO_SAMPLES).r; hipChannelFormatDesc ChannelDesc = hipCreateChannelDesc<float>(); if (gpRoughnessArray == NULL) HandleCudaError(hipMallocArray(&gpRoughnessArray, &ChannelDesc, TF_NO_SAMPLES, 1)); HandleCudaError(hipMemcpyToArray(gpRoughnessArray, 0, 0, Roughness, TF_NO_SAMPLES * sizeof(float), hipMemcpyHostToDevice)); HandleCudaError(hipBindTextureToArray(gTexRoughness, gpRoughnessArray, ChannelDesc)); } void UnbindTransferFunctionRoughness(void) { HandleCudaError(hipFreeArray(gpRoughnessArray)); gpRoughnessArray = NULL; HandleCudaError(hipUnbindTexture(gTexRoughness)); } void BindTransferFunctionEmission(CTransferFunction& TransferFunctionEmission) { gTexEmission.normalized = true; gTexEmission.filterMode = hipFilterModeLinear; gTexEmission.addressMode[0] = hipAddressModeClamp; float4 Emission[TF_NO_SAMPLES]; for (int i = 0; i < TF_NO_SAMPLES; i++) { Emission[i].x = TransferFunctionEmission.F((float)i * INV_TF_NO_SAMPLES).r; Emission[i].y = TransferFunctionEmission.F((float)i * INV_TF_NO_SAMPLES).g; Emission[i].z = TransferFunctionEmission.F((float)i * INV_TF_NO_SAMPLES).b; } hipChannelFormatDesc ChannelDesc = hipCreateChannelDesc<float4>(); if (gpEmissionArray == NULL) HandleCudaError(hipMallocArray(&gpEmissionArray, &ChannelDesc, TF_NO_SAMPLES, 1)); HandleCudaError(hipMemcpyToArray(gpEmissionArray, 0, 0, Emission, TF_NO_SAMPLES * sizeof(float4), hipMemcpyHostToDevice)); HandleCudaError(hipBindTextureToArray(gTexEmission, gpEmissionArray, ChannelDesc)); } void UnbindTransferFunctionEmission(void) { HandleCudaError(hipFreeArray(gpEmissionArray)); gpEmissionArray = NULL; HandleCudaError(hipUnbindTexture(gTexEmission)); } void BindConstants(CScene* pScene) { const float3 AaBbMin = make_float3(pScene->m_BoundingBox.GetMinP().x, pScene->m_BoundingBox.GetMinP().y, pScene->m_BoundingBox.GetMinP().z); const float3 AaBbMax = make_float3(pScene->m_BoundingBox.GetMaxP().x, pScene->m_BoundingBox.GetMaxP().y, pScene->m_BoundingBox.GetMaxP().z); HandleCudaError(hipMemcpyToSymbol(gAaBbMin, &AaBbMin, sizeof(float3))); HandleCudaError(hipMemcpyToSymbol(gAaBbMax, &AaBbMax, sizeof(float3))); const float3 InvAaBbMin = make_float3(pScene->m_BoundingBox.GetInvMinP().x, pScene->m_BoundingBox.GetInvMinP().y, pScene->m_BoundingBox.GetInvMinP().z); const float3 InvAaBbMax = make_float3(pScene->m_BoundingBox.GetInvMaxP().x, pScene->m_BoundingBox.GetInvMaxP().y, pScene->m_BoundingBox.GetInvMaxP().z); HandleCudaError(hipMemcpyToSymbol(gInvAaBbMin, &InvAaBbMin, sizeof(float3))); HandleCudaError(hipMemcpyToSymbol(gInvAaBbMax, &InvAaBbMax, sizeof(float3))); const float3 WorldToTextureTransform = make_float3( ((pScene->m_Resolution.GetResX() + 2.f - 1.f) / 
pScene->m_Resolution.GetResX()), ((pScene->m_Resolution.GetResY() + 2.f - 1.f) / pScene->m_Resolution.GetResY()), ((pScene->m_Resolution.GetResZ() + 2.f - 1.f) / pScene->m_Resolution.GetResZ()) ); const float3 VoxelTextureOffset = make_float3( (1.f / (2 * pScene->m_Resolution.GetResX())), (1.f / (2 * pScene->m_Resolution.GetResY())), (1.f / (2 * pScene->m_Resolution.GetResZ())) ); HandleCudaError(hipMemcpyToSymbol(gWorldToTextureTransform, &WorldToTextureTransform, sizeof(float3))); HandleCudaError(hipMemcpyToSymbol(gVoxelTextureOffset, &VoxelTextureOffset, sizeof(float3))); const float IntensityMin = pScene->m_IntensityRange.GetMin(); const float IntensityMax = pScene->m_IntensityRange.GetMax(); const float IntensityRange = pScene->m_IntensityRange.GetRange(); const float IntensityInvRange = 1.0f / IntensityRange; HandleCudaError(hipMemcpyToSymbol(gIntensityMin, &IntensityMin, sizeof(float))); HandleCudaError(hipMemcpyToSymbol(gIntensityMax, &IntensityMax, sizeof(float))); HandleCudaError(hipMemcpyToSymbol(gIntensityRange, &IntensityRange, sizeof(float))); HandleCudaError(hipMemcpyToSymbol(gIntensityInvRange, &IntensityInvRange, sizeof(float))); // First we find the shortest voxel axis, so a stepsize of 1 is always equal to at most 1 entire voxel along any of its axis float smallestVoxelAxis = fminf(fminf(pScene->m_VoxelSizeWorld.x, pScene->m_VoxelSizeWorld.y), pScene->m_VoxelSizeWorld.z); const float StepSize = pScene->m_StepSizeFactor * smallestVoxelAxis; const float StepSizeShadow = pScene->m_StepSizeFactorShadow * smallestVoxelAxis; const float ScatteringHeadstart = pScene->m_ScatteringHeadstart * smallestVoxelAxis; HandleCudaError(hipMemcpyToSymbol(gStepSize, &StepSize, sizeof(float))); HandleCudaError(hipMemcpyToSymbol(gStepSizeShadow, &StepSizeShadow, sizeof(float))); HandleCudaError(hipMemcpyToSymbol(gScatteringHeadstart, &ScatteringHeadstart, sizeof(float))); const float GradientPower = pScene->m_GradientPower; HandleCudaError(hipMemcpyToSymbol(gGradientPower, &GradientPower, sizeof(float))); const float DensityScale = pScene->m_DensityScale; HandleCudaError(hipMemcpyToSymbol(gDensityScale, &DensityScale, sizeof(float))); const float3 VoxelSizeWorld = make_float3(pScene->m_VoxelSizeWorld.x, pScene->m_VoxelSizeWorld.y, pScene->m_VoxelSizeWorld.z); // InvGradientDelta is only used in an old and flawed NormalizedGradient function a the moment const float3 InvGradientDelta = make_float3(1, 1, 1); float textureFloorAvoidance = 1.f; const Vec3f VoxelSizeWorldX(textureFloorAvoidance*VoxelSizeWorld.x, 0.0f, 0.0f); const Vec3f VoxelSizeWorldY(0.0f, textureFloorAvoidance*VoxelSizeWorld.y, 0.0f); const Vec3f VoxelSizeWorldZ(0.0f, 0.0f, textureFloorAvoidance*VoxelSizeWorld.z); const float3 Spacings = make_float3(pScene->m_Spacing.x, pScene->m_Spacing.y, pScene->m_Spacing.z); HandleCudaError(hipMemcpyToSymbol(gVoxelSizeWorld, &VoxelSizeWorld, sizeof(float3))); HandleCudaError(hipMemcpyToSymbol(gInvGradientDelta, &InvGradientDelta, sizeof(float3))); HandleCudaError(hipMemcpyToSymbol(gVoxelSizeWorldX, &VoxelSizeWorldX, sizeof(Vec3f))); HandleCudaError(hipMemcpyToSymbol(gVoxelSizeWorldY, &VoxelSizeWorldY, sizeof(Vec3f))); HandleCudaError(hipMemcpyToSymbol(gVoxelSizeWorldZ, &VoxelSizeWorldZ, sizeof(Vec3f))); HandleCudaError(hipMemcpyToSymbol(gSpacings, &Spacings, sizeof(float3))); const int FilmWidth = pScene->m_Camera.m_Film.GetWidth(); const int Filmheight = pScene->m_Camera.m_Film.GetHeight(); const int FilmNoPixels = pScene->m_Camera.m_Film.m_Resolution.GetNoElements(); 
HandleCudaError(hipMemcpyToSymbol(gFilmWidth, &FilmWidth, sizeof(int))); HandleCudaError(hipMemcpyToSymbol(gFilmHeight, &Filmheight, sizeof(int))); HandleCudaError(hipMemcpyToSymbol(gFilmNoPixels, &FilmNoPixels, sizeof(int))); const int FilterWidth = 1; HandleCudaError(hipMemcpyToSymbol(gFilterWidth, &FilterWidth, sizeof(int))); const float FilterWeights[10] = { 0.11411459588254977f, 0.08176668094332218f, 0.03008028089187349f, 0.01f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f }; HandleCudaError(hipMemcpyToSymbol(gFilterWeights, &FilterWeights, 10 * sizeof(float))); const float Gamma = pScene->m_Camera.m_Film.m_Gamma; const float InvGamma = 1.0f / Gamma; const float Exposure = pScene->m_Camera.m_Film.m_Exposure; const float InvExposure = 1.0f / Exposure; HandleCudaError(hipMemcpyToSymbol(gExposure, &Exposure, sizeof(float))); HandleCudaError(hipMemcpyToSymbol(gInvExposure, &InvExposure, sizeof(float))); HandleCudaError(hipMemcpyToSymbol(gGamma, &Gamma, sizeof(float))); HandleCudaError(hipMemcpyToSymbol(gInvGamma, &InvGamma, sizeof(float))); HandleCudaError(hipMemcpyToSymbol(gDenoiseEnabled, &pScene->m_DenoiseParams.m_Enabled, sizeof(bool))); HandleCudaError(hipMemcpyToSymbol(gDenoiseWindowRadius, &pScene->m_DenoiseParams.m_WindowRadius, sizeof(float))); HandleCudaError(hipMemcpyToSymbol(gDenoiseInvWindowArea, &pScene->m_DenoiseParams.m_InvWindowArea, sizeof(float))); HandleCudaError(hipMemcpyToSymbol(gDenoiseNoise, &pScene->m_DenoiseParams.m_Noise, sizeof(float))); HandleCudaError(hipMemcpyToSymbol(gDenoiseWeightThreshold, &pScene->m_DenoiseParams.m_WeightThreshold, sizeof(float))); HandleCudaError(hipMemcpyToSymbol(gDenoiseLerpThreshold, &pScene->m_DenoiseParams.m_LerpThreshold, sizeof(float))); HandleCudaError(hipMemcpyToSymbol(gDenoiseLerpC, &pScene->m_DenoiseParams.m_LerpC, sizeof(float))); const float NoIterations = pScene->GetNoIterations(); const float InvNoIterations = 1.0f / __max(1.0f, NoIterations); HandleCudaError(hipMemcpyToSymbol(gNoIterations, &NoIterations, sizeof(float))); HandleCudaError(hipMemcpyToSymbol(gInvNoIterations, &InvNoIterations, sizeof(float))); } void Render(CScene& Scene, CTiming& RenderImage, CTiming& BlurImage, CTiming& PostProcessImage, CTiming& DenoiseImage) { CScene* pDevScene = NULL; HandleCudaError(hipMalloc(&pDevScene, sizeof(CScene))); HandleCudaError(hipMemcpy(pDevScene, &Scene, sizeof(CScene), hipMemcpyHostToDevice)); if (Scene.m_Camera.m_Focus.m_Type == 0) Scene.m_Camera.m_Focus.m_FocalDistance = NearestIntersection(pDevScene); HandleCudaError(hipMemcpy(pDevScene, &Scene, sizeof(CScene), hipMemcpyHostToDevice)); CCudaView* pDevView = NULL; HandleCudaError(hipMalloc(&pDevView, sizeof(CCudaView))); HandleCudaError(hipMemcpy(pDevView, &gRenderCanvasView, sizeof(CCudaView), hipMemcpyHostToDevice)); CCudaTimer TmrRender; switch (Scene.m_AlgorithmType) { case -1: { CanvasGreen(&Scene, pDevScene, pDevView); break; } case SINGLE_SCATTERING: { SingleScattering(&Scene, pDevScene, pDevView); break; } case MULTIPLE_SCATTERING: { MultipleScattering(&Scene, pDevScene, pDevView); break; } case PROPERTY_BASED: { MultipleScatteringPropertyBased(&Scene, pDevScene, pDevView); break; } case 4: case 5: case 9: case 10: case 11: case 12: case 13: case 14: case 16: case 17: case 18: case 19: case 20: case 21: case 22: case 23: case 24: case 25: case 26: case 27: case 28: case 29: { MultipleScatteringPropertyBasedDebug(&Scene, pDevScene, pDevView); break; } default: { throw new exception("Algorithm type not know"); } } RenderImage.AddDuration(TmrRender.ElapsedTime()); CCudaTimer 
TmrBlur; if (Scene.m_PostProcessingSteps & PostProcessingStepsEnum::BLUR) { Blur(&Scene, pDevScene, pDevView); } BlurImage.AddDuration(TmrBlur.ElapsedTime()); CCudaTimer TmrPostProcess; if (Scene.m_PostProcessingSteps & PostProcessingStepsEnum::ESTIMATE) { Estimate(&Scene, pDevScene, pDevView); } else { EstimatePassthrough(&Scene, pDevScene, pDevView); } PostProcessImage.AddDuration(TmrPostProcess.ElapsedTime()); if (Scene.m_PostProcessingSteps & PostProcessingStepsEnum::TONE_MAP) { ToneMap(&Scene, pDevScene, pDevView); } else { ToneMapPassthrough(&Scene, pDevScene, pDevView); } CCudaTimer TmrDenoise; if (Scene.m_PostProcessingSteps & PostProcessingStepsEnum::DENOISE) { Denoise(&Scene, pDevScene, pDevView); } else { DenoisePassthrough(&Scene, pDevScene, pDevView); } DenoiseImage.AddDuration(TmrDenoise.ElapsedTime()); HandleCudaError(hipFree(pDevScene)); HandleCudaError(hipFree(pDevView)); }
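The Core translation unit above (and its CUDA original below) drives all of its transfer-function lookups through texture<> references such as gTexOpacity, bound with hipBindTextureToArray/cudaBindTextureToArray. That reference API is deprecated in newer CUDA toolkits, so as a non-authoritative sketch, the same 1D lookup table could be expressed with a texture object roughly as follows; the function names are mine and nothing here is part of the original renderer.

// Sketch only (my own names): a 1D transfer-function LUT as a texture object,
// configured the same way as gTexOpacity above (normalized coords, linear
// filtering, clamped addressing, element-type reads).
#include <cuda_runtime.h>

cudaTextureObject_t CreateLut1D(const float* lut, int numSamples) {
    cudaChannelFormatDesc desc = cudaCreateChannelDesc<float>();
    cudaArray_t arr = nullptr;
    cudaMallocArray(&arr, &desc, numSamples, 1);
    cudaMemcpy2DToArray(arr, 0, 0, lut, numSamples * sizeof(float),
                        numSamples * sizeof(float), 1, cudaMemcpyHostToDevice);

    cudaResourceDesc resDesc = {};
    resDesc.resType = cudaResourceTypeArray;
    resDesc.res.array.array = arr;

    cudaTextureDesc texDesc = {};
    texDesc.normalizedCoords = 1;
    texDesc.filterMode = cudaFilterModeLinear;
    texDesc.addressMode[0] = cudaAddressModeClamp;
    texDesc.readMode = cudaReadModeElementType;

    cudaTextureObject_t tex = 0;
    cudaCreateTextureObject(&tex, &resDesc, &texDesc, nullptr);
    return tex;
}

// Device-side lookup, analogous to tex1D(gTexOpacity, u) in the original:
__device__ float SampleLut1D(cudaTextureObject_t tex, float u) {
    return tex1D<float>(tex, u);
}

HIP mirrors this structure with hip-prefixed equivalents (hipTextureObject_t, hipCreateTextureObject), so the sketch carries over to the hipified side with the same field names.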
a3fc9815aa087a23507a5c0387478a52eab9fade.cu
/* Copyright (c) 2011, T. Kroes <[email protected]> All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: - Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. - Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. - Neither the name of the <ORGANIZATION> nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include "Core.cuh" texture<short, cudaTextureType3D, cudaReadModeNormalizedFloat> gTexDensity; texture<short, cudaTextureType3D, cudaReadModeNormalizedFloat> gTexGradientMagnitude; texture<float, cudaTextureType3D, cudaReadModeElementType> gTexExtinction; texture<float, cudaTextureType1D, cudaReadModeElementType> gTexOpacity; texture<float4, cudaTextureType1D, cudaReadModeElementType> gTexDiffuse; texture<float4, cudaTextureType1D, cudaReadModeElementType> gTexSpecular; texture<float, cudaTextureType1D, cudaReadModeElementType> gTexRoughness; texture<float4, cudaTextureType1D, cudaReadModeElementType> gTexEmission; texture<uchar4, cudaTextureType2D, cudaReadModeNormalizedFloat> gTexRunningEstimateRgba; texture<float4, cudaTextureType3D, cudaReadModeElementType> gTexOpacityGradient; texture<float, cudaTextureType3D, cudaReadModeNormalizedFloat> gTexOpacityMagnitudeNormalized; texture<float, cudaTextureType3D, cudaReadModeElementType> gTexLightPaths; cudaArray* gpDensityArray = NULL; cudaArray* gpLightPathsArray = NULL; cudaArray* gpGradientMagnitudeArray = NULL; cudaArray* gpOpacityArray = NULL; cudaArray* gpDiffuseArray = NULL; cudaArray* gpSpecularArray = NULL; cudaArray* gpRoughnessArray = NULL; cudaArray* gpEmissionArray = NULL; CD float3 gAaBbMin; CD float3 gAaBbMax; CD float3 gInvAaBbMin; CD float3 gInvAaBbMax; CD float3 gWorldToTextureTransform; CD float3 gVoxelTextureOffset; CD float gIntensityMin; CD float gIntensityMax; CD float gIntensityRange; CD float gIntensityInvRange; CD float gStepSize; CD float gStepSizeShadow; CD float gDensityScale; CD float3 gVoxelSizeWorld; CD float3 gInvGradientDelta; CD float3 gVoxelSizeWorldX; CD float3 gVoxelSizeWorldY; CD float3 gVoxelSizeWorldZ; CD int gFilmWidth; CD int gFilmHeight; CD int gFilmNoPixels; CD int gFilterWidth; CD float gFilterWeights[10]; CD float gExposure; CD float gInvExposure; CD float gGamma; CD float gInvGamma; CD float gDenoiseEnabled; CD float gDenoiseWindowRadius; CD float gDenoiseInvWindowArea; CD float gDenoiseNoise; CD float gDenoiseWeightThreshold; CD 
float gDenoiseLerpThreshold; CD float gDenoiseLerpC; CD float gNoIterations; CD float gInvNoIterations; CD float gScatteringHeadstart; CD float gGradientPower; CD float3 gSpacings; #define TF_NO_SAMPLES 128 #define INV_TF_NO_SAMPLES (1.0f / (float)TF_NO_SAMPLES) #include "Model.cuh" #include "View.cuh" #include "Blur.cuh" #include "Denoise.cuh" #include "Estimate.cuh" #include "Utilities.cuh" #include "SingleScattering.cuh" #include "MultipleScattering.cuh" #include "NearestIntersection.cuh" #include "SpecularBloom.cuh" #include "ToneMap.cuh" #include "MultipleScatteringPropertyBased.cuh" #include "MCKernelUtilities.cuh" #include <iostream> CCudaModel gModel; CCudaView gRenderCanvasView; CCudaView gNavigatorView; void BindDensityBuffer(short* pBuffer, cudaExtent Extent, cudaTextureFilterMode filtermode) { cudaChannelFormatDesc ChannelDesc = cudaCreateChannelDesc<short>(); HandleCudaError(cudaMalloc3DArray(&gpDensityArray, &ChannelDesc, Extent)); cudaMemcpy3DParms CopyParams = {0}; CopyParams.srcPtr = make_cudaPitchedPtr(pBuffer, Extent.width * sizeof(short), Extent.width, Extent.height); CopyParams.dstArray = gpDensityArray; CopyParams.extent = Extent; CopyParams.kind = cudaMemcpyHostToDevice; HandleCudaError(cudaMemcpy3D(&CopyParams)); gTexDensity.normalized = true; gTexDensity.filterMode = filtermode; gTexDensity.addressMode[0] = cudaAddressModeClamp; gTexDensity.addressMode[1] = cudaAddressModeClamp; gTexDensity.addressMode[2] = cudaAddressModeClamp; HandleCudaError(cudaBindTextureToArray(gTexDensity, gpDensityArray, ChannelDesc)); } void BindGradientMagnitudeBuffer(short* pBuffer, cudaExtent Extent) { cudaChannelFormatDesc ChannelDesc = cudaCreateChannelDesc<short>(); HandleCudaError(cudaMalloc3DArray(&gpGradientMagnitudeArray, &ChannelDesc, Extent)); cudaMemcpy3DParms CopyParams = {0}; CopyParams.srcPtr = make_cudaPitchedPtr(pBuffer, Extent.width * sizeof(short), Extent.width, Extent.height); CopyParams.dstArray = gpGradientMagnitudeArray; CopyParams.extent = Extent; CopyParams.kind = cudaMemcpyHostToDevice; HandleCudaError(cudaMemcpy3D(&CopyParams)); gTexGradientMagnitude.normalized = true; gTexGradientMagnitude.filterMode = cudaFilterModeLinear; gTexGradientMagnitude.addressMode[0] = cudaAddressModeClamp; gTexGradientMagnitude.addressMode[1] = cudaAddressModeClamp; gTexGradientMagnitude.addressMode[2] = cudaAddressModeClamp; HandleCudaError(cudaBindTextureToArray(gTexGradientMagnitude, gpGradientMagnitudeArray, ChannelDesc)); } void UnbindDensityBuffer(void) { HandleCudaError(cudaFreeArray(gpDensityArray)); gpDensityArray = NULL; HandleCudaError(cudaUnbindTexture(gTexDensity)); } void UnbindGradientMagnitudeBuffer(void) { HandleCudaError(cudaFreeArray(gpGradientMagnitudeArray)); gpGradientMagnitudeArray = NULL; HandleCudaError(cudaUnbindTexture(gTexGradientMagnitude)); } void BindLightPathsBuffer(float* pBuffer, cudaExtent Extent) { cudaChannelFormatDesc ChannelDesc = cudaCreateChannelDesc<float>(); HandleCudaError(cudaMalloc3DArray(&gpLightPathsArray, &ChannelDesc, Extent)); cudaMemcpy3DParms CopyParams = { 0 }; CopyParams.srcPtr = make_cudaPitchedPtr(pBuffer, Extent.width * sizeof(float), Extent.width, Extent.height); CopyParams.dstArray = gpLightPathsArray; CopyParams.extent = Extent; CopyParams.kind = cudaMemcpyHostToDevice; HandleCudaError(cudaMemcpy3D(&CopyParams)); gTexLightPaths.normalized = true; gTexLightPaths.filterMode = cudaFilterModePoint; gTexLightPaths.addressMode[0] = cudaAddressModeClamp; gTexLightPaths.addressMode[1] = cudaAddressModeClamp; 
gTexLightPaths.addressMode[2] = cudaAddressModeClamp; HandleCudaError(cudaBindTextureToArray(gTexLightPaths, gpLightPathsArray, ChannelDesc)); } void UnbindLightPathsBuffer(void) { HandleCudaError(cudaFreeArray(gpLightPathsArray)); gpLightPathsArray = NULL; HandleCudaError(cudaUnbindTexture(gTexLightPaths)); } void BindRenderCanvasView(const CResolution2D& Resolution) { gRenderCanvasView.Resize(Resolution); cudaChannelFormatDesc Channel; Channel = cudaCreateChannelDesc<uchar4>(); HandleCudaError(cudaBindTexture2D(0, gTexRunningEstimateRgba, gRenderCanvasView.m_EstimateRgbaLdr.GetPtr(), Channel, gRenderCanvasView.GetWidth(), gRenderCanvasView.GetHeight(), gRenderCanvasView.m_EstimateRgbaLdr.GetPitch())); } void ResetRenderCanvasView(void) { gRenderCanvasView.Reset(); } void FreeRenderCanvasView(void) { gRenderCanvasView.Free(); } unsigned char* GetDisplayEstimate(void) { return (unsigned char*)gRenderCanvasView.m_DisplayEstimateRgbLdr.GetPtr(0, 0); } unsigned char* GetFrameDisplayEstimate(void) { return (unsigned char*)gRenderCanvasView.m_FrameEstimateXyza.GetPtr(0, 0); } void BindTransferFunctionOpacity(CTransferFunction& TransferFunctionOpacity) { gTexOpacity.normalized = true; gTexOpacity.filterMode = cudaFilterModeLinear; gTexOpacity.addressMode[0] = cudaAddressModeClamp; float Opacity[TF_NO_SAMPLES]; for (int i = 0; i < TF_NO_SAMPLES; i++) Opacity[i] = TransferFunctionOpacity.F((float)i * INV_TF_NO_SAMPLES).r; cudaChannelFormatDesc ChannelDesc = cudaCreateChannelDesc<float>(); if (gpOpacityArray == NULL) HandleCudaError(cudaMallocArray(&gpOpacityArray, &ChannelDesc, TF_NO_SAMPLES, 1)); HandleCudaError(cudaMemcpyToArray(gpOpacityArray, 0, 0, Opacity, TF_NO_SAMPLES * sizeof(float), cudaMemcpyHostToDevice)); HandleCudaError(cudaBindTextureToArray(gTexOpacity, gpOpacityArray, ChannelDesc)); } void UnbindTransferFunctionOpacity(void) { HandleCudaError(cudaFreeArray(gpOpacityArray)); gpOpacityArray = NULL; HandleCudaError(cudaUnbindTexture(gTexOpacity)); } void BindTransferFunctionDiffuse(CTransferFunction& TransferFunctionDiffuse) { gTexDiffuse.normalized = true; gTexDiffuse.filterMode = cudaFilterModeLinear; gTexDiffuse.addressMode[0] = cudaAddressModeClamp; float4 Diffuse[TF_NO_SAMPLES]; for (int i = 0; i < TF_NO_SAMPLES; i++) { Diffuse[i].x = TransferFunctionDiffuse.F((float)i * INV_TF_NO_SAMPLES).r; Diffuse[i].y = TransferFunctionDiffuse.F((float)i * INV_TF_NO_SAMPLES).g; Diffuse[i].z = TransferFunctionDiffuse.F((float)i * INV_TF_NO_SAMPLES).b; } cudaChannelFormatDesc ChannelDesc = cudaCreateChannelDesc<float4>(); if (gpDiffuseArray == NULL) HandleCudaError(cudaMallocArray(&gpDiffuseArray, &ChannelDesc, TF_NO_SAMPLES, 1)); HandleCudaError(cudaMemcpyToArray(gpDiffuseArray, 0, 0, Diffuse, TF_NO_SAMPLES * sizeof(float4), cudaMemcpyHostToDevice)); HandleCudaError(cudaBindTextureToArray(gTexDiffuse, gpDiffuseArray, ChannelDesc)); } void UnbindTransferFunctionDiffuse(void) { HandleCudaError(cudaFreeArray(gpDiffuseArray)); gpDiffuseArray = NULL; HandleCudaError(cudaUnbindTexture(gTexDiffuse)); } void BindTransferFunctionSpecular(CTransferFunction& TransferFunctionSpecular) { gTexSpecular.normalized = true; gTexSpecular.filterMode = cudaFilterModeLinear; gTexSpecular.addressMode[0] = cudaAddressModeClamp; float4 Specular[TF_NO_SAMPLES]; for (int i = 0; i < TF_NO_SAMPLES; i++) { Specular[i].x = TransferFunctionSpecular.F((float)i * INV_TF_NO_SAMPLES).r; Specular[i].y = TransferFunctionSpecular.F((float)i * INV_TF_NO_SAMPLES).g; Specular[i].z = TransferFunctionSpecular.F((float)i * 
INV_TF_NO_SAMPLES).b; } cudaChannelFormatDesc ChannelDesc = cudaCreateChannelDesc<float4>(); if (gpSpecularArray == NULL) HandleCudaError(cudaMallocArray(&gpSpecularArray, &ChannelDesc, TF_NO_SAMPLES, 1)); HandleCudaError(cudaMemcpyToArray(gpSpecularArray, 0, 0, Specular, TF_NO_SAMPLES * sizeof(float4), cudaMemcpyHostToDevice)); HandleCudaError(cudaBindTextureToArray(gTexSpecular, gpSpecularArray, ChannelDesc)); } void UnbindTransferFunctionSpecular(void) { HandleCudaError(cudaFreeArray(gpSpecularArray)); gpSpecularArray = NULL; HandleCudaError(cudaUnbindTexture(gTexSpecular)); } void BindTransferFunctionRoughness(CTransferFunction& TransferFunctionRoughness) { gTexRoughness.normalized = true; gTexRoughness.filterMode = cudaFilterModeLinear; gTexRoughness.addressMode[0] = cudaAddressModeClamp; float Roughness[TF_NO_SAMPLES]; for (int i = 0; i < TF_NO_SAMPLES; i++) Roughness[i] = TransferFunctionRoughness.F((float)i * INV_TF_NO_SAMPLES).r; cudaChannelFormatDesc ChannelDesc = cudaCreateChannelDesc<float>(); if (gpRoughnessArray == NULL) HandleCudaError(cudaMallocArray(&gpRoughnessArray, &ChannelDesc, TF_NO_SAMPLES, 1)); HandleCudaError(cudaMemcpyToArray(gpRoughnessArray, 0, 0, Roughness, TF_NO_SAMPLES * sizeof(float), cudaMemcpyHostToDevice)); HandleCudaError(cudaBindTextureToArray(gTexRoughness, gpRoughnessArray, ChannelDesc)); } void UnbindTransferFunctionRoughness(void) { HandleCudaError(cudaFreeArray(gpRoughnessArray)); gpRoughnessArray = NULL; HandleCudaError(cudaUnbindTexture(gTexRoughness)); } void BindTransferFunctionEmission(CTransferFunction& TransferFunctionEmission) { gTexEmission.normalized = true; gTexEmission.filterMode = cudaFilterModeLinear; gTexEmission.addressMode[0] = cudaAddressModeClamp; float4 Emission[TF_NO_SAMPLES]; for (int i = 0; i < TF_NO_SAMPLES; i++) { Emission[i].x = TransferFunctionEmission.F((float)i * INV_TF_NO_SAMPLES).r; Emission[i].y = TransferFunctionEmission.F((float)i * INV_TF_NO_SAMPLES).g; Emission[i].z = TransferFunctionEmission.F((float)i * INV_TF_NO_SAMPLES).b; } cudaChannelFormatDesc ChannelDesc = cudaCreateChannelDesc<float4>(); if (gpEmissionArray == NULL) HandleCudaError(cudaMallocArray(&gpEmissionArray, &ChannelDesc, TF_NO_SAMPLES, 1)); HandleCudaError(cudaMemcpyToArray(gpEmissionArray, 0, 0, Emission, TF_NO_SAMPLES * sizeof(float4), cudaMemcpyHostToDevice)); HandleCudaError(cudaBindTextureToArray(gTexEmission, gpEmissionArray, ChannelDesc)); } void UnbindTransferFunctionEmission(void) { HandleCudaError(cudaFreeArray(gpEmissionArray)); gpEmissionArray = NULL; HandleCudaError(cudaUnbindTexture(gTexEmission)); } void BindConstants(CScene* pScene) { const float3 AaBbMin = make_float3(pScene->m_BoundingBox.GetMinP().x, pScene->m_BoundingBox.GetMinP().y, pScene->m_BoundingBox.GetMinP().z); const float3 AaBbMax = make_float3(pScene->m_BoundingBox.GetMaxP().x, pScene->m_BoundingBox.GetMaxP().y, pScene->m_BoundingBox.GetMaxP().z); HandleCudaError(cudaMemcpyToSymbol(gAaBbMin, &AaBbMin, sizeof(float3))); HandleCudaError(cudaMemcpyToSymbol(gAaBbMax, &AaBbMax, sizeof(float3))); const float3 InvAaBbMin = make_float3(pScene->m_BoundingBox.GetInvMinP().x, pScene->m_BoundingBox.GetInvMinP().y, pScene->m_BoundingBox.GetInvMinP().z); const float3 InvAaBbMax = make_float3(pScene->m_BoundingBox.GetInvMaxP().x, pScene->m_BoundingBox.GetInvMaxP().y, pScene->m_BoundingBox.GetInvMaxP().z); HandleCudaError(cudaMemcpyToSymbol(gInvAaBbMin, &InvAaBbMin, sizeof(float3))); HandleCudaError(cudaMemcpyToSymbol(gInvAaBbMax, &InvAaBbMax, sizeof(float3))); const float3 
WorldToTextureTransform = make_float3( ((pScene->m_Resolution.GetResX() + 2.f - 1.f) / pScene->m_Resolution.GetResX()), ((pScene->m_Resolution.GetResY() + 2.f - 1.f) / pScene->m_Resolution.GetResY()), ((pScene->m_Resolution.GetResZ() + 2.f - 1.f) / pScene->m_Resolution.GetResZ()) ); const float3 VoxelTextureOffset = make_float3( (1.f / (2 * pScene->m_Resolution.GetResX())), (1.f / (2 * pScene->m_Resolution.GetResY())), (1.f / (2 * pScene->m_Resolution.GetResZ())) ); HandleCudaError(cudaMemcpyToSymbol(gWorldToTextureTransform, &WorldToTextureTransform, sizeof(float3))); HandleCudaError(cudaMemcpyToSymbol(gVoxelTextureOffset, &VoxelTextureOffset, sizeof(float3))); const float IntensityMin = pScene->m_IntensityRange.GetMin(); const float IntensityMax = pScene->m_IntensityRange.GetMax(); const float IntensityRange = pScene->m_IntensityRange.GetRange(); const float IntensityInvRange = 1.0f / IntensityRange; HandleCudaError(cudaMemcpyToSymbol(gIntensityMin, &IntensityMin, sizeof(float))); HandleCudaError(cudaMemcpyToSymbol(gIntensityMax, &IntensityMax, sizeof(float))); HandleCudaError(cudaMemcpyToSymbol(gIntensityRange, &IntensityRange, sizeof(float))); HandleCudaError(cudaMemcpyToSymbol(gIntensityInvRange, &IntensityInvRange, sizeof(float))); // First we find the shortest voxel axis, so a stepsize of 1 is always equal to at most 1 entire voxel along any of its axis float smallestVoxelAxis = fminf(fminf(pScene->m_VoxelSizeWorld.x, pScene->m_VoxelSizeWorld.y), pScene->m_VoxelSizeWorld.z); const float StepSize = pScene->m_StepSizeFactor * smallestVoxelAxis; const float StepSizeShadow = pScene->m_StepSizeFactorShadow * smallestVoxelAxis; const float ScatteringHeadstart = pScene->m_ScatteringHeadstart * smallestVoxelAxis; HandleCudaError(cudaMemcpyToSymbol(gStepSize, &StepSize, sizeof(float))); HandleCudaError(cudaMemcpyToSymbol(gStepSizeShadow, &StepSizeShadow, sizeof(float))); HandleCudaError(cudaMemcpyToSymbol(gScatteringHeadstart, &ScatteringHeadstart, sizeof(float))); const float GradientPower = pScene->m_GradientPower; HandleCudaError(cudaMemcpyToSymbol(gGradientPower, &GradientPower, sizeof(float))); const float DensityScale = pScene->m_DensityScale; HandleCudaError(cudaMemcpyToSymbol(gDensityScale, &DensityScale, sizeof(float))); const float3 VoxelSizeWorld = make_float3(pScene->m_VoxelSizeWorld.x, pScene->m_VoxelSizeWorld.y, pScene->m_VoxelSizeWorld.z); // InvGradientDelta is only used in an old and flawed NormalizedGradient function a the moment const float3 InvGradientDelta = make_float3(1, 1, 1); float textureFloorAvoidance = 1.f; const Vec3f VoxelSizeWorldX(textureFloorAvoidance*VoxelSizeWorld.x, 0.0f, 0.0f); const Vec3f VoxelSizeWorldY(0.0f, textureFloorAvoidance*VoxelSizeWorld.y, 0.0f); const Vec3f VoxelSizeWorldZ(0.0f, 0.0f, textureFloorAvoidance*VoxelSizeWorld.z); const float3 Spacings = make_float3(pScene->m_Spacing.x, pScene->m_Spacing.y, pScene->m_Spacing.z); HandleCudaError(cudaMemcpyToSymbol(gVoxelSizeWorld, &VoxelSizeWorld, sizeof(float3))); HandleCudaError(cudaMemcpyToSymbol(gInvGradientDelta, &InvGradientDelta, sizeof(float3))); HandleCudaError(cudaMemcpyToSymbol(gVoxelSizeWorldX, &VoxelSizeWorldX, sizeof(Vec3f))); HandleCudaError(cudaMemcpyToSymbol(gVoxelSizeWorldY, &VoxelSizeWorldY, sizeof(Vec3f))); HandleCudaError(cudaMemcpyToSymbol(gVoxelSizeWorldZ, &VoxelSizeWorldZ, sizeof(Vec3f))); HandleCudaError(cudaMemcpyToSymbol(gSpacings, &Spacings, sizeof(float3))); const int FilmWidth = pScene->m_Camera.m_Film.GetWidth(); const int Filmheight = 
pScene->m_Camera.m_Film.GetHeight(); const int FilmNoPixels = pScene->m_Camera.m_Film.m_Resolution.GetNoElements(); HandleCudaError(cudaMemcpyToSymbol(gFilmWidth, &FilmWidth, sizeof(int))); HandleCudaError(cudaMemcpyToSymbol(gFilmHeight, &Filmheight, sizeof(int))); HandleCudaError(cudaMemcpyToSymbol(gFilmNoPixels, &FilmNoPixels, sizeof(int))); const int FilterWidth = 1; HandleCudaError(cudaMemcpyToSymbol(gFilterWidth, &FilterWidth, sizeof(int))); const float FilterWeights[10] = { 0.11411459588254977f, 0.08176668094332218f, 0.03008028089187349f, 0.01f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f }; HandleCudaError(cudaMemcpyToSymbol(gFilterWeights, &FilterWeights, 10 * sizeof(float))); const float Gamma = pScene->m_Camera.m_Film.m_Gamma; const float InvGamma = 1.0f / Gamma; const float Exposure = pScene->m_Camera.m_Film.m_Exposure; const float InvExposure = 1.0f / Exposure; HandleCudaError(cudaMemcpyToSymbol(gExposure, &Exposure, sizeof(float))); HandleCudaError(cudaMemcpyToSymbol(gInvExposure, &InvExposure, sizeof(float))); HandleCudaError(cudaMemcpyToSymbol(gGamma, &Gamma, sizeof(float))); HandleCudaError(cudaMemcpyToSymbol(gInvGamma, &InvGamma, sizeof(float))); HandleCudaError(cudaMemcpyToSymbol(gDenoiseEnabled, &pScene->m_DenoiseParams.m_Enabled, sizeof(bool))); HandleCudaError(cudaMemcpyToSymbol(gDenoiseWindowRadius, &pScene->m_DenoiseParams.m_WindowRadius, sizeof(float))); HandleCudaError(cudaMemcpyToSymbol(gDenoiseInvWindowArea, &pScene->m_DenoiseParams.m_InvWindowArea, sizeof(float))); HandleCudaError(cudaMemcpyToSymbol(gDenoiseNoise, &pScene->m_DenoiseParams.m_Noise, sizeof(float))); HandleCudaError(cudaMemcpyToSymbol(gDenoiseWeightThreshold, &pScene->m_DenoiseParams.m_WeightThreshold, sizeof(float))); HandleCudaError(cudaMemcpyToSymbol(gDenoiseLerpThreshold, &pScene->m_DenoiseParams.m_LerpThreshold, sizeof(float))); HandleCudaError(cudaMemcpyToSymbol(gDenoiseLerpC, &pScene->m_DenoiseParams.m_LerpC, sizeof(float))); const float NoIterations = pScene->GetNoIterations(); const float InvNoIterations = 1.0f / __max(1.0f, NoIterations); HandleCudaError(cudaMemcpyToSymbol(gNoIterations, &NoIterations, sizeof(float))); HandleCudaError(cudaMemcpyToSymbol(gInvNoIterations, &InvNoIterations, sizeof(float))); } void Render(CScene& Scene, CTiming& RenderImage, CTiming& BlurImage, CTiming& PostProcessImage, CTiming& DenoiseImage) { CScene* pDevScene = NULL; HandleCudaError(cudaMalloc(&pDevScene, sizeof(CScene))); HandleCudaError(cudaMemcpy(pDevScene, &Scene, sizeof(CScene), cudaMemcpyHostToDevice)); if (Scene.m_Camera.m_Focus.m_Type == 0) Scene.m_Camera.m_Focus.m_FocalDistance = NearestIntersection(pDevScene); HandleCudaError(cudaMemcpy(pDevScene, &Scene, sizeof(CScene), cudaMemcpyHostToDevice)); CCudaView* pDevView = NULL; HandleCudaError(cudaMalloc(&pDevView, sizeof(CCudaView))); HandleCudaError(cudaMemcpy(pDevView, &gRenderCanvasView, sizeof(CCudaView), cudaMemcpyHostToDevice)); CCudaTimer TmrRender; switch (Scene.m_AlgorithmType) { case -1: { CanvasGreen(&Scene, pDevScene, pDevView); break; } case SINGLE_SCATTERING: { SingleScattering(&Scene, pDevScene, pDevView); break; } case MULTIPLE_SCATTERING: { MultipleScattering(&Scene, pDevScene, pDevView); break; } case PROPERTY_BASED: { MultipleScatteringPropertyBased(&Scene, pDevScene, pDevView); break; } case 4: case 5: case 9: case 10: case 11: case 12: case 13: case 14: case 16: case 17: case 18: case 19: case 20: case 21: case 22: case 23: case 24: case 25: case 26: case 27: case 28: case 29: { MultipleScatteringPropertyBasedDebug(&Scene, pDevScene, 
pDevView); break; } default: { throw new exception("Algorithm type not known"); } } RenderImage.AddDuration(TmrRender.ElapsedTime()); CCudaTimer TmrBlur; if (Scene.m_PostProcessingSteps & PostProcessingStepsEnum::BLUR) { Blur(&Scene, pDevScene, pDevView); } BlurImage.AddDuration(TmrBlur.ElapsedTime()); CCudaTimer TmrPostProcess; if (Scene.m_PostProcessingSteps & PostProcessingStepsEnum::ESTIMATE) { Estimate(&Scene, pDevScene, pDevView); } else { EstimatePassthrough(&Scene, pDevScene, pDevView); } PostProcessImage.AddDuration(TmrPostProcess.ElapsedTime()); if (Scene.m_PostProcessingSteps & PostProcessingStepsEnum::TONE_MAP) { ToneMap(&Scene, pDevScene, pDevView); } else { ToneMapPassthrough(&Scene, pDevScene, pDevView); } CCudaTimer TmrDenoise; if (Scene.m_PostProcessingSteps & PostProcessingStepsEnum::DENOISE) { Denoise(&Scene, pDevScene, pDevView); } else { DenoisePassthrough(&Scene, pDevScene, pDevView); } DenoiseImage.AddDuration(TmrDenoise.ElapsedTime()); HandleCudaError(cudaFree(pDevScene)); HandleCudaError(cudaFree(pDevView)); }
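BindConstants above copies each host-side render parameter into a __constant__ device symbol with cudaMemcpyToSymbol before any kernel runs. A minimal sketch of that mechanism, assuming an illustrative symbol and kernel (gStep, use_step and set_step are names invented for this example, not symbols from the file above):

#include <cuda_runtime.h>

__constant__ float gStep;                      // illustrative device-side constant, not one of the CD symbols above

__global__ void use_step(float *out, int n)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n)
        out[i] = gStep * i;                    // every thread reads the same constant-memory value
}

void set_step(float host_value)
{
    // copies sizeof(float) bytes from host memory into the __constant__ symbol (host to device)
    cudaMemcpyToSymbol(gStep, &host_value, sizeof(float));
}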
402adddeb4787baed5df5b2e16fe839cf3aeef0b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /**********HEADERS**********/ #include <algorithm> #include <iostream> #include <iomanip> #include <string> #include <limits> #include <stdlib.h> #include <fstream> #include <math.h> #include <time.h> #include "cuda_ptr.cuh" #include "mimo-io.cuh" using namespace std; /**********DEFINING CONSTANTS***********/ #define NX 192 //was 201 #define NY 192 //was 201 #define NT 401 #define NS 640 //number of sensors #define BLOCK_X 16 #define BLOCK_Y 16 #define HX 0.001f #define HY 0.001f #define H 0.001f /* __constant__ float T = 1.3333e-04f; // 0.2f / 1500.f; */ #define DT 3.3333e-07f /* __constant__ float fre = 125000.f; */ #define OMEGAC 7.8540e+05f #define TAO 4.0000e-06f #define TT 8.1573e-06f /**********FUNCTION DECLARATION**********/ //Host Functions void Ultrasonic_Tomography(const string&, int, int, float); void Position_Transducers(host_ptr<int>, host_ptr<int>, int); //In-Line Functions inline int grid_size(int, int); template <typename T> __host__ __device__ void minmax(T &a, T &b); //Device Functions __global__ void propagation(kernel_ptr<int> const, kernel_ptr<int> const, kernel_ptr<float> const, kernel_ptr<float>, int, int, int); __global__ void propagation_at_corners(kernel_ptr<float>, int); __global__ void initial_signal(kernel_ptr<float> const, kernel_ptr<float>, kernel_ptr<float>, kernel_ptr<float>,kernel_ptr<float>,int); /***************MAIN PROGRAM***************/ int main(int argc, char **argv) { //Command Line Argument Processing if (argc != 4) { cerr << "Usage: " << argv[0] << " <fo filename> <sensor group size> <percent noise>\n\n"; exit(1); } string fo_filename = argv[1]; int group_size = stoi(argv[2]); float percent_noise = (stoi (argv[3]))/100.00f; printf ("Percent Noise = %.2f\n\n", percent_noise); if (count(fo_filename.begin(), fo_filename.end(), '.') != 1) { cerr << "Error: '" << fo_filename << "' should have only one period.\n" << " It should be in the current directory " << "and have only one filetype extension.\n\n"; exit(1); } // Time Measuring Variables int ti = 0, tf = 0; // set floating-point precision on stdout and stderr cout << fixed << setprecision(10); cerr << fixed << setprecision(10); cerr << "Ultrasonic Tomography Running:\n\n"; //Initial time ti = clock(); cerr << "ti = " << ti << "\n"; Ultrasonic_Tomography(fo_filename, group_size, ti, percent_noise); hipDeviceReset(); //Calculate total time tf = clock(); cerr << "tf = " << tf << "\n" << "tt = " << tf - ti << "\n" << "Total Seconds = " << (float)(tf - ti) / CLOCKS_PER_SEC << "\n"; } /**********HOST FUNCTION DEFINITIONS**********/ void Ultrasonic_Tomography(const string &fo_filename, int group_size, int ti, float percent_noise) { // environment initialization // fo(i, j) = // ground truth value at pos (i, j) of field host_ptr<float> fo(NX, NY); device_ptr<float> dev_fo(NX, NY); { ifstream fo_in(fo_filename); if (!fo_in) { cerr << "Error: '" + fo_filename + "' file not found in current directory.\n\n"; return; } read(fo_in, fo); copy(dev_fo, fo); } // Position of the transducers host_ptr<int> ii(NS); host_ptr<int> jj(NS); device_ptr<int> dev_ii(NS); device_ptr<int> dev_jj(NS); Position_Transducers(ii, jj, NS); // copy from host to device copy(dev_ii, ii); copy(dev_jj, jj); // Ng = number of sensor groups that will be launched in parallel int Ng = NS / group_size; // u(i, j, k, g) = // wave propagation at pos (i, j) of field, at time k, from sensor group g device_ptr<float> dev_u(NX, NY, NT, Ng); dev_u.set(0.f); // 
kernel launch parameters for propagation dim3 threads_propagation(NX, 1, 1); dim3 grid_propagation( grid_size(NX, threads_propagation.x), grid_size(NY, threads_propagation.y), grid_size(Ng, threads_propagation.z)); // kernel launch parameters for propagation_at_corners dim3 threads_prop_corners(NT, 1); dim3 grid_prop_corners( grid_size(NT, threads_prop_corners.x), grid_size(Ng, threads_prop_corners.y)); // initial wave propagation over fo for (int k = 1; k < NT - 1; ++k) hipLaunchKernelGGL(( propagation), dim3(grid_propagation), dim3(threads_propagation), 0, 0, dev_ii, dev_jj, dev_fo, dev_u, k, group_size, Ng); hipLaunchKernelGGL(( propagation_at_corners), dim3(grid_prop_corners), dim3(threads_prop_corners), 0, 0, dev_u, Ng); // gg_xxx(i, k, g) = // initial signal at pos i in row/column xxx // at time k, from sensor group // e.g g_bottom stores the bottom row, // g_right stores the right column device_ptr<float> dev_g_bottom(NX, NT, Ng); device_ptr<float> dev_g_right(NY, NT, Ng); device_ptr<float> dev_g_top(NX, NT, Ng); device_ptr<float> dev_g_left(NY, NT, Ng); dev_g_bottom.set(0.f); dev_g_right.set(0.f); dev_g_top.set(0.f); dev_g_left.set(0.f); // kernel launch parameters for initial_signal dim3 threads_signal(NX, 1, 1); dim3 grid_signal( grid_size(NX, threads_signal.x), grid_size(NT, threads_signal.y), grid_size(Ng, threads_signal.z)); host_ptr<float>u(NX, NY, NT, Ng); copy (u, dev_u) ; for (int x =0; x< NX; x++) { for (int y=0; y<NY; y++) { for (int t=0; t<NT; t++) { for (int g=0; g<Ng; g++) { float noise = (1.0f- percent_noise) + (percent_noise *2)*((float)rand()/RAND_MAX); //if (u(x,y,t,g)!=0){ // printf("Before: %.10f\t\t\t\t || \t\t\t\t", u(x,y,t,g)); u(x, y, t, g)=u(x, y, t, g) *noise; //printf ("After : %.10f\t\t\t\tNoise:%.2f\n\n", u(x,y,t,g), noise); //} } } } } copy(dev_u, u); // store initial signal of wave at sensor positions of u in g hipLaunchKernelGGL(( initial_signal), dim3(grid_signal), dim3(threads_signal), 0, 0, dev_u, dev_g_bottom, dev_g_right, dev_g_top, dev_g_left, Ng); host_ptr<float> g_bottom(NX, NT, Ng); host_ptr<float> g_right(NY, NT, Ng); host_ptr<float> g_top(NX, NT, Ng); host_ptr<float> g_left(NY, NT, Ng); copy(g_bottom, dev_g_bottom); copy(g_right, dev_g_right); copy(g_top, dev_g_top); copy(g_left, dev_g_left); /* for (int x =0; x< NX; x++) { for (int t=0; t<NT; t++) { for (int g=0; g<Ng; g++) { float noise = (1- percent_noise) + (percent_noise *2)*((float)rand()/RAND_MAX); g_bottom(x, t, g) = g_bottom(x, t, g) * noise; g_right (x,t,g)=g_right(x,t,g)*noise; g_top(x,t,g)=g_top (x,t,g)*noise; g_left(x,t,g)=g_left(x,t,g)*noise; } } } copy(dev_g_bottom, g_bottom); copy(dev_g_right, g_right); copy(dev_g_top, g_top); copy(dev_g_left, g_left); */ { auto idx = fo_filename.find_first_of('.'); string prefix = fo_filename.substr(0, idx) + "-data-"; string suffix = "-" + to_string(group_size) + ".txt"; string gb_name = prefix + "bottom" + suffix; string gr_name = prefix + "right" + suffix; string gt_name = prefix + "top" + suffix; string gl_name = prefix + "left" + suffix; ofstream gb_out(gb_name); ofstream gr_out(gr_name); ofstream gt_out(gt_name); ofstream gl_out(gl_name); cerr << "writing to '" << gb_name << "'...\n\n"; write(gb_out, g_bottom); cerr << "writing to '" << gr_name << "'...\n\n"; write(gr_out, g_right); cerr << "writing to '" << gt_name << "'...\n\n"; write(gt_out, g_top); cerr << "writing to '" << gl_name << "'...\n\n"; write(gl_out, g_left); } } void Position_Transducers(host_ptr<int> ii, host_ptr<int> jj, int num) { //returns the (x,y) coordinates of 
the number of total transducers int p = 0; for(p = 0; p < 160; p++) { ii(p) = 21 + (p + 1); jj(p) = 181; } for(p = 160; p < 320; p++) { ii(p) = 181; jj(p) = 181 - ((p + 1) - 160); } for(p = 320; p < 480; p++) { ii(p) = 181 - ((p + 1) - 320); jj(p) = 21; } for(p = 480; p < num; p++) { ii(p) = 21; jj(p) = 21 + ((p + 1) - 480); } } /**********DEVICE FUNCTION DEFINITIONS***********/ __global__ void propagation( kernel_ptr<int> const ii, kernel_ptr<int> const jj, kernel_ptr<float> const f, kernel_ptr<float> u, int k, int group_size, int Ng) { // Map from threadIdx / BlockIdx to pixel position int i = threadIdx.x + blockIdx.x * blockDim.x; int j = threadIdx.y + blockIdx.y * blockDim.y; int g = threadIdx.z + blockIdx.z * blockDim.z; if(i < NX && j < NY && g < Ng) { float v = 1500.f * sqrtf(1.f + f(i, j)); float r = v * DT / HX; float s = 2.f - 4.f * r * r; float val; // will hold new u at (i, j, k + 1, g) // not at boundary if (i != 0 && i != NX - 1 && j != 0 && j != NY - 1) { val = r * r * (u(i+1, j, k, g) + u(i-1, j, k, g) + u(i, j-1, k, g) + u(i, j+1, k, g)) + s * u(i, j, k, g) - u(i, j, k-1, g); int p = g * group_size; int jp1 = jj(p); int jp2 = jj(p + group_size - 1); int ip1 = ii(p); int ip2 = ii(p + group_size - 1); minmax(jp1, jp2); minmax(ip1, ip2); // at sensor, k <= 24 if (j + 1 >= jp1 && j + 1 <= jp2 && i + 1 >= ip1 && i + 1 <= ip2 && k + 1 <= 24) { float t = k * DT - TT; // add wave value val += v * v * DT * DT * cosf(OMEGAC * t) * expf(-(t * t) / (2.f * TAO * TAO)); } } // at boundary else { // boundary booleans bool top = (j == 0); bool bottom = (j == NY - 1); bool left = (i == 0); bool right = (i == NX - 1); // index variables for different boundary cases int ja = top ? (j + 1) : bottom ? (j - 1) : j; int jb = top ? (j + 2) : bottom ? (j - 2) : j; int ia = left ? (i + 1) : right ? (i - 1) : i; int ib = left ? (i + 2) : right ? (i - 2) : i; val = (2.f - 2.f * r - r * r) * u(i, j, k, g) + 2.f * r * (1.f + r) * u(ia, ja, k, g) - r * r * u(ib, jb, k, g) + (2.f * r - 1.f) * u(i, j, k-1, g) - 2.f * r * u(ia, ja, k-1, g); } u(i, j, k+1, g) = val; } } __global__ void propagation_at_corners( kernel_ptr<float> u, int Ng) { int k = threadIdx.x + blockIdx.x * blockDim.x; int g = threadIdx.y + blockIdx.y * blockDim.y; if (k < NT && g < Ng) { u(0, 0, k, g) = 1.f / 2.f * (u(0, 1, k, g) + u(1, 0, k, g)); u(NX-1, 0, k, g) = 1.f / 2.f * (u(NX-2, 0, k, g) + u(NX-1, 1, k, g)); u(0, NY-1, k, g) = 1.f / 2.f * (u(0, NY-2, k, g) + u(1, NY-1, k, g)); u(NX-1, NY-1, k, g) = 1.f / 2.f * (u(NX-2, NY-1, k, g) + u(NX-1, NY-2, k, g)); } } __global__ void initial_signal( kernel_ptr<float> const u, kernel_ptr<float> g_bottom, kernel_ptr<float> g_right, kernel_ptr<float> g_top, kernel_ptr<float> g_left, int Ng) { int i = threadIdx.x + blockIdx.x * blockDim.x; int k = threadIdx.y + blockIdx.y * blockDim.y; int g = threadIdx.z + blockIdx.z * blockDim.z; if (i > 20 && i < 180 && k > 1 && k < NT && g < Ng) { // store values at bottom sensor row of u g_bottom(i, k, g) = u(i, 180, k, g); // store values at top sensor row of u g_top(i, k, g) = u(i, 20, k, g); // store values at right sensor column of u g_right(i, k, g) = u(180, i, k, g); // store values at left sensor column of u g_left(i, k, g) = u(20, i, k, g); } } /**********INLINE FUNCTION DEFINITIONS**********/ inline int grid_size(int n, int threads) { return ceil(float(n) / threads); } // POST-CONDITION: a <= b template <typename T> __host__ __device__ void minmax(T &a, T &b) { if (a > b) { int t = a; a = b; b = t; } }
402adddeb4787baed5df5b2e16fe839cf3aeef0b.cu
/**********HEADERS**********/ #include <algorithm> #include <iostream> #include <iomanip> #include <string> #include <limits> #include <stdlib.h> #include <fstream> #include <math.h> #include <time.h> #include "cuda_ptr.cuh" #include "mimo-io.cuh" using namespace std; /**********DEFINING CONSTANTS***********/ #define NX 192 //was 201 #define NY 192 //was 201 #define NT 401 #define NS 640 //number of sensors #define BLOCK_X 16 #define BLOCK_Y 16 #define HX 0.001f #define HY 0.001f #define H 0.001f /* __constant__ float T = 1.3333e-04f; // 0.2f / 1500.f; */ #define DT 3.3333e-07f /* __constant__ float fre = 125000.f; */ #define OMEGAC 7.8540e+05f #define TAO 4.0000e-06f #define TT 8.1573e-06f /**********FUNCTION DECLARATION**********/ //Host Functions void Ultrasonic_Tomography(const string&, int, int, float); void Position_Transducers(host_ptr<int>, host_ptr<int>, int); //In-Line Functions inline int grid_size(int, int); template <typename T> __host__ __device__ void minmax(T &a, T &b); //Device Functions __global__ void propagation(kernel_ptr<int> const, kernel_ptr<int> const, kernel_ptr<float> const, kernel_ptr<float>, int, int, int); __global__ void propagation_at_corners(kernel_ptr<float>, int); __global__ void initial_signal(kernel_ptr<float> const, kernel_ptr<float>, kernel_ptr<float>, kernel_ptr<float>,kernel_ptr<float>,int); /***************MAIN PROGRAM***************/ int main(int argc, char **argv) { //Command Line Argument Processing if (argc != 4) { cerr << "Usage: " << argv[0] << " <fo filename> <sensor group size> <percent noise>\n\n"; exit(1); } string fo_filename = argv[1]; int group_size = stoi(argv[2]); float percent_noise = (stoi (argv[3]))/100.00f; printf ("Percent Noise = %.2f\n\n", percent_noise); if (count(fo_filename.begin(), fo_filename.end(), '.') != 1) { cerr << "Error: '" << fo_filename << "' should have only one period.\n" << " It should be in the current directory " << "and have only one filetype extension.\n\n"; exit(1); } // Time Measuring Variables int ti = 0, tf = 0; // set floating-point precision on stdout and stderr cout << fixed << setprecision(10); cerr << fixed << setprecision(10); cerr << "Ultrasonic Tomography Running:\n\n"; //Initial time ti = clock(); cerr << "ti = " << ti << "\n"; Ultrasonic_Tomography(fo_filename, group_size, ti, percent_noise); cudaDeviceReset(); //Calculate total time tf = clock(); cerr << "tf = " << tf << "\n" << "tt = " << tf - ti << "\n" << "Total Seconds = " << (float)(tf - ti) / CLOCKS_PER_SEC << "\n"; } /**********HOST FUNCTION DEFINITIONS**********/ void Ultrasonic_Tomography(const string &fo_filename, int group_size, int ti, float percent_noise) { // environment initialization // fo(i, j) = // ground truth value at pos (i, j) of field host_ptr<float> fo(NX, NY); device_ptr<float> dev_fo(NX, NY); { ifstream fo_in(fo_filename); if (!fo_in) { cerr << "Error: '" + fo_filename + "' file not found in current directory.\n\n"; return; } read(fo_in, fo); copy(dev_fo, fo); } // Position of the transducers host_ptr<int> ii(NS); host_ptr<int> jj(NS); device_ptr<int> dev_ii(NS); device_ptr<int> dev_jj(NS); Position_Transducers(ii, jj, NS); // copy from host to device copy(dev_ii, ii); copy(dev_jj, jj); // Ng = number of sensor groups that will be launched in parallel int Ng = NS / group_size; // u(i, j, k, g) = // wave propagation at pos (i, j) of field, at time k, from sensor group g device_ptr<float> dev_u(NX, NY, NT, Ng); dev_u.set(0.f); // kernel launch parameters for propagation dim3 threads_propagation(NX, 1, 1); dim3 
grid_propagation( grid_size(NX, threads_propagation.x), grid_size(NY, threads_propagation.y), grid_size(Ng, threads_propagation.z)); // kernel launch parameters for propagation_at_corners dim3 threads_prop_corners(NT, 1); dim3 grid_prop_corners( grid_size(NT, threads_prop_corners.x), grid_size(Ng, threads_prop_corners.y)); // initial wave propagation over fo for (int k = 1; k < NT - 1; ++k) propagation<<<grid_propagation, threads_propagation>>>(dev_ii, dev_jj, dev_fo, dev_u, k, group_size, Ng); propagation_at_corners<<<grid_prop_corners, threads_prop_corners>>>(dev_u, Ng); // gg_xxx(i, k, g) = // initial signal at pos i in row/column xxx // at time k, from sensor group // e.g g_bottom stores the bottom row, // g_right stores the right column device_ptr<float> dev_g_bottom(NX, NT, Ng); device_ptr<float> dev_g_right(NY, NT, Ng); device_ptr<float> dev_g_top(NX, NT, Ng); device_ptr<float> dev_g_left(NY, NT, Ng); dev_g_bottom.set(0.f); dev_g_right.set(0.f); dev_g_top.set(0.f); dev_g_left.set(0.f); // kernel launch parameters for initial_signal dim3 threads_signal(NX, 1, 1); dim3 grid_signal( grid_size(NX, threads_signal.x), grid_size(NT, threads_signal.y), grid_size(Ng, threads_signal.z)); host_ptr<float>u(NX, NY, NT, Ng); copy (u, dev_u) ; for (int x =0; x< NX; x++) { for (int y=0; y<NY; y++) { for (int t=0; t<NT; t++) { for (int g=0; g<Ng; g++) { float noise = (1.0f- percent_noise) + (percent_noise *2)*((float)rand()/RAND_MAX); //if (u(x,y,t,g)!=0){ // printf("Before: %.10f\t\t\t\t || \t\t\t\t", u(x,y,t,g)); u(x, y, t, g)=u(x, y, t, g) *noise; //printf ("After : %.10f\t\t\t\tNoise:%.2f\n\n", u(x,y,t,g), noise); //} } } } } copy(dev_u, u); // store initial signal of wave at sensor positions of u in g initial_signal<<<grid_signal, threads_signal>>>(dev_u, dev_g_bottom, dev_g_right, dev_g_top, dev_g_left, Ng); host_ptr<float> g_bottom(NX, NT, Ng); host_ptr<float> g_right(NY, NT, Ng); host_ptr<float> g_top(NX, NT, Ng); host_ptr<float> g_left(NY, NT, Ng); copy(g_bottom, dev_g_bottom); copy(g_right, dev_g_right); copy(g_top, dev_g_top); copy(g_left, dev_g_left); /* for (int x =0; x< NX; x++) { for (int t=0; t<NT; t++) { for (int g=0; g<Ng; g++) { float noise = (1- percent_noise) + (percent_noise *2)*((float)rand()/RAND_MAX); g_bottom(x, t, g) = g_bottom(x, t, g) * noise; g_right (x,t,g)=g_right(x,t,g)*noise; g_top(x,t,g)=g_top (x,t,g)*noise; g_left(x,t,g)=g_left(x,t,g)*noise; } } } copy(dev_g_bottom, g_bottom); copy(dev_g_right, g_right); copy(dev_g_top, g_top); copy(dev_g_left, g_left); */ { auto idx = fo_filename.find_first_of('.'); string prefix = fo_filename.substr(0, idx) + "-data-"; string suffix = "-" + to_string(group_size) + ".txt"; string gb_name = prefix + "bottom" + suffix; string gr_name = prefix + "right" + suffix; string gt_name = prefix + "top" + suffix; string gl_name = prefix + "left" + suffix; ofstream gb_out(gb_name); ofstream gr_out(gr_name); ofstream gt_out(gt_name); ofstream gl_out(gl_name); cerr << "writing to '" << gb_name << "'...\n\n"; write(gb_out, g_bottom); cerr << "writing to '" << gr_name << "'...\n\n"; write(gr_out, g_right); cerr << "writing to '" << gt_name << "'...\n\n"; write(gt_out, g_top); cerr << "writing to '" << gl_name << "'...\n\n"; write(gl_out, g_left); } } void Position_Transducers(host_ptr<int> ii, host_ptr<int> jj, int num) { //returns the (x,y) coordinates of the number of total transducers int p = 0; for(p = 0; p < 160; p++) { ii(p) = 21 + (p + 1); jj(p) = 181; } for(p = 160; p < 320; p++) { ii(p) = 181; jj(p) = 181 - ((p + 1) - 160); } for(p = 
320; p < 480; p++) { ii(p) = 181 - ((p + 1) - 320); jj(p) = 21; } for(p = 480; p < num; p++) { ii(p) = 21; jj(p) = 21 + ((p + 1) - 480); } } /**********DEVICE FUNCTION DEFINITIONS***********/ __global__ void propagation( kernel_ptr<int> const ii, kernel_ptr<int> const jj, kernel_ptr<float> const f, kernel_ptr<float> u, int k, int group_size, int Ng) { // Map from threadIdx / BlockIdx to pixel position int i = threadIdx.x + blockIdx.x * blockDim.x; int j = threadIdx.y + blockIdx.y * blockDim.y; int g = threadIdx.z + blockIdx.z * blockDim.z; if(i < NX && j < NY && g < Ng) { float v = 1500.f * sqrtf(1.f + f(i, j)); float r = v * DT / HX; float s = 2.f - 4.f * r * r; float val; // will hold new u at (i, j, k + 1, g) // not at boundary if (i != 0 && i != NX - 1 && j != 0 && j != NY - 1) { val = r * r * (u(i+1, j, k, g) + u(i-1, j, k, g) + u(i, j-1, k, g) + u(i, j+1, k, g)) + s * u(i, j, k, g) - u(i, j, k-1, g); int p = g * group_size; int jp1 = jj(p); int jp2 = jj(p + group_size - 1); int ip1 = ii(p); int ip2 = ii(p + group_size - 1); minmax(jp1, jp2); minmax(ip1, ip2); // at sensor, k <= 24 if (j + 1 >= jp1 && j + 1 <= jp2 && i + 1 >= ip1 && i + 1 <= ip2 && k + 1 <= 24) { float t = k * DT - TT; // add wave value val += v * v * DT * DT * cosf(OMEGAC * t) * expf(-(t * t) / (2.f * TAO * TAO)); } } // at boundary else { // boundary booleans bool top = (j == 0); bool bottom = (j == NY - 1); bool left = (i == 0); bool right = (i == NX - 1); // index variables for different boundary cases int ja = top ? (j + 1) : bottom ? (j - 1) : j; int jb = top ? (j + 2) : bottom ? (j - 2) : j; int ia = left ? (i + 1) : right ? (i - 1) : i; int ib = left ? (i + 2) : right ? (i - 2) : i; val = (2.f - 2.f * r - r * r) * u(i, j, k, g) + 2.f * r * (1.f + r) * u(ia, ja, k, g) - r * r * u(ib, jb, k, g) + (2.f * r - 1.f) * u(i, j, k-1, g) - 2.f * r * u(ia, ja, k-1, g); } u(i, j, k+1, g) = val; } } __global__ void propagation_at_corners( kernel_ptr<float> u, int Ng) { int k = threadIdx.x + blockIdx.x * blockDim.x; int g = threadIdx.y + blockIdx.y * blockDim.y; if (k < NT && g < Ng) { u(0, 0, k, g) = 1.f / 2.f * (u(0, 1, k, g) + u(1, 0, k, g)); u(NX-1, 0, k, g) = 1.f / 2.f * (u(NX-2, 0, k, g) + u(NX-1, 1, k, g)); u(0, NY-1, k, g) = 1.f / 2.f * (u(0, NY-2, k, g) + u(1, NY-1, k, g)); u(NX-1, NY-1, k, g) = 1.f / 2.f * (u(NX-2, NY-1, k, g) + u(NX-1, NY-2, k, g)); } } __global__ void initial_signal( kernel_ptr<float> const u, kernel_ptr<float> g_bottom, kernel_ptr<float> g_right, kernel_ptr<float> g_top, kernel_ptr<float> g_left, int Ng) { int i = threadIdx.x + blockIdx.x * blockDim.x; int k = threadIdx.y + blockIdx.y * blockDim.y; int g = threadIdx.z + blockIdx.z * blockDim.z; if (i > 20 && i < 180 && k > 1 && k < NT && g < Ng) { // store values at bottom sensor row of u g_bottom(i, k, g) = u(i, 180, k, g); // store values at top sensor row of u g_top(i, k, g) = u(i, 20, k, g); // store values at right sensor column of u g_right(i, k, g) = u(180, i, k, g); // store values at left sensor column of u g_left(i, k, g) = u(20, i, k, g); } } /**********INLINE FUNCTION DEFINITIONS**********/ inline int grid_size(int n, int threads) { return ceil(float(n) / threads); } // POST-CONDITION: a <= b template <typename T> __host__ __device__ void minmax(T &a, T &b) { if (a > b) { int t = a; a = b; b = t; } }
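hipify's changes in the pair above are the added hip/hip_runtime.h include, runtime-call renames such as cudaDeviceReset to hipDeviceReset, and the rewrite of CUDA's triple-chevron kernel launches into hipLaunchKernelGGL calls. A minimal, self-contained sketch of that launch mapping, written in the CUDA form with the HIP form as a comment (the kernel scale and its arguments are invented for this example, not taken from the files above):

#include <cuda_runtime.h>

__global__ void scale(float *data, float factor, int n)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;  // global thread index
    if (i < n)
        data[i] *= factor;                          // guarded element-wise update
}

int main()
{
    const int n = 256;
    float *d = NULL;
    cudaMalloc(&d, n * sizeof(float));
    cudaMemset(d, 0, n * sizeof(float));

    dim3 block(128);
    dim3 grid((n + block.x - 1) / block.x);         // same ceil-divide as grid_size() above

    // CUDA source form, as in the .cu file:
    scale<<<grid, block>>>(d, 2.0f, n);
    // hipify-generated form, as in the .hip file:
    //   hipLaunchKernelGGL(scale, dim3(grid), dim3(block), 0, 0, d, 2.0f, n);

    cudaDeviceSynchronize();
    cudaFree(d);
    return 0;
}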
0cd9bacb3acd17f3c67d2819477e269fbf52b419.hip
// !!! This is a file automatically generated by hipify!!!
#include "luaT.h"
#include "THH.h"
#include "THLogAdd.h" /* DEBUG: WTF */

#include <thrust/transform.h>
#include <thrust/reduce.h>
#include <thrust/transform_reduce.h>
#include <thrust/functional.h>
#include <thrust/device_ptr.h>

#include "HardTanh.cu"
#include "Tanh.cu"
#include "Max.cu"
#include "LogSoftMax.cu"
#include "TemporalConvolution.cu"
#include "SpatialConvolution.cu"
#include "SpatialConvolutionMap.cu"
#include "SpatialConvolutionCUDA.cu"
#include "SpatialSubSampling.hip"
#include "SpatialMaxPooling.cu"
#include "SpatialMaxPoolingCUDA.cu"
#include "Square.cu"
#include "Sqrt.hip"
#include "MultiMarginCriterion.hip"
#include "MSECriterion.hip"
#include "Threshold.cu"
#include "Sigmoid.hip"
#include "AbsCriterion.cu"

LUA_EXTERNC DLL_EXPORT int luaopen_libcunn(lua_State *L);

int luaopen_libcunn(lua_State *L)
{
  lua_newtable(L);

  cunn_Tanh_init(L);
  cunn_Sigmoid_init(L);
  cunn_Max_init(L);
  cunn_HardTanh_init(L);
  cunn_LogSoftMax_init(L);
  cunn_TemporalConvolution_init(L);
  cunn_SpatialConvolution_init(L);
  cunn_SpatialConvolutionCUDA_init(L);
  cunn_SpatialConvolutionMap_init(L);
  cunn_SpatialMaxPooling_init(L);
  cunn_SpatialMaxPoolingCUDA_init(L);
  cunn_SpatialSubSampling_init(L);
  cunn_MultiMarginCriterion_init(L);
  cunn_Square_init(L);
  cunn_Sqrt_init(L);
  cunn_Threshold_init(L);
  cunn_MSECriterion_init(L);
  cunn_AbsCriterion_init(L);

  return 1;
}
0cd9bacb3acd17f3c67d2819477e269fbf52b419.cu
#include "luaT.h"
#include "THC.h"
#include "THLogAdd.h" /* DEBUG: WTF */

#include <thrust/transform.h>
#include <thrust/reduce.h>
#include <thrust/transform_reduce.h>
#include <thrust/functional.h>
#include <thrust/device_ptr.h>

#include "HardTanh.cu"
#include "Tanh.cu"
#include "Max.cu"
#include "LogSoftMax.cu"
#include "TemporalConvolution.cu"
#include "SpatialConvolution.cu"
#include "SpatialConvolutionMap.cu"
#include "SpatialConvolutionCUDA.cu"
#include "SpatialSubSampling.cu"
#include "SpatialMaxPooling.cu"
#include "SpatialMaxPoolingCUDA.cu"
#include "Square.cu"
#include "Sqrt.cu"
#include "MultiMarginCriterion.cu"
#include "MSECriterion.cu"
#include "Threshold.cu"
#include "Sigmoid.cu"
#include "AbsCriterion.cu"

LUA_EXTERNC DLL_EXPORT int luaopen_libcunn(lua_State *L);

int luaopen_libcunn(lua_State *L)
{
  lua_newtable(L);

  cunn_Tanh_init(L);
  cunn_Sigmoid_init(L);
  cunn_Max_init(L);
  cunn_HardTanh_init(L);
  cunn_LogSoftMax_init(L);
  cunn_TemporalConvolution_init(L);
  cunn_SpatialConvolution_init(L);
  cunn_SpatialConvolutionCUDA_init(L);
  cunn_SpatialConvolutionMap_init(L);
  cunn_SpatialMaxPooling_init(L);
  cunn_SpatialMaxPoolingCUDA_init(L);
  cunn_SpatialSubSampling_init(L);
  cunn_MultiMarginCriterion_init(L);
  cunn_Square_init(L);
  cunn_Sqrt_init(L);
  cunn_Threshold_init(L);
  cunn_MSECriterion_init(L);
  cunn_AbsCriterion_init(L);

  return 1;
}
cd720facba86050fde0c5aebb373eae2cff56265.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 1.4.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver August 2013 @generated d Tue Aug 13 16:45:13 2013 */ #include "common_magma.h" // 512 is maximum number of threads for CUDA capability 1.x #if (GPUSHMEM < 200) #define BLOCK_SIZE 512 #else #define BLOCK_SIZE 768 #endif __global__ void magma_dtrmv_tkernel(double *T, int ldt, double *v, double *y); // ---------------------------------------- // Does sum reduction of array x, leaving total in x[0]. // Contents of x are destroyed in the process. // With k threads, can reduce array up to 2*k in size. // Assumes number of threads <= 1024 (which is max number of threads up to CUDA capability 3.0) // Having n as template parameter allows compiler to evaluate some conditions at compile time. template< int n > __device__ void zsum_reduce( /*int n,*/ int i, double* x ) { __syncthreads(); if ( n > 1024 ) { if ( i < 1024 && i + 1024 < n ) { x[i] += x[i+1024]; } __syncthreads(); } if ( n > 512 ) { if ( i < 512 && i + 512 < n ) { x[i] += x[i+ 512]; } __syncthreads(); } if ( n > 256 ) { if ( i < 256 && i + 256 < n ) { x[i] += x[i+ 256]; } __syncthreads(); } if ( n > 128 ) { if ( i < 128 && i + 128 < n ) { x[i] += x[i+ 128]; } __syncthreads(); } if ( n > 64 ) { if ( i < 64 && i + 64 < n ) { x[i] += x[i+ 64]; } __syncthreads(); } if ( n > 32 ) { if ( i < 32 && i + 32 < n ) { x[i] += x[i+ 32]; } __syncthreads(); } // probably don't need __syncthreads for < 16 threads // because of implicit warp level synchronization. if ( n > 16 ) { if ( i < 16 && i + 16 < n ) { x[i] += x[i+ 16]; } __syncthreads(); } if ( n > 8 ) { if ( i < 8 && i + 8 < n ) { x[i] += x[i+ 8]; } __syncthreads(); } if ( n > 4 ) { if ( i < 4 && i + 4 < n ) { x[i] += x[i+ 4]; } __syncthreads(); } if ( n > 2 ) { if ( i < 2 && i + 2 < n ) { x[i] += x[i+ 2]; } __syncthreads(); } if ( n > 1 ) { if ( i < 1 && i + 1 < n ) { x[i] += x[i+ 1]; } __syncthreads(); } } // end sum_reduce //============================================================================== __global__ void magma_dgemv_kernel1(int m, const double * __restrict__ V, int ldv, const double * __restrict__ c, double *dwork) { const int i = threadIdx.x; const double *dV = V + (blockIdx.x) * ldv; __shared__ double sum[ BLOCK_SIZE ]; double lsum; /* lsum := v' * C */ lsum = MAGMA_D_ZERO; for( int j = i; j < m; j += BLOCK_SIZE ) lsum += MAGMA_D_MUL( MAGMA_D_CNJG( dV[j] ), c[j] ); sum[i] = lsum; zsum_reduce< BLOCK_SIZE >( i, sum ); __syncthreads(); if (i==0) dwork [blockIdx.x] = sum[0]; } //============================================================================== __global__ void magma_dgemv_kernel3(int m, const double * __restrict__ V, int ldv, double *c, double *dwork, double *tau) { const int i = threadIdx.x; const double *dV = V + (blockIdx.x) * ldv; __shared__ double sum[ BLOCK_SIZE ]; double lsum; if (i==0) c[0] = MAGMA_D_ONE; /* lsum := v' * C */ lsum = MAGMA_D_ZERO; for( int j = i; j < m; j += BLOCK_SIZE ) lsum += MAGMA_D_MUL( MAGMA_D_CNJG( dV[j] ), c[j] ); sum[i] = lsum; zsum_reduce< BLOCK_SIZE >( i, sum ); __syncthreads(); if (i==0) dwork [blockIdx.x] = -tau[0]*sum[0]; } //============================================================================== __global__ void magma_dgemv_kernel2(int m, int n, const double * __restrict__ V, int ldv, const double * __restrict__ x, double *c) { const int i = threadIdx.x; const int j = i + BLOCK_SIZE * blockIdx.x; double lsum; V += j; lsum = 
MAGMA_D_ZERO; if (j < m){ for(int k=0; k<n; k++) lsum += MAGMA_D_MUL( V[k*ldv], x[k]); c[j] -= lsum; } } //============================================================================== /* Apply a real block reflector H to a real vector C from the left (i.e., C = H C). H is represented in the form H = I - V T V' where T is the real k-by-k upper triangular matrix in the representation of the block reflector, and V is a real block of k elementary reflectors. */ extern "C" void magma_dlarfbx_gpu(magma_int_t m, magma_int_t k, double *V, magma_int_t ldv, double *T, magma_int_t ldt, double *c, double *dwork) { /* dwork = V' c */ hipLaunchKernelGGL(( magma_dgemv_kernel1), dim3(k), dim3(BLOCK_SIZE), 0, magma_stream , m, V, ldv, c, dwork); /* dwork = T' dwork */ hipLaunchKernelGGL(( magma_dtrmv_tkernel), dim3(k), dim3(k), 0, magma_stream , T, ldt, dwork, dwork+k); /* c = c - V dwork */ dim3 blocks3( (m + BLOCK_SIZE-1) / BLOCK_SIZE ); dim3 threads3( BLOCK_SIZE ); hipLaunchKernelGGL(( magma_dgemv_kernel2), dim3(blocks3), dim3(threads3), 0, magma_stream , m, k, V, ldv, dwork+k, c); } //==============================================================================
cd720facba86050fde0c5aebb373eae2cff56265.cu
/* -- MAGMA (version 1.4.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver August 2013 @generated d Tue Aug 13 16:45:13 2013 */ #include "common_magma.h" // 512 is maximum number of threads for CUDA capability 1.x #if (GPUSHMEM < 200) #define BLOCK_SIZE 512 #else #define BLOCK_SIZE 768 #endif __global__ void magma_dtrmv_tkernel(double *T, int ldt, double *v, double *y); // ---------------------------------------- // Does sum reduction of array x, leaving total in x[0]. // Contents of x are destroyed in the process. // With k threads, can reduce array up to 2*k in size. // Assumes number of threads <= 1024 (which is max number of threads up to CUDA capability 3.0) // Having n as template parameter allows compiler to evaluate some conditions at compile time. template< int n > __device__ void zsum_reduce( /*int n,*/ int i, double* x ) { __syncthreads(); if ( n > 1024 ) { if ( i < 1024 && i + 1024 < n ) { x[i] += x[i+1024]; } __syncthreads(); } if ( n > 512 ) { if ( i < 512 && i + 512 < n ) { x[i] += x[i+ 512]; } __syncthreads(); } if ( n > 256 ) { if ( i < 256 && i + 256 < n ) { x[i] += x[i+ 256]; } __syncthreads(); } if ( n > 128 ) { if ( i < 128 && i + 128 < n ) { x[i] += x[i+ 128]; } __syncthreads(); } if ( n > 64 ) { if ( i < 64 && i + 64 < n ) { x[i] += x[i+ 64]; } __syncthreads(); } if ( n > 32 ) { if ( i < 32 && i + 32 < n ) { x[i] += x[i+ 32]; } __syncthreads(); } // probably don't need __syncthreads for < 16 threads // because of implicit warp level synchronization. if ( n > 16 ) { if ( i < 16 && i + 16 < n ) { x[i] += x[i+ 16]; } __syncthreads(); } if ( n > 8 ) { if ( i < 8 && i + 8 < n ) { x[i] += x[i+ 8]; } __syncthreads(); } if ( n > 4 ) { if ( i < 4 && i + 4 < n ) { x[i] += x[i+ 4]; } __syncthreads(); } if ( n > 2 ) { if ( i < 2 && i + 2 < n ) { x[i] += x[i+ 2]; } __syncthreads(); } if ( n > 1 ) { if ( i < 1 && i + 1 < n ) { x[i] += x[i+ 1]; } __syncthreads(); } } // end sum_reduce //============================================================================== __global__ void magma_dgemv_kernel1(int m, const double * __restrict__ V, int ldv, const double * __restrict__ c, double *dwork) { const int i = threadIdx.x; const double *dV = V + (blockIdx.x) * ldv; __shared__ double sum[ BLOCK_SIZE ]; double lsum; /* lsum := v' * C */ lsum = MAGMA_D_ZERO; for( int j = i; j < m; j += BLOCK_SIZE ) lsum += MAGMA_D_MUL( MAGMA_D_CNJG( dV[j] ), c[j] ); sum[i] = lsum; zsum_reduce< BLOCK_SIZE >( i, sum ); __syncthreads(); if (i==0) dwork [blockIdx.x] = sum[0]; } //============================================================================== __global__ void magma_dgemv_kernel3(int m, const double * __restrict__ V, int ldv, double *c, double *dwork, double *tau) { const int i = threadIdx.x; const double *dV = V + (blockIdx.x) * ldv; __shared__ double sum[ BLOCK_SIZE ]; double lsum; if (i==0) c[0] = MAGMA_D_ONE; /* lsum := v' * C */ lsum = MAGMA_D_ZERO; for( int j = i; j < m; j += BLOCK_SIZE ) lsum += MAGMA_D_MUL( MAGMA_D_CNJG( dV[j] ), c[j] ); sum[i] = lsum; zsum_reduce< BLOCK_SIZE >( i, sum ); __syncthreads(); if (i==0) dwork [blockIdx.x] = -tau[0]*sum[0]; } //============================================================================== __global__ void magma_dgemv_kernel2(int m, int n, const double * __restrict__ V, int ldv, const double * __restrict__ x, double *c) { const int i = threadIdx.x; const int j = i + BLOCK_SIZE * blockIdx.x; double lsum; V += j; lsum = MAGMA_D_ZERO; if (j < m){ for(int k=0; k<n; k++) lsum += MAGMA_D_MUL( V[k*ldv], x[k]); 
c[j] -= lsum; } } //============================================================================== /* Apply a real block reflector H to a real vector C from the left (i.e., C = H C). H is represented in the form H = I - V T V' where T is the real k-by-k upper triangular matrix in the representation of the block reflector, and V is a real block of k elementary reflectors. */ extern "C" void magma_dlarfbx_gpu(magma_int_t m, magma_int_t k, double *V, magma_int_t ldv, double *T, magma_int_t ldt, double *c, double *dwork) { /* dwork = V' c */ magma_dgemv_kernel1<<< k, BLOCK_SIZE, 0, magma_stream >>>(m, V, ldv, c, dwork); /* dwork = T' dwork */ magma_dtrmv_tkernel<<< k, k, 0, magma_stream >>>( T, ldt, dwork, dwork+k); /* c = c - V dwork */ dim3 blocks3( (m + BLOCK_SIZE-1) / BLOCK_SIZE ); dim3 threads3( BLOCK_SIZE ); magma_dgemv_kernel2<<< blocks3, threads3, 0, magma_stream >>>( m, k, V, ldv, dwork+k, c); } //==============================================================================
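magma_dgemv_kernel1 above follows a common per-block dot-product pattern: each thread strides over the column accumulating a partial sum, the partials are stored in shared memory, and a block-wide reduction leaves the total in element 0 for thread 0 to write out. A minimal sketch of that pattern with a fixed power-of-two block size (the name block_dot and the size 256 are assumptions for this example; MAGMA's zsum_reduce additionally handles non-power-of-two sizes):

#include <cuda_runtime.h>

#define THREADS 256

// illustrative kernel following the same pattern as magma_dgemv_kernel1 above
__global__ void block_dot(const double *v, const double *c, double *partial, int m)
{
    __shared__ double sum[THREADS];
    int i = threadIdx.x;

    // strided partial dot product, one partial sum per thread
    double lsum = 0.0;
    for (int j = i; j < m; j += THREADS)
        lsum += v[j] * c[j];
    sum[i] = lsum;
    __syncthreads();

    // tree reduction in shared memory, halving the active threads each step
    for (int s = THREADS / 2; s > 0; s >>= 1) {
        if (i < s)
            sum[i] += sum[i + s];
        __syncthreads();
    }

    if (i == 0)
        partial[blockIdx.x] = sum[0];  // one scalar per block, like dwork[blockIdx.x] above
}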
4a8b5540382c19ea6bce22a1def3ff83b184d181.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "layer.h" static const unsigned DEFAULT_REDU_FAC = 1000; static const unsigned RELU_REDU_FAC = 1000; static const unsigned SIG_REDU_FAC = 100; __global__ void GPU_net_init(Net_T *, size_t, size_t, size_t); __global__ void GPU_net_free(Net_T *net); __global__ void GPU_conv_init(Conv_T *, size_t, size_t, size_t, size_t, size_t, size_t); __global__ void GPU_conv_free(Conv_T *); __global__ void GPU_pool_init(Pool_T *, size_t, size_t, size_t, size_t, size_t); __global__ void GPU_pool_free(Pool_T *); __global__ void GPU_softmax_init(Softmax_T *, size_t *, size_t, double, NonLin_T); __global__ void GPU_softmax_free(Softmax_T *); __global__ void LYR_softmax_gradDescent(Softmax_T *); Net_T *LYR_net_init(size_t maxNum, size_t maxHgt, size_t maxWid) { Net_T *net; hipMalloc((void **)&net, sizeof(Net_T)); hipLaunchKernelGGL(( GPU_net_init), dim3(1), dim3(1), 0, 0, net, maxNum, maxHgt, maxWid); hipDeviceSynchronize(); return net; } __global__ void LYR_net_update(Net_T *net, double *imgs, size_t num, size_t rows, size_t cols) { net->num = num; net->rows = rows; net->cols = cols; size_t numElm = num * rows * cols * NUM_CHNL; MAT_assign<<<LAUNCH1D(numElm)>>>(imgs, net->mats, numElm); } void LYR_net_free(Net_T *net) { hipLaunchKernelGGL(( GPU_net_free), dim3(1), dim3(1), 0, 0, net); hipDeviceSynchronize(); hipFree(net); } Conv_T *LYR_conv_init(size_t fNum, size_t fHgt, size_t fWid, size_t iNum, size_t iHgt, size_t iWid) { Conv_T *kern; hipMalloc((void **)&kern, sizeof(Conv_T)); hipLaunchKernelGGL(( GPU_conv_init), dim3(1), dim3(1), 0, 0, kern, fNum, fHgt, fWid, iNum, iHgt, iWid); return kern; } __global__ void LYR_conv_fwd(Net_T *net, Conv_T *kern) { size_t x = FLAT2D(blockIdx.x, threadIdx.x, blockDim.x); size_t y = FLAT2D(blockIdx.y, threadIdx.y, blockDim.y); if (x == 0 && y == 0 ) { size_t numInp = net->num * net->rows * net->cols * NUM_CHNL; MAT_assign<<<LAUNCH1D(numInp)>>>(net->mats, kern->inp.mats, numInp); } __syncthreads(); size_t oNum = net->num * kern->fltrs.num; size_t oRows = CONV_OUT(net->rows, kern->fltrs.rows); size_t oCols = CONV_OUT(net->cols, kern->fltrs.cols); size_t oMatSize = oRows * oCols * NUM_CHNL; if (x < net->num && y < kern->fltrs.num) { size_t i = x * net->rows * net->cols * NUM_CHNL; size_t j = y * kern->fltrs.rows * kern->fltrs.cols * NUM_CHNL; size_t k = FLAT2D(x, y, kern->fltrs.num) * oMatSize; dim3 grdSize(NUMBLK(oRows, BLKS_2D), NUMBLK(oCols, BLKS_2D)); dim3 blkSize(BLKS_2D, BLKS_2D); hipLaunchKernelGGL(( MAT_convolve), dim3(grdSize), dim3(blkSize), 0, 0, &kern->inp.mats[i], &net->mats[k], &kern->fltrs.mats[j], net->rows, net->cols, kern->fltrs.rows, kern->fltrs.cols, false); } __syncthreads(); if (x == 0 && y == 0) { net->num = oNum; net->rows = oRows; net->cols = oCols; } } __global__ void LYR_conv_back(Conv_T *kern, Net_T *net, double *buf) { size_t x = FLAT2D(blockIdx.x, threadIdx.x, blockDim.x); size_t y = FLAT2D(blockIdx.x, threadIdx.x, blockDim.x); size_t numElm_delt = net->rows * net->cols * NUM_CHNL; if (x < kern->inp.num && y < kern->fltrs.num) { size_t i = FLAT2D(x, y, kern->fltrs.num) * numElm_delt; size_t j = x * kern->inp.rows * kern->inp.cols * NUM_CHNL; size_t k = y * kern->fltrs.rows * kern->fltrs.cols * NUM_CHNL; dim3 grdSize(NUMBLK(kern->inp.rows, BLKS_2D), NUMBLK(kern->inp.cols, BLKS_2D)); dim3 blkSize(BLKS_2D, BLKS_2D); hipLaunchKernelGGL(( MAT_inv_convolve), dim3(grdSize), dim3(blkSize), 0, 0, &net->mats[i], &buf[j], &kern->fltrs.mats[k], net->rows, 
net->cols, kern->fltrs.rows, kern->fltrs.cols, true); } __syncthreads(); if (x < kern->inp.num && y < kern->fltrs.num) { size_t i = FLAT2D(x, y, kern->fltrs.num) * numElm_delt; size_t j = x * kern->inp.rows * kern->inp.cols * NUM_CHNL; size_t k = y * kern->fltrs.rows * kern->fltrs.cols * NUM_CHNL; dim3 grdSize(NUMBLK(kern->fltrs.rows, BLKS_2D), NUMBLK(kern->fltrs.cols, BLKS_2D)); dim3 blkSize(BLKS_2D, BLKS_2D); hipLaunchKernelGGL(( MAT_convolve), dim3(grdSize), dim3(blkSize), 0, 0, &kern->inp.mats[j], &kern->fltrs.mats[k], &net->mats[i], kern->inp.rows, kern->inp.cols, net->rows, net->cols, true); } __syncthreads(); if (x == 0 && y == 0) { net->num = kern->inp.num; net->rows = kern->inp.rows; net->cols = kern->inp.cols; size_t numElm = net->num * net->rows * net->cols * NUM_CHNL; MAT_assign<<<LAUNCH1D(numElm)>>>(buf, net->mats, numElm); } } void LYR_conv_free(Conv_T *kern) { hipLaunchKernelGGL(( GPU_conv_free), dim3(1), dim3(1), 0, 0, kern); hipDeviceSynchronize(); hipFree(kern); } Pool_T *LYR_pool_init(size_t dim, size_t stride, size_t iNum, size_t iRows, size_t iCols) { Pool_T *pool; hipMalloc((void **)&pool, sizeof(Pool_T)); hipLaunchKernelGGL(( GPU_pool_init), dim3(1), dim3(1), 0, 0, pool, dim, stride, iNum, POOL_OUT(iRows, dim, stride), POOL_OUT(iCols, dim, stride)); hipDeviceSynchronize(); return pool; } __global__ void LYR_pool_fwd(Pool_T *pool, Net_T *net, double *buf) { size_t x = FLAT2D(blockIdx.x, threadIdx.x, blockDim.x); size_t poolElms = pool->rows * pool->cols * NUM_CHNL; if (x < net->num) { size_t i = x * net->rows * net->cols * NUM_CHNL; size_t j = x * poolElms; dim3 grdSize(NUMBLK(pool->rows, BLKS_2D), NUMBLK(pool->cols, BLKS_2D)); dim3 blkSize(BLKS_2D, BLKS_2D); hipLaunchKernelGGL(( MAT_pool), dim3(grdSize), dim3(blkSize), 0, 0, &net->mats[i], &buf[j], &pool->idxs[j], net->rows, net->cols, pool->dim, pool->stride); } __syncthreads(); if (x == 0) { net->rows = pool->rows; net->cols = pool->cols; size_t numElm = net->num * poolElms; MAT_assign<<<LAUNCH1D(numElm)>>>(buf, net->mats, numElm); } } __global__ void LYR_pool_back(Pool_T *pool, Net_T *net, double *buf) { size_t x = FLAT2D(blockIdx.x, threadIdx.x, blockDim.x); size_t oRows = ((pool->rows - 1) * pool->stride) + pool->dim; size_t oCols = ((pool->cols - 1) * pool->stride) + pool->dim; if (x < net->num) { size_t imgElm = pool->rows * pool->cols * NUM_CHNL; size_t i = x * imgElm; size_t j = x * oRows * oCols * NUM_CHNL; MAT_deltas_pool<<<LAUNCH1D(imgElm)>>>(&net->mats[i], &buf[j], &pool->idxs[i], net->rows, net->cols); } __syncthreads(); if (x == 0) { net->rows = oRows; net->cols = oCols; size_t totalElm = net->num * oRows * oCols * NUM_CHNL; MAT_assign<<<LAUNCH1D(totalElm)>>>(buf, net->mats, totalElm); } } void LYR_pool_free(Pool_T *pool) { hipLaunchKernelGGL(( GPU_pool_free), dim3(1), dim3(1), 0, 0, pool); hipDeviceSynchronize(); hipFree(pool); } __global__ void LYR_norm_fwd(Net_T *net, NonLin_T func) { size_t x = FLAT2D(blockIdx.x, threadIdx.x, blockDim.x); void (*nonLin)(double *, double *, size_t) = (func == RELU) ? 
&MAT_ReLU : &MAT_sigmoid; size_t matSize = net->rows * net->cols * NUM_CHNL; if (x < net->num) { size_t i = x * matSize; nonLin<<<LAUNCH1D(matSize)>>>(&net->mats[i], &net->mats[i], matSize); } } Softmax_T *LYR_softmax_init(size_t *topo, size_t numLyr, double lrnRate, NonLin_T fType) { Softmax_T *sm; hipMalloc((void **)&sm, sizeof(Softmax_T)); size_t *topo_d; hipMalloc((void **)&topo_d, numLyr * sizeof(size_t)); hipMemcpy(topo_d, topo, numLyr * sizeof(size_t), hipMemcpyHostToDevice); hipLaunchKernelGGL(( GPU_softmax_init), dim3(1), dim3(1), 0, 0, sm, topo_d, numLyr, lrnRate, fType); hipDeviceSynchronize(); hipFree(topo_d); return sm; } __global__ void LYR_softmax_fwd(Softmax_T *sm, Net_T *net) { size_t lastLyr = sm->numLyr - 1; MAT_assign<<<LAUNCH1D(sm->aTopo[0] - 1)>>>(net->mats, sm->activs, sm->aTopo[0] - 1); hipDeviceSynchronize(); for (size_t i = 0; i < lastLyr; i++) { MAT_fwdProp<<<LAUNCH1D(sm->wTopo[i])>>>(&sm->wgts[sm->wIdx[i]], &sm->activs[sm->aIdx[i]], &sm->activs[sm->aIdx[i + 1]], sm->wTopo[i], sm->aTopo[i], sm->fType); hipDeviceSynchronize(); } } __global__ void LYR_softmax_back(Softmax_T *sm, Net_T *net, size_t lbl) { size_t lastLyr = sm->numLyr - 1; MAT_deltas_out<<<LAUNCH1D(sm->aTopo[lastLyr])>>>(&sm->activs[sm->aIdx[lastLyr]], &sm->deltas[sm->aIdx[lastLyr]], sm->aTopo[lastLyr], lbl, sm->fType); hipDeviceSynchronize(); for (size_t i = lastLyr; i-- > 0;) { MAT_deltas_hidden<<<LAUNCH1D(sm->aTopo[i])>>>(&sm->activs[sm->aIdx[i]], &sm->deltas[sm->aIdx[i]], &sm->wgts[sm->wIdx[i]], &sm->deltas[sm->aIdx[i + 1]], sm->wTopo[i], sm->aTopo[i], sm->fType); hipDeviceSynchronize(); } size_t numElm = net->num * net->rows * net->cols * NUM_CHNL; MAT_assign<<<LAUNCH1D(numElm)>>>(sm->deltas, net->mats, numElm); LYR_softmax_gradDescent<<<LAUNCH1D(sm->numLyr)>>>(sm); } __global__ void LYR_softmax_loss(Softmax_T *sm, size_t lbl, double *loss) { size_t lastLyr = sm->numLyr - 1; MAT_loss<<<LAUNCH1D(sm->aTopo[lastLyr])>>>(&sm->activs[sm->aIdx[lastLyr]], sm->aTopo[lastLyr], lbl, loss); } __global__ void LYR_softmax_cpyOut(Softmax_T *sm, double *output) { size_t lastLyr = sm->numLyr - 1; MAT_assign<<<LAUNCH1D(sm->aTopo[lastLyr])>>>(&sm->activs[sm->aIdx[lastLyr]], output, sm->aTopo[lastLyr]); } void LYR_softmax_free(Softmax_T *sm) { hipLaunchKernelGGL(( GPU_softmax_free), dim3(1), dim3(1), 0, 0, sm); hipDeviceSynchronize(); hipFree(sm); } __global__ void GPU_net_init(Net_T *net, size_t maxNum, size_t maxHgt, size_t maxWid) { net->num = 1; net->rows = maxHgt; net->cols = maxWid; hipMalloc((void **)&net->mats, maxNum * maxHgt * maxWid * NUM_CHNL * sizeof(double)); } __global__ void GPU_net_free(Net_T *net) { hipFree(net->mats); } __global__ void GPU_conv_init(Conv_T *kern, size_t fNum, size_t fHgt, size_t fWid, size_t iNum, size_t iHgt, size_t iWid) { MatList_T fltrs; MatList_T inp; fltrs.num = fNum; fltrs.rows = fHgt; fltrs.cols = fWid; inp.num = iNum; inp.rows = iHgt; inp.cols = iWid; size_t numElm_fltrs = fNum * fHgt * fWid * NUM_CHNL; size_t numElm_inp = iNum * iHgt * iWid * NUM_CHNL; hipMalloc((void **)&fltrs.mats, numElm_fltrs * sizeof(double)); hipMalloc((void **)&inp.mats, numElm_inp * sizeof(double)); MAT_randomize<<<LAUNCH1D(numElm_fltrs)>>>(fltrs.mats, numElm_fltrs, DEFAULT_REDU_FAC); kern->fltrs = fltrs; kern->inp = inp; } __global__ void GPU_conv_free(Conv_T *kern) { hipFree(kern->fltrs.mats); hipFree(kern->inp.mats); } __global__ void GPU_pool_init(Pool_T *pool, size_t dim, size_t stride, size_t iNum, size_t oRows, size_t oCols) { pool->dim = dim; pool->stride = stride; pool->rows = oRows; 
pool->cols = oCols; hipMalloc((void **)&pool->idxs, iNum * oRows * oCols * NUM_CHNL * sizeof(double)); } __global__ void GPU_pool_free(Pool_T *pool) { hipFree(pool->idxs); } __global__ void GPU_softmax_init(Softmax_T *sm, size_t *topo, size_t numLyr, double lrnRate, NonLin_T fType) { sm->numLyr = numLyr; sm->lrnRate = lrnRate; sm->fType = fType; size_t lastLyr = numLyr - 1; hipMalloc((void **)&sm->aTopo, numLyr * sizeof(size_t)); hipMalloc((void **)&sm->aIdx, numLyr * sizeof(size_t)); hipMalloc((void **)&sm->wTopo, lastLyr * sizeof(size_t)); hipMalloc((void **)&sm->wIdx, lastLyr * sizeof(size_t)); size_t totalNrn = 0; size_t totalWgt = 0; for (size_t i = 0; i < lastLyr; i++) { sm->aTopo[i] = topo[i] + 1; sm->wTopo[i] = topo[i + 1]; sm->aIdx[i] = totalNrn; sm->wIdx[i] = totalWgt; totalNrn += sm->aTopo[i]; totalWgt += sm->aTopo[i] * sm->wTopo[i]; } sm->aTopo[lastLyr] = topo[lastLyr]; sm->aIdx[lastLyr] = totalNrn; totalNrn += topo[lastLyr]; hipMalloc((void **)&sm->activs, totalNrn * sizeof(double)); hipMalloc((void **)&sm->deltas, totalNrn * sizeof(double)); hipMalloc((void **)&sm->wgts, totalWgt * sizeof(double)); MAT_setVal<<<LAUNCH1D(totalNrn)>>>(sm->activs, totalNrn, 1); if (fType == RELU) { MAT_randomize<<<LAUNCH1D(totalWgt)>>>(sm->wgts, totalWgt, RELU_REDU_FAC); } else { MAT_randomize<<<LAUNCH1D(totalWgt)>>>(sm->wgts, totalWgt, SIG_REDU_FAC); } } __global__ void LYR_softmax_gradDescent(Softmax_T *sm) { size_t x = FLAT2D(blockIdx.x, threadIdx.x, blockDim.x); size_t lastLyr = sm->numLyr - 1; if (x < lastLyr) { dim3 grdSize(NUMBLK(sm->wTopo[x], BLKS_2D), NUMBLK(sm->aTopo[x], BLKS_2D)); dim3 blkSize(BLKS_2D, BLKS_2D); hipLaunchKernelGGL(( MAT_applyGradient), dim3(grdSize), dim3(blkSize), 0, 0, &sm->activs[sm->aIdx[x]], &sm->deltas[sm->aIdx[x + 1]], &sm->wgts[sm->wIdx[x]], sm->wTopo[x], sm->aTopo[x], sm->lrnRate); } } __global__ void GPU_softmax_free(Softmax_T *sm) { hipFree(sm->aTopo); hipFree(sm->wTopo); hipFree(sm->aIdx); hipFree(sm->wIdx); hipFree(sm->deltas); hipFree(sm->activs); hipFree(sm->wgts); }
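// A minimal sketch of the host-side launch mapping that hipify applies throughout the
// file above: kernel<<<grid, block, shmem, stream>>>(args...) becomes
// hipLaunchKernelGGL(kernel, grid, block, shmem, stream, args...). The kernel
// scale_sketch and its arguments are hypothetical; only the syntax mapping is the point.
#include <hip/hip_runtime.h>

__global__ void scale_sketch(double *v, double a, size_t n) {
  size_t i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) v[i] *= a;
}

static void launch_scale_sketch(double *d_v, double a, size_t n) {
  dim3 block(256);
  dim3 grid((unsigned)((n + block.x - 1) / block.x));
  // CUDA form:  scale_sketch<<<grid, block>>>(d_v, a, n);
  // HIP form produced by hipify:
  hipLaunchKernelGGL(scale_sketch, grid, block, 0 /*shared mem*/, 0 /*stream*/, d_v, a, n);
  hipDeviceSynchronize();
}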
4a8b5540382c19ea6bce22a1def3ff83b184d181.cu
#include "layer.h" static const unsigned DEFAULT_REDU_FAC = 1000; static const unsigned RELU_REDU_FAC = 1000; static const unsigned SIG_REDU_FAC = 100; __global__ void GPU_net_init(Net_T *, size_t, size_t, size_t); __global__ void GPU_net_free(Net_T *net); __global__ void GPU_conv_init(Conv_T *, size_t, size_t, size_t, size_t, size_t, size_t); __global__ void GPU_conv_free(Conv_T *); __global__ void GPU_pool_init(Pool_T *, size_t, size_t, size_t, size_t, size_t); __global__ void GPU_pool_free(Pool_T *); __global__ void GPU_softmax_init(Softmax_T *, size_t *, size_t, double, NonLin_T); __global__ void GPU_softmax_free(Softmax_T *); __global__ void LYR_softmax_gradDescent(Softmax_T *); Net_T *LYR_net_init(size_t maxNum, size_t maxHgt, size_t maxWid) { Net_T *net; cudaMalloc((void **)&net, sizeof(Net_T)); GPU_net_init<<<1, 1>>>(net, maxNum, maxHgt, maxWid); cudaDeviceSynchronize(); return net; } __global__ void LYR_net_update(Net_T *net, double *imgs, size_t num, size_t rows, size_t cols) { net->num = num; net->rows = rows; net->cols = cols; size_t numElm = num * rows * cols * NUM_CHNL; MAT_assign<<<LAUNCH1D(numElm)>>>(imgs, net->mats, numElm); } void LYR_net_free(Net_T *net) { GPU_net_free<<<1, 1>>>(net); cudaDeviceSynchronize(); cudaFree(net); } Conv_T *LYR_conv_init(size_t fNum, size_t fHgt, size_t fWid, size_t iNum, size_t iHgt, size_t iWid) { Conv_T *kern; cudaMalloc((void **)&kern, sizeof(Conv_T)); GPU_conv_init<<<1, 1>>>(kern, fNum, fHgt, fWid, iNum, iHgt, iWid); return kern; } __global__ void LYR_conv_fwd(Net_T *net, Conv_T *kern) { size_t x = FLAT2D(blockIdx.x, threadIdx.x, blockDim.x); size_t y = FLAT2D(blockIdx.y, threadIdx.y, blockDim.y); if (x == 0 && y == 0 ) { size_t numInp = net->num * net->rows * net->cols * NUM_CHNL; MAT_assign<<<LAUNCH1D(numInp)>>>(net->mats, kern->inp.mats, numInp); } __syncthreads(); size_t oNum = net->num * kern->fltrs.num; size_t oRows = CONV_OUT(net->rows, kern->fltrs.rows); size_t oCols = CONV_OUT(net->cols, kern->fltrs.cols); size_t oMatSize = oRows * oCols * NUM_CHNL; if (x < net->num && y < kern->fltrs.num) { size_t i = x * net->rows * net->cols * NUM_CHNL; size_t j = y * kern->fltrs.rows * kern->fltrs.cols * NUM_CHNL; size_t k = FLAT2D(x, y, kern->fltrs.num) * oMatSize; dim3 grdSize(NUMBLK(oRows, BLKS_2D), NUMBLK(oCols, BLKS_2D)); dim3 blkSize(BLKS_2D, BLKS_2D); MAT_convolve<<<grdSize, blkSize>>>(&kern->inp.mats[i], &net->mats[k], &kern->fltrs.mats[j], net->rows, net->cols, kern->fltrs.rows, kern->fltrs.cols, false); } __syncthreads(); if (x == 0 && y == 0) { net->num = oNum; net->rows = oRows; net->cols = oCols; } } __global__ void LYR_conv_back(Conv_T *kern, Net_T *net, double *buf) { size_t x = FLAT2D(blockIdx.x, threadIdx.x, blockDim.x); size_t y = FLAT2D(blockIdx.x, threadIdx.x, blockDim.x); size_t numElm_delt = net->rows * net->cols * NUM_CHNL; if (x < kern->inp.num && y < kern->fltrs.num) { size_t i = FLAT2D(x, y, kern->fltrs.num) * numElm_delt; size_t j = x * kern->inp.rows * kern->inp.cols * NUM_CHNL; size_t k = y * kern->fltrs.rows * kern->fltrs.cols * NUM_CHNL; dim3 grdSize(NUMBLK(kern->inp.rows, BLKS_2D), NUMBLK(kern->inp.cols, BLKS_2D)); dim3 blkSize(BLKS_2D, BLKS_2D); MAT_inv_convolve<<<grdSize, blkSize>>>(&net->mats[i], &buf[j], &kern->fltrs.mats[k], net->rows, net->cols, kern->fltrs.rows, kern->fltrs.cols, true); } __syncthreads(); if (x < kern->inp.num && y < kern->fltrs.num) { size_t i = FLAT2D(x, y, kern->fltrs.num) * numElm_delt; size_t j = x * kern->inp.rows * kern->inp.cols * NUM_CHNL; size_t k = y * kern->fltrs.rows * 
kern->fltrs.cols * NUM_CHNL; dim3 grdSize(NUMBLK(kern->fltrs.rows, BLKS_2D), NUMBLK(kern->fltrs.cols, BLKS_2D)); dim3 blkSize(BLKS_2D, BLKS_2D); MAT_convolve<<<grdSize, blkSize>>>(&kern->inp.mats[j], &kern->fltrs.mats[k], &net->mats[i], kern->inp.rows, kern->inp.cols, net->rows, net->cols, true); } __syncthreads(); if (x == 0 && y == 0) { net->num = kern->inp.num; net->rows = kern->inp.rows; net->cols = kern->inp.cols; size_t numElm = net->num * net->rows * net->cols * NUM_CHNL; MAT_assign<<<LAUNCH1D(numElm)>>>(buf, net->mats, numElm); } } void LYR_conv_free(Conv_T *kern) { GPU_conv_free<<<1, 1>>>(kern); cudaDeviceSynchronize(); cudaFree(kern); } Pool_T *LYR_pool_init(size_t dim, size_t stride, size_t iNum, size_t iRows, size_t iCols) { Pool_T *pool; cudaMalloc((void **)&pool, sizeof(Pool_T)); GPU_pool_init<<<1, 1>>>(pool, dim, stride, iNum, POOL_OUT(iRows, dim, stride), POOL_OUT(iCols, dim, stride)); cudaDeviceSynchronize(); return pool; } __global__ void LYR_pool_fwd(Pool_T *pool, Net_T *net, double *buf) { size_t x = FLAT2D(blockIdx.x, threadIdx.x, blockDim.x); size_t poolElms = pool->rows * pool->cols * NUM_CHNL; if (x < net->num) { size_t i = x * net->rows * net->cols * NUM_CHNL; size_t j = x * poolElms; dim3 grdSize(NUMBLK(pool->rows, BLKS_2D), NUMBLK(pool->cols, BLKS_2D)); dim3 blkSize(BLKS_2D, BLKS_2D); MAT_pool<<<grdSize, blkSize>>>(&net->mats[i], &buf[j], &pool->idxs[j], net->rows, net->cols, pool->dim, pool->stride); } __syncthreads(); if (x == 0) { net->rows = pool->rows; net->cols = pool->cols; size_t numElm = net->num * poolElms; MAT_assign<<<LAUNCH1D(numElm)>>>(buf, net->mats, numElm); } } __global__ void LYR_pool_back(Pool_T *pool, Net_T *net, double *buf) { size_t x = FLAT2D(blockIdx.x, threadIdx.x, blockDim.x); size_t oRows = ((pool->rows - 1) * pool->stride) + pool->dim; size_t oCols = ((pool->cols - 1) * pool->stride) + pool->dim; if (x < net->num) { size_t imgElm = pool->rows * pool->cols * NUM_CHNL; size_t i = x * imgElm; size_t j = x * oRows * oCols * NUM_CHNL; MAT_deltas_pool<<<LAUNCH1D(imgElm)>>>(&net->mats[i], &buf[j], &pool->idxs[i], net->rows, net->cols); } __syncthreads(); if (x == 0) { net->rows = oRows; net->cols = oCols; size_t totalElm = net->num * oRows * oCols * NUM_CHNL; MAT_assign<<<LAUNCH1D(totalElm)>>>(buf, net->mats, totalElm); } } void LYR_pool_free(Pool_T *pool) { GPU_pool_free<<<1, 1>>>(pool); cudaDeviceSynchronize(); cudaFree(pool); } __global__ void LYR_norm_fwd(Net_T *net, NonLin_T func) { size_t x = FLAT2D(blockIdx.x, threadIdx.x, blockDim.x); void (*nonLin)(double *, double *, size_t) = (func == RELU) ? 
&MAT_ReLU : &MAT_sigmoid; size_t matSize = net->rows * net->cols * NUM_CHNL; if (x < net->num) { size_t i = x * matSize; nonLin<<<LAUNCH1D(matSize)>>>(&net->mats[i], &net->mats[i], matSize); } } Softmax_T *LYR_softmax_init(size_t *topo, size_t numLyr, double lrnRate, NonLin_T fType) { Softmax_T *sm; cudaMalloc((void **)&sm, sizeof(Softmax_T)); size_t *topo_d; cudaMalloc((void **)&topo_d, numLyr * sizeof(size_t)); cudaMemcpy(topo_d, topo, numLyr * sizeof(size_t), cudaMemcpyHostToDevice); GPU_softmax_init<<<1, 1>>>(sm, topo_d, numLyr, lrnRate, fType); cudaDeviceSynchronize(); cudaFree(topo_d); return sm; } __global__ void LYR_softmax_fwd(Softmax_T *sm, Net_T *net) { size_t lastLyr = sm->numLyr - 1; MAT_assign<<<LAUNCH1D(sm->aTopo[0] - 1)>>>(net->mats, sm->activs, sm->aTopo[0] - 1); cudaDeviceSynchronize(); for (size_t i = 0; i < lastLyr; i++) { MAT_fwdProp<<<LAUNCH1D(sm->wTopo[i])>>>(&sm->wgts[sm->wIdx[i]], &sm->activs[sm->aIdx[i]], &sm->activs[sm->aIdx[i + 1]], sm->wTopo[i], sm->aTopo[i], sm->fType); cudaDeviceSynchronize(); } } __global__ void LYR_softmax_back(Softmax_T *sm, Net_T *net, size_t lbl) { size_t lastLyr = sm->numLyr - 1; MAT_deltas_out<<<LAUNCH1D(sm->aTopo[lastLyr])>>>(&sm->activs[sm->aIdx[lastLyr]], &sm->deltas[sm->aIdx[lastLyr]], sm->aTopo[lastLyr], lbl, sm->fType); cudaDeviceSynchronize(); for (size_t i = lastLyr; i-- > 0;) { MAT_deltas_hidden<<<LAUNCH1D(sm->aTopo[i])>>>(&sm->activs[sm->aIdx[i]], &sm->deltas[sm->aIdx[i]], &sm->wgts[sm->wIdx[i]], &sm->deltas[sm->aIdx[i + 1]], sm->wTopo[i], sm->aTopo[i], sm->fType); cudaDeviceSynchronize(); } size_t numElm = net->num * net->rows * net->cols * NUM_CHNL; MAT_assign<<<LAUNCH1D(numElm)>>>(sm->deltas, net->mats, numElm); LYR_softmax_gradDescent<<<LAUNCH1D(sm->numLyr)>>>(sm); } __global__ void LYR_softmax_loss(Softmax_T *sm, size_t lbl, double *loss) { size_t lastLyr = sm->numLyr - 1; MAT_loss<<<LAUNCH1D(sm->aTopo[lastLyr])>>>(&sm->activs[sm->aIdx[lastLyr]], sm->aTopo[lastLyr], lbl, loss); } __global__ void LYR_softmax_cpyOut(Softmax_T *sm, double *output) { size_t lastLyr = sm->numLyr - 1; MAT_assign<<<LAUNCH1D(sm->aTopo[lastLyr])>>>(&sm->activs[sm->aIdx[lastLyr]], output, sm->aTopo[lastLyr]); } void LYR_softmax_free(Softmax_T *sm) { GPU_softmax_free<<<1, 1>>>(sm); cudaDeviceSynchronize(); cudaFree(sm); } __global__ void GPU_net_init(Net_T *net, size_t maxNum, size_t maxHgt, size_t maxWid) { net->num = 1; net->rows = maxHgt; net->cols = maxWid; cudaMalloc((void **)&net->mats, maxNum * maxHgt * maxWid * NUM_CHNL * sizeof(double)); } __global__ void GPU_net_free(Net_T *net) { cudaFree(net->mats); } __global__ void GPU_conv_init(Conv_T *kern, size_t fNum, size_t fHgt, size_t fWid, size_t iNum, size_t iHgt, size_t iWid) { MatList_T fltrs; MatList_T inp; fltrs.num = fNum; fltrs.rows = fHgt; fltrs.cols = fWid; inp.num = iNum; inp.rows = iHgt; inp.cols = iWid; size_t numElm_fltrs = fNum * fHgt * fWid * NUM_CHNL; size_t numElm_inp = iNum * iHgt * iWid * NUM_CHNL; cudaMalloc((void **)&fltrs.mats, numElm_fltrs * sizeof(double)); cudaMalloc((void **)&inp.mats, numElm_inp * sizeof(double)); MAT_randomize<<<LAUNCH1D(numElm_fltrs)>>>(fltrs.mats, numElm_fltrs, DEFAULT_REDU_FAC); kern->fltrs = fltrs; kern->inp = inp; } __global__ void GPU_conv_free(Conv_T *kern) { cudaFree(kern->fltrs.mats); cudaFree(kern->inp.mats); } __global__ void GPU_pool_init(Pool_T *pool, size_t dim, size_t stride, size_t iNum, size_t oRows, size_t oCols) { pool->dim = dim; pool->stride = stride; pool->rows = oRows; pool->cols = oCols; cudaMalloc((void **)&pool->idxs, iNum 
* oRows * oCols * NUM_CHNL * sizeof(double)); } __global__ void GPU_pool_free(Pool_T *pool) { cudaFree(pool->idxs); } __global__ void GPU_softmax_init(Softmax_T *sm, size_t *topo, size_t numLyr, double lrnRate, NonLin_T fType) { sm->numLyr = numLyr; sm->lrnRate = lrnRate; sm->fType = fType; size_t lastLyr = numLyr - 1; cudaMalloc((void **)&sm->aTopo, numLyr * sizeof(size_t)); cudaMalloc((void **)&sm->aIdx, numLyr * sizeof(size_t)); cudaMalloc((void **)&sm->wTopo, lastLyr * sizeof(size_t)); cudaMalloc((void **)&sm->wIdx, lastLyr * sizeof(size_t)); size_t totalNrn = 0; size_t totalWgt = 0; for (size_t i = 0; i < lastLyr; i++) { sm->aTopo[i] = topo[i] + 1; sm->wTopo[i] = topo[i + 1]; sm->aIdx[i] = totalNrn; sm->wIdx[i] = totalWgt; totalNrn += sm->aTopo[i]; totalWgt += sm->aTopo[i] * sm->wTopo[i]; } sm->aTopo[lastLyr] = topo[lastLyr]; sm->aIdx[lastLyr] = totalNrn; totalNrn += topo[lastLyr]; cudaMalloc((void **)&sm->activs, totalNrn * sizeof(double)); cudaMalloc((void **)&sm->deltas, totalNrn * sizeof(double)); cudaMalloc((void **)&sm->wgts, totalWgt * sizeof(double)); MAT_setVal<<<LAUNCH1D(totalNrn)>>>(sm->activs, totalNrn, 1); if (fType == RELU) { MAT_randomize<<<LAUNCH1D(totalWgt)>>>(sm->wgts, totalWgt, RELU_REDU_FAC); } else { MAT_randomize<<<LAUNCH1D(totalWgt)>>>(sm->wgts, totalWgt, SIG_REDU_FAC); } } __global__ void LYR_softmax_gradDescent(Softmax_T *sm) { size_t x = FLAT2D(blockIdx.x, threadIdx.x, blockDim.x); size_t lastLyr = sm->numLyr - 1; if (x < lastLyr) { dim3 grdSize(NUMBLK(sm->wTopo[x], BLKS_2D), NUMBLK(sm->aTopo[x], BLKS_2D)); dim3 blkSize(BLKS_2D, BLKS_2D); MAT_applyGradient<<<grdSize, blkSize>>>(&sm->activs[sm->aIdx[x]], &sm->deltas[sm->aIdx[x + 1]], &sm->wgts[sm->wIdx[x]], sm->wTopo[x], sm->aTopo[x], sm->lrnRate); } } __global__ void GPU_softmax_free(Softmax_T *sm) { cudaFree(sm->aTopo); cudaFree(sm->wTopo); cudaFree(sm->aIdx); cudaFree(sm->wIdx); cudaFree(sm->deltas); cudaFree(sm->activs); cudaFree(sm->wgts); }
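// A minimal sketch of the device-side launch pattern used by LYR_conv_fwd and
// LYR_pool_fwd above: a parent kernel launches child grids (CUDA dynamic parallelism),
// which requires relocatable device code (nvcc -rdc=true) and compute capability >= 3.5.
// The kernels below are hypothetical stand-ins, not the project's MAT_* kernels.
#include <cstddef>

__global__ void child_copy_sketch(double *dst, const double *src, size_t n) {
  size_t i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) dst[i] = src[i];
}

__global__ void parent_stage_sketch(double *dst, const double *src, size_t n) {
  // Derive the 2D index the usual way: x from the .x components, y from the .y components.
  size_t x = blockIdx.x * blockDim.x + threadIdx.x;
  size_t y = blockIdx.y * blockDim.y + threadIdx.y;
  if (x == 0 && y == 0) {
    // Exactly one thread issues the child grid so the copy is launched once.
    child_copy_sketch<<<(unsigned)((n + 255) / 256), 256>>>(dst, src, n);
  }
}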
c01aac6b21ea6677e704ed297898ea4ccfa6f540.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright 1993-2010 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. * */ #ifndef _BICUBICTEXTURE_CU_ #define _BICUBICTEXTURE_CU_ #include <stdlib.h> #include <stdio.h> #include <string.h> #include <cutil_inline.h> // includes cuda.h and hip/hip_runtime_api.h #include <cutil_math.h> typedef unsigned int uint; typedef unsigned char uchar; #include <bicubicTexture_kernel.cuh> hipArray *d_imageArray = 0; extern "C" void initTexture(int imageWidth, int imageHeight, uchar *h_data) { // allocate array and copy image data hipChannelFormatDesc channelDesc = hipCreateChannelDesc(8, 0, 0, 0, hipChannelFormatKindUnsigned); cutilSafeCall( hipMallocArray(&d_imageArray, &channelDesc, imageWidth, imageHeight) ); uint size = imageWidth * imageHeight * sizeof(uchar); cutilSafeCall( hipMemcpyToArray(d_imageArray, 0, 0, h_data, size, hipMemcpyHostToDevice) ); cutFree(h_data); // set texture parameters tex.addressMode[0] = hipAddressModeClamp; tex.addressMode[1] = hipAddressModeClamp; tex.filterMode = hipFilterModeLinear; tex.normalized = false; // access with integer texture coordinates cutilCheckMsg("initTexture"); // Bind the array to the texture cutilSafeCall( hipBindTextureToArray(tex, d_imageArray) ); // bind same array to 2nd texture reference with point sampling tex2.addressMode[0] = hipAddressModeClamp; tex2.addressMode[1] = hipAddressModeClamp; tex2.filterMode = hipFilterModePoint; tex2.normalized = false; // access with integer texture coordinates cutilSafeCall( hipBindTextureToArray(tex2, d_imageArray) ); } extern "C" void freeTexture() { cutilSafeCall(hipFreeArray(d_imageArray)); } // render image using CUDA extern "C" void render(int width, int height, float tx, float ty, float scale, float cx, float cy, dim3 blockSize, dim3 gridSize, int mode, uchar4 *output) { // call CUDA kernel, writing results to PBO memory switch(mode) { case MODE_NEAREST: tex.filterMode = hipFilterModePoint; hipLaunchKernelGGL(( d_render), dim3(gridSize), dim3(blockSize), 0, 0, output, width, height, tx, ty, scale, cx, cy); break; case MODE_BILINEAR: tex.filterMode = hipFilterModeLinear; hipLaunchKernelGGL(( d_render), dim3(gridSize), dim3(blockSize), 0, 0, output, width, height, tx, ty, scale, cx, cy); break; case MODE_BICUBIC: tex.filterMode = hipFilterModePoint; hipLaunchKernelGGL(( d_renderBicubic), dim3(gridSize), dim3(blockSize), 0, 0, output, width, height, tx, ty, scale, cx, cy); break; case MODE_FAST_BICUBIC: tex.filterMode = hipFilterModeLinear; hipLaunchKernelGGL(( d_renderFastBicubic), dim3(gridSize), dim3(blockSize), 0, 0, output, width, height, tx, ty, scale, cx, cy); break; case MODE_CATROM: tex.filterMode = hipFilterModePoint; hipLaunchKernelGGL(( d_renderCatRom), dim3(gridSize), dim3(blockSize), 0, 0, output, width, height, tx, ty, scale, cx, cy); break; } cutilCheckMsg("kernel failed"); } #endif
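// A minimal sketch of the array setup performed by initTexture above, assuming an
// 8-bit single-channel image; it stops before the legacy texture-reference binding.
// Error handling is reduced to early returns; the function name is hypothetical.
#include <hip/hip_runtime.h>

static hipArray *alloc_image_array(const unsigned char *h_img, int width, int height) {
  hipChannelFormatDesc desc =
      hipCreateChannelDesc(8, 0, 0, 0, hipChannelFormatKindUnsigned);
  hipArray *d_arr = nullptr;
  if (hipMallocArray(&d_arr, &desc, width, height) != hipSuccess) return nullptr;
  // Copy the tightly packed host image into the array; hipMemcpyToArray is deprecated
  // but still available, and it is the call the file above relies on.
  if (hipMemcpyToArray(d_arr, 0, 0, h_img, (size_t)width * height,
                       hipMemcpyHostToDevice) != hipSuccess) {
    hipFreeArray(d_arr);
    return nullptr;
  }
  return d_arr;
}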
c01aac6b21ea6677e704ed297898ea4ccfa6f540.cu
/* * Copyright 1993-2010 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. * */ #ifndef _BICUBICTEXTURE_CU_ #define _BICUBICTEXTURE_CU_ #include <stdlib.h> #include <stdio.h> #include <string.h> #include <cutil_inline.h> // includes cuda.h and cuda_runtime_api.h #include <cutil_math.h> typedef unsigned int uint; typedef unsigned char uchar; #include <bicubicTexture_kernel.cuh> cudaArray *d_imageArray = 0; extern "C" void initTexture(int imageWidth, int imageHeight, uchar *h_data) { // allocate array and copy image data cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc(8, 0, 0, 0, cudaChannelFormatKindUnsigned); cutilSafeCall( cudaMallocArray(&d_imageArray, &channelDesc, imageWidth, imageHeight) ); uint size = imageWidth * imageHeight * sizeof(uchar); cutilSafeCall( cudaMemcpyToArray(d_imageArray, 0, 0, h_data, size, cudaMemcpyHostToDevice) ); cutFree(h_data); // set texture parameters tex.addressMode[0] = cudaAddressModeClamp; tex.addressMode[1] = cudaAddressModeClamp; tex.filterMode = cudaFilterModeLinear; tex.normalized = false; // access with integer texture coordinates cutilCheckMsg("initTexture"); // Bind the array to the texture cutilSafeCall( cudaBindTextureToArray(tex, d_imageArray) ); // bind same array to 2nd texture reference with point sampling tex2.addressMode[0] = cudaAddressModeClamp; tex2.addressMode[1] = cudaAddressModeClamp; tex2.filterMode = cudaFilterModePoint; tex2.normalized = false; // access with integer texture coordinates cutilSafeCall( cudaBindTextureToArray(tex2, d_imageArray) ); } extern "C" void freeTexture() { cutilSafeCall(cudaFreeArray(d_imageArray)); } // render image using CUDA extern "C" void render(int width, int height, float tx, float ty, float scale, float cx, float cy, dim3 blockSize, dim3 gridSize, int mode, uchar4 *output) { // call CUDA kernel, writing results to PBO memory switch(mode) { case MODE_NEAREST: tex.filterMode = cudaFilterModePoint; d_render<<<gridSize, blockSize>>>(output, width, height, tx, ty, scale, cx, cy); break; case MODE_BILINEAR: tex.filterMode = cudaFilterModeLinear; d_render<<<gridSize, blockSize>>>(output, width, height, tx, ty, scale, cx, cy); break; case MODE_BICUBIC: tex.filterMode = cudaFilterModePoint; d_renderBicubic<<<gridSize, blockSize>>>(output, width, height, tx, ty, scale, cx, cy); break; case MODE_FAST_BICUBIC: tex.filterMode = cudaFilterModeLinear; d_renderFastBicubic<<<gridSize, blockSize>>>(output, width, height, tx, ty, scale, cx, cy); break; case MODE_CATROM: tex.filterMode = cudaFilterModePoint; d_renderCatRom<<<gridSize, blockSize>>>(output, width, height, tx, ty, scale, cx, cy); break; } cutilCheckMsg("kernel failed"); } #endif
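// The file above uses the texture-reference API (tex, tex2, cudaBindTextureToArray),
// which is deprecated and was removed in CUDA 12. A minimal sketch of the texture-object
// replacement for the same cudaArray; the normalized-float read mode is an assumption
// based on how the sample filters 8-bit data.
#include <cuda_runtime.h>

static cudaTextureObject_t make_tex_object(cudaArray *arr, cudaTextureFilterMode filter) {
  cudaResourceDesc resDesc = {};
  resDesc.resType = cudaResourceTypeArray;
  resDesc.res.array.array = arr;

  cudaTextureDesc texDesc = {};
  texDesc.addressMode[0]   = cudaAddressModeClamp;
  texDesc.addressMode[1]   = cudaAddressModeClamp;
  texDesc.filterMode       = filter;          // cudaFilterModePoint or cudaFilterModeLinear
  texDesc.readMode         = cudaReadModeNormalizedFloat;
  texDesc.normalizedCoords = 0;               // integer texture coordinates, as in the file

  cudaTextureObject_t tex = 0;
  cudaCreateTextureObject(&tex, &resDesc, &texDesc, nullptr);
  return tex;   // pass to kernels and sample with tex2D<float>(tex, x, y)
}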
faac20afe2134abb101745d4bebe7460124dcc90.hip
// !!! This is a file automatically generated by hipify!!! // ---------------------------------------------------------------- // Sudoku -- Puzzle Solver on GPU using CUDA // ---------------------------------------------------------------- /** * @file * sudoku.cu * * @brief main sudoku file to init and execute */ // includes, system #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> // includes, project #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> // includes, utilities #include "util/error_utils.cuh" #include "util/io_utils.cuh" #include "data.cuh" // includes, kernels #include "beecolony.cuh" #include "AngelaKernels.cuh" #include "bfsKernel.cuh" void KernelManager(int n, Square * h_unsolved, bool o_graphics) { /* CUDA event setup */ hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); /* Memory Allocations */ int memsize = sizeof(Square) * n * n; Square * d_unsolved; ERROR_CHECK( hipMalloc((void**) &d_unsolved, memsize) ); /* IMPORTANT: PLEASE ADD THIS IN YOUR KERNEL MANAGER FUNCTION */ /*ERROR_CHECK( hipMemcpy(d_unsolved, h_unsolved, memsize, hipMemcpyHostToDevice) );*/ /* IMPORTANT: END! */ Square * d_solved; ERROR_CHECK( hipMalloc((void**) &d_solved, memsize) ); float elapsedTime; hipEventRecord(start, 0); ArtificialBeeColony (h_unsolved, d_unsolved, d_solved, n); hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&elapsedTime, start, stop); Square * h_solved = (Square *) malloc(memsize); ERROR_CHECK( hipMemcpy(h_solved, d_solved, memsize, hipMemcpyDeviceToHost) ); /* Destroy CUDA event */ hipEventDestroy(start); hipEventDestroy(stop); // TODO: Terminal Output will go here. const char * alg = "-bee"; if (!strcmp(alg, "-bee")) { CSRtoCSC(h_solved, n); const char * finished = "/********** Bee Colony (C) **********/"; output(finished, alg, n, false, h_solved); } const char* statistics = "/******* Statistics (Begin) ********/"; printf("%s\n", statistics); printf("Elapsed Time: %f (ms)\n", elapsedTime); const char* statistics_end = "/******** Statistics (End) *********/"; printf("%s\n", statistics_end); /* Free Memory Allocations */ free(h_unsolved); ERROR_CHECK( hipFree(d_unsolved) ); ERROR_CHECK( hipFree(d_solved) ); } int main(int argc, char** argv) { /* Gets arguments from command line and puzzle from a file */ CommandLineArgs * build = new CommandLineArgs; input(argc, argv, build); KernelManager((*build).size, (*build).Puzzle, (*build).graphics); }
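// A minimal sketch of the event-based timing that KernelManager above performs inline;
// `work` stands in for whatever is enqueued between the two events and is hypothetical.
#include <hip/hip_runtime.h>

template <typename F>
static float time_on_gpu(F work) {
  hipEvent_t start, stop;
  hipEventCreate(&start);
  hipEventCreate(&stop);
  hipEventRecord(start, 0);        // record on the default stream
  work();                          // enqueue the kernels / copies to be measured
  hipEventRecord(stop, 0);
  hipEventSynchronize(stop);       // block until everything before `stop` has finished
  float ms = 0.f;
  hipEventElapsedTime(&ms, start, stop);
  hipEventDestroy(start);
  hipEventDestroy(stop);
  return ms;                       // elapsed GPU time in milliseconds
}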
faac20afe2134abb101745d4bebe7460124dcc90.cu
// ---------------------------------------------------------------- // Sudoku -- Puzzle Solver on GPU using CUDA // ---------------------------------------------------------------- /** * @file * sudoku.cu * * @brief main sudoku file to init and execute */ // includes, system #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> // includes, project #include <cuda.h> #include <cuda_runtime.h> // includes, utilities #include "util/error_utils.cuh" #include "util/io_utils.cuh" #include "data.cuh" // includes, kernels #include "beecolony.cuh" #include "AngelaKernels.cuh" #include "bfsKernel.cuh" void KernelManager(int n, Square * h_unsolved, bool o_graphics) { /* CUDA event setup */ cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); /* Memory Allocations */ int memsize = sizeof(Square) * n * n; Square * d_unsolved; ERROR_CHECK( cudaMalloc((void**) &d_unsolved, memsize) ); /* IMPORTANT: PLEASE ADD THIS IN YOUR KERNEL MANAGER FUNCTION */ /*ERROR_CHECK( cudaMemcpy(d_unsolved, h_unsolved, memsize, cudaMemcpyHostToDevice) );*/ /* IMPORTANT: END! */ Square * d_solved; ERROR_CHECK( cudaMalloc((void**) &d_solved, memsize) ); float elapsedTime; cudaEventRecord(start, 0); ArtificialBeeColony (h_unsolved, d_unsolved, d_solved, n); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&elapsedTime, start, stop); Square * h_solved = (Square *) malloc(memsize); ERROR_CHECK( cudaMemcpy(h_solved, d_solved, memsize, cudaMemcpyDeviceToHost) ); /* Destroy CUDA event */ cudaEventDestroy(start); cudaEventDestroy(stop); // TODO: Terminal Output will go here. const char * alg = "-bee"; if (!strcmp(alg, "-bee")) { CSRtoCSC(h_solved, n); const char * finished = "/********** Bee Colony (C) **********/"; output(finished, alg, n, false, h_solved); } const char* statistics = "/******* Statistics (Begin) ********/"; printf("%s\n", statistics); printf("Elapsed Time: %f (ms)\n", elapsedTime); const char* statistics_end = "/******** Statistics (End) *********/"; printf("%s\n", statistics_end); /* Free Memory Allocations */ free(h_unsolved); ERROR_CHECK( cudaFree(d_unsolved) ); ERROR_CHECK( cudaFree(d_solved) ); } int main(int argc, char** argv) { /* Gets arguments from command line and puzzle from a file */ CommandLineArgs * build = new CommandLineArgs; input(argc, argv, build); KernelManager((*build).size, (*build).Puzzle, (*build).graphics); }
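// ERROR_CHECK comes from util/error_utils.cuh, which is not shown here; a typical
// implementation looks like the sketch below (an assumption, not necessarily the
// project's exact macro).
#include <cuda_runtime.h>
#include <cstdio>
#include <cstdlib>

#define ERROR_CHECK_SKETCH(call)                                                \
  do {                                                                          \
    cudaError_t err__ = (call);                                                 \
    if (err__ != cudaSuccess) {                                                 \
      fprintf(stderr, "CUDA error %s at %s:%d\n",                               \
              cudaGetErrorString(err__), __FILE__, __LINE__);                   \
      exit(EXIT_FAILURE);                                                       \
    }                                                                           \
  } while (0)

// Usage mirrors the file above: ERROR_CHECK_SKETCH( cudaMalloc((void**)&d_buf, bytes) );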
d8320cb2b7ef7b3e01b12005ed8bfaadcc7118f1.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Copyright (c) "2019, by Stanford University // Developer: Mario Di Renzo // Affiliation: Center for Turbulence Research, Stanford University // URL: https://ctr.stanford.edu // Citation: Di Renzo, M., Lin, F., and Urzay, J. (2020). // HTR solver: An open-source exascale-oriented task-based // multi-GPU high-order code for hypersonic aerothermodynamics. // Computer Physics Communications 255, 107262" // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND // ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE // DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER BE LIABLE FOR ANY // DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES // (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; // LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND // ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS // SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
#include "prometeo_variables.hpp" #include "cuda_utils.hpp" // Define a constant memory that will hold the Mixture struct __device__ __constant__ Mix mix; //----------------------------------------------------------------------------- // KERNELS FOR UpdatePropertiesFromPrimitiveTask //----------------------------------------------------------------------------- __global__ void UpdatePropertiesFromPrimitive_kernel(const AccessorRO<double, 3> pressure, const AccessorRO<double, 3> temperature, const AccessorRO<VecNSp, 3> MolarFracs, const AccessorRO< Vec3, 3> velocity, const AccessorWO<VecNSp, 3> MassFracs, const AccessorWO<double, 3> rho, const AccessorWO<double, 3> mu, const AccessorWO<double, 3> lam, const AccessorWO<VecNSp, 3> Di, const AccessorWO<double, 3> SoS, const Rect<3> my_bounds, const coord_t size_x, const coord_t size_y, const coord_t size_z) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; int z = blockIdx.z * blockDim.z + threadIdx.z; if ((x < size_x) && (y < size_y) && (z < size_z)) { const Point<3> p = Point<3>(x + my_bounds.lo.x, y + my_bounds.lo.y, z + my_bounds.lo.z); // TODO: Add a Mixture check UpdatePropertiesFromPrimitiveTask::UpdateProperties( pressure, temperature, MolarFracs, velocity, MassFracs, rho, mu, lam, Di, SoS, p, mix); } } __host__ void UpdatePropertiesFromPrimitiveTask::gpu_base_impl( const Args &args, const std::vector<PhysicalRegion> &regions, const std::vector<Future> &futures, Context ctx, Runtime *runtime) { assert(regions.size() == 3); assert(futures.size() == 0); // Accessors for primitive variables const AccessorRO<double, 3> acc_pressure (regions[0], FID_pressure); const AccessorRO<double, 3> acc_temperature (regions[0], FID_temperature); const AccessorRO<VecNSp, 3> acc_MolarFracs (regions[0], FID_MolarFracs); const AccessorRO< Vec3, 3> acc_velocity (regions[0], FID_velocity); const AccessorWO<VecNSp, 3> acc_MassFracs (regions[1], FID_MassFracs); // Accessors for properties const AccessorWO<double, 3> acc_rho (regions[1], FID_rho); const AccessorWO<double, 3> acc_mu (regions[1], FID_mu); const AccessorWO<double, 3> acc_lam (regions[1], FID_lam); const AccessorWO<VecNSp, 3> acc_Di (regions[1], FID_Di); const AccessorWO<double, 3> acc_SoS (regions[1], FID_SoS); // Extract execution domains Rect<3> r_ModCells = runtime->get_index_space_domain(ctx, args.ModCells.get_index_space()); // Copy the mixture to the device hipMemcpyToSymbolAsync(mix, &(args.mix), sizeof(Mix)); // Launch the kernel const int threads_per_block = 256; const dim3 TPB_3d = splitThreadsPerBlock<Xdir>(threads_per_block, r_ModCells); const dim3 num_blocks_3d = dim3((getSize<Xdir>(r_ModCells) + (TPB_3d.x - 1)) / TPB_3d.x, (getSize<Ydir>(r_ModCells) + (TPB_3d.y - 1)) / TPB_3d.y, (getSize<Zdir>(r_ModCells) + (TPB_3d.z - 1)) / TPB_3d.z); hipLaunchKernelGGL(( UpdatePropertiesFromPrimitive_kernel), dim3(num_blocks_3d), dim3(TPB_3d), 0, 0, acc_pressure, acc_temperature, acc_MolarFracs, acc_velocity, acc_MassFracs, acc_rho, acc_mu, acc_lam, acc_Di, acc_SoS, r_ModCells, getSize<Xdir>(r_ModCells), getSize<Ydir>(r_ModCells), getSize<Zdir>(r_ModCells)); } //----------------------------------------------------------------------------- // KERNELS FOR GetVelocityGradientsTask //----------------------------------------------------------------------------- __global__ void GetVelocityGradients_kernel(const AccessorWO< Vec3, 3> vGradX, const AccessorWO< Vec3, 3> vGradY, const AccessorWO< Vec3, 3> vGradZ, const AccessorRO< Vec3, 3> velocity, const 
AccessorRO< int, 3> nType_x, const AccessorRO< int, 3> nType_y, const AccessorRO< int, 3> nType_z, const AccessorRO<double, 3> dcsi_d, const AccessorRO<double, 3> deta_d, const AccessorRO<double, 3> dzet_d, const Rect<3> my_bounds, const Rect<3> Fluid_bounds, const coord_t size_x, const coord_t size_y, const coord_t size_z, const coord_t dsize_x, const coord_t dsize_y, const coord_t dsize_z) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; int z = blockIdx.z * blockDim.z + threadIdx.z; if ((x < size_x) && (y < size_y) && (z < size_z)) { const Point<3> p = Point<3>(x + my_bounds.lo.x, y + my_bounds.lo.y, z + my_bounds.lo.z); // X gradient GetVelocityGradientsTask::computeDerivatives<Xdir>(vGradX, velocity, p, nType_x[p], dcsi_d[p], dsize_x, Fluid_bounds); // Y gradient GetVelocityGradientsTask::computeDerivatives<Ydir>(vGradY, velocity, p, nType_y[p], deta_d[p], dsize_y, Fluid_bounds); // Z gradient GetVelocityGradientsTask::computeDerivatives<Zdir>(vGradZ, velocity, p, nType_z[p], dzet_d[p], dsize_z, Fluid_bounds); } } __host__ void GetVelocityGradientsTask::gpu_base_impl( const Args &args, const std::vector<PhysicalRegion> &regions, const std::vector<Future> &futures, Context ctx, Runtime *runtime) { assert(regions.size() == 3); assert(futures.size() == 0); // Accessors for variables in the Ghost regions const AccessorRO< Vec3, 3> acc_velocity (regions[0], FID_velocity); // Accessors for metrics const AccessorRO< int, 3> acc_nType_x (regions[1], FID_nType_x); const AccessorRO< int, 3> acc_nType_y (regions[1], FID_nType_y); const AccessorRO< int, 3> acc_nType_z (regions[1], FID_nType_z); const AccessorRO<double, 3> acc_dcsi_d (regions[1], FID_dcsi_d); const AccessorRO<double, 3> acc_deta_d (regions[1], FID_deta_d); const AccessorRO<double, 3> acc_dzet_d (regions[1], FID_dzet_d); // Accessors for gradients const AccessorWO< Vec3, 3> acc_vGradX (regions[2], FID_velocityGradientX); const AccessorWO< Vec3, 3> acc_vGradY (regions[2], FID_velocityGradientY); const AccessorWO< Vec3, 3> acc_vGradZ (regions[2], FID_velocityGradientZ); // Extract execution domains Rect<3> r_MyFluid = runtime->get_index_space_domain(ctx, args.Fluid.get_index_space()); Rect<3> Fluid_bounds = args.Fluid_bounds; // Get domain sizes const coord_t xsize = getSize<Xdir>(Fluid_bounds); const coord_t ysize = getSize<Ydir>(Fluid_bounds); const coord_t zsize = getSize<Zdir>(Fluid_bounds); // Launch the kernel const int threads_per_block = 256; const dim3 TPB_3d = splitThreadsPerBlock<Xdir>(threads_per_block, r_MyFluid); const dim3 num_blocks_3d = dim3((getSize<Xdir>(r_MyFluid) + (TPB_3d.x - 1)) / TPB_3d.x, (getSize<Ydir>(r_MyFluid) + (TPB_3d.y - 1)) / TPB_3d.y, (getSize<Zdir>(r_MyFluid) + (TPB_3d.z - 1)) / TPB_3d.z); hipLaunchKernelGGL(( GetVelocityGradients_kernel), dim3(num_blocks_3d), dim3(TPB_3d), 0, 0, acc_vGradX, acc_vGradY, acc_vGradZ, acc_velocity, acc_nType_x, acc_nType_y, acc_nType_z, acc_dcsi_d, acc_deta_d, acc_dzet_d, r_MyFluid, Fluid_bounds, getSize<Xdir>(r_MyFluid), getSize<Ydir>(r_MyFluid), getSize<Zdir>(r_MyFluid), xsize, ysize, zsize); } //----------------------------------------------------------------------------- // KERNELS FOR GetTemperatureGradientTask //----------------------------------------------------------------------------- __global__ void GetTemperatureGradient_kernel(const AccessorWO< Vec3, 3> tGrad, const AccessorRO<double, 3> temperature, const AccessorRO< int, 3> nType_x, const AccessorRO< int, 3> nType_y, const AccessorRO< int, 3> nType_z, const 
AccessorRO<double, 3> dcsi_d, const AccessorRO<double, 3> deta_d, const AccessorRO<double, 3> dzet_d, const Rect<3> my_bounds, const Rect<3> Fluid_bounds, const coord_t size_x, const coord_t size_y, const coord_t size_z, const coord_t dsize_x, const coord_t dsize_y, const coord_t dsize_z) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; int z = blockIdx.z * blockDim.z + threadIdx.z; if ((x < size_x) && (y < size_y) && (z < size_z)) { const Point<3> p = Point<3>(x + my_bounds.lo.x, y + my_bounds.lo.y, z + my_bounds.lo.z); tGrad[p][0] = GetTemperatureGradientTask::computeDerivative<Xdir>(temperature, p, nType_x[p], dcsi_d[p], dsize_x, Fluid_bounds); tGrad[p][1] = GetTemperatureGradientTask::computeDerivative<Ydir>(temperature, p, nType_y[p], deta_d[p], dsize_y, Fluid_bounds); tGrad[p][2] = GetTemperatureGradientTask::computeDerivative<Zdir>(temperature, p, nType_z[p], dzet_d[p], dsize_z, Fluid_bounds); } } __host__ void GetTemperatureGradientTask::gpu_base_impl( const Args &args, const std::vector<PhysicalRegion> &regions, const std::vector<Future> &futures, Context ctx, Runtime *runtime) { assert(regions.size() == 3); assert(futures.size() == 0); // Accessors for variables in the Ghost regions const AccessorRO<double, 3> acc_temperature (regions[0], FID_temperature); // Accessors for metrics const AccessorRO< int, 3> acc_nType_x (regions[1], FID_nType_x); const AccessorRO< int, 3> acc_nType_y (regions[1], FID_nType_y); const AccessorRO< int, 3> acc_nType_z (regions[1], FID_nType_z); const AccessorRO<double, 3> acc_dcsi_d (regions[1], FID_dcsi_d); const AccessorRO<double, 3> acc_deta_d (regions[1], FID_deta_d); const AccessorRO<double, 3> acc_dzet_d (regions[1], FID_dzet_d); // Accessors for gradients const AccessorWO< Vec3, 3> acc_tGrad (regions[2], FID_temperatureGradient); // Extract execution domains Rect<3> r_MyFluid = runtime->get_index_space_domain(ctx, args.Fluid.get_index_space()); Rect<3> Fluid_bounds = args.Fluid_bounds; // Get domain sizes const coord_t xsize = getSize<Xdir>(Fluid_bounds); const coord_t ysize = getSize<Ydir>(Fluid_bounds); const coord_t zsize = getSize<Zdir>(Fluid_bounds); // Launch the kernel const int threads_per_block = 256; const dim3 TPB_3d = splitThreadsPerBlock<Xdir>(threads_per_block, r_MyFluid); const dim3 num_blocks_3d = dim3((getSize<Xdir>(r_MyFluid) + (TPB_3d.x - 1)) / TPB_3d.x, (getSize<Ydir>(r_MyFluid) + (TPB_3d.y - 1)) / TPB_3d.y, (getSize<Zdir>(r_MyFluid) + (TPB_3d.z - 1)) / TPB_3d.z); hipLaunchKernelGGL(( GetTemperatureGradient_kernel), dim3(num_blocks_3d), dim3(TPB_3d), 0, 0, acc_tGrad, acc_temperature, acc_nType_x, acc_nType_y, acc_nType_z, acc_dcsi_d, acc_deta_d, acc_dzet_d, r_MyFluid, Fluid_bounds, getSize<Xdir>(r_MyFluid), getSize<Ydir>(r_MyFluid), getSize<Zdir>(r_MyFluid), xsize, ysize, zsize); }
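// splitThreadsPerBlock and getSize come from cuda_utils.hpp, which is not shown; a
// minimal sketch of the same launch-configuration idea with plain dim3 arithmetic,
// assuming each kernel guards with `if (x < size_x && y < size_y && z < size_z)` as above.
#include <hip/hip_runtime.h>

static dim3 blocks_for(dim3 tpb, unsigned nx, unsigned ny, unsigned nz) {
  return dim3((nx + tpb.x - 1) / tpb.x,    // ceil-divide each extent by the block shape
              (ny + tpb.y - 1) / tpb.y,
              (nz + tpb.z - 1) / tpb.z);
}

// Usage with a hypothetical kernel:
//   dim3 tpb(32, 4, 2);   // 256 threads per block
//   hipLaunchKernelGGL(my_kernel, blocks_for(tpb, nx, ny, nz), tpb, 0, 0, /*args*/);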
d8320cb2b7ef7b3e01b12005ed8bfaadcc7118f1.cu
// Copyright (c) "2019, by Stanford University // Developer: Mario Di Renzo // Affiliation: Center for Turbulence Research, Stanford University // URL: https://ctr.stanford.edu // Citation: Di Renzo, M., Lin, F., and Urzay, J. (2020). // HTR solver: An open-source exascale-oriented task-based // multi-GPU high-order code for hypersonic aerothermodynamics. // Computer Physics Communications 255, 107262" // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND // ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE // DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER BE LIABLE FOR ANY // DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES // (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; // LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND // ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS // SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #include "prometeo_variables.hpp" #include "cuda_utils.hpp" // Define a constant memory that will hold the Mixture struct __device__ __constant__ Mix mix; //----------------------------------------------------------------------------- // KERNELS FOR UpdatePropertiesFromPrimitiveTask //----------------------------------------------------------------------------- __global__ void UpdatePropertiesFromPrimitive_kernel(const AccessorRO<double, 3> pressure, const AccessorRO<double, 3> temperature, const AccessorRO<VecNSp, 3> MolarFracs, const AccessorRO< Vec3, 3> velocity, const AccessorWO<VecNSp, 3> MassFracs, const AccessorWO<double, 3> rho, const AccessorWO<double, 3> mu, const AccessorWO<double, 3> lam, const AccessorWO<VecNSp, 3> Di, const AccessorWO<double, 3> SoS, const Rect<3> my_bounds, const coord_t size_x, const coord_t size_y, const coord_t size_z) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; int z = blockIdx.z * blockDim.z + threadIdx.z; if ((x < size_x) && (y < size_y) && (z < size_z)) { const Point<3> p = Point<3>(x + my_bounds.lo.x, y + my_bounds.lo.y, z + my_bounds.lo.z); // TODO: Add a Mixture check UpdatePropertiesFromPrimitiveTask::UpdateProperties( pressure, temperature, MolarFracs, velocity, MassFracs, rho, mu, lam, Di, SoS, p, mix); } } __host__ void UpdatePropertiesFromPrimitiveTask::gpu_base_impl( const Args &args, const std::vector<PhysicalRegion> &regions, const std::vector<Future> &futures, Context ctx, Runtime *runtime) { assert(regions.size() == 3); assert(futures.size() == 0); // Accessors for primitive variables const AccessorRO<double, 3> acc_pressure (regions[0], FID_pressure); const AccessorRO<double, 3> acc_temperature (regions[0], FID_temperature); const AccessorRO<VecNSp, 3> acc_MolarFracs (regions[0], FID_MolarFracs); const AccessorRO< 
Vec3, 3> acc_velocity (regions[0], FID_velocity); const AccessorWO<VecNSp, 3> acc_MassFracs (regions[1], FID_MassFracs); // Accessors for properties const AccessorWO<double, 3> acc_rho (regions[1], FID_rho); const AccessorWO<double, 3> acc_mu (regions[1], FID_mu); const AccessorWO<double, 3> acc_lam (regions[1], FID_lam); const AccessorWO<VecNSp, 3> acc_Di (regions[1], FID_Di); const AccessorWO<double, 3> acc_SoS (regions[1], FID_SoS); // Extract execution domains Rect<3> r_ModCells = runtime->get_index_space_domain(ctx, args.ModCells.get_index_space()); // Copy the mixture to the device cudaMemcpyToSymbolAsync(mix, &(args.mix), sizeof(Mix)); // Launch the kernel const int threads_per_block = 256; const dim3 TPB_3d = splitThreadsPerBlock<Xdir>(threads_per_block, r_ModCells); const dim3 num_blocks_3d = dim3((getSize<Xdir>(r_ModCells) + (TPB_3d.x - 1)) / TPB_3d.x, (getSize<Ydir>(r_ModCells) + (TPB_3d.y - 1)) / TPB_3d.y, (getSize<Zdir>(r_ModCells) + (TPB_3d.z - 1)) / TPB_3d.z); UpdatePropertiesFromPrimitive_kernel<<<num_blocks_3d, TPB_3d>>>( acc_pressure, acc_temperature, acc_MolarFracs, acc_velocity, acc_MassFracs, acc_rho, acc_mu, acc_lam, acc_Di, acc_SoS, r_ModCells, getSize<Xdir>(r_ModCells), getSize<Ydir>(r_ModCells), getSize<Zdir>(r_ModCells)); } //----------------------------------------------------------------------------- // KERNELS FOR GetVelocityGradientsTask //----------------------------------------------------------------------------- __global__ void GetVelocityGradients_kernel(const AccessorWO< Vec3, 3> vGradX, const AccessorWO< Vec3, 3> vGradY, const AccessorWO< Vec3, 3> vGradZ, const AccessorRO< Vec3, 3> velocity, const AccessorRO< int, 3> nType_x, const AccessorRO< int, 3> nType_y, const AccessorRO< int, 3> nType_z, const AccessorRO<double, 3> dcsi_d, const AccessorRO<double, 3> deta_d, const AccessorRO<double, 3> dzet_d, const Rect<3> my_bounds, const Rect<3> Fluid_bounds, const coord_t size_x, const coord_t size_y, const coord_t size_z, const coord_t dsize_x, const coord_t dsize_y, const coord_t dsize_z) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; int z = blockIdx.z * blockDim.z + threadIdx.z; if ((x < size_x) && (y < size_y) && (z < size_z)) { const Point<3> p = Point<3>(x + my_bounds.lo.x, y + my_bounds.lo.y, z + my_bounds.lo.z); // X gradient GetVelocityGradientsTask::computeDerivatives<Xdir>(vGradX, velocity, p, nType_x[p], dcsi_d[p], dsize_x, Fluid_bounds); // Y gradient GetVelocityGradientsTask::computeDerivatives<Ydir>(vGradY, velocity, p, nType_y[p], deta_d[p], dsize_y, Fluid_bounds); // Z gradient GetVelocityGradientsTask::computeDerivatives<Zdir>(vGradZ, velocity, p, nType_z[p], dzet_d[p], dsize_z, Fluid_bounds); } } __host__ void GetVelocityGradientsTask::gpu_base_impl( const Args &args, const std::vector<PhysicalRegion> &regions, const std::vector<Future> &futures, Context ctx, Runtime *runtime) { assert(regions.size() == 3); assert(futures.size() == 0); // Accessors for variables in the Ghost regions const AccessorRO< Vec3, 3> acc_velocity (regions[0], FID_velocity); // Accessors for metrics const AccessorRO< int, 3> acc_nType_x (regions[1], FID_nType_x); const AccessorRO< int, 3> acc_nType_y (regions[1], FID_nType_y); const AccessorRO< int, 3> acc_nType_z (regions[1], FID_nType_z); const AccessorRO<double, 3> acc_dcsi_d (regions[1], FID_dcsi_d); const AccessorRO<double, 3> acc_deta_d (regions[1], FID_deta_d); const AccessorRO<double, 3> acc_dzet_d (regions[1], FID_dzet_d); // Accessors for gradients const 
AccessorWO< Vec3, 3> acc_vGradX (regions[2], FID_velocityGradientX); const AccessorWO< Vec3, 3> acc_vGradY (regions[2], FID_velocityGradientY); const AccessorWO< Vec3, 3> acc_vGradZ (regions[2], FID_velocityGradientZ); // Extract execution domains Rect<3> r_MyFluid = runtime->get_index_space_domain(ctx, args.Fluid.get_index_space()); Rect<3> Fluid_bounds = args.Fluid_bounds; // Get domain sizes const coord_t xsize = getSize<Xdir>(Fluid_bounds); const coord_t ysize = getSize<Ydir>(Fluid_bounds); const coord_t zsize = getSize<Zdir>(Fluid_bounds); // Launch the kernel const int threads_per_block = 256; const dim3 TPB_3d = splitThreadsPerBlock<Xdir>(threads_per_block, r_MyFluid); const dim3 num_blocks_3d = dim3((getSize<Xdir>(r_MyFluid) + (TPB_3d.x - 1)) / TPB_3d.x, (getSize<Ydir>(r_MyFluid) + (TPB_3d.y - 1)) / TPB_3d.y, (getSize<Zdir>(r_MyFluid) + (TPB_3d.z - 1)) / TPB_3d.z); GetVelocityGradients_kernel<<<num_blocks_3d, TPB_3d>>>( acc_vGradX, acc_vGradY, acc_vGradZ, acc_velocity, acc_nType_x, acc_nType_y, acc_nType_z, acc_dcsi_d, acc_deta_d, acc_dzet_d, r_MyFluid, Fluid_bounds, getSize<Xdir>(r_MyFluid), getSize<Ydir>(r_MyFluid), getSize<Zdir>(r_MyFluid), xsize, ysize, zsize); } //----------------------------------------------------------------------------- // KERNELS FOR GetTemperatureGradientTask //----------------------------------------------------------------------------- __global__ void GetTemperatureGradient_kernel(const AccessorWO< Vec3, 3> tGrad, const AccessorRO<double, 3> temperature, const AccessorRO< int, 3> nType_x, const AccessorRO< int, 3> nType_y, const AccessorRO< int, 3> nType_z, const AccessorRO<double, 3> dcsi_d, const AccessorRO<double, 3> deta_d, const AccessorRO<double, 3> dzet_d, const Rect<3> my_bounds, const Rect<3> Fluid_bounds, const coord_t size_x, const coord_t size_y, const coord_t size_z, const coord_t dsize_x, const coord_t dsize_y, const coord_t dsize_z) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; int z = blockIdx.z * blockDim.z + threadIdx.z; if ((x < size_x) && (y < size_y) && (z < size_z)) { const Point<3> p = Point<3>(x + my_bounds.lo.x, y + my_bounds.lo.y, z + my_bounds.lo.z); tGrad[p][0] = GetTemperatureGradientTask::computeDerivative<Xdir>(temperature, p, nType_x[p], dcsi_d[p], dsize_x, Fluid_bounds); tGrad[p][1] = GetTemperatureGradientTask::computeDerivative<Ydir>(temperature, p, nType_y[p], deta_d[p], dsize_y, Fluid_bounds); tGrad[p][2] = GetTemperatureGradientTask::computeDerivative<Zdir>(temperature, p, nType_z[p], dzet_d[p], dsize_z, Fluid_bounds); } } __host__ void GetTemperatureGradientTask::gpu_base_impl( const Args &args, const std::vector<PhysicalRegion> &regions, const std::vector<Future> &futures, Context ctx, Runtime *runtime) { assert(regions.size() == 3); assert(futures.size() == 0); // Accessors for variables in the Ghost regions const AccessorRO<double, 3> acc_temperature (regions[0], FID_temperature); // Accessors for metrics const AccessorRO< int, 3> acc_nType_x (regions[1], FID_nType_x); const AccessorRO< int, 3> acc_nType_y (regions[1], FID_nType_y); const AccessorRO< int, 3> acc_nType_z (regions[1], FID_nType_z); const AccessorRO<double, 3> acc_dcsi_d (regions[1], FID_dcsi_d); const AccessorRO<double, 3> acc_deta_d (regions[1], FID_deta_d); const AccessorRO<double, 3> acc_dzet_d (regions[1], FID_dzet_d); // Accessors for gradients const AccessorWO< Vec3, 3> acc_tGrad (regions[2], FID_temperatureGradient); // Extract execution domains Rect<3> r_MyFluid = 
runtime->get_index_space_domain(ctx, args.Fluid.get_index_space()); Rect<3> Fluid_bounds = args.Fluid_bounds; // Get domain sizes const coord_t xsize = getSize<Xdir>(Fluid_bounds); const coord_t ysize = getSize<Ydir>(Fluid_bounds); const coord_t zsize = getSize<Zdir>(Fluid_bounds); // Launch the kernel const int threads_per_block = 256; const dim3 TPB_3d = splitThreadsPerBlock<Xdir>(threads_per_block, r_MyFluid); const dim3 num_blocks_3d = dim3((getSize<Xdir>(r_MyFluid) + (TPB_3d.x - 1)) / TPB_3d.x, (getSize<Ydir>(r_MyFluid) + (TPB_3d.y - 1)) / TPB_3d.y, (getSize<Zdir>(r_MyFluid) + (TPB_3d.z - 1)) / TPB_3d.z); GetTemperatureGradient_kernel<<<num_blocks_3d, TPB_3d>>>( acc_tGrad, acc_temperature, acc_nType_x, acc_nType_y, acc_nType_z, acc_dcsi_d, acc_deta_d, acc_dzet_d, r_MyFluid, Fluid_bounds, getSize<Xdir>(r_MyFluid), getSize<Ydir>(r_MyFluid), getSize<Zdir>(r_MyFluid), xsize, ysize, zsize); }
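// A minimal sketch of the __constant__-memory pattern used for `mix` above, with a
// hypothetical ParamsSketch struct; the host copies into the symbol before any kernel
// reads it (the file uses cudaMemcpyToSymbolAsync, which additionally takes a stream).
#include <cuda_runtime.h>

struct ParamsSketch { double dt; int steps; };

__device__ __constant__ ParamsSketch d_params_sketch;  // visible to all kernels in this translation unit

__global__ void use_params_sketch(double *out) {
  out[threadIdx.x] = d_params_sketch.dt * d_params_sketch.steps;
}

static void upload_params_sketch(const ParamsSketch &h) {
  // Synchronous copy into constant memory; offset and direction default to 0 and host-to-device.
  cudaMemcpyToSymbol(d_params_sketch, &h, sizeof(ParamsSketch));
}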
91b8ec1c66ff096fac3141ebe6f328c5a8701769.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <cstdlib> #include <cstdio> #include <cmath> #include <iostream> #include <vector> #include <thread> #include <chrono> #include <algorithm> #include "myio.h" #include "mylib.h" #include "wave3d.h" #include "boundary.h" #include "check.h" #include "conversions.h" #include "cluster.h" using namespace std; void rtm3d(float *image,int nx,int ny,int nz,int npad,float oz,float dz,float wbottom,vector<int> &shotid,int max_shot_per_job,int icall,const string &command){ random_shuffle(shotid.begin(),shotid.end()); long long nxy=nx*ny,nxyz=nxy*nz; vector<Job> jobs; vector<int> nfail; int nshotleft=shotid.size(),njob=0,njobtotal=(shotid.size()+max_shot_per_job-1)/max_shot_per_job; while(nshotleft>0){ int nshot1job=min(nshotleft,max_shot_per_job); string shotname="icall_"+to_string(icall)+"_shot_"; string shotlist; for(int i=0;i<nshot1job;i++){ int id=i+njob*max_shot_per_job; if(i<nshot1job-1){ shotname+=to_string(shotid[id])+"_"; shotlist+=to_string(shotid[id])+","; } else{ shotname+=to_string(shotid[id]); shotlist+=to_string(shotid[id]); } } string scriptfile="./scripts/submit_"+shotname+".sh"; string jobname=shotname; string outfile="./output/"+shotname+".log"; string gradfile="./grads/image_"+shotname+".H"; string command1=command+" image="+gradfile+" shotid="+shotlist; genScript(scriptfile,jobname,outfile,command1); string id=submitScript(scriptfile); // string id=to_string(njob); string state; int nerror=0; while(id.compare("error")==0 && nerror<MAX_FAIL){ this_thread::sleep_for(chrono::seconds(5)); id=submitPBSScript(scriptfile); nerror++; } if(id.compare("error")!=0){ state="SUBMITTED"; int idx=njob; Job job(idx,id,scriptfile,outfile,gradfile,state); jobs.push_back(job); nfail.push_back(0); } else fprintf(stderr,"job %s reaches MAX_FAIL %d\n",jobname.c_str(),MAX_FAIL); njob++; nshotleft-=nshot1job; } njob=jobs.size(); this_thread::sleep_for(chrono::seconds(15)); cout<<"submitted "<<njob<<" jobs"<<endl; // for(int i=0;i<jobs.size();i++) jobs[i].printJob(); int ncompleted=0; float *image0=new float[nxyz](); while(ncompleted<njob){ for(int i=0;i<jobs.size();i++){ string id=jobs[i]._jobId; int idx=jobs[i]._jobIdx; string jobstate=jobs[i]._jobState; if(jobstate.compare("COMPLETED")!=0 && jobstate.compare("FAILED")!=0 && jobstate.compare("TIMEOUT")!=0){ string state=getJobState(id); // string state="COMPLETED"; if(state.compare("COMPLETED")==0){ cout<<"job "<<idx<<" id "<<id<<" state "<<state<<endl; ncompleted++; readFromHeader(jobs[i]._gradFile,image,nxyz); cout<<"summing image from file "<<jobs[i]._gradFile<<endl; #pragma omp parallel for for(int i=0;i<nxyz;i++) image0[i]+=image[i]; } else if(state.compare("FAILED")==0 || state.compare("TIMEOUT")==0){ cout<<"job "<<idx<<" id "<<id<<" state "<<state<<endl; nfail[idx]++; if(nfail[idx]>MAX_FAIL){ cout<<"job "<<idx<<" reached MAX_FAIL "<<MAX_FAIL<<endl; ncompleted++; continue; } cout<<" resubmitting"<<endl; Job newjob=jobs[i]; string newid=submitScript(newjob._scriptFile); // string newid=to_string(idx); if(newid.compare("error")!=0) newjob.setJobState("SUBMITTED"); newjob.setJobId(newid); jobs.push_back(newjob); } jobs[i].setJobState(state); } } } zeroBoundary(image0,nx,ny,nz,npad); int nwbottom=(wbottom-oz)/dz+1-npad; memset(image0+npad*nxy,0,nwbottom*nxy*sizeof(float)); #pragma omp parallel for num_threads(16) for(int iz=1;iz<nz-1;iz++){ for(int iy=1;iy<ny-1;iy++){ #pragma omp simd for(int ix=1;ix<nx-1;ix++){ size_t i=ix+iy*nx+iz*nxy; 
image[i]=image0[i+1]+image0[i-1]+image0[i+nx]+image0[i-nx]+image0[i+nxy]+image0[i-nxy]-6.f*image0[i]; } } } delete []image0; // for(int i=0;i<jobs.size();i++) jobs[i].printJob(); return; } void rtmVEpsDel(float *image,float *souloc,int ns,vector<int> &shotid,float *recloc,float *wavelet,float *v,float *eps,float *del,float *randboundaryV,int nx,int ny,int nz,int nt,int npad,float ox,float oy,float oz,float ot,float dx,float dy,float dz,float dt,float samplingRate,float wbottom){ long long nxy=nx*ny; long long nxyz=nxy*nz; float *c11=new float[nxyz]; float *c13=new float[nxyz]; float *c33=new float[nxyz]; // putBoundary(randboundaryV,v,nx,ny,nz,npad); VEpsDel2Cij(c11,c13,c33,v,eps,del,1.,1.,1.,nxyz); rtmCij3d_f(image,souloc,ns,shotid,recloc,wavelet,c11,c13,c33,nx,ny,nz,nt,npad,ox,oy,oz,ot,dx,dy,dz,dt,samplingRate); zeroBoundary(image,nx,ny,nz,npad); int nwbottom=(wbottom-oz)/dz+1-npad; memset(image+npad*nxy,0,nwbottom*nxy*sizeof(float)); delete []c11;delete []c13;delete []c33; return; } void rtmCij3d_f(float *image,float *souloc,int ns,vector<int> &shotid,float *recloc,float *wavelet,float *c11,float *c13,float *c33,int nx,int ny,int nz,int nt,int npad,float ox,float oy,float oz,float ot,float dx,float dy,float dz,float dt,float samplingRate){ vector<int> GPUs; get_array("gpu",GPUs); int NGPU=GPUs.size(); // fprintf(stderr,"Total # GPUs = %d\n",NGPU); // fprintf(stderr,"GPUs used are:\n"); // for(int i=0;i<NGPU;i++) fprintf(stderr,"%d ",GPUs[i]); // fprintf(stderr,"\n"); float dx2=dx*dx,dy2=dy*dy,dz2=dz*dz,dt2=dt*dt; float dt2dx2=dt2/dx2,dt2dy2=dt2/dy2,dt2dz2=dt2/dz2; int nxy=nx*ny; long long nxyz=nxy*nz; int samplingTimeStep=std::round(samplingRate/dt); int nnt=(nt-1)/samplingTimeStep+1; memset(image,0,nxyz*sizeof(float)); //this is to store multiple images // float *image1=new float[nxyz](); // float *image2=new float[nxyz](); // float *image3=new float[nxyz](); // float *damping=new float[nxy]; // init_abc(damping,nx,ny,npad); float *damping=new float[nxy+nz]; init_abc(damping,nx,ny,nz,npad); float **d_damping=new float*[NGPU](); float *prevSigmaX=new float[nxyz]; float *curSigmaX=new float[nxyz]; float *prevSigmaZ=new float[nxyz]; float *curSigmaZ=new float[nxyz]; float *prevLambdaX=new float[nxyz]; float *curLambdaX=new float[nxyz]; float *prevLambdaZ=new float[nxyz]; float *curLambdaZ=new float[nxyz]; size_t nElemBlock=HALF_STENCIL*nxy; size_t nByteBlock=nElemBlock*sizeof(float); int nb=nz/HALF_STENCIL; float *h_c11[2],*h_c13[2],*h_c33[2]; float *h_prevSigmaX[2],*h_curSigmaX[2],*h_SigmaX4[2],*h_SigmaX5[2]; float *h_prevSigmaZ[2],*h_curSigmaZ[2],*h_SigmaZ4[2],*h_SigmaZ5[2]; float *h_prevLambdaX[2],*h_curLambdaX[2],*h_LambdaX4[2],*h_LambdaX5[2]; float *h_prevLambdaZ[2],*h_curLambdaZ[2],*h_LambdaZ4[2],*h_LambdaZ5[2]; float *h_data[2]; float *h_imagei[2],*h_imageo[2]; for(int i=0;i<2;++i){ hipHostMalloc(&h_c11[i],nByteBlock,hipHostMallocDefault); hipHostMalloc(&h_c13[i],nByteBlock,hipHostMallocDefault); hipHostMalloc(&h_c33[i],nByteBlock,hipHostMallocDefault); hipHostMalloc(&h_prevSigmaX[i],nByteBlock,hipHostMallocDefault); hipHostMalloc(&h_curSigmaX[i],nByteBlock,hipHostMallocDefault); hipHostMalloc(&h_SigmaX4[i],nByteBlock,hipHostMallocDefault); hipHostMalloc(&h_SigmaX5[i],nByteBlock,hipHostMallocDefault); hipHostMalloc(&h_prevSigmaZ[i],nByteBlock,hipHostMallocDefault); hipHostMalloc(&h_curSigmaZ[i],nByteBlock,hipHostMallocDefault); hipHostMalloc(&h_SigmaZ4[i],nByteBlock,hipHostMallocDefault); hipHostMalloc(&h_SigmaZ5[i],nByteBlock,hipHostMallocDefault); 
hipHostMalloc(&h_prevLambdaX[i],nByteBlock,hipHostMallocDefault); hipHostMalloc(&h_curLambdaX[i],nByteBlock,hipHostMallocDefault); hipHostMalloc(&h_LambdaX4[i],nByteBlock,hipHostMallocDefault); hipHostMalloc(&h_LambdaX5[i],nByteBlock,hipHostMallocDefault); hipHostMalloc(&h_prevLambdaZ[i],nByteBlock,hipHostMallocDefault); hipHostMalloc(&h_curLambdaZ[i],nByteBlock,hipHostMallocDefault); hipHostMalloc(&h_LambdaZ4[i],nByteBlock,hipHostMallocDefault); hipHostMalloc(&h_LambdaZ5[i],nByteBlock,hipHostMallocDefault); hipHostMalloc(&h_imagei[i],nByteBlock,hipHostMallocDefault); hipHostMalloc(&h_imageo[i],nByteBlock,hipHostMallocDefault); } const int nbuffSigma=NUPDATE+2; int **d_recIndex=new int*[NGPU](); float ***d_data=new float**[NGPU](); float ****d_SigmaX=new float ***[NGPU](); float ****d_SigmaZ=new float ***[NGPU](); float ****d_LambdaX=new float ***[NGPU](); float ****d_LambdaZ=new float ***[NGPU](); const int nbuffCij=NUPDATE+4; float ***d_c11=new float**[NGPU](); float ***d_c13=new float**[NGPU](); float ***d_c33=new float**[NGPU](); float ***d_image=new float**[NGPU](); hipStream_t *transfInStream=new hipStream_t[1](); hipStream_t *transfOutStream=new hipStream_t[NGPU](); hipStream_t *computeStream=new hipStream_t[NGPU](); dim3 block(BLOCK_DIM,BLOCK_DIM); dim3 grid((nx-2*HALF_STENCIL+BLOCK_DIM-1)/BLOCK_DIM,(ny-2*HALF_STENCIL+BLOCK_DIM-1)/BLOCK_DIM); for(int gpu=0;gpu<NGPU;gpu++){ float mem=0.; hipSetDevice(GPUs[gpu]); // hipMalloc(&d_damping[gpu],nxy*sizeof(float)); // hipMemcpy(d_damping[gpu],damping,nxy*sizeof(float),hipMemcpyHostToDevice); hipMalloc(&d_damping[gpu],(nxy+nz)*sizeof(float)); mem+=(nxy+nz)*sizeof(float); hipMemcpy(d_damping[gpu],damping,(nxy+nz)*sizeof(float),hipMemcpyHostToDevice); d_SigmaX[gpu]=new float**[nbuffSigma](); d_SigmaZ[gpu]=new float**[nbuffSigma](); for(int i=0;i<nbuffSigma;++i){ d_SigmaX[gpu][i]=new float*[4](); d_SigmaZ[gpu][i]=new float*[4](); for(int j=0;j<4;++j){ hipMalloc(&d_SigmaX[gpu][i][j],nByteBlock); hipMalloc(&d_SigmaZ[gpu][i][j],nByteBlock); mem+=2*nByteBlock; } } d_LambdaX[gpu]=new float**[nbuffSigma](); d_LambdaZ[gpu]=new float**[nbuffSigma](); for(int i=0;i<nbuffSigma;++i){ d_LambdaX[gpu][i]=new float*[4](); d_LambdaZ[gpu][i]=new float*[4](); for(int j=0;j<4;++j){ hipMalloc(&d_LambdaX[gpu][i][j],nByteBlock); hipMalloc(&d_LambdaZ[gpu][i][j],nByteBlock); mem+=2*nByteBlock; } } d_c11[gpu]=new float*[nbuffCij](); d_c13[gpu]=new float*[nbuffCij](); d_c33[gpu]=new float*[nbuffCij](); for(int i=0;i<nbuffCij;++i){ hipMalloc(&d_c11[gpu][i],nByteBlock); hipMalloc(&d_c13[gpu][i],nByteBlock); hipMalloc(&d_c33[gpu][i],nByteBlock); mem+=3*nByteBlock; } d_image[gpu]=new float*[nbuffCij](); for(int i=0;i<nbuffCij;++i){ hipMalloc(&d_image[gpu][i],nByteBlock); mem+=nByteBlock; } d_data[gpu]=new float*[2](); if(gpu==0) hipStreamCreate(&transfInStream[gpu]); hipStreamCreate(&computeStream[gpu]); hipStreamCreate(&transfOutStream[gpu]); fprintf(stderr,"gpu %d allocates %f GB\n",GPUs[gpu],mem*1e-9); hipError_t e=hipGetLastError(); if(e!=hipSuccess) fprintf(stderr,"GPU %d alloc error %s\n",gpu,hipGetErrorString(e)); } vector<thread> threads; int pipelen=NGPU*(NUPDATE+3)+3; int nround=(nt-2)/(NGPU*NUPDATE); int roundlen=max(pipelen,nb); int nroundlen=nround*roundlen;; int nk=(nround-1)*roundlen+pipelen+nb-1; // fprintf(stderr,"pipelen=%d nround=%d roundlen=%d nk=%d\n",pipelen,nround,roundlen,nk); int recBlock=(recloc[2]-oz)/dz/HALF_STENCIL; //assume all receivers are at same depth for(vector<int>::iterator id=shotid.begin();id!=shotid.end();id++){ int is=*id; 
fprintf(stderr,"shot # %d\n",is); int nr=souloc[5*is+3]; int irbegin=souloc[5*is+4]; int *recIndex=new int[nr]; int *recIndexBlock=new int[nr]; float *data=new float[nnt*nr](); #pragma omp parallel for num_threads(16) for(int ir=0;ir<nr;ir++){ int ir1=ir+irbegin; int ix=(recloc[3*ir1]-ox)/dx; int iy=(recloc[3*ir1+1]-oy)/dy; int iz=(recloc[3*ir1+2]-oz)/dz; int ixy=ix+iy*nx; recIndex[ir]=ixy+iz*nxy; recIndexBlock[ir]=ixy+(iz%HALF_STENCIL)*nxy; } int souIndexX=(souloc[5*is]-ox)/dx; int souIndexY=(souloc[5*is+1]-oy)/dy; int souIndexZ=(souloc[5*is+2]-oz)/dz; int souIndex=souIndexX+souIndexY*nx+souIndexZ*nxy; int souIndexBlock=souIndexX+souIndexY*nx+(souIndexZ%HALF_STENCIL)*nxy; int souBlock=souIndexZ/HALF_STENCIL; hipHostMalloc(&h_data[0],nr*sizeof(float),hipHostMallocDefault); hipHostMalloc(&h_data[1],nr*sizeof(float),hipHostMallocDefault); for(int gpu=0;gpu<NGPU;gpu++){ hipSetDevice(GPUs[gpu]); hipMalloc(&d_recIndex[gpu],nr*sizeof(int)); hipMemcpy(d_recIndex[gpu],recIndexBlock,nr*sizeof(int),hipMemcpyHostToDevice); hipMalloc(&d_data[gpu][0],nr*sizeof(float)); hipMalloc(&d_data[gpu][1],nr*sizeof(float)); // hipError_t e=hipGetLastError(); // if(e!=hipSuccess) fprintf(stderr,"shot %d GPU %d alloc error %s\n",is,gpu,hipGetErrorString(e)); } // fprintf(stderr,"fowrard propagation with random boundary\n"); memset(prevSigmaX,0,nxyz*sizeof(float)); memset(curSigmaX,0,nxyz*sizeof(float)); memset(prevSigmaZ,0,nxyz*sizeof(float)); memset(curSigmaZ,0,nxyz*sizeof(float)); //injecting source at time 0 to wavefields at time 1 float temp=dt2*wavelet[0]; curSigmaX[souIndex]=temp; curSigmaZ[souIndex]=temp; for(int k=0;k<nk;k++){ if(k<nroundlen){ int ib=k%roundlen; if(ib<nb){ size_t ibn=ib*nElemBlock; int k2=k%2; threads.push_back(thread(memcpyCpuToCpu3,h_c11[k2],c11+ibn,h_c13[k2],c13+ibn,h_c33[k2],c33+ibn,nByteBlock)); threads.push_back(thread(memcpyCpuToCpu2,h_prevSigmaX[k2],prevSigmaX+ibn,h_curSigmaX[k2],curSigmaX+ibn,nByteBlock)); threads.push_back(thread(memcpyCpuToCpu2,h_prevSigmaZ[k2],prevSigmaZ+ibn,h_curSigmaZ[k2],curSigmaZ+ibn,nByteBlock)); } } if(k>0 && k<=nroundlen){ int ib=(k-1)%roundlen; if(ib<nb){ int k12=(k-1)%2,kn=k%nbuffCij,k4=k%4; hipSetDevice(GPUs[0]); memcpyCpuToGpu2(d_SigmaX[0][0][k4],h_prevSigmaX[k12],d_SigmaX[0][1][k4],h_curSigmaX[k12],nByteBlock,transfInStream); memcpyCpuToGpu2(d_SigmaZ[0][0][k4],h_prevSigmaZ[k12],d_SigmaZ[0][1][k4],h_curSigmaZ[k12],nByteBlock,transfInStream); memcpyCpuToGpu3(d_c11[0][kn],h_c11[k12],d_c13[0][kn],h_c13[k12],d_c33[0][kn],h_c33[k12],nByteBlock,transfInStream); } } for(int gpu=0;gpu<NGPU;gpu++){ int kgpu=k-gpu*(NUPDATE+3); hipSetDevice(GPUs[gpu]); if(kgpu>2 && kgpu<=NUPDATE+1+nroundlen){ for(int i=0;i<NUPDATE;i++){ int ib=(kgpu-3-i)%roundlen; int iround=(kgpu-3-i)/roundlen; if(ib>=0 && ib<nb && iround>=0 && iround<nround){ int it=iround*NGPU*NUPDATE+gpu*NUPDATE+2+i; int ki=kgpu-i,ki14=(ki-1)%4,ki24=(ki-2)%4,ki34=(ki-3)%4,ki2n=(ki-2)%nbuffCij; if(ib==0){ hipLaunchKernelGGL(( forwardKernelTopBlock), dim3(grid),dim3(block),0,computeStream[gpu], d_SigmaX[gpu][i+2][ki24],d_SigmaX[gpu][i+1][ki24],d_SigmaX[gpu][i][ki24],d_SigmaZ[gpu][i+2][ki24],d_SigmaZ[gpu][i+1][ki34],d_SigmaZ[gpu][i+1][ki24],d_SigmaZ[gpu][i+1][ki14],d_SigmaZ[gpu][i][ki24],d_c11[gpu][ki2n],d_c13[gpu][ki2n],d_c33[gpu][ki2n],nx,ny,dt2dx2,dt2dy2,dt2dz2); } else if(ib==nb-1){ hipLaunchKernelGGL(( forwardKernelBottomBlock), dim3(grid),dim3(block),0,computeStream[gpu], 
d_SigmaX[gpu][i+2][ki24],d_SigmaX[gpu][i+1][ki24],d_SigmaX[gpu][i][ki24],d_SigmaZ[gpu][i+2][ki24],d_SigmaZ[gpu][i+1][ki34],d_SigmaZ[gpu][i+1][ki24],d_SigmaZ[gpu][i+1][ki14],d_SigmaZ[gpu][i][ki24],d_c11[gpu][ki2n],d_c13[gpu][ki2n],d_c33[gpu][ki2n],nx,ny,dt2dx2,dt2dy2,dt2dz2); } else{ hipLaunchKernelGGL(( forwardKernel), dim3(grid),dim3(block),0,computeStream[gpu], d_SigmaX[gpu][i+2][ki24],d_SigmaX[gpu][i+1][ki24],d_SigmaX[gpu][i][ki24],d_SigmaZ[gpu][i+2][ki24],d_SigmaZ[gpu][i+1][ki34],d_SigmaZ[gpu][i+1][ki24],d_SigmaZ[gpu][i+1][ki14],d_SigmaZ[gpu][i][ki24],d_c11[gpu][ki2n],d_c13[gpu][ki2n],d_c33[gpu][ki2n],nx,ny,dt2dx2,dt2dy2,dt2dz2); } if(ib==souBlock){ float source=dt2*wavelet[it-1]; hipLaunchKernelGGL(( injectSource), dim3(1),dim3(1),0,computeStream[gpu], d_SigmaX[gpu][i+2][ki24],d_SigmaZ[gpu][i+2][ki24],source,souIndexBlock); } } } } if(kgpu>NUPDATE+3 && kgpu<=NUPDATE+3+nroundlen){ int ib=(kgpu-NUPDATE-4)%roundlen; if(ib<nb){ if(NGPU>1 && gpu<NGPU-1){ int n2=nbuffSigma-2,n1=nbuffSigma-1,kn3=kgpu-NUPDATE-3,kn34=kn3%4,kn3n=kn3%nbuffCij; memcpyGpuToGpu2(d_SigmaX[gpu+1][0][kn34],d_SigmaX[gpu][n2][kn34],d_SigmaX[gpu+1][1][kn34],d_SigmaX[gpu][n1][kn34],nByteBlock,transfOutStream+gpu); memcpyGpuToGpu2(d_SigmaZ[gpu+1][0][kn34],d_SigmaZ[gpu][n2][kn34],d_SigmaZ[gpu+1][1][kn34],d_SigmaZ[gpu][n1][kn34],nByteBlock,transfOutStream+gpu); memcpyGpuToGpu3(d_c11[gpu+1][kn3n],d_c11[gpu][kn3n],d_c13[gpu+1][kn3n],d_c13[gpu][kn3n],d_c33[gpu+1][kn3n],d_c33[gpu][kn3n],nByteBlock,transfOutStream+gpu); } else{ int n2=nbuffSigma-2,n1=nbuffSigma-1,k2=k%2,kn34=(kgpu-NUPDATE-3)%4; memcpyGpuToCpu2(h_SigmaX4[k2],d_SigmaX[gpu][n2][kn34],h_SigmaX5[k2],d_SigmaX[gpu][n1][kn34],nByteBlock,transfOutStream+gpu); memcpyGpuToCpu2(h_SigmaZ4[k2],d_SigmaZ[gpu][n2][kn34],h_SigmaZ5[k2],d_SigmaZ[gpu][n1][kn34],nByteBlock,transfOutStream+gpu); } } } // hipError_t e=hipGetLastError(); // if(e!=hipSuccess) fprintf(stderr,"GPU %d prop error %s\n",gpu,hipGetErrorString(e)); } if(k>pipelen-2 and k<=pipelen-2+nroundlen){ int ib=(k-pipelen+1)%roundlen; if(ib<nb){ size_t ibn=ib*nElemBlock; int k12=(k-1)%2; memcpyCpuToCpu2(prevSigmaX+ibn,h_SigmaX4[k12],curSigmaX+ibn,h_SigmaX5[k12],nByteBlock); memcpyCpuToCpu2(prevSigmaZ+ibn,h_SigmaZ4[k12],curSigmaZ+ibn,h_SigmaZ5[k12],nByteBlock); } } for(int gpu=0;gpu<NGPU;gpu++){ hipSetDevice(GPUs[gpu]); hipDeviceSynchronize(); // hipError_t e=hipGetLastError(); // if(e!=hipSuccess) fprintf(stderr,"GPU %d synch error %s\n",gpu,hipGetErrorString(e)); } for(int i=0;i<threads.size();++i) threads[i].join(); threads.erase(threads.begin(),threads.end()); } // if(is==0){ // write("randforwardwfld",curSigmaX,nxyz); // to_header("randforwardwfld","n1",nx,"o1",ox,"d1",dx); // to_header("randforwardwfld","n2",ny,"o2",oy,"d2",dy); // to_header("randforwardwfld","n3",nz,"o3",oz,"d3",dz); // } // fprintf(stderr,"backward propagations\n"); //flip forward wavefields float *pt; pt=curSigmaX;curSigmaX=prevSigmaX;prevSigmaX=pt; pt=curSigmaZ;curSigmaZ=prevSigmaZ;prevSigmaZ=pt; fprintf(stderr,"forward wavefield min %.10f max %.10f\n",min(curSigmaX,nxyz),max(curSigmaX,nxyz)); memset(prevLambdaX,0,nxyz*sizeof(float)); memset(curLambdaX,0,nxyz*sizeof(float)); memset(prevLambdaZ,0,nxyz*sizeof(float)); memset(curLambdaZ,0,nxyz*sizeof(float)); read("data",data,nnt*nr,(long long)nnt*(long long)irbegin); fprintf(stderr,"data min %.10f max %.10f\n",min(data,nnt*nr),max(data,nnt*nr)); // fprintf(stderr,"inject residual to adjoint wavefields\n"); #pragma omp parallel for num_threads(16) for(int ir=0;ir<nr;ir++){ float 
temp=dt2*data[(nnt-1)+ir*nnt]; curLambdaX[recIndex[ir]]=TWOTHIRD*temp; curLambdaZ[recIndex[ir]]=ONETHIRD*temp; } for(int k=0;k<nk;k++){ if(k<nroundlen){ int ib=k%roundlen; if(ib<nb){ size_t ibn=ib*nElemBlock; int k2=k%2; threads.push_back(thread(memcpyCpuToCpu3,h_c11[k2],c11+ibn,h_c13[k2],c13+ibn,h_c33[k2],c33+ibn,nByteBlock)); threads.push_back(thread(memcpyCpuToCpu2,h_prevSigmaX[k2],prevSigmaX+ibn,h_curSigmaX[k2],curSigmaX+ibn,nByteBlock)); threads.push_back(thread(memcpyCpuToCpu2,h_prevSigmaZ[k2],prevSigmaZ+ibn,h_curSigmaZ[k2],curSigmaZ+ibn,nByteBlock)); threads.push_back(thread(memcpyCpuToCpu2,h_prevLambdaX[k2],prevLambdaX+ibn,h_curLambdaX[k2],curLambdaX+ibn,nByteBlock)); threads.push_back(thread(memcpyCpuToCpu2,h_prevLambdaZ[k2],prevLambdaZ+ibn,h_curLambdaZ[k2],curLambdaZ+ibn,nByteBlock)); threads.push_back(thread(memcpy,h_imagei[k2],image+ibn,nByteBlock)); } } if(k>0 && k<=nroundlen){ int ib=(k-1)%roundlen; if(ib<nb){ int k12=(k-1)%2,kn=k%nbuffCij,k4=k%4; hipSetDevice(GPUs[0]); memcpyCpuToGpu2(d_SigmaX[0][0][k4],h_prevSigmaX[k12],d_SigmaX[0][1][k4],h_curSigmaX[k12],nByteBlock,transfInStream); memcpyCpuToGpu2(d_SigmaZ[0][0][k4],h_prevSigmaZ[k12],d_SigmaZ[0][1][k4],h_curSigmaZ[k12],nByteBlock,transfInStream); memcpyCpuToGpu2(d_LambdaX[0][0][k4],h_prevLambdaX[k12],d_LambdaX[0][1][k4],h_curLambdaX[k12],nByteBlock,transfInStream); memcpyCpuToGpu2(d_LambdaZ[0][0][k4],h_prevLambdaZ[k12],d_LambdaZ[0][1][k4],h_curLambdaZ[k12],nByteBlock,transfInStream); memcpyCpuToGpu3(d_c11[0][kn],h_c11[k12],d_c13[0][kn],h_c13[k12],d_c33[0][kn],h_c33[k12],nByteBlock,transfInStream); hipMemcpyAsync(d_image[0][kn],h_imagei[k12],nByteBlock,hipMemcpyHostToDevice,*transfInStream); } } for(int gpu=0;gpu<NGPU;gpu++){ int kgpu=k+2-gpu*(NUPDATE+3); if(kgpu>2 && kgpu<=NUPDATE+1+nroundlen){ for(int i=0;i<NUPDATE;i++){ int ib=(kgpu-3-i)%roundlen; int iround=(kgpu-3-i)/roundlen; if(ib==recBlock && iround>=0 && iround<nround){ int it=iround*NGPU*NUPDATE+gpu*NUPDATE+2+i; it=nt-1-it; threads.push_back(thread(interpolateResidual,h_data[k%2],data,it+1,nnt,nr,samplingTimeStep)); } } } kgpu=k+1-gpu*(NUPDATE+3); if(kgpu>2 && kgpu<=NUPDATE+1+nroundlen){ for(int i=0;i<NUPDATE;i++){ int ib=(kgpu-3-i)%roundlen; int iround=(kgpu-3-i)/roundlen; if(ib==recBlock && iround>=0 && iround<nround){ hipSetDevice(GPUs[gpu]); int it=iround*NGPU*NUPDATE+gpu*NUPDATE+2+i; it=nt-1-it; hipMemcpyAsync(d_data[gpu][k%2],h_data[(k-1)%2],nr*sizeof(float),hipMemcpyHostToDevice,transfOutStream[gpu]); } } } } for(int gpu=0;gpu<NGPU;gpu++){ int kgpu=k-gpu*(NUPDATE+3); hipSetDevice(GPUs[gpu]); if(kgpu>2 && kgpu<=NUPDATE+1+nroundlen){ for(int i=0;i<NUPDATE;i++){ int ib=(kgpu-3-i)%roundlen; int iround=(kgpu-3-i)/roundlen; if(ib>=0 && ib<nb && iround>=0 && iround<nround){ int it=iround*NGPU*NUPDATE+gpu*NUPDATE+2+i; it=nt-1-it; int ki=kgpu-i,ki14=(ki-1)%4,ki24=(ki-2)%4,ki34=(ki-3)%4,ki2n=(ki-2)%nbuffCij; if(ib==0){ hipLaunchKernelGGL(( forwardKernelTopBlock), dim3(grid),dim3(block),0,computeStream[gpu], d_SigmaX[gpu][i+2][ki24],d_SigmaX[gpu][i+1][ki24],d_SigmaX[gpu][i][ki24],d_SigmaZ[gpu][i+2][ki24],d_SigmaZ[gpu][i+1][ki34],d_SigmaZ[gpu][i+1][ki24],d_SigmaZ[gpu][i+1][ki14],d_SigmaZ[gpu][i][ki24],d_c11[gpu][ki2n],d_c13[gpu][ki2n],d_c33[gpu][ki2n],nx,ny,dt2dx2,dt2dy2,dt2dz2); hipLaunchKernelGGL(( forwardKernelTopBlock), dim3(grid),dim3(block),0,computeStream[gpu], 
d_LambdaX[gpu][i+2][ki24],d_LambdaX[gpu][i+1][ki24],d_LambdaX[gpu][i][ki24],d_LambdaZ[gpu][i+2][ki24],d_LambdaZ[gpu][i+1][ki34],d_LambdaZ[gpu][i+1][ki24],d_LambdaZ[gpu][i+1][ki14],d_LambdaZ[gpu][i][ki24],d_c11[gpu][ki2n],d_c13[gpu][ki2n],d_c33[gpu][ki2n],nx,ny,dt2dx2,dt2dy2,dt2dz2); } else if(ib==nb-1){ hipLaunchKernelGGL(( forwardKernelBottomBlock), dim3(grid),dim3(block),0,computeStream[gpu], d_SigmaX[gpu][i+2][ki24],d_SigmaX[gpu][i+1][ki24],d_SigmaX[gpu][i][ki24],d_SigmaZ[gpu][i+2][ki24],d_SigmaZ[gpu][i+1][ki34],d_SigmaZ[gpu][i+1][ki24],d_SigmaZ[gpu][i+1][ki14],d_SigmaZ[gpu][i][ki24],d_c11[gpu][ki2n],d_c13[gpu][ki2n],d_c33[gpu][ki2n],nx,ny,dt2dx2,dt2dy2,dt2dz2); hipLaunchKernelGGL(( forwardKernelBottomBlock), dim3(grid),dim3(block),0,computeStream[gpu], d_LambdaX[gpu][i+2][ki24],d_LambdaX[gpu][i+1][ki24],d_LambdaX[gpu][i][ki24],d_LambdaZ[gpu][i+2][ki24],d_LambdaZ[gpu][i+1][ki34],d_LambdaZ[gpu][i+1][ki24],d_LambdaZ[gpu][i+1][ki14],d_LambdaZ[gpu][i][ki24],d_c11[gpu][ki2n],d_c13[gpu][ki2n],d_c33[gpu][ki2n],nx,ny,dt2dx2,dt2dy2,dt2dz2); } else{ hipLaunchKernelGGL(( forwardKernel), dim3(grid),dim3(block),0,computeStream[gpu], d_SigmaX[gpu][i+2][ki24],d_SigmaX[gpu][i+1][ki24],d_SigmaX[gpu][i][ki24],d_SigmaZ[gpu][i+2][ki24],d_SigmaZ[gpu][i+1][ki34],d_SigmaZ[gpu][i+1][ki24],d_SigmaZ[gpu][i+1][ki14],d_SigmaZ[gpu][i][ki24],d_c11[gpu][ki2n],d_c13[gpu][ki2n],d_c33[gpu][ki2n],nx,ny,dt2dx2,dt2dy2,dt2dz2); hipLaunchKernelGGL(( forwardKernel), dim3(grid),dim3(block),0,computeStream[gpu], d_LambdaX[gpu][i+2][ki24],d_LambdaX[gpu][i+1][ki24],d_LambdaX[gpu][i][ki24],d_LambdaZ[gpu][i+2][ki24],d_LambdaZ[gpu][i+1][ki34],d_LambdaZ[gpu][i+1][ki24],d_LambdaZ[gpu][i+1][ki14],d_LambdaZ[gpu][i][ki24],d_c11[gpu][ki2n],d_c13[gpu][ki2n],d_c33[gpu][ki2n],nx,ny,dt2dx2,dt2dy2,dt2dz2); } if(ib==souBlock){ float source=dt2*wavelet[it+1]; hipLaunchKernelGGL(( injectSource), dim3(1),dim3(1),0,computeStream[gpu], d_SigmaX[gpu][i+2][ki24],d_SigmaZ[gpu][i+2][ki24],source,souIndexBlock); } if(ib==recBlock){ hipLaunchKernelGGL(( injectResidual), dim3((nr+BLOCK_DIM-1)/BLOCK_DIM),dim3(BLOCK_DIM),0,computeStream[gpu], d_data[gpu][(k-1)%2],d_LambdaX[gpu][i+2][ki24],d_LambdaZ[gpu][i+2][ki24],nr,d_recIndex[gpu],dt2); } int iz=ib*HALF_STENCIL; if(iz<npad || iz+HALF_STENCIL-1>nz-npad)hipLaunchKernelGGL(( abcXYZ), dim3(grid),dim3(block),0,computeStream[gpu], ib,nx,ny,nz,npad,d_LambdaX[gpu][i+2][ki24],d_LambdaX[gpu][i+1][ki24],d_LambdaZ[gpu][i+2][ki24],d_LambdaZ[gpu][i+1][ki24],d_damping[gpu]); elsehipLaunchKernelGGL(( abcXY), dim3(grid),dim3(block),0,computeStream[gpu], ib,nx,ny,nz,npad,d_LambdaX[gpu][i+2][ki24],d_LambdaX[gpu][i+1][ki24],d_LambdaZ[gpu][i+2][ki24],d_LambdaZ[gpu][i+1][ki24],d_damping[gpu]); hipLaunchKernelGGL(( imagingKernel), dim3(grid),dim3(block),0,computeStream[gpu], d_image[gpu][ki2n],d_SigmaX[gpu][i+1][ki24],d_SigmaZ[gpu][i+1][ki24],d_LambdaX[gpu][i+1][ki24],d_LambdaZ[gpu][i+1][ki24],nx,ny); } } } if(kgpu>NUPDATE+3 && kgpu<=NUPDATE+3+nroundlen){ int ib=(kgpu-NUPDATE-4)%roundlen; if(ib<nb){ if(NGPU>1 && gpu<NGPU-1){ int n2=nbuffSigma-2,n1=nbuffSigma-1,kn3=kgpu-NUPDATE-3,kn34=kn3%4,kn3n=kn3%nbuffCij; memcpyGpuToGpu2(d_SigmaX[gpu+1][0][kn34],d_SigmaX[gpu][n2][kn34],d_SigmaX[gpu+1][1][kn34],d_SigmaX[gpu][n1][kn34],nByteBlock,transfOutStream+gpu); memcpyGpuToGpu2(d_SigmaZ[gpu+1][0][kn34],d_SigmaZ[gpu][n2][kn34],d_SigmaZ[gpu+1][1][kn34],d_SigmaZ[gpu][n1][kn34],nByteBlock,transfOutStream+gpu); 
memcpyGpuToGpu2(d_LambdaX[gpu+1][0][kn34],d_LambdaX[gpu][n2][kn34],d_LambdaX[gpu+1][1][kn34],d_LambdaX[gpu][n1][kn34],nByteBlock,transfOutStream+gpu); memcpyGpuToGpu2(d_LambdaZ[gpu+1][0][kn34],d_LambdaZ[gpu][n2][kn34],d_LambdaZ[gpu+1][1][kn34],d_LambdaZ[gpu][n1][kn34],nByteBlock,transfOutStream+gpu); memcpyGpuToGpu3(d_c11[gpu+1][kn3n],d_c11[gpu][kn3n],d_c13[gpu+1][kn3n],d_c13[gpu][kn3n],d_c33[gpu+1][kn3n],d_c33[gpu][kn3n],nByteBlock,transfOutStream+gpu); hipMemcpyAsync(d_image[gpu+1][kn3n],d_image[gpu][kn3n],nByteBlock,hipMemcpyDefault,transfOutStream[gpu]); } else{ int n2=nbuffSigma-2,n1=nbuffSigma-1,k2=k%2,kn3=kgpu-NUPDATE-3,kn34=kn3%4,kn3n=kn3%nbuffCij; memcpyGpuToCpu2(h_SigmaX4[k2],d_SigmaX[gpu][n2][kn34],h_SigmaX5[k2],d_SigmaX[gpu][n1][kn34],nByteBlock,transfOutStream+gpu); memcpyGpuToCpu2(h_SigmaZ4[k2],d_SigmaZ[gpu][n2][kn34],h_SigmaZ5[k2],d_SigmaZ[gpu][n1][kn34],nByteBlock,transfOutStream+gpu); memcpyGpuToCpu2(h_LambdaX4[k2],d_LambdaX[gpu][n2][kn34],h_LambdaX5[k2],d_LambdaX[gpu][n1][kn34],nByteBlock,transfOutStream+gpu); memcpyGpuToCpu2(h_LambdaZ4[k2],d_LambdaZ[gpu][n2][kn34],h_LambdaZ5[k2],d_LambdaZ[gpu][n1][kn34],nByteBlock,transfOutStream+gpu); hipMemcpyAsync(h_imageo[k2],d_image[gpu][kn3n],nByteBlock,hipMemcpyDeviceToHost,transfOutStream[gpu]); } } } // hipError_t e=hipGetLastError(); // if(e!=hipSuccess) fprintf(stderr,"GPU %d prop error %s\n",gpu,hipGetErrorString(e)); } if(k>pipelen-2 and k<=pipelen-2+nroundlen){ int ib=(k-pipelen+1)%roundlen; if(ib<nb){ size_t ibn=ib*nElemBlock; int k12=(k-1)%2; memcpyCpuToCpu2(prevSigmaX+ibn,h_SigmaX4[k12],curSigmaX+ibn,h_SigmaX5[k12],nByteBlock); memcpyCpuToCpu2(prevSigmaZ+ibn,h_SigmaZ4[k12],curSigmaZ+ibn,h_SigmaZ5[k12],nByteBlock); memcpyCpuToCpu2(prevLambdaX+ibn,h_LambdaX4[k12],curLambdaX+ibn,h_LambdaX5[k12],nByteBlock); memcpyCpuToCpu2(prevLambdaZ+ibn,h_LambdaZ4[k12],curLambdaZ+ibn,h_LambdaZ5[k12],nByteBlock); memcpy(image+ibn,h_imageo[k12],nByteBlock); } } for(int gpu=0;gpu<NGPU;gpu++){ hipSetDevice(GPUs[gpu]); hipDeviceSynchronize(); // hipError_t e=hipGetLastError(); // if(e!=hipSuccess) fprintf(stderr,"GPU %d synch error %s\n",gpu,hipGetErrorString(e)); } for(int i=0;i<threads.size();++i) threads[i].join(); threads.erase(threads.begin(),threads.end()); } fprintf(stderr,"adj wavefield min %.10f max %.10f\n",min(curLambdaX,nxyz),max(curLambdaX,nxyz)); // if(is==0){ // write("adjwfld",curLambdaX,nxyz); // to_header("adjwfld","n1",nx,"o1",ox,"d1",dx); // to_header("adjwfld","n2",ny,"o2",oy,"d2",dy); // to_header("adjwfld","n3",nz,"o3",oz,"d3",dz); // } hipHostFree(h_data[0]); hipHostFree(h_data[1]); delete []recIndexBlock; delete []recIndex; delete []data; for(int gpu=0;gpu<NGPU;gpu++){ hipSetDevice(GPUs[gpu]); hipFree(d_recIndex[gpu]); hipFree(d_data[gpu][0]); hipFree(d_data[gpu][1]); // hipError_t e=hipGetLastError(); // if(e!=hipSuccess) fprintf(stderr,"shot %d GPU %d dealloc error %s\n",is,gpu,hipGetErrorString(e)); } // #pragma omp parallel for num_threads(16) // for(size_t i=0;i<nxyz;i++) image1[i]+=image[i]; // // if(is%2==0){ // #pragma omp parallel for num_threads(16) // for(size_t i=0;i<nxyz;i++) image2[i]+=image[i]; // } // // if(is%4==0){ // #pragma omp parallel for num_threads(16) // for(size_t i=0;i<nxyz;i++) image3[i]+=image[i]; // } // memset(image,0,nxyz*sizeof(float)); } // write("image1",image1,nxyz); // to_header("image1","n1",nx,"o1",ox,"d1",dx); // to_header("image1","n2",ny,"o2",oy,"d2",dy); // to_header("image1","n3",nz,"o3",oz,"d3",dz); // // write("image2",image2,nxyz); // 
to_header("image2","n1",nx,"o1",ox,"d1",dx); // to_header("image2","n2",ny,"o2",oy,"d2",dy); // to_header("image2","n3",nz,"o3",oz,"d3",dz); // // write("image3",image3,nxyz); // to_header("image3","n1",nx,"o1",ox,"d1",dx); // to_header("image3","n2",ny,"o2",oy,"d2",dy); // to_header("image3","n3",nz,"o3",oz,"d3",dz); // delete []image1;delete []image2;delete []image3; // int nrtotal=souloc[5*(ns-1)+3]+souloc[5*(ns-1)+4]; // to_header("modeleddata","n1",nnt,"o1",ot,"d1",samplingRate); // to_header("modeleddata","n2",nrtotal,"o2",0.,"d2",1); // to_header("residual","n1",nnt,"o1",ot,"d1",samplingRate); // to_header("residual","n2",nrtotal,"o2",0.,"d2",1); delete []prevSigmaX;delete []curSigmaX; delete []prevSigmaZ;delete []curSigmaZ; delete []prevLambdaX;delete []curLambdaX; delete []prevLambdaZ;delete []curLambdaZ; for(int i=0;i<2;++i){ hipHostFree(h_c11[i]); hipHostFree(h_c13[i]); hipHostFree(h_c33[i]); hipHostFree(h_prevSigmaX[i]); hipHostFree(h_curSigmaX[i]); hipHostFree(h_SigmaX4[i]); hipHostFree(h_SigmaX5[i]); hipHostFree(h_prevSigmaZ[i]); hipHostFree(h_curSigmaZ[i]); hipHostFree(h_SigmaZ4[i]); hipHostFree(h_SigmaZ5[i]); hipHostFree(h_prevLambdaX[i]); hipHostFree(h_curLambdaX[i]); hipHostFree(h_LambdaX4[i]); hipHostFree(h_LambdaX5[i]); hipHostFree(h_prevLambdaZ[i]); hipHostFree(h_curLambdaZ[i]); hipHostFree(h_LambdaZ4[i]); hipHostFree(h_LambdaZ5[i]); hipHostFree(h_imagei[i]); hipHostFree(h_imageo[i]); } for(int gpu=0;gpu<NGPU;gpu++){ hipSetDevice(GPUs[gpu]); hipFree(d_damping[gpu]); for(int i=0;i<nbuffSigma;++i){ for(int j=0;j<4;++j){ hipFree(d_SigmaX[gpu][i][j]); hipFree(d_SigmaZ[gpu][i][j]); } delete []d_SigmaX[gpu][i]; delete []d_SigmaZ[gpu][i]; } delete []d_SigmaX[gpu]; delete []d_SigmaZ[gpu]; for(int i=0;i<nbuffSigma;++i){ for(int j=0;j<4;++j){ hipFree(d_LambdaX[gpu][i][j]); hipFree(d_LambdaZ[gpu][i][j]); } delete []d_LambdaX[gpu][i]; delete []d_LambdaZ[gpu][i]; } delete []d_LambdaX[gpu]; delete []d_LambdaZ[gpu]; for(int i=0;i<nbuffCij;++i){ hipFree(d_c11[gpu][i]); hipFree(d_c13[gpu][i]); hipFree(d_c33[gpu][i]); } delete []d_c11[gpu]; delete []d_c13[gpu]; delete []d_c33[gpu]; for(int i=0;i<nbuffCij;++i){ hipFree(d_image[gpu][i]); } delete []d_image[gpu]; delete []d_data[gpu]; if(gpu==0) hipStreamDestroy(transfInStream[gpu]); hipStreamDestroy(computeStream[gpu]); hipStreamDestroy(transfOutStream[gpu]); hipError_t e=hipGetLastError(); if(e!=hipSuccess) fprintf(stderr,"GPU %d dealloc error %s\n",gpu,hipGetErrorString(e)); } delete []d_recIndex; delete []d_data; delete []d_SigmaX; delete []d_SigmaZ; delete []d_LambdaX; delete []d_LambdaZ; delete []d_c11; delete []d_c13; delete []d_c33; delete []d_image; delete []transfInStream; delete []computeStream; delete []transfOutStream; delete []damping; delete []d_damping; return; }
91b8ec1c66ff096fac3141ebe6f328c5a8701769.cu
#include <cstdlib> #include <cstdio> #include <cmath> #include <iostream> #include <vector> #include <thread> #include <chrono> #include <algorithm> #include "myio.h" #include "mylib.h" #include "wave3d.h" #include "boundary.h" #include "check.h" #include "conversions.h" #include "cluster.h" using namespace std; void rtm3d(float *image,int nx,int ny,int nz,int npad,float oz,float dz,float wbottom,vector<int> &shotid,int max_shot_per_job,int icall,const string &command){ random_shuffle(shotid.begin(),shotid.end()); long long nxy=nx*ny,nxyz=nxy*nz; vector<Job> jobs; vector<int> nfail; int nshotleft=shotid.size(),njob=0,njobtotal=(shotid.size()+max_shot_per_job-1)/max_shot_per_job; while(nshotleft>0){ int nshot1job=min(nshotleft,max_shot_per_job); string shotname="icall_"+to_string(icall)+"_shot_"; string shotlist; for(int i=0;i<nshot1job;i++){ int id=i+njob*max_shot_per_job; if(i<nshot1job-1){ shotname+=to_string(shotid[id])+"_"; shotlist+=to_string(shotid[id])+","; } else{ shotname+=to_string(shotid[id]); shotlist+=to_string(shotid[id]); } } string scriptfile="./scripts/submit_"+shotname+".sh"; string jobname=shotname; string outfile="./output/"+shotname+".log"; string gradfile="./grads/image_"+shotname+".H"; string command1=command+" image="+gradfile+" shotid="+shotlist; genScript(scriptfile,jobname,outfile,command1); string id=submitScript(scriptfile); // string id=to_string(njob); string state; int nerror=0; while(id.compare("error")==0 && nerror<MAX_FAIL){ this_thread::sleep_for(chrono::seconds(5)); id=submitPBSScript(scriptfile); nerror++; } if(id.compare("error")!=0){ state="SUBMITTED"; int idx=njob; Job job(idx,id,scriptfile,outfile,gradfile,state); jobs.push_back(job); nfail.push_back(0); } else fprintf(stderr,"job %s reaches MAX_FAIL %d\n",jobname.c_str(),MAX_FAIL); njob++; nshotleft-=nshot1job; } njob=jobs.size(); this_thread::sleep_for(chrono::seconds(15)); cout<<"submitted "<<njob<<" jobs"<<endl; // for(int i=0;i<jobs.size();i++) jobs[i].printJob(); int ncompleted=0; float *image0=new float[nxyz](); while(ncompleted<njob){ for(int i=0;i<jobs.size();i++){ string id=jobs[i]._jobId; int idx=jobs[i]._jobIdx; string jobstate=jobs[i]._jobState; if(jobstate.compare("COMPLETED")!=0 && jobstate.compare("FAILED")!=0 && jobstate.compare("TIMEOUT")!=0){ string state=getJobState(id); // string state="COMPLETED"; if(state.compare("COMPLETED")==0){ cout<<"job "<<idx<<" id "<<id<<" state "<<state<<endl; ncompleted++; readFromHeader(jobs[i]._gradFile,image,nxyz); cout<<"summing image from file "<<jobs[i]._gradFile<<endl; #pragma omp parallel for for(int i=0;i<nxyz;i++) image0[i]+=image[i]; } else if(state.compare("FAILED")==0 || state.compare("TIMEOUT")==0){ cout<<"job "<<idx<<" id "<<id<<" state "<<state<<endl; nfail[idx]++; if(nfail[idx]>MAX_FAIL){ cout<<"job "<<idx<<" reached MAX_FAIL "<<MAX_FAIL<<endl; ncompleted++; continue; } cout<<" resubmitting"<<endl; Job newjob=jobs[i]; string newid=submitScript(newjob._scriptFile); // string newid=to_string(idx); if(newid.compare("error")!=0) newjob.setJobState("SUBMITTED"); newjob.setJobId(newid); jobs.push_back(newjob); } jobs[i].setJobState(state); } } } zeroBoundary(image0,nx,ny,nz,npad); int nwbottom=(wbottom-oz)/dz+1-npad; memset(image0+npad*nxy,0,nwbottom*nxy*sizeof(float)); #pragma omp parallel for num_threads(16) for(int iz=1;iz<nz-1;iz++){ for(int iy=1;iy<ny-1;iy++){ #pragma omp simd for(int ix=1;ix<nx-1;ix++){ size_t i=ix+iy*nx+iz*nxy; image[i]=image0[i+1]+image0[i-1]+image0[i+nx]+image0[i-nx]+image0[i+nxy]+image0[i-nxy]-6.f*image0[i]; } } 
} delete []image0; // for(int i=0;i<jobs.size();i++) jobs[i].printJob(); return; } void rtmVEpsDel(float *image,float *souloc,int ns,vector<int> &shotid,float *recloc,float *wavelet,float *v,float *eps,float *del,float *randboundaryV,int nx,int ny,int nz,int nt,int npad,float ox,float oy,float oz,float ot,float dx,float dy,float dz,float dt,float samplingRate,float wbottom){ long long nxy=nx*ny; long long nxyz=nxy*nz; float *c11=new float[nxyz]; float *c13=new float[nxyz]; float *c33=new float[nxyz]; // putBoundary(randboundaryV,v,nx,ny,nz,npad); VEpsDel2Cij(c11,c13,c33,v,eps,del,1.,1.,1.,nxyz); rtmCij3d_f(image,souloc,ns,shotid,recloc,wavelet,c11,c13,c33,nx,ny,nz,nt,npad,ox,oy,oz,ot,dx,dy,dz,dt,samplingRate); zeroBoundary(image,nx,ny,nz,npad); int nwbottom=(wbottom-oz)/dz+1-npad; memset(image+npad*nxy,0,nwbottom*nxy*sizeof(float)); delete []c11;delete []c13;delete []c33; return; } void rtmCij3d_f(float *image,float *souloc,int ns,vector<int> &shotid,float *recloc,float *wavelet,float *c11,float *c13,float *c33,int nx,int ny,int nz,int nt,int npad,float ox,float oy,float oz,float ot,float dx,float dy,float dz,float dt,float samplingRate){ vector<int> GPUs; get_array("gpu",GPUs); int NGPU=GPUs.size(); // fprintf(stderr,"Total # GPUs = %d\n",NGPU); // fprintf(stderr,"GPUs used are:\n"); // for(int i=0;i<NGPU;i++) fprintf(stderr,"%d ",GPUs[i]); // fprintf(stderr,"\n"); float dx2=dx*dx,dy2=dy*dy,dz2=dz*dz,dt2=dt*dt; float dt2dx2=dt2/dx2,dt2dy2=dt2/dy2,dt2dz2=dt2/dz2; int nxy=nx*ny; long long nxyz=nxy*nz; int samplingTimeStep=std::round(samplingRate/dt); int nnt=(nt-1)/samplingTimeStep+1; memset(image,0,nxyz*sizeof(float)); //this is to store multiple images // float *image1=new float[nxyz](); // float *image2=new float[nxyz](); // float *image3=new float[nxyz](); // float *damping=new float[nxy]; // init_abc(damping,nx,ny,npad); float *damping=new float[nxy+nz]; init_abc(damping,nx,ny,nz,npad); float **d_damping=new float*[NGPU](); float *prevSigmaX=new float[nxyz]; float *curSigmaX=new float[nxyz]; float *prevSigmaZ=new float[nxyz]; float *curSigmaZ=new float[nxyz]; float *prevLambdaX=new float[nxyz]; float *curLambdaX=new float[nxyz]; float *prevLambdaZ=new float[nxyz]; float *curLambdaZ=new float[nxyz]; size_t nElemBlock=HALF_STENCIL*nxy; size_t nByteBlock=nElemBlock*sizeof(float); int nb=nz/HALF_STENCIL; float *h_c11[2],*h_c13[2],*h_c33[2]; float *h_prevSigmaX[2],*h_curSigmaX[2],*h_SigmaX4[2],*h_SigmaX5[2]; float *h_prevSigmaZ[2],*h_curSigmaZ[2],*h_SigmaZ4[2],*h_SigmaZ5[2]; float *h_prevLambdaX[2],*h_curLambdaX[2],*h_LambdaX4[2],*h_LambdaX5[2]; float *h_prevLambdaZ[2],*h_curLambdaZ[2],*h_LambdaZ4[2],*h_LambdaZ5[2]; float *h_data[2]; float *h_imagei[2],*h_imageo[2]; for(int i=0;i<2;++i){ cudaHostAlloc(&h_c11[i],nByteBlock,cudaHostAllocDefault); cudaHostAlloc(&h_c13[i],nByteBlock,cudaHostAllocDefault); cudaHostAlloc(&h_c33[i],nByteBlock,cudaHostAllocDefault); cudaHostAlloc(&h_prevSigmaX[i],nByteBlock,cudaHostAllocDefault); cudaHostAlloc(&h_curSigmaX[i],nByteBlock,cudaHostAllocDefault); cudaHostAlloc(&h_SigmaX4[i],nByteBlock,cudaHostAllocDefault); cudaHostAlloc(&h_SigmaX5[i],nByteBlock,cudaHostAllocDefault); cudaHostAlloc(&h_prevSigmaZ[i],nByteBlock,cudaHostAllocDefault); cudaHostAlloc(&h_curSigmaZ[i],nByteBlock,cudaHostAllocDefault); cudaHostAlloc(&h_SigmaZ4[i],nByteBlock,cudaHostAllocDefault); cudaHostAlloc(&h_SigmaZ5[i],nByteBlock,cudaHostAllocDefault); cudaHostAlloc(&h_prevLambdaX[i],nByteBlock,cudaHostAllocDefault); cudaHostAlloc(&h_curLambdaX[i],nByteBlock,cudaHostAllocDefault); 
cudaHostAlloc(&h_LambdaX4[i],nByteBlock,cudaHostAllocDefault); cudaHostAlloc(&h_LambdaX5[i],nByteBlock,cudaHostAllocDefault); cudaHostAlloc(&h_prevLambdaZ[i],nByteBlock,cudaHostAllocDefault); cudaHostAlloc(&h_curLambdaZ[i],nByteBlock,cudaHostAllocDefault); cudaHostAlloc(&h_LambdaZ4[i],nByteBlock,cudaHostAllocDefault); cudaHostAlloc(&h_LambdaZ5[i],nByteBlock,cudaHostAllocDefault); cudaHostAlloc(&h_imagei[i],nByteBlock,cudaHostAllocDefault); cudaHostAlloc(&h_imageo[i],nByteBlock,cudaHostAllocDefault); } const int nbuffSigma=NUPDATE+2; int **d_recIndex=new int*[NGPU](); float ***d_data=new float**[NGPU](); float ****d_SigmaX=new float ***[NGPU](); float ****d_SigmaZ=new float ***[NGPU](); float ****d_LambdaX=new float ***[NGPU](); float ****d_LambdaZ=new float ***[NGPU](); const int nbuffCij=NUPDATE+4; float ***d_c11=new float**[NGPU](); float ***d_c13=new float**[NGPU](); float ***d_c33=new float**[NGPU](); float ***d_image=new float**[NGPU](); cudaStream_t *transfInStream=new cudaStream_t[1](); cudaStream_t *transfOutStream=new cudaStream_t[NGPU](); cudaStream_t *computeStream=new cudaStream_t[NGPU](); dim3 block(BLOCK_DIM,BLOCK_DIM); dim3 grid((nx-2*HALF_STENCIL+BLOCK_DIM-1)/BLOCK_DIM,(ny-2*HALF_STENCIL+BLOCK_DIM-1)/BLOCK_DIM); for(int gpu=0;gpu<NGPU;gpu++){ float mem=0.; cudaSetDevice(GPUs[gpu]); // cudaMalloc(&d_damping[gpu],nxy*sizeof(float)); // cudaMemcpy(d_damping[gpu],damping,nxy*sizeof(float),cudaMemcpyHostToDevice); cudaMalloc(&d_damping[gpu],(nxy+nz)*sizeof(float)); mem+=(nxy+nz)*sizeof(float); cudaMemcpy(d_damping[gpu],damping,(nxy+nz)*sizeof(float),cudaMemcpyHostToDevice); d_SigmaX[gpu]=new float**[nbuffSigma](); d_SigmaZ[gpu]=new float**[nbuffSigma](); for(int i=0;i<nbuffSigma;++i){ d_SigmaX[gpu][i]=new float*[4](); d_SigmaZ[gpu][i]=new float*[4](); for(int j=0;j<4;++j){ cudaMalloc(&d_SigmaX[gpu][i][j],nByteBlock); cudaMalloc(&d_SigmaZ[gpu][i][j],nByteBlock); mem+=2*nByteBlock; } } d_LambdaX[gpu]=new float**[nbuffSigma](); d_LambdaZ[gpu]=new float**[nbuffSigma](); for(int i=0;i<nbuffSigma;++i){ d_LambdaX[gpu][i]=new float*[4](); d_LambdaZ[gpu][i]=new float*[4](); for(int j=0;j<4;++j){ cudaMalloc(&d_LambdaX[gpu][i][j],nByteBlock); cudaMalloc(&d_LambdaZ[gpu][i][j],nByteBlock); mem+=2*nByteBlock; } } d_c11[gpu]=new float*[nbuffCij](); d_c13[gpu]=new float*[nbuffCij](); d_c33[gpu]=new float*[nbuffCij](); for(int i=0;i<nbuffCij;++i){ cudaMalloc(&d_c11[gpu][i],nByteBlock); cudaMalloc(&d_c13[gpu][i],nByteBlock); cudaMalloc(&d_c33[gpu][i],nByteBlock); mem+=3*nByteBlock; } d_image[gpu]=new float*[nbuffCij](); for(int i=0;i<nbuffCij;++i){ cudaMalloc(&d_image[gpu][i],nByteBlock); mem+=nByteBlock; } d_data[gpu]=new float*[2](); if(gpu==0) cudaStreamCreate(&transfInStream[gpu]); cudaStreamCreate(&computeStream[gpu]); cudaStreamCreate(&transfOutStream[gpu]); fprintf(stderr,"gpu %d allocates %f GB\n",GPUs[gpu],mem*1e-9); cudaError_t e=cudaGetLastError(); if(e!=cudaSuccess) fprintf(stderr,"GPU %d alloc error %s\n",gpu,cudaGetErrorString(e)); } vector<thread> threads; int pipelen=NGPU*(NUPDATE+3)+3; int nround=(nt-2)/(NGPU*NUPDATE); int roundlen=max(pipelen,nb); int nroundlen=nround*roundlen;; int nk=(nround-1)*roundlen+pipelen+nb-1; // fprintf(stderr,"pipelen=%d nround=%d roundlen=%d nk=%d\n",pipelen,nround,roundlen,nk); int recBlock=(recloc[2]-oz)/dz/HALF_STENCIL; //assume all receivers are at same depth for(vector<int>::iterator id=shotid.begin();id!=shotid.end();id++){ int is=*id; fprintf(stderr,"shot # %d\n",is); int nr=souloc[5*is+3]; int irbegin=souloc[5*is+4]; int *recIndex=new 
int[nr]; int *recIndexBlock=new int[nr]; float *data=new float[nnt*nr](); #pragma omp parallel for num_threads(16) for(int ir=0;ir<nr;ir++){ int ir1=ir+irbegin; int ix=(recloc[3*ir1]-ox)/dx; int iy=(recloc[3*ir1+1]-oy)/dy; int iz=(recloc[3*ir1+2]-oz)/dz; int ixy=ix+iy*nx; recIndex[ir]=ixy+iz*nxy; recIndexBlock[ir]=ixy+(iz%HALF_STENCIL)*nxy; } int souIndexX=(souloc[5*is]-ox)/dx; int souIndexY=(souloc[5*is+1]-oy)/dy; int souIndexZ=(souloc[5*is+2]-oz)/dz; int souIndex=souIndexX+souIndexY*nx+souIndexZ*nxy; int souIndexBlock=souIndexX+souIndexY*nx+(souIndexZ%HALF_STENCIL)*nxy; int souBlock=souIndexZ/HALF_STENCIL; cudaHostAlloc(&h_data[0],nr*sizeof(float),cudaHostAllocDefault); cudaHostAlloc(&h_data[1],nr*sizeof(float),cudaHostAllocDefault); for(int gpu=0;gpu<NGPU;gpu++){ cudaSetDevice(GPUs[gpu]); cudaMalloc(&d_recIndex[gpu],nr*sizeof(int)); cudaMemcpy(d_recIndex[gpu],recIndexBlock,nr*sizeof(int),cudaMemcpyHostToDevice); cudaMalloc(&d_data[gpu][0],nr*sizeof(float)); cudaMalloc(&d_data[gpu][1],nr*sizeof(float)); // cudaError_t e=cudaGetLastError(); // if(e!=cudaSuccess) fprintf(stderr,"shot %d GPU %d alloc error %s\n",is,gpu,cudaGetErrorString(e)); } // fprintf(stderr,"fowrard propagation with random boundary\n"); memset(prevSigmaX,0,nxyz*sizeof(float)); memset(curSigmaX,0,nxyz*sizeof(float)); memset(prevSigmaZ,0,nxyz*sizeof(float)); memset(curSigmaZ,0,nxyz*sizeof(float)); //injecting source at time 0 to wavefields at time 1 float temp=dt2*wavelet[0]; curSigmaX[souIndex]=temp; curSigmaZ[souIndex]=temp; for(int k=0;k<nk;k++){ if(k<nroundlen){ int ib=k%roundlen; if(ib<nb){ size_t ibn=ib*nElemBlock; int k2=k%2; threads.push_back(thread(memcpyCpuToCpu3,h_c11[k2],c11+ibn,h_c13[k2],c13+ibn,h_c33[k2],c33+ibn,nByteBlock)); threads.push_back(thread(memcpyCpuToCpu2,h_prevSigmaX[k2],prevSigmaX+ibn,h_curSigmaX[k2],curSigmaX+ibn,nByteBlock)); threads.push_back(thread(memcpyCpuToCpu2,h_prevSigmaZ[k2],prevSigmaZ+ibn,h_curSigmaZ[k2],curSigmaZ+ibn,nByteBlock)); } } if(k>0 && k<=nroundlen){ int ib=(k-1)%roundlen; if(ib<nb){ int k12=(k-1)%2,kn=k%nbuffCij,k4=k%4; cudaSetDevice(GPUs[0]); memcpyCpuToGpu2(d_SigmaX[0][0][k4],h_prevSigmaX[k12],d_SigmaX[0][1][k4],h_curSigmaX[k12],nByteBlock,transfInStream); memcpyCpuToGpu2(d_SigmaZ[0][0][k4],h_prevSigmaZ[k12],d_SigmaZ[0][1][k4],h_curSigmaZ[k12],nByteBlock,transfInStream); memcpyCpuToGpu3(d_c11[0][kn],h_c11[k12],d_c13[0][kn],h_c13[k12],d_c33[0][kn],h_c33[k12],nByteBlock,transfInStream); } } for(int gpu=0;gpu<NGPU;gpu++){ int kgpu=k-gpu*(NUPDATE+3); cudaSetDevice(GPUs[gpu]); if(kgpu>2 && kgpu<=NUPDATE+1+nroundlen){ for(int i=0;i<NUPDATE;i++){ int ib=(kgpu-3-i)%roundlen; int iround=(kgpu-3-i)/roundlen; if(ib>=0 && ib<nb && iround>=0 && iround<nround){ int it=iround*NGPU*NUPDATE+gpu*NUPDATE+2+i; int ki=kgpu-i,ki14=(ki-1)%4,ki24=(ki-2)%4,ki34=(ki-3)%4,ki2n=(ki-2)%nbuffCij; if(ib==0){ forwardKernelTopBlock<<<grid,block,0,computeStream[gpu]>>>(d_SigmaX[gpu][i+2][ki24],d_SigmaX[gpu][i+1][ki24],d_SigmaX[gpu][i][ki24],d_SigmaZ[gpu][i+2][ki24],d_SigmaZ[gpu][i+1][ki34],d_SigmaZ[gpu][i+1][ki24],d_SigmaZ[gpu][i+1][ki14],d_SigmaZ[gpu][i][ki24],d_c11[gpu][ki2n],d_c13[gpu][ki2n],d_c33[gpu][ki2n],nx,ny,dt2dx2,dt2dy2,dt2dz2); } else if(ib==nb-1){ forwardKernelBottomBlock<<<grid,block,0,computeStream[gpu]>>>(d_SigmaX[gpu][i+2][ki24],d_SigmaX[gpu][i+1][ki24],d_SigmaX[gpu][i][ki24],d_SigmaZ[gpu][i+2][ki24],d_SigmaZ[gpu][i+1][ki34],d_SigmaZ[gpu][i+1][ki24],d_SigmaZ[gpu][i+1][ki14],d_SigmaZ[gpu][i][ki24],d_c11[gpu][ki2n],d_c13[gpu][ki2n],d_c33[gpu][ki2n],nx,ny,dt2dx2,dt2dy2,dt2dz2); } else{ 
forwardKernel<<<grid,block,0,computeStream[gpu]>>>(d_SigmaX[gpu][i+2][ki24],d_SigmaX[gpu][i+1][ki24],d_SigmaX[gpu][i][ki24],d_SigmaZ[gpu][i+2][ki24],d_SigmaZ[gpu][i+1][ki34],d_SigmaZ[gpu][i+1][ki24],d_SigmaZ[gpu][i+1][ki14],d_SigmaZ[gpu][i][ki24],d_c11[gpu][ki2n],d_c13[gpu][ki2n],d_c33[gpu][ki2n],nx,ny,dt2dx2,dt2dy2,dt2dz2); } if(ib==souBlock){ float source=dt2*wavelet[it-1]; injectSource<<<1,1,0,computeStream[gpu]>>>(d_SigmaX[gpu][i+2][ki24],d_SigmaZ[gpu][i+2][ki24],source,souIndexBlock); } } } } if(kgpu>NUPDATE+3 && kgpu<=NUPDATE+3+nroundlen){ int ib=(kgpu-NUPDATE-4)%roundlen; if(ib<nb){ if(NGPU>1 && gpu<NGPU-1){ int n2=nbuffSigma-2,n1=nbuffSigma-1,kn3=kgpu-NUPDATE-3,kn34=kn3%4,kn3n=kn3%nbuffCij; memcpyGpuToGpu2(d_SigmaX[gpu+1][0][kn34],d_SigmaX[gpu][n2][kn34],d_SigmaX[gpu+1][1][kn34],d_SigmaX[gpu][n1][kn34],nByteBlock,transfOutStream+gpu); memcpyGpuToGpu2(d_SigmaZ[gpu+1][0][kn34],d_SigmaZ[gpu][n2][kn34],d_SigmaZ[gpu+1][1][kn34],d_SigmaZ[gpu][n1][kn34],nByteBlock,transfOutStream+gpu); memcpyGpuToGpu3(d_c11[gpu+1][kn3n],d_c11[gpu][kn3n],d_c13[gpu+1][kn3n],d_c13[gpu][kn3n],d_c33[gpu+1][kn3n],d_c33[gpu][kn3n],nByteBlock,transfOutStream+gpu); } else{ int n2=nbuffSigma-2,n1=nbuffSigma-1,k2=k%2,kn34=(kgpu-NUPDATE-3)%4; memcpyGpuToCpu2(h_SigmaX4[k2],d_SigmaX[gpu][n2][kn34],h_SigmaX5[k2],d_SigmaX[gpu][n1][kn34],nByteBlock,transfOutStream+gpu); memcpyGpuToCpu2(h_SigmaZ4[k2],d_SigmaZ[gpu][n2][kn34],h_SigmaZ5[k2],d_SigmaZ[gpu][n1][kn34],nByteBlock,transfOutStream+gpu); } } } // cudaError_t e=cudaGetLastError(); // if(e!=cudaSuccess) fprintf(stderr,"GPU %d prop error %s\n",gpu,cudaGetErrorString(e)); } if(k>pipelen-2 and k<=pipelen-2+nroundlen){ int ib=(k-pipelen+1)%roundlen; if(ib<nb){ size_t ibn=ib*nElemBlock; int k12=(k-1)%2; memcpyCpuToCpu2(prevSigmaX+ibn,h_SigmaX4[k12],curSigmaX+ibn,h_SigmaX5[k12],nByteBlock); memcpyCpuToCpu2(prevSigmaZ+ibn,h_SigmaZ4[k12],curSigmaZ+ibn,h_SigmaZ5[k12],nByteBlock); } } for(int gpu=0;gpu<NGPU;gpu++){ cudaSetDevice(GPUs[gpu]); cudaDeviceSynchronize(); // cudaError_t e=cudaGetLastError(); // if(e!=cudaSuccess) fprintf(stderr,"GPU %d synch error %s\n",gpu,cudaGetErrorString(e)); } for(int i=0;i<threads.size();++i) threads[i].join(); threads.erase(threads.begin(),threads.end()); } // if(is==0){ // write("randforwardwfld",curSigmaX,nxyz); // to_header("randforwardwfld","n1",nx,"o1",ox,"d1",dx); // to_header("randforwardwfld","n2",ny,"o2",oy,"d2",dy); // to_header("randforwardwfld","n3",nz,"o3",oz,"d3",dz); // } // fprintf(stderr,"backward propagations\n"); //flip forward wavefields float *pt; pt=curSigmaX;curSigmaX=prevSigmaX;prevSigmaX=pt; pt=curSigmaZ;curSigmaZ=prevSigmaZ;prevSigmaZ=pt; fprintf(stderr,"forward wavefield min %.10f max %.10f\n",min(curSigmaX,nxyz),max(curSigmaX,nxyz)); memset(prevLambdaX,0,nxyz*sizeof(float)); memset(curLambdaX,0,nxyz*sizeof(float)); memset(prevLambdaZ,0,nxyz*sizeof(float)); memset(curLambdaZ,0,nxyz*sizeof(float)); read("data",data,nnt*nr,(long long)nnt*(long long)irbegin); fprintf(stderr,"data min %.10f max %.10f\n",min(data,nnt*nr),max(data,nnt*nr)); // fprintf(stderr,"inject residual to adjoint wavefields\n"); #pragma omp parallel for num_threads(16) for(int ir=0;ir<nr;ir++){ float temp=dt2*data[(nnt-1)+ir*nnt]; curLambdaX[recIndex[ir]]=TWOTHIRD*temp; curLambdaZ[recIndex[ir]]=ONETHIRD*temp; } for(int k=0;k<nk;k++){ if(k<nroundlen){ int ib=k%roundlen; if(ib<nb){ size_t ibn=ib*nElemBlock; int k2=k%2; threads.push_back(thread(memcpyCpuToCpu3,h_c11[k2],c11+ibn,h_c13[k2],c13+ibn,h_c33[k2],c33+ibn,nByteBlock)); 
threads.push_back(thread(memcpyCpuToCpu2,h_prevSigmaX[k2],prevSigmaX+ibn,h_curSigmaX[k2],curSigmaX+ibn,nByteBlock)); threads.push_back(thread(memcpyCpuToCpu2,h_prevSigmaZ[k2],prevSigmaZ+ibn,h_curSigmaZ[k2],curSigmaZ+ibn,nByteBlock)); threads.push_back(thread(memcpyCpuToCpu2,h_prevLambdaX[k2],prevLambdaX+ibn,h_curLambdaX[k2],curLambdaX+ibn,nByteBlock)); threads.push_back(thread(memcpyCpuToCpu2,h_prevLambdaZ[k2],prevLambdaZ+ibn,h_curLambdaZ[k2],curLambdaZ+ibn,nByteBlock)); threads.push_back(thread(memcpy,h_imagei[k2],image+ibn,nByteBlock)); } } if(k>0 && k<=nroundlen){ int ib=(k-1)%roundlen; if(ib<nb){ int k12=(k-1)%2,kn=k%nbuffCij,k4=k%4; cudaSetDevice(GPUs[0]); memcpyCpuToGpu2(d_SigmaX[0][0][k4],h_prevSigmaX[k12],d_SigmaX[0][1][k4],h_curSigmaX[k12],nByteBlock,transfInStream); memcpyCpuToGpu2(d_SigmaZ[0][0][k4],h_prevSigmaZ[k12],d_SigmaZ[0][1][k4],h_curSigmaZ[k12],nByteBlock,transfInStream); memcpyCpuToGpu2(d_LambdaX[0][0][k4],h_prevLambdaX[k12],d_LambdaX[0][1][k4],h_curLambdaX[k12],nByteBlock,transfInStream); memcpyCpuToGpu2(d_LambdaZ[0][0][k4],h_prevLambdaZ[k12],d_LambdaZ[0][1][k4],h_curLambdaZ[k12],nByteBlock,transfInStream); memcpyCpuToGpu3(d_c11[0][kn],h_c11[k12],d_c13[0][kn],h_c13[k12],d_c33[0][kn],h_c33[k12],nByteBlock,transfInStream); cudaMemcpyAsync(d_image[0][kn],h_imagei[k12],nByteBlock,cudaMemcpyHostToDevice,*transfInStream); } } for(int gpu=0;gpu<NGPU;gpu++){ int kgpu=k+2-gpu*(NUPDATE+3); if(kgpu>2 && kgpu<=NUPDATE+1+nroundlen){ for(int i=0;i<NUPDATE;i++){ int ib=(kgpu-3-i)%roundlen; int iround=(kgpu-3-i)/roundlen; if(ib==recBlock && iround>=0 && iround<nround){ int it=iround*NGPU*NUPDATE+gpu*NUPDATE+2+i; it=nt-1-it; threads.push_back(thread(interpolateResidual,h_data[k%2],data,it+1,nnt,nr,samplingTimeStep)); } } } kgpu=k+1-gpu*(NUPDATE+3); if(kgpu>2 && kgpu<=NUPDATE+1+nroundlen){ for(int i=0;i<NUPDATE;i++){ int ib=(kgpu-3-i)%roundlen; int iround=(kgpu-3-i)/roundlen; if(ib==recBlock && iround>=0 && iround<nround){ cudaSetDevice(GPUs[gpu]); int it=iround*NGPU*NUPDATE+gpu*NUPDATE+2+i; it=nt-1-it; cudaMemcpyAsync(d_data[gpu][k%2],h_data[(k-1)%2],nr*sizeof(float),cudaMemcpyHostToDevice,transfOutStream[gpu]); } } } } for(int gpu=0;gpu<NGPU;gpu++){ int kgpu=k-gpu*(NUPDATE+3); cudaSetDevice(GPUs[gpu]); if(kgpu>2 && kgpu<=NUPDATE+1+nroundlen){ for(int i=0;i<NUPDATE;i++){ int ib=(kgpu-3-i)%roundlen; int iround=(kgpu-3-i)/roundlen; if(ib>=0 && ib<nb && iround>=0 && iround<nround){ int it=iround*NGPU*NUPDATE+gpu*NUPDATE+2+i; it=nt-1-it; int ki=kgpu-i,ki14=(ki-1)%4,ki24=(ki-2)%4,ki34=(ki-3)%4,ki2n=(ki-2)%nbuffCij; if(ib==0){ forwardKernelTopBlock<<<grid,block,0,computeStream[gpu]>>>(d_SigmaX[gpu][i+2][ki24],d_SigmaX[gpu][i+1][ki24],d_SigmaX[gpu][i][ki24],d_SigmaZ[gpu][i+2][ki24],d_SigmaZ[gpu][i+1][ki34],d_SigmaZ[gpu][i+1][ki24],d_SigmaZ[gpu][i+1][ki14],d_SigmaZ[gpu][i][ki24],d_c11[gpu][ki2n],d_c13[gpu][ki2n],d_c33[gpu][ki2n],nx,ny,dt2dx2,dt2dy2,dt2dz2); forwardKernelTopBlock<<<grid,block,0,computeStream[gpu]>>>(d_LambdaX[gpu][i+2][ki24],d_LambdaX[gpu][i+1][ki24],d_LambdaX[gpu][i][ki24],d_LambdaZ[gpu][i+2][ki24],d_LambdaZ[gpu][i+1][ki34],d_LambdaZ[gpu][i+1][ki24],d_LambdaZ[gpu][i+1][ki14],d_LambdaZ[gpu][i][ki24],d_c11[gpu][ki2n],d_c13[gpu][ki2n],d_c33[gpu][ki2n],nx,ny,dt2dx2,dt2dy2,dt2dz2); } else if(ib==nb-1){ 
forwardKernelBottomBlock<<<grid,block,0,computeStream[gpu]>>>(d_SigmaX[gpu][i+2][ki24],d_SigmaX[gpu][i+1][ki24],d_SigmaX[gpu][i][ki24],d_SigmaZ[gpu][i+2][ki24],d_SigmaZ[gpu][i+1][ki34],d_SigmaZ[gpu][i+1][ki24],d_SigmaZ[gpu][i+1][ki14],d_SigmaZ[gpu][i][ki24],d_c11[gpu][ki2n],d_c13[gpu][ki2n],d_c33[gpu][ki2n],nx,ny,dt2dx2,dt2dy2,dt2dz2); forwardKernelBottomBlock<<<grid,block,0,computeStream[gpu]>>>(d_LambdaX[gpu][i+2][ki24],d_LambdaX[gpu][i+1][ki24],d_LambdaX[gpu][i][ki24],d_LambdaZ[gpu][i+2][ki24],d_LambdaZ[gpu][i+1][ki34],d_LambdaZ[gpu][i+1][ki24],d_LambdaZ[gpu][i+1][ki14],d_LambdaZ[gpu][i][ki24],d_c11[gpu][ki2n],d_c13[gpu][ki2n],d_c33[gpu][ki2n],nx,ny,dt2dx2,dt2dy2,dt2dz2); } else{ forwardKernel<<<grid,block,0,computeStream[gpu]>>>(d_SigmaX[gpu][i+2][ki24],d_SigmaX[gpu][i+1][ki24],d_SigmaX[gpu][i][ki24],d_SigmaZ[gpu][i+2][ki24],d_SigmaZ[gpu][i+1][ki34],d_SigmaZ[gpu][i+1][ki24],d_SigmaZ[gpu][i+1][ki14],d_SigmaZ[gpu][i][ki24],d_c11[gpu][ki2n],d_c13[gpu][ki2n],d_c33[gpu][ki2n],nx,ny,dt2dx2,dt2dy2,dt2dz2); forwardKernel<<<grid,block,0,computeStream[gpu]>>>(d_LambdaX[gpu][i+2][ki24],d_LambdaX[gpu][i+1][ki24],d_LambdaX[gpu][i][ki24],d_LambdaZ[gpu][i+2][ki24],d_LambdaZ[gpu][i+1][ki34],d_LambdaZ[gpu][i+1][ki24],d_LambdaZ[gpu][i+1][ki14],d_LambdaZ[gpu][i][ki24],d_c11[gpu][ki2n],d_c13[gpu][ki2n],d_c33[gpu][ki2n],nx,ny,dt2dx2,dt2dy2,dt2dz2); } if(ib==souBlock){ float source=dt2*wavelet[it+1]; injectSource<<<1,1,0,computeStream[gpu]>>>(d_SigmaX[gpu][i+2][ki24],d_SigmaZ[gpu][i+2][ki24],source,souIndexBlock); } if(ib==recBlock){ injectResidual<<<(nr+BLOCK_DIM-1)/BLOCK_DIM,BLOCK_DIM,0,computeStream[gpu]>>>(d_data[gpu][(k-1)%2],d_LambdaX[gpu][i+2][ki24],d_LambdaZ[gpu][i+2][ki24],nr,d_recIndex[gpu],dt2); } int iz=ib*HALF_STENCIL; if(iz<npad || iz+HALF_STENCIL-1>nz-npad) abcXYZ<<<grid,block,0,computeStream[gpu]>>>(ib,nx,ny,nz,npad,d_LambdaX[gpu][i+2][ki24],d_LambdaX[gpu][i+1][ki24],d_LambdaZ[gpu][i+2][ki24],d_LambdaZ[gpu][i+1][ki24],d_damping[gpu]); else abcXY<<<grid,block,0,computeStream[gpu]>>>(ib,nx,ny,nz,npad,d_LambdaX[gpu][i+2][ki24],d_LambdaX[gpu][i+1][ki24],d_LambdaZ[gpu][i+2][ki24],d_LambdaZ[gpu][i+1][ki24],d_damping[gpu]); imagingKernel<<<grid,block,0,computeStream[gpu]>>>(d_image[gpu][ki2n],d_SigmaX[gpu][i+1][ki24],d_SigmaZ[gpu][i+1][ki24],d_LambdaX[gpu][i+1][ki24],d_LambdaZ[gpu][i+1][ki24],nx,ny); } } } if(kgpu>NUPDATE+3 && kgpu<=NUPDATE+3+nroundlen){ int ib=(kgpu-NUPDATE-4)%roundlen; if(ib<nb){ if(NGPU>1 && gpu<NGPU-1){ int n2=nbuffSigma-2,n1=nbuffSigma-1,kn3=kgpu-NUPDATE-3,kn34=kn3%4,kn3n=kn3%nbuffCij; memcpyGpuToGpu2(d_SigmaX[gpu+1][0][kn34],d_SigmaX[gpu][n2][kn34],d_SigmaX[gpu+1][1][kn34],d_SigmaX[gpu][n1][kn34],nByteBlock,transfOutStream+gpu); memcpyGpuToGpu2(d_SigmaZ[gpu+1][0][kn34],d_SigmaZ[gpu][n2][kn34],d_SigmaZ[gpu+1][1][kn34],d_SigmaZ[gpu][n1][kn34],nByteBlock,transfOutStream+gpu); memcpyGpuToGpu2(d_LambdaX[gpu+1][0][kn34],d_LambdaX[gpu][n2][kn34],d_LambdaX[gpu+1][1][kn34],d_LambdaX[gpu][n1][kn34],nByteBlock,transfOutStream+gpu); memcpyGpuToGpu2(d_LambdaZ[gpu+1][0][kn34],d_LambdaZ[gpu][n2][kn34],d_LambdaZ[gpu+1][1][kn34],d_LambdaZ[gpu][n1][kn34],nByteBlock,transfOutStream+gpu); memcpyGpuToGpu3(d_c11[gpu+1][kn3n],d_c11[gpu][kn3n],d_c13[gpu+1][kn3n],d_c13[gpu][kn3n],d_c33[gpu+1][kn3n],d_c33[gpu][kn3n],nByteBlock,transfOutStream+gpu); cudaMemcpyAsync(d_image[gpu+1][kn3n],d_image[gpu][kn3n],nByteBlock,cudaMemcpyDefault,transfOutStream[gpu]); } else{ int n2=nbuffSigma-2,n1=nbuffSigma-1,k2=k%2,kn3=kgpu-NUPDATE-3,kn34=kn3%4,kn3n=kn3%nbuffCij; 
memcpyGpuToCpu2(h_SigmaX4[k2],d_SigmaX[gpu][n2][kn34],h_SigmaX5[k2],d_SigmaX[gpu][n1][kn34],nByteBlock,transfOutStream+gpu); memcpyGpuToCpu2(h_SigmaZ4[k2],d_SigmaZ[gpu][n2][kn34],h_SigmaZ5[k2],d_SigmaZ[gpu][n1][kn34],nByteBlock,transfOutStream+gpu); memcpyGpuToCpu2(h_LambdaX4[k2],d_LambdaX[gpu][n2][kn34],h_LambdaX5[k2],d_LambdaX[gpu][n1][kn34],nByteBlock,transfOutStream+gpu); memcpyGpuToCpu2(h_LambdaZ4[k2],d_LambdaZ[gpu][n2][kn34],h_LambdaZ5[k2],d_LambdaZ[gpu][n1][kn34],nByteBlock,transfOutStream+gpu); cudaMemcpyAsync(h_imageo[k2],d_image[gpu][kn3n],nByteBlock,cudaMemcpyDeviceToHost,transfOutStream[gpu]); } } } // cudaError_t e=cudaGetLastError(); // if(e!=cudaSuccess) fprintf(stderr,"GPU %d prop error %s\n",gpu,cudaGetErrorString(e)); } if(k>pipelen-2 and k<=pipelen-2+nroundlen){ int ib=(k-pipelen+1)%roundlen; if(ib<nb){ size_t ibn=ib*nElemBlock; int k12=(k-1)%2; memcpyCpuToCpu2(prevSigmaX+ibn,h_SigmaX4[k12],curSigmaX+ibn,h_SigmaX5[k12],nByteBlock); memcpyCpuToCpu2(prevSigmaZ+ibn,h_SigmaZ4[k12],curSigmaZ+ibn,h_SigmaZ5[k12],nByteBlock); memcpyCpuToCpu2(prevLambdaX+ibn,h_LambdaX4[k12],curLambdaX+ibn,h_LambdaX5[k12],nByteBlock); memcpyCpuToCpu2(prevLambdaZ+ibn,h_LambdaZ4[k12],curLambdaZ+ibn,h_LambdaZ5[k12],nByteBlock); memcpy(image+ibn,h_imageo[k12],nByteBlock); } } for(int gpu=0;gpu<NGPU;gpu++){ cudaSetDevice(GPUs[gpu]); cudaDeviceSynchronize(); // cudaError_t e=cudaGetLastError(); // if(e!=cudaSuccess) fprintf(stderr,"GPU %d synch error %s\n",gpu,cudaGetErrorString(e)); } for(int i=0;i<threads.size();++i) threads[i].join(); threads.erase(threads.begin(),threads.end()); } fprintf(stderr,"adj wavefield min %.10f max %.10f\n",min(curLambdaX,nxyz),max(curLambdaX,nxyz)); // if(is==0){ // write("adjwfld",curLambdaX,nxyz); // to_header("adjwfld","n1",nx,"o1",ox,"d1",dx); // to_header("adjwfld","n2",ny,"o2",oy,"d2",dy); // to_header("adjwfld","n3",nz,"o3",oz,"d3",dz); // } cudaFreeHost(h_data[0]); cudaFreeHost(h_data[1]); delete []recIndexBlock; delete []recIndex; delete []data; for(int gpu=0;gpu<NGPU;gpu++){ cudaSetDevice(GPUs[gpu]); cudaFree(d_recIndex[gpu]); cudaFree(d_data[gpu][0]); cudaFree(d_data[gpu][1]); // cudaError_t e=cudaGetLastError(); // if(e!=cudaSuccess) fprintf(stderr,"shot %d GPU %d dealloc error %s\n",is,gpu,cudaGetErrorString(e)); } // #pragma omp parallel for num_threads(16) // for(size_t i=0;i<nxyz;i++) image1[i]+=image[i]; // // if(is%2==0){ // #pragma omp parallel for num_threads(16) // for(size_t i=0;i<nxyz;i++) image2[i]+=image[i]; // } // // if(is%4==0){ // #pragma omp parallel for num_threads(16) // for(size_t i=0;i<nxyz;i++) image3[i]+=image[i]; // } // memset(image,0,nxyz*sizeof(float)); } // write("image1",image1,nxyz); // to_header("image1","n1",nx,"o1",ox,"d1",dx); // to_header("image1","n2",ny,"o2",oy,"d2",dy); // to_header("image1","n3",nz,"o3",oz,"d3",dz); // // write("image2",image2,nxyz); // to_header("image2","n1",nx,"o1",ox,"d1",dx); // to_header("image2","n2",ny,"o2",oy,"d2",dy); // to_header("image2","n3",nz,"o3",oz,"d3",dz); // // write("image3",image3,nxyz); // to_header("image3","n1",nx,"o1",ox,"d1",dx); // to_header("image3","n2",ny,"o2",oy,"d2",dy); // to_header("image3","n3",nz,"o3",oz,"d3",dz); // delete []image1;delete []image2;delete []image3; // int nrtotal=souloc[5*(ns-1)+3]+souloc[5*(ns-1)+4]; // to_header("modeleddata","n1",nnt,"o1",ot,"d1",samplingRate); // to_header("modeleddata","n2",nrtotal,"o2",0.,"d2",1); // to_header("residual","n1",nnt,"o1",ot,"d1",samplingRate); // to_header("residual","n2",nrtotal,"o2",0.,"d2",1); delete 
[]prevSigmaX;delete []curSigmaX; delete []prevSigmaZ;delete []curSigmaZ; delete []prevLambdaX;delete []curLambdaX; delete []prevLambdaZ;delete []curLambdaZ; for(int i=0;i<2;++i){ cudaFreeHost(h_c11[i]); cudaFreeHost(h_c13[i]); cudaFreeHost(h_c33[i]); cudaFreeHost(h_prevSigmaX[i]); cudaFreeHost(h_curSigmaX[i]); cudaFreeHost(h_SigmaX4[i]); cudaFreeHost(h_SigmaX5[i]); cudaFreeHost(h_prevSigmaZ[i]); cudaFreeHost(h_curSigmaZ[i]); cudaFreeHost(h_SigmaZ4[i]); cudaFreeHost(h_SigmaZ5[i]); cudaFreeHost(h_prevLambdaX[i]); cudaFreeHost(h_curLambdaX[i]); cudaFreeHost(h_LambdaX4[i]); cudaFreeHost(h_LambdaX5[i]); cudaFreeHost(h_prevLambdaZ[i]); cudaFreeHost(h_curLambdaZ[i]); cudaFreeHost(h_LambdaZ4[i]); cudaFreeHost(h_LambdaZ5[i]); cudaFreeHost(h_imagei[i]); cudaFreeHost(h_imageo[i]); } for(int gpu=0;gpu<NGPU;gpu++){ cudaSetDevice(GPUs[gpu]); cudaFree(d_damping[gpu]); for(int i=0;i<nbuffSigma;++i){ for(int j=0;j<4;++j){ cudaFree(d_SigmaX[gpu][i][j]); cudaFree(d_SigmaZ[gpu][i][j]); } delete []d_SigmaX[gpu][i]; delete []d_SigmaZ[gpu][i]; } delete []d_SigmaX[gpu]; delete []d_SigmaZ[gpu]; for(int i=0;i<nbuffSigma;++i){ for(int j=0;j<4;++j){ cudaFree(d_LambdaX[gpu][i][j]); cudaFree(d_LambdaZ[gpu][i][j]); } delete []d_LambdaX[gpu][i]; delete []d_LambdaZ[gpu][i]; } delete []d_LambdaX[gpu]; delete []d_LambdaZ[gpu]; for(int i=0;i<nbuffCij;++i){ cudaFree(d_c11[gpu][i]); cudaFree(d_c13[gpu][i]); cudaFree(d_c33[gpu][i]); } delete []d_c11[gpu]; delete []d_c13[gpu]; delete []d_c33[gpu]; for(int i=0;i<nbuffCij;++i){ cudaFree(d_image[gpu][i]); } delete []d_image[gpu]; delete []d_data[gpu]; if(gpu==0) cudaStreamDestroy(transfInStream[gpu]); cudaStreamDestroy(computeStream[gpu]); cudaStreamDestroy(transfOutStream[gpu]); cudaError_t e=cudaGetLastError(); if(e!=cudaSuccess) fprintf(stderr,"GPU %d dealloc error %s\n",gpu,cudaGetErrorString(e)); } delete []d_recIndex; delete []d_data; delete []d_SigmaX; delete []d_SigmaZ; delete []d_LambdaX; delete []d_LambdaZ; delete []d_c11; delete []d_c13; delete []d_c33; delete []d_image; delete []transfInStream; delete []computeStream; delete []transfOutStream; delete []damping; delete []d_damping; return; }
a42be2528a45f81409a73a2d535681ea5318ea73.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "caffe2/core/context_gpu.h"
#include "caffe2/operators/loss_op.h"

namespace caffe2 {
namespace {
template <typename T>
__global__ void ALGKernel(const int N, const T* dY, T* dX) {
  const T value = (*dY) / N;
  CUDA_1D_KERNEL_LOOP(i, N) {
    dX[i] = value;
  }
}
}  // namespace

class AveragedLossGradientGPUSpecialization final
    : public Operator<CUDAContext> {
 public:
  AveragedLossGradientGPUSpecialization(
      const OperatorDef& operator_def, Workspace* ws)
      : Operator<CUDAContext>(operator_def, ws) {}
  ~AveragedLossGradientGPUSpecialization() {}
  USE_OPERATOR_FUNCTIONS(CUDAContext);
  bool RunOnDevice() override {
    auto& X = Input(0);
    auto& dY = Input(1);
    DCHECK_EQ(dY.size(), 1);
    auto* dX = Output(0);
    dX->ResizeLike(X);
    hipLaunchKernelGGL((ALGKernel<float>), dim3(CAFFE_GET_BLOCKS(X.size())),
                       dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(),
                       X.size(), dY.data<float>(), dX->mutable_data<float>());
    return true;
  }
};

namespace {
REGISTER_CUDA_OPERATOR(AveragedLoss, AveragedLoss<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(AveragedLossGradient,
                       AveragedLossGradientGPUSpecialization);
}  // namespace
}  // namespace caffe2
a42be2528a45f81409a73a2d535681ea5318ea73.cu
#include "caffe2/core/context_gpu.h"
#include "caffe2/operators/loss_op.h"

namespace caffe2 {
namespace {
template <typename T>
__global__ void ALGKernel(const int N, const T* dY, T* dX) {
  const T value = (*dY) / N;
  CUDA_1D_KERNEL_LOOP(i, N) {
    dX[i] = value;
  }
}
}  // namespace

class AveragedLossGradientGPUSpecialization final
    : public Operator<CUDAContext> {
 public:
  AveragedLossGradientGPUSpecialization(
      const OperatorDef& operator_def, Workspace* ws)
      : Operator<CUDAContext>(operator_def, ws) {}
  ~AveragedLossGradientGPUSpecialization() {}
  USE_OPERATOR_FUNCTIONS(CUDAContext);
  bool RunOnDevice() override {
    auto& X = Input(0);
    auto& dY = Input(1);
    DCHECK_EQ(dY.size(), 1);
    auto* dX = Output(0);
    dX->ResizeLike(X);
    ALGKernel<float><<<CAFFE_GET_BLOCKS(X.size()), CAFFE_CUDA_NUM_THREADS,
                       0, context_.cuda_stream()>>>(
        X.size(), dY.data<float>(), dX->mutable_data<float>());
    return true;
  }
};

namespace {
REGISTER_CUDA_OPERATOR(AveragedLoss, AveragedLoss<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(AveragedLossGradient,
                       AveragedLossGradientGPUSpecialization);
}  // namespace
}  // namespace caffe2
4a371d9fd09b75d5187c5166991386d601e70da7.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <ATen/ATen.h> #include <ATen/native/hip/GridSampler.cuh> #include <ATen/native/hip/UpSample.cuh> #include <ATen/hip/HIPContext.h> #include <ATen/hip/detail/TensorInfo.cuh> #include <ATen/hip/detail/IndexUtils.cuh> #include <ATen/hip/detail/KernelUtils.h> #include <c10/macros/Macros.h> namespace at { namespace native { using namespace at::cuda::detail; using at::native::detail::GridSamplerInterpolation; using at::native::detail::GridSamplerPadding; namespace { template <typename scalar_t, typename index_t> C10_LAUNCH_BOUNDS_1(256) __global__ void grid_sampler_2d_kernel( const index_t nthreads, TensorInfo<scalar_t, index_t> input, TensorInfo<scalar_t, index_t> grid, TensorInfo<scalar_t, index_t> output, const GridSamplerInterpolation interpolation_mode, const GridSamplerPadding padding_mode, bool align_corners) { index_t C = input.sizes[1]; index_t inp_H = input.sizes[2]; index_t inp_W = input.sizes[3]; index_t out_H = grid.sizes[1]; index_t out_W = grid.sizes[2]; index_t inp_sN = input.strides[0]; index_t inp_sC = input.strides[1]; index_t inp_sH = input.strides[2]; index_t inp_sW = input.strides[3]; index_t grid_sN = grid.strides[0]; index_t grid_sH = grid.strides[1]; index_t grid_sW = grid.strides[2]; index_t grid_sCoor = grid.strides[3]; index_t out_sN = output.strides[0]; index_t out_sC = output.strides[1]; index_t out_sH = output.strides[2]; index_t out_sW = output.strides[3]; CUDA_KERNEL_LOOP_TYPE(index, nthreads, index_t) { const index_t w = index % out_W; const index_t h = (index / out_W) % out_H; const index_t n = index / (out_H * out_W); const index_t grid_offset = n * grid_sN + h * grid_sH + w * grid_sW; // get the corresponding input x, y co-ordinates from grid scalar_t x = grid.data[grid_offset]; scalar_t y = grid.data[grid_offset + grid_sCoor]; scalar_t ix = grid_sampler_compute_source_index(x, inp_W, padding_mode, align_corners); scalar_t iy = grid_sampler_compute_source_index(y, inp_H, padding_mode, align_corners); if (interpolation_mode == GridSamplerInterpolation::Bilinear) { // get NE, NW, SE, SW pixel values from (x, y) index_t ix_nw = static_cast<index_t>(::floor(ix)); index_t iy_nw = static_cast<index_t>(::floor(iy)); index_t ix_ne = ix_nw + 1; index_t iy_ne = iy_nw; index_t ix_sw = ix_nw; index_t iy_sw = iy_nw + 1; index_t ix_se = ix_nw + 1; index_t iy_se = iy_nw + 1; // get surfaces to each neighbor: scalar_t nw = (ix_se - ix) * (iy_se - iy); scalar_t ne = (ix - ix_sw) * (iy_sw - iy); scalar_t sw = (ix_ne - ix) * (iy - iy_ne); scalar_t se = (ix - ix_nw) * (iy - iy_nw); // calculate bilinear weighted pixel value and set output pixel auto inp_ptr_NC = input.data + n * inp_sN; auto out_ptr_NCHW = output.data + n * out_sN + h * out_sH + w * out_sW; for (index_t c = 0; c < C; ++c, inp_ptr_NC += inp_sC, out_ptr_NCHW += out_sC) { *out_ptr_NCHW = static_cast<scalar_t>(0); if (within_bounds_2d(iy_nw, ix_nw, inp_H, inp_W)) { *out_ptr_NCHW += inp_ptr_NC[iy_nw * inp_sH + ix_nw * inp_sW] * nw; } if (within_bounds_2d(iy_ne, ix_ne, inp_H, inp_W)) { *out_ptr_NCHW += inp_ptr_NC[iy_ne * inp_sH + ix_ne * inp_sW] * ne; } if (within_bounds_2d(iy_sw, ix_sw, inp_H, inp_W)) { *out_ptr_NCHW += inp_ptr_NC[iy_sw * inp_sH + ix_sw * inp_sW] * sw; } if (within_bounds_2d(iy_se, ix_se, inp_H, inp_W)) { *out_ptr_NCHW += inp_ptr_NC[iy_se * inp_sH + ix_se * inp_sW] * se; } } } else if (interpolation_mode == GridSamplerInterpolation::Nearest) { index_t ix_nearest = static_cast<index_t>(::round(ix)); index_t 
iy_nearest = static_cast<index_t>(::round(iy)); // assign nearest neighor pixel value to output pixel auto inp_ptr_NC = input.data + n * inp_sN; auto out_ptr_NCHW = output.data + n * out_sN + h * out_sH + w * out_sW; for (index_t c = 0; c < C; ++c, inp_ptr_NC += inp_sC, out_ptr_NCHW += out_sC) { if (within_bounds_2d(iy_nearest, ix_nearest, inp_H, inp_W)) { *out_ptr_NCHW = inp_ptr_NC[iy_nearest * inp_sH + ix_nearest * inp_sW]; } else { *out_ptr_NCHW = static_cast<scalar_t>(0); } } } else if (interpolation_mode == GridSamplerInterpolation::Bicubic) { ix = grid_sampler_unnormalize(x, inp_W, align_corners); iy = grid_sampler_unnormalize(y, inp_H, align_corners); scalar_t ix_nw = ::floor(ix); scalar_t iy_nw = ::floor(iy); const scalar_t tx = ix - ix_nw; const scalar_t ty = iy - iy_nw; auto inp_ptr_NC = input.data + n * inp_sN; auto out_ptr_NCHW = output.data + n * out_sN + h * out_sH + w * out_sW; for (index_t c = 0; c < C; ++c, inp_ptr_NC += inp_sC, out_ptr_NCHW += out_sC) { scalar_t coefficients[4]; #pragma unroll 4 for (index_t i = 0; i < 4; ++i) { coefficients[i] = cubic_interp1d( get_value_bounded<scalar_t>(inp_ptr_NC, ix_nw - 1, iy_nw - 1 + i, inp_W, inp_H, inp_sW, inp_sH, padding_mode, align_corners), get_value_bounded<scalar_t>(inp_ptr_NC, ix_nw + 0, iy_nw - 1 + i, inp_W, inp_H, inp_sW, inp_sH, padding_mode, align_corners), get_value_bounded<scalar_t>(inp_ptr_NC, ix_nw + 1, iy_nw - 1 + i, inp_W, inp_H, inp_sW, inp_sH, padding_mode, align_corners), get_value_bounded<scalar_t>(inp_ptr_NC, ix_nw + 2, iy_nw - 1 + i, inp_W, inp_H, inp_sW, inp_sH, padding_mode, align_corners), tx); } *out_ptr_NCHW = cubic_interp1d( coefficients[0], coefficients[1], coefficients[2], coefficients[3], ty); } } } } template <typename scalar_t, typename index_t> C10_LAUNCH_BOUNDS_1(512) __global__ void grid_sampler_3d_kernel( const index_t nthreads, TensorInfo<scalar_t, index_t> input, TensorInfo<scalar_t, index_t> grid, TensorInfo<scalar_t, index_t> output, const GridSamplerInterpolation interpolation_mode, const GridSamplerPadding padding_mode, bool align_corners) { index_t C = input.sizes[1]; index_t inp_D = input.sizes[2]; index_t inp_H = input.sizes[3]; index_t inp_W = input.sizes[4]; index_t out_D = grid.sizes[1]; index_t out_H = grid.sizes[2]; index_t out_W = grid.sizes[3]; index_t inp_sN = input.strides[0]; index_t inp_sC = input.strides[1]; index_t inp_sD = input.strides[2]; index_t inp_sH = input.strides[3]; index_t inp_sW = input.strides[4]; index_t grid_sN = grid.strides[0]; index_t grid_sD = grid.strides[1]; index_t grid_sH = grid.strides[2]; index_t grid_sW = grid.strides[3]; index_t grid_sCoor = grid.strides[4]; index_t out_sN = output.strides[0]; index_t out_sC = output.strides[1]; index_t out_sD = output.strides[2]; index_t out_sH = output.strides[3]; index_t out_sW = output.strides[4]; CUDA_KERNEL_LOOP_TYPE(index, nthreads, index_t) { const index_t w = index % out_W; const index_t h = (index / out_W) % out_H; const index_t d = (index / (out_H * out_W)) % out_D; const index_t n = index / (out_D * out_H * out_W); const index_t grid_offset = n * grid_sN + d * grid_sD + h * grid_sH + w * grid_sW; // get the corresponding input x, y, z co-ordinates from grid scalar_t ix = grid.data[grid_offset]; scalar_t iy = grid.data[grid_offset + grid_sCoor]; scalar_t iz = grid.data[grid_offset + 2 * grid_sCoor]; ix = grid_sampler_compute_source_index(ix, inp_W, padding_mode, align_corners); iy = grid_sampler_compute_source_index(iy, inp_H, padding_mode, align_corners); iz = grid_sampler_compute_source_index(iz, 
inp_D, padding_mode, align_corners); if (interpolation_mode == GridSamplerInterpolation::Bilinear) { // get corner pixel values from (x, y, z) // for 4d, we used north-east-south-west // for 5d, we add top-bottom index_t ix_tnw = static_cast<index_t>(::floor(ix)); index_t iy_tnw = static_cast<index_t>(::floor(iy)); index_t iz_tnw = static_cast<index_t>(::floor(iz)); index_t ix_tne = ix_tnw + 1; index_t iy_tne = iy_tnw; index_t iz_tne = iz_tnw; index_t ix_tsw = ix_tnw; index_t iy_tsw = iy_tnw + 1; index_t iz_tsw = iz_tnw; index_t ix_tse = ix_tnw + 1; index_t iy_tse = iy_tnw + 1; index_t iz_tse = iz_tnw; index_t ix_bnw = ix_tnw; index_t iy_bnw = iy_tnw; index_t iz_bnw = iz_tnw + 1; index_t ix_bne = ix_tnw + 1; index_t iy_bne = iy_tnw; index_t iz_bne = iz_tnw + 1; index_t ix_bsw = ix_tnw; index_t iy_bsw = iy_tnw + 1; index_t iz_bsw = iz_tnw + 1; index_t ix_bse = ix_tnw + 1; index_t iy_bse = iy_tnw + 1; index_t iz_bse = iz_tnw + 1; // get surfaces to each neighbor: scalar_t tnw = (ix_bse - ix) * (iy_bse - iy) * (iz_bse - iz); scalar_t tne = (ix - ix_bsw) * (iy_bsw - iy) * (iz_bsw - iz); scalar_t tsw = (ix_bne - ix) * (iy - iy_bne) * (iz_bne - iz); scalar_t tse = (ix - ix_bnw) * (iy - iy_bnw) * (iz_bnw - iz); scalar_t bnw = (ix_tse - ix) * (iy_tse - iy) * (iz - iz_tse); scalar_t bne = (ix - ix_tsw) * (iy_tsw - iy) * (iz - iz_tsw); scalar_t bsw = (ix_tne - ix) * (iy - iy_tne) * (iz - iz_tne); scalar_t bse = (ix - ix_tnw) * (iy - iy_tnw) * (iz - iz_tnw); auto inp_ptr_NC = input.data + n * inp_sN; auto out_ptr_NCDHW = output.data + n * out_sN + d * out_sD + h * out_sH + w * out_sW; for (index_t c = 0; c < C; ++c, inp_ptr_NC += inp_sC, out_ptr_NCDHW += out_sC) { // (c, iz_tnw, iy_tnw, ix_tnw) * tnw + (c, iz_tne, iy_tne, ix_tne) * tne // + (c, iz_tsw, iy_tsw, ix_tsw) * tsw + (c, iz_tse, iy_tse, ix_tse) * tse // + (c, iz_bnw, iy_bnw, ix_bnw) * bnw + (c, iz_bne, iy_bne, ix_bne) * bne // + (c, iz_bsw, iy_bsw, ix_bsw) * bsw + (c, iz_bse, iy_bse, ix_bse) * bse *out_ptr_NCDHW = static_cast<scalar_t>(0); if (within_bounds_3d(iz_tnw, iy_tnw, ix_tnw, inp_D, inp_H, inp_W)) { *out_ptr_NCDHW += inp_ptr_NC[iz_tnw * inp_sD + iy_tnw * inp_sH + ix_tnw * inp_sW] * tnw; } if (within_bounds_3d(iz_tne, iy_tne, ix_tne, inp_D, inp_H, inp_W)) { *out_ptr_NCDHW += inp_ptr_NC[iz_tne * inp_sD + iy_tne * inp_sH + ix_tne * inp_sW] * tne; } if (within_bounds_3d(iz_tsw, iy_tsw, ix_tsw, inp_D, inp_H, inp_W)) { *out_ptr_NCDHW += inp_ptr_NC[iz_tsw * inp_sD + iy_tsw * inp_sH + ix_tsw * inp_sW] * tsw; } if (within_bounds_3d(iz_tse, iy_tse, ix_tse, inp_D, inp_H, inp_W)) { *out_ptr_NCDHW += inp_ptr_NC[iz_tse * inp_sD + iy_tse * inp_sH + ix_tse * inp_sW] * tse; } if (within_bounds_3d(iz_bnw, iy_bnw, ix_bnw, inp_D, inp_H, inp_W)) { *out_ptr_NCDHW += inp_ptr_NC[iz_bnw * inp_sD + iy_bnw * inp_sH + ix_bnw * inp_sW] * bnw; } if (within_bounds_3d(iz_bne, iy_bne, ix_bne, inp_D, inp_H, inp_W)) { *out_ptr_NCDHW += inp_ptr_NC[iz_bne * inp_sD + iy_bne * inp_sH + ix_bne * inp_sW] * bne; } if (within_bounds_3d(iz_bsw, iy_bsw, ix_bsw, inp_D, inp_H, inp_W)) { *out_ptr_NCDHW += inp_ptr_NC[iz_bsw * inp_sD + iy_bsw * inp_sH + ix_bsw * inp_sW] * bsw; } if (within_bounds_3d(iz_bse, iy_bse, ix_bse, inp_D, inp_H, inp_W)) { *out_ptr_NCDHW += inp_ptr_NC[iz_bse * inp_sD + iy_bse * inp_sH + ix_bse * inp_sW] * bse; } } } else if (interpolation_mode == GridSamplerInterpolation::Nearest) { index_t ix_nearest = static_cast<index_t>(::round(ix)); index_t iy_nearest = static_cast<index_t>(::round(iy)); index_t iz_nearest = static_cast<index_t>(::round(iz)); // assign 
nearest neighor pixel value to output pixel auto inp_ptr_NC = input.data + n * inp_sN; auto out_ptr_NCDHW = output.data + n * out_sN + d * out_sD + h * out_sH + w * out_sW; for (index_t c = 0; c < C; ++c, inp_ptr_NC += inp_sC, out_ptr_NCDHW += out_sC) { if (within_bounds_3d(iz_nearest, iy_nearest, ix_nearest, inp_D, inp_H, inp_W)) { *out_ptr_NCDHW = inp_ptr_NC[iz_nearest * inp_sD + iy_nearest * inp_sH + ix_nearest * inp_sW]; } else { *out_ptr_NCDHW = static_cast<scalar_t>(0); } } } } } // Note [Passing pointer and offset to fastAtomicAdd] // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // For its internal bounds checking, fastAtomicAdd needs to know where the destination address // lies relative to the entire tensor, so we pass the base grad_input.data and full offset information, // including batch * channel offset (NC_offset). template <typename scalar_t, typename index_t> C10_LAUNCH_BOUNDS_1(256) __global__ void grid_sampler_2d_backward_kernel( const index_t nthreads, TensorInfo<scalar_t, index_t> grad_output, TensorInfo<scalar_t, index_t> input, TensorInfo<scalar_t, index_t> grid, TensorInfo<scalar_t, index_t> grad_input, // initialized to zeros TensorInfo<scalar_t, index_t> grad_grid, // initialized to empty const GridSamplerInterpolation interpolation_mode, const GridSamplerPadding padding_mode, bool align_corners, const index_t grad_input_memory_span) { index_t C = input.sizes[1]; index_t inp_H = input.sizes[2]; index_t inp_W = input.sizes[3]; index_t out_H = grid.sizes[1]; index_t out_W = grid.sizes[2]; index_t inp_sN = input.strides[0]; index_t inp_sC = input.strides[1]; index_t inp_sH = input.strides[2]; index_t inp_sW = input.strides[3]; index_t grid_sN = grid.strides[0]; index_t grid_sH = grid.strides[1]; index_t grid_sW = grid.strides[2]; index_t grid_sCoor = grid.strides[3]; index_t gOut_sN = grad_output.strides[0]; index_t gOut_sC = grad_output.strides[1]; index_t gOut_sH = grad_output.strides[2]; index_t gOut_sW = grad_output.strides[3]; index_t gInp_sN = grad_input.strides[0]; index_t gInp_sC = grad_input.strides[1]; index_t gInp_sH = grad_input.strides[2]; index_t gInp_sW = grad_input.strides[3]; index_t gGrid_sW = grad_grid.strides[2]; CUDA_KERNEL_LOOP_TYPE(index, nthreads, index_t) { const index_t w = index % out_W; const index_t h = (index / out_W) % out_H; const index_t n = index / (out_H * out_W); const auto grid_offset = n * grid_sN + h * grid_sH + w * grid_sW; // get the corresponding input x, y co-ordinates from grid scalar_t x = grid.data[grid_offset]; scalar_t y = grid.data[grid_offset + grid_sCoor]; // multipliers for gradients on ix and iy scalar_t gix_mult, giy_mult; scalar_t ix = grid_sampler_compute_source_index_set_grad(x, inp_W, padding_mode, align_corners, &gix_mult); scalar_t iy = grid_sampler_compute_source_index_set_grad(y, inp_H, padding_mode, align_corners, &giy_mult); if (interpolation_mode == GridSamplerInterpolation::Bilinear) { // get NE, NW, SE, SW pixel values from (x, y) index_t ix_nw = static_cast<index_t>(::floor(ix)); index_t iy_nw = static_cast<index_t>(::floor(iy)); index_t ix_ne = ix_nw + 1; index_t iy_ne = iy_nw; index_t ix_sw = ix_nw; index_t iy_sw = iy_nw + 1; index_t ix_se = ix_nw + 1; index_t iy_se = iy_nw + 1; // get surfaces to each neighbor: scalar_t nw = (ix_se - ix) * (iy_se - iy); scalar_t ne = (ix - ix_sw) * (iy_sw - iy); scalar_t sw = (ix_ne - ix) * (iy - iy_ne); scalar_t se = (ix - ix_nw) * (iy - iy_nw); scalar_t gix = static_cast<scalar_t>(0), giy = static_cast<scalar_t>(0); scalar_t *gOut_ptr_NCHW = 
grad_output.data + n * gOut_sN + h * gOut_sH + w * gOut_sW; index_t NC_offset = n * gInp_sN; scalar_t *inp_ptr_NC = input.data + n * inp_sN; for (index_t c = 0; c < C; ++c, inp_ptr_NC += inp_sC, NC_offset += gInp_sC, gOut_ptr_NCHW += gOut_sC) { scalar_t gOut = *gOut_ptr_NCHW; // calculate and set grad_input. See Note [Passing pointer and offset to fastAtomicAdd]. safe_add_2d(grad_input.data, iy_nw, ix_nw, gInp_sH, gInp_sW, inp_H, inp_W, nw * gOut, NC_offset, grad_input_memory_span); safe_add_2d(grad_input.data, iy_ne, ix_ne, gInp_sH, gInp_sW, inp_H, inp_W, ne * gOut, NC_offset, grad_input_memory_span); safe_add_2d(grad_input.data, iy_sw, ix_sw, gInp_sH, gInp_sW, inp_H, inp_W, sw * gOut, NC_offset, grad_input_memory_span); safe_add_2d(grad_input.data, iy_se, ix_se, gInp_sH, gInp_sW, inp_H, inp_W, se * gOut, NC_offset, grad_input_memory_span); // calculate grad_grid if (within_bounds_2d(iy_nw, ix_nw, inp_H, inp_W)) { scalar_t nw_val = inp_ptr_NC[iy_nw * inp_sH + ix_nw * inp_sW]; gix -= nw_val * (iy_se - iy) * gOut; giy -= nw_val * (ix_se - ix) * gOut; } if (within_bounds_2d(iy_ne, ix_ne, inp_H, inp_W)) { scalar_t ne_val = inp_ptr_NC[iy_ne * inp_sH + ix_ne * inp_sW]; gix += ne_val * (iy_sw - iy) * gOut; giy -= ne_val * (ix - ix_sw) * gOut; } if (within_bounds_2d(iy_sw, ix_sw, inp_H, inp_W)) { scalar_t sw_val = inp_ptr_NC[iy_sw * inp_sH + ix_sw * inp_sW]; gix -= sw_val * (iy - iy_ne) * gOut; giy += sw_val * (ix_ne - ix) * gOut; } if (within_bounds_2d(iy_se, ix_se, inp_H, inp_W)) { scalar_t se_val = inp_ptr_NC[iy_se * inp_sH + ix_se * inp_sW]; gix += se_val * (iy - iy_nw) * gOut; giy += se_val * (ix - ix_nw) * gOut; } } // assuming grad_grid is contiguous // thus we can // 1. use index with gGrid_sW to directly compute gGrid_ptr_NHW // 2. directly assign to gGrid_ptr_NHW[0], gGrid_ptr_NHW[1] scalar_t *gGrid_ptr_NHW = grad_grid.data + index * gGrid_sW; gGrid_ptr_NHW[0] = gix_mult * gix; gGrid_ptr_NHW[1] = giy_mult * giy; } else if (interpolation_mode == GridSamplerInterpolation::Nearest) { index_t ix_nearest = static_cast<index_t>(::round(ix)); index_t iy_nearest = static_cast<index_t>(::round(iy)); // assign nearest neighor pixel value to output pixel scalar_t *gOut_ptr_NCHW = grad_output.data + n * gOut_sN + h * gOut_sH + w * gOut_sW; index_t NC_offset = n * gInp_sN; for (index_t c = 0; c < C; ++c, NC_offset += gInp_sC, gOut_ptr_NCHW += gOut_sC) { // calculate and set grad_input. See Note [Passing pointer and offset to fastAtomicAdd]. safe_add_2d(grad_input.data, iy_nearest, ix_nearest, gInp_sH, gInp_sW, inp_H, inp_W, *gOut_ptr_NCHW, NC_offset, grad_input_memory_span); } // assuming grad_grid is contiguous // thus we can // 1. use index with gGrid_sW to directly compute gGrid_ptr_NHW // 2. 
directly assign to gGrid_ptr_NHW[0], gGrid_ptr_NHW[1] scalar_t *gGrid_ptr_NHW = grad_grid.data + index * gGrid_sW; gGrid_ptr_NHW[0] = static_cast<scalar_t>(0); gGrid_ptr_NHW[1] = static_cast<scalar_t>(0); } else if (interpolation_mode == GridSamplerInterpolation::Bicubic) { ix = grid_sampler_unnormalize_set_grad(x, inp_W, align_corners, &gix_mult); iy = grid_sampler_unnormalize_set_grad(y, inp_H, align_corners, &giy_mult); scalar_t ix_nw = ::floor(ix); scalar_t iy_nw = ::floor(iy); const scalar_t tx = ix - ix_nw; const scalar_t ty = iy - iy_nw; scalar_t x_coeffs[4]; scalar_t y_coeffs[4]; scalar_t x_coeffs_grad[4]; scalar_t y_coeffs_grad[4]; get_cubic_upsampling_coefficients<scalar_t>(x_coeffs, tx); get_cubic_upsampling_coefficients<scalar_t>(y_coeffs, ty); get_cubic_coefficients_grad<scalar_t>(x_coeffs_grad, tx); get_cubic_coefficients_grad<scalar_t>(y_coeffs_grad, ty); scalar_t gix = static_cast<scalar_t>(0); scalar_t giy = static_cast<scalar_t>(0); scalar_t *gOut_ptr_NCHW = grad_output.data + n * gOut_sN + h * gOut_sH + w * gOut_sW; index_t NC_offset = n * gInp_sN; scalar_t *inp_ptr_NC = input.data + n * inp_sN; for (index_t c = 0; c < C; ++c, gOut_ptr_NCHW += gOut_sC, NC_offset += gInp_sC, inp_ptr_NC+= inp_sC) { scalar_t gOut = *gOut_ptr_NCHW; #pragma unroll 4 for (index_t i = 0; i < 4; ++i) { #pragma unroll 4 for (index_t j = 0; j < 4; ++j) { // set input gradient. See Note [Passing pointer and offset to fastAtomicAdd]. add_value_bounded<scalar_t>(grad_input.data, ix_nw - 1 + i, iy_nw - 1 + j, inp_W, inp_H, gInp_sW, gInp_sH, gOut * x_coeffs[i] * y_coeffs[j], padding_mode, align_corners, NC_offset, grad_input_memory_span); // set grid gradient scalar_t val = get_value_bounded<scalar_t>(inp_ptr_NC, ix_nw - 1 + i, iy_nw - 1 + j, inp_W, inp_H, inp_sW, inp_sH, padding_mode, align_corners); gix -= val * x_coeffs_grad[i] * y_coeffs[j] * gOut; giy -= val * y_coeffs_grad[j] * x_coeffs[i] * gOut; } } } scalar_t *gGrid_ptr_NHW = grad_grid.data + index * gGrid_sW; gGrid_ptr_NHW[0] = gix_mult * gix; gGrid_ptr_NHW[1] = giy_mult * giy; } } } template <typename scalar_t, typename index_t> C10_LAUNCH_BOUNDS_1(256) __global__ void grid_sampler_3d_backward_kernel( const index_t nthreads, TensorInfo<scalar_t, index_t> grad_output, TensorInfo<scalar_t, index_t> input, TensorInfo<scalar_t, index_t> grid, TensorInfo<scalar_t, index_t> grad_input, // initialized to zeros TensorInfo<scalar_t, index_t> grad_grid, // initialized to empty const GridSamplerInterpolation interpolation_mode, const GridSamplerPadding padding_mode, bool align_corners, const index_t grad_input_memory_span) { index_t C = input.sizes[1]; index_t inp_D = input.sizes[2]; index_t inp_H = input.sizes[3]; index_t inp_W = input.sizes[4]; index_t out_D = grid.sizes[1]; index_t out_H = grid.sizes[2]; index_t out_W = grid.sizes[3]; index_t inp_sN = input.strides[0]; index_t inp_sC = input.strides[1]; index_t inp_sD = input.strides[2]; index_t inp_sH = input.strides[3]; index_t inp_sW = input.strides[4]; index_t grid_sN = grid.strides[0]; index_t grid_sD = grid.strides[1]; index_t grid_sH = grid.strides[2]; index_t grid_sW = grid.strides[3]; index_t grid_sCoor = grid.strides[4]; index_t gOut_sN = grad_output.strides[0]; index_t gOut_sC = grad_output.strides[1]; index_t gOut_sD = grad_output.strides[2]; index_t gOut_sH = grad_output.strides[3]; index_t gOut_sW = grad_output.strides[4]; index_t gInp_sN = grad_input.strides[0]; index_t gInp_sC = grad_input.strides[1]; index_t gInp_sD = grad_input.strides[2]; index_t gInp_sH = grad_input.strides[3]; 
index_t gInp_sW = grad_input.strides[4]; index_t gGrid_sW = grad_grid.strides[3]; CUDA_KERNEL_LOOP_TYPE(index, nthreads, index_t) { const index_t w = index % out_W; const index_t h = (index / out_W) % out_H; const index_t d = (index / (out_H * out_W)) % out_D; const index_t n = index / (out_D * out_H * out_W); const auto grid_offset = n * grid_sN + d * grid_sD + h * grid_sH + w * grid_sW; // get the corresponding input x, y, z co-ordinates from grid scalar_t ix = grid.data[grid_offset]; scalar_t iy = grid.data[grid_offset + grid_sCoor]; scalar_t iz = grid.data[grid_offset + 2 * grid_sCoor]; // multipliers for gradients on ix, iy, and iz scalar_t gix_mult, giy_mult, giz_mult; ix = grid_sampler_compute_source_index_set_grad(ix, inp_W, padding_mode, align_corners, &gix_mult); iy = grid_sampler_compute_source_index_set_grad(iy, inp_H, padding_mode, align_corners, &giy_mult); iz = grid_sampler_compute_source_index_set_grad(iz, inp_D, padding_mode, align_corners, &giz_mult); if (interpolation_mode == GridSamplerInterpolation::Bilinear) { // get corner pixel values from (x, y, z) // for 4d, we used north-east-south-west // for 5d, we add top-bottom index_t ix_tnw = static_cast<index_t>(::floor(ix)); index_t iy_tnw = static_cast<index_t>(::floor(iy)); index_t iz_tnw = static_cast<index_t>(::floor(iz)); index_t ix_tne = ix_tnw + 1; index_t iy_tne = iy_tnw; index_t iz_tne = iz_tnw; index_t ix_tsw = ix_tnw; index_t iy_tsw = iy_tnw + 1; index_t iz_tsw = iz_tnw; index_t ix_tse = ix_tnw + 1; index_t iy_tse = iy_tnw + 1; index_t iz_tse = iz_tnw; index_t ix_bnw = ix_tnw; index_t iy_bnw = iy_tnw; index_t iz_bnw = iz_tnw + 1; index_t ix_bne = ix_tnw + 1; index_t iy_bne = iy_tnw; index_t iz_bne = iz_tnw + 1; index_t ix_bsw = ix_tnw; index_t iy_bsw = iy_tnw + 1; index_t iz_bsw = iz_tnw + 1; index_t ix_bse = ix_tnw + 1; index_t iy_bse = iy_tnw + 1; index_t iz_bse = iz_tnw + 1; // get surfaces to each neighbor: scalar_t tnw = (ix_bse - ix) * (iy_bse - iy) * (iz_bse - iz); scalar_t tne = (ix - ix_bsw) * (iy_bsw - iy) * (iz_bsw - iz); scalar_t tsw = (ix_bne - ix) * (iy - iy_bne) * (iz_bne - iz); scalar_t tse = (ix - ix_bnw) * (iy - iy_bnw) * (iz_bnw - iz); scalar_t bnw = (ix_tse - ix) * (iy_tse - iy) * (iz - iz_tse); scalar_t bne = (ix - ix_tsw) * (iy_tsw - iy) * (iz - iz_tsw); scalar_t bsw = (ix_tne - ix) * (iy - iy_tne) * (iz - iz_tne); scalar_t bse = (ix - ix_tnw) * (iy - iy_tnw) * (iz - iz_tnw); scalar_t gix = static_cast<scalar_t>(0), giy = static_cast<scalar_t>(0), giz = static_cast<scalar_t>(0); scalar_t *gOut_ptr_NCDHW = grad_output.data + n * gOut_sN + d * gOut_sD + h * gOut_sH + w * gOut_sW; index_t NC_offset = n * gInp_sN; scalar_t *inp_ptr_NC = input.data + n * inp_sN; // calculate bilinear weighted pixel value and set output pixel for (index_t c = 0; c < C; ++c, gOut_ptr_NCDHW += gOut_sC, NC_offset += gInp_sC, inp_ptr_NC += inp_sC) { scalar_t gOut = *gOut_ptr_NCDHW; // calculate and set grad_input. See Note [Passing pointer and offset to fastAtomicAdd]. 
safe_add_3d(grad_input.data, iz_tnw, iy_tnw, ix_tnw, gInp_sD, gInp_sH, gInp_sW, inp_D, inp_H, inp_W, tnw * gOut, NC_offset, grad_input_memory_span); safe_add_3d(grad_input.data, iz_tne, iy_tne, ix_tne, gInp_sD, gInp_sH, gInp_sW, inp_D, inp_H, inp_W, tne * gOut, NC_offset, grad_input_memory_span); safe_add_3d(grad_input.data, iz_tsw, iy_tsw, ix_tsw, gInp_sD, gInp_sH, gInp_sW, inp_D, inp_H, inp_W, tsw * gOut, NC_offset, grad_input_memory_span); safe_add_3d(grad_input.data, iz_tse, iy_tse, ix_tse, gInp_sD, gInp_sH, gInp_sW, inp_D, inp_H, inp_W, tse * gOut, NC_offset, grad_input_memory_span); safe_add_3d(grad_input.data, iz_bnw, iy_bnw, ix_bnw, gInp_sD, gInp_sH, gInp_sW, inp_D, inp_H, inp_W, bnw * gOut, NC_offset, grad_input_memory_span); safe_add_3d(grad_input.data, iz_bne, iy_bne, ix_bne, gInp_sD, gInp_sH, gInp_sW, inp_D, inp_H, inp_W, bne * gOut, NC_offset, grad_input_memory_span); safe_add_3d(grad_input.data, iz_bsw, iy_bsw, ix_bsw, gInp_sD, gInp_sH, gInp_sW, inp_D, inp_H, inp_W, bsw * gOut, NC_offset, grad_input_memory_span); safe_add_3d(grad_input.data, iz_bse, iy_bse, ix_bse, gInp_sD, gInp_sH, gInp_sW, inp_D, inp_H, inp_W, bse * gOut, NC_offset, grad_input_memory_span); // calculate grad_grid if (within_bounds_3d(iz_tnw, iy_tnw, ix_tnw, inp_D, inp_H, inp_W)) { scalar_t tnw_val = inp_ptr_NC[iz_tnw * inp_sD + iy_tnw * inp_sH + ix_tnw * inp_sW]; gix -= tnw_val * (iy_bse - iy) * (iz_bse - iz) * gOut; giy -= tnw_val * (ix_bse - ix) * (iz_bse - iz) * gOut; giz -= tnw_val * (ix_bse - ix) * (iy_bse - iy) * gOut; } if (within_bounds_3d(iz_tne, iy_tne, ix_tne, inp_D, inp_H, inp_W)) { scalar_t tne_val = inp_ptr_NC[iz_tne * inp_sD + iy_tne * inp_sH + ix_tne * inp_sW]; gix += tne_val * (iy_bsw - iy) * (iz_bsw - iz) * gOut; giy -= tne_val * (ix - ix_bsw) * (iz_bsw - iz) * gOut; giz -= tne_val * (ix - ix_bsw) * (iy_bsw - iy) * gOut; } if (within_bounds_3d(iz_tsw, iy_tsw, ix_tsw, inp_D, inp_H, inp_W)) { scalar_t tsw_val = inp_ptr_NC[iz_tsw * inp_sD + iy_tsw * inp_sH + ix_tsw * inp_sW]; gix -= tsw_val * (iy - iy_bne) * (iz_bne - iz) * gOut; giy += tsw_val * (ix_bne - ix) * (iz_bne - iz) * gOut; giz -= tsw_val * (ix_bne - ix) * (iy - iy_bne) * gOut; } if (within_bounds_3d(iz_tse, iy_tse, ix_tse, inp_D, inp_H, inp_W)) { scalar_t tse_val = inp_ptr_NC[iz_tse * inp_sD + iy_tse * inp_sH + ix_tse * inp_sW]; gix += tse_val * (iy - iy_bnw) * (iz_bnw - iz) * gOut; giy += tse_val * (ix - ix_bnw) * (iz_bnw - iz) * gOut; giz -= tse_val * (ix - ix_bnw) * (iy - iy_bnw) * gOut; } if (within_bounds_3d(iz_bnw, iy_bnw, ix_bnw, inp_D, inp_H, inp_W)) { scalar_t bnw_val = inp_ptr_NC[iz_bnw * inp_sD + iy_bnw * inp_sH + ix_bnw * inp_sW]; gix -= bnw_val * (iy_tse - iy) * (iz - iz_tse) * gOut; giy -= bnw_val * (ix_tse - ix) * (iz - iz_tse) * gOut; giz += bnw_val * (ix_tse - ix) * (iy_tse - iy) * gOut; } if (within_bounds_3d(iz_bne, iy_bne, ix_bne, inp_D, inp_H, inp_W)) { scalar_t bne_val = inp_ptr_NC[iz_bne * inp_sD + iy_bne * inp_sH + ix_bne * inp_sW]; gix += bne_val * (iy_tsw - iy) * (iz - iz_tsw) * gOut; giy -= bne_val * (ix - ix_tsw) * (iz - iz_tsw) * gOut; giz += bne_val * (ix - ix_tsw) * (iy_tsw - iy) * gOut; } if (within_bounds_3d(iz_bsw, iy_bsw, ix_bsw, inp_D, inp_H, inp_W)) { scalar_t bsw_val = inp_ptr_NC[iz_bsw * inp_sD + iy_bsw * inp_sH + ix_bsw * inp_sW]; gix -= bsw_val * (iy - iy_tne) * (iz - iz_tne) * gOut; giy += bsw_val * (ix_tne - ix) * (iz - iz_tne) * gOut; giz += bsw_val * (ix_tne - ix) * (iy - iy_tne) * gOut; } if (within_bounds_3d(iz_bse, iy_bse, ix_bse, inp_D, inp_H, inp_W)) { scalar_t bse_val = 
inp_ptr_NC[iz_bse * inp_sD + iy_bse * inp_sH + ix_bse * inp_sW]; gix += bse_val * (iy - iy_tnw) * (iz - iz_tnw) * gOut; giy += bse_val * (ix - ix_tnw) * (iz - iz_tnw) * gOut; giz += bse_val * (ix - ix_tnw) * (iy - iy_tnw) * gOut; } } // assuming grad_grid is contiguous // thus we can // 1. use index with gGrid_sW to directly compute gGrid_ptr_NDHW // 2. directly assign to gGrid_ptr_NDHW[0], gGrid_ptr_NDHW[1], gGrid_ptr_NDHW[2] scalar_t *gGrid_ptr_NDHW = grad_grid.data + index * gGrid_sW; gGrid_ptr_NDHW[0] = gix_mult * gix; gGrid_ptr_NDHW[1] = giy_mult * giy; gGrid_ptr_NDHW[2] = giz_mult * giz; } else if (interpolation_mode == GridSamplerInterpolation::Nearest) { auto ix_nearest = static_cast<index_t>(::round(ix)); auto iy_nearest = static_cast<index_t>(::round(iy)); auto iz_nearest = static_cast<index_t>(::round(iz)); // assign nearest neighor pixel value to output pixel scalar_t *gOut_ptr_NCDHW = grad_output.data + n * gOut_sN + d * gOut_sD + h * gOut_sH + w * gOut_sW; index_t NC_offset = n * gInp_sN; for (index_t c = 0; c < C; ++c, gOut_ptr_NCDHW += gOut_sC, NC_offset += gInp_sC) { // calculate and set grad_input. See Note [Passing pointer and offset to fastAtomicAdd]. safe_add_3d(grad_input.data, iz_nearest, iy_nearest, ix_nearest, gInp_sD, gInp_sH, gInp_sW, inp_D, inp_H, inp_W, *gOut_ptr_NCDHW, NC_offset, grad_input_memory_span); } // assuming grad_grid is contiguous // thus we can // 1. use index with gGrid_sW to directly compute gGrid_ptr_NDHW // 2. directly assign to gGrid_ptr_NDHW[0], gGrid_ptr_NDHW[1], gGrid_ptr_NDHW[2] scalar_t *gGrid_ptr_NDHW = grad_grid.data + index * gGrid_sW; gGrid_ptr_NDHW[0] = static_cast<scalar_t>(0); gGrid_ptr_NDHW[1] = static_cast<scalar_t>(0); gGrid_ptr_NDHW[2] = static_cast<scalar_t>(0); } } } } // namespace // No shape checking needed here. See # NOTE [ grid_sampler Native Functions ]. Tensor grid_sampler_2d_cuda(const Tensor& input, const Tensor& grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners) { auto N = input.size(0); auto C = input.size(1); auto H = grid.size(1); auto W = grid.size(2); auto output = at::empty({N, C, H, W}, input.options()); int64_t count = N * H * W; if (count > 0) { AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.scalar_type(), "grid_sampler_2d_cuda", [&] { if (canUse32BitIndexMath(input) && canUse32BitIndexMath(grid) && canUse32BitIndexMath(output)) { hipLaunchKernelGGL(( grid_sampler_2d_kernel<scalar_t>) , dim3(GET_BLOCKS(count, 256)), dim3(256), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), static_cast<int>(count), getTensorInfo<scalar_t, int>(input), getTensorInfo<scalar_t, int>(grid), getTensorInfo<scalar_t, int>(output), static_cast<GridSamplerInterpolation>(interpolation_mode), static_cast<GridSamplerPadding>(padding_mode), align_corners); C10_HIP_KERNEL_LAUNCH_CHECK(); } else { hipLaunchKernelGGL(( grid_sampler_2d_kernel<scalar_t>) , dim3(GET_BLOCKS(count, 256)), dim3(256), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), count, getTensorInfo<scalar_t, int64_t>(input), getTensorInfo<scalar_t, int64_t>(grid), getTensorInfo<scalar_t, int64_t>(output), static_cast<GridSamplerInterpolation>(interpolation_mode), static_cast<GridSamplerPadding>(padding_mode), align_corners); C10_HIP_KERNEL_LAUNCH_CHECK(); } }); } return output; } // No shape checking needed here. See # NOTE [ grid_sampler Native Functions ]. 
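// Sketch only (not part of GridSampler.cuh): the [-1, 1] -> pixel-space mapping
// that grid_sampler_compute_source_index applies before any padding handling,
// written out as a plain helper so the coordinate math in the kernels above is
// easier to follow. The helper name is local to this sketch.
//   align_corners == true :  -1 maps to 0.0,  +1 maps to size - 1
//   align_corners == false:  -1 maps to -0.5, +1 maps to size - 0.5
static inline float grid_unnormalize_sketch(float coord, int size, bool align_corners) {
  return align_corners ? (coord + 1.f) / 2.f * (size - 1)
                       : ((coord + 1.f) * size - 1.f) / 2.f;
}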
Tensor grid_sampler_3d_cuda(const Tensor& input, const Tensor& grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners) { auto N = input.size(0); auto D = grid.size(1); auto H = grid.size(2); auto W = grid.size(3); auto output = at::empty({N, input.size(1), D, H, W}, input.options()); int64_t count = N * D * H * W; if (count > 0) { AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.scalar_type(), "grid_sampler_2d_cuda", [&] { if (canUse32BitIndexMath(input) && canUse32BitIndexMath(grid) && canUse32BitIndexMath(output)) { hipLaunchKernelGGL(( grid_sampler_3d_kernel<scalar_t>) , dim3(GET_BLOCKS(count, 512)), dim3(512), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), static_cast<int>(count), getTensorInfo<scalar_t, int>(input), getTensorInfo<scalar_t, int>(grid), getTensorInfo<scalar_t, int>(output), static_cast<GridSamplerInterpolation>(interpolation_mode), static_cast<GridSamplerPadding>(padding_mode), align_corners); C10_HIP_KERNEL_LAUNCH_CHECK(); } else { hipLaunchKernelGGL(( grid_sampler_3d_kernel<scalar_t>) , dim3(GET_BLOCKS(count, 512)), dim3(512), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), count, getTensorInfo<scalar_t, int64_t>(input), getTensorInfo<scalar_t, int64_t>(grid), getTensorInfo<scalar_t, int64_t>(output), static_cast<GridSamplerInterpolation>(interpolation_mode), static_cast<GridSamplerPadding>(padding_mode), align_corners); C10_HIP_KERNEL_LAUNCH_CHECK(); } }); } return output; } // No shape checking needed here. See # NOTE [ grid_sampler Native Functions ]. std::tuple<Tensor, Tensor> grid_sampler_2d_backward_cuda(const Tensor& grad_output, const Tensor& input, const Tensor& grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners) { // See Note [Writing Nondeterministic Operations] // Nondeterministic because of atomicAdd usage globalContext().alertNotDeterministic("grid_sampler_2d_backward_cuda"); auto N = input.size(0); auto H = grid.size(1); auto W = grid.size(2); auto grad_input = at::zeros_like(input, LEGACY_CONTIGUOUS_MEMORY_FORMAT); auto grad_grid = at::empty_like(grid, LEGACY_CONTIGUOUS_MEMORY_FORMAT); int64_t count = N * H * W; if (count > 0) { AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.scalar_type(), "grid_sampler_2d_backward_cuda", [&] { if (canUse32BitIndexMath(input) && canUse32BitIndexMath(grid) && canUse32BitIndexMath(grad_output)) { hipLaunchKernelGGL(( grid_sampler_2d_backward_kernel<scalar_t>) , dim3(GET_BLOCKS(count, 256)), dim3(256), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), static_cast<int>(count), getTensorInfo<scalar_t, int>(grad_output), getTensorInfo<scalar_t, int>(input), getTensorInfo<scalar_t, int>(grid), getTensorInfo<scalar_t, int>(grad_input), getTensorInfo<scalar_t, int>(grad_grid), static_cast<GridSamplerInterpolation>(interpolation_mode), static_cast<GridSamplerPadding>(padding_mode), align_corners, /*grad_input_memory_span =*/static_cast<int>(grad_input.numel())); C10_HIP_KERNEL_LAUNCH_CHECK(); } else { hipLaunchKernelGGL(( grid_sampler_2d_backward_kernel<scalar_t>) , dim3(GET_BLOCKS(count, 256)), dim3(256), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), count, getTensorInfo<scalar_t, int64_t>(grad_output), getTensorInfo<scalar_t, int64_t>(input), getTensorInfo<scalar_t, int64_t>(grid), getTensorInfo<scalar_t, int64_t>(grad_input), getTensorInfo<scalar_t, int64_t>(grad_grid), static_cast<GridSamplerInterpolation>(interpolation_mode), static_cast<GridSamplerPadding>(padding_mode), align_corners, /*grad_input_memory_span =*/grad_input.numel()); C10_HIP_KERNEL_LAUNCH_CHECK(); } }); 
} return std::make_tuple(grad_input, grad_grid); } // No shape checking needed here. See # NOTE [ grid_sampler Native Functions ]. std::tuple<Tensor, Tensor> grid_sampler_3d_backward_cuda(const Tensor& grad_output, const Tensor& input, const Tensor& grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners) { // See Note [Writing Nondeterministic Operations] // Nondeterministic because of atomicAdd usage globalContext().alertNotDeterministic("grid_sampler_3d_backward_cuda"); auto N = input.size(0); auto D = grid.size(1); auto H = grid.size(2); auto W = grid.size(3); auto grad_input = at::zeros_like(input, LEGACY_CONTIGUOUS_MEMORY_FORMAT); auto grad_grid = at::empty_like(grid, LEGACY_CONTIGUOUS_MEMORY_FORMAT); int64_t count = N * D * H * W; if (count > 0) { AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.scalar_type(), "grid_sampler_3d_backward_cuda", [&] { if (canUse32BitIndexMath(input) && canUse32BitIndexMath(grid) && canUse32BitIndexMath(grad_output)) { hipLaunchKernelGGL(( grid_sampler_3d_backward_kernel<scalar_t>) , dim3(GET_BLOCKS(count, 256)), dim3(256), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), static_cast<int>(count), getTensorInfo<scalar_t, int>(grad_output), getTensorInfo<scalar_t, int>(input), getTensorInfo<scalar_t, int>(grid), getTensorInfo<scalar_t, int>(grad_input), getTensorInfo<scalar_t, int>(grad_grid), static_cast<GridSamplerInterpolation>(interpolation_mode), static_cast<GridSamplerPadding>(padding_mode), align_corners, /*grad_input_memory_span =*/static_cast<int>(grad_input.numel())); C10_HIP_KERNEL_LAUNCH_CHECK(); } else { hipLaunchKernelGGL(( grid_sampler_3d_backward_kernel<scalar_t>) , dim3(GET_BLOCKS(count, 256)), dim3(256), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), count, getTensorInfo<scalar_t, int64_t>(grad_output), getTensorInfo<scalar_t, int64_t>(input), getTensorInfo<scalar_t, int64_t>(grid), getTensorInfo<scalar_t, int64_t>(grad_input), getTensorInfo<scalar_t, int64_t>(grad_grid), static_cast<GridSamplerInterpolation>(interpolation_mode), static_cast<GridSamplerPadding>(padding_mode), align_corners, /*grad_input_memory_span =*/grad_input.numel()); C10_HIP_KERNEL_LAUNCH_CHECK(); } }); } return std::make_tuple(grad_input, grad_grid); } }} // namespace at::native
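The hipified file above and the CUDA original that follows are otherwise line-for-line equivalents; the substantive differences are the includes, the kernel-launch spelling, the stream getter, and the launch-check macro. A minimal sketch of that correspondence, using a stand-in kernel (scale_kernel and launch_scale are hypothetical names, not taken from either file):

#include <cuda_runtime.h>

__global__ void scale_kernel(float* data, float factor, int n) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) data[i] *= factor;
}

void launch_scale(float* d_data, float factor, int n, cudaStream_t stream) {
  const int threads = 256;
  // Ceil-division over the element count; GET_BLOCKS-style helpers are built
  // around the same computation.
  const int blocks = (n + threads - 1) / threads;
  // CUDA spelling, as in the .cu file below:
  scale_kernel<<<blocks, threads, 0, stream>>>(d_data, factor, n);
  // HIP spelling, as produced by hipify in the file above:
  //   hipLaunchKernelGGL((scale_kernel), dim3(blocks), dim3(threads), 0, stream,
  //                      d_data, factor, n);
}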
4a371d9fd09b75d5187c5166991386d601e70da7.cu
#include <ATen/ATen.h> #include <ATen/native/cuda/GridSampler.cuh> #include <ATen/native/cuda/UpSample.cuh> #include <ATen/cuda/CUDAContext.h> #include <ATen/cuda/detail/TensorInfo.cuh> #include <ATen/cuda/detail/IndexUtils.cuh> #include <ATen/cuda/detail/KernelUtils.h> #include <c10/macros/Macros.h> namespace at { namespace native { using namespace at::cuda::detail; using at::native::detail::GridSamplerInterpolation; using at::native::detail::GridSamplerPadding; namespace { template <typename scalar_t, typename index_t> C10_LAUNCH_BOUNDS_1(256) __global__ void grid_sampler_2d_kernel( const index_t nthreads, TensorInfo<scalar_t, index_t> input, TensorInfo<scalar_t, index_t> grid, TensorInfo<scalar_t, index_t> output, const GridSamplerInterpolation interpolation_mode, const GridSamplerPadding padding_mode, bool align_corners) { index_t C = input.sizes[1]; index_t inp_H = input.sizes[2]; index_t inp_W = input.sizes[3]; index_t out_H = grid.sizes[1]; index_t out_W = grid.sizes[2]; index_t inp_sN = input.strides[0]; index_t inp_sC = input.strides[1]; index_t inp_sH = input.strides[2]; index_t inp_sW = input.strides[3]; index_t grid_sN = grid.strides[0]; index_t grid_sH = grid.strides[1]; index_t grid_sW = grid.strides[2]; index_t grid_sCoor = grid.strides[3]; index_t out_sN = output.strides[0]; index_t out_sC = output.strides[1]; index_t out_sH = output.strides[2]; index_t out_sW = output.strides[3]; CUDA_KERNEL_LOOP_TYPE(index, nthreads, index_t) { const index_t w = index % out_W; const index_t h = (index / out_W) % out_H; const index_t n = index / (out_H * out_W); const index_t grid_offset = n * grid_sN + h * grid_sH + w * grid_sW; // get the corresponding input x, y co-ordinates from grid scalar_t x = grid.data[grid_offset]; scalar_t y = grid.data[grid_offset + grid_sCoor]; scalar_t ix = grid_sampler_compute_source_index(x, inp_W, padding_mode, align_corners); scalar_t iy = grid_sampler_compute_source_index(y, inp_H, padding_mode, align_corners); if (interpolation_mode == GridSamplerInterpolation::Bilinear) { // get NE, NW, SE, SW pixel values from (x, y) index_t ix_nw = static_cast<index_t>(::floor(ix)); index_t iy_nw = static_cast<index_t>(::floor(iy)); index_t ix_ne = ix_nw + 1; index_t iy_ne = iy_nw; index_t ix_sw = ix_nw; index_t iy_sw = iy_nw + 1; index_t ix_se = ix_nw + 1; index_t iy_se = iy_nw + 1; // get surfaces to each neighbor: scalar_t nw = (ix_se - ix) * (iy_se - iy); scalar_t ne = (ix - ix_sw) * (iy_sw - iy); scalar_t sw = (ix_ne - ix) * (iy - iy_ne); scalar_t se = (ix - ix_nw) * (iy - iy_nw); // calculate bilinear weighted pixel value and set output pixel auto inp_ptr_NC = input.data + n * inp_sN; auto out_ptr_NCHW = output.data + n * out_sN + h * out_sH + w * out_sW; for (index_t c = 0; c < C; ++c, inp_ptr_NC += inp_sC, out_ptr_NCHW += out_sC) { *out_ptr_NCHW = static_cast<scalar_t>(0); if (within_bounds_2d(iy_nw, ix_nw, inp_H, inp_W)) { *out_ptr_NCHW += inp_ptr_NC[iy_nw * inp_sH + ix_nw * inp_sW] * nw; } if (within_bounds_2d(iy_ne, ix_ne, inp_H, inp_W)) { *out_ptr_NCHW += inp_ptr_NC[iy_ne * inp_sH + ix_ne * inp_sW] * ne; } if (within_bounds_2d(iy_sw, ix_sw, inp_H, inp_W)) { *out_ptr_NCHW += inp_ptr_NC[iy_sw * inp_sH + ix_sw * inp_sW] * sw; } if (within_bounds_2d(iy_se, ix_se, inp_H, inp_W)) { *out_ptr_NCHW += inp_ptr_NC[iy_se * inp_sH + ix_se * inp_sW] * se; } } } else if (interpolation_mode == GridSamplerInterpolation::Nearest) { index_t ix_nearest = static_cast<index_t>(::round(ix)); index_t iy_nearest = static_cast<index_t>(::round(iy)); // assign nearest neighor pixel 
value to output pixel auto inp_ptr_NC = input.data + n * inp_sN; auto out_ptr_NCHW = output.data + n * out_sN + h * out_sH + w * out_sW; for (index_t c = 0; c < C; ++c, inp_ptr_NC += inp_sC, out_ptr_NCHW += out_sC) { if (within_bounds_2d(iy_nearest, ix_nearest, inp_H, inp_W)) { *out_ptr_NCHW = inp_ptr_NC[iy_nearest * inp_sH + ix_nearest * inp_sW]; } else { *out_ptr_NCHW = static_cast<scalar_t>(0); } } } else if (interpolation_mode == GridSamplerInterpolation::Bicubic) { ix = grid_sampler_unnormalize(x, inp_W, align_corners); iy = grid_sampler_unnormalize(y, inp_H, align_corners); scalar_t ix_nw = ::floor(ix); scalar_t iy_nw = ::floor(iy); const scalar_t tx = ix - ix_nw; const scalar_t ty = iy - iy_nw; auto inp_ptr_NC = input.data + n * inp_sN; auto out_ptr_NCHW = output.data + n * out_sN + h * out_sH + w * out_sW; for (index_t c = 0; c < C; ++c, inp_ptr_NC += inp_sC, out_ptr_NCHW += out_sC) { scalar_t coefficients[4]; #pragma unroll 4 for (index_t i = 0; i < 4; ++i) { coefficients[i] = cubic_interp1d( get_value_bounded<scalar_t>(inp_ptr_NC, ix_nw - 1, iy_nw - 1 + i, inp_W, inp_H, inp_sW, inp_sH, padding_mode, align_corners), get_value_bounded<scalar_t>(inp_ptr_NC, ix_nw + 0, iy_nw - 1 + i, inp_W, inp_H, inp_sW, inp_sH, padding_mode, align_corners), get_value_bounded<scalar_t>(inp_ptr_NC, ix_nw + 1, iy_nw - 1 + i, inp_W, inp_H, inp_sW, inp_sH, padding_mode, align_corners), get_value_bounded<scalar_t>(inp_ptr_NC, ix_nw + 2, iy_nw - 1 + i, inp_W, inp_H, inp_sW, inp_sH, padding_mode, align_corners), tx); } *out_ptr_NCHW = cubic_interp1d( coefficients[0], coefficients[1], coefficients[2], coefficients[3], ty); } } } } template <typename scalar_t, typename index_t> C10_LAUNCH_BOUNDS_1(512) __global__ void grid_sampler_3d_kernel( const index_t nthreads, TensorInfo<scalar_t, index_t> input, TensorInfo<scalar_t, index_t> grid, TensorInfo<scalar_t, index_t> output, const GridSamplerInterpolation interpolation_mode, const GridSamplerPadding padding_mode, bool align_corners) { index_t C = input.sizes[1]; index_t inp_D = input.sizes[2]; index_t inp_H = input.sizes[3]; index_t inp_W = input.sizes[4]; index_t out_D = grid.sizes[1]; index_t out_H = grid.sizes[2]; index_t out_W = grid.sizes[3]; index_t inp_sN = input.strides[0]; index_t inp_sC = input.strides[1]; index_t inp_sD = input.strides[2]; index_t inp_sH = input.strides[3]; index_t inp_sW = input.strides[4]; index_t grid_sN = grid.strides[0]; index_t grid_sD = grid.strides[1]; index_t grid_sH = grid.strides[2]; index_t grid_sW = grid.strides[3]; index_t grid_sCoor = grid.strides[4]; index_t out_sN = output.strides[0]; index_t out_sC = output.strides[1]; index_t out_sD = output.strides[2]; index_t out_sH = output.strides[3]; index_t out_sW = output.strides[4]; CUDA_KERNEL_LOOP_TYPE(index, nthreads, index_t) { const index_t w = index % out_W; const index_t h = (index / out_W) % out_H; const index_t d = (index / (out_H * out_W)) % out_D; const index_t n = index / (out_D * out_H * out_W); const index_t grid_offset = n * grid_sN + d * grid_sD + h * grid_sH + w * grid_sW; // get the corresponding input x, y, z co-ordinates from grid scalar_t ix = grid.data[grid_offset]; scalar_t iy = grid.data[grid_offset + grid_sCoor]; scalar_t iz = grid.data[grid_offset + 2 * grid_sCoor]; ix = grid_sampler_compute_source_index(ix, inp_W, padding_mode, align_corners); iy = grid_sampler_compute_source_index(iy, inp_H, padding_mode, align_corners); iz = grid_sampler_compute_source_index(iz, inp_D, padding_mode, align_corners); if (interpolation_mode == 
GridSamplerInterpolation::Bilinear) { // get corner pixel values from (x, y, z) // for 4d, we used north-east-south-west // for 5d, we add top-bottom index_t ix_tnw = static_cast<index_t>(::floor(ix)); index_t iy_tnw = static_cast<index_t>(::floor(iy)); index_t iz_tnw = static_cast<index_t>(::floor(iz)); index_t ix_tne = ix_tnw + 1; index_t iy_tne = iy_tnw; index_t iz_tne = iz_tnw; index_t ix_tsw = ix_tnw; index_t iy_tsw = iy_tnw + 1; index_t iz_tsw = iz_tnw; index_t ix_tse = ix_tnw + 1; index_t iy_tse = iy_tnw + 1; index_t iz_tse = iz_tnw; index_t ix_bnw = ix_tnw; index_t iy_bnw = iy_tnw; index_t iz_bnw = iz_tnw + 1; index_t ix_bne = ix_tnw + 1; index_t iy_bne = iy_tnw; index_t iz_bne = iz_tnw + 1; index_t ix_bsw = ix_tnw; index_t iy_bsw = iy_tnw + 1; index_t iz_bsw = iz_tnw + 1; index_t ix_bse = ix_tnw + 1; index_t iy_bse = iy_tnw + 1; index_t iz_bse = iz_tnw + 1; // get surfaces to each neighbor: scalar_t tnw = (ix_bse - ix) * (iy_bse - iy) * (iz_bse - iz); scalar_t tne = (ix - ix_bsw) * (iy_bsw - iy) * (iz_bsw - iz); scalar_t tsw = (ix_bne - ix) * (iy - iy_bne) * (iz_bne - iz); scalar_t tse = (ix - ix_bnw) * (iy - iy_bnw) * (iz_bnw - iz); scalar_t bnw = (ix_tse - ix) * (iy_tse - iy) * (iz - iz_tse); scalar_t bne = (ix - ix_tsw) * (iy_tsw - iy) * (iz - iz_tsw); scalar_t bsw = (ix_tne - ix) * (iy - iy_tne) * (iz - iz_tne); scalar_t bse = (ix - ix_tnw) * (iy - iy_tnw) * (iz - iz_tnw); auto inp_ptr_NC = input.data + n * inp_sN; auto out_ptr_NCDHW = output.data + n * out_sN + d * out_sD + h * out_sH + w * out_sW; for (index_t c = 0; c < C; ++c, inp_ptr_NC += inp_sC, out_ptr_NCDHW += out_sC) { // (c, iz_tnw, iy_tnw, ix_tnw) * tnw + (c, iz_tne, iy_tne, ix_tne) * tne // + (c, iz_tsw, iy_tsw, ix_tsw) * tsw + (c, iz_tse, iy_tse, ix_tse) * tse // + (c, iz_bnw, iy_bnw, ix_bnw) * bnw + (c, iz_bne, iy_bne, ix_bne) * bne // + (c, iz_bsw, iy_bsw, ix_bsw) * bsw + (c, iz_bse, iy_bse, ix_bse) * bse *out_ptr_NCDHW = static_cast<scalar_t>(0); if (within_bounds_3d(iz_tnw, iy_tnw, ix_tnw, inp_D, inp_H, inp_W)) { *out_ptr_NCDHW += inp_ptr_NC[iz_tnw * inp_sD + iy_tnw * inp_sH + ix_tnw * inp_sW] * tnw; } if (within_bounds_3d(iz_tne, iy_tne, ix_tne, inp_D, inp_H, inp_W)) { *out_ptr_NCDHW += inp_ptr_NC[iz_tne * inp_sD + iy_tne * inp_sH + ix_tne * inp_sW] * tne; } if (within_bounds_3d(iz_tsw, iy_tsw, ix_tsw, inp_D, inp_H, inp_W)) { *out_ptr_NCDHW += inp_ptr_NC[iz_tsw * inp_sD + iy_tsw * inp_sH + ix_tsw * inp_sW] * tsw; } if (within_bounds_3d(iz_tse, iy_tse, ix_tse, inp_D, inp_H, inp_W)) { *out_ptr_NCDHW += inp_ptr_NC[iz_tse * inp_sD + iy_tse * inp_sH + ix_tse * inp_sW] * tse; } if (within_bounds_3d(iz_bnw, iy_bnw, ix_bnw, inp_D, inp_H, inp_W)) { *out_ptr_NCDHW += inp_ptr_NC[iz_bnw * inp_sD + iy_bnw * inp_sH + ix_bnw * inp_sW] * bnw; } if (within_bounds_3d(iz_bne, iy_bne, ix_bne, inp_D, inp_H, inp_W)) { *out_ptr_NCDHW += inp_ptr_NC[iz_bne * inp_sD + iy_bne * inp_sH + ix_bne * inp_sW] * bne; } if (within_bounds_3d(iz_bsw, iy_bsw, ix_bsw, inp_D, inp_H, inp_W)) { *out_ptr_NCDHW += inp_ptr_NC[iz_bsw * inp_sD + iy_bsw * inp_sH + ix_bsw * inp_sW] * bsw; } if (within_bounds_3d(iz_bse, iy_bse, ix_bse, inp_D, inp_H, inp_W)) { *out_ptr_NCDHW += inp_ptr_NC[iz_bse * inp_sD + iy_bse * inp_sH + ix_bse * inp_sW] * bse; } } } else if (interpolation_mode == GridSamplerInterpolation::Nearest) { index_t ix_nearest = static_cast<index_t>(::round(ix)); index_t iy_nearest = static_cast<index_t>(::round(iy)); index_t iz_nearest = static_cast<index_t>(::round(iz)); // assign nearest neighor pixel value to output pixel auto inp_ptr_NC = 
input.data + n * inp_sN; auto out_ptr_NCDHW = output.data + n * out_sN + d * out_sD + h * out_sH + w * out_sW; for (index_t c = 0; c < C; ++c, inp_ptr_NC += inp_sC, out_ptr_NCDHW += out_sC) { if (within_bounds_3d(iz_nearest, iy_nearest, ix_nearest, inp_D, inp_H, inp_W)) { *out_ptr_NCDHW = inp_ptr_NC[iz_nearest * inp_sD + iy_nearest * inp_sH + ix_nearest * inp_sW]; } else { *out_ptr_NCDHW = static_cast<scalar_t>(0); } } } } } // Note [Passing pointer and offset to fastAtomicAdd] // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // For its internal bounds checking, fastAtomicAdd needs to know where the destination address // lies relative to the entire tensor, so we pass the base grad_input.data and full offset information, // including batch * channel offset (NC_offset). template <typename scalar_t, typename index_t> C10_LAUNCH_BOUNDS_1(256) __global__ void grid_sampler_2d_backward_kernel( const index_t nthreads, TensorInfo<scalar_t, index_t> grad_output, TensorInfo<scalar_t, index_t> input, TensorInfo<scalar_t, index_t> grid, TensorInfo<scalar_t, index_t> grad_input, // initialized to zeros TensorInfo<scalar_t, index_t> grad_grid, // initialized to empty const GridSamplerInterpolation interpolation_mode, const GridSamplerPadding padding_mode, bool align_corners, const index_t grad_input_memory_span) { index_t C = input.sizes[1]; index_t inp_H = input.sizes[2]; index_t inp_W = input.sizes[3]; index_t out_H = grid.sizes[1]; index_t out_W = grid.sizes[2]; index_t inp_sN = input.strides[0]; index_t inp_sC = input.strides[1]; index_t inp_sH = input.strides[2]; index_t inp_sW = input.strides[3]; index_t grid_sN = grid.strides[0]; index_t grid_sH = grid.strides[1]; index_t grid_sW = grid.strides[2]; index_t grid_sCoor = grid.strides[3]; index_t gOut_sN = grad_output.strides[0]; index_t gOut_sC = grad_output.strides[1]; index_t gOut_sH = grad_output.strides[2]; index_t gOut_sW = grad_output.strides[3]; index_t gInp_sN = grad_input.strides[0]; index_t gInp_sC = grad_input.strides[1]; index_t gInp_sH = grad_input.strides[2]; index_t gInp_sW = grad_input.strides[3]; index_t gGrid_sW = grad_grid.strides[2]; CUDA_KERNEL_LOOP_TYPE(index, nthreads, index_t) { const index_t w = index % out_W; const index_t h = (index / out_W) % out_H; const index_t n = index / (out_H * out_W); const auto grid_offset = n * grid_sN + h * grid_sH + w * grid_sW; // get the corresponding input x, y co-ordinates from grid scalar_t x = grid.data[grid_offset]; scalar_t y = grid.data[grid_offset + grid_sCoor]; // multipliers for gradients on ix and iy scalar_t gix_mult, giy_mult; scalar_t ix = grid_sampler_compute_source_index_set_grad(x, inp_W, padding_mode, align_corners, &gix_mult); scalar_t iy = grid_sampler_compute_source_index_set_grad(y, inp_H, padding_mode, align_corners, &giy_mult); if (interpolation_mode == GridSamplerInterpolation::Bilinear) { // get NE, NW, SE, SW pixel values from (x, y) index_t ix_nw = static_cast<index_t>(::floor(ix)); index_t iy_nw = static_cast<index_t>(::floor(iy)); index_t ix_ne = ix_nw + 1; index_t iy_ne = iy_nw; index_t ix_sw = ix_nw; index_t iy_sw = iy_nw + 1; index_t ix_se = ix_nw + 1; index_t iy_se = iy_nw + 1; // get surfaces to each neighbor: scalar_t nw = (ix_se - ix) * (iy_se - iy); scalar_t ne = (ix - ix_sw) * (iy_sw - iy); scalar_t sw = (ix_ne - ix) * (iy - iy_ne); scalar_t se = (ix - ix_nw) * (iy - iy_nw); scalar_t gix = static_cast<scalar_t>(0), giy = static_cast<scalar_t>(0); scalar_t *gOut_ptr_NCHW = grad_output.data + n * gOut_sN + h * gOut_sH + w * gOut_sW; index_t NC_offset 
= n * gInp_sN; scalar_t *inp_ptr_NC = input.data + n * inp_sN; for (index_t c = 0; c < C; ++c, inp_ptr_NC += inp_sC, NC_offset += gInp_sC, gOut_ptr_NCHW += gOut_sC) { scalar_t gOut = *gOut_ptr_NCHW; // calculate and set grad_input. See Note [Passing pointer and offset to fastAtomicAdd]. safe_add_2d(grad_input.data, iy_nw, ix_nw, gInp_sH, gInp_sW, inp_H, inp_W, nw * gOut, NC_offset, grad_input_memory_span); safe_add_2d(grad_input.data, iy_ne, ix_ne, gInp_sH, gInp_sW, inp_H, inp_W, ne * gOut, NC_offset, grad_input_memory_span); safe_add_2d(grad_input.data, iy_sw, ix_sw, gInp_sH, gInp_sW, inp_H, inp_W, sw * gOut, NC_offset, grad_input_memory_span); safe_add_2d(grad_input.data, iy_se, ix_se, gInp_sH, gInp_sW, inp_H, inp_W, se * gOut, NC_offset, grad_input_memory_span); // calculate grad_grid if (within_bounds_2d(iy_nw, ix_nw, inp_H, inp_W)) { scalar_t nw_val = inp_ptr_NC[iy_nw * inp_sH + ix_nw * inp_sW]; gix -= nw_val * (iy_se - iy) * gOut; giy -= nw_val * (ix_se - ix) * gOut; } if (within_bounds_2d(iy_ne, ix_ne, inp_H, inp_W)) { scalar_t ne_val = inp_ptr_NC[iy_ne * inp_sH + ix_ne * inp_sW]; gix += ne_val * (iy_sw - iy) * gOut; giy -= ne_val * (ix - ix_sw) * gOut; } if (within_bounds_2d(iy_sw, ix_sw, inp_H, inp_W)) { scalar_t sw_val = inp_ptr_NC[iy_sw * inp_sH + ix_sw * inp_sW]; gix -= sw_val * (iy - iy_ne) * gOut; giy += sw_val * (ix_ne - ix) * gOut; } if (within_bounds_2d(iy_se, ix_se, inp_H, inp_W)) { scalar_t se_val = inp_ptr_NC[iy_se * inp_sH + ix_se * inp_sW]; gix += se_val * (iy - iy_nw) * gOut; giy += se_val * (ix - ix_nw) * gOut; } } // assuming grad_grid is contiguous // thus we can // 1. use index with gGrid_sW to directly compute gGrid_ptr_NHW // 2. directly assign to gGrid_ptr_NHW[0], gGrid_ptr_NHW[1] scalar_t *gGrid_ptr_NHW = grad_grid.data + index * gGrid_sW; gGrid_ptr_NHW[0] = gix_mult * gix; gGrid_ptr_NHW[1] = giy_mult * giy; } else if (interpolation_mode == GridSamplerInterpolation::Nearest) { index_t ix_nearest = static_cast<index_t>(::round(ix)); index_t iy_nearest = static_cast<index_t>(::round(iy)); // assign nearest neighor pixel value to output pixel scalar_t *gOut_ptr_NCHW = grad_output.data + n * gOut_sN + h * gOut_sH + w * gOut_sW; index_t NC_offset = n * gInp_sN; for (index_t c = 0; c < C; ++c, NC_offset += gInp_sC, gOut_ptr_NCHW += gOut_sC) { // calculate and set grad_input. See Note [Passing pointer and offset to fastAtomicAdd]. safe_add_2d(grad_input.data, iy_nearest, ix_nearest, gInp_sH, gInp_sW, inp_H, inp_W, *gOut_ptr_NCHW, NC_offset, grad_input_memory_span); } // assuming grad_grid is contiguous // thus we can // 1. use index with gGrid_sW to directly compute gGrid_ptr_NHW // 2. 
directly assign to gGrid_ptr_NHW[0], gGrid_ptr_NHW[1] scalar_t *gGrid_ptr_NHW = grad_grid.data + index * gGrid_sW; gGrid_ptr_NHW[0] = static_cast<scalar_t>(0); gGrid_ptr_NHW[1] = static_cast<scalar_t>(0); } else if (interpolation_mode == GridSamplerInterpolation::Bicubic) { ix = grid_sampler_unnormalize_set_grad(x, inp_W, align_corners, &gix_mult); iy = grid_sampler_unnormalize_set_grad(y, inp_H, align_corners, &giy_mult); scalar_t ix_nw = ::floor(ix); scalar_t iy_nw = ::floor(iy); const scalar_t tx = ix - ix_nw; const scalar_t ty = iy - iy_nw; scalar_t x_coeffs[4]; scalar_t y_coeffs[4]; scalar_t x_coeffs_grad[4]; scalar_t y_coeffs_grad[4]; get_cubic_upsampling_coefficients<scalar_t>(x_coeffs, tx); get_cubic_upsampling_coefficients<scalar_t>(y_coeffs, ty); get_cubic_coefficients_grad<scalar_t>(x_coeffs_grad, tx); get_cubic_coefficients_grad<scalar_t>(y_coeffs_grad, ty); scalar_t gix = static_cast<scalar_t>(0); scalar_t giy = static_cast<scalar_t>(0); scalar_t *gOut_ptr_NCHW = grad_output.data + n * gOut_sN + h * gOut_sH + w * gOut_sW; index_t NC_offset = n * gInp_sN; scalar_t *inp_ptr_NC = input.data + n * inp_sN; for (index_t c = 0; c < C; ++c, gOut_ptr_NCHW += gOut_sC, NC_offset += gInp_sC, inp_ptr_NC+= inp_sC) { scalar_t gOut = *gOut_ptr_NCHW; #pragma unroll 4 for (index_t i = 0; i < 4; ++i) { #pragma unroll 4 for (index_t j = 0; j < 4; ++j) { // set input gradient. See Note [Passing pointer and offset to fastAtomicAdd]. add_value_bounded<scalar_t>(grad_input.data, ix_nw - 1 + i, iy_nw - 1 + j, inp_W, inp_H, gInp_sW, gInp_sH, gOut * x_coeffs[i] * y_coeffs[j], padding_mode, align_corners, NC_offset, grad_input_memory_span); // set grid gradient scalar_t val = get_value_bounded<scalar_t>(inp_ptr_NC, ix_nw - 1 + i, iy_nw - 1 + j, inp_W, inp_H, inp_sW, inp_sH, padding_mode, align_corners); gix -= val * x_coeffs_grad[i] * y_coeffs[j] * gOut; giy -= val * y_coeffs_grad[j] * x_coeffs[i] * gOut; } } } scalar_t *gGrid_ptr_NHW = grad_grid.data + index * gGrid_sW; gGrid_ptr_NHW[0] = gix_mult * gix; gGrid_ptr_NHW[1] = giy_mult * giy; } } } template <typename scalar_t, typename index_t> C10_LAUNCH_BOUNDS_1(256) __global__ void grid_sampler_3d_backward_kernel( const index_t nthreads, TensorInfo<scalar_t, index_t> grad_output, TensorInfo<scalar_t, index_t> input, TensorInfo<scalar_t, index_t> grid, TensorInfo<scalar_t, index_t> grad_input, // initialized to zeros TensorInfo<scalar_t, index_t> grad_grid, // initialized to empty const GridSamplerInterpolation interpolation_mode, const GridSamplerPadding padding_mode, bool align_corners, const index_t grad_input_memory_span) { index_t C = input.sizes[1]; index_t inp_D = input.sizes[2]; index_t inp_H = input.sizes[3]; index_t inp_W = input.sizes[4]; index_t out_D = grid.sizes[1]; index_t out_H = grid.sizes[2]; index_t out_W = grid.sizes[3]; index_t inp_sN = input.strides[0]; index_t inp_sC = input.strides[1]; index_t inp_sD = input.strides[2]; index_t inp_sH = input.strides[3]; index_t inp_sW = input.strides[4]; index_t grid_sN = grid.strides[0]; index_t grid_sD = grid.strides[1]; index_t grid_sH = grid.strides[2]; index_t grid_sW = grid.strides[3]; index_t grid_sCoor = grid.strides[4]; index_t gOut_sN = grad_output.strides[0]; index_t gOut_sC = grad_output.strides[1]; index_t gOut_sD = grad_output.strides[2]; index_t gOut_sH = grad_output.strides[3]; index_t gOut_sW = grad_output.strides[4]; index_t gInp_sN = grad_input.strides[0]; index_t gInp_sC = grad_input.strides[1]; index_t gInp_sD = grad_input.strides[2]; index_t gInp_sH = grad_input.strides[3]; 
index_t gInp_sW = grad_input.strides[4]; index_t gGrid_sW = grad_grid.strides[3]; CUDA_KERNEL_LOOP_TYPE(index, nthreads, index_t) { const index_t w = index % out_W; const index_t h = (index / out_W) % out_H; const index_t d = (index / (out_H * out_W)) % out_D; const index_t n = index / (out_D * out_H * out_W); const auto grid_offset = n * grid_sN + d * grid_sD + h * grid_sH + w * grid_sW; // get the corresponding input x, y, z co-ordinates from grid scalar_t ix = grid.data[grid_offset]; scalar_t iy = grid.data[grid_offset + grid_sCoor]; scalar_t iz = grid.data[grid_offset + 2 * grid_sCoor]; // multipliers for gradients on ix, iy, and iz scalar_t gix_mult, giy_mult, giz_mult; ix = grid_sampler_compute_source_index_set_grad(ix, inp_W, padding_mode, align_corners, &gix_mult); iy = grid_sampler_compute_source_index_set_grad(iy, inp_H, padding_mode, align_corners, &giy_mult); iz = grid_sampler_compute_source_index_set_grad(iz, inp_D, padding_mode, align_corners, &giz_mult); if (interpolation_mode == GridSamplerInterpolation::Bilinear) { // get corner pixel values from (x, y, z) // for 4d, we used north-east-south-west // for 5d, we add top-bottom index_t ix_tnw = static_cast<index_t>(::floor(ix)); index_t iy_tnw = static_cast<index_t>(::floor(iy)); index_t iz_tnw = static_cast<index_t>(::floor(iz)); index_t ix_tne = ix_tnw + 1; index_t iy_tne = iy_tnw; index_t iz_tne = iz_tnw; index_t ix_tsw = ix_tnw; index_t iy_tsw = iy_tnw + 1; index_t iz_tsw = iz_tnw; index_t ix_tse = ix_tnw + 1; index_t iy_tse = iy_tnw + 1; index_t iz_tse = iz_tnw; index_t ix_bnw = ix_tnw; index_t iy_bnw = iy_tnw; index_t iz_bnw = iz_tnw + 1; index_t ix_bne = ix_tnw + 1; index_t iy_bne = iy_tnw; index_t iz_bne = iz_tnw + 1; index_t ix_bsw = ix_tnw; index_t iy_bsw = iy_tnw + 1; index_t iz_bsw = iz_tnw + 1; index_t ix_bse = ix_tnw + 1; index_t iy_bse = iy_tnw + 1; index_t iz_bse = iz_tnw + 1; // get surfaces to each neighbor: scalar_t tnw = (ix_bse - ix) * (iy_bse - iy) * (iz_bse - iz); scalar_t tne = (ix - ix_bsw) * (iy_bsw - iy) * (iz_bsw - iz); scalar_t tsw = (ix_bne - ix) * (iy - iy_bne) * (iz_bne - iz); scalar_t tse = (ix - ix_bnw) * (iy - iy_bnw) * (iz_bnw - iz); scalar_t bnw = (ix_tse - ix) * (iy_tse - iy) * (iz - iz_tse); scalar_t bne = (ix - ix_tsw) * (iy_tsw - iy) * (iz - iz_tsw); scalar_t bsw = (ix_tne - ix) * (iy - iy_tne) * (iz - iz_tne); scalar_t bse = (ix - ix_tnw) * (iy - iy_tnw) * (iz - iz_tnw); scalar_t gix = static_cast<scalar_t>(0), giy = static_cast<scalar_t>(0), giz = static_cast<scalar_t>(0); scalar_t *gOut_ptr_NCDHW = grad_output.data + n * gOut_sN + d * gOut_sD + h * gOut_sH + w * gOut_sW; index_t NC_offset = n * gInp_sN; scalar_t *inp_ptr_NC = input.data + n * inp_sN; // calculate bilinear weighted pixel value and set output pixel for (index_t c = 0; c < C; ++c, gOut_ptr_NCDHW += gOut_sC, NC_offset += gInp_sC, inp_ptr_NC += inp_sC) { scalar_t gOut = *gOut_ptr_NCDHW; // calculate and set grad_input. See Note [Passing pointer and offset to fastAtomicAdd]. 
safe_add_3d(grad_input.data, iz_tnw, iy_tnw, ix_tnw, gInp_sD, gInp_sH, gInp_sW, inp_D, inp_H, inp_W, tnw * gOut, NC_offset, grad_input_memory_span); safe_add_3d(grad_input.data, iz_tne, iy_tne, ix_tne, gInp_sD, gInp_sH, gInp_sW, inp_D, inp_H, inp_W, tne * gOut, NC_offset, grad_input_memory_span); safe_add_3d(grad_input.data, iz_tsw, iy_tsw, ix_tsw, gInp_sD, gInp_sH, gInp_sW, inp_D, inp_H, inp_W, tsw * gOut, NC_offset, grad_input_memory_span); safe_add_3d(grad_input.data, iz_tse, iy_tse, ix_tse, gInp_sD, gInp_sH, gInp_sW, inp_D, inp_H, inp_W, tse * gOut, NC_offset, grad_input_memory_span); safe_add_3d(grad_input.data, iz_bnw, iy_bnw, ix_bnw, gInp_sD, gInp_sH, gInp_sW, inp_D, inp_H, inp_W, bnw * gOut, NC_offset, grad_input_memory_span); safe_add_3d(grad_input.data, iz_bne, iy_bne, ix_bne, gInp_sD, gInp_sH, gInp_sW, inp_D, inp_H, inp_W, bne * gOut, NC_offset, grad_input_memory_span); safe_add_3d(grad_input.data, iz_bsw, iy_bsw, ix_bsw, gInp_sD, gInp_sH, gInp_sW, inp_D, inp_H, inp_W, bsw * gOut, NC_offset, grad_input_memory_span); safe_add_3d(grad_input.data, iz_bse, iy_bse, ix_bse, gInp_sD, gInp_sH, gInp_sW, inp_D, inp_H, inp_W, bse * gOut, NC_offset, grad_input_memory_span); // calculate grad_grid if (within_bounds_3d(iz_tnw, iy_tnw, ix_tnw, inp_D, inp_H, inp_W)) { scalar_t tnw_val = inp_ptr_NC[iz_tnw * inp_sD + iy_tnw * inp_sH + ix_tnw * inp_sW]; gix -= tnw_val * (iy_bse - iy) * (iz_bse - iz) * gOut; giy -= tnw_val * (ix_bse - ix) * (iz_bse - iz) * gOut; giz -= tnw_val * (ix_bse - ix) * (iy_bse - iy) * gOut; } if (within_bounds_3d(iz_tne, iy_tne, ix_tne, inp_D, inp_H, inp_W)) { scalar_t tne_val = inp_ptr_NC[iz_tne * inp_sD + iy_tne * inp_sH + ix_tne * inp_sW]; gix += tne_val * (iy_bsw - iy) * (iz_bsw - iz) * gOut; giy -= tne_val * (ix - ix_bsw) * (iz_bsw - iz) * gOut; giz -= tne_val * (ix - ix_bsw) * (iy_bsw - iy) * gOut; } if (within_bounds_3d(iz_tsw, iy_tsw, ix_tsw, inp_D, inp_H, inp_W)) { scalar_t tsw_val = inp_ptr_NC[iz_tsw * inp_sD + iy_tsw * inp_sH + ix_tsw * inp_sW]; gix -= tsw_val * (iy - iy_bne) * (iz_bne - iz) * gOut; giy += tsw_val * (ix_bne - ix) * (iz_bne - iz) * gOut; giz -= tsw_val * (ix_bne - ix) * (iy - iy_bne) * gOut; } if (within_bounds_3d(iz_tse, iy_tse, ix_tse, inp_D, inp_H, inp_W)) { scalar_t tse_val = inp_ptr_NC[iz_tse * inp_sD + iy_tse * inp_sH + ix_tse * inp_sW]; gix += tse_val * (iy - iy_bnw) * (iz_bnw - iz) * gOut; giy += tse_val * (ix - ix_bnw) * (iz_bnw - iz) * gOut; giz -= tse_val * (ix - ix_bnw) * (iy - iy_bnw) * gOut; } if (within_bounds_3d(iz_bnw, iy_bnw, ix_bnw, inp_D, inp_H, inp_W)) { scalar_t bnw_val = inp_ptr_NC[iz_bnw * inp_sD + iy_bnw * inp_sH + ix_bnw * inp_sW]; gix -= bnw_val * (iy_tse - iy) * (iz - iz_tse) * gOut; giy -= bnw_val * (ix_tse - ix) * (iz - iz_tse) * gOut; giz += bnw_val * (ix_tse - ix) * (iy_tse - iy) * gOut; } if (within_bounds_3d(iz_bne, iy_bne, ix_bne, inp_D, inp_H, inp_W)) { scalar_t bne_val = inp_ptr_NC[iz_bne * inp_sD + iy_bne * inp_sH + ix_bne * inp_sW]; gix += bne_val * (iy_tsw - iy) * (iz - iz_tsw) * gOut; giy -= bne_val * (ix - ix_tsw) * (iz - iz_tsw) * gOut; giz += bne_val * (ix - ix_tsw) * (iy_tsw - iy) * gOut; } if (within_bounds_3d(iz_bsw, iy_bsw, ix_bsw, inp_D, inp_H, inp_W)) { scalar_t bsw_val = inp_ptr_NC[iz_bsw * inp_sD + iy_bsw * inp_sH + ix_bsw * inp_sW]; gix -= bsw_val * (iy - iy_tne) * (iz - iz_tne) * gOut; giy += bsw_val * (ix_tne - ix) * (iz - iz_tne) * gOut; giz += bsw_val * (ix_tne - ix) * (iy - iy_tne) * gOut; } if (within_bounds_3d(iz_bse, iy_bse, ix_bse, inp_D, inp_H, inp_W)) { scalar_t bse_val = 
inp_ptr_NC[iz_bse * inp_sD + iy_bse * inp_sH + ix_bse * inp_sW]; gix += bse_val * (iy - iy_tnw) * (iz - iz_tnw) * gOut; giy += bse_val * (ix - ix_tnw) * (iz - iz_tnw) * gOut; giz += bse_val * (ix - ix_tnw) * (iy - iy_tnw) * gOut; } } // assuming grad_grid is contiguous // thus we can // 1. use index with gGrid_sW to directly compute gGrid_ptr_NDHW // 2. directly assign to gGrid_ptr_NDHW[0], gGrid_ptr_NDHW[1], gGrid_ptr_NDHW[2] scalar_t *gGrid_ptr_NDHW = grad_grid.data + index * gGrid_sW; gGrid_ptr_NDHW[0] = gix_mult * gix; gGrid_ptr_NDHW[1] = giy_mult * giy; gGrid_ptr_NDHW[2] = giz_mult * giz; } else if (interpolation_mode == GridSamplerInterpolation::Nearest) { auto ix_nearest = static_cast<index_t>(::round(ix)); auto iy_nearest = static_cast<index_t>(::round(iy)); auto iz_nearest = static_cast<index_t>(::round(iz)); // assign nearest neighor pixel value to output pixel scalar_t *gOut_ptr_NCDHW = grad_output.data + n * gOut_sN + d * gOut_sD + h * gOut_sH + w * gOut_sW; index_t NC_offset = n * gInp_sN; for (index_t c = 0; c < C; ++c, gOut_ptr_NCDHW += gOut_sC, NC_offset += gInp_sC) { // calculate and set grad_input. See Note [Passing pointer and offset to fastAtomicAdd]. safe_add_3d(grad_input.data, iz_nearest, iy_nearest, ix_nearest, gInp_sD, gInp_sH, gInp_sW, inp_D, inp_H, inp_W, *gOut_ptr_NCDHW, NC_offset, grad_input_memory_span); } // assuming grad_grid is contiguous // thus we can // 1. use index with gGrid_sW to directly compute gGrid_ptr_NDHW // 2. directly assign to gGrid_ptr_NDHW[0], gGrid_ptr_NDHW[1], gGrid_ptr_NDHW[2] scalar_t *gGrid_ptr_NDHW = grad_grid.data + index * gGrid_sW; gGrid_ptr_NDHW[0] = static_cast<scalar_t>(0); gGrid_ptr_NDHW[1] = static_cast<scalar_t>(0); gGrid_ptr_NDHW[2] = static_cast<scalar_t>(0); } } } } // namespace // No shape checking needed here. See # NOTE [ grid_sampler Native Functions ]. Tensor grid_sampler_2d_cuda(const Tensor& input, const Tensor& grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners) { auto N = input.size(0); auto C = input.size(1); auto H = grid.size(1); auto W = grid.size(2); auto output = at::empty({N, C, H, W}, input.options()); int64_t count = N * H * W; if (count > 0) { AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.scalar_type(), "grid_sampler_2d_cuda", [&] { if (canUse32BitIndexMath(input) && canUse32BitIndexMath(grid) && canUse32BitIndexMath(output)) { grid_sampler_2d_kernel<scalar_t> <<<GET_BLOCKS(count, 256), 256, 0, at::cuda::getCurrentCUDAStream()>>>( static_cast<int>(count), getTensorInfo<scalar_t, int>(input), getTensorInfo<scalar_t, int>(grid), getTensorInfo<scalar_t, int>(output), static_cast<GridSamplerInterpolation>(interpolation_mode), static_cast<GridSamplerPadding>(padding_mode), align_corners); C10_CUDA_KERNEL_LAUNCH_CHECK(); } else { grid_sampler_2d_kernel<scalar_t> <<<GET_BLOCKS(count, 256), 256, 0, at::cuda::getCurrentCUDAStream()>>>( count, getTensorInfo<scalar_t, int64_t>(input), getTensorInfo<scalar_t, int64_t>(grid), getTensorInfo<scalar_t, int64_t>(output), static_cast<GridSamplerInterpolation>(interpolation_mode), static_cast<GridSamplerPadding>(padding_mode), align_corners); C10_CUDA_KERNEL_LAUNCH_CHECK(); } }); } return output; } // No shape checking needed here. See # NOTE [ grid_sampler Native Functions ]. 
Tensor grid_sampler_3d_cuda(const Tensor& input, const Tensor& grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners) { auto N = input.size(0); auto D = grid.size(1); auto H = grid.size(2); auto W = grid.size(3); auto output = at::empty({N, input.size(1), D, H, W}, input.options()); int64_t count = N * D * H * W; if (count > 0) { AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.scalar_type(), "grid_sampler_3d_cuda", [&] { if (canUse32BitIndexMath(input) && canUse32BitIndexMath(grid) && canUse32BitIndexMath(output)) { grid_sampler_3d_kernel<scalar_t> <<<GET_BLOCKS(count, 512), 512, 0, at::cuda::getCurrentCUDAStream()>>>( static_cast<int>(count), getTensorInfo<scalar_t, int>(input), getTensorInfo<scalar_t, int>(grid), getTensorInfo<scalar_t, int>(output), static_cast<GridSamplerInterpolation>(interpolation_mode), static_cast<GridSamplerPadding>(padding_mode), align_corners); C10_CUDA_KERNEL_LAUNCH_CHECK(); } else { grid_sampler_3d_kernel<scalar_t> <<<GET_BLOCKS(count, 512), 512, 0, at::cuda::getCurrentCUDAStream()>>>( count, getTensorInfo<scalar_t, int64_t>(input), getTensorInfo<scalar_t, int64_t>(grid), getTensorInfo<scalar_t, int64_t>(output), static_cast<GridSamplerInterpolation>(interpolation_mode), static_cast<GridSamplerPadding>(padding_mode), align_corners); C10_CUDA_KERNEL_LAUNCH_CHECK(); } }); } return output; } // No shape checking needed here. See # NOTE [ grid_sampler Native Functions ]. std::tuple<Tensor, Tensor> grid_sampler_2d_backward_cuda(const Tensor& grad_output, const Tensor& input, const Tensor& grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners) { // See Note [Writing Nondeterministic Operations] // Nondeterministic because of atomicAdd usage globalContext().alertNotDeterministic("grid_sampler_2d_backward_cuda"); auto N = input.size(0); auto H = grid.size(1); auto W = grid.size(2); auto grad_input = at::zeros_like(input, LEGACY_CONTIGUOUS_MEMORY_FORMAT); auto grad_grid = at::empty_like(grid, LEGACY_CONTIGUOUS_MEMORY_FORMAT); int64_t count = N * H * W; if (count > 0) { AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.scalar_type(), "grid_sampler_2d_backward_cuda", [&] { if (canUse32BitIndexMath(input) && canUse32BitIndexMath(grid) && canUse32BitIndexMath(grad_output)) { grid_sampler_2d_backward_kernel<scalar_t> <<<GET_BLOCKS(count, 256), 256, 0, at::cuda::getCurrentCUDAStream()>>>( static_cast<int>(count), getTensorInfo<scalar_t, int>(grad_output), getTensorInfo<scalar_t, int>(input), getTensorInfo<scalar_t, int>(grid), getTensorInfo<scalar_t, int>(grad_input), getTensorInfo<scalar_t, int>(grad_grid), static_cast<GridSamplerInterpolation>(interpolation_mode), static_cast<GridSamplerPadding>(padding_mode), align_corners, /*grad_input_memory_span =*/static_cast<int>(grad_input.numel())); C10_CUDA_KERNEL_LAUNCH_CHECK(); } else { grid_sampler_2d_backward_kernel<scalar_t> <<<GET_BLOCKS(count, 256), 256, 0, at::cuda::getCurrentCUDAStream()>>>( count, getTensorInfo<scalar_t, int64_t>(grad_output), getTensorInfo<scalar_t, int64_t>(input), getTensorInfo<scalar_t, int64_t>(grid), getTensorInfo<scalar_t, int64_t>(grad_input), getTensorInfo<scalar_t, int64_t>(grad_grid), static_cast<GridSamplerInterpolation>(interpolation_mode), static_cast<GridSamplerPadding>(padding_mode), align_corners, /*grad_input_memory_span =*/grad_input.numel()); C10_CUDA_KERNEL_LAUNCH_CHECK(); } }); } return std::make_tuple(grad_input, grad_grid); } // No shape checking needed here. See # NOTE [ grid_sampler Native Functions ].
std::tuple<Tensor, Tensor> grid_sampler_3d_backward_cuda(const Tensor& grad_output, const Tensor& input, const Tensor& grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners) { // See Note [Writing Nondeterministic Operations] // Nondeterministic because of atomicAdd usage globalContext().alertNotDeterministic("grid_sampler_3d_backward_cuda"); auto N = input.size(0); auto D = grid.size(1); auto H = grid.size(2); auto W = grid.size(3); auto grad_input = at::zeros_like(input, LEGACY_CONTIGUOUS_MEMORY_FORMAT); auto grad_grid = at::empty_like(grid, LEGACY_CONTIGUOUS_MEMORY_FORMAT); int64_t count = N * D * H * W; if (count > 0) { AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.scalar_type(), "grid_sampler_3d_backward_cuda", [&] { if (canUse32BitIndexMath(input) && canUse32BitIndexMath(grid) && canUse32BitIndexMath(grad_output)) { grid_sampler_3d_backward_kernel<scalar_t> <<<GET_BLOCKS(count, 256), 256, 0, at::cuda::getCurrentCUDAStream()>>>( static_cast<int>(count), getTensorInfo<scalar_t, int>(grad_output), getTensorInfo<scalar_t, int>(input), getTensorInfo<scalar_t, int>(grid), getTensorInfo<scalar_t, int>(grad_input), getTensorInfo<scalar_t, int>(grad_grid), static_cast<GridSamplerInterpolation>(interpolation_mode), static_cast<GridSamplerPadding>(padding_mode), align_corners, /*grad_input_memory_span =*/static_cast<int>(grad_input.numel())); C10_CUDA_KERNEL_LAUNCH_CHECK(); } else { grid_sampler_3d_backward_kernel<scalar_t> <<<GET_BLOCKS(count, 256), 256, 0, at::cuda::getCurrentCUDAStream()>>>( count, getTensorInfo<scalar_t, int64_t>(grad_output), getTensorInfo<scalar_t, int64_t>(input), getTensorInfo<scalar_t, int64_t>(grid), getTensorInfo<scalar_t, int64_t>(grad_input), getTensorInfo<scalar_t, int64_t>(grad_grid), static_cast<GridSamplerInterpolation>(interpolation_mode), static_cast<GridSamplerPadding>(padding_mode), align_corners, /*grad_input_memory_span =*/grad_input.numel()); C10_CUDA_KERNEL_LAUNCH_CHECK(); } }); } return std::make_tuple(grad_input, grad_grid); } }} // namespace at::native
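// A minimal smoke-test sketch for the two 3D entry points defined above. It assumes the
// usual enum ordering (0 selects Bilinear interpolation and Zeros padding) and an ATen
// build in which these at::native symbols are visible; the harness itself is illustrative
// only, not part of this file's test suite.
#if 0
#include <ATen/ATen.h>

static void grid_sampler_3d_smoke_test() {
  at::Tensor input = at::rand({2, 3, 8, 8, 8}, at::device(at::kCUDA).dtype(at::kFloat));        // N, C, D, H, W
  at::Tensor grid  = at::rand({2, 4, 4, 4, 3}, at::device(at::kCUDA).dtype(at::kFloat)) * 2 - 1; // N, D, H, W, 3 in [-1, 1]
  at::Tensor out = at::native::grid_sampler_3d_cuda(input, grid,
                                                    /*interpolation_mode=*/0,
                                                    /*padding_mode=*/0,
                                                    /*align_corners=*/true);
  auto grads = at::native::grid_sampler_3d_backward_cuda(at::ones_like(out), input, grid,
                                                         /*interpolation_mode=*/0,
                                                         /*padding_mode=*/0,
                                                         /*align_corners=*/true);
  // std::get<0>(grads) matches input's shape; std::get<1>(grads) matches grid's shape.
}
#endif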
c667f60288a08bba8bebedd2ecf0cd31da56a4f5.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /// /// MM2chain.cu /// By Waruna Ranasinghe /// Created: 18 Aug 2017 /// Last Modified: ///NOTE: Please read all the comments before modifying the file. Failure to maintain //the required variable names and function names will be penalized. //Computes the matrix multiplication of sequence of 2x2 matrices. //The operation is associative but not commutative. Therefore you cannot //change the order of matrix multiplications //Includes #include <stdio.h> #include "MM2chainKernel.h" #include "timer.h" //defines /** * This macro checks return value of the CUDA runtime call and exits * the application if the call failed. */ #define CUDA_CHECK_RETURN(value) { \ hipError_t _m_cudaStat = value; \ if (_m_cudaStat != hipSuccess) { \ fprintf(stderr, "Error: %s at line %d in file %s\n", \ hipGetErrorString(_m_cudaStat), __LINE__, __FILE__); \ exit(1); \ } } #define epsilon (float)1e-5 /// /// main /// int main(int argc, char** argv) { int G; //number of thread blocks in the grid int n; //number of 2x2 matrices //host variables float result[4]; // your final answer should be assigned to this variable float result_temp[4]; float *h_A; //input 2x2 matrix sequence. Stored in a 1D array float *h_matmults; //partial output matmults per thread block //device variables float *d_A; //input 2x2 matrix sequence. Stored in a 1D array float *d_matmults; //partial output matmults per thread block if (argc < 2) { printf("Usage: %s G\n\tG - number of thread blocks in the grid\n", argv[0]); exit(0); } //init size params G = atoi(argv[1]); n=B*G*C; // UNCOMMENT THIS WHEN CHECKIN // printf("Number of threads blocks: %d\nNumber of threads per block: %d\nNumber of matrices per thread: %d\n", G, B, C); //allocating memory for host variables h_A = (float *)malloc(n*4*sizeof(float)); if (h_A==NULL) { fprintf(stderr, "Failed to allocate host array h_A!\n"); exit(-1); } h_matmults = (float *)malloc(G*4*sizeof(float)); if (h_matmults==NULL) { fprintf(stderr, "Failed to allocate host array h_matmults!\n"); exit(-1); } //initializing input matrices // We repeat the following sequence of matrices // |1.0 0.0| |4.0 3.0| |1.0 0.0| |0.4 -0.3| // |0.5 1.0| |0.0 2.5| |-0.5 1.0| |0.0 0.25| // for (int i=0; i<n*4; i+=16) { h_A[i]=1.0f; h_A[i+1]=0.0f; h_A[i+2]=0.5f; h_A[i+3]=1.0f; h_A[i+4]=4.0f; h_A[i+5]=3.0f; h_A[i+6]=0.0f; h_A[i+7]=2.5f; h_A[i+8]=1.0f; h_A[i+9]=0.0f; h_A[i+10]=-0.5f; h_A[i+11]=1.0f; h_A[i+12]=0.4f; h_A[i+13]=-0.3f; h_A[i+14]=0.0f; h_A[i+15]=0.25f; } CUDA_CHECK_RETURN(hipSetDevice(0)); //Allocating the device memory for input vector. 
CUDA_CHECK_RETURN(hipMalloc((void **)&d_A, n*4*sizeof(float))); //allocate device memory for the partial output CUDA_CHECK_RETURN(hipMalloc((void **)&d_matmults, G*4*sizeof(float))); initialize_timer (); start_timer(); //copy the host input matrices array h_A to the device input array d_A in device //memory CUDA_CHECK_RETURN(hipMemcpy(d_A, h_A, n*4*sizeof(float), hipMemcpyHostToDevice)); float time_input, time_gpu, time_out; /* Start Timer */ //initialize_timer (); //start_timer(); stop_timer(); time_input=elapsed_time (); reset_timer(); start_timer(); //Launch the reduce CUDA kernel with G blocks per grid and B threads per //block hipLaunchKernelGGL(( multiply), dim3(G), dim3(B), 0, 0, d_A, d_matmults); //check whether there were errors while launching the CUDA kernel CUDA_CHECK_RETURN(hipGetLastError()); //wait for the kernel to finish CUDA_CHECK_RETURN(hipDeviceSynchronize()); /* stop timer */ stop_timer(); time_gpu=elapsed_time (); reset_timer(); start_timer(); //Copy partial results back to host CUDA_CHECK_RETURN(hipMemcpy(h_matmults, d_matmults, G*4*sizeof(float), hipMemcpyDeviceToHost)); result[0] = 1.0f; result[1] = 0.0f; result[2] = 0.0f; result[3] = 1.0f; //compute final answer by multiplying the partial answers of threads from //GPU for (int i=0; i<G; i++) { result_temp[0] = result[0]*h_matmults[i*4] + result[1]*h_matmults[i*4+2]; result_temp[1] = result[0]*h_matmults[i*4+1] + result[1]*h_matmults[i*4+3]; result_temp[2] = result[2]*h_matmults[i*4] + result[3]*h_matmults[i*4+2]; result_temp[3] = result[2]*h_matmults[i*4+1] + result[3]*h_matmults[i*4+3]; result[0] = result_temp[0]; result[1] = result_temp[1]; result[2] = result_temp[2]; result[3] = result_temp[3]; } stop_timer(); time_out=elapsed_time (); //THIS IS THE PRINT THAT SHOULD REMAIN WHEN CHECKIN!!! //printf("Output:\n%f %f\n%f %f\nTime to copy input data: %f s\nTime to compute[GPU]: %f s\nTime to compute output: %f s\n", result[0], result[1], result[2], result[3], time_input, time_gpu, time_out); //THIS IS THE PRINT FOR GENERATING report printf("%f \n", time_gpu); // printf("Result: %f , Elapsed time [GPU]: %f , all: %f\n", result, time); // if (result[0]-1.0f < epsilon && result[1]-0.0f < 0.2f && result[2]-0.0f < epsilon && result[3]-1.0f < epsilon) { if (fabsf(result[0]-1.0f) < epsilon && fabsf(result[1]-0.0f) < 0.2f && fabsf(result[2]-0.0f) < epsilon && fabsf(result[3]-1.0f) < epsilon) { // UNCOMMENT BEFORE TURNING IN! // printf("Test passed.\n"); } else { printf("Test failed.\n"); } //cleaning up free(h_A); free(h_matmults); return 0; }
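// Why the check expects (approximately) the identity: the input repeats groups of four
// matrices, and each group multiplies out to I, so the full chain does as well:
//   |1.0 0.0| |4.0 3.0|   |4.0 3.0|        |4.0 3.0| | 1.0 0.0|   |2.5 3.0|
//   |0.5 1.0| |0.0 2.5| = |2.0 4.0| , then |2.0 4.0| |-0.5 1.0| = |0.0 4.0| ,
//   |2.5 3.0| |0.4 -0.3 |   |1.0 0.0|
//   |0.0 4.0| |0.0  0.25| = |0.0 1.0|
// The looser 0.2 tolerance on result[1] absorbs the floating-point round-off that
// accumulates over the n = B*G*C matrix products.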
c667f60288a08bba8bebedd2ecf0cd31da56a4f5.cu
/// /// MM2chain.cu /// By Waruna Ranasinghe /// Created: 18 Aug 2017 /// Last Modified: ///NOTE: Please read all the comments before modifying the file. Failure to maintain //the required variable names and function names will be penalized. //Computes the matrix multiplication of sequence of 2x2 matrices. //The operation is associative but not commutative. Therefore you cannot //change the order of matrix multiplications //Includes #include <stdio.h> #include "MM2chainKernel.h" #include "timer.h" //defines /** * This macro checks return value of the CUDA runtime call and exits * the application if the call failed. */ #define CUDA_CHECK_RETURN(value) { \ cudaError_t _m_cudaStat = value; \ if (_m_cudaStat != cudaSuccess) { \ fprintf(stderr, "Error: %s at line %d in file %s\n", \ cudaGetErrorString(_m_cudaStat), __LINE__, __FILE__); \ exit(1); \ } } #define epsilon (float)1e-5 /// /// main /// int main(int argc, char** argv) { int G; //number of thread blocks in the grid int n; //number of 2x2 matrices //host variables float result[4]; // your final answer should be assigned to this variable float result_temp[4]; float *h_A; //input 2x2 matrix sequence. Stored in a 1D array float *h_matmults; //partial output matmults per thread block //device variables float *d_A; //input 2x2 matrix sequence. Stored in a 1D array float *d_matmults; //partial output matmults per thread block if (argc < 2) { printf("Usage: %s G\n\tG - number of thread blocks in the grid\n", argv[0]); exit(0); } //init size params G = atoi(argv[1]); n=B*G*C; // UNCOMMENT THIS WHEN CHECKIN // printf("Number of threads blocks: %d\nNumber of threads per block: %d\nNumber of matrices per thread: %d\n", G, B, C); //allocating memory for host variables h_A = (float *)malloc(n*4*sizeof(float)); if (h_A==NULL) { fprintf(stderr, "Failed to allocate host array h_A!\n"); exit(-1); } h_matmults = (float *)malloc(G*4*sizeof(float)); if (h_matmults==NULL) { fprintf(stderr, "Failed to allocate host array h_matmults!\n"); exit(-1); } //initializing input matrices // We repeat the following sequence of matrices // |1.0 0.0| |4.0 3.0| |1.0 0.0| |0.4 -0.3| // |0.5 1.0| |0.0 2.5| |-0.5 1.0| |0.0 0.25| // for (int i=0; i<n*4; i+=16) { h_A[i]=1.0f; h_A[i+1]=0.0f; h_A[i+2]=0.5f; h_A[i+3]=1.0f; h_A[i+4]=4.0f; h_A[i+5]=3.0f; h_A[i+6]=0.0f; h_A[i+7]=2.5f; h_A[i+8]=1.0f; h_A[i+9]=0.0f; h_A[i+10]=-0.5f; h_A[i+11]=1.0f; h_A[i+12]=0.4f; h_A[i+13]=-0.3f; h_A[i+14]=0.0f; h_A[i+15]=0.25f; } CUDA_CHECK_RETURN(cudaSetDevice(0)); //Allocating the device memory for input vector. 
CUDA_CHECK_RETURN(cudaMalloc((void **)&d_A, n*4*sizeof(float))); //allocate device memory for the partial output CUDA_CHECK_RETURN(cudaMalloc((void **)&d_matmults, G*4*sizeof(float))); initialize_timer (); start_timer(); //copy the host input matrices array h_A to the device input array d_A in device //memory CUDA_CHECK_RETURN(cudaMemcpy(d_A, h_A, n*4*sizeof(float), cudaMemcpyHostToDevice)); float time_input, time_gpu, time_out; /* Start Timer */ //initialize_timer (); //start_timer(); stop_timer(); time_input=elapsed_time (); reset_timer(); start_timer(); //Launch the reduce CUDA kernel with G blocks per grid and B threads per //block multiply<<<G, B>>>(d_A, d_matmults); //check whether there were errors while launching the CUDA kernel CUDA_CHECK_RETURN(cudaGetLastError()); //wait for the kernel to finish CUDA_CHECK_RETURN(cudaThreadSynchronize()); /* stop timer */ stop_timer(); time_gpu=elapsed_time (); reset_timer(); start_timer(); //Copy partial results back to host CUDA_CHECK_RETURN(cudaMemcpy(h_matmults, d_matmults, G*4*sizeof(float), cudaMemcpyDeviceToHost)); result[0] = 1.0f; result[1] = 0.0f; result[2] = 0.0f; result[3] = 1.0f; //compute final answer by multiplying the partial answers of threads from //GPU for (int i=0; i<G; i++) { result_temp[0] = result[0]*h_matmults[i*4] + result[1]*h_matmults[i*4+2]; result_temp[1] = result[0]*h_matmults[i*4+1] + result[1]*h_matmults[i*4+3]; result_temp[2] = result[2]*h_matmults[i*4] + result[3]*h_matmults[i*4+2]; result_temp[3] = result[2]*h_matmults[i*4+1] + result[3]*h_matmults[i*4+3]; result[0] = result_temp[0]; result[1] = result_temp[1]; result[2] = result_temp[2]; result[3] = result_temp[3]; } stop_timer(); time_out=elapsed_time (); //THIS IS THE PRINT THAT SHOULD REMAIN WHEN CHECKIN!!! //printf("Output:\n%f %f\n%f %f\nTime to copy input data: %f s\nTime to compute[GPU]: %f s\nTime to compute output: %f s\n", result[0], result[1], result[2], result[3], time_input, time_gpu, time_out); //THIS IS THE PRINT FOR GENERATING report printf("%f \n", time_gpu); // printf("Result: %f , Elapsed time [GPU]: %f , all: %f\n", result, time); // if (result[0]-1.0f < epsilon && result[1]-0.0f < 0.2f && result[2]-0.0f < epsilon && result[3]-1.0f < epsilon) { if (fabsf(result[0]-1.0f) < epsilon && fabsf(result[1]-0.0f) < 0.2f && fabsf(result[2]-0.0f) < epsilon && fabsf(result[3]-1.0f) < epsilon) { // UNCOMMENT BEFORE TURNING IN! // printf("Test passed.\n"); } else { printf("Test failed.\n"); } //cleaning up free(h_A); free(h_matmults); return 0; }
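// The `multiply` kernel itself lives in MM2chainKernel.h, which is not part of this file.
// Based on the contract visible in main() (B threads per block, C matrices per thread,
// row-major 2x2 storage, one ordered partial product per block written to d_matmults),
// one plausible shape for it is sketched below; treat it as an illustration of the
// technique, not the shipped kernel. It assumes B and C are compile-time macros from
// the header.
#if 0
__device__ inline void mm2(const float *a, const float *b, float *out) {
    // 2x2 product, row-major storage {m00, m01, m10, m11} (same layout main() uses).
    out[0] = a[0]*b[0] + a[1]*b[2];
    out[1] = a[0]*b[1] + a[1]*b[3];
    out[2] = a[2]*b[0] + a[3]*b[2];
    out[3] = a[2]*b[1] + a[3]*b[3];
}

__global__ void multiply_sketch(const float *A, float *matmults) {
    __shared__ float partial[B*4];            // one 2x2 partial product per thread
    float acc[4] = {1.0f, 0.0f, 0.0f, 1.0f};  // identity
    // Each thread folds its C consecutive matrices, left to right, preserving order.
    int base = (blockIdx.x * blockDim.x + threadIdx.x) * C * 4;
    for (int m = 0; m < C; m++) {
        float tmp[4];
        mm2(acc, &A[base + m*4], tmp);
        for (int k = 0; k < 4; k++) acc[k] = tmp[k];
    }
    for (int k = 0; k < 4; k++) partial[threadIdx.x*4 + k] = acc[k];
    __syncthreads();
    // Ordered tree reduction: always combine left*right so the chain order is preserved.
    for (int stride = 1; stride < blockDim.x; stride *= 2) {
        if (threadIdx.x % (2*stride) == 0 && threadIdx.x + stride < blockDim.x) {
            float tmp[4];
            mm2(&partial[threadIdx.x*4], &partial[(threadIdx.x + stride)*4], tmp);
            for (int k = 0; k < 4; k++) partial[threadIdx.x*4 + k] = tmp[k];
        }
        __syncthreads();
    }
    if (threadIdx.x == 0) {
        for (int k = 0; k < 4; k++) matmults[blockIdx.x*4 + k] = partial[k];
    }
}
#endif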
c9fcf42117a298168892666e4557224e91aa43a9.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright 1993-2015 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. * */ #define THREAD_N 256 #define N 1024 #define DIV_UP(a, b) (((a) + (b) - 1) / (b)) // Includes, system #include <stdio.h> #include <helper_cuda.h> #include <helper_string.h> #include <helper_math.h> #include "cppOverload_kernel.cuh" const char *sampleName = "C++ Function Overloading"; #define OUTPUT_ATTR(attr) \ printf("Shared Size: %d\n", (int)attr.sharedSizeBytes); \ printf("Constant Size: %d\n", (int)attr.constSizeBytes); \ printf("Local Size: %d\n", (int)attr.localSizeBytes); \ printf("Max Threads Per Block: %d\n", attr.maxThreadsPerBlock); \ printf("Number of Registers: %d\n", attr.numRegs); \ printf("PTX Version: %d\n", attr.ptxVersion); \ printf("Binary Version: %d\n", attr.binaryVersion); \ bool check_func1(int *hInput, int *hOutput, int a) { for (int i = 0; i < N; ++i) { int cpuRes = hInput[i]*a + i; if (hOutput[i] != cpuRes) { return false; } } return true; } bool check_func2(int2 *hInput, int *hOutput, int a) { for (int i = 0; i < N; i++) { int cpuRes = (hInput[i].x + hInput[i].y)*a + i; if (hOutput[i] != cpuRes) { return false; } } return true; } bool check_func3(int *hInput1, int *hInput2, int *hOutput, int a) { for (int i = 0; i < N; i++) { if (hOutput[i] != (hInput1[i] + hInput2[i])*a + i) { return false; } } return true; } int main(int argc, const char *argv[]) { int *hInput = NULL; int *hOutput = NULL; int *dInput = NULL; int *dOutput = NULL; printf("%s starting...\n", sampleName); int deviceCount; checkCudaErrors(hipGetDeviceCount(&deviceCount)); printf("DevicecheckCudaErrors Count: %d\n", deviceCount); int deviceID = findCudaDevice(argc, argv); hipDeviceProp_t prop; checkCudaErrors(hipGetDeviceProperties(&prop, deviceID)); if (prop.major < 2) { printf("ERROR: cppOverload requires GPU devices with compute SM 2.0 or higher.\n"); printf("Current GPU device has compute SM%d.%d, Exiting...", prop.major, prop.minor); exit(EXIT_WAIVED); } checkCudaErrors(hipSetDevice(deviceID)); // Allocate device memory checkCudaErrors(hipMalloc(&dInput , sizeof(int)*N*2)); checkCudaErrors(hipMalloc(&dOutput, sizeof(int)*N)); // Allocate host memory checkCudaErrors(hipHostMalloc(&hInput , sizeof(int)*N*2)); checkCudaErrors(hipHostMalloc(&hOutput, sizeof(int)*N)); for (int i = 0; i < N*2; i++) { hInput[i] = i; } // Copy data from host to device checkCudaErrors(hipMemcpy(dInput, hInput, sizeof(int)*N*2, hipMemcpyHostToDevice)); // Test C++ overloading bool testResult = true; bool funcResult = true; int a = 1; void (*func1)(const int *, int *, int); void (*func2)(const int2 *, int *, int); void (*func3)(const int *, const int *, int *, int); struct hipFuncAttributes attr; // overload function 1 func1 = simple_kernel; memset(&attr, 0, sizeof(attr)); checkCudaErrors(hipFuncSetCacheConfig(*func1, hipFuncCachePreferShared)); checkCudaErrors(hipFuncGetAttributes(&attr, *func1)); OUTPUT_ATTR(attr); (hipLaunchKernelGGL((*func1)), dim3(DIV_UP(N, THREAD_N)), dim3(THREAD_N), 0, 0, dInput, dOutput, a); checkCudaErrors(hipMemcpy(hOutput, dOutput, sizeof(int)*N, hipMemcpyDeviceToHost)); funcResult = check_func1(hInput, hOutput, a); 
printf("simple_kernel(const int *pIn, int *pOut, int a) %s\n\n", funcResult ? "PASSED" : "FAILED"); testResult &= funcResult; // overload function 2 func2 = simple_kernel; memset(&attr, 0, sizeof(attr)); checkCudaErrors(hipFuncSetCacheConfig(*func2, hipFuncCachePreferShared)); checkCudaErrors(hipFuncGetAttributes(&attr, *func2)); OUTPUT_ATTR(attr); (hipLaunchKernelGGL((*func2)), dim3(DIV_UP(N, THREAD_N)), dim3(THREAD_N), 0, 0, (int2 *)dInput, dOutput, a); checkCudaErrors(hipMemcpy(hOutput, dOutput, sizeof(int)*N, hipMemcpyDeviceToHost)); funcResult = check_func2(reinterpret_cast<int2 *>(hInput), hOutput, a); printf("simple_kernel(const int2 *pIn, int *pOut, int a) %s\n\n", funcResult ? "PASSED" : "FAILED"); testResult &= funcResult; // overload function 3 func3 = simple_kernel; memset(&attr, 0, sizeof(attr)); checkCudaErrors(hipFuncSetCacheConfig(*func3, hipFuncCachePreferShared)); checkCudaErrors(hipFuncGetAttributes(&attr, *func3)); OUTPUT_ATTR(attr); (hipLaunchKernelGGL((*func3)), dim3(DIV_UP(N, THREAD_N)), dim3(THREAD_N), 0, 0, dInput, dInput+N, dOutput, a); checkCudaErrors(hipMemcpy(hOutput, dOutput, sizeof(int)*N, hipMemcpyDeviceToHost)); funcResult = check_func3(&hInput[0], &hInput[N], hOutput, a); printf("simple_kernel(const int *pIn1, const int *pIn2, int *pOut, int a) %s\n\n", funcResult ? "PASSED" : "FAILED"); testResult &= funcResult; checkCudaErrors(hipFree(dInput)); checkCudaErrors(hipFree(dOutput)); checkCudaErrors(hipHostFree(hOutput)); checkCudaErrors(hipHostFree(hInput)); checkCudaErrors(hipDeviceSynchronize()); // hipDeviceReset causes the driver to clean up all state. While // not mandatory in normal operation, it is good practice. It is also // needed to ensure correct operation when the application is being // profiled. Calling hipDeviceReset causes all profile data to be // flushed before the application exits checkCudaErrors(hipDeviceReset()); exit(testResult ? EXIT_SUCCESS : EXIT_FAILURE); }
c9fcf42117a298168892666e4557224e91aa43a9.cu
/* * Copyright 1993-2015 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. * */ #define THREAD_N 256 #define N 1024 #define DIV_UP(a, b) (((a) + (b) - 1) / (b)) // Includes, system #include <stdio.h> #include <helper_cuda.h> #include <helper_string.h> #include <helper_math.h> #include "cppOverload_kernel.cuh" const char *sampleName = "C++ Function Overloading"; #define OUTPUT_ATTR(attr) \ printf("Shared Size: %d\n", (int)attr.sharedSizeBytes); \ printf("Constant Size: %d\n", (int)attr.constSizeBytes); \ printf("Local Size: %d\n", (int)attr.localSizeBytes); \ printf("Max Threads Per Block: %d\n", attr.maxThreadsPerBlock); \ printf("Number of Registers: %d\n", attr.numRegs); \ printf("PTX Version: %d\n", attr.ptxVersion); \ printf("Binary Version: %d\n", attr.binaryVersion); \ bool check_func1(int *hInput, int *hOutput, int a) { for (int i = 0; i < N; ++i) { int cpuRes = hInput[i]*a + i; if (hOutput[i] != cpuRes) { return false; } } return true; } bool check_func2(int2 *hInput, int *hOutput, int a) { for (int i = 0; i < N; i++) { int cpuRes = (hInput[i].x + hInput[i].y)*a + i; if (hOutput[i] != cpuRes) { return false; } } return true; } bool check_func3(int *hInput1, int *hInput2, int *hOutput, int a) { for (int i = 0; i < N; i++) { if (hOutput[i] != (hInput1[i] + hInput2[i])*a + i) { return false; } } return true; } int main(int argc, const char *argv[]) { int *hInput = NULL; int *hOutput = NULL; int *dInput = NULL; int *dOutput = NULL; printf("%s starting...\n", sampleName); int deviceCount; checkCudaErrors(cudaGetDeviceCount(&deviceCount)); printf("DevicecheckCudaErrors Count: %d\n", deviceCount); int deviceID = findCudaDevice(argc, argv); cudaDeviceProp prop; checkCudaErrors(cudaGetDeviceProperties(&prop, deviceID)); if (prop.major < 2) { printf("ERROR: cppOverload requires GPU devices with compute SM 2.0 or higher.\n"); printf("Current GPU device has compute SM%d.%d, Exiting...", prop.major, prop.minor); exit(EXIT_WAIVED); } checkCudaErrors(cudaSetDevice(deviceID)); // Allocate device memory checkCudaErrors(cudaMalloc(&dInput , sizeof(int)*N*2)); checkCudaErrors(cudaMalloc(&dOutput, sizeof(int)*N)); // Allocate host memory checkCudaErrors(cudaMallocHost(&hInput , sizeof(int)*N*2)); checkCudaErrors(cudaMallocHost(&hOutput, sizeof(int)*N)); for (int i = 0; i < N*2; i++) { hInput[i] = i; } // Copy data from host to device checkCudaErrors(cudaMemcpy(dInput, hInput, sizeof(int)*N*2, cudaMemcpyHostToDevice)); // Test C++ overloading bool testResult = true; bool funcResult = true; int a = 1; void (*func1)(const int *, int *, int); void (*func2)(const int2 *, int *, int); void (*func3)(const int *, const int *, int *, int); struct cudaFuncAttributes attr; // overload function 1 func1 = simple_kernel; memset(&attr, 0, sizeof(attr)); checkCudaErrors(cudaFuncSetCacheConfig(*func1, cudaFuncCachePreferShared)); checkCudaErrors(cudaFuncGetAttributes(&attr, *func1)); OUTPUT_ATTR(attr); (*func1)<<<DIV_UP(N, THREAD_N), THREAD_N>>>(dInput, dOutput, a); checkCudaErrors(cudaMemcpy(hOutput, dOutput, sizeof(int)*N, cudaMemcpyDeviceToHost)); funcResult = check_func1(hInput, hOutput, a); printf("simple_kernel(const int *pIn, int *pOut, int a) %s\n\n", funcResult ? 
"PASSED" : "FAILED"); testResult &= funcResult; // overload function 2 func2 = simple_kernel; memset(&attr, 0, sizeof(attr)); checkCudaErrors(cudaFuncSetCacheConfig(*func2, cudaFuncCachePreferShared)); checkCudaErrors(cudaFuncGetAttributes(&attr, *func2)); OUTPUT_ATTR(attr); (*func2)<<<DIV_UP(N, THREAD_N), THREAD_N>>>((int2 *)dInput, dOutput, a); checkCudaErrors(cudaMemcpy(hOutput, dOutput, sizeof(int)*N, cudaMemcpyDeviceToHost)); funcResult = check_func2(reinterpret_cast<int2 *>(hInput), hOutput, a); printf("simple_kernel(const int2 *pIn, int *pOut, int a) %s\n\n", funcResult ? "PASSED" : "FAILED"); testResult &= funcResult; // overload function 3 func3 = simple_kernel; memset(&attr, 0, sizeof(attr)); checkCudaErrors(cudaFuncSetCacheConfig(*func3, cudaFuncCachePreferShared)); checkCudaErrors(cudaFuncGetAttributes(&attr, *func3)); OUTPUT_ATTR(attr); (*func3)<<<DIV_UP(N, THREAD_N), THREAD_N>>>(dInput, dInput+N, dOutput, a); checkCudaErrors(cudaMemcpy(hOutput, dOutput, sizeof(int)*N, cudaMemcpyDeviceToHost)); funcResult = check_func3(&hInput[0], &hInput[N], hOutput, a); printf("simple_kernel(const int *pIn1, const int *pIn2, int *pOut, int a) %s\n\n", funcResult ? "PASSED" : "FAILED"); testResult &= funcResult; checkCudaErrors(cudaFree(dInput)); checkCudaErrors(cudaFree(dOutput)); checkCudaErrors(cudaFreeHost(hOutput)); checkCudaErrors(cudaFreeHost(hInput)); checkCudaErrors(cudaDeviceSynchronize()); // cudaDeviceReset causes the driver to clean up all state. While // not mandatory in normal operation, it is good practice. It is also // needed to ensure correct operation when the application is being // profiled. Calling cudaDeviceReset causes all profile data to be // flushed before the application exits checkCudaErrors(cudaDeviceReset()); exit(testResult ? EXIT_SUCCESS : EXIT_FAILURE); }
1b75d204453ec9f98ccf3f2e8d3a22bd96d688c2.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <math.h> #include <time.h> #include <sys/time.h> #include "bmp.h" // data is 3D, total size is DATA_DIM x DATA_DIM x DATA_DIM #define DATA_DIM 512 #define DATA_SIZE (DATA_DIM * DATA_DIM * DATA_DIM) #define DATA_SIZE_BYTES (sizeof(unsigned char) * DATA_SIZE) // image is 2D, total size is IMAGE_DIM x IMAGE_DIM #define IMAGE_DIM 512 #define IMAGE_SIZE (IMAGE_DIM * IMAGE_DIM) #define IMAGE_SIZE_BYTES (sizeof(unsigned char) * IMAGE_SIZE) texture<char, hipTextureType3D, hipReadModeNormalizedFloat> data_texture; texture<char, hipTextureType3D, hipReadModeNormalizedFloat> region_texture; void print_time(struct timeval start, struct timeval end){ long int ms = ((end.tv_sec * 1000000 + end.tv_usec) - (start.tv_sec * 1000000 + start.tv_usec)); double s = ms/1e6; printf("Time : %f s\n", s); } // Stack for the serial region growing typedef struct{ int size; int buffer_size; int3* pixels; } stack_t; stack_t* new_stack(){ stack_t* stack = (stack_t*)malloc(sizeof(stack_t)); stack->size = 0; stack->buffer_size = 1024; stack->pixels = (int3*)malloc(sizeof(int3)*1024); return stack; } void push(stack_t* stack, int3 p){ if(stack->size == stack->buffer_size){ stack->buffer_size *= 2; int3* temp = stack->pixels; stack->pixels = (int3*)malloc(sizeof(int3)*stack->buffer_size); memcpy(stack->pixels, temp, sizeof(int3)*stack->buffer_size/2); free(temp); } stack->pixels[stack->size] = p; stack->size += 1; } int3 pop(stack_t* stack){ stack->size -= 1; return stack->pixels[stack->size]; } // float3 utilities __host__ __device__ float3 cross(float3 a, float3 b){ float3 c; c.x = a.y*b.z - a.z*b.y; c.y = a.z*b.x - a.x*b.z; c.z = a.x*b.y - a.y*b.x; return c; } __host__ __device__ float3 normalize(float3 v){ float l = sqrt(v.x*v.x + v.y*v.y + v.z*v.z); v.x /= l; v.y /= l; v.z /= l; return v; } __host__ __device__ float3 add(float3 a, float3 b){ a.x += b.x; a.y += b.y; a.z += b.z; return a; } __host__ __device__ float3 scale(float3 a, float b){ a.x *= b; a.y *= b; a.z *= b; return a; } // Prints CUDA device properties void print_properties(){ int deviceCount = 0; hipGetDeviceCount(&deviceCount); printf("Device count: %d\n", deviceCount); hipDeviceProp_t p; hipSetDevice(0); hipGetDeviceProperties (&p, 0); printf("Compute capability: %d.%d\n", p.major, p.minor); printf("Name: %s\n" , p.name); printf("\n\n"); } // Fills data with values unsigned char func(int x, int y, int z){ unsigned char value = rand() % 20; int x1 = 300; int y1 = 400; int z1 = 100; float dist = sqrt((x-x1)*(x-x1) + (y-y1)*(y-y1) + (z-z1)*(z-z1)); if(dist < 100){ value = 30; } x1 = 100; y1 = 200; z1 = 400; dist = sqrt((x-x1)*(x-x1) + (y-y1)*(y-y1) + (z-z1)*(z-z1)); if(dist < 50){ value = 50; } if(x > 200 && x < 300 && y > 300 && y < 500 && z > 200 && z < 300){ value = 45; } if(x > 0 && x < 100 && y > 250 && y < 400 && z > 250 && z < 400){ value =35; } return value; } unsigned char* create_data(){ unsigned char* data = (unsigned char*)malloc(sizeof(unsigned char) * DATA_DIM*DATA_DIM*DATA_DIM); for(int i = 0; i < DATA_DIM; i++){ for(int j = 0; j < DATA_DIM; j++){ for(int k = 0; k < DATA_DIM; k++){ data[i*DATA_DIM*DATA_DIM + j*DATA_DIM+ k]= func(k,j,i); } } } return data; } // Checks if position is inside the volume (float3 and int3 versions) __host__ __device__ int inside(float3 pos){ int x = (pos.x >= 0 && pos.x < DATA_DIM-1); int y = (pos.y >= 0 && pos.y < DATA_DIM-1); int z = (pos.z >= 0 && pos.z < DATA_DIM-1); return x && 
y && z; } __host__ __device__ int inside(int3 pos){ int x = (pos.x >= 0 && pos.x < DATA_DIM); int y = (pos.y >= 0 && pos.y < DATA_DIM); int z = (pos.z >= 0 && pos.z < DATA_DIM); return x && y && z; } // Indexing function (note the argument order) __host__ __device__ int index(int z, int y, int x){ return z * DATA_DIM*DATA_DIM + y*DATA_DIM + x; } // Trilinear interpolation __host__ __device__ float value_at(float3 pos, unsigned char* data){ if(!inside(pos)){ return 0; } int x = floor(pos.x); int y = floor(pos.y); int z = floor(pos.z); int x_u = ceil(pos.x); int y_u = ceil(pos.y); int z_u = ceil(pos.z); float rx = pos.x - x; float ry = pos.y - y; float rz = pos.z - z; float a0 = rx*data[index(z,y,x)] + (1-rx)*data[index(z,y,x_u)]; float a1 = rx*data[index(z,y_u,x)] + (1-rx)*data[index(z,y_u,x_u)]; float a2 = rx*data[index(z_u,y,x)] + (1-rx)*data[index(z_u,y,x_u)]; float a3 = rx*data[index(z_u,y_u,x)] + (1-rx)*data[index(z_u,y_u,x_u)]; float b0 = ry*a0 + (1-ry)*a1; float b1 = ry*a2 + (1-ry)*a3; float c0 = rz*b0 + (1-rz)*b1; return c0; } // Serial ray casting unsigned char* raycast_serial(unsigned char* data, unsigned char* region){ unsigned char* image = (unsigned char*)malloc(sizeof(unsigned char)*IMAGE_DIM*IMAGE_DIM); // Camera/eye position, and direction of viewing. These can be changed to look // at the volume from different angles. float3 camera = {.x=1000,.y=1000,.z=1000}; float3 forward = {.x=-1, .y=-1, .z=-1}; float3 z_axis = {.x=0, .y=0, .z = 1}; // Finding vectors aligned with the axis of the image float3 right = cross(forward, z_axis); float3 up = cross(right, forward); // Creating unity lenght vectors forward = normalize(forward); right = normalize(right); up = normalize(up); float fov = 3.14/4; float pixel_width = tan(fov/2.0)/(IMAGE_DIM/2); float step_size = 0.5; // For each pixel for(int y = -(IMAGE_DIM/2); y < (IMAGE_DIM/2); y++){ for(int x = -(IMAGE_DIM/2); x < (IMAGE_DIM/2); x++){ // Find the ray for this pixel float3 screen_center = add(camera, forward); float3 ray = add(add(screen_center, scale(right, x*pixel_width)), scale(up, y*pixel_width)); ray = add(ray, scale(camera, -1)); ray = normalize(ray); float3 pos = camera; // Move along the ray, we stop if the color becomes completely white, // or we've done 5000 iterations (5000 is a bit arbitrary, it needs // to be big enough to let rays pass through the entire volume) int i = 0; float color = 0; while(color < 255 && i < 5000){ i++; pos = add(pos, scale(ray, step_size)); // Update position int r = value_at(pos, region); // Check if we're in the region color += value_at(pos, data)*(0.01 + r) ; // Update the color based on data value, and if we're in the region } // Write final color to image image[(y+(IMAGE_DIM/2)) * IMAGE_DIM + (x+(IMAGE_DIM/2))] = color > 255 ? 255 : color; } } return image; } // Check if two values are similar, threshold can be changed. 
__host__ __device__ int similar(unsigned char* data, int3 a, int3 b){ unsigned char va = data[a.z * DATA_DIM*DATA_DIM + a.y*DATA_DIM + a.x]; unsigned char vb = data[b.z * DATA_DIM*DATA_DIM + b.y*DATA_DIM + b.x]; int i = abs(va-vb) < 1; return i; } // Serial region growing, same algorithm as in assignment 2 unsigned char* grow_region_serial(unsigned char* data){ unsigned char* region = (unsigned char*)calloc(sizeof(unsigned char), DATA_DIM*DATA_DIM*DATA_DIM); stack_t* stack = new_stack(); int3 seed = {.x=50, .y=300, .z=300}; push(stack, seed); region[seed.z *DATA_DIM*DATA_DIM + seed.y*DATA_DIM + seed.x] = 1; int dx[6] = {-1,1,0,0,0,0}; int dy[6] = {0,0,-1,1,0,0}; int dz[6] = {0,0,0,0,-1,1}; while(stack->size > 0){ int3 pixel = pop(stack); for(int n = 0; n < 6; n++){ int3 candidate = pixel; candidate.x += dx[n]; candidate.y += dy[n]; candidate.z += dz[n]; if(!inside(candidate)){ continue; } if(region[candidate.z * DATA_DIM*DATA_DIM + candidate.y*DATA_DIM + candidate.x]){ continue; } if(similar(data, pixel, candidate)){ push(stack, candidate); region[candidate.z * DATA_DIM*DATA_DIM + candidate.y*DATA_DIM + candidate.x] = 1; } } } return region; } __global__ void raycast_kernel(unsigned char* data, unsigned char* image, unsigned char* region){ // Camera/eye position, and direction of viewing. These can be changed to look // at the volume from different angles. float3 camera = {.x=1000,.y=1000,.z=1000}; float3 forward = {.x=-1, .y=-1, .z=-1}; float3 z_axis = {.x=0, .y=0, .z = 1}; // Finding vectors aligned with the axis of the image float3 right = cross(forward, z_axis); float3 up = cross(right, forward); // Creating unity lenght vectors forward = normalize(forward); right = normalize(right); up = normalize(up); float fov = float(3.14)/4; float pixel_width = tan(fov/float(2.0))/(IMAGE_DIM/2); float step_size = 0.5; int blocks_per_row = IMAGE_DIM/blockDim.x; int x = (blockIdx.x % blocks_per_row) * blockDim.x + threadIdx.x - (IMAGE_DIM/2); int y = blockIdx.x/blocks_per_row - (IMAGE_DIM/2); // Find the ray for this pixel float3 screen_center = add(camera, forward); float3 ray = add(add(screen_center, scale(right, x*pixel_width)), scale(up, y*pixel_width)); ray = add(ray, scale(camera, -1)); ray = normalize(ray); float3 pos = camera; // Move along the ray int i = 0; float color = 0; while(color < 255 && i < 5000){ i++; pos = add(pos, scale(ray, step_size)); // Update position int r = value_at(pos, region); // Check if we're in the region color += value_at(pos, data)*(float(0.01) + r) ; // Update the color based on data value, and if we're in the region } // Write final color to image image[(y+(IMAGE_DIM/2)) * IMAGE_DIM + (x+(IMAGE_DIM/2))] = color > 255 ? 255 : color; } __global__ void raycast_kernel_texture(unsigned char* image){ // Camera/eye position, and direction of viewing. These can be changed to look // at the volume from different angles. float3 camera = {.x=1000,.y=1000,.z=1000}; float3 forward = {.x=-1, .y=-1, .z=-1}; float3 z_axis = {.x=0, .y=0, .z = 1}; // Finding vectors aligned with the axis of the image float3 right = cross(forward, z_axis); float3 up = cross(right, forward); // Creating unity lenght vectors forward = normalize(forward); right = normalize(right); up = normalize(up); float fov = float(3.14)/4; float pixel_width = tan(fov/float(2.0))/(IMAGE_DIM/2); float step_size = 0.5; //Calculate x and y. 
int blocks_per_row = IMAGE_DIM/blockDim.x; int x = (blockIdx.x % blocks_per_row) * blockDim.x + threadIdx.x - (IMAGE_DIM/2); int y = blockIdx.x/blocks_per_row - (IMAGE_DIM/2); if(x >= 512 || y >= 512){ return; } // Find the ray for this pixel float3 screen_center = add(camera, forward); float3 ray = add(add(screen_center, scale(right, x*pixel_width)), scale(up, y*pixel_width)); ray = add(ray, scale(camera, -1)); ray = normalize(ray); float3 pos = camera; // Move along the ray int i = 0; float color = 0; while(color < 255 && i < 5000){ i++; pos = add(pos, scale(ray, step_size)); // Update position //Note that the texture is set to interpolate automatically int r = 255 * tex3D(region_texture, pos.x, pos.y, pos.z); // Look up value from texture if(inside(pos)){ color += 255 * tex3D(data_texture, pos.x, pos.y, pos.z)*(float(0.01) + r) ; // Update the color based on data value, and if we're in the region } } // Write final color to image image[(y+(IMAGE_DIM/2)) * IMAGE_DIM + (x+(IMAGE_DIM/2))] = color > 255 ? 255 : color; } unsigned char* raycast_gpu(unsigned char* data, unsigned char* region){ //Declare and allocate device memory unsigned char* device_image; unsigned char* device_data; unsigned char* device_region; hipMalloc(&device_image, IMAGE_SIZE_BYTES); hipMalloc(&device_data, DATA_SIZE_BYTES); hipMalloc(&device_region, DATA_SIZE_BYTES); //Copy data to the device hipMemcpy(device_data, data, DATA_SIZE_BYTES, hipMemcpyHostToDevice); hipMemcpy(device_region, region, DATA_SIZE_BYTES, hipMemcpyHostToDevice); int blocks_per_row = 64; //Must divide IMAGE_DIM. Can max be 64 int grid_size = IMAGE_DIM * blocks_per_row; int block_size = IMAGE_DIM / blocks_per_row; //Run the kernel hipLaunchKernelGGL(( raycast_kernel), dim3(grid_size), dim3(block_size), 0, 0, device_data, device_image, device_region); //Allocate memory for the result unsigned char* host_image = (unsigned char*)malloc(IMAGE_SIZE_BYTES); //Copy result from device hipMemcpy(host_image, device_image, IMAGE_SIZE_BYTES, hipMemcpyDeviceToHost); //Free device memory hipFree(device_region); hipFree(device_data); hipFree(device_image); return host_image; } unsigned char* raycast_gpu_texture(unsigned char* data, unsigned char* region){ //We let the texture interpolate automatically data_texture.filterMode = hipFilterModeLinear; region_texture.filterMode = hipFilterModeLinear; hipChannelFormatDesc channelDesc = hipCreateChannelDesc(8,0,0,0,hipChannelFormatKindUnsigned); hipExtent extent = make_hipExtent(DATA_DIM, DATA_DIM, DATA_DIM); //Allocate arrays hipArray* data_array; hipArray* region_array; hipMalloc3DArray(&region_array, &channelDesc, extent, 0); hipMalloc3DArray(&data_array, &channelDesc, extent, 0); //Copy data to region array hipMemcpy3DParms copyParams = {0}; copyParams.srcPtr = make_hipPitchedPtr(region, sizeof(char) * IMAGE_DIM, IMAGE_DIM, IMAGE_DIM); copyParams.dstArray = region_array; copyParams.extent = extent; copyParams.kind = hipMemcpyHostToDevice; hipMemcpy3D(&copyParams); //Copy data to data array copyParams.srcPtr = make_hipPitchedPtr(data, sizeof(char) * IMAGE_DIM, IMAGE_DIM, IMAGE_DIM); copyParams.dstArray = data_array; copyParams.extent = extent; copyParams.kind = hipMemcpyHostToDevice; hipMemcpy3D(&copyParams); //Bind arrays to the textures hipBindTextureToArray(data_texture, data_array); hipBindTextureToArray(region_texture, region_array); //Allocate memory for the result on the device unsigned char* device_image; hipMalloc(&device_image, IMAGE_SIZE_BYTES); int blocks_per_row = 1; //Must divide IMAGE_DIM. 
Can max be 64 int grid_size = IMAGE_DIM * blocks_per_row; int block_size = IMAGE_DIM / blocks_per_row; hipLaunchKernelGGL(( raycast_kernel_texture), dim3(grid_size), dim3(block_size), 0, 0, device_image); //Allocate memory to retrieve the result unsigned char* host_image = (unsigned char*)malloc(sizeof(unsigned char)*IMAGE_DIM*IMAGE_DIM); //Fetch the result hipMemcpy(host_image, device_image, IMAGE_SIZE_BYTES, hipMemcpyDeviceToHost); //Unbind textures hipUnbindTexture(data_texture); hipUnbindTexture(region_texture); //Free memory on the device hipFreeArray(data_array); hipFreeArray(region_array); hipFree(device_image); return host_image; } __global__ void region_grow_kernel(unsigned char* data, unsigned char* region, int* unfinished){ int3 voxel; voxel.x = blockIdx.x * blockDim.x + threadIdx.x; voxel.y = blockIdx.y * blockDim.y + threadIdx.y; voxel.z = blockIdx.z * blockDim.z + threadIdx.z; int ind = index(voxel.z, voxel.y, voxel.x); if(region[ind] == 2){ //Race conditions should not matter, as we only write 1s, and if one of them gets through it's enough *unfinished = 1; region[ind] = 1; int dx[6] = {-1,1,0,0,0,0}; int dy[6] = {0,0,-1,1,0,0}; int dz[6] = {0,0,0,0,-1,1}; for(int n = 0; n < 6; n++){ int3 candidate; candidate.x = voxel.x + dx[n]; candidate.y = voxel.y + dy[n]; candidate.z = voxel.z + dz[n]; if(!inside(candidate)){ continue; } if(region[index(candidate.z, candidate.y, candidate.x)]){ continue; } if(similar(data, voxel, candidate)){ region[index(candidate.z, candidate.y, candidate.x)] = 2; } } } } __device__ bool is_halo(int3 voxel, int dim){ if( voxel.x == 0 || voxel.y == 0 || voxel.z == 0){ return true; } if( voxel.x == dim - 1 || voxel.y == dim - 1 || voxel.z == dim - 1){ return true; } return false; } __global__ void region_grow_kernel_shared(unsigned char* data, unsigned char* region_global, int* unfinished){ //Shared array within the block. The halo of this 3D cube overlaps with other blocks extern __shared__ unsigned char region_local[]; __shared__ bool block_done; //Index of this voxel within shared data region_local int3 voxel_local; voxel_local.x = threadIdx.x; voxel_local.y = threadIdx.y; voxel_local.z = threadIdx.z; //Index of this voxel in the region_local int index_local = voxel_local.z * blockDim.y * blockDim.x + voxel_local.y * blockDim.x + voxel_local.x; //Global coordinates of this voxel int3 voxel_global; voxel_global.x = blockIdx.x * (blockDim.x - 2) + threadIdx.x - 1; voxel_global.y = blockIdx.y * (blockDim.y - 2) + threadIdx.y - 1; voxel_global.z = blockIdx.z * (blockDim.z - 2) + threadIdx.z - 1; //Index of this voxel in region_global int index_global = index(voxel_global.z, voxel_global.y, voxel_global.x); /* Some of our threads will be out of bounds of the global array. However, we can not simply return as in the other region grow kernel, because we are using __syncthreads(), which might deadlock if some threads have returned. Incidentally it did not deadlock with returns here instead, but that might be because some GPUs, but not all, keep a counter of live threads in a block and use it for synchronization instead of the initial count. Also, the barrier count is incremented with 32 each time a warp reaches the __syncthreads, so if the returning threads does not reduce the number of warps, it would also not deadlock. Anyway, returning and then using __syncthreads is a bad, bad idea. */ if(inside(voxel_global)){ //Copy global data into the shared block. 
Each thread copies one value region_local[index_local] = region_global[index_global]; } do{ block_done = true; //Sync threads here to make sure both data copy and block_done = true is completed __syncthreads(); //Important not to grow 2s on the halo, as they can't reach all neighbours //We also don't execute this for pixels outside the global volume if(region_local[index_local] == 2 && !is_halo(voxel_local, blockDim.x) && inside(voxel_global)){ region_local[index_local] = 1; int dx[6] = {-1,1,0,0,0,0}; int dy[6] = {0,0,-1,1,0,0}; int dz[6] = {0,0,0,0,-1,1}; for(int n = 0; n < 6; n++){ int3 candidate_local; candidate_local.x = voxel_local.x + dx[n]; candidate_local.y = voxel_local.y + dy[n]; candidate_local.z = voxel_local.z + dz[n]; int3 candidate_global; candidate_global.x = voxel_global.x + dx[n]; candidate_global.y = voxel_global.y + dy[n]; candidate_global.z = voxel_global.z + dz[n]; int candidate_index_local = candidate_local.z * blockDim.y * blockDim.x + candidate_local.y * blockDim.x + candidate_local.x; if(region_local[candidate_index_local] != 0){ continue; } if(similar(data, voxel_global, candidate_global)){ region_local[candidate_index_local] = 2; block_done = false; *unfinished = 1; } } } //We need to sync threads before we check block_done __syncthreads(); }while(!block_done); if(!inside(voxel_global)){ return; //There are no more __syncthreads, so it's safe to return } if(is_halo(voxel_local, blockDim.x)){ if(region_local[index_local] == 2){ //Only copy the 2s from the halo region_global[index_global] = 2; } }else{ //We want to avoid overriding 2s with 0, so only write 1s if(region_local[index_local] == 1){ region_global[index_global] = 1; } } } unsigned char* grow_region_gpu(unsigned char* host_data){ //Host variables unsigned char* host_region = (unsigned char*)calloc(sizeof(unsigned char), DATA_SIZE); int host_unfinished; //Device variables unsigned char* device_region; unsigned char* device_data; int* device_unfinished; //Allocate device memory hipMalloc(&device_region, DATA_SIZE_BYTES); hipMalloc(&device_data, DATA_SIZE_BYTES); hipMalloc(&device_unfinished, sizeof(int)); //plant seed int3 seed = {.x=50, .y=300, .z=300}; host_region[index(seed.z, seed.y, seed.x)] = 2; //Copy data to device hipMemcpy(device_region, host_region, DATA_SIZE_BYTES, hipMemcpyHostToDevice); hipMemcpy(device_data, host_data, DATA_SIZE_BYTES, hipMemcpyHostToDevice); //Calculate block and grid sizes dim3 block_size; block_size.x = 7; block_size.y = 7; block_size.z = 7; dim3 grid_size; grid_size.x = DATA_DIM / block_size.x + 1; // Add 1 to round up instead of down. 
grid_size.y = DATA_DIM / block_size.y + 1; grid_size.z = DATA_DIM / block_size.z + 1; //Run kernel untill completion do{ host_unfinished = 0; hipMemcpy(device_unfinished, &host_unfinished, 1, hipMemcpyHostToDevice); hipLaunchKernelGGL(( region_grow_kernel), dim3(grid_size), dim3(block_size), 0, 0, device_data, device_region, device_unfinished); hipMemcpy(&host_unfinished, device_unfinished, 1, hipMemcpyDeviceToHost); }while(host_unfinished); //Copy result to host hipMemcpy(host_region, device_region, DATA_SIZE_BYTES, hipMemcpyDeviceToHost); //Free device memory hipFree(device_region); hipFree(device_data); hipFree(device_unfinished); return host_region; } unsigned char* grow_region_gpu_shared(unsigned char* host_data){ //Host variables unsigned char* host_region = (unsigned char*)calloc(sizeof(unsigned char), DATA_SIZE); int host_unfinished; //Device variables unsigned char* device_region; unsigned char* device_data; int* device_unfinished; //Allocate device memory hipMalloc(&device_region, DATA_SIZE_BYTES); hipMalloc(&device_data, DATA_SIZE_BYTES); hipMalloc(&device_unfinished, sizeof(int)); //plant seed int3 seed = {.x=50, .y=300, .z=300}; host_region[index(seed.z, seed.y, seed.x)] = 2; //Copy data to device hipMemcpy(device_region, host_region, DATA_SIZE_BYTES, hipMemcpyHostToDevice); hipMemcpy(device_data, host_data, DATA_SIZE_BYTES, hipMemcpyHostToDevice); /* Block size here is padded by 2 to enable overlapping halo. So if the block_size is 9x9x9, it is a 7x7x7 block with an overlapping halo wrapping it. */ dim3 block_size; block_size.x = 10; block_size.y = 10; block_size.z = 10; /* Grid size is calculated without the halos, hence -2. */ dim3 grid_size; grid_size.x = DATA_DIM / (block_size.x - 2) + 1; grid_size.y = DATA_DIM / (block_size.y - 2) + 1; grid_size.z = DATA_DIM / (block_size.z - 2) + 1; //Calculate the size of the shared region array within the kernel int local_region_size = sizeof(char) * block_size.x * block_size.y * block_size.z; //Execute the kernel untill done do{ host_unfinished = 0; hipMemcpy(device_unfinished, &host_unfinished, 1, hipMemcpyHostToDevice); hipLaunchKernelGGL(( region_grow_kernel_shared), dim3(grid_size), dim3(block_size), local_region_size, 0, device_data, device_region, device_unfinished); hipMemcpy(&host_unfinished, device_unfinished, 1, hipMemcpyDeviceToHost); }while(host_unfinished != 0); //Copy result to host hipMemcpy(host_region, device_region, DATA_SIZE_BYTES, hipMemcpyDeviceToHost); //Free device memory hipFree(device_region); hipFree(device_data); hipFree(device_unfinished); return host_region; } int main(int argc, char** argv){ struct timeval start, end; print_properties(); unsigned char* data = create_data(); /*-------REGION GROWING--------*/ gettimeofday(&start, NULL); unsigned char* region = grow_region_gpu_shared(data); gettimeofday(&end, NULL); printf("\nGrow time:\n"); print_time(start, end); printf("Errors: %s\n", hipGetErrorString(hipGetLastError())); /*-------RAY CASTING --------*/ gettimeofday(&start, NULL); unsigned char* image = raycast_gpu_texture(data, region); gettimeofday(&end, NULL); printf("\nRaycast time: \n"); print_time(start, end); printf("Errors: %s\n", hipGetErrorString(hipGetLastError())); write_bmp(image, IMAGE_DIM, IMAGE_DIM); free(data); free(region); free(image); }
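// The grow_region_* drivers above copy the `unfinished` flag with a byte count of 1, which
// only moves the low byte of the int; that happens to behave on little-endian hosts but is
// fragile. A minimal sketch of the same frontier loop with full-width copies, reusing the
// names from grow_region_gpu (an illustration, not a drop-in patch):
#if 0
int host_unfinished;
do {
    host_unfinished = 0;
    hipMemcpy(device_unfinished, &host_unfinished, sizeof(int), hipMemcpyHostToDevice);
    hipLaunchKernelGGL((region_grow_kernel), dim3(grid_size), dim3(block_size), 0, 0,
                       device_data, device_region, device_unfinished);
    hipMemcpy(&host_unfinished, device_unfinished, sizeof(int), hipMemcpyDeviceToHost);
} while (host_unfinished);
#endif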
1b75d204453ec9f98ccf3f2e8d3a22bd96d688c2.cu
#include <stdio.h> #include <stdlib.h> #include <cuda.h> #include <math.h> #include <time.h> #include <sys/time.h> #include "bmp.h" // data is 3D, total size is DATA_DIM x DATA_DIM x DATA_DIM #define DATA_DIM 512 #define DATA_SIZE (DATA_DIM * DATA_DIM * DATA_DIM) #define DATA_SIZE_BYTES (sizeof(unsigned char) * DATA_SIZE) // image is 2D, total size is IMAGE_DIM x IMAGE_DIM #define IMAGE_DIM 512 #define IMAGE_SIZE (IMAGE_DIM * IMAGE_DIM) #define IMAGE_SIZE_BYTES (sizeof(unsigned char) * IMAGE_SIZE) texture<char, cudaTextureType3D, cudaReadModeNormalizedFloat> data_texture; texture<char, cudaTextureType3D, cudaReadModeNormalizedFloat> region_texture; void print_time(struct timeval start, struct timeval end){ long int ms = ((end.tv_sec * 1000000 + end.tv_usec) - (start.tv_sec * 1000000 + start.tv_usec)); double s = ms/1e6; printf("Time : %f s\n", s); } // Stack for the serial region growing typedef struct{ int size; int buffer_size; int3* pixels; } stack_t; stack_t* new_stack(){ stack_t* stack = (stack_t*)malloc(sizeof(stack_t)); stack->size = 0; stack->buffer_size = 1024; stack->pixels = (int3*)malloc(sizeof(int3)*1024); return stack; } void push(stack_t* stack, int3 p){ if(stack->size == stack->buffer_size){ stack->buffer_size *= 2; int3* temp = stack->pixels; stack->pixels = (int3*)malloc(sizeof(int3)*stack->buffer_size); memcpy(stack->pixels, temp, sizeof(int3)*stack->buffer_size/2); free(temp); } stack->pixels[stack->size] = p; stack->size += 1; } int3 pop(stack_t* stack){ stack->size -= 1; return stack->pixels[stack->size]; } // float3 utilities __host__ __device__ float3 cross(float3 a, float3 b){ float3 c; c.x = a.y*b.z - a.z*b.y; c.y = a.z*b.x - a.x*b.z; c.z = a.x*b.y - a.y*b.x; return c; } __host__ __device__ float3 normalize(float3 v){ float l = sqrt(v.x*v.x + v.y*v.y + v.z*v.z); v.x /= l; v.y /= l; v.z /= l; return v; } __host__ __device__ float3 add(float3 a, float3 b){ a.x += b.x; a.y += b.y; a.z += b.z; return a; } __host__ __device__ float3 scale(float3 a, float b){ a.x *= b; a.y *= b; a.z *= b; return a; } // Prints CUDA device properties void print_properties(){ int deviceCount = 0; cudaGetDeviceCount(&deviceCount); printf("Device count: %d\n", deviceCount); cudaDeviceProp p; cudaSetDevice(0); cudaGetDeviceProperties (&p, 0); printf("Compute capability: %d.%d\n", p.major, p.minor); printf("Name: %s\n" , p.name); printf("\n\n"); } // Fills data with values unsigned char func(int x, int y, int z){ unsigned char value = rand() % 20; int x1 = 300; int y1 = 400; int z1 = 100; float dist = sqrt((x-x1)*(x-x1) + (y-y1)*(y-y1) + (z-z1)*(z-z1)); if(dist < 100){ value = 30; } x1 = 100; y1 = 200; z1 = 400; dist = sqrt((x-x1)*(x-x1) + (y-y1)*(y-y1) + (z-z1)*(z-z1)); if(dist < 50){ value = 50; } if(x > 200 && x < 300 && y > 300 && y < 500 && z > 200 && z < 300){ value = 45; } if(x > 0 && x < 100 && y > 250 && y < 400 && z > 250 && z < 400){ value =35; } return value; } unsigned char* create_data(){ unsigned char* data = (unsigned char*)malloc(sizeof(unsigned char) * DATA_DIM*DATA_DIM*DATA_DIM); for(int i = 0; i < DATA_DIM; i++){ for(int j = 0; j < DATA_DIM; j++){ for(int k = 0; k < DATA_DIM; k++){ data[i*DATA_DIM*DATA_DIM + j*DATA_DIM+ k]= func(k,j,i); } } } return data; } // Checks if position is inside the volume (float3 and int3 versions) __host__ __device__ int inside(float3 pos){ int x = (pos.x >= 0 && pos.x < DATA_DIM-1); int y = (pos.y >= 0 && pos.y < DATA_DIM-1); int z = (pos.z >= 0 && pos.z < DATA_DIM-1); return x && y && z; } __host__ __device__ int inside(int3 pos){ int x = 
(pos.x >= 0 && pos.x < DATA_DIM); int y = (pos.y >= 0 && pos.y < DATA_DIM); int z = (pos.z >= 0 && pos.z < DATA_DIM); return x && y && z; } // Indexing function (note the argument order) __host__ __device__ int index(int z, int y, int x){ return z * DATA_DIM*DATA_DIM + y*DATA_DIM + x; } // Trilinear interpolation __host__ __device__ float value_at(float3 pos, unsigned char* data){ if(!inside(pos)){ return 0; } int x = floor(pos.x); int y = floor(pos.y); int z = floor(pos.z); int x_u = ceil(pos.x); int y_u = ceil(pos.y); int z_u = ceil(pos.z); float rx = pos.x - x; float ry = pos.y - y; float rz = pos.z - z; float a0 = rx*data[index(z,y,x)] + (1-rx)*data[index(z,y,x_u)]; float a1 = rx*data[index(z,y_u,x)] + (1-rx)*data[index(z,y_u,x_u)]; float a2 = rx*data[index(z_u,y,x)] + (1-rx)*data[index(z_u,y,x_u)]; float a3 = rx*data[index(z_u,y_u,x)] + (1-rx)*data[index(z_u,y_u,x_u)]; float b0 = ry*a0 + (1-ry)*a1; float b1 = ry*a2 + (1-ry)*a3; float c0 = rz*b0 + (1-rz)*b1; return c0; } // Serial ray casting unsigned char* raycast_serial(unsigned char* data, unsigned char* region){ unsigned char* image = (unsigned char*)malloc(sizeof(unsigned char)*IMAGE_DIM*IMAGE_DIM); // Camera/eye position, and direction of viewing. These can be changed to look // at the volume from different angles. float3 camera = {.x=1000,.y=1000,.z=1000}; float3 forward = {.x=-1, .y=-1, .z=-1}; float3 z_axis = {.x=0, .y=0, .z = 1}; // Finding vectors aligned with the axis of the image float3 right = cross(forward, z_axis); float3 up = cross(right, forward); // Creating unity lenght vectors forward = normalize(forward); right = normalize(right); up = normalize(up); float fov = 3.14/4; float pixel_width = tan(fov/2.0)/(IMAGE_DIM/2); float step_size = 0.5; // For each pixel for(int y = -(IMAGE_DIM/2); y < (IMAGE_DIM/2); y++){ for(int x = -(IMAGE_DIM/2); x < (IMAGE_DIM/2); x++){ // Find the ray for this pixel float3 screen_center = add(camera, forward); float3 ray = add(add(screen_center, scale(right, x*pixel_width)), scale(up, y*pixel_width)); ray = add(ray, scale(camera, -1)); ray = normalize(ray); float3 pos = camera; // Move along the ray, we stop if the color becomes completely white, // or we've done 5000 iterations (5000 is a bit arbitrary, it needs // to be big enough to let rays pass through the entire volume) int i = 0; float color = 0; while(color < 255 && i < 5000){ i++; pos = add(pos, scale(ray, step_size)); // Update position int r = value_at(pos, region); // Check if we're in the region color += value_at(pos, data)*(0.01 + r) ; // Update the color based on data value, and if we're in the region } // Write final color to image image[(y+(IMAGE_DIM/2)) * IMAGE_DIM + (x+(IMAGE_DIM/2))] = color > 255 ? 255 : color; } } return image; } // Check if two values are similar, threshold can be changed. 
__host__ __device__ int similar(unsigned char* data, int3 a, int3 b){ unsigned char va = data[a.z * DATA_DIM*DATA_DIM + a.y*DATA_DIM + a.x]; unsigned char vb = data[b.z * DATA_DIM*DATA_DIM + b.y*DATA_DIM + b.x]; int i = abs(va-vb) < 1; return i; } // Serial region growing, same algorithm as in assignment 2 unsigned char* grow_region_serial(unsigned char* data){ unsigned char* region = (unsigned char*)calloc(sizeof(unsigned char), DATA_DIM*DATA_DIM*DATA_DIM); stack_t* stack = new_stack(); int3 seed = {.x=50, .y=300, .z=300}; push(stack, seed); region[seed.z *DATA_DIM*DATA_DIM + seed.y*DATA_DIM + seed.x] = 1; int dx[6] = {-1,1,0,0,0,0}; int dy[6] = {0,0,-1,1,0,0}; int dz[6] = {0,0,0,0,-1,1}; while(stack->size > 0){ int3 pixel = pop(stack); for(int n = 0; n < 6; n++){ int3 candidate = pixel; candidate.x += dx[n]; candidate.y += dy[n]; candidate.z += dz[n]; if(!inside(candidate)){ continue; } if(region[candidate.z * DATA_DIM*DATA_DIM + candidate.y*DATA_DIM + candidate.x]){ continue; } if(similar(data, pixel, candidate)){ push(stack, candidate); region[candidate.z * DATA_DIM*DATA_DIM + candidate.y*DATA_DIM + candidate.x] = 1; } } } return region; } __global__ void raycast_kernel(unsigned char* data, unsigned char* image, unsigned char* region){ // Camera/eye position, and direction of viewing. These can be changed to look // at the volume from different angles. float3 camera = {.x=1000,.y=1000,.z=1000}; float3 forward = {.x=-1, .y=-1, .z=-1}; float3 z_axis = {.x=0, .y=0, .z = 1}; // Finding vectors aligned with the axis of the image float3 right = cross(forward, z_axis); float3 up = cross(right, forward); // Creating unity lenght vectors forward = normalize(forward); right = normalize(right); up = normalize(up); float fov = float(3.14)/4; float pixel_width = tan(fov/float(2.0))/(IMAGE_DIM/2); float step_size = 0.5; int blocks_per_row = IMAGE_DIM/blockDim.x; int x = (blockIdx.x % blocks_per_row) * blockDim.x + threadIdx.x - (IMAGE_DIM/2); int y = blockIdx.x/blocks_per_row - (IMAGE_DIM/2); // Find the ray for this pixel float3 screen_center = add(camera, forward); float3 ray = add(add(screen_center, scale(right, x*pixel_width)), scale(up, y*pixel_width)); ray = add(ray, scale(camera, -1)); ray = normalize(ray); float3 pos = camera; // Move along the ray int i = 0; float color = 0; while(color < 255 && i < 5000){ i++; pos = add(pos, scale(ray, step_size)); // Update position int r = value_at(pos, region); // Check if we're in the region color += value_at(pos, data)*(float(0.01) + r) ; // Update the color based on data value, and if we're in the region } // Write final color to image image[(y+(IMAGE_DIM/2)) * IMAGE_DIM + (x+(IMAGE_DIM/2))] = color > 255 ? 255 : color; } __global__ void raycast_kernel_texture(unsigned char* image){ // Camera/eye position, and direction of viewing. These can be changed to look // at the volume from different angles. float3 camera = {.x=1000,.y=1000,.z=1000}; float3 forward = {.x=-1, .y=-1, .z=-1}; float3 z_axis = {.x=0, .y=0, .z = 1}; // Finding vectors aligned with the axis of the image float3 right = cross(forward, z_axis); float3 up = cross(right, forward); // Creating unity lenght vectors forward = normalize(forward); right = normalize(right); up = normalize(up); float fov = float(3.14)/4; float pixel_width = tan(fov/float(2.0))/(IMAGE_DIM/2); float step_size = 0.5; //Calculate x and y. 
int blocks_per_row = IMAGE_DIM/blockDim.x; int x = (blockIdx.x % blocks_per_row) * blockDim.x + threadIdx.x - (IMAGE_DIM/2); int y = blockIdx.x/blocks_per_row - (IMAGE_DIM/2); if(x >= 512 || y >= 512){ return; } // Find the ray for this pixel float3 screen_center = add(camera, forward); float3 ray = add(add(screen_center, scale(right, x*pixel_width)), scale(up, y*pixel_width)); ray = add(ray, scale(camera, -1)); ray = normalize(ray); float3 pos = camera; // Move along the ray int i = 0; float color = 0; while(color < 255 && i < 5000){ i++; pos = add(pos, scale(ray, step_size)); // Update position //Note that the texture is set to interpolate automatically int r = 255 * tex3D(region_texture, pos.x, pos.y, pos.z); // Look up value from texture if(inside(pos)){ color += 255 * tex3D(data_texture, pos.x, pos.y, pos.z)*(float(0.01) + r) ; // Update the color based on data value, and if we're in the region } } // Write final color to image image[(y+(IMAGE_DIM/2)) * IMAGE_DIM + (x+(IMAGE_DIM/2))] = color > 255 ? 255 : color; } unsigned char* raycast_gpu(unsigned char* data, unsigned char* region){ //Declare and allocate device memory unsigned char* device_image; unsigned char* device_data; unsigned char* device_region; cudaMalloc(&device_image, IMAGE_SIZE_BYTES); cudaMalloc(&device_data, DATA_SIZE_BYTES); cudaMalloc(&device_region, DATA_SIZE_BYTES); //Copy data to the device cudaMemcpy(device_data, data, DATA_SIZE_BYTES, cudaMemcpyHostToDevice); cudaMemcpy(device_region, region, DATA_SIZE_BYTES, cudaMemcpyHostToDevice); int blocks_per_row = 64; //Must divide IMAGE_DIM. Can max be 64 int grid_size = IMAGE_DIM * blocks_per_row; int block_size = IMAGE_DIM / blocks_per_row; //Run the kernel raycast_kernel<<<grid_size, block_size>>>(device_data, device_image, device_region); //Allocate memory for the result unsigned char* host_image = (unsigned char*)malloc(IMAGE_SIZE_BYTES); //Copy result from device cudaMemcpy(host_image, device_image, IMAGE_SIZE_BYTES, cudaMemcpyDeviceToHost); //Free device memory cudaFree(device_region); cudaFree(device_data); cudaFree(device_image); return host_image; } unsigned char* raycast_gpu_texture(unsigned char* data, unsigned char* region){ //We let the texture interpolate automatically data_texture.filterMode = cudaFilterModeLinear; region_texture.filterMode = cudaFilterModeLinear; cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc(8,0,0,0,cudaChannelFormatKindUnsigned); cudaExtent extent = make_cudaExtent(DATA_DIM, DATA_DIM, DATA_DIM); //Allocate arrays cudaArray* data_array; cudaArray* region_array; cudaMalloc3DArray(&region_array, &channelDesc, extent, 0); cudaMalloc3DArray(&data_array, &channelDesc, extent, 0); //Copy data to region array cudaMemcpy3DParms copyParams = {0}; copyParams.srcPtr = make_cudaPitchedPtr(region, sizeof(char) * IMAGE_DIM, IMAGE_DIM, IMAGE_DIM); copyParams.dstArray = region_array; copyParams.extent = extent; copyParams.kind = cudaMemcpyHostToDevice; cudaMemcpy3D(&copyParams); //Copy data to data array copyParams.srcPtr = make_cudaPitchedPtr(data, sizeof(char) * IMAGE_DIM, IMAGE_DIM, IMAGE_DIM); copyParams.dstArray = data_array; copyParams.extent = extent; copyParams.kind = cudaMemcpyHostToDevice; cudaMemcpy3D(&copyParams); //Bind arrays to the textures cudaBindTextureToArray(data_texture, data_array); cudaBindTextureToArray(region_texture, region_array); //Allocate memory for the result on the device unsigned char* device_image; cudaMalloc(&device_image, IMAGE_SIZE_BYTES); int blocks_per_row = 1; //Must divide IMAGE_DIM. 
Can max be 64 int grid_size = IMAGE_DIM * blocks_per_row; int block_size = IMAGE_DIM / blocks_per_row; raycast_kernel_texture<<<grid_size, block_size>>>(device_image); //Allocate memory to retrieve the result unsigned char* host_image = (unsigned char*)malloc(sizeof(unsigned char)*IMAGE_DIM*IMAGE_DIM); //Fetch the result cudaMemcpy(host_image, device_image, IMAGE_SIZE_BYTES, cudaMemcpyDeviceToHost); //Unbind textures cudaUnbindTexture(data_texture); cudaUnbindTexture(region_texture); //Free memory on the device cudaFreeArray(data_array); cudaFreeArray(region_array); cudaFree(device_image); return host_image; } __global__ void region_grow_kernel(unsigned char* data, unsigned char* region, int* unfinished){ int3 voxel; voxel.x = blockIdx.x * blockDim.x + threadIdx.x; voxel.y = blockIdx.y * blockDim.y + threadIdx.y; voxel.z = blockIdx.z * blockDim.z + threadIdx.z; int ind = index(voxel.z, voxel.y, voxel.x); if(region[ind] == 2){ //Race conditions should not matter, as we only write 1s, and if one of them gets through it's enough *unfinished = 1; region[ind] = 1; int dx[6] = {-1,1,0,0,0,0}; int dy[6] = {0,0,-1,1,0,0}; int dz[6] = {0,0,0,0,-1,1}; for(int n = 0; n < 6; n++){ int3 candidate; candidate.x = voxel.x + dx[n]; candidate.y = voxel.y + dy[n]; candidate.z = voxel.z + dz[n]; if(!inside(candidate)){ continue; } if(region[index(candidate.z, candidate.y, candidate.x)]){ continue; } if(similar(data, voxel, candidate)){ region[index(candidate.z, candidate.y, candidate.x)] = 2; } } } } __device__ bool is_halo(int3 voxel, int dim){ if( voxel.x == 0 || voxel.y == 0 || voxel.z == 0){ return true; } if( voxel.x == dim - 1 || voxel.y == dim - 1 || voxel.z == dim - 1){ return true; } return false; } __global__ void region_grow_kernel_shared(unsigned char* data, unsigned char* region_global, int* unfinished){ //Shared array within the block. The halo of this 3D cube overlaps with other blocks extern __shared__ unsigned char region_local[]; __shared__ bool block_done; //Index of this voxel within shared data region_local int3 voxel_local; voxel_local.x = threadIdx.x; voxel_local.y = threadIdx.y; voxel_local.z = threadIdx.z; //Index of this voxel in the region_local int index_local = voxel_local.z * blockDim.y * blockDim.x + voxel_local.y * blockDim.x + voxel_local.x; //Global coordinates of this voxel int3 voxel_global; voxel_global.x = blockIdx.x * (blockDim.x - 2) + threadIdx.x - 1; voxel_global.y = blockIdx.y * (blockDim.y - 2) + threadIdx.y - 1; voxel_global.z = blockIdx.z * (blockDim.z - 2) + threadIdx.z - 1; //Index of this voxel in region_global int index_global = index(voxel_global.z, voxel_global.y, voxel_global.x); /* Some of our threads will be out of bounds of the global array. However, we can not simply return as in the other region grow kernel, because we are using __syncthreads(), which might deadlock if some threads have returned. Incidentally it did not deadlock with returns here instead, but that might be because some GPUs, but not all, keep a counter of live threads in a block and use it for synchronization instead of the initial count. Also, the barrier count is incremented with 32 each time a warp reaches the __syncthreads, so if the returning threads does not reduce the number of warps, it would also not deadlock. Anyway, returning and then using __syncthreads is a bad, bad idea. */ if(inside(voxel_global)){ //Copy global data into the shared block. 
Each thread copies one value region_local[index_local] = region_global[index_global]; } do{ block_done = true; //Sync threads here to make sure both data copy and block_done = true is completed __syncthreads(); //Important not to grow 2s on the halo, as they can't reach all neighbours //We also don't execute this for pixels outside the global volume if(region_local[index_local] == 2 && !is_halo(voxel_local, blockDim.x) && inside(voxel_global)){ region_local[index_local] = 1; int dx[6] = {-1,1,0,0,0,0}; int dy[6] = {0,0,-1,1,0,0}; int dz[6] = {0,0,0,0,-1,1}; for(int n = 0; n < 6; n++){ int3 candidate_local; candidate_local.x = voxel_local.x + dx[n]; candidate_local.y = voxel_local.y + dy[n]; candidate_local.z = voxel_local.z + dz[n]; int3 candidate_global; candidate_global.x = voxel_global.x + dx[n]; candidate_global.y = voxel_global.y + dy[n]; candidate_global.z = voxel_global.z + dz[n]; int candidate_index_local = candidate_local.z * blockDim.y * blockDim.x + candidate_local.y * blockDim.x + candidate_local.x; if(region_local[candidate_index_local] != 0){ continue; } if(similar(data, voxel_global, candidate_global)){ region_local[candidate_index_local] = 2; block_done = false; *unfinished = 1; } } } //We need to sync threads before we check block_done __syncthreads(); }while(!block_done); if(!inside(voxel_global)){ return; //There are no more __syncthreads, so it's safe to return } if(is_halo(voxel_local, blockDim.x)){ if(region_local[index_local] == 2){ //Only copy the 2s from the halo region_global[index_global] = 2; } }else{ //We want to avoid overriding 2s with 0, so only write 1s if(region_local[index_local] == 1){ region_global[index_global] = 1; } } } unsigned char* grow_region_gpu(unsigned char* host_data){ //Host variables unsigned char* host_region = (unsigned char*)calloc(sizeof(unsigned char), DATA_SIZE); int host_unfinished; //Device variables unsigned char* device_region; unsigned char* device_data; int* device_unfinished; //Allocate device memory cudaMalloc(&device_region, DATA_SIZE_BYTES); cudaMalloc(&device_data, DATA_SIZE_BYTES); cudaMalloc(&device_unfinished, sizeof(int)); //plant seed int3 seed = {.x=50, .y=300, .z=300}; host_region[index(seed.z, seed.y, seed.x)] = 2; //Copy data to device cudaMemcpy(device_region, host_region, DATA_SIZE_BYTES, cudaMemcpyHostToDevice); cudaMemcpy(device_data, host_data, DATA_SIZE_BYTES, cudaMemcpyHostToDevice); //Calculate block and grid sizes dim3 block_size; block_size.x = 7; block_size.y = 7; block_size.z = 7; dim3 grid_size; grid_size.x = DATA_DIM / block_size.x + 1; // Add 1 to round up instead of down. 
grid_size.y = DATA_DIM / block_size.y + 1;
    grid_size.z = DATA_DIM / block_size.z + 1;

    //Run kernel until completion
    do{
        host_unfinished = 0;
        cudaMemcpy(device_unfinished, &host_unfinished, sizeof(int), cudaMemcpyHostToDevice);

        region_grow_kernel<<<grid_size, block_size>>>(device_data, device_region, device_unfinished);

        cudaMemcpy(&host_unfinished, device_unfinished, sizeof(int), cudaMemcpyDeviceToHost);
    }while(host_unfinished);

    //Copy result to host
    cudaMemcpy(host_region, device_region, DATA_SIZE_BYTES, cudaMemcpyDeviceToHost);

    //Free device memory
    cudaFree(device_region);
    cudaFree(device_data);
    cudaFree(device_unfinished);

    return host_region;
}

unsigned char* grow_region_gpu_shared(unsigned char* host_data){
    //Host variables
    unsigned char* host_region = (unsigned char*)calloc(sizeof(unsigned char), DATA_SIZE);
    int host_unfinished;

    //Device variables
    unsigned char* device_region;
    unsigned char* device_data;
    int* device_unfinished;

    //Allocate device memory
    cudaMalloc(&device_region, DATA_SIZE_BYTES);
    cudaMalloc(&device_data, DATA_SIZE_BYTES);
    cudaMalloc(&device_unfinished, sizeof(int));

    //plant seed
    int3 seed = {.x=50, .y=300, .z=300};
    host_region[index(seed.z, seed.y, seed.x)] = 2;

    //Copy data to device
    cudaMemcpy(device_region, host_region, DATA_SIZE_BYTES, cudaMemcpyHostToDevice);
    cudaMemcpy(device_data, host_data, DATA_SIZE_BYTES, cudaMemcpyHostToDevice);

    /*
    Block size here is padded by 2 to enable overlapping halo.
    So if the block_size is 9x9x9, it is a 7x7x7 block with an
    overlapping halo wrapping it.
    */
    dim3 block_size;
    block_size.x = 10;
    block_size.y = 10;
    block_size.z = 10;

    /*
    Grid size is calculated without the halos, hence -2.
    */
    dim3 grid_size;
    grid_size.x = DATA_DIM / (block_size.x - 2) + 1;
    grid_size.y = DATA_DIM / (block_size.y - 2) + 1;
    grid_size.z = DATA_DIM / (block_size.z - 2) + 1;

    //Calculate the size of the shared region array within the kernel
    int local_region_size = sizeof(char) * block_size.x * block_size.y * block_size.z;

    //Execute the kernel until done
    do{
        host_unfinished = 0;
        cudaMemcpy(device_unfinished, &host_unfinished, sizeof(int), cudaMemcpyHostToDevice);

        region_grow_kernel_shared<<<grid_size, block_size, local_region_size>>>(device_data, device_region, device_unfinished);

        cudaMemcpy(&host_unfinished, device_unfinished, sizeof(int), cudaMemcpyDeviceToHost);
    }while(host_unfinished != 0);

    //Copy result to host
    cudaMemcpy(host_region, device_region, DATA_SIZE_BYTES, cudaMemcpyDeviceToHost);

    //Free device memory
    cudaFree(device_region);
    cudaFree(device_data);
    cudaFree(device_unfinished);

    return host_region;
}

int main(int argc, char** argv){
    struct timeval start, end;

    print_properties();

    unsigned char* data = create_data();

    /*-------REGION GROWING--------*/
    gettimeofday(&start, NULL);
    unsigned char* region = grow_region_gpu_shared(data);
    gettimeofday(&end, NULL);

    printf("\nGrow time:\n");
    print_time(start, end);
    printf("Errors: %s\n", cudaGetErrorString(cudaGetLastError()));

    /*-------RAY CASTING --------*/
    gettimeofday(&start, NULL);
    unsigned char* image = raycast_gpu_texture(data, region);
    gettimeofday(&end, NULL);

    printf("\nRaycast time: \n");
    print_time(start, end);
    printf("Errors: %s\n", cudaGetErrorString(cudaGetLastError()));

    write_bmp(image, IMAGE_DIM, IMAGE_DIM);

    free(data);
    free(region);
    free(image);
}
00fa4485fa43ce18cbc08d86f56cc076dfb52a68.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "sampling.h" #include <stdio.h> #include <stdlib.h> //#define DEBUG #include "cmath" #define TOTAL_THREADS 1024 #define THREADS_PER_BLOCK 256 #define DIVUP(m,n) ((m) / (n) + ((m) % (n) > 0)) #include "math.h" __global__ void addkernel(int row, int col, const float *aa, const float *bb, float *cc){ int i = blockIdx.x*blockDim.x+threadIdx.x; int j = blockIdx.y*blockDim.y+threadIdx.x; cc[i*col+j]=aa[i*col+j]+bb[i*col+j]; // printf("i:%d,j:%d ",i,j,"\n"); }; void AddLaunch(const float *a,const float *b,float *c, int size[],hipStream_t stream){ // int *pts_assign = NULL; // hipMalloc(&pts_assign, 1 * 1 * 1 * sizeof(int)); // int N = DIVUP(3,5); int x = size[0],y = size[1]; dim3 numblock(2,3,1); dim3 threadPerBlock(1); hipLaunchKernelGGL(( addkernel), dim3(numblock),dim3(threadPerBlock),0,stream, x,y,a,b,c); // hipDeviceSynchronize(); // for using printf in kernel function } inline int opt_n_threads(int work_size) { const int pow_2 = ::log(static_cast<double>(work_size)) / ::log(2.0); return max(min(1 << pow_2, TOTAL_THREADS), 1); } __global__ void gather_points_kernel_fast(int b, int c, int n, int m, const float *__restrict__ points, const int *__restrict__ idx, float *__restrict__ out) { // points: (B, C, N) // idx: (B, M) // output: // out: (B, C, M) int bs_idx = blockIdx.z; int c_idx = blockIdx.y; int pt_idx = blockIdx.x * blockDim.x + threadIdx.x; if (bs_idx >= b || c_idx >= c || pt_idx >= m) return; out += bs_idx * c * m + c_idx * m + pt_idx; idx += bs_idx * m + pt_idx; points += bs_idx * c * n + c_idx * n; out[0] = points[idx[0]]; } __device__ void __update(float *__restrict__ dists, int *__restrict__ dists_i, int idx1, int idx2){ const float v1 = dists[idx1], v2 = dists[idx2]; const int i1 = dists_i[idx1], i2 = dists_i[idx2]; dists[idx1] = max(v1, v2); dists_i[idx1] = v2 > v1 ? i2 : i1; } template <unsigned int block_size> __global__ void furthest_point_sampling_kernel(int b, int n, int m, const float *__restrict__ dataset, float *__restrict__ temp, int *__restrict__ idxs) { // dataset: (B, N, 3) // tmp: (B, N) //c is the num of channel // output: // idx: (B, M) if (m <= 0) return; __shared__ float dists[block_size]; __shared__ int dists_i[block_size]; // printf("block size %d", block_size); int batch_index = blockIdx.x;//range from 0-batch size dataset += batch_index * n * 3; temp += batch_index * n; idxs += batch_index * m; int tid= threadIdx.x; const int stride = block_size; int old = 0; if (threadIdx.x == 0) idxs[0] = old; __syncthreads(); for (int j = 1; j < m; j++) { int besti = 0; float best = -1; float x1 = dataset[old * 3 + 0]; float y1 = dataset[old * 3 + 1]; float z1 = dataset[old * 3 + 2]; int for_cout = 1; for (int k = tid; k < n; k += stride) { float x2, y2, z2; x2 = dataset[k * 3 + 0]; y2 = dataset[k * 3 + 1]; z2 = dataset[k * 3 + 2]; // float mag = (x2 * x2) + (y2 * y2) + (z2 * z2); // if (mag <= 1e-3) // continue; float d = (x2 - x1) * (x2 - x1) + (y2 - y1) * (y2 - y1) + (z2 - z1) * (z2 - z1); float d2 = min(d, temp[k]); temp[k] = d2;//the distance to fps points besti = d2 > best ? k : besti;//the further point id best = d2 > best ? 
d2 : best;// the further point distance
            for_cout++;
        }
        // printf("for cout %d", for_cout);
        // printf("bockIdx.x %d,threadIdx.x:%d, d2:%.4f",batch_index,tid,for_cout);
        dists[tid] = best;
        dists_i[tid] = besti;
        __syncthreads();

        if (block_size >= 1024) { if (tid < 512) { __update(dists, dists_i, tid, tid + 512); } __syncthreads(); }
        if (block_size >= 512) { if (tid < 256) { __update(dists, dists_i, tid, tid + 256); } __syncthreads(); }
        if (block_size >= 256) { if (tid < 128) { __update(dists, dists_i, tid, tid + 128); } __syncthreads(); }
        if (block_size >= 128) { if (tid < 64) { __update(dists, dists_i, tid, tid + 64); } __syncthreads(); }
        if (block_size >= 64) { if (tid < 32) { __update(dists, dists_i, tid, tid + 32); } __syncthreads(); }
        if (block_size >= 32) { if (tid < 16) { __update(dists, dists_i, tid, tid + 16); } __syncthreads(); }
        if (block_size >= 16) { if (tid < 8) { __update(dists, dists_i, tid, tid + 8); } __syncthreads(); }
        if (block_size >= 8) { if (tid < 4) { __update(dists, dists_i, tid, tid + 4); } __syncthreads(); }
        if (block_size >= 4) { if (tid < 2) { __update(dists, dists_i, tid, tid + 2); } __syncthreads(); }
        if (block_size >= 2) { if (tid < 1) { __update(dists, dists_i, tid, tid + 1); } __syncthreads(); }

        old = dists_i[0];
        if (tid == 0) idxs[j] = old;
    }
}

void gather_points_kernel_launcher_fast(int b, int c, int n, int npoints, const float *points, const int *idx, float *out, hipStream_t stream) {
    // points: (B, C, N)
    // idx: (B, npoints)
    // output:
    //     out: (B, C, npoints)
    hipError_t err;
    dim3 blocks(DIVUP(npoints, THREADS_PER_BLOCK), c, b);  // blockIdx.x(col), blockIdx.y(row)
    dim3 threads(THREADS_PER_BLOCK);

    hipLaunchKernelGGL(( gather_points_kernel_fast), dim3(blocks), dim3(threads), 0, stream, b, c, n, npoints, points, idx, out);

    err = hipGetLastError();
    if (hipSuccess != err) {
        fprintf(stderr, "CUDA kernel failed : %s\n", hipGetErrorString(err));
        exit(-1);
    }
}

void furthest_point_sampling_kernel_launcher(int b, int n, int m, const float *dataset, float *temp, int *idxs, hipStream_t stream) {
    // dataset: (B, N, 3)
    // tmp: (B, N)
    // output:
    //     idx: (B, M)
    hipError_t err;
    unsigned int n_threads = opt_n_threads(n);

    switch (n_threads) {
        case 1024: hipLaunchKernelGGL(( furthest_point_sampling_kernel<1024>), dim3(b), dim3(n_threads), 0, stream, b, n, m, dataset, temp, idxs); break;
        case 512: hipLaunchKernelGGL(( furthest_point_sampling_kernel<512>), dim3(b), dim3(n_threads), 0, stream, b, n, m, dataset, temp, idxs); break;
        case 256: hipLaunchKernelGGL(( furthest_point_sampling_kernel<256>), dim3(b), dim3(n_threads), 0, stream, b, n, m, dataset, temp, idxs); break;
        case 128: hipLaunchKernelGGL(( furthest_point_sampling_kernel<128>), dim3(b), dim3(n_threads), 0, stream, b, n, m, dataset, temp, idxs); break;
        case 64: hipLaunchKernelGGL(( furthest_point_sampling_kernel<64>), dim3(b), dim3(n_threads), 0, stream, b, n, m, dataset, temp, idxs); break;
        case 32: hipLaunchKernelGGL(( furthest_point_sampling_kernel<32>), dim3(b), dim3(n_threads), 0, stream, b, n, m, dataset, temp, idxs); break;
        case 16: hipLaunchKernelGGL(( furthest_point_sampling_kernel<16>), dim3(b), dim3(n_threads), 0, stream, b, n, m, dataset, temp, idxs); break;
        case 8: hipLaunchKernelGGL(( furthest_point_sampling_kernel<8>), dim3(b), dim3(n_threads), 0, stream, b, n, m, dataset, temp, idxs); break;
        case 4: hipLaunchKernelGGL(( furthest_point_sampling_kernel<4>), dim3(b), dim3(n_threads), 0, stream, b, n, m, dataset, temp, idxs); break;
        case 2: hipLaunchKernelGGL(( furthest_point_sampling_kernel<2>),
dim3(b), dim3(n_threads), 0, stream, b, n, m, dataset, temp, idxs); break; case 1: hipLaunchKernelGGL(( furthest_point_sampling_kernel<1>), dim3(b), dim3(n_threads), 0, stream, b, n, m, dataset, temp, idxs); break; default: hipLaunchKernelGGL(( furthest_point_sampling_kernel<512>), dim3(b), dim3(n_threads), 0, stream, b, n, m, dataset, temp, idxs); } // hipDeviceSynchronize();//using for print in kernel function err = hipGetLastError(); if (hipSuccess != err) { fprintf(stderr, "CUDA kernel failed : %s\n", hipGetErrorString(err)); exit(-1); } } template <unsigned int block_size> __global__ void further_sample_points_with_features_kernel(int b, int n, int m,int m1,int c, const float *__restrict__ dataset,const int *__restrict__ predix, float *__restrict__ temp,float *__restrict__ values1,float *__restrict__ values2, int *__restrict__ idxs){ if (m <= 0) return; __shared__ float dist[block_size]; __shared__ int disti[block_size]; int bs = blockIdx.x; dataset += bs*n*c; temp += bs*n; idxs += bs*m; predix += bs*m1; // predix += bs*m1;0 int tid = threadIdx.x; const int stride = block_size; int pred_idx; int sums = 0; for (int j = tid;j<n; j += stride){//Traverse all raw points // printf(" %d ",j); sums++; float pred_best = 1e38; float pred_p1,pred_p2; for(int k=0;k<m1;k++){ // printf(" %d ",k);//traverse all forground points to inital temp pred_idx = predix[k]; float pred_d = 0; for(int c_id=0;c_id<c;c_id++){ pred_p1= dataset[pred_idx*c+c_id]; pred_p2 = dataset[j*c+c_id]; pred_d += (pred_p2-pred_p1)* (pred_p2-pred_p1); }//calculate (x2-x1)*(x2-x1) + (y2-y1)*(y2-y1) + (y2-y1)*(y2-y1)... pred_best = min(pred_best,pred_d); } temp[j] = pred_best; } __syncthreads();//initalize the old and pred_best int old=0; float pred_best = -1; for (int i=0; i<n;i++){ if(pred_best<temp[i]){ pred_best = temp[i]; old=i; } } if (threadIdx.x==0){ idxs[0] = old;//randomly select a thread and randomly give the firt idx of keypoints } __syncthreads(); for (int i = 1; i<m;i++){ float best = -1; int besti=0; float x1 = dataset[old*c+0]; float y1 = dataset[old*c+1]; float z1 = dataset[old*c+2]; // float f1 = dataset[old*c+3]; // for (int key_id= 0;key_id<c;key_id++){ // values1[key_id] = dataset[old*c+key_id]; // }//get the x1,y1,z1,f11,f21,f31,...of keypoints // printf("%.f",x1); for (int k= tid;k<n;k+=stride){ float x2 = dataset[k*c+0]; float y2 = dataset[k*c+1]; float z2 = dataset[k*c+2]; // float f2 = dataset[k*c+3]; // for (int raw_id=0;raw_id<c;raw_id++){ // values2[raw_id] = dataset[k*c+raw_id]; // }//get the x2,y2,z2,f12,f22,f32,f42,.... 
of raw point cloud // float d = 0; // for(int c_id = 0 ; c_id<c;c_id++){ // d += (values2[c_id]-values1[c_id])*(values2[c_id]-values1[c_id]); // }//calculate the distance between the sampled and unsampled points float d = (x2-x1)*(x2-x1) + (y2-y1)*(y2-y1) + (z2-z1)*(z2-z1); float d2 = min(temp[k],d); temp[k] = d2;//identify the distance between the sampled points and unsampled points if(d2>best){ best = d2; besti = k; }//chose the further points } dist[tid] = best; disti[tid] = besti; __syncthreads(); if(block_size>=1024){ if(tid<512){ __update(dist,disti,tid,tid+512); } __syncthreads(); } if (block_size>=512){ if (tid<256){ __update(dist,disti,tid,tid+256); } __syncthreads(); } if(block_size>=256){ if (tid<128){ __update(dist,disti,tid,tid+128); } __syncthreads(); } if(block_size>=128){ if(tid<64){ __update(dist,disti,tid,tid+64); } __syncthreads(); } if(block_size>=64){ if(tid<32){ __update(dist,disti,tid,tid+32); } __syncthreads(); } if(block_size>=32){ if(tid<16){ __update(dist,disti,tid,tid+16); } __syncthreads(); } if(block_size>=16){ if(tid<8){ __update(dist,disti,tid,tid+8); } __syncthreads(); } if(block_size>=8){ if(tid<4){ __update(dist,disti,tid,tid+4); } __syncthreads(); } if(block_size>=4){ if(tid<2){ __update(dist,disti,tid,tid+2); } __syncthreads(); } if(block_size>=2){ if(tid<1){ __update(dist,disti,tid,tid+1); } __syncthreads(); } old = disti[0]; if(tid==0){ idxs[i] = old; }// for avoid repeatly assginment in all thread , so there is if condition , in fact it can be replaceed by other format like tid==1 or tid ==3 } } void further_point_sampling_with_features_kernel_launch(int b, int n, int m,int m1,int c, const float *dataset,const int *predix, float *temp, int *idxs, float *values1, float * values2, hipStream_t stream){ //values1 is a 1D array shape is (c) c is the channel of datatset hipError_t err; unsigned int n_tread =opt_n_threads(n); switch (n_tread) { case 1024: hipLaunchKernelGGL(( further_sample_points_with_features_kernel<1024>), dim3(b),dim3(n_tread), 0, 0, b,n,m,m1,c,dataset,predix,temp,values1,values2,idxs); break; case 512: hipLaunchKernelGGL(( further_sample_points_with_features_kernel<512>), dim3(b),dim3(n_tread),0,stream, b,n,m,m1,c,dataset,predix,temp,values1,values2,idxs); break; case 256: hipLaunchKernelGGL(( further_sample_points_with_features_kernel<256>), dim3(b),dim3(n_tread),0,stream, b,n,m,m1,c,dataset,predix,temp,values1,values2,idxs); break; case 128: hipLaunchKernelGGL(( further_sample_points_with_features_kernel<128>), dim3(b),dim3(n_tread),0,stream, b,n,m,m1,c,dataset,predix,temp,values1,values2,idxs); break; case 64: hipLaunchKernelGGL(( further_sample_points_with_features_kernel<64>), dim3(b),dim3(n_tread),0,stream, b,n,m,m1,c,dataset,predix,temp,values1,values2,idxs); break; case 32: hipLaunchKernelGGL(( further_sample_points_with_features_kernel<32>), dim3(b),dim3(n_tread),0,stream, b,n,m,m1,c,dataset,predix,temp,values1,values2,idxs); break; case 16: hipLaunchKernelGGL(( further_sample_points_with_features_kernel<16>), dim3(b),dim3(n_tread),0,stream, b,n,m,m1,c,dataset,predix,temp,values1,values2,idxs); break; case 8: hipLaunchKernelGGL(( further_sample_points_with_features_kernel<8>), dim3(b),dim3(n_tread),0,stream, b,n,m,m1,c,dataset,predix,temp,values1,values2,idxs); break; case 4: hipLaunchKernelGGL(( further_sample_points_with_features_kernel<4>), dim3(b),dim3(n_tread),0,stream, b,n,m,m1,c,dataset,predix,temp,values1,values2,idxs); break; case 2: hipLaunchKernelGGL(( further_sample_points_with_features_kernel<2>), 
dim3(b),dim3(n_tread),0,stream, b,n,m,m1,c,dataset,predix,temp,values1,values2,idxs); break;
        case 1: hipLaunchKernelGGL(( further_sample_points_with_features_kernel<1>), dim3(b),dim3(n_tread),0,stream, b,n,m,m1,c,dataset,predix,temp,values1,values2,idxs); break;
        default: hipLaunchKernelGGL(( further_sample_points_with_features_kernel<512>), dim3(b),dim3(n_tread),0,stream, b,n,m,m1,c,dataset,predix,temp,values1,values2,idxs); break;
    }
    // hipDeviceSynchronize();
    err = hipGetLastError();
    if (hipSuccess != err) {
        fprintf(stderr, "CUDA kernel failed : %s\n", hipGetErrorString(err));
        exit(-1);
    }
}
00fa4485fa43ce18cbc08d86f56cc076dfb52a68.cu
#include "sampling.h" #include <stdio.h> #include <stdlib.h> //#define DEBUG #include "cmath" #define TOTAL_THREADS 1024 #define THREADS_PER_BLOCK 256 #define DIVUP(m,n) ((m) / (n) + ((m) % (n) > 0)) #include "math.h" __global__ void addkernel(int row, int col, const float *aa, const float *bb, float *cc){ int i = blockIdx.x*blockDim.x+threadIdx.x; int j = blockIdx.y*blockDim.y+threadIdx.x; cc[i*col+j]=aa[i*col+j]+bb[i*col+j]; // printf("i:%d,j:%d ",i,j,"\n"); }; void AddLaunch(const float *a,const float *b,float *c, int size[],cudaStream_t stream){ // int *pts_assign = NULL; // cudaMalloc(&pts_assign, 1 * 1 * 1 * sizeof(int)); // int N = DIVUP(3,5); int x = size[0],y = size[1]; dim3 numblock(2,3,1); dim3 threadPerBlock(1); addkernel<<<numblock,threadPerBlock,0,stream>>>(x,y,a,b,c); // cudaDeviceSynchronize(); // for using printf in kernel function } inline int opt_n_threads(int work_size) { const int pow_2 = std::log(static_cast<double>(work_size)) / std::log(2.0); return max(min(1 << pow_2, TOTAL_THREADS), 1); } __global__ void gather_points_kernel_fast(int b, int c, int n, int m, const float *__restrict__ points, const int *__restrict__ idx, float *__restrict__ out) { // points: (B, C, N) // idx: (B, M) // output: // out: (B, C, M) int bs_idx = blockIdx.z; int c_idx = blockIdx.y; int pt_idx = blockIdx.x * blockDim.x + threadIdx.x; if (bs_idx >= b || c_idx >= c || pt_idx >= m) return; out += bs_idx * c * m + c_idx * m + pt_idx; idx += bs_idx * m + pt_idx; points += bs_idx * c * n + c_idx * n; out[0] = points[idx[0]]; } __device__ void __update(float *__restrict__ dists, int *__restrict__ dists_i, int idx1, int idx2){ const float v1 = dists[idx1], v2 = dists[idx2]; const int i1 = dists_i[idx1], i2 = dists_i[idx2]; dists[idx1] = max(v1, v2); dists_i[idx1] = v2 > v1 ? i2 : i1; } template <unsigned int block_size> __global__ void furthest_point_sampling_kernel(int b, int n, int m, const float *__restrict__ dataset, float *__restrict__ temp, int *__restrict__ idxs) { // dataset: (B, N, 3) // tmp: (B, N) //c is the num of channel // output: // idx: (B, M) if (m <= 0) return; __shared__ float dists[block_size]; __shared__ int dists_i[block_size]; // printf("block size %d", block_size); int batch_index = blockIdx.x;//range from 0-batch size dataset += batch_index * n * 3; temp += batch_index * n; idxs += batch_index * m; int tid= threadIdx.x; const int stride = block_size; int old = 0; if (threadIdx.x == 0) idxs[0] = old; __syncthreads(); for (int j = 1; j < m; j++) { int besti = 0; float best = -1; float x1 = dataset[old * 3 + 0]; float y1 = dataset[old * 3 + 1]; float z1 = dataset[old * 3 + 2]; int for_cout = 1; for (int k = tid; k < n; k += stride) { float x2, y2, z2; x2 = dataset[k * 3 + 0]; y2 = dataset[k * 3 + 1]; z2 = dataset[k * 3 + 2]; // float mag = (x2 * x2) + (y2 * y2) + (z2 * z2); // if (mag <= 1e-3) // continue; float d = (x2 - x1) * (x2 - x1) + (y2 - y1) * (y2 - y1) + (z2 - z1) * (z2 - z1); float d2 = min(d, temp[k]); temp[k] = d2;//the distance to fps points besti = d2 > best ? k : besti;//the further point id best = d2 > best ? 
d2 : best;// the further point distance
            for_cout++;
        }
        // printf("for cout %d", for_cout);
        // printf("bockIdx.x %d,threadIdx.x:%d, d2:%.4f",batch_index,tid,for_cout);
        dists[tid] = best;
        dists_i[tid] = besti;
        __syncthreads();

        if (block_size >= 1024) { if (tid < 512) { __update(dists, dists_i, tid, tid + 512); } __syncthreads(); }
        if (block_size >= 512) { if (tid < 256) { __update(dists, dists_i, tid, tid + 256); } __syncthreads(); }
        if (block_size >= 256) { if (tid < 128) { __update(dists, dists_i, tid, tid + 128); } __syncthreads(); }
        if (block_size >= 128) { if (tid < 64) { __update(dists, dists_i, tid, tid + 64); } __syncthreads(); }
        if (block_size >= 64) { if (tid < 32) { __update(dists, dists_i, tid, tid + 32); } __syncthreads(); }
        if (block_size >= 32) { if (tid < 16) { __update(dists, dists_i, tid, tid + 16); } __syncthreads(); }
        if (block_size >= 16) { if (tid < 8) { __update(dists, dists_i, tid, tid + 8); } __syncthreads(); }
        if (block_size >= 8) { if (tid < 4) { __update(dists, dists_i, tid, tid + 4); } __syncthreads(); }
        if (block_size >= 4) { if (tid < 2) { __update(dists, dists_i, tid, tid + 2); } __syncthreads(); }
        if (block_size >= 2) { if (tid < 1) { __update(dists, dists_i, tid, tid + 1); } __syncthreads(); }

        old = dists_i[0];
        if (tid == 0) idxs[j] = old;
    }
}

void gather_points_kernel_launcher_fast(int b, int c, int n, int npoints, const float *points, const int *idx, float *out, cudaStream_t stream) {
    // points: (B, C, N)
    // idx: (B, npoints)
    // output:
    //     out: (B, C, npoints)
    cudaError_t err;
    dim3 blocks(DIVUP(npoints, THREADS_PER_BLOCK), c, b);  // blockIdx.x(col), blockIdx.y(row)
    dim3 threads(THREADS_PER_BLOCK);

    gather_points_kernel_fast<<<blocks, threads, 0, stream>>>(b, c, n, npoints, points, idx, out);

    err = cudaGetLastError();
    if (cudaSuccess != err) {
        fprintf(stderr, "CUDA kernel failed : %s\n", cudaGetErrorString(err));
        exit(-1);
    }
}

void furthest_point_sampling_kernel_launcher(int b, int n, int m, const float *dataset, float *temp, int *idxs, cudaStream_t stream) {
    // dataset: (B, N, 3)
    // tmp: (B, N)
    // output:
    //     idx: (B, M)
    cudaError_t err;
    unsigned int n_threads = opt_n_threads(n);

    switch (n_threads) {
        case 1024: furthest_point_sampling_kernel<1024><<<b, n_threads, 0, stream>>>(b, n, m, dataset, temp, idxs); break;
        case 512: furthest_point_sampling_kernel<512><<<b, n_threads, 0, stream>>>(b, n, m, dataset, temp, idxs); break;
        case 256: furthest_point_sampling_kernel<256><<<b, n_threads, 0, stream>>>(b, n, m, dataset, temp, idxs); break;
        case 128: furthest_point_sampling_kernel<128><<<b, n_threads, 0, stream>>>(b, n, m, dataset, temp, idxs); break;
        case 64: furthest_point_sampling_kernel<64><<<b, n_threads, 0, stream>>>(b, n, m, dataset, temp, idxs); break;
        case 32: furthest_point_sampling_kernel<32><<<b, n_threads, 0, stream>>>(b, n, m, dataset, temp, idxs); break;
        case 16: furthest_point_sampling_kernel<16><<<b, n_threads, 0, stream>>>(b, n, m, dataset, temp, idxs); break;
        case 8: furthest_point_sampling_kernel<8><<<b, n_threads, 0, stream>>>(b, n, m, dataset, temp, idxs); break;
        case 4: furthest_point_sampling_kernel<4><<<b, n_threads, 0, stream>>>(b, n, m, dataset, temp, idxs); break;
        case 2: furthest_point_sampling_kernel<2><<<b, n_threads, 0, stream>>>(b, n, m, dataset, temp, idxs); break;
        case 1: furthest_point_sampling_kernel<1><<<b, n_threads, 0, stream>>>(b, n, m, dataset, temp, idxs); break;
        default: furthest_point_sampling_kernel<512><<<b, n_threads, 0, stream>>>(b, n, m, dataset, temp, idxs);
    }
    // cudaDeviceSynchronize();//using for print
in kernel function err = cudaGetLastError(); if (cudaSuccess != err) { fprintf(stderr, "CUDA kernel failed : %s\n", cudaGetErrorString(err)); exit(-1); } } template <unsigned int block_size> __global__ void further_sample_points_with_features_kernel(int b, int n, int m,int m1,int c, const float *__restrict__ dataset,const int *__restrict__ predix, float *__restrict__ temp,float *__restrict__ values1,float *__restrict__ values2, int *__restrict__ idxs){ if (m <= 0) return; __shared__ float dist[block_size]; __shared__ int disti[block_size]; int bs = blockIdx.x; dataset += bs*n*c; temp += bs*n; idxs += bs*m; predix += bs*m1; // predix += bs*m1;0 int tid = threadIdx.x; const int stride = block_size; int pred_idx; int sums = 0; for (int j = tid;j<n; j += stride){//Traverse all raw points // printf(" %d ",j); sums++; float pred_best = 1e38; float pred_p1,pred_p2; for(int k=0;k<m1;k++){ // printf(" %d ",k);//traverse all forground points to inital temp pred_idx = predix[k]; float pred_d = 0; for(int c_id=0;c_id<c;c_id++){ pred_p1= dataset[pred_idx*c+c_id]; pred_p2 = dataset[j*c+c_id]; pred_d += (pred_p2-pred_p1)* (pred_p2-pred_p1); }//calculate (x2-x1)*(x2-x1) + (y2-y1)*(y2-y1) + (y2-y1)*(y2-y1)... pred_best = min(pred_best,pred_d); } temp[j] = pred_best; } __syncthreads();//initalize the old and pred_best int old=0; float pred_best = -1; for (int i=0; i<n;i++){ if(pred_best<temp[i]){ pred_best = temp[i]; old=i; } } if (threadIdx.x==0){ idxs[0] = old;//randomly select a thread and randomly give the firt idx of keypoints } __syncthreads(); for (int i = 1; i<m;i++){ float best = -1; int besti=0; float x1 = dataset[old*c+0]; float y1 = dataset[old*c+1]; float z1 = dataset[old*c+2]; // float f1 = dataset[old*c+3]; // for (int key_id= 0;key_id<c;key_id++){ // values1[key_id] = dataset[old*c+key_id]; // }//get the x1,y1,z1,f11,f21,f31,...of keypoints // printf("%.f",x1); for (int k= tid;k<n;k+=stride){ float x2 = dataset[k*c+0]; float y2 = dataset[k*c+1]; float z2 = dataset[k*c+2]; // float f2 = dataset[k*c+3]; // for (int raw_id=0;raw_id<c;raw_id++){ // values2[raw_id] = dataset[k*c+raw_id]; // }//get the x2,y2,z2,f12,f22,f32,f42,.... 
of raw point cloud // float d = 0; // for(int c_id = 0 ; c_id<c;c_id++){ // d += (values2[c_id]-values1[c_id])*(values2[c_id]-values1[c_id]); // }//calculate the distance between the sampled and unsampled points float d = (x2-x1)*(x2-x1) + (y2-y1)*(y2-y1) + (z2-z1)*(z2-z1); float d2 = min(temp[k],d); temp[k] = d2;//identify the distance between the sampled points and unsampled points if(d2>best){ best = d2; besti = k; }//chose the further points } dist[tid] = best; disti[tid] = besti; __syncthreads(); if(block_size>=1024){ if(tid<512){ __update(dist,disti,tid,tid+512); } __syncthreads(); } if (block_size>=512){ if (tid<256){ __update(dist,disti,tid,tid+256); } __syncthreads(); } if(block_size>=256){ if (tid<128){ __update(dist,disti,tid,tid+128); } __syncthreads(); } if(block_size>=128){ if(tid<64){ __update(dist,disti,tid,tid+64); } __syncthreads(); } if(block_size>=64){ if(tid<32){ __update(dist,disti,tid,tid+32); } __syncthreads(); } if(block_size>=32){ if(tid<16){ __update(dist,disti,tid,tid+16); } __syncthreads(); } if(block_size>=16){ if(tid<8){ __update(dist,disti,tid,tid+8); } __syncthreads(); } if(block_size>=8){ if(tid<4){ __update(dist,disti,tid,tid+4); } __syncthreads(); } if(block_size>=4){ if(tid<2){ __update(dist,disti,tid,tid+2); } __syncthreads(); } if(block_size>=2){ if(tid<1){ __update(dist,disti,tid,tid+1); } __syncthreads(); } old = disti[0]; if(tid==0){ idxs[i] = old; }// for avoid repeatly assginment in all thread , so there is if condition , in fact it can be replaceed by other format like tid==1 or tid ==3 } } void further_point_sampling_with_features_kernel_launch(int b, int n, int m,int m1,int c, const float *dataset,const int *predix, float *temp, int *idxs, float *values1, float * values2, cudaStream_t stream){ //values1 is a 1D array shape is (c) c is the channel of datatset cudaError_t err; unsigned int n_tread =opt_n_threads(n); switch (n_tread) { case 1024: further_sample_points_with_features_kernel<1024><<<b,n_tread>>>(b,n,m,m1,c,dataset,predix,temp,values1,values2,idxs); break; case 512: further_sample_points_with_features_kernel<512><<<b,n_tread,0,stream>>>(b,n,m,m1,c,dataset,predix,temp,values1,values2,idxs); break; case 256: further_sample_points_with_features_kernel<256><<<b,n_tread,0,stream>>>(b,n,m,m1,c,dataset,predix,temp,values1,values2,idxs); break; case 128: further_sample_points_with_features_kernel<128><<<b,n_tread,0,stream>>>(b,n,m,m1,c,dataset,predix,temp,values1,values2,idxs); break; case 64: further_sample_points_with_features_kernel<64><<<b,n_tread,0,stream>>>(b,n,m,m1,c,dataset,predix,temp,values1,values2,idxs); break; case 32: further_sample_points_with_features_kernel<32><<<b,n_tread,0,stream>>>(b,n,m,m1,c,dataset,predix,temp,values1,values2,idxs); break; case 16: further_sample_points_with_features_kernel<16><<<b,n_tread,0,stream>>>(b,n,m,m1,c,dataset,predix,temp,values1,values2,idxs); break; case 8: further_sample_points_with_features_kernel<8><<<b,n_tread,0,stream>>>(b,n,m,m1,c,dataset,predix,temp,values1,values2,idxs); break; case 4: further_sample_points_with_features_kernel<4><<<b,n_tread,0,stream>>>(b,n,m,m1,c,dataset,predix,temp,values1,values2,idxs); break; case 2: further_sample_points_with_features_kernel<2><<<b,n_tread,0,stream>>>(b,n,m,m1,c,dataset,predix,temp,values1,values2,idxs); break; case 1: further_sample_points_with_features_kernel<1><<<b,n_tread,0,stream>>>(b,n,m,m1,c,dataset,predix,temp,values1,values2,idxs); break; default: 
further_sample_points_with_features_kernel<512><<<b,n_tread,0,stream>>>(b,n,m,m1,c,dataset,predix,temp,values1,values2,idxs); break;
    }
    // cudaDeviceSynchronize();
    err = cudaGetLastError();
    if (cudaSuccess != err) {
        fprintf(stderr, "CUDA kernel failed : %s\n", cudaGetErrorString(err));
        exit(-1);
    }
}
d6ec082f6b125b73ef5df838f676c6e634c025be.hip
// !!! This is a file automatically generated by hipify!!! // CUDA runtime #include <hip/hip_runtime.h> #include <stdio.h> // Helper functions and utilities to work with CUDA // #include <helper_functions.h> /********************************************** * Check whether we read back the same input * The double check is just for debug purposes. * We can comment it out when benchmarking the time. **********************************************/ #define GPU_DEBUG /* Define all constant variavle below with a REASONABLE name */ #define out_channel_num 6 // number of feature maps #define out_y_dim 358 // height of output map #define out_x_dim 638 // width of output map #define in_y_dim 720 // height of input map #define in_x_dim 1280 // width of output map #define conv_window_y 6 // height of convolution window #define conv_window_x 6 // width of convolution window #define filter_size (conv_window_y * conv_window_x) // size of convolution window #define stride 2 // stride of layer #define init_bias_thread_x 16 // thread x dimension of init_bias #define init_bias_thread_y 16 // thread y dimension of init_bias #define init_bias_thread_z 2 // thread z dimension of init_bias #define feature_maps_thread_x 8 // thread x dimension of feature_maps #define feature_maps_thread_y 8 // thread y dimension of feature_maps #define feature_maps_thread_z 8 // thread z dimension of feature_maps #define sigmoid_thread_x 14 // thread x dimension of sigmoid #define sigmoid_thread_y 14 // thread y dimension of sigmoid #define sigmoid_thread_z 2 // thread z dimension of sigmoid /****************************************** * Device function declaration *****************************************/ __global__ void layer1_init_bias(float* d_y, float* d_bias); __global__ void layer1_feature_maps(float* d_y, unsigned char* d_in_layer, float* d_weight); __global__ void layer1_sigmoid(float* d_y, unsigned char* d_out_layer); /************************************************************************************ * Input : input image, pointer to output result, coefficients bias and weights * Output : neuron outputs of the feature maps represented as an image * Procedure: perform feed forward computation through the feature extraction layers *******************************************************************************/ void cuda_convolution_layer1(unsigned char in_layer[], unsigned char out_layer[], const float bias[], const float weight[]) { /********************************* * allocate device memory on GPU *********************************/ unsigned int size_y = out_channel_num*out_y_dim*out_x_dim; unsigned int mem_size_y = sizeof(float) * size_y; float *d_y; unsigned int size_bias = out_channel_num; unsigned int mem_size_bias = sizeof(float) * size_bias; float *d_bias; unsigned int size_weight = out_channel_num*filter_size; unsigned int mem_size_weight = sizeof(float) * size_weight; float *d_weight; unsigned int size_in_layer = in_y_dim*in_x_dim; unsigned int mem_size_in_layer = sizeof(unsigned char) * size_in_layer; unsigned char *d_in_layer; unsigned int size_out_layer = out_channel_num*out_y_dim*out_x_dim; unsigned int mem_size_out_layer = sizeof(unsigned char) * size_out_layer; unsigned char *d_out_layer; hipError_t error; /******************************** * Allocate device memory on GPU. * Check the first hipMalloc error, * in case GPU is busy. 
********************************/ error = hipMalloc((void **) &d_y, mem_size_y); /* Check the error code of the first CUDA API call */ if (error != hipSuccess){ printf("hipMalloc returned error code %d, line(%d)\n", error, __LINE__); printf("CUDA error: %s\n", hipGetErrorString(error)); }else{ printf("hipMalloc success.\n"); } /* if no error for the first hipMalloc, continue other hipMalloc */ error = hipMalloc((void **) &d_in_layer, mem_size_in_layer); error = hipMalloc((void **) &d_bias, mem_size_bias); error = hipMalloc((void **) &d_weight, mem_size_weight); error = hipMalloc((void **) &d_out_layer, mem_size_out_layer); /********************************************* * copy data from host (CPU) to device (GPU) ********************************************/ error = hipMemcpy(d_in_layer, in_layer, mem_size_in_layer, hipMemcpyHostToDevice); error = hipMemcpy(d_bias, bias, mem_size_bias, hipMemcpyHostToDevice); error = hipMemcpy(d_weight, weight, mem_size_weight, hipMemcpyHostToDevice); /* Synchronize all the hipMemcpy API before doing the actual computation */ hipDeviceSynchronize(); /********************************************* * Layer 1, Step 1: * init values of feature maps at bias value ********************************************/ /* 16*16*z(choose the correct z dimension) threads per block */ /* NOTE: threads per block limit is 1024 for K80 */ /* NOTE: if you use another GPU, check the deviceQuery */ dim3 threadsPerBlock = dim3(init_bias_thread_x, init_bias_thread_y, init_bias_thread_z); dim3 blocksPerGrid = dim3((out_x_dim + init_bias_thread_x - 1) / init_bias_thread_x, (out_y_dim + init_bias_thread_y - 1) / init_bias_thread_y, (out_channel_num + init_bias_thread_z - 1) / init_bias_thread_z); hipLaunchKernelGGL(( layer1_init_bias), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_y, d_bias); /* Just in case, put a sync here */ hipDeviceSynchronize(); /********************************************* * Layer 1, Step 2: * loop over output feature maps ********************************************/ /* 8*8*z(choose the correct z dimension) threads per block */ /*********************************************** * The layer size is not diviadable by 8 either. * Mask out extra threads in the kernel. 
**********************************************/ threadsPerBlock = dim3(feature_maps_thread_x, feature_maps_thread_y, feature_maps_thread_z); blocksPerGrid = dim3((out_x_dim + feature_maps_thread_x - 1) / feature_maps_thread_x, (out_y_dim + feature_maps_thread_y - 1) / feature_maps_thread_y, (out_channel_num + feature_maps_thread_z - 1) / feature_maps_thread_z); // record time to execute layer1_feature_maps float time; hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start, 0); hipLaunchKernelGGL(( layer1_feature_maps), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_y, d_in_layer, d_weight); hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&time, start, stop); printf("Time to execute layer1_feature_maps: %3.1f ms \n", time); /* Just in case, put a sync here */ hipDeviceSynchronize(); /******************************************** 14*14*z(choose the correct z dimension) threads per block ******************************************** * Layer 1, Step 3: * sigmoid activation function ********************************************/ threadsPerBlock = dim3(sigmoid_thread_x, sigmoid_thread_y, sigmoid_thread_z); blocksPerGrid = dim3((out_x_dim + sigmoid_thread_x - 1) / sigmoid_thread_x, (out_y_dim + sigmoid_thread_y - 1) / sigmoid_thread_y, (out_channel_num + sigmoid_thread_z - 1) / sigmoid_thread_z); hipLaunchKernelGGL(( layer1_sigmoid), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_y, d_out_layer); /* Just in case, put a sync here */ hipDeviceSynchronize(); /* Read back the output from device (GPU) to host (CPU) */ error = hipMemcpy(out_layer, d_out_layer, mem_size_out_layer, hipMemcpyDeviceToHost); /* Just in case, put a sync here */ hipDeviceSynchronize(); /* release device memory */ hipFree(d_y); hipFree(d_in_layer); hipFree(d_bias); hipFree(d_weight); hipFree(d_out_layer); } /********************************************* * GPU kernel * Layer 1, Step 1: * init values of feature maps at bias value * 16*16*z(choose the correct z dimension) threads per block ********************************************/ __global__ void layer1_init_bias(float* d_y, float* d_bias) { int col = threadIdx.x + blockIdx.x * init_bias_thread_x; int row = threadIdx.y + blockIdx.y * init_bias_thread_y; int depth = threadIdx.z + blockIdx.z * init_bias_thread_z; if (row < out_y_dim && col < out_x_dim && depth < out_channel_num) // prevent out of bound access d_y[depth * out_y_dim * out_x_dim + row * out_x_dim + col] = d_bias[depth]; // load the bias } /********************************************* * GPU kernel * Layer 1, Step 2: * loop over output feature maps * 8*8*z(choose the correct z dimension) threads per block ********************************************/ __global__ void layer1_feature_maps(float* d_y, unsigned char* d_in_layer, float* d_weight) { int col = threadIdx.x + blockIdx.x * feature_maps_thread_x; int row = threadIdx.y + blockIdx.y * feature_maps_thread_y; int depth = threadIdx.z + blockIdx.z * feature_maps_thread_z; // cache d_in_layer __shared__ unsigned char in_layer[feature_maps_thread_y * stride + conv_window_y][feature_maps_thread_x * stride + conv_window_x]; // process [0, feature_maps_thread_y * stride - 1][0, feature_maps_thread_x * stride + conv_window_x - 1] for (int i = 0; i < stride; i++) in_layer[threadIdx.y * stride + i][threadIdx.x * stride + depth] = d_in_layer[(row * stride + i) * in_x_dim + col * stride + depth]; // process [feature_maps_thread_y * stride, feature_maps_thread_y * stride + conv_window_y - 1][0, 
feature_maps_thread_x * stride - 1] if (threadIdx.y == 0 && depth < conv_window_y) for (int i = 0; i < stride; i++) { in_layer[feature_maps_thread_y * stride + depth][threadIdx.x * stride + i] = d_in_layer[((row + feature_maps_thread_y) * stride + depth) * in_x_dim + col * stride + i]; } // process [feature_maps_thread_y * stride, feature_maps_thread_y * stride + conv_window_y - 1][feature_maps_thread_x * stride, feature_maps_thread_x * stride + conv_window_x - 1] if (threadIdx.x < conv_window_x && threadIdx.y == 0 && depth < conv_window_y) in_layer[feature_maps_thread_y * stride + depth][feature_maps_thread_x * stride + threadIdx.x] = d_in_layer[((row + feature_maps_thread_y) * stride + depth) * in_x_dim + (col - threadIdx.x + feature_maps_thread_x) * stride + threadIdx.x]; // cache d_weight __shared__ float weight[out_channel_num][filter_size]; if (threadIdx.y < out_y_dim && threadIdx.x < out_x_dim && depth < out_channel_num) weight[depth][threadIdx.y * conv_window_x + threadIdx.x] = d_weight[depth * filter_size + threadIdx.y * conv_window_x + threadIdx.x]; __syncthreads(); if (row < out_y_dim && col < out_x_dim && depth < out_channel_num) // prevent out of bound access for (int k = 0; k < conv_window_y; k++) // loop over convolution window (row) for (int l = 0; l < conv_window_x; l++) // loop over convolution window (column) // perform convolution over a window d_y[depth * out_y_dim * out_x_dim + row * out_x_dim + col] += in_layer[threadIdx.y * stride + k][threadIdx.x * stride + l] * weight[depth][k * conv_window_x + l]; } /********************************************* * GPU kernel * Layer 1, Step 3: * sigmoid activation function * 14*14*z(choose the correct z dimension) threads per block ********************************************/ __global__ void layer1_sigmoid(float* d_y, unsigned char* d_out_layer){ int col = threadIdx.x + blockIdx.x * sigmoid_thread_x; int row = threadIdx.y + blockIdx.y * sigmoid_thread_y; int depth = threadIdx.z + blockIdx.z * sigmoid_thread_z; int idx = depth * out_y_dim * out_x_dim + row * out_x_dim + col; // index in the grid if (row < out_y_dim && col < out_x_dim && depth < out_channel_num) d_out_layer[idx] = (unsigned char)(255.999f / (1 + expf(-d_y[idx] / 256))); // apply the sigmoid function to the result }
d6ec082f6b125b73ef5df838f676c6e634c025be.cu
// CUDA runtime #include <cuda_runtime.h> #include <stdio.h> // Helper functions and utilities to work with CUDA // #include <helper_functions.h> /********************************************** * Check whether we read back the same input * The double check is just for debug purposes. * We can comment it out when benchmarking the time. **********************************************/ #define GPU_DEBUG /* Define all constant variavle below with a REASONABLE name */ #define out_channel_num 6 // number of feature maps #define out_y_dim 358 // height of output map #define out_x_dim 638 // width of output map #define in_y_dim 720 // height of input map #define in_x_dim 1280 // width of output map #define conv_window_y 6 // height of convolution window #define conv_window_x 6 // width of convolution window #define filter_size (conv_window_y * conv_window_x) // size of convolution window #define stride 2 // stride of layer #define init_bias_thread_x 16 // thread x dimension of init_bias #define init_bias_thread_y 16 // thread y dimension of init_bias #define init_bias_thread_z 2 // thread z dimension of init_bias #define feature_maps_thread_x 8 // thread x dimension of feature_maps #define feature_maps_thread_y 8 // thread y dimension of feature_maps #define feature_maps_thread_z 8 // thread z dimension of feature_maps #define sigmoid_thread_x 14 // thread x dimension of sigmoid #define sigmoid_thread_y 14 // thread y dimension of sigmoid #define sigmoid_thread_z 2 // thread z dimension of sigmoid /****************************************** * Device function declaration *****************************************/ __global__ void layer1_init_bias(float* d_y, float* d_bias); __global__ void layer1_feature_maps(float* d_y, unsigned char* d_in_layer, float* d_weight); __global__ void layer1_sigmoid(float* d_y, unsigned char* d_out_layer); /************************************************************************************ * Input : input image, pointer to output result, coefficients bias and weights * Output : neuron outputs of the feature maps represented as an image * Procedure: perform feed forward computation through the feature extraction layers *******************************************************************************/ void cuda_convolution_layer1(unsigned char in_layer[], unsigned char out_layer[], const float bias[], const float weight[]) { /********************************* * allocate device memory on GPU *********************************/ unsigned int size_y = out_channel_num*out_y_dim*out_x_dim; unsigned int mem_size_y = sizeof(float) * size_y; float *d_y; unsigned int size_bias = out_channel_num; unsigned int mem_size_bias = sizeof(float) * size_bias; float *d_bias; unsigned int size_weight = out_channel_num*filter_size; unsigned int mem_size_weight = sizeof(float) * size_weight; float *d_weight; unsigned int size_in_layer = in_y_dim*in_x_dim; unsigned int mem_size_in_layer = sizeof(unsigned char) * size_in_layer; unsigned char *d_in_layer; unsigned int size_out_layer = out_channel_num*out_y_dim*out_x_dim; unsigned int mem_size_out_layer = sizeof(unsigned char) * size_out_layer; unsigned char *d_out_layer; cudaError_t error; /******************************** * Allocate device memory on GPU. * Check the first cudaMalloc error, * in case GPU is busy. 
********************************/ error = cudaMalloc((void **) &d_y, mem_size_y); /* Check the error code of the first CUDA API call */ if (error != cudaSuccess){ printf("cudaMalloc returned error code %d, line(%d)\n", error, __LINE__); printf("CUDA error: %s\n", cudaGetErrorString(error)); }else{ printf("cudaMalloc success.\n"); } /* if no error for the first cudaMalloc, continue other cudaMalloc */ error = cudaMalloc((void **) &d_in_layer, mem_size_in_layer); error = cudaMalloc((void **) &d_bias, mem_size_bias); error = cudaMalloc((void **) &d_weight, mem_size_weight); error = cudaMalloc((void **) &d_out_layer, mem_size_out_layer); /********************************************* * copy data from host (CPU) to device (GPU) ********************************************/ error = cudaMemcpy(d_in_layer, in_layer, mem_size_in_layer, cudaMemcpyHostToDevice); error = cudaMemcpy(d_bias, bias, mem_size_bias, cudaMemcpyHostToDevice); error = cudaMemcpy(d_weight, weight, mem_size_weight, cudaMemcpyHostToDevice); /* Synchronize all the cudaMemcpy API before doing the actual computation */ cudaDeviceSynchronize(); /********************************************* * Layer 1, Step 1: * init values of feature maps at bias value ********************************************/ /* 16*16*z(choose the correct z dimension) threads per block */ /* NOTE: threads per block limit is 1024 for K80 */ /* NOTE: if you use another GPU, check the deviceQuery */ dim3 threadsPerBlock = dim3(init_bias_thread_x, init_bias_thread_y, init_bias_thread_z); dim3 blocksPerGrid = dim3((out_x_dim + init_bias_thread_x - 1) / init_bias_thread_x, (out_y_dim + init_bias_thread_y - 1) / init_bias_thread_y, (out_channel_num + init_bias_thread_z - 1) / init_bias_thread_z); layer1_init_bias<<<blocksPerGrid, threadsPerBlock>>>(d_y, d_bias); /* Just in case, put a sync here */ cudaDeviceSynchronize(); /********************************************* * Layer 1, Step 2: * loop over output feature maps ********************************************/ /* 8*8*z(choose the correct z dimension) threads per block */ /*********************************************** * The layer size is not diviadable by 8 either. * Mask out extra threads in the kernel. 
**********************************************/ threadsPerBlock = dim3(feature_maps_thread_x, feature_maps_thread_y, feature_maps_thread_z); blocksPerGrid = dim3((out_x_dim + feature_maps_thread_x - 1) / feature_maps_thread_x, (out_y_dim + feature_maps_thread_y - 1) / feature_maps_thread_y, (out_channel_num + feature_maps_thread_z - 1) / feature_maps_thread_z); // record time to execute layer1_feature_maps float time; cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start, 0); layer1_feature_maps<<<blocksPerGrid, threadsPerBlock>>>(d_y, d_in_layer, d_weight); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&time, start, stop); printf("Time to execute layer1_feature_maps: %3.1f ms \n", time); /* Just in case, put a sync here */ cudaDeviceSynchronize(); /******************************************** 14*14*z(choose the correct z dimension) threads per block ******************************************** * Layer 1, Step 3: * sigmoid activation function ********************************************/ threadsPerBlock = dim3(sigmoid_thread_x, sigmoid_thread_y, sigmoid_thread_z); blocksPerGrid = dim3((out_x_dim + sigmoid_thread_x - 1) / sigmoid_thread_x, (out_y_dim + sigmoid_thread_y - 1) / sigmoid_thread_y, (out_channel_num + sigmoid_thread_z - 1) / sigmoid_thread_z); layer1_sigmoid<<<blocksPerGrid, threadsPerBlock>>>(d_y, d_out_layer); /* Just in case, put a sync here */ cudaDeviceSynchronize(); /* Read back the output from device (GPU) to host (CPU) */ error = cudaMemcpy(out_layer, d_out_layer, mem_size_out_layer, cudaMemcpyDeviceToHost); /* Just in case, put a sync here */ cudaDeviceSynchronize(); /* release device memory */ cudaFree(d_y); cudaFree(d_in_layer); cudaFree(d_bias); cudaFree(d_weight); cudaFree(d_out_layer); } /********************************************* * GPU kernel * Layer 1, Step 1: * init values of feature maps at bias value * 16*16*z(choose the correct z dimension) threads per block ********************************************/ __global__ void layer1_init_bias(float* d_y, float* d_bias) { int col = threadIdx.x + blockIdx.x * init_bias_thread_x; int row = threadIdx.y + blockIdx.y * init_bias_thread_y; int depth = threadIdx.z + blockIdx.z * init_bias_thread_z; if (row < out_y_dim && col < out_x_dim && depth < out_channel_num) // prevent out of bound access d_y[depth * out_y_dim * out_x_dim + row * out_x_dim + col] = d_bias[depth]; // load the bias } /********************************************* * GPU kernel * Layer 1, Step 2: * loop over output feature maps * 8*8*z(choose the correct z dimension) threads per block ********************************************/ __global__ void layer1_feature_maps(float* d_y, unsigned char* d_in_layer, float* d_weight) { int col = threadIdx.x + blockIdx.x * feature_maps_thread_x; int row = threadIdx.y + blockIdx.y * feature_maps_thread_y; int depth = threadIdx.z + blockIdx.z * feature_maps_thread_z; // cache d_in_layer __shared__ unsigned char in_layer[feature_maps_thread_y * stride + conv_window_y][feature_maps_thread_x * stride + conv_window_x]; // process [0, feature_maps_thread_y * stride - 1][0, feature_maps_thread_x * stride + conv_window_x - 1] for (int i = 0; i < stride; i++) in_layer[threadIdx.y * stride + i][threadIdx.x * stride + depth] = d_in_layer[(row * stride + i) * in_x_dim + col * stride + depth]; // process [feature_maps_thread_y * stride, feature_maps_thread_y * stride + conv_window_y - 1][0, feature_maps_thread_x * stride - 1] if (threadIdx.y == 0 && depth < 
conv_window_y) for (int i = 0; i < stride; i++) { in_layer[feature_maps_thread_y * stride + depth][threadIdx.x * stride + i] = d_in_layer[((row + feature_maps_thread_y) * stride + depth) * in_x_dim + col * stride + i]; } // process [feature_maps_thread_y * stride, feature_maps_thread_y * stride + conv_window_y - 1][feature_maps_thread_x * stride, feature_maps_thread_x * stride + conv_window_x - 1] if (threadIdx.x < conv_window_x && threadIdx.y == 0 && depth < conv_window_y) in_layer[feature_maps_thread_y * stride + depth][feature_maps_thread_x * stride + threadIdx.x] = d_in_layer[((row + feature_maps_thread_y) * stride + depth) * in_x_dim + (col - threadIdx.x + feature_maps_thread_x) * stride + threadIdx.x]; // cache d_weight __shared__ float weight[out_channel_num][filter_size]; if (threadIdx.y < out_y_dim && threadIdx.x < out_x_dim && depth < out_channel_num) weight[depth][threadIdx.y * conv_window_x + threadIdx.x] = d_weight[depth * filter_size + threadIdx.y * conv_window_x + threadIdx.x]; __syncthreads(); if (row < out_y_dim && col < out_x_dim && depth < out_channel_num) // prevent out of bound access for (int k = 0; k < conv_window_y; k++) // loop over convolution window (row) for (int l = 0; l < conv_window_x; l++) // loop over convolution window (column) // perform convolution over a window d_y[depth * out_y_dim * out_x_dim + row * out_x_dim + col] += in_layer[threadIdx.y * stride + k][threadIdx.x * stride + l] * weight[depth][k * conv_window_x + l]; } /********************************************* * GPU kernel * Layer 1, Step 3: * sigmoid activation function * 14*14*z(choose the correct z dimension) threads per block ********************************************/ __global__ void layer1_sigmoid(float* d_y, unsigned char* d_out_layer){ int col = threadIdx.x + blockIdx.x * sigmoid_thread_x; int row = threadIdx.y + blockIdx.y * sigmoid_thread_y; int depth = threadIdx.z + blockIdx.z * sigmoid_thread_z; int idx = depth * out_y_dim * out_x_dim + row * out_x_dim + col; // index in the grid if (row < out_y_dim && col < out_x_dim && depth < out_channel_num) d_out_layer[idx] = (unsigned char)(255.999f / (1 + expf(-d_y[idx] / 256))); // apply the sigmoid function to the result }
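Since the file enables GPU_DEBUG for double-checking results, a plain CPU reference is a convenient thing to compare the read-back d_out_layer against. The sketch below is an assumption rather than part of the original project: it assumes the intended arithmetic is the ordinary strided 6x6 convolution plus bias followed by the same sigmoid quantization, and it relies on the #define constants of the file above being in scope.

#include <math.h>

/* Hypothetical CPU reference for layer 1 (not part of the original sources).
 * It assumes the intended math is the plain strided convolution described by
 * the kernels: out[d][r][c] = sigmoid(bias[d] + sum_{k,l} in[r*stride+k][c*stride+l] * w[d][k][l]). */
void layer1_reference_cpu(const unsigned char in_layer[], unsigned char out_ref[],
                          const float bias[], const float weight[])
{
    for (int d = 0; d < out_channel_num; d++)
        for (int r = 0; r < out_y_dim; r++)
            for (int c = 0; c < out_x_dim; c++) {
                float sum = bias[d];                                /* Step 1: bias */
                for (int k = 0; k < conv_window_y; k++)             /* Step 2: 6x6 window */
                    for (int l = 0; l < conv_window_x; l++)
                        sum += in_layer[(r * stride + k) * in_x_dim + (c * stride + l)]
                             * weight[d * filter_size + k * conv_window_x + l];
                /* Step 3: same sigmoid quantization as layer1_sigmoid */
                out_ref[d * out_y_dim * out_x_dim + r * out_x_dim + c] =
                    (unsigned char)(255.999f / (1 + expf(-sum / 256)));
            }
}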
007431ff92c2f2ec26204232fc4af49897b41b36.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "kernelPrintf.h" //__device__ void __syncthreads(void); __global__ void kernelPrintf(int *O,int sizeO){ int i=blockIdx.x*blockDim.x + threadIdx.x; if(i<sizeO){ printf("[%d]:%d ; ",i,O[i]); } } hipError_t printInt(int* d_array,int noElem_d_Array){ hipError_t cudaStatus; dim3 block(1024); dim3 grid((noElem_d_Array+block.x-1)/block.x); hipLaunchKernelGGL(( kernelPrintf), dim3(grid),dim3(block), 0, 0, d_array,noElem_d_Array); hipDeviceSynchronize(); cudaStatus=hipGetLastError(); if(cudaStatus != hipSuccess){ fprintf(stderr,"\nkernelPrintInt failed"); goto Error; } Error: return cudaStatus; } __global__ void kernelprintUnsignedInt(unsigned int *O,int sizeO){ int i=blockIdx.x*blockDim.x + threadIdx.x; if(i<sizeO){ printf("[%d]:%d ; ",i,O[i]); } } inline hipError_t printUnsignedInt(unsigned int* d_array,int noElem_d_Array){ hipError_t cudaStatus; dim3 block(1024); dim3 grid((noElem_d_Array+block.x-1)/block.x); hipLaunchKernelGGL(( kernelprintUnsignedInt), dim3(grid),dim3(block), 0, 0, d_array,noElem_d_Array); hipDeviceSynchronize(); cudaStatus=hipGetLastError(); if(cudaStatus != hipSuccess){ fprintf(stderr,"\nkernelPrintInt failed"); goto Error; } Error: return cudaStatus; } __global__ void kernelPrintFloat(float* A,int n){ int i=blockIdx.x*blockDim.x + threadIdx.x; if (i<n){ printf("[%d]:%.0f ;",i,A[i]); } } hipError_t printFloat(float* d_array,int numberElementOfArray){ hipError_t cudaStatus; dim3 block(1024); dim3 grid((numberElementOfArray+block.x-1)/block.x); hipLaunchKernelGGL(( kernelPrintFloat), dim3(grid),dim3(block), 0, 0, d_array,numberElementOfArray); hipDeviceSynchronize(); cudaStatus=hipGetLastError(); if(cudaStatus != hipSuccess){ fprintf(stderr,"\nkernelPrintExtention failed"); goto Error; } Error: return cudaStatus; } __global__ void kernelPrintExtention(Extension *d_Extension,unsigned int n){ int i=blockIdx.x*blockDim.x + threadIdx.x; if (i<n){ printf("\n[%d]: DFS code:(%d,%d,%d,%d,%d) (vgi,vgj):(%d,%d)\n",i,d_Extension[i].vi,d_Extension[i].vj,d_Extension[i].li,d_Extension[i].lij,d_Extension[i].lj,d_Extension[i].vgi,d_Extension[i].vgj); } } hipError_t printfExtension(Extension *d_E,unsigned int noElem_d_E){ hipError_t cudaStatus; dim3 block(1024); dim3 grid((noElem_d_E+block.x-1)/block.x); hipLaunchKernelGGL(( kernelPrintExtention), dim3(grid),dim3(block), 0, 0, d_E,noElem_d_E); hipDeviceSynchronize(); cudaStatus=hipGetLastError(); if(cudaStatus != hipSuccess){ fprintf(stderr,"\nkernelPrintExtention failed"); goto Error; } Error: return cudaStatus; } __global__ void kernelPrintUniEdge(UniEdge *d_UniqueEdge,unsigned int n){ int i=blockIdx.x*blockDim.x + threadIdx.x; if (i<n){ int li = d_UniqueEdge[i].li; int lij = d_UniqueEdge[i].lij; int lj = d_UniqueEdge[i].lj; printf("\n Edge %d: (%d,%d,%d)",i,li,lij,lj); } } hipError_t printfUniEdge(UniEdge *d_E,unsigned int noElem_d_E){ hipError_t cudaStatus; dim3 block(1024); dim3 grid((noElem_d_E+block.x-1)/block.x); hipLaunchKernelGGL(( kernelPrintUniEdge), dim3(grid),dim3(block), 0, 0, d_E,noElem_d_E); hipDeviceSynchronize(); cudaStatus=hipGetLastError(); if(cudaStatus != hipSuccess){ fprintf(stderr,"\nkernelPrintUniEdge failed"); goto Error; } Error: return cudaStatus; } __global__ void kernelPrintEmbedding(struct_Embedding *d_Embedding,int noElem_Embedding){ int i=blockIdx.x*blockDim.x + threadIdx.x; if (i<noElem_Embedding){ printf("\n[%d]: (idx:%d, vid:%d)",i,d_Embedding[i].idx,d_Embedding[i].vid); } } hipError_t 
printEmbedding(struct_Embedding *d_Embedding,int noElem_Embedding){ hipError_t cudaStatus; dim3 block(1024); dim3 grid((noElem_Embedding+block.x-1)/block.x); hipLaunchKernelGGL(( kernelPrintEmbedding), dim3(grid),dim3(block), 0, 0, d_Embedding,noElem_Embedding); hipDeviceSynchronize(); cudaStatus=hipGetLastError(); if(cudaStatus != hipSuccess){ fprintf(stderr,"\nkernelPrintEmbedding failed"); goto Error; } Error: return cudaStatus; }
007431ff92c2f2ec26204232fc4af49897b41b36.cu
#include "kernelPrintf.h" //__device__ void __syncthreads(void); __global__ void kernelPrintf(int *O,int sizeO){ int i=blockIdx.x*blockDim.x + threadIdx.x; if(i<sizeO){ printf("[%d]:%d ; ",i,O[i]); } } cudaError_t printInt(int* d_array,int noElem_d_Array){ cudaError cudaStatus; dim3 block(1024); dim3 grid((noElem_d_Array+block.x-1)/block.x); kernelPrintf<<<grid,block>>>(d_array,noElem_d_Array); cudaDeviceSynchronize(); cudaStatus=cudaGetLastError(); if(cudaStatus != cudaSuccess){ fprintf(stderr,"\nkernelPrintInt failed"); goto Error; } Error: return cudaStatus; } __global__ void kernelprintUnsignedInt(unsigned int *O,int sizeO){ int i=blockIdx.x*blockDim.x + threadIdx.x; if(i<sizeO){ printf("[%d]:%d ; ",i,O[i]); } } inline cudaError_t printUnsignedInt(unsigned int* d_array,int noElem_d_Array){ cudaError cudaStatus; dim3 block(1024); dim3 grid((noElem_d_Array+block.x-1)/block.x); kernelprintUnsignedInt<<<grid,block>>>(d_array,noElem_d_Array); cudaDeviceSynchronize(); cudaStatus=cudaGetLastError(); if(cudaStatus != cudaSuccess){ fprintf(stderr,"\nkernelPrintInt failed"); goto Error; } Error: return cudaStatus; } __global__ void kernelPrintFloat(float* A,int n){ int i=blockIdx.x*blockDim.x + threadIdx.x; if (i<n){ printf("[%d]:%.0f ;",i,A[i]); } } cudaError_t printFloat(float* d_array,int numberElementOfArray){ cudaError cudaStatus; dim3 block(1024); dim3 grid((numberElementOfArray+block.x-1)/block.x); kernelPrintFloat<<<grid,block>>>(d_array,numberElementOfArray); cudaDeviceSynchronize(); cudaStatus=cudaGetLastError(); if(cudaStatus != cudaSuccess){ fprintf(stderr,"\nkernelPrintExtention failed"); goto Error; } Error: return cudaStatus; } __global__ void kernelPrintExtention(Extension *d_Extension,unsigned int n){ int i=blockIdx.x*blockDim.x + threadIdx.x; if (i<n){ printf("\n[%d]: DFS code:(%d,%d,%d,%d,%d) (vgi,vgj):(%d,%d)\n",i,d_Extension[i].vi,d_Extension[i].vj,d_Extension[i].li,d_Extension[i].lij,d_Extension[i].lj,d_Extension[i].vgi,d_Extension[i].vgj); } } cudaError_t printfExtension(Extension *d_E,unsigned int noElem_d_E){ cudaError cudaStatus; dim3 block(1024); dim3 grid((noElem_d_E+block.x-1)/block.x); kernelPrintExtention<<<grid,block>>>(d_E,noElem_d_E); cudaDeviceSynchronize(); cudaStatus=cudaGetLastError(); if(cudaStatus != cudaSuccess){ fprintf(stderr,"\nkernelPrintExtention failed"); goto Error; } Error: return cudaStatus; } __global__ void kernelPrintUniEdge(UniEdge *d_UniqueEdge,unsigned int n){ int i=blockIdx.x*blockDim.x + threadIdx.x; if (i<n){ int li = d_UniqueEdge[i].li; int lij = d_UniqueEdge[i].lij; int lj = d_UniqueEdge[i].lj; printf("\n Edge %d: (%d,%d,%d)",i,li,lij,lj); } } cudaError_t printfUniEdge(UniEdge *d_E,unsigned int noElem_d_E){ cudaError cudaStatus; dim3 block(1024); dim3 grid((noElem_d_E+block.x-1)/block.x); kernelPrintUniEdge<<<grid,block>>>(d_E,noElem_d_E); cudaDeviceSynchronize(); cudaStatus=cudaGetLastError(); if(cudaStatus != cudaSuccess){ fprintf(stderr,"\nkernelPrintUniEdge failed"); goto Error; } Error: return cudaStatus; } __global__ void kernelPrintEmbedding(struct_Embedding *d_Embedding,int noElem_Embedding){ int i=blockIdx.x*blockDim.x + threadIdx.x; if (i<noElem_Embedding){ printf("\n[%d]: (idx:%d, vid:%d)",i,d_Embedding[i].idx,d_Embedding[i].vid); } } cudaError_t printEmbedding(struct_Embedding *d_Embedding,int noElem_Embedding){ cudaError cudaStatus; dim3 block(1024); dim3 grid((noElem_Embedding+block.x-1)/block.x); kernelPrintEmbedding<<<grid,block>>>(d_Embedding,noElem_Embedding); cudaDeviceSynchronize(); cudaStatus=cudaGetLastError(); 
if(cudaStatus != cudaSuccess){ fprintf(stderr,"\nkernelPrintEmbedding failed"); goto Error; } Error: return cudaStatus; }
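The functions above are device-side printing helpers for debugging. A minimal host-side usage sketch for printInt follows; the array contents and size are illustrative, and kernelPrintf.h is assumed to declare printInt as defined above.

#include <cuda_runtime.h>
#include <stdio.h>
#include "kernelPrintf.h"   /* assumed to declare printInt as defined above */

int main(void)
{
    const int n = 8;
    int h[8];
    for (int i = 0; i < n; i++) h[i] = i * i;

    int *d = NULL;
    cudaMalloc((void **)&d, n * sizeof(int));
    cudaMemcpy(d, h, n * sizeof(int), cudaMemcpyHostToDevice);

    /* prints "[i]:value ; " for every element from the device */
    cudaError_t status = printInt(d, n);
    printf("\nprintInt returned: %s\n", cudaGetErrorString(status));

    cudaFree(d);
    return 0;
}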
b8d8754ea6fe39451812b51c1c9f1f9aaa5ba974.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* CUDA GPU based program that uses the GPU to compute the cosine of numbers */
/* --------------------------- header section ----------------------------*/
#include<stdio.h>
#include<cuda.h>

#define COS_THREAD_CNT 200
#define N 10000

/* --------------------------- target code ------------------------------*/
struct cosParams {
    float *arg;
    float *res;
    int n;
};

__global__ void cos_main(struct cosParams parms)
{
    int i;
    for (i = threadIdx.x; i < parms.n; i += COS_THREAD_CNT) {
        parms.res[i] = __cosf(parms.arg[i]);
    }
}

/* --------------------------- host code ------------------------------*/
int main(int argc, char *argv[])
{
    int i = 0;
    hipError_t cudaStat;
    float* cosRes = 0;
    float* cosArg = 0;
    float* arg = (float *) malloc(N*sizeof(arg[0]));
    float* res = (float *) malloc(N*sizeof(res[0]));
    struct cosParams funcParams;

    /* ... fill arguments array "arg" .... */
    for(i=0; i < N; i++ ){
        arg[i] = (float)i;
    }

    cudaStat = hipMalloc ((void **)&cosArg, N * sizeof(cosArg[0]));
    if( cudaStat )
        printf(" value = %d : Memory Allocation on GPU Device failed\n", cudaStat);
    cudaStat = hipMalloc ((void **)&cosRes, N * sizeof(cosRes[0]));
    if( cudaStat )
        printf(" value = %d : Memory Allocation on GPU Device failed\n", cudaStat);
    cudaStat = hipMemcpy (cosArg, arg, N * sizeof(arg[0]), hipMemcpyHostToDevice);
    if( cudaStat )
        printf(" value = %d : Memory Copy from Host to Device failed\n", cudaStat);

    funcParams.res = cosRes;
    funcParams.arg = cosArg;
    funcParams.n = N;

    hipLaunchKernelGGL(( cos_main), dim3(1), dim3(COS_THREAD_CNT), 0, 0, funcParams);

    cudaStat = hipMemcpy (res, cosRes, N * sizeof(cosRes[0]), hipMemcpyDeviceToHost);
    if( cudaStat )
        printf(" value = %d : Memory Copy from Device to Host failed\n", cudaStat);

    for(i=0; i < N; i++ ){
        if ( i%10 == 0 )
            printf("\n cosf(%f) = %f ", arg[i], res[i] );
    }

    hipFree(cosArg);
    hipFree(cosRes);
    free(arg);
    free(res);
    return 0;
}
/* nvcc cosine.cu -use_fast_math */
b8d8754ea6fe39451812b51c1c9f1f9aaa5ba974.cu
/* CUDA GPU based program that uses the GPU to compute the cosine of numbers */
/* --------------------------- header section ----------------------------*/
#include<stdio.h>
#include<cuda.h>

#define COS_THREAD_CNT 200
#define N 10000

/* --------------------------- target code ------------------------------*/
struct cosParams {
    float *arg;
    float *res;
    int n;
};

__global__ void cos_main(struct cosParams parms)
{
    int i;
    for (i = threadIdx.x; i < parms.n; i += COS_THREAD_CNT) {
        parms.res[i] = __cosf(parms.arg[i]);
    }
}

/* --------------------------- host code ------------------------------*/
int main(int argc, char *argv[])
{
    int i = 0;
    cudaError_t cudaStat;
    float* cosRes = 0;
    float* cosArg = 0;
    float* arg = (float *) malloc(N*sizeof(arg[0]));
    float* res = (float *) malloc(N*sizeof(res[0]));
    struct cosParams funcParams;

    /* ... fill arguments array "arg" .... */
    for(i=0; i < N; i++ ){
        arg[i] = (float)i;
    }

    cudaStat = cudaMalloc ((void **)&cosArg, N * sizeof(cosArg[0]));
    if( cudaStat )
        printf(" value = %d : Memory Allocation on GPU Device failed\n", cudaStat);
    cudaStat = cudaMalloc ((void **)&cosRes, N * sizeof(cosRes[0]));
    if( cudaStat )
        printf(" value = %d : Memory Allocation on GPU Device failed\n", cudaStat);
    cudaStat = cudaMemcpy (cosArg, arg, N * sizeof(arg[0]), cudaMemcpyHostToDevice);
    if( cudaStat )
        printf(" value = %d : Memory Copy from Host to Device failed\n", cudaStat);

    funcParams.res = cosRes;
    funcParams.arg = cosArg;
    funcParams.n = N;

    cos_main<<<1,COS_THREAD_CNT>>>(funcParams);

    cudaStat = cudaMemcpy (res, cosRes, N * sizeof(cosRes[0]), cudaMemcpyDeviceToHost);
    if( cudaStat )
        printf(" value = %d : Memory Copy from Device to Host failed\n", cudaStat);

    for(i=0; i < N; i++ ){
        if ( i%10 == 0 )
            printf("\n cosf(%f) = %f ", arg[i], res[i] );
    }

    cudaFree(cosArg);
    cudaFree(cosRes);
    free(arg);
    free(res);
    return 0;
}
/* nvcc cosine.cu -use_fast_math */
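__cosf is the fast-math intrinsic, and its accuracy degrades as the argument grows (here arg reaches 9999). A small host-side check such as the sketch below can quantify the deviation from the reference cosf after the device-to-host copy; the function name max_abs_error is illustrative.

#include <math.h>

/* Compare the device results (computed with __cosf) against host cosf.
 * Could be called right after the final device-to-host copy in main, e.g.
 *   printf("max |error| = %e\n", max_abs_error(arg, res, N)); */
float max_abs_error(const float *arg, const float *res, int n)
{
    float worst = 0.0f;
    for (int i = 0; i < n; i++) {
        float err = fabsf(res[i] - cosf(arg[i]));
        if (err > worst) worst = err;
    }
    return worst;
}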
7b7abe6685313651d1bcff563adfe3ca2bcd2e14.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" //---------------------------------------------------------------------- /*!\file gpu_algorithms/motionEstimation.cu * * \author Felix Laufer * * * CUDA: Algorithms and kernels for fast phase correlation block matching motion estimation on 2d matrices * */ //---------------------------------------------------------------------- #include <hipfft.h> #include "gpu_algorithms/filterKernels.cu" typedef float2 Vec2f; namespace gpu_algorithms { namespace cuda { namespace motion_estimation { //---------------------------------------------------------------------- // Kernel functions //---------------------------------------------------------------------- // Argument of maximum reduction // Requires: blockDim.x = block stream size template<bool param_result_maximums> static __global__ void ArgumentMaximumReduction(const Complex *idata, int *result_indices, Real *result_maximums, const unsigned int maximum_iterations) { extern __shared__ float smem_MaximumArgumentReduction[]; Complex* sdata_cached = (Complex*) smem_MaximumArgumentReduction; Complex* sdata = (Complex*) &sdata_cached[blockDim.x]; int* sindices = (int*) &sdata[blockDim.x]; const unsigned int tid = threadIdx.x; const unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; sdata_cached[tid].x = idata[i].x; sdata_cached[tid].y = 0; for (unsigned int n = 0; n < maximum_iterations; ++n) { sdata[tid]= sdata_cached[tid]; sindices[tid] = tid; __syncthreads(); int floorPow2 = blockDim.x; if (floorPow2 & (floorPow2 - 1)) { while (floorPow2 & (floorPow2 - 1)) { floorPow2 &= floorPow2 - 1; } if (tid >= floorPow2) { if (sdata[tid - floorPow2].x < sdata[tid].x) { sdata[tid - floorPow2] = sdata[tid]; sindices[tid - floorPow2] = sindices[tid]; } } __syncthreads(); } for (unsigned int s = floorPow2 >> 1; s > 0; s >>= 1) { if (tid < s) { if (sdata[tid].x < sdata[tid + s].x) { sdata[tid] = sdata[tid + s]; sindices[tid] = sindices[tid + s]; } } __syncthreads(); } if (tid == 0) { const int last_maximum_index = sindices[0]; sdata_cached[last_maximum_index] = (last_maximum_index > 0) ? sdata_cached[0] : sdata_cached[1]; const unsigned int i = blockIdx.x * maximum_iterations + n; result_indices[i] = (sdata[0].x > 0.0f) ? 
last_maximum_index : -1; if (param_result_maximums) { result_maximums[i] = sdata[0].x; } } __syncthreads(); } } // Real to Complex block extraction with optional overlapping, optional circular shift and optional weighting template<bool param_overlapped, bool param_shift, bool param_weighted> static __global__ void Real2ComplexMatrixBlockExtraction(const Real *idata, Complex *odata, const unsigned int nx, const unsigned int nx_block, const unsigned int nx_search_block, const Real *weights) { extern __shared__ Real smem_Real2ComplexMatrixBlockExtraction[]; const unsigned int blocks_matrices_size = blockDim.x * blockDim.y; const unsigned int blocks_count_x = ceilf((float) nx / nx_block); const unsigned int o_i_block_offset = (blockDim.x - nx_block) / 2; const unsigned int block_id = blockIdx.y * gridDim.x + blockIdx.x; unsigned int idx_x = threadIdx.x; unsigned int idx_y = threadIdx.y; int o_block_x = idx_x; int o_block_y = idx_y; const unsigned int i_block_row = block_id / blocks_count_x; const unsigned int i_block_col = block_id - i_block_row * blocks_count_x; const int i_block_x = o_block_x - o_i_block_offset; const int i_block_y = o_block_y - o_i_block_offset; Real data; if(!param_overlapped && !(0 <= i_block_x && i_block_x < nx_block && 0 <= i_block_y && i_block_y < nx_block)) { data = 0.0f; } else { const int i_matrix_x = i_block_col * nx_block + i_block_x; const int i_matrix_y = i_block_row * nx_block + i_block_y; Real weight = param_weighted ? weights[o_block_y * blockDim.x + o_block_x] : 1.0f; const bool is_valid_coordinate = (0 <= i_matrix_x && i_matrix_x < nx && 0 <= i_matrix_y && i_matrix_y < nx); data = is_valid_coordinate ? idata[i_matrix_y * nx + i_matrix_x] * weight: 0.0f; } const unsigned int i = idx_y * blockDim.x + idx_x; const unsigned int o_offset = block_id * blocks_matrices_size; if (param_shift) { smem_Real2ComplexMatrixBlockExtraction[SequentialIndex2DFFTShift(o_block_x, o_block_y, nx_search_block)] = data; __syncthreads(); odata[o_offset + i].x = smem_Real2ComplexMatrixBlockExtraction[i]; odata[o_offset + i].y = 0.0f; } else { odata[o_offset + i].x = data; odata[o_offset + i].y = 0.0f; } } // Motion indices to aspect matrix static __global__ void MotionIndices2Matrix(const int *idata, Vec2f *odata, const unsigned int size, const unsigned int matrix_size, const unsigned int block_size, const unsigned int search_block_size, const bool show_motion = false) { const unsigned int numThreads = blockDim.x * gridDim.x; const unsigned int threadID = blockIdx.x * blockDim.x + threadIdx.x; const unsigned int blocks_count = ceilf((float) matrix_size / block_size); for (unsigned int i = threadID; i < size; i += numThreads) { const unsigned int y = i / matrix_size; const unsigned int x = i - y * matrix_size; const unsigned int block_row = y / block_size; const unsigned int block_col = x / block_size; const unsigned int block_id = block_row * blocks_count + block_col; const int index = idata[block_id]; int block_x = 0; int block_y = 0; if (index > -1) { block_y = index / search_block_size; block_x = index - block_y * search_block_size; block_x -= (search_block_size - 1) / 2; block_y -= (search_block_size - 1) / 2 ; } odata[i].x = (show_motion) ? -block_x : block_x; odata[i].y = (show_motion) ? -block_y : block_y; } } //---------------------------------------------------------------------- // Algorithms //---------------------------------------------------------------------- // Requires: block_size <= search_blocksize <= 32 (CUDA max. 
threads per block = 32 x 32) __host__ void BlockMotionEstimation(const float* iframe_a_data, const float* iframe_b_data, Vec2f* omotion_vector_matrix, const unsigned int matrix_size, const unsigned int block_size, const unsigned int search_block_size, const bool weighting_window, const bool show_motion = false) { // Return immediately in case of wrong size specifications if (block_size > search_block_size|| block_size > 32 || search_block_size > 32) { return; } const unsigned int stream_threads_per_block = 256; const unsigned int search_block_size_squared = search_block_size * search_block_size; // Number of motion estimation blocks const unsigned int matrix_blocks = ceil((float) matrix_size / block_size) * ceil((float) matrix_size / block_size); // Stream sizes of raw frame data and matrix block extraction data const unsigned int frame_stream_size = matrix_size * matrix_size; const unsigned int frame_matrix_block_extraction_stream_size = matrix_blocks * search_block_size_squared; // Actual byte sizes of raw frame data and matrix block extraction data const unsigned int frame_stream_size_real = frame_stream_size * sizeof(Real); const unsigned int frame_matrix_block_extraction_stream_size_complex = frame_matrix_block_extraction_stream_size * sizeof(Complex); // Allocate all device memory Real *frame_data, *frame_a_data, *frame_b_data, *raised_cosine_window; Complex *frame_matrix_block_extraction_complex, *frame_a_matrix_block_extraction_complex, *frame_b_matrix_block_extraction_complex; int *max_indices; Vec2f *motion_vector_matrix; hipMalloc((void**)&frame_data, frame_stream_size_real * 2); hipMalloc((void**)&frame_matrix_block_extraction_complex, frame_matrix_block_extraction_stream_size_complex * 2); hipMalloc((void**)&raised_cosine_window, search_block_size_squared * sizeof(Real)); hipMalloc((void**)&max_indices, matrix_blocks * sizeof(int)); hipMalloc((void**)&motion_vector_matrix, frame_stream_size * sizeof(Vec2f)); frame_a_data = &frame_data[0]; frame_b_data = &frame_data[frame_stream_size]; frame_a_matrix_block_extraction_complex = &frame_matrix_block_extraction_complex[0]; frame_b_matrix_block_extraction_complex = &frame_matrix_block_extraction_complex[frame_matrix_block_extraction_stream_size]; // Transfer input data to device memory hipMemcpy(frame_a_data, iframe_a_data, frame_stream_size_real, hipMemcpyHostToDevice); hipMemcpy(frame_b_data, iframe_b_data, frame_stream_size_real, hipMemcpyHostToDevice); // Prepare matrix block-wise 2D FFT plan hipfftHandle plan_2d_complex; { int rank = 2; int n[] = {search_block_size, search_block_size}; int inembed[] = {0, search_block_size}; int istride = 1; int idist = search_block_size_squared; int onembed[] = {0, search_block_size}; int ostride = 1; int odist = search_block_size_squared; int batch = matrix_blocks; hipfftPlanMany(&plan_2d_complex, rank, n, inembed, istride, idist, onembed, ostride, odist, HIPFFT_C2C, batch); } // Prepare grid, block and shared memory configuration for block matrix extraction const dim3 k1_grid(matrix_blocks); const dim3 k1_block(search_block_size, search_block_size); const unsigned int k1_shared_mem_size = search_block_size_squared * sizeof(Real); if (weighting_window) { // Prepare grid and block configuration for raised cosine window const dim3 k0_grid(ceil(search_block_size_squared / (float) stream_threads_per_block)); const dim3 k0_block(stream_threads_per_block); hipLaunchKernelGGL(( RaisedCosineWindow) , dim3(k0_grid), dim3(k0_block), 0, 0, raised_cosine_window, search_block_size, search_block_size ); // 
Extract first framed into matrix blocks: overlap, no shift, weighting hipLaunchKernelGGL(( Real2ComplexMatrixBlockExtraction<true, false, true>) , dim3(k1_grid), dim3(k1_block), k1_shared_mem_size, 0, frame_a_data, frame_a_matrix_block_extraction_complex, matrix_size, block_size, search_block_size, raised_cosine_window ); // Extract second frame into matrix blocks: overlap, shift, weighting hipLaunchKernelGGL(( Real2ComplexMatrixBlockExtraction<true, true, true>) , dim3(k1_grid), dim3(k1_block), k1_shared_mem_size, 0, frame_b_data, frame_b_matrix_block_extraction_complex, matrix_size, block_size, search_block_size, raised_cosine_window ); } else { // Extract first framed into matrix blocks: overlap, no shift, no weighting hipLaunchKernelGGL(( Real2ComplexMatrixBlockExtraction<true, false, false>) , dim3(k1_grid), dim3(k1_block), k1_shared_mem_size, 0, frame_a_data, frame_a_matrix_block_extraction_complex, matrix_size, block_size, search_block_size, NULL ); // Extract second frame into matrix blocks: overlap, shift, no weighting hipLaunchKernelGGL(( Real2ComplexMatrixBlockExtraction<true, true, false>) , dim3(k1_grid), dim3(k1_block), k1_shared_mem_size, 0, frame_b_data, frame_b_matrix_block_extraction_complex, matrix_size, block_size, search_block_size, NULL ); } // 2D FFT transformation of both frames' matrix blocks hipfftExecC2C(plan_2d_complex, frame_a_matrix_block_extraction_complex, frame_a_matrix_block_extraction_complex, HIPFFT_FORWARD); hipfftExecC2C(plan_2d_complex, frame_b_matrix_block_extraction_complex, frame_b_matrix_block_extraction_complex, HIPFFT_FORWARD); // Cross correlate the frames' block matrices const dim3 k2_grid(ceil(frame_matrix_block_extraction_stream_size / (float) stream_threads_per_block)); const dim3 k2_block(stream_threads_per_block); hipLaunchKernelGGL(( ComplexPointwiseNormalizedCorrelation) , dim3(k2_grid), dim3(k2_block), 0, 0, frame_a_matrix_block_extraction_complex, frame_b_matrix_block_extraction_complex, frame_matrix_block_extraction_stream_size, search_block_size_squared ); // 2D FFT transformation of resulting correlation map matrix blocks hipfftExecC2C(plan_2d_complex, frame_a_matrix_block_extraction_complex, frame_a_matrix_block_extraction_complex, HIPFFT_BACKWARD); // Prepare block-wise maximum argument reduction const dim3 k3_grid(matrix_blocks); const dim3 k3_block(search_block_size_squared); // Calculate block-wise maximum argument indices hipLaunchKernelGGL(( ArgumentMaximumReduction<false>) , dim3(k3_grid), dim3(k3_block), search_block_size_squared * (2 * sizeof(Complex) + sizeof(int)), 0, frame_a_matrix_block_extraction_complex, max_indices, NULL, 1 ); // Calculate motion vectors from motion indices const dim3 k4_grid(ceil(frame_stream_size / (float) stream_threads_per_block)); const dim3 k4_block(stream_threads_per_block); hipLaunchKernelGGL(( MotionIndices2Matrix) , dim3(k4_grid), dim3(k4_block), 0, 0, max_indices, motion_vector_matrix, frame_stream_size, matrix_size, block_size, search_block_size, show_motion ); // Transfer result back to host memory hipMemcpy(omotion_vector_matrix, motion_vector_matrix, frame_stream_size * sizeof(Vec2f), hipMemcpyDeviceToHost); // Cleanup hipfftDestroy(plan_2d_complex); hipFree(frame_data); hipFree(frame_matrix_block_extraction_complex); hipFree(raised_cosine_window); hipFree(max_indices); hipFree(motion_vector_matrix); } } } }
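The weighted path above multiplies each search block by a window produced by RaisedCosineWindow, which is defined in filterKernels.cu and not shown here. Purely as an assumption about what such a weighting typically looks like, the host sketch below builds a separable raised-cosine (Hann) window of the same search_block_size x search_block_size shape; it is illustrative and may differ from the project's actual kernel.

#include <math.h>

/* Hypothetical host-side construction of a separable raised-cosine (Hann)
 * window; the real RaisedCosineWindow kernel in filterKernels.cu may differ. */
void build_raised_cosine_window(float *window, int nx, int ny)
{
    const float pi = 3.14159265358979f;
    for (int y = 0; y < ny; y++) {
        const float wy = (ny > 1) ? 0.5f * (1.0f - cosf(2.0f * pi * y / (ny - 1))) : 1.0f;
        for (int x = 0; x < nx; x++) {
            const float wx = (nx > 1) ? 0.5f * (1.0f - cosf(2.0f * pi * x / (nx - 1))) : 1.0f;
            window[y * nx + x] = wx * wy;   /* tapers the block towards its borders */
        }
    }
}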
7b7abe6685313651d1bcff563adfe3ca2bcd2e14.cu
//---------------------------------------------------------------------- /*!\file gpu_algorithms/motionEstimation.cu * * \author Felix Laufer * * * CUDA: Algorithms and kernels for fast phase correlation block matching motion estimation on 2d matrices * */ //---------------------------------------------------------------------- #include <cufft.h> #include "gpu_algorithms/filterKernels.cu" typedef float2 Vec2f; namespace gpu_algorithms { namespace cuda { namespace motion_estimation { //---------------------------------------------------------------------- // Kernel functions //---------------------------------------------------------------------- // Argument of maximum reduction // Requires: blockDim.x = block stream size template<bool param_result_maximums> static __global__ void ArgumentMaximumReduction(const Complex *idata, int *result_indices, Real *result_maximums, const unsigned int maximum_iterations) { extern __shared__ float smem_MaximumArgumentReduction[]; Complex* sdata_cached = (Complex*) smem_MaximumArgumentReduction; Complex* sdata = (Complex*) &sdata_cached[blockDim.x]; int* sindices = (int*) &sdata[blockDim.x]; const unsigned int tid = threadIdx.x; const unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; sdata_cached[tid].x = idata[i].x; sdata_cached[tid].y = 0; for (unsigned int n = 0; n < maximum_iterations; ++n) { sdata[tid]= sdata_cached[tid]; sindices[tid] = tid; __syncthreads(); int floorPow2 = blockDim.x; if (floorPow2 & (floorPow2 - 1)) { while (floorPow2 & (floorPow2 - 1)) { floorPow2 &= floorPow2 - 1; } if (tid >= floorPow2) { if (sdata[tid - floorPow2].x < sdata[tid].x) { sdata[tid - floorPow2] = sdata[tid]; sindices[tid - floorPow2] = sindices[tid]; } } __syncthreads(); } for (unsigned int s = floorPow2 >> 1; s > 0; s >>= 1) { if (tid < s) { if (sdata[tid].x < sdata[tid + s].x) { sdata[tid] = sdata[tid + s]; sindices[tid] = sindices[tid + s]; } } __syncthreads(); } if (tid == 0) { const int last_maximum_index = sindices[0]; sdata_cached[last_maximum_index] = (last_maximum_index > 0) ? sdata_cached[0] : sdata_cached[1]; const unsigned int i = blockIdx.x * maximum_iterations + n; result_indices[i] = (sdata[0].x > 0.0f) ? 
last_maximum_index : -1; if (param_result_maximums) { result_maximums[i] = sdata[0].x; } } __syncthreads(); } } // Real to Complex block extraction with optional overlapping, optional circular shift and optional weighting template<bool param_overlapped, bool param_shift, bool param_weighted> static __global__ void Real2ComplexMatrixBlockExtraction(const Real *idata, Complex *odata, const unsigned int nx, const unsigned int nx_block, const unsigned int nx_search_block, const Real *weights) { extern __shared__ Real smem_Real2ComplexMatrixBlockExtraction[]; const unsigned int blocks_matrices_size = blockDim.x * blockDim.y; const unsigned int blocks_count_x = ceilf((float) nx / nx_block); const unsigned int o_i_block_offset = (blockDim.x - nx_block) / 2; const unsigned int block_id = blockIdx.y * gridDim.x + blockIdx.x; unsigned int idx_x = threadIdx.x; unsigned int idx_y = threadIdx.y; int o_block_x = idx_x; int o_block_y = idx_y; const unsigned int i_block_row = block_id / blocks_count_x; const unsigned int i_block_col = block_id - i_block_row * blocks_count_x; const int i_block_x = o_block_x - o_i_block_offset; const int i_block_y = o_block_y - o_i_block_offset; Real data; if(!param_overlapped && !(0 <= i_block_x && i_block_x < nx_block && 0 <= i_block_y && i_block_y < nx_block)) { data = 0.0f; } else { const int i_matrix_x = i_block_col * nx_block + i_block_x; const int i_matrix_y = i_block_row * nx_block + i_block_y; Real weight = param_weighted ? weights[o_block_y * blockDim.x + o_block_x] : 1.0f; const bool is_valid_coordinate = (0 <= i_matrix_x && i_matrix_x < nx && 0 <= i_matrix_y && i_matrix_y < nx); data = is_valid_coordinate ? idata[i_matrix_y * nx + i_matrix_x] * weight: 0.0f; } const unsigned int i = idx_y * blockDim.x + idx_x; const unsigned int o_offset = block_id * blocks_matrices_size; if (param_shift) { smem_Real2ComplexMatrixBlockExtraction[SequentialIndex2DFFTShift(o_block_x, o_block_y, nx_search_block)] = data; __syncthreads(); odata[o_offset + i].x = smem_Real2ComplexMatrixBlockExtraction[i]; odata[o_offset + i].y = 0.0f; } else { odata[o_offset + i].x = data; odata[o_offset + i].y = 0.0f; } } // Motion indices to aspect matrix static __global__ void MotionIndices2Matrix(const int *idata, Vec2f *odata, const unsigned int size, const unsigned int matrix_size, const unsigned int block_size, const unsigned int search_block_size, const bool show_motion = false) { const unsigned int numThreads = blockDim.x * gridDim.x; const unsigned int threadID = blockIdx.x * blockDim.x + threadIdx.x; const unsigned int blocks_count = ceilf((float) matrix_size / block_size); for (unsigned int i = threadID; i < size; i += numThreads) { const unsigned int y = i / matrix_size; const unsigned int x = i - y * matrix_size; const unsigned int block_row = y / block_size; const unsigned int block_col = x / block_size; const unsigned int block_id = block_row * blocks_count + block_col; const int index = idata[block_id]; int block_x = 0; int block_y = 0; if (index > -1) { block_y = index / search_block_size; block_x = index - block_y * search_block_size; block_x -= (search_block_size - 1) / 2; block_y -= (search_block_size - 1) / 2 ; } odata[i].x = (show_motion) ? -block_x : block_x; odata[i].y = (show_motion) ? -block_y : block_y; } } //---------------------------------------------------------------------- // Algorithms //---------------------------------------------------------------------- // Requires: block_size <= search_blocksize <= 32 (CUDA max. 
threads per block = 32 x 32) __host__ void BlockMotionEstimation(const float* iframe_a_data, const float* iframe_b_data, Vec2f* omotion_vector_matrix, const unsigned int matrix_size, const unsigned int block_size, const unsigned int search_block_size, const bool weighting_window, const bool show_motion = false) { // Return immediately in case of wrong size specifications if (block_size > search_block_size|| block_size > 32 || search_block_size > 32) { return; } const unsigned int stream_threads_per_block = 256; const unsigned int search_block_size_squared = search_block_size * search_block_size; // Number of motion estimation blocks const unsigned int matrix_blocks = ceil((float) matrix_size / block_size) * ceil((float) matrix_size / block_size); // Stream sizes of raw frame data and matrix block extraction data const unsigned int frame_stream_size = matrix_size * matrix_size; const unsigned int frame_matrix_block_extraction_stream_size = matrix_blocks * search_block_size_squared; // Actual byte sizes of raw frame data and matrix block extraction data const unsigned int frame_stream_size_real = frame_stream_size * sizeof(Real); const unsigned int frame_matrix_block_extraction_stream_size_complex = frame_matrix_block_extraction_stream_size * sizeof(Complex); // Allocate all device memory Real *frame_data, *frame_a_data, *frame_b_data, *raised_cosine_window; Complex *frame_matrix_block_extraction_complex, *frame_a_matrix_block_extraction_complex, *frame_b_matrix_block_extraction_complex; int *max_indices; Vec2f *motion_vector_matrix; cudaMalloc((void**)&frame_data, frame_stream_size_real * 2); cudaMalloc((void**)&frame_matrix_block_extraction_complex, frame_matrix_block_extraction_stream_size_complex * 2); cudaMalloc((void**)&raised_cosine_window, search_block_size_squared * sizeof(Real)); cudaMalloc((void**)&max_indices, matrix_blocks * sizeof(int)); cudaMalloc((void**)&motion_vector_matrix, frame_stream_size * sizeof(Vec2f)); frame_a_data = &frame_data[0]; frame_b_data = &frame_data[frame_stream_size]; frame_a_matrix_block_extraction_complex = &frame_matrix_block_extraction_complex[0]; frame_b_matrix_block_extraction_complex = &frame_matrix_block_extraction_complex[frame_matrix_block_extraction_stream_size]; // Transfer input data to device memory cudaMemcpy(frame_a_data, iframe_a_data, frame_stream_size_real, cudaMemcpyHostToDevice); cudaMemcpy(frame_b_data, iframe_b_data, frame_stream_size_real, cudaMemcpyHostToDevice); // Prepare matrix block-wise 2D FFT plan cufftHandle plan_2d_complex; { int rank = 2; int n[] = {search_block_size, search_block_size}; int inembed[] = {0, search_block_size}; int istride = 1; int idist = search_block_size_squared; int onembed[] = {0, search_block_size}; int ostride = 1; int odist = search_block_size_squared; int batch = matrix_blocks; cufftPlanMany(&plan_2d_complex, rank, n, inembed, istride, idist, onembed, ostride, odist, CUFFT_C2C, batch); } // Prepare grid, block and shared memory configuration for block matrix extraction const dim3 k1_grid(matrix_blocks); const dim3 k1_block(search_block_size, search_block_size); const unsigned int k1_shared_mem_size = search_block_size_squared * sizeof(Real); if (weighting_window) { // Prepare grid and block configuration for raised cosine window const dim3 k0_grid(ceil(search_block_size_squared / (float) stream_threads_per_block)); const dim3 k0_block(stream_threads_per_block); RaisedCosineWindow <<<k0_grid, k0_block>>> ( raised_cosine_window, search_block_size, search_block_size ); // Extract first framed into 
matrix blocks: overlap, no shift, weighting Real2ComplexMatrixBlockExtraction<true, false, true> <<<k1_grid, k1_block, k1_shared_mem_size>>> ( frame_a_data, frame_a_matrix_block_extraction_complex, matrix_size, block_size, search_block_size, raised_cosine_window ); // Extract second frame into matrix blocks: overlap, shift, weighting Real2ComplexMatrixBlockExtraction<true, true, true> <<<k1_grid, k1_block, k1_shared_mem_size>>> ( frame_b_data, frame_b_matrix_block_extraction_complex, matrix_size, block_size, search_block_size, raised_cosine_window ); } else { // Extract first framed into matrix blocks: overlap, no shift, no weighting Real2ComplexMatrixBlockExtraction<true, false, false> <<<k1_grid, k1_block, k1_shared_mem_size>>> ( frame_a_data, frame_a_matrix_block_extraction_complex, matrix_size, block_size, search_block_size, NULL ); // Extract second frame into matrix blocks: overlap, shift, no weighting Real2ComplexMatrixBlockExtraction<true, true, false> <<<k1_grid, k1_block, k1_shared_mem_size>>> ( frame_b_data, frame_b_matrix_block_extraction_complex, matrix_size, block_size, search_block_size, NULL ); } // 2D FFT transformation of both frames' matrix blocks cufftExecC2C(plan_2d_complex, frame_a_matrix_block_extraction_complex, frame_a_matrix_block_extraction_complex, CUFFT_FORWARD); cufftExecC2C(plan_2d_complex, frame_b_matrix_block_extraction_complex, frame_b_matrix_block_extraction_complex, CUFFT_FORWARD); // Cross correlate the frames' block matrices const dim3 k2_grid(ceil(frame_matrix_block_extraction_stream_size / (float) stream_threads_per_block)); const dim3 k2_block(stream_threads_per_block); ComplexPointwiseNormalizedCorrelation <<<k2_grid, k2_block>>> ( frame_a_matrix_block_extraction_complex, frame_b_matrix_block_extraction_complex, frame_matrix_block_extraction_stream_size, search_block_size_squared ); // 2D FFT transformation of resulting correlation map matrix blocks cufftExecC2C(plan_2d_complex, frame_a_matrix_block_extraction_complex, frame_a_matrix_block_extraction_complex, CUFFT_INVERSE); // Prepare block-wise maximum argument reduction const dim3 k3_grid(matrix_blocks); const dim3 k3_block(search_block_size_squared); // Calculate block-wise maximum argument indices ArgumentMaximumReduction<false> <<<k3_grid, k3_block, search_block_size_squared * (2 * sizeof(Complex) + sizeof(int))>>> ( frame_a_matrix_block_extraction_complex, max_indices, NULL, 1 ); // Calculate motion vectors from motion indices const dim3 k4_grid(ceil(frame_stream_size / (float) stream_threads_per_block)); const dim3 k4_block(stream_threads_per_block); MotionIndices2Matrix <<<k4_grid, k4_block>>> ( max_indices, motion_vector_matrix, frame_stream_size, matrix_size, block_size, search_block_size, show_motion ); // Transfer result back to host memory cudaMemcpy(omotion_vector_matrix, motion_vector_matrix, frame_stream_size * sizeof(Vec2f), cudaMemcpyDeviceToHost); // Cleanup cufftDestroy(plan_2d_complex); cudaFree(frame_data); cudaFree(frame_matrix_block_extraction_complex); cudaFree(raised_cosine_window); cudaFree(max_indices); cudaFree(motion_vector_matrix); } } } }
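MotionIndices2Matrix converts each block's flat argmax index into a signed displacement. The small host sketch below replays that index arithmetic so the mapping can be checked in isolation; the function name index_to_displacement is illustrative.

#include <stdio.h>

/* Replays the index arithmetic of MotionIndices2Matrix on the host:
 * a flat argmax index inside a search_block_size x search_block_size
 * correlation block becomes a signed (dx, dy) displacement, and -1 means
 * "no reliable peak". The function name is illustrative. */
void index_to_displacement(int index, int search_block_size, int *dx, int *dy)
{
    *dx = 0;
    *dy = 0;
    if (index > -1) {
        int y = index / search_block_size;
        int x = index - y * search_block_size;
        *dx = x - (search_block_size - 1) / 2;
        *dy = y - (search_block_size - 1) / 2;
    }
}

int main(void)
{
    int dx, dy;
    index_to_displacement(0, 9, &dx, &dy);   /* top-left corner of a 9x9 block */
    printf("(%d, %d)\n", dx, dy);            /* prints (-4, -4) */
    index_to_displacement(40, 9, &dx, &dy);  /* centre of a 9x9 block */
    printf("(%d, %d)\n", dx, dy);            /* prints (0, 0) */
    return 0;
}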
2520828c57853cb883bd62b41d9b637244de3299.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "fill.cuh" #include <catboost/cuda/cuda_lib/kernel/arch.cuh> #include <catboost/cuda/gpu_data/gpu_structures.h> namespace NKernel { template<typename T> __global__ void FillBufferImpl(T* buffer, T value, ui64 size) { ui64 i = blockIdx.x * blockDim.x + threadIdx.x; while (i < size) { buffer[i] = value; i += gridDim.x * blockDim.x; } } template<typename T> void FillBuffer(T* buffer, T value, ui64 size, TCudaStream stream) { if (size > 0) { const ui32 blockSize = 512; const ui64 numBlocks = min((size + blockSize - 1) / blockSize, (ui64)TArchProps::MaxBlockCount()); FillBufferImpl<T> << < numBlocks, blockSize, 0, stream>> > (buffer, value, size); } } template<typename T> __global__ void MakeSequenceImpl(T offset, T* buffer, ui64 size) { ui64 i = blockIdx.x * blockDim.x + threadIdx.x; while (i < size) { buffer[i] = offset + i; i += gridDim.x * blockDim.x; } } template<typename T> void MakeSequence(T offset, T* buffer, ui64 size, TCudaStream stream) { if (size > 0) { const ui32 blockSize = 512; const ui64 numBlocks = min((size + blockSize - 1) / blockSize, (ui64)TArchProps::MaxBlockCount()); MakeSequenceImpl<T> << < numBlocks, blockSize, 0, stream >> > (offset, buffer, size); } } template<typename T> __global__ void InversePermutationImpl(const T* indices, T* dst, ui64 size) { ui64 i = blockIdx.x * blockDim.x + threadIdx.x; while (i < size) { dst[indices[i]] = i; i += gridDim.x * blockDim.x; } } template<typename T> void InversePermutation(const T* order, T* inverseOrder, ui64 size, TCudaStream stream) { if (size > 0) { const ui32 blockSize = 512; const ui64 numBlocks = min((size + blockSize - 1) / blockSize, (ui64)TArchProps::MaxBlockCount()); InversePermutationImpl<T> << < numBlocks, blockSize, 0, stream >> > (order, inverseOrder, size); } } template void FillBuffer<char>(char* buffer, char value, ui64 size, TCudaStream stream); template void FillBuffer<unsigned char>(unsigned char* buffer, unsigned char value, ui64 size, TCudaStream stream); template void FillBuffer<short>(short* buffer, short value, ui64 size, TCudaStream stream); template void FillBuffer<ui16>(ui16* buffer, ui16 value, ui64 size, TCudaStream stream); template void FillBuffer<int>(int* buffer, int value, ui64 size, TCudaStream stream); template void FillBuffer<ui32>(ui32* buffer, ui32 value, ui64 size, TCudaStream stream); template void FillBuffer<float>(float* buffer, float value, ui64 size, TCudaStream stream); template void FillBuffer<double>(double* buffer, double value, ui64 size, TCudaStream stream); template void FillBuffer<long>(long* buffer, long value, ui64 size, TCudaStream stream); template void FillBuffer<ui64>(ui64* buffer, ui64 value, ui64 size, TCudaStream stream); template void FillBuffer<TCBinFeature>(TCBinFeature* buffer, TCBinFeature value, ui64 size, TCudaStream stream); template void MakeSequence<int>(int offset, int* buffer, ui64 size, TCudaStream stream); template void MakeSequence<ui32>(ui32 offset, ui32* buffer, ui64 size, TCudaStream stream); template void MakeSequence<ui64>(ui64 offset, ui64* buffer, ui64 size, TCudaStream stream); template void InversePermutation<ui32>(const ui32* order, ui32* inverseOrder, ui64 size, TCudaStream stream); template void InversePermutation<int>(const int* order, int* inverseOrder, ui64 size, TCudaStream stream); }
2520828c57853cb883bd62b41d9b637244de3299.cu
#include "fill.cuh" #include <catboost/cuda/cuda_lib/kernel/arch.cuh> #include <catboost/cuda/gpu_data/gpu_structures.h> namespace NKernel { template<typename T> __global__ void FillBufferImpl(T* buffer, T value, ui64 size) { ui64 i = blockIdx.x * blockDim.x + threadIdx.x; while (i < size) { buffer[i] = value; i += gridDim.x * blockDim.x; } } template<typename T> void FillBuffer(T* buffer, T value, ui64 size, TCudaStream stream) { if (size > 0) { const ui32 blockSize = 512; const ui64 numBlocks = min((size + blockSize - 1) / blockSize, (ui64)TArchProps::MaxBlockCount()); FillBufferImpl<T> << < numBlocks, blockSize, 0, stream>> > (buffer, value, size); } } template<typename T> __global__ void MakeSequenceImpl(T offset, T* buffer, ui64 size) { ui64 i = blockIdx.x * blockDim.x + threadIdx.x; while (i < size) { buffer[i] = offset + i; i += gridDim.x * blockDim.x; } } template<typename T> void MakeSequence(T offset, T* buffer, ui64 size, TCudaStream stream) { if (size > 0) { const ui32 blockSize = 512; const ui64 numBlocks = min((size + blockSize - 1) / blockSize, (ui64)TArchProps::MaxBlockCount()); MakeSequenceImpl<T> << < numBlocks, blockSize, 0, stream >> > (offset, buffer, size); } } template<typename T> __global__ void InversePermutationImpl(const T* indices, T* dst, ui64 size) { ui64 i = blockIdx.x * blockDim.x + threadIdx.x; while (i < size) { dst[indices[i]] = i; i += gridDim.x * blockDim.x; } } template<typename T> void InversePermutation(const T* order, T* inverseOrder, ui64 size, TCudaStream stream) { if (size > 0) { const ui32 blockSize = 512; const ui64 numBlocks = min((size + blockSize - 1) / blockSize, (ui64)TArchProps::MaxBlockCount()); InversePermutationImpl<T> << < numBlocks, blockSize, 0, stream >> > (order, inverseOrder, size); } } template void FillBuffer<char>(char* buffer, char value, ui64 size, TCudaStream stream); template void FillBuffer<unsigned char>(unsigned char* buffer, unsigned char value, ui64 size, TCudaStream stream); template void FillBuffer<short>(short* buffer, short value, ui64 size, TCudaStream stream); template void FillBuffer<ui16>(ui16* buffer, ui16 value, ui64 size, TCudaStream stream); template void FillBuffer<int>(int* buffer, int value, ui64 size, TCudaStream stream); template void FillBuffer<ui32>(ui32* buffer, ui32 value, ui64 size, TCudaStream stream); template void FillBuffer<float>(float* buffer, float value, ui64 size, TCudaStream stream); template void FillBuffer<double>(double* buffer, double value, ui64 size, TCudaStream stream); template void FillBuffer<long>(long* buffer, long value, ui64 size, TCudaStream stream); template void FillBuffer<ui64>(ui64* buffer, ui64 value, ui64 size, TCudaStream stream); template void FillBuffer<TCBinFeature>(TCBinFeature* buffer, TCBinFeature value, ui64 size, TCudaStream stream); template void MakeSequence<int>(int offset, int* buffer, ui64 size, TCudaStream stream); template void MakeSequence<ui32>(ui32 offset, ui32* buffer, ui64 size, TCudaStream stream); template void MakeSequence<ui64>(ui64 offset, ui64* buffer, ui64 size, TCudaStream stream); template void InversePermutation<ui32>(const ui32* order, ui32* inverseOrder, ui64 size, TCudaStream stream); template void InversePermutation<int>(const int* order, int* inverseOrder, ui64 size, TCudaStream stream); }
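FillBuffer, MakeSequence and InversePermutation all rely on the same grid-stride loop, so a bounded number of blocks can cover a buffer of any size. The standalone sketch below reproduces just that pattern outside the catboost build (the kernel and variable names are illustrative) and spot-checks the result on the host.

#include <cuda_runtime.h>
#include <cstdio>

/* Standalone illustration of the grid-stride fill pattern used by
 * FillBufferImpl above; kernel and variable names are illustrative. */
__global__ void FillDemo(float* buffer, float value, size_t size) {
    size_t i = blockIdx.x * blockDim.x + threadIdx.x;
    while (i < size) {
        buffer[i] = value;
        i += (size_t)gridDim.x * blockDim.x;   // step over the whole grid
    }
}

int main() {
    const size_t size = 1 << 20;
    float* d = nullptr;
    cudaMalloc((void**)&d, size * sizeof(float));
    const unsigned int blockSize = 512;
    const unsigned int numBlocks = 128;   // far fewer threads than elements: the loop covers the rest
    FillDemo<<<numBlocks, blockSize>>>(d, 42.0f, size);
    float last = 0.0f;
    cudaMemcpy(&last, d + size - 1, sizeof(float), cudaMemcpyDeviceToHost);  // spot-check the final element
    std::printf("last element = %f\n", last);
    cudaFree(d);
    return 0;
}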
2b3261105180f58d519a3c3e69cf0f4dd4e3fdae.hip
// !!! This is a file automatically generated by hipify!!!
/*******************************
 ***
 *** TASK-3
 *** NAME: - SOAIBUZZAMAN
 *** Matrikel Number: 613488
 *********************************/
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>

const int N = 200;
const int block_size = 32;
const int num_blocks = N / block_size + (N % block_size == 0 ? 0 : 1);

// Device Function
__global__ void calc_max_device(int *vec, int N, int *grid_results)
{
  // Each thread determines the local maximum maxT of its assigned vector elements.
  int max_t = 0; // valid identity because the test data is non-negative
  __shared__ int part_prod[block_size];
  int tid = threadIdx.x + blockIdx.x * blockDim.x;
  for (int i = tid; i < N; i += blockDim.x * gridDim.x) {
    // Max logic: keep the largest element seen by this thread
    if (vec[i] > max_t) {
      max_t = vec[i];
    }
  }
  part_prod[threadIdx.x] = max_t;
  __syncthreads();

  // The threads of block blockIdx.x determine one local maximum maxB
  // for this block in parallel. This value is stored at the position
  // grid_results[blockIdx.x].
  int size = blockDim.x / 2;
  while (size > 0) {
    if (threadIdx.x < size) {
      // Finding max over pairs that are `size` apart
      if (part_prod[threadIdx.x] < part_prod[threadIdx.x + size])
        part_prod[threadIdx.x] = part_prod[threadIdx.x + size];
    }
    __syncthreads();
    size = size / 2;
  }
  // One element per block
  if (threadIdx.x == 0) {
    grid_results[blockIdx.x] = part_prod[0];
  }
}

// Host function
int calc_max(int *vec, int N, int block_size, int num_blocks)
{
  int *part_max_h;
  int *vec_d, *part_max_d;
  part_max_h = (int *)malloc(sizeof(int) * num_blocks);
  hipMalloc((void **)&vec_d, sizeof(int) * N);
  hipMalloc((void **)&part_max_d, sizeof(int) * num_blocks); // one partial maximum per block

  // initialize the vector for testing.
  for (int i = 0; i < N; i++) {
    vec[i] = rand() % 100;
  }
  hipMemcpy(vec_d, vec, sizeof(int) * N, hipMemcpyHostToDevice);

  // calling the device function
  hipLaunchKernelGGL(( calc_max_device), dim3(num_blocks), dim3(block_size), 0, 0, vec_d, N, part_max_d);

  // copy back the per-block maxima and reduce them on the host
  hipMemcpy(part_max_h, part_max_d, sizeof(int) * num_blocks, hipMemcpyDeviceToHost);
  int max_val = part_max_h[0];
  for (int i = 1; i < num_blocks; i++) {
    if (part_max_h[i] > max_val)
      max_val = part_max_h[i];
  }

  free(vec);
  free(part_max_h);
  hipFree(vec_d);
  hipFree(part_max_d);
  return max_val;
}

int main(void)
{
  // Main function for testing
  int *vec_h, *max_val_h;
  max_val_h = (int *)malloc(sizeof(int));
  vec_h = (int *)malloc(sizeof(int) * N); // freed inside calc_max
  *max_val_h = calc_max(vec_h, N, block_size, num_blocks);
  printf("%d\n", *max_val_h);
  free(max_val_h);
  return 0;
}
2b3261105180f58d519a3c3e69cf0f4dd4e3fdae.cu
/*******************************
 ***
 *** TASK-3
 *** NAME: - SOAIBUZZAMAN
 *** Matrikel Number: 613488
 *********************************/
#include <stdio.h>
#include <stdlib.h>
#include <cuda.h>

const int N = 200;
const int block_size = 32;
const int num_blocks = N / block_size + (N % block_size == 0 ? 0 : 1);

// Device Function
__global__ void calc_max_device(int *vec, int N, int *grid_results)
{
  // Each thread determines the local maximum maxT of its assigned vector elements.
  int max_t = 0; // valid identity because the test data is non-negative
  __shared__ int part_prod[block_size];
  int tid = threadIdx.x + blockIdx.x * blockDim.x;
  for (int i = tid; i < N; i += blockDim.x * gridDim.x) {
    // Max logic: keep the largest element seen by this thread
    if (vec[i] > max_t) {
      max_t = vec[i];
    }
  }
  part_prod[threadIdx.x] = max_t;
  __syncthreads();

  // The threads of block blockIdx.x determine one local maximum maxB
  // for this block in parallel. This value is stored at the position
  // grid_results[blockIdx.x].
  int size = blockDim.x / 2;
  while (size > 0) {
    if (threadIdx.x < size) {
      // Finding max over pairs that are `size` apart
      if (part_prod[threadIdx.x] < part_prod[threadIdx.x + size])
        part_prod[threadIdx.x] = part_prod[threadIdx.x + size];
    }
    __syncthreads();
    size = size / 2;
  }
  // One element per block
  if (threadIdx.x == 0) {
    grid_results[blockIdx.x] = part_prod[0];
  }
}

// Host function
int calc_max(int *vec, int N, int block_size, int num_blocks)
{
  int *part_max_h;
  int *vec_d, *part_max_d;
  part_max_h = (int *)malloc(sizeof(int) * num_blocks);
  cudaMalloc((void **)&vec_d, sizeof(int) * N);
  cudaMalloc((void **)&part_max_d, sizeof(int) * num_blocks); // one partial maximum per block

  // initialize the vector for testing.
  for (int i = 0; i < N; i++) {
    vec[i] = rand() % 100;
  }
  cudaMemcpy(vec_d, vec, sizeof(int) * N, cudaMemcpyHostToDevice);

  // calling the device function
  calc_max_device<<<num_blocks, block_size>>>(vec_d, N, part_max_d);

  // copy back the per-block maxima and reduce them on the host
  cudaMemcpy(part_max_h, part_max_d, sizeof(int) * num_blocks, cudaMemcpyDeviceToHost);
  int max_val = part_max_h[0];
  for (int i = 1; i < num_blocks; i++) {
    if (part_max_h[i] > max_val)
      max_val = part_max_h[i];
  }

  free(vec);
  free(part_max_h);
  cudaFree(vec_d);
  cudaFree(part_max_d);
  return max_val;
}

int main(void)
{
  // Main function for testing
  int *vec_h, *max_val_h;
  max_val_h = (int *)malloc(sizeof(int));
  vec_h = (int *)malloc(sizeof(int) * N); // freed inside calc_max
  *max_val_h = calc_max(vec_h, N, block_size, num_blocks);
  printf("%d\n", *max_val_h);
  free(max_val_h);
  return 0;
}
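The host code above still ignores the return codes of the CUDA API calls. A common remedy is a small checking macro along the lines of the sketch below (the name CUDA_CHECK is illustrative): it wraps each call and aborts with the file, line and error string on failure, which is less error-prone than the repeated if/goto blocks used elsewhere in this collection.

#include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime.h>

/* Illustrative error-checking macro; usage would look like
 *   CUDA_CHECK(cudaMalloc((void **)&vec_d, sizeof(int) * N)); */
#define CUDA_CHECK(call)                                                  \
    do {                                                                  \
        cudaError_t err_ = (call);                                        \
        if (err_ != cudaSuccess) {                                        \
            fprintf(stderr, "%s:%d: CUDA error: %s\n", __FILE__,          \
                    __LINE__, cudaGetErrorString(err_));                  \
            exit(EXIT_FAILURE);                                           \
        }                                                                 \
    } while (0)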
3a27167fa383ef7b92a7a2bba3876d704481df6f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /*******************************<GINKGO LICENSE>****************************** Copyright (c) 2017-2022, the Ginkgo authors All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ******************************<GINKGO LICENSE>*******************************/ #include "core/matrix/fbcsr_kernels.hpp" #include <algorithm> #include <thrust/copy.h> #include <thrust/count.h> #include <thrust/device_ptr.h> #include <thrust/iterator/counting_iterator.h> #include <thrust/iterator/transform_output_iterator.h> #include <thrust/iterator/zip_iterator.h> #include <thrust/sort.h> #include <ginkgo/core/base/array.hpp> #include <ginkgo/core/base/exception_helpers.hpp> #include <ginkgo/core/base/math.hpp> #include <ginkgo/core/matrix/csr.hpp> #include <ginkgo/core/matrix/dense.hpp> #include "common/unified/base/kernel_launch.hpp" #include "core/base/block_sizes.hpp" #include "core/base/device_matrix_data_kernels.hpp" #include "core/components/fill_array_kernels.hpp" #include "core/components/format_conversion_kernels.hpp" #include "core/matrix/csr_lookup.hpp" #include "core/matrix/dense_kernels.hpp" #include "core/synthesizer/implementation_selection.hpp" #include "cuda/base/config.hpp" #include "cuda/base/cublas_bindings.hpp" #include "cuda/base/cusparse_bindings.hpp" #include "cuda/base/cusparse_block_bindings.hpp" #include "cuda/base/math.hpp" #include "cuda/base/pointer_mode_guard.hpp" #include "cuda/base/types.hpp" #include "cuda/components/atomic.cuh" #include "cuda/components/cooperative_groups.cuh" #include "cuda/components/merging.cuh" #include "cuda/components/prefix_sum.cuh" #include "cuda/components/reduction.cuh" #include "cuda/components/thread_ids.cuh" #include "cuda/components/uninitialized_array.hpp" namespace gko { namespace kernels { namespace cuda { /** * @brief The fixed-size block compressed sparse row matrix format namespace. 
* * @ingroup fbcsr */ namespace fbcsr { constexpr int default_block_size{512}; #include "common/cuda_hip/matrix/csr_common.hpp.inc" #include "common/cuda_hip/matrix/fbcsr_kernels.hpp.inc" namespace { template <typename ValueType> void dense_transpose(std::shared_ptr<const CudaExecutor> exec, const size_type nrows, const size_type ncols, const size_type orig_stride, const ValueType* const orig, const size_type trans_stride, ValueType* const trans) { if (nrows == 0) { return; } if (cublas::is_supported<ValueType>::value) { auto handle = exec->get_cublas_handle(); { cublas::pointer_mode_guard pm_guard(handle); auto alpha = one<ValueType>(); auto beta = zero<ValueType>(); cublas::geam(handle, HIPBLAS_OP_T, HIPBLAS_OP_N, nrows, ncols, &alpha, orig, orig_stride, &beta, trans, trans_stride, trans, trans_stride); } } else { GKO_NOT_IMPLEMENTED; } } } // namespace template <typename ValueType, typename IndexType> void spmv(std::shared_ptr<const CudaExecutor> exec, const matrix::Fbcsr<ValueType, IndexType>* const a, const matrix::Dense<ValueType>* const b, matrix::Dense<ValueType>* const c) { if (c->get_size()[0] == 0 || c->get_size()[1] == 0) { // empty output: nothing to do return; } if (b->get_size()[0] == 0 || a->get_num_stored_blocks() == 0) { // empty input: fill output with zero dense::fill(exec, c, zero<ValueType>()); return; } if (cusparse::is_supported<ValueType, IndexType>::value) { auto handle = exec->get_cusparse_handle(); cusparse::pointer_mode_guard pm_guard(handle); const auto alpha = one<ValueType>(); const auto beta = zero<ValueType>(); auto descr = cusparse::create_mat_descr(); const auto row_ptrs = a->get_const_row_ptrs(); const auto col_idxs = a->get_const_col_idxs(); const auto values = a->get_const_values(); const int bs = a->get_block_size(); const IndexType mb = a->get_num_block_rows(); const IndexType nb = a->get_num_block_cols(); const auto nnzb = static_cast<IndexType>(a->get_num_stored_blocks()); const auto nrhs = static_cast<IndexType>(b->get_size()[1]); const auto nrows = a->get_size()[0]; const auto ncols = a->get_size()[1]; const auto in_stride = b->get_stride(); const auto out_stride = c->get_stride(); if (nrhs == 1 && in_stride == 1 && out_stride == 1) { cusparse::bsrmv(handle, HIPSPARSE_OPERATION_NON_TRANSPOSE, mb, nb, nnzb, &alpha, descr, values, row_ptrs, col_idxs, bs, b->get_const_values(), &beta, c->get_values()); } else { const auto trans_stride = nrows; auto trans_c = array<ValueType>(exec, nrows * nrhs); cusparse::bsrmm(handle, HIPSPARSE_OPERATION_NON_TRANSPOSE, HIPSPARSE_OPERATION_TRANSPOSE, mb, nrhs, nb, nnzb, &alpha, descr, values, row_ptrs, col_idxs, bs, b->get_const_values(), in_stride, &beta, trans_c.get_data(), trans_stride); dense_transpose(exec, nrhs, nrows, trans_stride, trans_c.get_data(), out_stride, c->get_values()); } cusparse::destroy(descr); } else { GKO_NOT_IMPLEMENTED; } } GKO_INSTANTIATE_FOR_EACH_VALUE_AND_INDEX_TYPE(GKO_DECLARE_FBCSR_SPMV_KERNEL); template <typename ValueType, typename IndexType> void advanced_spmv(std::shared_ptr<const CudaExecutor> exec, const matrix::Dense<ValueType>* const alpha, const matrix::Fbcsr<ValueType, IndexType>* const a, const matrix::Dense<ValueType>* const b, const matrix::Dense<ValueType>* const beta, matrix::Dense<ValueType>* const c) { if (c->get_size()[0] == 0 || c->get_size()[1] == 0) { // empty output: nothing to do return; } if (b->get_size()[0] == 0 || a->get_num_stored_blocks() == 0) { // empty input: scale output dense::scale(exec, beta, c); return; } if (cusparse::is_supported<ValueType, 
IndexType>::value) { auto handle = exec->get_cusparse_handle(); const auto alphp = alpha->get_const_values(); const auto betap = beta->get_const_values(); auto descr = cusparse::create_mat_descr(); const auto row_ptrs = a->get_const_row_ptrs(); const auto col_idxs = a->get_const_col_idxs(); const auto values = a->get_const_values(); const int bs = a->get_block_size(); const IndexType mb = a->get_num_block_rows(); const IndexType nb = a->get_num_block_cols(); const auto nnzb = static_cast<IndexType>(a->get_num_stored_blocks()); const auto nrhs = static_cast<IndexType>(b->get_size()[1]); const auto nrows = a->get_size()[0]; const auto ncols = a->get_size()[1]; const auto in_stride = b->get_stride(); const auto out_stride = c->get_stride(); if (nrhs == 1 && in_stride == 1 && out_stride == 1) { cusparse::bsrmv(handle, HIPSPARSE_OPERATION_NON_TRANSPOSE, mb, nb, nnzb, alphp, descr, values, row_ptrs, col_idxs, bs, b->get_const_values(), betap, c->get_values()); } else { const auto trans_stride = nrows; auto trans_c = array<ValueType>(exec, nrows * nrhs); dense_transpose(exec, nrows, nrhs, out_stride, c->get_values(), trans_stride, trans_c.get_data()); cusparse::bsrmm(handle, HIPSPARSE_OPERATION_NON_TRANSPOSE, HIPSPARSE_OPERATION_TRANSPOSE, mb, nrhs, nb, nnzb, alphp, descr, values, row_ptrs, col_idxs, bs, b->get_const_values(), in_stride, betap, trans_c.get_data(), trans_stride); dense_transpose(exec, nrhs, nrows, trans_stride, trans_c.get_data(), out_stride, c->get_values()); } cusparse::destroy(descr); } else { GKO_NOT_IMPLEMENTED; } } GKO_INSTANTIATE_FOR_EACH_VALUE_AND_INDEX_TYPE( GKO_DECLARE_FBCSR_ADVANCED_SPMV_KERNEL); template <typename ValueType, typename IndexType> void fill_in_dense(std::shared_ptr<const CudaExecutor> exec, const matrix::Fbcsr<ValueType, IndexType>* source, matrix::Dense<ValueType>* result) { constexpr auto warps_per_block = default_block_size / config::warp_size; const auto num_blocks = ceildiv(source->get_num_block_rows(), warps_per_block); if (num_blocks > 0) { hipLaunchKernelGGL(( kernel::fill_in_dense), dim3(num_blocks), dim3(default_block_size), 0, 0, source->get_const_row_ptrs(), source->get_const_col_idxs(), as_cuda_type(source->get_const_values()), as_cuda_type(result->get_values()), result->get_stride(), source->get_num_block_rows(), source->get_block_size()); } } GKO_INSTANTIATE_FOR_EACH_VALUE_AND_INDEX_TYPE( GKO_DECLARE_FBCSR_FILL_IN_DENSE_KERNEL); template <typename ValueType, typename IndexType> void convert_to_csr(const std::shared_ptr<const CudaExecutor> exec, const matrix::Fbcsr<ValueType, IndexType>* const source, matrix::Csr<ValueType, IndexType>* const result) { constexpr auto warps_per_block = default_block_size / config::warp_size; const auto num_blocks = ceildiv(source->get_num_block_rows(), warps_per_block); if (num_blocks > 0) { hipLaunchKernelGGL(( kernel::convert_to_csr), dim3(num_blocks), dim3(default_block_size), 0, 0, source->get_const_row_ptrs(), source->get_const_col_idxs(), as_cuda_type(source->get_const_values()), result->get_row_ptrs(), result->get_col_idxs(), as_cuda_type(result->get_values()), source->get_num_block_rows(), source->get_block_size()); } } GKO_INSTANTIATE_FOR_EACH_VALUE_AND_INDEX_TYPE( GKO_DECLARE_FBCSR_CONVERT_TO_CSR_KERNEL); namespace { template <int mat_blk_sz, typename ValueType, typename IndexType> void transpose_blocks_impl(syn::value_list<int, mat_blk_sz>, matrix::Fbcsr<ValueType, IndexType>* const mat) { constexpr int subwarp_size = config::warp_size; const auto nbnz = mat->get_num_stored_blocks(); const auto 
numthreads = nbnz * subwarp_size; const auto block_size = default_block_size; const auto grid_dim = ceildiv(numthreads, block_size); if (grid_dim > 0) { hipLaunchKernelGGL(( kernel::transpose_blocks<mat_blk_sz, subwarp_size>) , dim3(grid_dim), dim3(block_size), 0, 0, nbnz, mat->get_values()); } } GKO_ENABLE_IMPLEMENTATION_SELECTION(select_transpose_blocks, transpose_blocks_impl); } // namespace template <typename ValueType, typename IndexType> void transpose(const std::shared_ptr<const CudaExecutor> exec, const matrix::Fbcsr<ValueType, IndexType>* const orig, matrix::Fbcsr<ValueType, IndexType>* const trans) { if (cusparse::is_supported<ValueType, IndexType>::value) { const int bs = orig->get_block_size(); const IndexType nnzb = static_cast<IndexType>(orig->get_num_stored_blocks()); hipsparseAction_t copyValues = HIPSPARSE_ACTION_NUMERIC; hipsparseIndexBase_t idxBase = HIPSPARSE_INDEX_BASE_ZERO; const IndexType buffer_size = cusparse::bsr_transpose_buffersize( exec->get_cusparse_handle(), orig->get_num_block_rows(), orig->get_num_block_cols(), nnzb, orig->get_const_values(), orig->get_const_row_ptrs(), orig->get_const_col_idxs(), bs, bs); array<char> buffer_array(exec, buffer_size); auto buffer = buffer_array.get_data(); cusparse::bsr_transpose( exec->get_cusparse_handle(), orig->get_num_block_rows(), orig->get_num_block_cols(), nnzb, orig->get_const_values(), orig->get_const_row_ptrs(), orig->get_const_col_idxs(), bs, bs, trans->get_values(), trans->get_col_idxs(), trans->get_row_ptrs(), copyValues, idxBase, buffer); // transpose blocks select_transpose_blocks( fixedblock::compiled_kernels(), [bs](int compiled_block_size) { return bs == compiled_block_size; }, syn::value_list<int>(), syn::type_list<>(), trans); } else { fallback_transpose(exec, orig, trans); } } GKO_INSTANTIATE_FOR_EACH_VALUE_AND_INDEX_TYPE( GKO_DECLARE_FBCSR_TRANSPOSE_KERNEL); template <typename ValueType, typename IndexType> void conj_transpose(std::shared_ptr<const CudaExecutor> exec, const matrix::Fbcsr<ValueType, IndexType>* orig, matrix::Fbcsr<ValueType, IndexType>* trans) { const int grid_size = ceildiv(trans->get_num_stored_elements(), default_block_size); transpose(exec, orig, trans); if (grid_size > 0 && is_complex<ValueType>()) { hipLaunchKernelGGL(( kernel::conjugate), dim3(grid_size), dim3(default_block_size), 0, 0, trans->get_num_stored_elements(), as_cuda_type(trans->get_values())); } } GKO_INSTANTIATE_FOR_EACH_VALUE_AND_INDEX_TYPE( GKO_DECLARE_FBCSR_CONJ_TRANSPOSE_KERNEL); template <typename ValueType, typename IndexType> void is_sorted_by_column_index( std::shared_ptr<const CudaExecutor> exec, const matrix::Fbcsr<ValueType, IndexType>* const to_check, bool* const is_sorted) { *is_sorted = true; auto gpu_array = array<bool>(exec, 1); // need to initialize the GPU value to true exec->copy_from(exec->get_master().get(), 1, is_sorted, gpu_array.get_data()); auto block_size = default_block_size; const auto num_brows = static_cast<IndexType>(to_check->get_num_block_rows()); const auto num_blocks = ceildiv(num_brows, block_size); if (num_blocks > 0) { hipLaunchKernelGGL(( kernel::check_unsorted), dim3(num_blocks), dim3(block_size), 0, 0, to_check->get_const_row_ptrs(), to_check->get_const_col_idxs(), num_brows, gpu_array.get_data()); } *is_sorted = exec->copy_val_to_host(gpu_array.get_data()); } GKO_INSTANTIATE_FOR_EACH_VALUE_AND_INDEX_TYPE( GKO_DECLARE_FBCSR_IS_SORTED_BY_COLUMN_INDEX); template <typename ValueType, typename IndexType> void sort_by_column_index(const std::shared_ptr<const CudaExecutor> exec, 
matrix::Fbcsr<ValueType, IndexType>* const to_sort) GKO_NOT_IMPLEMENTED; GKO_INSTANTIATE_FOR_EACH_VALUE_AND_INDEX_TYPE( GKO_DECLARE_FBCSR_SORT_BY_COLUMN_INDEX); template <typename ValueType, typename IndexType> void extract_diagonal(std::shared_ptr<const CudaExecutor> exec, const matrix::Fbcsr<ValueType, IndexType>* orig, matrix::Diagonal<ValueType>* diag) GKO_NOT_IMPLEMENTED; GKO_INSTANTIATE_FOR_EACH_VALUE_AND_INDEX_TYPE( GKO_DECLARE_FBCSR_EXTRACT_DIAGONAL); } // namespace fbcsr } // namespace cuda } // namespace kernels } // namespace gko
3a27167fa383ef7b92a7a2bba3876d704481df6f.cu
/*******************************<GINKGO LICENSE>****************************** Copyright (c) 2017-2022, the Ginkgo authors All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ******************************<GINKGO LICENSE>*******************************/ #include "core/matrix/fbcsr_kernels.hpp" #include <algorithm> #include <thrust/copy.h> #include <thrust/count.h> #include <thrust/device_ptr.h> #include <thrust/iterator/counting_iterator.h> #include <thrust/iterator/transform_output_iterator.h> #include <thrust/iterator/zip_iterator.h> #include <thrust/sort.h> #include <ginkgo/core/base/array.hpp> #include <ginkgo/core/base/exception_helpers.hpp> #include <ginkgo/core/base/math.hpp> #include <ginkgo/core/matrix/csr.hpp> #include <ginkgo/core/matrix/dense.hpp> #include "common/unified/base/kernel_launch.hpp" #include "core/base/block_sizes.hpp" #include "core/base/device_matrix_data_kernels.hpp" #include "core/components/fill_array_kernels.hpp" #include "core/components/format_conversion_kernels.hpp" #include "core/matrix/csr_lookup.hpp" #include "core/matrix/dense_kernels.hpp" #include "core/synthesizer/implementation_selection.hpp" #include "cuda/base/config.hpp" #include "cuda/base/cublas_bindings.hpp" #include "cuda/base/cusparse_bindings.hpp" #include "cuda/base/cusparse_block_bindings.hpp" #include "cuda/base/math.hpp" #include "cuda/base/pointer_mode_guard.hpp" #include "cuda/base/types.hpp" #include "cuda/components/atomic.cuh" #include "cuda/components/cooperative_groups.cuh" #include "cuda/components/merging.cuh" #include "cuda/components/prefix_sum.cuh" #include "cuda/components/reduction.cuh" #include "cuda/components/thread_ids.cuh" #include "cuda/components/uninitialized_array.hpp" namespace gko { namespace kernels { namespace cuda { /** * @brief The fixed-size block compressed sparse row matrix format namespace. 
* * @ingroup fbcsr */ namespace fbcsr { constexpr int default_block_size{512}; #include "common/cuda_hip/matrix/csr_common.hpp.inc" #include "common/cuda_hip/matrix/fbcsr_kernels.hpp.inc" namespace { template <typename ValueType> void dense_transpose(std::shared_ptr<const CudaExecutor> exec, const size_type nrows, const size_type ncols, const size_type orig_stride, const ValueType* const orig, const size_type trans_stride, ValueType* const trans) { if (nrows == 0) { return; } if (cublas::is_supported<ValueType>::value) { auto handle = exec->get_cublas_handle(); { cublas::pointer_mode_guard pm_guard(handle); auto alpha = one<ValueType>(); auto beta = zero<ValueType>(); cublas::geam(handle, CUBLAS_OP_T, CUBLAS_OP_N, nrows, ncols, &alpha, orig, orig_stride, &beta, trans, trans_stride, trans, trans_stride); } } else { GKO_NOT_IMPLEMENTED; } } } // namespace template <typename ValueType, typename IndexType> void spmv(std::shared_ptr<const CudaExecutor> exec, const matrix::Fbcsr<ValueType, IndexType>* const a, const matrix::Dense<ValueType>* const b, matrix::Dense<ValueType>* const c) { if (c->get_size()[0] == 0 || c->get_size()[1] == 0) { // empty output: nothing to do return; } if (b->get_size()[0] == 0 || a->get_num_stored_blocks() == 0) { // empty input: fill output with zero dense::fill(exec, c, zero<ValueType>()); return; } if (cusparse::is_supported<ValueType, IndexType>::value) { auto handle = exec->get_cusparse_handle(); cusparse::pointer_mode_guard pm_guard(handle); const auto alpha = one<ValueType>(); const auto beta = zero<ValueType>(); auto descr = cusparse::create_mat_descr(); const auto row_ptrs = a->get_const_row_ptrs(); const auto col_idxs = a->get_const_col_idxs(); const auto values = a->get_const_values(); const int bs = a->get_block_size(); const IndexType mb = a->get_num_block_rows(); const IndexType nb = a->get_num_block_cols(); const auto nnzb = static_cast<IndexType>(a->get_num_stored_blocks()); const auto nrhs = static_cast<IndexType>(b->get_size()[1]); const auto nrows = a->get_size()[0]; const auto ncols = a->get_size()[1]; const auto in_stride = b->get_stride(); const auto out_stride = c->get_stride(); if (nrhs == 1 && in_stride == 1 && out_stride == 1) { cusparse::bsrmv(handle, CUSPARSE_OPERATION_NON_TRANSPOSE, mb, nb, nnzb, &alpha, descr, values, row_ptrs, col_idxs, bs, b->get_const_values(), &beta, c->get_values()); } else { const auto trans_stride = nrows; auto trans_c = array<ValueType>(exec, nrows * nrhs); cusparse::bsrmm(handle, CUSPARSE_OPERATION_NON_TRANSPOSE, CUSPARSE_OPERATION_TRANSPOSE, mb, nrhs, nb, nnzb, &alpha, descr, values, row_ptrs, col_idxs, bs, b->get_const_values(), in_stride, &beta, trans_c.get_data(), trans_stride); dense_transpose(exec, nrhs, nrows, trans_stride, trans_c.get_data(), out_stride, c->get_values()); } cusparse::destroy(descr); } else { GKO_NOT_IMPLEMENTED; } } GKO_INSTANTIATE_FOR_EACH_VALUE_AND_INDEX_TYPE(GKO_DECLARE_FBCSR_SPMV_KERNEL); template <typename ValueType, typename IndexType> void advanced_spmv(std::shared_ptr<const CudaExecutor> exec, const matrix::Dense<ValueType>* const alpha, const matrix::Fbcsr<ValueType, IndexType>* const a, const matrix::Dense<ValueType>* const b, const matrix::Dense<ValueType>* const beta, matrix::Dense<ValueType>* const c) { if (c->get_size()[0] == 0 || c->get_size()[1] == 0) { // empty output: nothing to do return; } if (b->get_size()[0] == 0 || a->get_num_stored_blocks() == 0) { // empty input: scale output dense::scale(exec, beta, c); return; } if (cusparse::is_supported<ValueType, 
IndexType>::value) { auto handle = exec->get_cusparse_handle(); const auto alphp = alpha->get_const_values(); const auto betap = beta->get_const_values(); auto descr = cusparse::create_mat_descr(); const auto row_ptrs = a->get_const_row_ptrs(); const auto col_idxs = a->get_const_col_idxs(); const auto values = a->get_const_values(); const int bs = a->get_block_size(); const IndexType mb = a->get_num_block_rows(); const IndexType nb = a->get_num_block_cols(); const auto nnzb = static_cast<IndexType>(a->get_num_stored_blocks()); const auto nrhs = static_cast<IndexType>(b->get_size()[1]); const auto nrows = a->get_size()[0]; const auto ncols = a->get_size()[1]; const auto in_stride = b->get_stride(); const auto out_stride = c->get_stride(); if (nrhs == 1 && in_stride == 1 && out_stride == 1) { cusparse::bsrmv(handle, CUSPARSE_OPERATION_NON_TRANSPOSE, mb, nb, nnzb, alphp, descr, values, row_ptrs, col_idxs, bs, b->get_const_values(), betap, c->get_values()); } else { const auto trans_stride = nrows; auto trans_c = array<ValueType>(exec, nrows * nrhs); dense_transpose(exec, nrows, nrhs, out_stride, c->get_values(), trans_stride, trans_c.get_data()); cusparse::bsrmm(handle, CUSPARSE_OPERATION_NON_TRANSPOSE, CUSPARSE_OPERATION_TRANSPOSE, mb, nrhs, nb, nnzb, alphp, descr, values, row_ptrs, col_idxs, bs, b->get_const_values(), in_stride, betap, trans_c.get_data(), trans_stride); dense_transpose(exec, nrhs, nrows, trans_stride, trans_c.get_data(), out_stride, c->get_values()); } cusparse::destroy(descr); } else { GKO_NOT_IMPLEMENTED; } } GKO_INSTANTIATE_FOR_EACH_VALUE_AND_INDEX_TYPE( GKO_DECLARE_FBCSR_ADVANCED_SPMV_KERNEL); template <typename ValueType, typename IndexType> void fill_in_dense(std::shared_ptr<const CudaExecutor> exec, const matrix::Fbcsr<ValueType, IndexType>* source, matrix::Dense<ValueType>* result) { constexpr auto warps_per_block = default_block_size / config::warp_size; const auto num_blocks = ceildiv(source->get_num_block_rows(), warps_per_block); if (num_blocks > 0) { kernel::fill_in_dense<<<num_blocks, default_block_size>>>( source->get_const_row_ptrs(), source->get_const_col_idxs(), as_cuda_type(source->get_const_values()), as_cuda_type(result->get_values()), result->get_stride(), source->get_num_block_rows(), source->get_block_size()); } } GKO_INSTANTIATE_FOR_EACH_VALUE_AND_INDEX_TYPE( GKO_DECLARE_FBCSR_FILL_IN_DENSE_KERNEL); template <typename ValueType, typename IndexType> void convert_to_csr(const std::shared_ptr<const CudaExecutor> exec, const matrix::Fbcsr<ValueType, IndexType>* const source, matrix::Csr<ValueType, IndexType>* const result) { constexpr auto warps_per_block = default_block_size / config::warp_size; const auto num_blocks = ceildiv(source->get_num_block_rows(), warps_per_block); if (num_blocks > 0) { kernel::convert_to_csr<<<num_blocks, default_block_size>>>( source->get_const_row_ptrs(), source->get_const_col_idxs(), as_cuda_type(source->get_const_values()), result->get_row_ptrs(), result->get_col_idxs(), as_cuda_type(result->get_values()), source->get_num_block_rows(), source->get_block_size()); } } GKO_INSTANTIATE_FOR_EACH_VALUE_AND_INDEX_TYPE( GKO_DECLARE_FBCSR_CONVERT_TO_CSR_KERNEL); namespace { template <int mat_blk_sz, typename ValueType, typename IndexType> void transpose_blocks_impl(syn::value_list<int, mat_blk_sz>, matrix::Fbcsr<ValueType, IndexType>* const mat) { constexpr int subwarp_size = config::warp_size; const auto nbnz = mat->get_num_stored_blocks(); const auto numthreads = nbnz * subwarp_size; const auto block_size = default_block_size; 
const auto grid_dim = ceildiv(numthreads, block_size); if (grid_dim > 0) { kernel::transpose_blocks<mat_blk_sz, subwarp_size> <<<grid_dim, block_size, 0, 0>>>(nbnz, mat->get_values()); } } GKO_ENABLE_IMPLEMENTATION_SELECTION(select_transpose_blocks, transpose_blocks_impl); } // namespace template <typename ValueType, typename IndexType> void transpose(const std::shared_ptr<const CudaExecutor> exec, const matrix::Fbcsr<ValueType, IndexType>* const orig, matrix::Fbcsr<ValueType, IndexType>* const trans) { if (cusparse::is_supported<ValueType, IndexType>::value) { const int bs = orig->get_block_size(); const IndexType nnzb = static_cast<IndexType>(orig->get_num_stored_blocks()); cusparseAction_t copyValues = CUSPARSE_ACTION_NUMERIC; cusparseIndexBase_t idxBase = CUSPARSE_INDEX_BASE_ZERO; const IndexType buffer_size = cusparse::bsr_transpose_buffersize( exec->get_cusparse_handle(), orig->get_num_block_rows(), orig->get_num_block_cols(), nnzb, orig->get_const_values(), orig->get_const_row_ptrs(), orig->get_const_col_idxs(), bs, bs); array<char> buffer_array(exec, buffer_size); auto buffer = buffer_array.get_data(); cusparse::bsr_transpose( exec->get_cusparse_handle(), orig->get_num_block_rows(), orig->get_num_block_cols(), nnzb, orig->get_const_values(), orig->get_const_row_ptrs(), orig->get_const_col_idxs(), bs, bs, trans->get_values(), trans->get_col_idxs(), trans->get_row_ptrs(), copyValues, idxBase, buffer); // transpose blocks select_transpose_blocks( fixedblock::compiled_kernels(), [bs](int compiled_block_size) { return bs == compiled_block_size; }, syn::value_list<int>(), syn::type_list<>(), trans); } else { fallback_transpose(exec, orig, trans); } } GKO_INSTANTIATE_FOR_EACH_VALUE_AND_INDEX_TYPE( GKO_DECLARE_FBCSR_TRANSPOSE_KERNEL); template <typename ValueType, typename IndexType> void conj_transpose(std::shared_ptr<const CudaExecutor> exec, const matrix::Fbcsr<ValueType, IndexType>* orig, matrix::Fbcsr<ValueType, IndexType>* trans) { const int grid_size = ceildiv(trans->get_num_stored_elements(), default_block_size); transpose(exec, orig, trans); if (grid_size > 0 && is_complex<ValueType>()) { kernel::conjugate<<<grid_size, default_block_size>>>( trans->get_num_stored_elements(), as_cuda_type(trans->get_values())); } } GKO_INSTANTIATE_FOR_EACH_VALUE_AND_INDEX_TYPE( GKO_DECLARE_FBCSR_CONJ_TRANSPOSE_KERNEL); template <typename ValueType, typename IndexType> void is_sorted_by_column_index( std::shared_ptr<const CudaExecutor> exec, const matrix::Fbcsr<ValueType, IndexType>* const to_check, bool* const is_sorted) { *is_sorted = true; auto gpu_array = array<bool>(exec, 1); // need to initialize the GPU value to true exec->copy_from(exec->get_master().get(), 1, is_sorted, gpu_array.get_data()); auto block_size = default_block_size; const auto num_brows = static_cast<IndexType>(to_check->get_num_block_rows()); const auto num_blocks = ceildiv(num_brows, block_size); if (num_blocks > 0) { kernel::check_unsorted<<<num_blocks, block_size>>>( to_check->get_const_row_ptrs(), to_check->get_const_col_idxs(), num_brows, gpu_array.get_data()); } *is_sorted = exec->copy_val_to_host(gpu_array.get_data()); } GKO_INSTANTIATE_FOR_EACH_VALUE_AND_INDEX_TYPE( GKO_DECLARE_FBCSR_IS_SORTED_BY_COLUMN_INDEX); template <typename ValueType, typename IndexType> void sort_by_column_index(const std::shared_ptr<const CudaExecutor> exec, matrix::Fbcsr<ValueType, IndexType>* const to_sort) GKO_NOT_IMPLEMENTED; GKO_INSTANTIATE_FOR_EACH_VALUE_AND_INDEX_TYPE( GKO_DECLARE_FBCSR_SORT_BY_COLUMN_INDEX); template <typename 
ValueType, typename IndexType> void extract_diagonal(std::shared_ptr<const CudaExecutor> exec, const matrix::Fbcsr<ValueType, IndexType>* orig, matrix::Diagonal<ValueType>* diag) GKO_NOT_IMPLEMENTED; GKO_INSTANTIATE_FOR_EACH_VALUE_AND_INDEX_TYPE( GKO_DECLARE_FBCSR_EXTRACT_DIAGONAL); } // namespace fbcsr } // namespace cuda } // namespace kernels } // namespace gko
e96ec4e04b10eb197487cb687b07fbdead4fea9a.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "set_value.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; float value = 2; float *array = NULL; hipMalloc(&array, XSIZE*YSIZE); unsigned int size = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( set_value), dim3(gridBlock),dim3(threadBlock), 0, 0, value,array,size); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( set_value), dim3(gridBlock),dim3(threadBlock), 0, 0, value,array,size); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( set_value), dim3(gridBlock),dim3(threadBlock), 0, 0, value,array,size); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
e96ec4e04b10eb197487cb687b07fbdead4fea9a.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "set_value.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; float value = 2; float *array = NULL; cudaMalloc(&array, XSIZE*YSIZE); unsigned int size = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); set_value<<<gridBlock,threadBlock>>>(value,array,size); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { set_value<<<gridBlock,threadBlock>>>(value,array,size); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { set_value<<<gridBlock,threadBlock>>>(value,array,size); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
bee14097d8fd9827261622208ce4d1c6829d3b98.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <thrust/transform_reduce.h> #include "kernels/cuda_helpers.h" #include "kernels/tensor_operators.h" #include "3rd_party/reduce_all.h" namespace marian { #define CUDA_FLT_MAX 1.70141e+38 struct isnan_test { __host__ __device__ bool operator()(const float a) const { return isnan(a); } }; __device__ inline float stableLogit(float x) { if(x >= 0) { float z = expf(-x); return 1.0 / (1.0 + z); } else { float z = expf(x); return z / (1.0 + z); } } bool IsNan(Tensor in) { //hipSetDevice(in->getDevice()); //thrust::device_ptr<float> begin = thrust::device_pointer_cast(in->data()); //thrust::device_ptr<float> end // = thrust::device_pointer_cast(in->data() + in->size()); //return thrust::transform_reduce( // begin, end, isnan_test(), 0, thrust::plus<bool>()); return false; } void ConcatCont(Tensor out, const std::vector<Tensor>& inputs, int axis) { hipSetDevice(out->getDevice()); int step = 1; for(int i = 0; i < axis; ++i) step *= out->shape()[i]; size_t offset1 = 0; for(int i = 0; i < step; ++i) { for(auto in : inputs) { size_t size = in->shape().elements() / step; size_t offset2 = i * size; hipMemcpyAsync(out->data() + offset1, in->data() + offset2, size * sizeof(float), hipMemcpyDeviceToDevice); offset1 += size; } } hipStreamSynchronize(0); } __global__ void gInsertCols(float* out, const float* in, size_t rows, size_t cols, size_t cols_out, size_t cols_in, size_t offset_out, size_t offset_in) { for(int bid = 0; bid < rows; bid += gridDim.x) { int j = bid + blockIdx.x; if(j < rows) { float* rowOut = out + j * cols_out + offset_out; const float* rowIn = in + j * cols_in + offset_in; for(int tid = 0; tid < cols; tid += blockDim.x) { int i = tid + threadIdx.x; if(i < cols) rowOut[i] = rowIn[i]; } } } } void Concatenate1(Tensor out, const std::vector<Tensor>& inputs) { hipSetDevice(out->getDevice()); int rows = out->shape().elements() / out->shape().back(); size_t offset = 0; int cols_out = out->shape().back(); for(auto in : inputs) { ABORT_IF(rows != in->shape().elements() / in->shape().back(), "First dimension must be equal"); int cols_in = in->shape().back(); int blocks = ::min(MAX_BLOCKS, rows); int threads = ::min(MAX_THREADS, cols_in); hipLaunchKernelGGL(( gInsertCols), dim3(blocks), dim3(threads), 0, 0, out->data(), in->data(), rows, cols_in, cols_out, cols_in, offset, 0); offset += cols_in; } hipStreamSynchronize(0); } void Concatenate(Tensor out, const std::vector<Tensor>& inputs, int ax) { if(ax == out->shape().size() - 1) Concatenate1(out, inputs); else ConcatCont(out, inputs, ax); } void Split1(std::vector<Tensor>& outputs, const Tensor in) { hipSetDevice(in->getDevice()); size_t offset = 0; int rows = in->shape().elements() / in->shape().back(); int cols_in = in->shape().back(); for(auto out : outputs) { ABORT_IF(rows != out->shape().elements() / out->shape().back(), "First dimension must be equal"); int cols_out = out->shape().back(); int blocks = ::min(MAX_BLOCKS, rows); int threads = ::min(MAX_THREADS, cols_out); hipLaunchKernelGGL(( gInsertCols), dim3(blocks), dim3(threads), 0, 0, out->data(), in->data(), rows, cols_out, cols_out, cols_in, 0, offset); offset += cols_out; } hipStreamSynchronize(0); } void SplitCont(std::vector<Tensor>& outputs, const Tensor in, int axis) { hipSetDevice(in->getDevice()); int step = 1; for(int i = 0; i < axis; ++i) step *= in->shape()[i]; size_t offset1 = 0; for(int i = 0; i < step; ++i) { for(auto out : outputs) { size_t size = out->shape().elements() / 
step; size_t offset2 = i * size; hipMemcpyAsync(out->data() + offset2, in->data() + offset1, size * sizeof(float), hipMemcpyDeviceToDevice); offset1 += size; } } hipStreamSynchronize(0); } void Deconcatenate(std::vector<Tensor>& outputs, const Tensor in, int ax) { if(ax == in->shape().size() - 1) Split1(outputs, in); else SplitCont(outputs, in, ax); } __global__ void gTransposeND(gpu::Tensor<float> out, const gpu::Tensor<float> in, const gpu::Array<int, gpu::Shape::size()> permute) { constexpr size_t N = gpu::Shape::size(); gpu::Array<int, N> oDims; gpu::Array<int, N> pDims; int length = out.shape().elements(); for(int bid = 0; bid < length; bid += blockDim.x * gridDim.x) { int index = bid + blockDim.x * blockIdx.x + threadIdx.x; if(index < length) { out.shape().dims(index, oDims); for(int i = 0; i < N; ++i) pDims[permute[i]] = oDims[i]; out[index] = in[pDims]; } } } void TransposeND(Tensor out, Tensor in, const std::vector<int>& vAxis) { hipSetDevice(out->getDevice()); gpu::Array<int, gpu::Shape::size()> axes; int diff = gpu::Shape::size() - vAxis.size(); for(int i = 0; i < axes.size(); ++i) if(i < diff) axes[i] = i; else axes[i] = vAxis[i - diff] + diff; int length = out->shape().elements(); int threads = ::min(MAX_THREADS, length); int blocks = ::min(MAX_BLOCKS, length / threads + (length % threads != 0)); hipLaunchKernelGGL(( gTransposeND), dim3(blocks), dim3(threads), 0, 0, out, in, axes); } __global__ void gSoftmax(float* out, gpu::Shape outShape, const float* in, const float* mask, const gpu::Shape maskShape) { int rows = outShape.elements() / outShape.back(); int cols = outShape.back(); bool broadcast = outShape != maskShape; gpu::Array<int, gpu::Shape::size()> dims; for(int bid = 0; bid < rows; bid += gridDim.x) { int j = bid + blockIdx.x; if(j < rows) { float* so = out + j * cols; const float* sp = in + j * cols; extern __shared__ float _share[]; float* _max = _share + blockDim.x; _max[threadIdx.x] = -CUDA_FLT_MAX; // mask for(int tid = 0; tid < cols; tid += blockDim.x) { int id = tid + threadIdx.x; if(id < cols) { float mVal = 1.f; if(mask) { int mIndex = id + j * cols; if(broadcast) { outShape.dims(mIndex, dims); mIndex = maskShape.bindex(dims); } mVal = mask[mIndex]; } if(mVal && sp[id] > _max[threadIdx.x]) _max[threadIdx.x] = sp[id]; } } __syncthreads(); int len = blockDim.x; while(len != 1) { __syncthreads(); int skip = (len + 1) >> 1; if(threadIdx.x < (len >> 1)) { if(_max[threadIdx.x + skip] > _max[threadIdx.x]) { _max[threadIdx.x] = _max[threadIdx.x + skip]; } } len = (len + 1) >> 1; } __syncthreads(); float max = _max[0]; __syncthreads(); float* _sum = _share + blockDim.x; _sum[threadIdx.x] = 0.0; for(int tid = 0; tid < cols; tid += blockDim.x) { int id = tid + threadIdx.x; if(id < cols) { float mVal = 1.f; if(mask) { int mIndex = id + j * cols; if(broadcast) { outShape.dims(mIndex, dims); mIndex = maskShape.bindex(dims); } mVal = mask[mIndex]; } float ex = 0; if(mVal) ex = __expf(sp[id] - max); so[id] = ex; _sum[threadIdx.x] += ex; } } __syncthreads(); len = blockDim.x; while(len != 1) { __syncthreads(); int skip = (len + 1) >> 1; if(threadIdx.x < (len >> 1)) _sum[threadIdx.x] += _sum[threadIdx.x + skip]; len = (len + 1) >> 1; } __syncthreads(); for(int tid = 0; tid < cols; tid += blockDim.x) { int id = tid + threadIdx.x; if(id < cols) { so[id] = so[id] / _sum[0]; } } } } } void Softmax(Tensor out, Tensor in, Tensor mask) { hipSetDevice(out->getDevice()); size_t m = out->shape().elements() / out->shape().back(); size_t k = out->shape().back(); int blocks = 
::min(MAX_BLOCKS, (int)m); int threads = ::min(MAX_THREADS, (int)k); int shared = sizeof(float) * threads * 2; if(mask) hipLaunchKernelGGL(( gSoftmax), dim3(blocks), dim3(threads), shared, 0, out->data(), out->shape(), in->data(), mask->data(), mask->shape()); else hipLaunchKernelGGL(( gSoftmax), dim3(blocks), dim3(threads), shared, 0, out->data(), out->shape(), in->data(), 0, out->shape()); } __global__ void gLogSoftmax(float* out, const gpu::Shape outShape, const float* in) { int rows = outShape.elements() / outShape.back(); int cols = outShape.back(); for(int bid = 0; bid < rows; bid += gridDim.x) { int j = bid + blockIdx.x; if(j < rows) { float* so = out + j * cols; const float* sp = in + j * cols; extern __shared__ float _share[]; float* _max = _share + blockDim.x; _max[threadIdx.x] = sp[threadIdx.x]; for(int tid = 0; tid < cols; tid += blockDim.x) { int id = tid + threadIdx.x; if(id < cols) { if(sp[id] > _max[threadIdx.x]) _max[threadIdx.x] = sp[id]; } } __syncthreads(); int len = blockDim.x; while(len != 1) { __syncthreads(); int skip = (len + 1) >> 1; if(threadIdx.x < (len >> 1)) { if(_max[threadIdx.x + skip] > _max[threadIdx.x]) { _max[threadIdx.x] = _max[threadIdx.x + skip]; } } len = (len + 1) >> 1; } __syncthreads(); float max = _max[0]; __syncthreads(); float* _sum = _share + blockDim.x; _sum[threadIdx.x] = 0.0; for(int tid = 0; tid < cols; tid += blockDim.x) { int id = tid + threadIdx.x; if(id < cols) { float sm = sp[id] - max; float ex = __expf(sm); so[id] = sm; _sum[threadIdx.x] += ex; } } __syncthreads(); len = blockDim.x; while(len != 1) { __syncthreads(); int skip = (len + 1) >> 1; if(threadIdx.x < (len >> 1)) _sum[threadIdx.x] += _sum[threadIdx.x + skip]; len = (len + 1) >> 1; } __syncthreads(); for(int tid = 0; tid < cols; tid += blockDim.x) { int id = tid + threadIdx.x; if(id < cols) so[id] -= __logf(_sum[0]); } } } } void LogSoftmax(Tensor out, Tensor in) { hipSetDevice(out->getDevice()); size_t m = out->shape().elements() / out->shape().back(); size_t k = out->shape().back(); int blocks = ::min(MAX_BLOCKS, (int)m); int threads = ::min(MAX_THREADS, (int)k); int shared = sizeof(float) * threads * 2; hipLaunchKernelGGL(( gLogSoftmax), dim3(blocks), dim3(threads), shared, 0, out->data(), out->shape(), in->data()); } /////////////////////////////////////////////////////// __global__ void gSoftmaxGrad(float* grad, const float* adj, const float* val, const int rows, const int cols) { for(int bid = 0; bid < rows; bid += gridDim.x) { int j = bid + blockIdx.x; if(j < rows) { extern __shared__ float _share[]; float* _sum = _share + blockDim.x; float* gradRow = grad + j * cols; const float* adjRow = adj + j * cols; const float* valRow = val + j * cols; _sum[threadIdx.x] = 0.0; for(int tid = 0; tid < cols; tid += blockDim.x) { int id = tid + threadIdx.x; if(id < cols) { _sum[threadIdx.x] += valRow[id] * adjRow[id]; } } __syncthreads(); int len = blockDim.x; while(len != 1) { __syncthreads(); int skip = (len + 1) >> 1; if(threadIdx.x < (len >> 1)) _sum[threadIdx.x] += _sum[threadIdx.x + skip]; len = (len + 1) >> 1; } __syncthreads(); for(int tid = 0; tid < cols; tid += blockDim.x) { int id = tid + threadIdx.x; if(id < cols) { float val = valRow[id] * (adjRow[id] - _sum[0]); if(val) gradRow[id] += val; } } } } } void SoftmaxGrad(Tensor grad, Tensor adj, Tensor val) { hipSetDevice(adj->getDevice()); // grad and val are both m-by-k matrices, passed as input. // A weighted average of each row of grad (according to the weights // specified in val) is computed and subtracted from Out. 
// adj is multiplied for each element to get backward step in autodiff int m = grad->shape().elements() / grad->shape().back(); int k = grad->shape().back(); int blocks = ::min(MAX_BLOCKS, m); int threads = ::min(MAX_THREADS, k); int shared = sizeof(float) * threads * 2; hipLaunchKernelGGL(( gSoftmaxGrad), dim3(blocks), dim3(threads), shared, 0, grad->data(), adj->data(), val->data(), m, k); } __global__ void gLogSoftmaxGrad(float* grad, const float* adj, const float* val, const int rows, const int cols) { for(int bid = 0; bid < rows; bid += gridDim.x) { int j = bid + blockIdx.x; if(j < rows) { extern __shared__ float _share[]; float* _sum = _share + blockDim.x; float* gradRow = grad + j * cols; const float* adjRow = adj + j * cols; const float* valRow = val + j * cols; _sum[threadIdx.x] = 0.0; for(int tid = 0; tid < cols; tid += blockDim.x) { int id = tid + threadIdx.x; if(id < cols) { _sum[threadIdx.x] += adjRow[id]; } } __syncthreads(); int len = blockDim.x; while(len != 1) { __syncthreads(); int skip = (len + 1) >> 1; if(threadIdx.x < (len >> 1)) _sum[threadIdx.x] += _sum[threadIdx.x + skip]; len = (len + 1) >> 1; } __syncthreads(); for(int tid = 0; tid < cols; tid += blockDim.x) { int id = tid + threadIdx.x; if(id < cols) gradRow[id] += adjRow[id] - (expf(valRow[id]) * _sum[0]); } } } } void LogSoftmaxGrad(Tensor grad, Tensor adj, Tensor val) { hipSetDevice(adj->getDevice()); // grad and val are both m-by-k matrices, passed as input. // A weighted average of each row of grad (according to the weights // specified in val) is computed and subtracted from Out. // adj is multiplied for each element to get backward step in autodiff int m = grad->shape().elements() / grad->shape().back(); int k = grad->shape().back(); int blocks = ::min(MAX_BLOCKS, m); int threads = ::min(MAX_THREADS, k); int shared = sizeof(float) * threads * 2; hipLaunchKernelGGL(( gLogSoftmaxGrad), dim3(blocks), dim3(threads), shared, 0, grad->data(), adj->data(), val->data(), m, k); } /////////////////////////////////////////////////////// __global__ void gArgmax(float* out, const float* data, size_t rows, size_t cols) { size_t row = blockIdx.x; size_t startInd = row * cols; float maxScore = -99999; size_t maxInd; for(size_t col = 0; col < cols; ++col) { size_t ind = startInd + col; float score = data[ind]; if(score > maxScore) { maxScore = score; maxInd = col; } } out[row] = maxInd; } /////////////////////////////////////////////////////// void Prod(hipblasHandle_t handle, Tensor C, const Tensor A, const Tensor B, bool transA, bool transB, float beta, float scalar) { hipSetDevice(C->getDevice()); float alpha = scalar; size_t m = A->shape().elements() / A->shape().back(); size_t k = A->shape().back(); if(transA) std::swap(m, k); size_t l = B->shape().elements() / B->shape().back(); size_t n = B->shape().back(); if(transB) std::swap(l, n); size_t lda = A->shape().back(); size_t ldb = B->shape().back(); size_t ldc = B->shape().back(); if(transB) ldc = B->shape().elements() / B->shape().back(); hipblasOperation_t opA = transA ? HIPBLAS_OP_T : HIPBLAS_OP_N; hipblasOperation_t opB = transB ? 
HIPBLAS_OP_T : HIPBLAS_OP_N; #if TORCH_HIP_VERSION >= 9000 //cublasSetMathMode(handle, CUBLAS_TENSOR_OP_MATH); #endif hipblasSgemm(handle, opB, opA, n, m, k, &alpha, B->data(), ldb, A->data(), lda, &beta, C->data(), ldc); #if TORCH_HIP_VERSION >= 9000 //cublasSetMathMode(handle, CUBLAS_DEFAULT_MATH); #endif } void ProdBatched(hipblasHandle_t handle, Tensor C, const Tensor A, const Tensor B, bool transA, bool transB, float beta, float scalar) { hipSetDevice(C->getDevice()); float alpha = scalar; size_t batchA = A->shape().elements() / (A->shape()[-1] * A->shape()[-2]); size_t batchB = B->shape().elements() / (B->shape()[-1] * B->shape()[-2]); size_t m = A->shape()[-2]; size_t k = A->shape()[-1]; if(transA) std::swap(m, k); size_t l = B->shape()[-2]; size_t n = B->shape()[-1]; if(transB) std::swap(l, n); size_t lda = A->shape()[-1]; size_t ldb = B->shape()[-1]; size_t ldc = B->shape()[-1]; if(transB) ldc = B->shape()[-2]; hipblasOperation_t opA = transA ? HIPBLAS_OP_T : HIPBLAS_OP_N; hipblasOperation_t opB = transB ? HIPBLAS_OP_T : HIPBLAS_OP_N; #if TORCH_HIP_VERSION >= 9000 //cublasSetMathMode(handle, CUBLAS_TENSOR_OP_MATH); #endif hipblasSgemmStridedBatched(handle, opB, opA, n, m, k, &alpha, B->data(), ldb, batchB == 1 ? 0 : n * k, A->data(), lda, batchA == 1 ? 0 : m * k, &beta, C->data(), ldc, n * m, ::max(batchA, batchB)); #if TORCH_HIP_VERSION >= 9000 //cublasSetMathMode(handle, CUBLAS_DEFAULT_MATH); #endif } __global__ void gCopyRows(float* out, const float* in, size_t cols, const size_t* sourceRowIdx, size_t rows) { for(int bid = 0; bid < rows; bid += gridDim.x) { int j = bid + blockIdx.x; if(j < rows) { size_t dstId = j; size_t srcId = sourceRowIdx[j]; float* rowOut = out + dstId * cols; const float* rowIn = in + srcId * cols; for(int tid = 0; tid < cols; tid += blockDim.x) { int i = tid + threadIdx.x; if(i < cols) rowOut[i] = rowIn[i]; } } } } void CopyRows(Tensor out, const Tensor in, const std::vector<size_t>& indices) { hipSetDevice(out->getDevice()); size_t cols = in->shape().back(); size_t rowsToCopy = indices.size(); int threads = ::min(MAX_THREADS, (int)cols); int blocks = ::min(MAX_BLOCKS, (int)rowsToCopy); size_t* d_indices; CUDA_CHECK(hipMalloc(&d_indices, rowsToCopy * sizeof(size_t))); CUDA_CHECK(hipMemcpy(d_indices, indices.data(), rowsToCopy * sizeof(size_t), hipMemcpyHostToDevice)); hipLaunchKernelGGL(( gCopyRows), dim3(blocks), dim3(threads), 0, 0, out->data(), in->data(), cols, d_indices, rowsToCopy); CUDA_CHECK(hipFree(d_indices)); } __global__ void gPasteRows(float* out, const float* in, size_t cols, const size_t* targetRowIdx, size_t rows) { for(int bid = 0; bid < rows; bid += gridDim.x) { int j = bid + blockIdx.x; if(j < rows) { size_t dstId = targetRowIdx[j]; size_t srcId = j; float* rowOut = out + dstId * cols; const float* rowIn = in + srcId * cols; for(int tid = 0; tid < cols; tid += blockDim.x) { int i = tid + threadIdx.x; if(i < cols) atomicAdd(rowOut + i, rowIn[i]); } } } } void PasteRows(Tensor out, const Tensor in, const std::vector<size_t>& indices) { hipSetDevice(out->getDevice()); size_t cols = in->shape().back(); size_t rowsToCopy = indices.size(); int threads = ::min(MAX_THREADS, (int)cols); int blocks = ::min(MAX_BLOCKS, (int)rowsToCopy); // @TODO: turn into tensor size_t* d_indices; CUDA_CHECK(hipMalloc(&d_indices, rowsToCopy * sizeof(size_t))); CUDA_CHECK(hipMemcpy(d_indices, indices.data(), rowsToCopy * sizeof(size_t), hipMemcpyHostToDevice)); hipLaunchKernelGGL(( gPasteRows), dim3(blocks), dim3(threads), 0, 0, out->data(), in->data(), cols, 
d_indices, rowsToCopy); CUDA_CHECK(hipFree(d_indices)); } ///////////// __global__ void gCopyCols(float* out, const float* in, size_t rows, size_t colsIn, const size_t* sourceColIdx, size_t colsOut) { for(int bid = 0; bid < rows; bid += gridDim.x) { int j = bid + blockIdx.x; if(j < rows) { const float* rowIn = in + j * colsIn; float* rowOut = out + j * colsOut; for(int tid = 0; tid < colsOut; tid += blockDim.x) { int i = tid + threadIdx.x; if(i < colsOut) rowOut[i] = rowIn[sourceColIdx[i]]; } } } } void CopyCols(Tensor out, const Tensor in, const std::vector<size_t>& indices) { hipSetDevice(out->getDevice()); size_t rows = in->shape().elements() / in->shape().back(); size_t cols = in->shape().back(); size_t colsToCopy = indices.size(); int threads = ::min(MAX_THREADS, (int)colsToCopy); int blocks = ::min(MAX_BLOCKS, (int)rows); size_t* d_indices; CUDA_CHECK(hipMalloc(&d_indices, colsToCopy * sizeof(size_t))); CUDA_CHECK(hipMemcpy(d_indices, indices.data(), colsToCopy * sizeof(size_t), hipMemcpyHostToDevice)); hipLaunchKernelGGL(( gCopyCols), dim3(blocks), dim3(threads), 0, 0, out->data(), in->data(), rows, cols, d_indices, colsToCopy); CUDA_CHECK(hipFree(d_indices)); } __global__ void gPasteCols(float* out, const float* in, size_t rows, size_t colsOut, const size_t* targetColIdx, size_t colsIn) { for(int bid = 0; bid < rows; bid += gridDim.x) { int j = bid + blockIdx.x; if(j < rows) { const float* rowIn = in + j * colsIn; float* rowOut = out + j * colsOut; for(int tid = 0; tid < colsIn; tid += blockDim.x) { int i = tid + threadIdx.x; if(i < colsIn) rowOut[targetColIdx[i]] = rowIn[i]; } } } } void PasteCols(Tensor out, const Tensor in, const std::vector<size_t>& indices) { hipSetDevice(out->getDevice()); size_t rows = in->shape().elements() / in->shape().back(); size_t cols = in->shape().back(); size_t colsToCopy = indices.size(); int threads = ::min(MAX_THREADS, (int)colsToCopy); int blocks = ::min(MAX_BLOCKS, (int)rows); size_t* d_indices; CUDA_CHECK(hipMalloc(&d_indices, colsToCopy * sizeof(size_t))); CUDA_CHECK(hipMemcpy(d_indices, indices.data(), colsToCopy * sizeof(size_t), hipMemcpyHostToDevice)); hipLaunchKernelGGL(( gPasteCols), dim3(blocks), dim3(threads), 0, 0, out->data(), in->data(), rows, cols, d_indices, colsToCopy); CUDA_CHECK(hipFree(d_indices)); } __global__ void gSelect(float* out, gpu::Shape outShape, const float* in, const gpu::Shape inShape, int axis, size_t* d_indices) { int length = outShape.elements(); gpu::Array<int, gpu::Shape::size()> dims; for(int bid = 0; bid < length; bid += blockDim.x * gridDim.x) { int index = bid + blockDim.x * blockIdx.x + threadIdx.x; if(index < length) { outShape.dims(index, dims); dims[axis] = d_indices[dims[axis]]; int inIndex = inShape.index(dims); out[index] = in[inIndex]; } } } __global__ void gInsert(float* out, gpu::Shape outShape, const float* in, const gpu::Shape inShape, int axis, size_t* d_indices) { int length = inShape.elements(); gpu::Array<int, gpu::Shape::size()> dims; for(int bid = 0; bid < length; bid += blockDim.x * gridDim.x) { int index = bid + blockDim.x * blockIdx.x + threadIdx.x; if(index < length) { inShape.dims(index, dims); dims[axis] = d_indices[dims[index]]; int outIndex = outShape.index(dims); out[outIndex] = in[index]; } } } void Select(Ptr<Allocator<DeviceGPU>> allocator, Tensor out, const Tensor in, int axis, const std::vector<size_t>& indices) { hipSetDevice(out->getDevice()); int length = out->shape().elements(); int threads = ::min(MAX_THREADS, length); int blocks = ::min(MAX_BLOCKS, length / threads + 
(length % threads != 0)); auto mp_indices = allocator->alloc<size_t>(indices.size()); mp_indices->insert(indices.data(), indices.size()); int axisGPU = axis + gpu::Shape::size() - out->shape().size(); hipLaunchKernelGGL(( gSelect), dim3(blocks), dim3(threads), 0, 0, out->data(), out->shape(), in->data(), in->shape(), axisGPU, mp_indices->data<size_t>()); allocator->free(mp_indices); } void Insert(Ptr<Allocator<DeviceGPU>> allocator, Tensor out, const Tensor in, int axis, const std::vector<size_t>& indices) { hipSetDevice(in->getDevice()); int length = in->shape().elements(); int threads = ::min(MAX_THREADS, length); int blocks = ::min(MAX_BLOCKS, length / threads + (length % threads != 0)); auto mp_indices = allocator->alloc<size_t>(indices.size()); mp_indices->insert(indices.data(), indices.size()); int axisGPU = axis + gpu::Shape::size() - out->shape().size(); hipLaunchKernelGGL(( gInsert), dim3(blocks), dim3(threads), 0, 0, out->data(), out->shape(), in->data(), in->shape(), axisGPU, mp_indices->data<size_t>()); allocator->free(mp_indices); } __global__ void gGRUFastForward(float* out, const float* state, const float* xW, const float* sU, const float* b, const float* mask, size_t rows, size_t cols, bool final) { for(int bid = 0; bid < rows; bid += gridDim.x) { int j = bid + blockIdx.x; if(j < rows) { float m = !mask || mask[j]; float* rowOut = out + j * cols; const float* rowState = state + j * cols; const float* xWrow = xW + j * cols * 3; const float* sUrow = sU + j * cols * 3; for(int tid = 0; tid < cols; tid += blockDim.x) { int i = tid + threadIdx.x; if(i < cols) { float r = stableLogit(xWrow[i] + sUrow[i] + b[i]); int k = i + cols; float z = stableLogit(xWrow[k] + sUrow[k] + b[k]); int l = i + 2 * cols; float h; if(final) h = tanhf(xWrow[l] + (sUrow[l] + b[l]) * r); else h = tanhf(xWrow[l] + sUrow[l] * r + b[l]); float out = (1.0f - z) * h + z * rowState[i]; rowOut[i] = m * out + (1 - m) * rowState[i]; } } } } } void GRUFastForward(Tensor out, std::vector<Tensor> inputs, bool final) { hipSetDevice(out->getDevice()); int rows = out->shape().elements() / out->shape().back(); int cols = out->shape().back(); int blocks = ::min(MAX_BLOCKS, rows); int threads = ::min(MAX_THREADS, cols); hipLaunchKernelGGL(( gGRUFastForward), dim3(blocks), dim3(threads), 0, 0, out->data(), // output inputs[0]->data(), // state inputs[1]->data(), // xW inputs[2]->data(), // sU inputs[3]->data(), // b inputs.size() > 4 ? 
inputs[4]->data() : 0, // mask rows, cols, final); } __global__ void gGRUFastBackward(float* outState, float* outXW, float* outSU, float* outB, const float* state, const float* xW, const float* sU, const float* b, const float* mask, const float* adj, size_t rows, size_t cols, bool final) { for(int bid = 0; bid < rows; bid += gridDim.x) { int j = bid + blockIdx.x; if(j < rows) { float m = !mask || mask[j]; float* rowOutState = outState + j * cols; float* rowOutXW = outXW + j * cols * 3; float* rowOutSU = outSU + j * cols * 3; const float* rowState = state + j * cols; const float* rowXW = xW + j * cols * 3; const float* rowSU = sU + j * cols * 3; const float* rowAdj = adj + j * cols; for(int tid = 0; tid < cols; tid += blockDim.x) { int i = tid + threadIdx.x; if(i < cols) { int k = i + cols; int l = i + 2 * cols; float r = stableLogit(rowXW[i] + rowSU[i] + b[i]); float z = stableLogit(rowXW[k] + rowSU[k] + b[k]); float h; if(final) h = tanhf(rowXW[l] + (rowSU[l] + b[l]) * r); else h = tanhf(rowXW[l] + rowSU[l] * r + b[l]); float adj = rowAdj[i]; float t = (1 - z) * (1 - h * h); // df/ds if(outState) rowOutState[i] += (m * z - m + 1) * adj; // df/d(xW_r) ... float dfdxW_r = m * r * (1 - r) * t * adj; if(final) dfdxW_r *= rowSU[l] + b[l]; else dfdxW_r *= rowSU[l]; if(outXW) rowOutXW[i] += dfdxW_r; if(outSU) rowOutSU[i] += dfdxW_r; if(outB) atomicAdd(outB + i, dfdxW_r); // df/d(xW_z) ... float dfdxW_z = m * (1 - z) * z * (rowState[i] - h) * adj; if(outXW) rowOutXW[k] += dfdxW_z; if(outSU) rowOutSU[k] += dfdxW_z; if(outB) atomicAdd(outB + k, dfdxW_z); // df/d(xW_x) ... float dfdxW_x = m * t * adj; if(outXW) rowOutXW[l] += dfdxW_x; if(outSU) rowOutSU[l] += dfdxW_x * r; if(outB) if(final) atomicAdd(outB + l, dfdxW_x * r); else atomicAdd(outB + l, dfdxW_x); } } } } } void GRUFastBackward(std::vector<Tensor> outputs, std::vector<Tensor> inputs, Tensor adj, bool final) { hipSetDevice(adj->getDevice()); int rows = adj->shape().elements() / adj->shape().back(); int cols = adj->shape().back(); int blocks = ::min(MAX_BLOCKS, rows); int threads = ::min(MAX_THREADS, cols); hipLaunchKernelGGL(( gGRUFastBackward), dim3(blocks), dim3(threads), 0, 0, outputs[0] ? outputs[0]->data() : 0, // state - adj outputs[1] ? outputs[1]->data() : 0, // xW - adj outputs[2] ? outputs[2]->data() : 0, // sU - adj outputs[3] ? outputs[3]->data() : 0, // b - adj inputs[0]->data(), // state inputs[1]->data(), // xW inputs[2]->data(), // sU inputs[3]->data(), // b inputs.size() > 4 ? 
inputs[4]->data() : 0, // mask adj->data(), rows, cols, final); } __global__ void gCrossEntropyPick(float* out, const gpu::Shape outShape, const float* in, const gpu::Shape inShape, const float* pick) { int rows = inShape.elements() / inShape.back(); int cols = inShape.back(); for(int bid = 0; bid < rows; bid += gridDim.x) { int j = bid + blockIdx.x; if(j < rows) { const float* sp = in + j * cols; extern __shared__ float _share[]; float* _max = _share + blockDim.x; _max[threadIdx.x] = sp[threadIdx.x]; for(int tid = 1; tid < cols; tid += blockDim.x) { int id = tid + threadIdx.x; if(id < cols) { if(sp[id] > _max[threadIdx.x]) _max[threadIdx.x] = sp[id]; } } __syncthreads(); int len = blockDim.x; while(len != 1) { __syncthreads(); int skip = (len + 1) >> 1; if(threadIdx.x < (len >> 1)) { if(_max[threadIdx.x + skip] > _max[threadIdx.x]) { _max[threadIdx.x] = _max[threadIdx.x + skip]; } } len = (len + 1) >> 1; } __syncthreads(); float max = _max[0]; __syncthreads(); float* _sum = _share + blockDim.x; _sum[threadIdx.x] = 0.0; for(int tid = 0; tid < cols; tid += blockDim.x) { int id = tid + threadIdx.x; if(id < cols) { _sum[threadIdx.x] += __expf(sp[id] - max); } } __syncthreads(); len = blockDim.x; while(len != 1) { __syncthreads(); int skip = (len + 1) >> 1; if(threadIdx.x < (len >> 1)) _sum[threadIdx.x] += _sum[threadIdx.x + skip]; len = (len + 1) >> 1; } __syncthreads(); // cross-entropy for(int tid = 0; tid < cols; tid += blockDim.x) { int id = tid + threadIdx.x; if(id == (int)pick[j]) { out[j] = __logf(_sum[0]) - sp[id] + max; } } } } } void CrossEntropyPick(Tensor out, Tensor in, Tensor pick) { hipSetDevice(out->getDevice()); int rows = in->shape().elements() / in->shape().back(); int cols = in->shape().back(); int blocks = ::min(MAX_BLOCKS, (int)rows); int threads = ::min(MAX_THREADS, (int)cols); int shared = sizeof(float) * threads * 2; hipLaunchKernelGGL(( gCrossEntropyPick), dim3(blocks), dim3(threads), shared, 0, out->data(), out->shape(), in->data(), in->shape(), pick->data()); } __global__ void gCrossEntropyPickBackward(float* out, const gpu::Shape outShape, const float* adj, const float* in, const float* pick) { int rows = outShape.elements() / outShape.back(); int cols = outShape.back(); for(int bid = 0; bid < rows; bid += gridDim.x) { int j = bid + blockIdx.x; if(j < rows) { const float* sp = in + j * cols; float* so = out + j * cols; extern __shared__ float _share[]; float* _max = _share + blockDim.x; _max[threadIdx.x] = sp[threadIdx.x]; for(int tid = 1; tid < cols; tid += blockDim.x) { int id = tid + threadIdx.x; if(id < cols) { if(sp[id] > _max[threadIdx.x]) _max[threadIdx.x] = sp[id]; } } __syncthreads(); int len = blockDim.x; while(len != 1) { __syncthreads(); int skip = (len + 1) >> 1; if(threadIdx.x < (len >> 1)) { if(_max[threadIdx.x + skip] > _max[threadIdx.x]) { _max[threadIdx.x] = _max[threadIdx.x + skip]; } } len = (len + 1) >> 1; } __syncthreads(); float max = _max[0]; __syncthreads(); float* _sum = _share + blockDim.x; _sum[threadIdx.x] = 0.0; for(int tid = 0; tid < cols; tid += blockDim.x) { int id = tid + threadIdx.x; if(id < cols) { float ex = __expf(sp[id] - max); _sum[threadIdx.x] += ex; } } __syncthreads(); len = blockDim.x; while(len != 1) { __syncthreads(); int skip = (len + 1) >> 1; if(threadIdx.x < (len >> 1)) _sum[threadIdx.x] += _sum[threadIdx.x + skip]; len = (len + 1) >> 1; } __syncthreads(); // cross-entropy for(int tid = 0; tid < cols; tid += blockDim.x) { int id = tid + threadIdx.x; if(id < cols) { float sub = (float)(id == (int)pick[j]); so[id] += 
adj[j] * (__expf(sp[id] - max) / _sum[0] - sub); } } } } } void CrossEntropyPickBackward(Tensor out, Tensor adj, Tensor a, Tensor pick) { hipSetDevice(out->getDevice()); int rows = out->shape().elements() / out->shape().back(); int cols = out->shape().back(); int blocks = ::min(MAX_BLOCKS, (int)rows); int threads = ::min(MAX_THREADS, (int)cols); int shared = sizeof(float) * threads * 2; hipLaunchKernelGGL(( gCrossEntropyPickBackward), dim3(blocks), dim3(threads), shared, 0, out->data(), out->shape(), adj->data(), a->data(), pick->data()); } float L2Norm(Tensor in) { using namespace functional; hipSetDevice(in->getDevice()); int size = in->shape().elements(); int threads = ::min(MAX_THREADS, size); int blocks = ::min(MAX_BLOCKS, size / threads + (size % threads != 0)); uint8_t* data; hipMalloc(&data, blocks * sizeof(float)); Tensor out(new TensorBase( New<MemoryPiece>(data, blocks * sizeof(float)), {1, blocks}, in->getDevice())); ReduceAll(_1 * _1, out, in); float dataCpu = sqrtf(out->get(0)); out.reset(); hipFree(data); return dataCpu; } __global__ void gAtt(float* out, const float* va, const float* ctx, const float* state, int m, // total rows (batch x time x beam) int k, // depth int b, // batch size int t // time of ctx ) { int rows = m; int cols = k; for(int bid = 0; bid < rows; bid += gridDim.x) { int j = bid + blockIdx.x; if(j < rows) { const float* vaRow = va; const float* ctxRow = ctx + (j % (b * t)) * cols; const float* stateRow = state + ((j / (b * t)) * b + j % b) * cols; extern __shared__ float _share[]; float* _sum = _share + blockDim.x; _sum[threadIdx.x] = 0.0; for(int tid = 0; tid < cols; tid += blockDim.x) { int id = tid + threadIdx.x; if(id < cols) { float z = ctxRow[id] + stateRow[id]; float ex = tanhf(z) * vaRow[id]; _sum[threadIdx.x] += ex; } } __syncthreads(); int len = blockDim.x; while(len != 1) { __syncthreads(); int skip = (len + 1) >> 1; if(threadIdx.x < (len >> 1)) _sum[threadIdx.x] += _sum[threadIdx.x + skip]; len = (len + 1) >> 1; } __syncthreads(); out[j] = _sum[0]; __syncthreads(); } } } void Att(Tensor out, Tensor va, Tensor context, Tensor state) { hipSetDevice(out->getDevice()); size_t m = out->shape().elements() / out->shape().back(); size_t k = context->shape()[-1]; size_t b = context->shape()[-2]; size_t t = context->shape()[-3]; int blocks = ::min(MAX_BLOCKS, (int)m); int threads = ::min(MAX_THREADS, (int)k); int shared = sizeof(float) * threads * 2; hipLaunchKernelGGL(( gAtt), dim3(blocks), dim3(threads), shared, 0, out->data(), va->data(), context->data(), state->data(), m, k, b, t); } __global__ void gAttBack(float* gVa, float* gContext, float* gState, const float* va, const float* context, const float* state, const float* adj, int m, // rows int k, // cols int n // batch size ) { int rows = m; int cols = k; for(int bid = 0; bid < m; bid += gridDim.x) { int j = bid + blockIdx.x; if(j < rows) { float* gcRow = gContext + j * cols; float* gsRow = gState + (j % n) * cols; const float* cRow = context + j * cols; const float* sRow = state + (j % n) * cols; for(int tid = 0; tid < cols; tid += blockDim.x) { int id = tid + threadIdx.x; if(id < cols) { float z = cRow[id] + sRow[id]; float t = tanhf(z); float r = va[id] * (1.f - t * t); gcRow[id] += r * adj[j]; gsRow[id] += r * adj[j]; atomicAdd(gVa + id, t * adj[j]); } } } } } void AttBack(Tensor gVa, Tensor gContext, Tensor gState, Tensor va, Tensor context, Tensor state, Tensor adj) { hipSetDevice(adj->getDevice()); size_t m = adj->shape().elements() / adj->shape().back(); size_t dims = 
context->shape().size(); size_t k = context->shape()[dims - 1]; size_t n = context->shape()[dims - 2]; int blocks = ::min(MAX_BLOCKS, (int)n); int threads = ::min(MAX_THREADS, (int)k); hipLaunchKernelGGL(( gAttBack), dim3(blocks), dim3(threads), 0, 0, gVa->data(), gContext->data(), gState->data(), va->data(), context->data(), state->data(), adj->data(), m, k, n); } __global__ void gLNormalization(float* out, const float* in, const float* alpha, const float* beta, int rows, int cols, float eps = 1e-9) { extern __shared__ float _share[]; for(int bid = 0; bid < rows; bid += gridDim.x) { int j = bid + blockIdx.x; if(j < rows) { float* so = out + j * cols; const float* sp = in + j * cols; float* _sum = _share + blockDim.x; _sum[threadIdx.x] = 0.0f; for(int tid = 0; tid < cols; tid += blockDim.x) { int id = tid + threadIdx.x; if(id < cols) { _sum[threadIdx.x] += sp[id]; } } __syncthreads(); int len = blockDim.x; while(len != 1) { __syncthreads(); int skip = (len + 1) >> 1; if(threadIdx.x < (len >> 1)) { _sum[threadIdx.x] += _sum[threadIdx.x + skip]; } len = (len + 1) >> 1; } __syncthreads(); float mean = _sum[0] / cols; __syncthreads(); float* _sqSum = _share + blockDim.x; _sqSum[threadIdx.x] = 0.0; for(int tid = 0; tid < cols; tid += blockDim.x) { int id = tid + threadIdx.x; if(id < cols) { float ex = sp[id] - mean; _sqSum[threadIdx.x] += ex * ex; } } __syncthreads(); len = blockDim.x; while(len != 1) { __syncthreads(); int skip = (len + 1) >> 1; if(threadIdx.x < (len >> 1)) _sqSum[threadIdx.x] += _sqSum[threadIdx.x + skip]; len = (len + 1) >> 1; } __syncthreads(); float sigma = sqrtf(eps + (_sqSum[0] / cols)); __syncthreads(); for(int tid = 0; tid < cols; tid += blockDim.x) { int id = tid + threadIdx.x; if(id < cols) { float t = alpha[id] * ((sp[id] - mean) / sigma); if(beta != nullptr) t += beta[id]; so[id] = t; } } } } } void LayerNormalization(Tensor out, Tensor in, Tensor gamma, Tensor beta, float eps) { hipSetDevice(out->getDevice()); int rows = in->shape().elements() / in->shape().back(); int cols = in->shape().back(); int blocks = ::min(MAX_BLOCKS, (int)rows); int threads = ::min(MAX_THREADS, (int)cols); int shared = 2 * threads * sizeof(float); hipLaunchKernelGGL(( gLNormalization), dim3(blocks), dim3(threads), shared, 0, out->data(), in->data(), gamma->data(), beta ? beta->data() : nullptr, rows, cols, eps); } __global__ void gLayerNormalizationGrad(float* gradX, float* gradGamma, float* gradBeta, float* adj, float* y, float* x, float* gamma, float* beta, int rows, int cols, float eps = 1e-9) { extern __shared__ float shared[]; for(int bid = 0; bid < rows; bid += gridDim.x) { int j = bid + blockIdx.x; if(j < rows) { float* sum_adj = shared; float* sum_adj_x = shared + blockDim.x; float* sum_x = shared + 2 * blockDim.x; float* sum_sqr = shared + 3 * blockDim.x; const float* xRow = x + j * cols; const float* yRow = y + j * cols; const float* adjRow = adj + j * cols; float* gradXRow = gradX + j * cols; sum_x[threadIdx.x] = 0.0f; sum_adj[threadIdx.x] = 0.0f; sum_adj_x[threadIdx.x] = 0.0f; sum_sqr[threadIdx.x] = 0.0f; for(int tid = 0; tid < cols; tid += blockDim.x) { int id = tid + threadIdx.x; if(id < cols) { sum_x[threadIdx.x] += xRow[id]; sum_adj_x[threadIdx.x] += adjRow[id] * (yRow[id] - ((beta) ? 
beta[id] : 0)) / gamma[id]; sum_adj[threadIdx.x] += adjRow[id]; } } __syncthreads(); int len = blockDim.x; while(len != 1) { __syncthreads(); int skip = (len + 1) >> 1; if(threadIdx.x < (len >> 1)) { sum_x[threadIdx.x] += sum_x[threadIdx.x + skip]; sum_adj[threadIdx.x] += sum_adj[threadIdx.x + skip]; sum_adj_x[threadIdx.x] += sum_adj_x[threadIdx.x + skip]; } len = (len + 1) >> 1; } __syncthreads(); float mean = sum_x[0] / cols; __syncthreads(); for(int tid = 0; tid < cols; tid += blockDim.x) { int id = tid + threadIdx.x; if(id < cols) { float ex = xRow[id] - mean; sum_sqr[threadIdx.x] += ex * ex; } } __syncthreads(); len = blockDim.x; while(len != 1) { __syncthreads(); int skip = (len + 1) >> 1; if(threadIdx.x < (len >> 1)) sum_sqr[threadIdx.x] += sum_sqr[threadIdx.x + skip]; len = (len + 1) >> 1; } __syncthreads(); float sigma = sqrtf(eps + (sum_sqr[0] / cols)); __syncthreads(); for(int tid = 0; tid < cols; tid += blockDim.x) { int id = tid + threadIdx.x; if(id < cols) { float grad_x = 0.0f; float x_hat = (yRow[id] - ((beta) ? beta[id] : 0)) / gamma[id]; grad_x += cols * adjRow[id]; grad_x -= sum_adj[0]; grad_x -= sum_adj_x[0] * x_hat; grad_x /= (cols * sigma); float valX = gamma[id] * grad_x; float sign = (0.f < valX) - (valX < 0.f); valX = fabs(valX) > 1000 ? sign * 1000 : valX; gradXRow[id] += valX; atomicAdd(gradGamma + id, adjRow[id] * x_hat); if(beta) { atomicAdd(gradBeta + id, adjRow[id]); } } } } } } void LayerNormalizationGrad(Tensor gradX, Tensor gradGamma, Tensor gradBeta, Tensor adj, Tensor y, Tensor x, Tensor gamma, Tensor beta, float eps) { hipSetDevice(adj->getDevice()); int rows = y->shape().elements() / y->shape().back(); int cols = y->shape().back(); int threads = ::min(MAX_THREADS, cols); int blocks = ::min(MAX_BLOCKS, rows); int shared = sizeof(float) * threads * 4; hipLaunchKernelGGL(( gLayerNormalizationGrad), dim3(blocks), dim3(threads), shared, 0, gradX->data(), gradGamma->data(), (gradBeta) ? gradBeta->data() : nullptr, adj->data(), y->data(), x->data(), gamma->data(), (beta) ? 
beta->data() : nullptr, rows, cols, eps); } __global__ void gShift(float* out, const float* in, int length, int offset) { for(int bid = 0; bid < length; bid += blockDim.x * gridDim.x) { int index = bid + blockDim.x * blockIdx.x + threadIdx.x; if(index < length) { if(index - offset < 0 || index - offset >= length) out[index] = 0; else out[index] = in[index - offset]; } } } void Shift(Tensor out, Tensor in, Shape shift, bool invert) { UTIL_THROW_IF2(in->shape().size() != shift.size(), "bad dimensions"); int offset = 0; for(int i = 0; i < shift.size(); ++i) offset += in->shape().stride(i) * shift[i]; if(invert) offset = -offset; hipSetDevice(out->getDevice()); int length = out->shape().elements(); int threads = ::min(MAX_THREADS, length); int blocks = ::min(MAX_BLOCKS, length / threads + (length % threads != 0)); hipLaunchKernelGGL(( gShift), dim3(blocks), dim3(threads), 0, 0, out->data(), in->data(), length, offset); } __global__ void gSetSparse(float* out, const size_t* indices, const float* values, int length) { for(int bid = 0; bid < length; bid += blockDim.x * gridDim.x) { int index = bid + blockDim.x * blockIdx.x + threadIdx.x; if(index < length) { out[indices[index]] = values[index]; } } } void SetSparse(float* out, const std::vector<size_t>& indices, const std::vector<float>& values) { int length = indices.size(); int threads = ::min(MAX_THREADS, length); int blocks = ::min(MAX_BLOCKS, length / threads + (length % threads != 0)); size_t* d_indices; CUDA_CHECK(hipMalloc(&d_indices, length * sizeof(size_t))); CUDA_CHECK(hipMemcpy(d_indices, indices.data(), length * sizeof(size_t), hipMemcpyHostToDevice)); float* d_values; CUDA_CHECK(hipMalloc(&d_values, length * sizeof(float))); CUDA_CHECK(hipMemcpy( d_values, values.data(), length * sizeof(float), hipMemcpyHostToDevice)); hipLaunchKernelGGL(( gSetSparse), dim3(blocks), dim3(threads), 0, 0, out, d_indices, d_values, length); hipFree(d_indices); hipFree(d_values); } /******************************************************************************/ __global__ void gLSTMCellForward(float* out, const float* cell, const float* xW, const float* sU, const float* b, const float* mask, size_t rows, size_t cols) { for(int bid = 0; bid < rows; bid += gridDim.x) { int j = bid + blockIdx.x; if(j < rows) { float m = !mask || mask[j]; float* rowOut = out + j * cols; const float* rowCell = cell + j * cols; const float* xWrow = xW + j * cols * 4; const float* sUrow = sU + j * cols * 4; for(int tid = 0; tid < cols; tid += blockDim.x) { int i = tid + threadIdx.x; if(i < cols) { float gf = stableLogit(xWrow[i] + sUrow[i] + b[i]); int k = i + cols; float gi = stableLogit(xWrow[k] + sUrow[k] + b[k]); int l = i + 2 * cols; float gc = tanhf(xWrow[l] + sUrow[l] + b[l]); float cout = gf * rowCell[i] + gi * gc; rowOut[i] = m * cout + (1 - m) * rowCell[i]; } } } } } void LSTMCellForward(Tensor out, std::vector<Tensor> inputs) { hipSetDevice(out->getDevice()); int rows = out->shape().elements() / out->shape().back(); int cols = out->shape().back(); int blocks = ::min(MAX_BLOCKS, rows); int threads = ::min(MAX_THREADS, cols); hipLaunchKernelGGL(( gLSTMCellForward), dim3(blocks), dim3(threads), 0, 0, out->data(), // output inputs[0]->data(), // cell state inputs[1]->data(), // xW inputs[2]->data(), // sU inputs[3]->data(), // b inputs.size() > 4 ? 
inputs[4]->data() : 0, // mask rows, cols); } __global__ void gLSTMOutputForward(float* out, const float* cell, const float* xW, const float* sU, const float* b, size_t rows, size_t cols) { for(int bid = 0; bid < rows; bid += gridDim.x) { int j = bid + blockIdx.x; if(j < rows) { float* rowOut = out + j * cols; const float* rowCell = cell + j * cols; const float* xWrow = xW + j * cols * 4; const float* sUrow = sU + j * cols * 4; for(int tid = 0; tid < cols; tid += blockDim.x) { int i = tid + threadIdx.x; if(i < cols) { int k = i + 3 * cols; float go = stableLogit(xWrow[k] + sUrow[k] + b[k]); rowOut[i] = go * tanhf(rowCell[i]); } } } } } void LSTMOutputForward(Tensor out, std::vector<Tensor> inputs) { hipSetDevice(out->getDevice()); int rows = out->shape().elements() / out->shape().back(); int cols = out->shape().back(); int blocks = ::min(MAX_BLOCKS, rows); int threads = ::min(MAX_THREADS, cols); hipLaunchKernelGGL(( gLSTMOutputForward), dim3(blocks), dim3(threads), 0, 0, out->data(), // output inputs[0]->data(), // cell state inputs[1]->data(), // xW inputs[2]->data(), // sU inputs[3]->data(), // b rows, cols); } __global__ void gLSTMCellBackward(float* outCell, float* outXW, float* outSU, float* outB, const float* cell, const float* xW, const float* sU, const float* b, const float* mask, const float* adj, size_t rows, size_t cols) { for(int bid = 0; bid < rows; bid += gridDim.x) { int j = bid + blockIdx.x; if(j < rows) { float m = !mask || mask[j]; float* rowOutCell = outCell + j * cols; float* rowOutXW = outXW + j * cols * 4; float* rowOutSU = outSU + j * cols * 4; const float* rowCell = cell + j * cols; const float* xWrow = xW + j * cols * 4; const float* sUrow = sU + j * cols * 4; const float* rowAdj = adj + j * cols; for(int tid = 0; tid < cols; tid += blockDim.x) { int i = tid + threadIdx.x; if(i < cols) { float gf = stableLogit(xWrow[i] + sUrow[i] + b[i]); int k = i + cols; float gi = stableLogit(xWrow[k] + sUrow[k] + b[k]); int l = i + 2 * cols; float gc = tanhf(xWrow[l] + sUrow[l] + b[l]); float adj = rowAdj[i]; // dc/dc_{t-1} if(outCell) rowOutCell[i] += (m * gf - m + 1) * adj; // dc/d(b_f) = dc/d(xW_f) ... float dcdxf = m * rowCell[i] * gf * (1 - gf) * adj; if(outXW) rowOutXW[i] += dcdxf; if(outSU) rowOutSU[i] += dcdxf; if(outB) atomicAdd(outB + i, dcdxf); // dc/d(b_i) ... float dcdb_i = m * gc * gi * (1 - gi) * adj; if(outXW) rowOutXW[k] += dcdb_i; if(outSU) rowOutSU[k] += dcdb_i; if(outB) atomicAdd(outB + k, dcdb_i); // dc/d(b_c) ... float dcdxc = m * gi * (1 - gc * gc) * adj; if(outXW) rowOutXW[l] += dcdxc; if(outSU) rowOutSU[l] += dcdxc; if(outB) atomicAdd(outB + l, dcdxc); } } } } } void LSTMCellBackward(std::vector<Tensor> outputs, std::vector<Tensor> inputs, Tensor adj) { hipSetDevice(adj->getDevice()); int rows = adj->shape().elements() / adj->shape().back(); int cols = adj->shape().back(); int blocks = ::min(MAX_BLOCKS, rows); int threads = ::min(MAX_THREADS, cols); hipLaunchKernelGGL(( gLSTMCellBackward), dim3(blocks), dim3(threads), 0, 0, outputs[0] ? outputs[0]->data() : 0, // state - adj outputs[1] ? outputs[1]->data() : 0, // xW - adj outputs[2] ? outputs[2]->data() : 0, // sU - adj outputs[3] ? outputs[3]->data() : 0, // b - adj inputs[0]->data(), // state inputs[1]->data(), // xW inputs[2]->data(), // sU inputs[3]->data(), // b inputs.size() > 4 ? 
inputs[4]->data() : 0, // mask adj->data(), rows, cols); } __global__ void gLSTMOutputBackward(float* outCell, float* outXW, float* outSU, float* outB, const float* cell, const float* xW, const float* sU, const float* b, const float* adj, size_t rows, size_t cols) { for(int bid = 0; bid < rows; bid += gridDim.x) { int j = bid + blockIdx.x; if(j < rows) { float* rowOutCell = outCell + j * cols; float* rowOutXW = outXW + j * cols * 4; float* rowOutSU = outSU + j * cols * 4; const float* rowCell = cell + j * cols; const float* xWrow = xW + j * cols * 4; const float* sUrow = sU + j * cols * 4; const float* rowAdj = adj + j * cols; for(int tid = 0; tid < cols; tid += blockDim.x) { int i = tid + threadIdx.x; if(i < cols) { int k = i + 3 * cols; float go = stableLogit(xWrow[k] + sUrow[k] + b[k]); float t = tanhf(rowCell[i]); float adj = rowAdj[i]; // dc/dc_{t-1} if(outCell) rowOutCell[i] += go * (1 - t * t) * adj; // dc/d(b_o) = dc/d(xW_f) ... float dcdxo = t * go * (1 - go) * adj; if(outXW) rowOutXW[k] += dcdxo; if(outSU) rowOutSU[k] += dcdxo; if(outB) atomicAdd(outB + k, dcdxo); } } } } } void LSTMOutputBackward(std::vector<Tensor> outputs, std::vector<Tensor> inputs, Tensor adj) { hipSetDevice(adj->getDevice()); int rows = adj->shape().elements() / adj->shape().back(); int cols = adj->shape().back(); int blocks = ::min(MAX_BLOCKS, rows); int threads = ::min(MAX_THREADS, cols); hipLaunchKernelGGL(( gLSTMOutputBackward), dim3(blocks), dim3(threads), 0, 0, outputs[0] ? outputs[0]->data() : 0, // state - adj outputs[1] ? outputs[1]->data() : 0, // xW - adj outputs[2] ? outputs[2]->data() : 0, // sU - adj outputs[3] ? outputs[3]->data() : 0, // b - adj inputs[0]->data(), // state inputs[1]->data(), // xW inputs[2]->data(), // sU inputs[3]->data(), // b adj->data(), rows, cols); } __global__ void gHighwayForward(float* out, const float* in1, const float* in2, const float* t, size_t length) { for(int bid = 0; bid < length; bid += blockDim.x * gridDim.x) { int index = bid + blockDim.x * blockIdx.x + threadIdx.x; if(index < length) { float sigma = stableLogit(t[index]); out[index] = in1[index] * sigma + in2[index] * (1.f - sigma); } } } void HighwayForward(Tensor out, const Tensor in1, const Tensor in2, const Tensor t) { hipSetDevice(out->getDevice()); int length = out->shape().elements(); int threads = ::min(MAX_THREADS, length); int blocks = ::min(MAX_BLOCKS, length / threads + (length % threads != 0)); hipLaunchKernelGGL(( gHighwayForward), dim3(blocks), dim3(threads), 0, 0, out->data(), in1->data(), in2->data(), t->data(), length); } __global__ void gHighwayBackward(float* out1, float* out2, float* outt, const float* in1, const float* in2, const float* t, const float* adj, size_t length) { for(int bid = 0; bid < length; bid += blockDim.x * gridDim.x) { int index = bid + blockDim.x * blockIdx.x + threadIdx.x; if(index < length) { float sigma = stableLogit(t[index]); out1[index] = sigma * adj[index]; out2[index] = (1.f - sigma) * adj[index]; outt[index] = sigma * (1.f - sigma) * (in1[index] - in2[index]) * adj[index]; } } } void HighwayBackward(Tensor out1, Tensor out2, Tensor outt, const Tensor in1, const Tensor in2, const Tensor t, const Tensor adj) { hipSetDevice(out1->getDevice()); int length = out1->shape().elements(); int threads = ::min(MAX_THREADS, length); int blocks = ::min(MAX_BLOCKS, length / threads + (length % threads != 0)); hipLaunchKernelGGL(( gHighwayBackward), dim3(blocks), dim3(threads), 0, 0, out1->data(), out2->data(), outt->data(), in1->data(), in2->data(), t->data(), 
adj->data(), length); } __global__ void gMaxPoolingForward(float* out, int outRows, int outCols, float* in, int inRows, int inCols, float* mask, int numKernels, int maskCols, int width, int lastWidth) { int tid = threadIdx.x + blockIdx.x * blockDim.x; if (tid >= outRows * outCols) return; int rowId = tid / outRows; int colId = tid % outRows; float* b = in + (rowId * inCols) + (colId * width); float* localMask = mask + (rowId / numKernels) * maskCols + colId * width; if (colId == outRows - 1) { width = lastWidth; } float currentMax = b[0] * localMask[0]; for (int i = 1; i < width; ++i) { if (b[i] * localMask[i] > currentMax) { currentMax = b[i] * localMask[i]; } } out[rowId + (colId * outCols)] = currentMax; } void PoolingWithMaskingForward(Tensor out, Tensor in, Tensor mask, int width, bool isEven) { int n = out->shape().elements(); int threads = ::min(n, MAX_THREADS); int blocks = n / threads + (n % threads != 0); Shape& inShape = in->shape(); int inRows = inShape[0] * inShape[1]; int inCols = inShape[2]; Shape& outShape = out->shape(); int outRows = outShape[2]; int outCols = outShape[0] * outShape[1]; int lastWidth = ((inCols - isEven) % width == 0) ? width : (inCols - isEven) % width; hipLaunchKernelGGL(( gMaxPoolingForward), dim3(blocks), dim3(threads), 0, 0, out->data(), outRows, outCols, in->data(), inRows, inCols, mask->data(), outShape[1], mask->shape()[2], width, lastWidth); } __global__ void gMaxPoolingBackward(float* adj, int adjRows, int adjCols, float* in, float* adjIn, int inRows, int inCols, float* mask, int numKernels, int maskCols, int width, int lastWidth) { int tid = threadIdx.x + blockIdx.x * blockDim.x; if (tid >= adjRows * adjCols) return; int rowId = tid / adjRows; int colId = tid % adjRows; float* b = in + (rowId * inCols) + (colId * width); if (colId == adjRows - 1) { width = lastWidth; } float* localMask = mask + (rowId / numKernels) * maskCols + colId * width; size_t currentMaxIdx = 0; for (int i = 1; i < width; ++i) { if (b[i] * localMask[i] > b[currentMaxIdx] * localMask[currentMaxIdx]) { currentMaxIdx = i; } } adjIn[(rowId * inCols) + (colId * width) + currentMaxIdx] += adj[rowId + (colId * adjCols)]; } void PoolingWithMaskingBackward(Tensor adj, Tensor adjIn, Tensor in, Tensor mask, int width, bool isEven) { int n = adj->shape().elements(); int threads = ::min(n, 512); int blocks = n / threads + (n % threads != 0); Shape& inShape = in->shape(); int inRows = inShape[0] * inShape[1]; int inCols = inShape[2]; Shape& adjShape = adj->shape(); int adjRows = adjShape[2]; int adjCols = adjShape[0] * adjShape[1]; int lastWidth = ((inCols - isEven) % width == 0) ? width : (inCols - isEven) % width; hipLaunchKernelGGL(( gMaxPoolingBackward), dim3(blocks), dim3(threads), 0, 0, adj->data(), adjRows, adjCols, in->data(), adjIn->data(), inRows, inCols, mask->data(), adjShape[1], mask->shape()[2], width, lastWidth); } } // namespace marian
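The hipified listing above and the CUDA listing in the record below differ mainly in the kernel-launch syntax and the runtime API prefixes (cuda* vs hip*); the kernel bodies are unchanged. A minimal, self-contained sketch of that correspondence, using a hypothetical gScale kernel that is not part of either listing, only an illustration of the mapping hipify applies:

// Sketch only: shows how a CUDA triple-chevron launch corresponds to the
// hipLaunchKernelGGL form used throughout the hipified file above.
#include <hip/hip_runtime.h>

__global__ void gScale(float* data, float factor, int n) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if(i < n)
    data[i] *= factor;
}

int main() {
  const int n = 1024;
  float* d;
  hipMalloc(&d, n * sizeof(float));   // CUDA equivalent: cudaMalloc
  int threads = 256;
  int blocks = (n + threads - 1) / threads;
  // CUDA source form:   gScale<<<blocks, threads, 0, 0>>>(d, 2.f, n);
  // HIP form emitted by hipify (grid/block dims wrapped in dim3, shared mem and stream explicit):
  hipLaunchKernelGGL(gScale, dim3(blocks), dim3(threads), 0, 0, d, 2.f, n);
  hipDeviceSynchronize();             // CUDA equivalent: cudaDeviceSynchronize
  hipFree(d);                         // CUDA equivalent: cudaFree
  return 0;
}

Host-side runtime calls map one-to-one in the same way (cudaSetDevice -> hipSetDevice, cudaMemcpyAsync -> hipMemcpyAsync, cudaStreamSynchronize -> hipStreamSynchronize), which is why the two files in this record track each other line for line.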
bee14097d8fd9827261622208ce4d1c6829d3b98.cu
#include <thrust/transform_reduce.h> #include "kernels/cuda_helpers.h" #include "kernels/tensor_operators.h" #include "3rd_party/reduce_all.h" namespace marian { #define CUDA_FLT_MAX 1.70141e+38 struct isnan_test { __host__ __device__ bool operator()(const float a) const { return isnan(a); } }; __device__ inline float stableLogit(float x) { if(x >= 0) { float z = expf(-x); return 1.0 / (1.0 + z); } else { float z = expf(x); return z / (1.0 + z); } } bool IsNan(Tensor in) { //cudaSetDevice(in->getDevice()); //thrust::device_ptr<float> begin = thrust::device_pointer_cast(in->data()); //thrust::device_ptr<float> end // = thrust::device_pointer_cast(in->data() + in->size()); //return thrust::transform_reduce( // begin, end, isnan_test(), 0, thrust::plus<bool>()); return false; } void ConcatCont(Tensor out, const std::vector<Tensor>& inputs, int axis) { cudaSetDevice(out->getDevice()); int step = 1; for(int i = 0; i < axis; ++i) step *= out->shape()[i]; size_t offset1 = 0; for(int i = 0; i < step; ++i) { for(auto in : inputs) { size_t size = in->shape().elements() / step; size_t offset2 = i * size; cudaMemcpyAsync(out->data() + offset1, in->data() + offset2, size * sizeof(float), cudaMemcpyDeviceToDevice); offset1 += size; } } cudaStreamSynchronize(0); } __global__ void gInsertCols(float* out, const float* in, size_t rows, size_t cols, size_t cols_out, size_t cols_in, size_t offset_out, size_t offset_in) { for(int bid = 0; bid < rows; bid += gridDim.x) { int j = bid + blockIdx.x; if(j < rows) { float* rowOut = out + j * cols_out + offset_out; const float* rowIn = in + j * cols_in + offset_in; for(int tid = 0; tid < cols; tid += blockDim.x) { int i = tid + threadIdx.x; if(i < cols) rowOut[i] = rowIn[i]; } } } } void Concatenate1(Tensor out, const std::vector<Tensor>& inputs) { cudaSetDevice(out->getDevice()); int rows = out->shape().elements() / out->shape().back(); size_t offset = 0; int cols_out = out->shape().back(); for(auto in : inputs) { ABORT_IF(rows != in->shape().elements() / in->shape().back(), "First dimension must be equal"); int cols_in = in->shape().back(); int blocks = std::min(MAX_BLOCKS, rows); int threads = std::min(MAX_THREADS, cols_in); gInsertCols<<<blocks, threads>>>( out->data(), in->data(), rows, cols_in, cols_out, cols_in, offset, 0); offset += cols_in; } cudaStreamSynchronize(0); } void Concatenate(Tensor out, const std::vector<Tensor>& inputs, int ax) { if(ax == out->shape().size() - 1) Concatenate1(out, inputs); else ConcatCont(out, inputs, ax); } void Split1(std::vector<Tensor>& outputs, const Tensor in) { cudaSetDevice(in->getDevice()); size_t offset = 0; int rows = in->shape().elements() / in->shape().back(); int cols_in = in->shape().back(); for(auto out : outputs) { ABORT_IF(rows != out->shape().elements() / out->shape().back(), "First dimension must be equal"); int cols_out = out->shape().back(); int blocks = std::min(MAX_BLOCKS, rows); int threads = std::min(MAX_THREADS, cols_out); gInsertCols<<<blocks, threads>>>( out->data(), in->data(), rows, cols_out, cols_out, cols_in, 0, offset); offset += cols_out; } cudaStreamSynchronize(0); } void SplitCont(std::vector<Tensor>& outputs, const Tensor in, int axis) { cudaSetDevice(in->getDevice()); int step = 1; for(int i = 0; i < axis; ++i) step *= in->shape()[i]; size_t offset1 = 0; for(int i = 0; i < step; ++i) { for(auto out : outputs) { size_t size = out->shape().elements() / step; size_t offset2 = i * size; cudaMemcpyAsync(out->data() + offset2, in->data() + offset1, size * sizeof(float), cudaMemcpyDeviceToDevice); 
offset1 += size; } } cudaStreamSynchronize(0); } void Deconcatenate(std::vector<Tensor>& outputs, const Tensor in, int ax) { if(ax == in->shape().size() - 1) Split1(outputs, in); else SplitCont(outputs, in, ax); } __global__ void gTransposeND(gpu::Tensor<float> out, const gpu::Tensor<float> in, const gpu::Array<int, gpu::Shape::size()> permute) { constexpr size_t N = gpu::Shape::size(); gpu::Array<int, N> oDims; gpu::Array<int, N> pDims; int length = out.shape().elements(); for(int bid = 0; bid < length; bid += blockDim.x * gridDim.x) { int index = bid + blockDim.x * blockIdx.x + threadIdx.x; if(index < length) { out.shape().dims(index, oDims); for(int i = 0; i < N; ++i) pDims[permute[i]] = oDims[i]; out[index] = in[pDims]; } } } void TransposeND(Tensor out, Tensor in, const std::vector<int>& vAxis) { cudaSetDevice(out->getDevice()); gpu::Array<int, gpu::Shape::size()> axes; int diff = gpu::Shape::size() - vAxis.size(); for(int i = 0; i < axes.size(); ++i) if(i < diff) axes[i] = i; else axes[i] = vAxis[i - diff] + diff; int length = out->shape().elements(); int threads = std::min(MAX_THREADS, length); int blocks = std::min(MAX_BLOCKS, length / threads + (length % threads != 0)); gTransposeND<<<blocks, threads>>>(out, in, axes); } __global__ void gSoftmax(float* out, gpu::Shape outShape, const float* in, const float* mask, const gpu::Shape maskShape) { int rows = outShape.elements() / outShape.back(); int cols = outShape.back(); bool broadcast = outShape != maskShape; gpu::Array<int, gpu::Shape::size()> dims; for(int bid = 0; bid < rows; bid += gridDim.x) { int j = bid + blockIdx.x; if(j < rows) { float* so = out + j * cols; const float* sp = in + j * cols; extern __shared__ float _share[]; float* _max = _share + blockDim.x; _max[threadIdx.x] = -CUDA_FLT_MAX; // mask for(int tid = 0; tid < cols; tid += blockDim.x) { int id = tid + threadIdx.x; if(id < cols) { float mVal = 1.f; if(mask) { int mIndex = id + j * cols; if(broadcast) { outShape.dims(mIndex, dims); mIndex = maskShape.bindex(dims); } mVal = mask[mIndex]; } if(mVal && sp[id] > _max[threadIdx.x]) _max[threadIdx.x] = sp[id]; } } __syncthreads(); int len = blockDim.x; while(len != 1) { __syncthreads(); int skip = (len + 1) >> 1; if(threadIdx.x < (len >> 1)) { if(_max[threadIdx.x + skip] > _max[threadIdx.x]) { _max[threadIdx.x] = _max[threadIdx.x + skip]; } } len = (len + 1) >> 1; } __syncthreads(); float max = _max[0]; __syncthreads(); float* _sum = _share + blockDim.x; _sum[threadIdx.x] = 0.0; for(int tid = 0; tid < cols; tid += blockDim.x) { int id = tid + threadIdx.x; if(id < cols) { float mVal = 1.f; if(mask) { int mIndex = id + j * cols; if(broadcast) { outShape.dims(mIndex, dims); mIndex = maskShape.bindex(dims); } mVal = mask[mIndex]; } float ex = 0; if(mVal) ex = __expf(sp[id] - max); so[id] = ex; _sum[threadIdx.x] += ex; } } __syncthreads(); len = blockDim.x; while(len != 1) { __syncthreads(); int skip = (len + 1) >> 1; if(threadIdx.x < (len >> 1)) _sum[threadIdx.x] += _sum[threadIdx.x + skip]; len = (len + 1) >> 1; } __syncthreads(); for(int tid = 0; tid < cols; tid += blockDim.x) { int id = tid + threadIdx.x; if(id < cols) { so[id] = so[id] / _sum[0]; } } } } } void Softmax(Tensor out, Tensor in, Tensor mask) { cudaSetDevice(out->getDevice()); size_t m = out->shape().elements() / out->shape().back(); size_t k = out->shape().back(); int blocks = std::min(MAX_BLOCKS, (int)m); int threads = std::min(MAX_THREADS, (int)k); int shared = sizeof(float) * threads * 2; if(mask) gSoftmax<<<blocks, threads, shared>>>( out->data(), 
out->shape(), in->data(), mask->data(), mask->shape()); else gSoftmax<<<blocks, threads, shared>>>( out->data(), out->shape(), in->data(), 0, out->shape()); } __global__ void gLogSoftmax(float* out, const gpu::Shape outShape, const float* in) { int rows = outShape.elements() / outShape.back(); int cols = outShape.back(); for(int bid = 0; bid < rows; bid += gridDim.x) { int j = bid + blockIdx.x; if(j < rows) { float* so = out + j * cols; const float* sp = in + j * cols; extern __shared__ float _share[]; float* _max = _share + blockDim.x; _max[threadIdx.x] = sp[threadIdx.x]; for(int tid = 0; tid < cols; tid += blockDim.x) { int id = tid + threadIdx.x; if(id < cols) { if(sp[id] > _max[threadIdx.x]) _max[threadIdx.x] = sp[id]; } } __syncthreads(); int len = blockDim.x; while(len != 1) { __syncthreads(); int skip = (len + 1) >> 1; if(threadIdx.x < (len >> 1)) { if(_max[threadIdx.x + skip] > _max[threadIdx.x]) { _max[threadIdx.x] = _max[threadIdx.x + skip]; } } len = (len + 1) >> 1; } __syncthreads(); float max = _max[0]; __syncthreads(); float* _sum = _share + blockDim.x; _sum[threadIdx.x] = 0.0; for(int tid = 0; tid < cols; tid += blockDim.x) { int id = tid + threadIdx.x; if(id < cols) { float sm = sp[id] - max; float ex = __expf(sm); so[id] = sm; _sum[threadIdx.x] += ex; } } __syncthreads(); len = blockDim.x; while(len != 1) { __syncthreads(); int skip = (len + 1) >> 1; if(threadIdx.x < (len >> 1)) _sum[threadIdx.x] += _sum[threadIdx.x + skip]; len = (len + 1) >> 1; } __syncthreads(); for(int tid = 0; tid < cols; tid += blockDim.x) { int id = tid + threadIdx.x; if(id < cols) so[id] -= __logf(_sum[0]); } } } } void LogSoftmax(Tensor out, Tensor in) { cudaSetDevice(out->getDevice()); size_t m = out->shape().elements() / out->shape().back(); size_t k = out->shape().back(); int blocks = std::min(MAX_BLOCKS, (int)m); int threads = std::min(MAX_THREADS, (int)k); int shared = sizeof(float) * threads * 2; gLogSoftmax<<<blocks, threads, shared>>>( out->data(), out->shape(), in->data()); } /////////////////////////////////////////////////////// __global__ void gSoftmaxGrad(float* grad, const float* adj, const float* val, const int rows, const int cols) { for(int bid = 0; bid < rows; bid += gridDim.x) { int j = bid + blockIdx.x; if(j < rows) { extern __shared__ float _share[]; float* _sum = _share + blockDim.x; float* gradRow = grad + j * cols; const float* adjRow = adj + j * cols; const float* valRow = val + j * cols; _sum[threadIdx.x] = 0.0; for(int tid = 0; tid < cols; tid += blockDim.x) { int id = tid + threadIdx.x; if(id < cols) { _sum[threadIdx.x] += valRow[id] * adjRow[id]; } } __syncthreads(); int len = blockDim.x; while(len != 1) { __syncthreads(); int skip = (len + 1) >> 1; if(threadIdx.x < (len >> 1)) _sum[threadIdx.x] += _sum[threadIdx.x + skip]; len = (len + 1) >> 1; } __syncthreads(); for(int tid = 0; tid < cols; tid += blockDim.x) { int id = tid + threadIdx.x; if(id < cols) { float val = valRow[id] * (adjRow[id] - _sum[0]); if(val) gradRow[id] += val; } } } } } void SoftmaxGrad(Tensor grad, Tensor adj, Tensor val) { cudaSetDevice(adj->getDevice()); // grad and val are both m-by-k matrices, passed as input. // A weighted average of each row of grad (according to the weights // specified in val) is computed and subtracted from Out. 
// adj is multiplied for each element to get backward step in autodiff int m = grad->shape().elements() / grad->shape().back(); int k = grad->shape().back(); int blocks = std::min(MAX_BLOCKS, m); int threads = std::min(MAX_THREADS, k); int shared = sizeof(float) * threads * 2; gSoftmaxGrad<<<blocks, threads, shared>>>( grad->data(), adj->data(), val->data(), m, k); } __global__ void gLogSoftmaxGrad(float* grad, const float* adj, const float* val, const int rows, const int cols) { for(int bid = 0; bid < rows; bid += gridDim.x) { int j = bid + blockIdx.x; if(j < rows) { extern __shared__ float _share[]; float* _sum = _share + blockDim.x; float* gradRow = grad + j * cols; const float* adjRow = adj + j * cols; const float* valRow = val + j * cols; _sum[threadIdx.x] = 0.0; for(int tid = 0; tid < cols; tid += blockDim.x) { int id = tid + threadIdx.x; if(id < cols) { _sum[threadIdx.x] += adjRow[id]; } } __syncthreads(); int len = blockDim.x; while(len != 1) { __syncthreads(); int skip = (len + 1) >> 1; if(threadIdx.x < (len >> 1)) _sum[threadIdx.x] += _sum[threadIdx.x + skip]; len = (len + 1) >> 1; } __syncthreads(); for(int tid = 0; tid < cols; tid += blockDim.x) { int id = tid + threadIdx.x; if(id < cols) gradRow[id] += adjRow[id] - (expf(valRow[id]) * _sum[0]); } } } } void LogSoftmaxGrad(Tensor grad, Tensor adj, Tensor val) { cudaSetDevice(adj->getDevice()); // grad and val are both m-by-k matrices, passed as input. // A weighted average of each row of grad (according to the weights // specified in val) is computed and subtracted from Out. // adj is multiplied for each element to get backward step in autodiff int m = grad->shape().elements() / grad->shape().back(); int k = grad->shape().back(); int blocks = std::min(MAX_BLOCKS, m); int threads = std::min(MAX_THREADS, k); int shared = sizeof(float) * threads * 2; gLogSoftmaxGrad<<<blocks, threads, shared>>>( grad->data(), adj->data(), val->data(), m, k); } /////////////////////////////////////////////////////// __global__ void gArgmax(float* out, const float* data, size_t rows, size_t cols) { size_t row = blockIdx.x; size_t startInd = row * cols; float maxScore = -99999; size_t maxInd; for(size_t col = 0; col < cols; ++col) { size_t ind = startInd + col; float score = data[ind]; if(score > maxScore) { maxScore = score; maxInd = col; } } out[row] = maxInd; } /////////////////////////////////////////////////////// void Prod(cublasHandle_t handle, Tensor C, const Tensor A, const Tensor B, bool transA, bool transB, float beta, float scalar) { cudaSetDevice(C->getDevice()); float alpha = scalar; size_t m = A->shape().elements() / A->shape().back(); size_t k = A->shape().back(); if(transA) std::swap(m, k); size_t l = B->shape().elements() / B->shape().back(); size_t n = B->shape().back(); if(transB) std::swap(l, n); size_t lda = A->shape().back(); size_t ldb = B->shape().back(); size_t ldc = B->shape().back(); if(transB) ldc = B->shape().elements() / B->shape().back(); cublasOperation_t opA = transA ? CUBLAS_OP_T : CUBLAS_OP_N; cublasOperation_t opB = transB ? 
CUBLAS_OP_T : CUBLAS_OP_N; #if CUDA_VERSION >= 9000 //cublasSetMathMode(handle, CUBLAS_TENSOR_OP_MATH); #endif cublasSgemm(handle, opB, opA, n, m, k, &alpha, B->data(), ldb, A->data(), lda, &beta, C->data(), ldc); #if CUDA_VERSION >= 9000 //cublasSetMathMode(handle, CUBLAS_DEFAULT_MATH); #endif } void ProdBatched(cublasHandle_t handle, Tensor C, const Tensor A, const Tensor B, bool transA, bool transB, float beta, float scalar) { cudaSetDevice(C->getDevice()); float alpha = scalar; size_t batchA = A->shape().elements() / (A->shape()[-1] * A->shape()[-2]); size_t batchB = B->shape().elements() / (B->shape()[-1] * B->shape()[-2]); size_t m = A->shape()[-2]; size_t k = A->shape()[-1]; if(transA) std::swap(m, k); size_t l = B->shape()[-2]; size_t n = B->shape()[-1]; if(transB) std::swap(l, n); size_t lda = A->shape()[-1]; size_t ldb = B->shape()[-1]; size_t ldc = B->shape()[-1]; if(transB) ldc = B->shape()[-2]; cublasOperation_t opA = transA ? CUBLAS_OP_T : CUBLAS_OP_N; cublasOperation_t opB = transB ? CUBLAS_OP_T : CUBLAS_OP_N; #if CUDA_VERSION >= 9000 //cublasSetMathMode(handle, CUBLAS_TENSOR_OP_MATH); #endif cublasSgemmStridedBatched(handle, opB, opA, n, m, k, &alpha, B->data(), ldb, batchB == 1 ? 0 : n * k, A->data(), lda, batchA == 1 ? 0 : m * k, &beta, C->data(), ldc, n * m, std::max(batchA, batchB)); #if CUDA_VERSION >= 9000 //cublasSetMathMode(handle, CUBLAS_DEFAULT_MATH); #endif } __global__ void gCopyRows(float* out, const float* in, size_t cols, const size_t* sourceRowIdx, size_t rows) { for(int bid = 0; bid < rows; bid += gridDim.x) { int j = bid + blockIdx.x; if(j < rows) { size_t dstId = j; size_t srcId = sourceRowIdx[j]; float* rowOut = out + dstId * cols; const float* rowIn = in + srcId * cols; for(int tid = 0; tid < cols; tid += blockDim.x) { int i = tid + threadIdx.x; if(i < cols) rowOut[i] = rowIn[i]; } } } } void CopyRows(Tensor out, const Tensor in, const std::vector<size_t>& indices) { cudaSetDevice(out->getDevice()); size_t cols = in->shape().back(); size_t rowsToCopy = indices.size(); int threads = std::min(MAX_THREADS, (int)cols); int blocks = std::min(MAX_BLOCKS, (int)rowsToCopy); size_t* d_indices; CUDA_CHECK(cudaMalloc(&d_indices, rowsToCopy * sizeof(size_t))); CUDA_CHECK(cudaMemcpy(d_indices, indices.data(), rowsToCopy * sizeof(size_t), cudaMemcpyHostToDevice)); gCopyRows<<<blocks, threads>>>( out->data(), in->data(), cols, d_indices, rowsToCopy); CUDA_CHECK(cudaFree(d_indices)); } __global__ void gPasteRows(float* out, const float* in, size_t cols, const size_t* targetRowIdx, size_t rows) { for(int bid = 0; bid < rows; bid += gridDim.x) { int j = bid + blockIdx.x; if(j < rows) { size_t dstId = targetRowIdx[j]; size_t srcId = j; float* rowOut = out + dstId * cols; const float* rowIn = in + srcId * cols; for(int tid = 0; tid < cols; tid += blockDim.x) { int i = tid + threadIdx.x; if(i < cols) atomicAdd(rowOut + i, rowIn[i]); } } } } void PasteRows(Tensor out, const Tensor in, const std::vector<size_t>& indices) { cudaSetDevice(out->getDevice()); size_t cols = in->shape().back(); size_t rowsToCopy = indices.size(); int threads = std::min(MAX_THREADS, (int)cols); int blocks = std::min(MAX_BLOCKS, (int)rowsToCopy); // @TODO: turn into tensor size_t* d_indices; CUDA_CHECK(cudaMalloc(&d_indices, rowsToCopy * sizeof(size_t))); CUDA_CHECK(cudaMemcpy(d_indices, indices.data(), rowsToCopy * sizeof(size_t), cudaMemcpyHostToDevice)); gPasteRows<<<blocks, threads>>>( out->data(), in->data(), cols, d_indices, rowsToCopy); CUDA_CHECK(cudaFree(d_indices)); } ///////////// 
__global__ void gCopyCols(float* out, const float* in, size_t rows, size_t colsIn, const size_t* sourceColIdx, size_t colsOut) { for(int bid = 0; bid < rows; bid += gridDim.x) { int j = bid + blockIdx.x; if(j < rows) { const float* rowIn = in + j * colsIn; float* rowOut = out + j * colsOut; for(int tid = 0; tid < colsOut; tid += blockDim.x) { int i = tid + threadIdx.x; if(i < colsOut) rowOut[i] = rowIn[sourceColIdx[i]]; } } } } void CopyCols(Tensor out, const Tensor in, const std::vector<size_t>& indices) { cudaSetDevice(out->getDevice()); size_t rows = in->shape().elements() / in->shape().back(); size_t cols = in->shape().back(); size_t colsToCopy = indices.size(); int threads = std::min(MAX_THREADS, (int)colsToCopy); int blocks = std::min(MAX_BLOCKS, (int)rows); size_t* d_indices; CUDA_CHECK(cudaMalloc(&d_indices, colsToCopy * sizeof(size_t))); CUDA_CHECK(cudaMemcpy(d_indices, indices.data(), colsToCopy * sizeof(size_t), cudaMemcpyHostToDevice)); gCopyCols<<<blocks, threads>>>( out->data(), in->data(), rows, cols, d_indices, colsToCopy); CUDA_CHECK(cudaFree(d_indices)); } __global__ void gPasteCols(float* out, const float* in, size_t rows, size_t colsOut, const size_t* targetColIdx, size_t colsIn) { for(int bid = 0; bid < rows; bid += gridDim.x) { int j = bid + blockIdx.x; if(j < rows) { const float* rowIn = in + j * colsIn; float* rowOut = out + j * colsOut; for(int tid = 0; tid < colsIn; tid += blockDim.x) { int i = tid + threadIdx.x; if(i < colsIn) rowOut[targetColIdx[i]] = rowIn[i]; } } } } void PasteCols(Tensor out, const Tensor in, const std::vector<size_t>& indices) { cudaSetDevice(out->getDevice()); size_t rows = in->shape().elements() / in->shape().back(); size_t cols = in->shape().back(); size_t colsToCopy = indices.size(); int threads = std::min(MAX_THREADS, (int)colsToCopy); int blocks = std::min(MAX_BLOCKS, (int)rows); size_t* d_indices; CUDA_CHECK(cudaMalloc(&d_indices, colsToCopy * sizeof(size_t))); CUDA_CHECK(cudaMemcpy(d_indices, indices.data(), colsToCopy * sizeof(size_t), cudaMemcpyHostToDevice)); gPasteCols<<<blocks, threads>>>( out->data(), in->data(), rows, cols, d_indices, colsToCopy); CUDA_CHECK(cudaFree(d_indices)); } __global__ void gSelect(float* out, gpu::Shape outShape, const float* in, const gpu::Shape inShape, int axis, size_t* d_indices) { int length = outShape.elements(); gpu::Array<int, gpu::Shape::size()> dims; for(int bid = 0; bid < length; bid += blockDim.x * gridDim.x) { int index = bid + blockDim.x * blockIdx.x + threadIdx.x; if(index < length) { outShape.dims(index, dims); dims[axis] = d_indices[dims[axis]]; int inIndex = inShape.index(dims); out[index] = in[inIndex]; } } } __global__ void gInsert(float* out, gpu::Shape outShape, const float* in, const gpu::Shape inShape, int axis, size_t* d_indices) { int length = inShape.elements(); gpu::Array<int, gpu::Shape::size()> dims; for(int bid = 0; bid < length; bid += blockDim.x * gridDim.x) { int index = bid + blockDim.x * blockIdx.x + threadIdx.x; if(index < length) { inShape.dims(index, dims); dims[axis] = d_indices[dims[index]]; int outIndex = outShape.index(dims); out[outIndex] = in[index]; } } } void Select(Ptr<Allocator<DeviceGPU>> allocator, Tensor out, const Tensor in, int axis, const std::vector<size_t>& indices) { cudaSetDevice(out->getDevice()); int length = out->shape().elements(); int threads = std::min(MAX_THREADS, length); int blocks = std::min(MAX_BLOCKS, length / threads + (length % threads != 0)); auto mp_indices = allocator->alloc<size_t>(indices.size()); 
mp_indices->insert(indices.data(), indices.size()); int axisGPU = axis + gpu::Shape::size() - out->shape().size(); gSelect<<<blocks, threads>>>(out->data(), out->shape(), in->data(), in->shape(), axisGPU, mp_indices->data<size_t>()); allocator->free(mp_indices); } void Insert(Ptr<Allocator<DeviceGPU>> allocator, Tensor out, const Tensor in, int axis, const std::vector<size_t>& indices) { cudaSetDevice(in->getDevice()); int length = in->shape().elements(); int threads = std::min(MAX_THREADS, length); int blocks = std::min(MAX_BLOCKS, length / threads + (length % threads != 0)); auto mp_indices = allocator->alloc<size_t>(indices.size()); mp_indices->insert(indices.data(), indices.size()); int axisGPU = axis + gpu::Shape::size() - out->shape().size(); gInsert<<<blocks, threads>>>(out->data(), out->shape(), in->data(), in->shape(), axisGPU, mp_indices->data<size_t>()); allocator->free(mp_indices); } __global__ void gGRUFastForward(float* out, const float* state, const float* xW, const float* sU, const float* b, const float* mask, size_t rows, size_t cols, bool final) { for(int bid = 0; bid < rows; bid += gridDim.x) { int j = bid + blockIdx.x; if(j < rows) { float m = !mask || mask[j]; float* rowOut = out + j * cols; const float* rowState = state + j * cols; const float* xWrow = xW + j * cols * 3; const float* sUrow = sU + j * cols * 3; for(int tid = 0; tid < cols; tid += blockDim.x) { int i = tid + threadIdx.x; if(i < cols) { float r = stableLogit(xWrow[i] + sUrow[i] + b[i]); int k = i + cols; float z = stableLogit(xWrow[k] + sUrow[k] + b[k]); int l = i + 2 * cols; float h; if(final) h = tanhf(xWrow[l] + (sUrow[l] + b[l]) * r); else h = tanhf(xWrow[l] + sUrow[l] * r + b[l]); float out = (1.0f - z) * h + z * rowState[i]; rowOut[i] = m * out + (1 - m) * rowState[i]; } } } } } void GRUFastForward(Tensor out, std::vector<Tensor> inputs, bool final) { cudaSetDevice(out->getDevice()); int rows = out->shape().elements() / out->shape().back(); int cols = out->shape().back(); int blocks = std::min(MAX_BLOCKS, rows); int threads = std::min(MAX_THREADS, cols); gGRUFastForward<<<blocks, threads>>>( out->data(), // output inputs[0]->data(), // state inputs[1]->data(), // xW inputs[2]->data(), // sU inputs[3]->data(), // b inputs.size() > 4 ? inputs[4]->data() : 0, // mask rows, cols, final); } __global__ void gGRUFastBackward(float* outState, float* outXW, float* outSU, float* outB, const float* state, const float* xW, const float* sU, const float* b, const float* mask, const float* adj, size_t rows, size_t cols, bool final) { for(int bid = 0; bid < rows; bid += gridDim.x) { int j = bid + blockIdx.x; if(j < rows) { float m = !mask || mask[j]; float* rowOutState = outState + j * cols; float* rowOutXW = outXW + j * cols * 3; float* rowOutSU = outSU + j * cols * 3; const float* rowState = state + j * cols; const float* rowXW = xW + j * cols * 3; const float* rowSU = sU + j * cols * 3; const float* rowAdj = adj + j * cols; for(int tid = 0; tid < cols; tid += blockDim.x) { int i = tid + threadIdx.x; if(i < cols) { int k = i + cols; int l = i + 2 * cols; float r = stableLogit(rowXW[i] + rowSU[i] + b[i]); float z = stableLogit(rowXW[k] + rowSU[k] + b[k]); float h; if(final) h = tanhf(rowXW[l] + (rowSU[l] + b[l]) * r); else h = tanhf(rowXW[l] + rowSU[l] * r + b[l]); float adj = rowAdj[i]; float t = (1 - z) * (1 - h * h); // df/ds if(outState) rowOutState[i] += (m * z - m + 1) * adj; // df/d(xW_r) ... 
float dfdxW_r = m * r * (1 - r) * t * adj; if(final) dfdxW_r *= rowSU[l] + b[l]; else dfdxW_r *= rowSU[l]; if(outXW) rowOutXW[i] += dfdxW_r; if(outSU) rowOutSU[i] += dfdxW_r; if(outB) atomicAdd(outB + i, dfdxW_r); // df/d(xW_z) ... float dfdxW_z = m * (1 - z) * z * (rowState[i] - h) * adj; if(outXW) rowOutXW[k] += dfdxW_z; if(outSU) rowOutSU[k] += dfdxW_z; if(outB) atomicAdd(outB + k, dfdxW_z); // df/d(xW_x) ... float dfdxW_x = m * t * adj; if(outXW) rowOutXW[l] += dfdxW_x; if(outSU) rowOutSU[l] += dfdxW_x * r; if(outB) if(final) atomicAdd(outB + l, dfdxW_x * r); else atomicAdd(outB + l, dfdxW_x); } } } } } void GRUFastBackward(std::vector<Tensor> outputs, std::vector<Tensor> inputs, Tensor adj, bool final) { cudaSetDevice(adj->getDevice()); int rows = adj->shape().elements() / adj->shape().back(); int cols = adj->shape().back(); int blocks = std::min(MAX_BLOCKS, rows); int threads = std::min(MAX_THREADS, cols); gGRUFastBackward<<<blocks, threads>>>( outputs[0] ? outputs[0]->data() : 0, // state - adj outputs[1] ? outputs[1]->data() : 0, // xW - adj outputs[2] ? outputs[2]->data() : 0, // sU - adj outputs[3] ? outputs[3]->data() : 0, // b - adj inputs[0]->data(), // state inputs[1]->data(), // xW inputs[2]->data(), // sU inputs[3]->data(), // b inputs.size() > 4 ? inputs[4]->data() : 0, // mask adj->data(), rows, cols, final); } __global__ void gCrossEntropyPick(float* out, const gpu::Shape outShape, const float* in, const gpu::Shape inShape, const float* pick) { int rows = inShape.elements() / inShape.back(); int cols = inShape.back(); for(int bid = 0; bid < rows; bid += gridDim.x) { int j = bid + blockIdx.x; if(j < rows) { const float* sp = in + j * cols; extern __shared__ float _share[]; float* _max = _share + blockDim.x; _max[threadIdx.x] = sp[threadIdx.x]; for(int tid = 1; tid < cols; tid += blockDim.x) { int id = tid + threadIdx.x; if(id < cols) { if(sp[id] > _max[threadIdx.x]) _max[threadIdx.x] = sp[id]; } } __syncthreads(); int len = blockDim.x; while(len != 1) { __syncthreads(); int skip = (len + 1) >> 1; if(threadIdx.x < (len >> 1)) { if(_max[threadIdx.x + skip] > _max[threadIdx.x]) { _max[threadIdx.x] = _max[threadIdx.x + skip]; } } len = (len + 1) >> 1; } __syncthreads(); float max = _max[0]; __syncthreads(); float* _sum = _share + blockDim.x; _sum[threadIdx.x] = 0.0; for(int tid = 0; tid < cols; tid += blockDim.x) { int id = tid + threadIdx.x; if(id < cols) { _sum[threadIdx.x] += __expf(sp[id] - max); } } __syncthreads(); len = blockDim.x; while(len != 1) { __syncthreads(); int skip = (len + 1) >> 1; if(threadIdx.x < (len >> 1)) _sum[threadIdx.x] += _sum[threadIdx.x + skip]; len = (len + 1) >> 1; } __syncthreads(); // cross-entropy for(int tid = 0; tid < cols; tid += blockDim.x) { int id = tid + threadIdx.x; if(id == (int)pick[j]) { out[j] = __logf(_sum[0]) - sp[id] + max; } } } } } void CrossEntropyPick(Tensor out, Tensor in, Tensor pick) { cudaSetDevice(out->getDevice()); int rows = in->shape().elements() / in->shape().back(); int cols = in->shape().back(); int blocks = std::min(MAX_BLOCKS, (int)rows); int threads = std::min(MAX_THREADS, (int)cols); int shared = sizeof(float) * threads * 2; gCrossEntropyPick<<<blocks, threads, shared>>>( out->data(), out->shape(), in->data(), in->shape(), pick->data()); } __global__ void gCrossEntropyPickBackward(float* out, const gpu::Shape outShape, const float* adj, const float* in, const float* pick) { int rows = outShape.elements() / outShape.back(); int cols = outShape.back(); for(int bid = 0; bid < rows; bid += gridDim.x) { int j = 
bid + blockIdx.x; if(j < rows) { const float* sp = in + j * cols; float* so = out + j * cols; extern __shared__ float _share[]; float* _max = _share + blockDim.x; _max[threadIdx.x] = sp[threadIdx.x]; for(int tid = 1; tid < cols; tid += blockDim.x) { int id = tid + threadIdx.x; if(id < cols) { if(sp[id] > _max[threadIdx.x]) _max[threadIdx.x] = sp[id]; } } __syncthreads(); int len = blockDim.x; while(len != 1) { __syncthreads(); int skip = (len + 1) >> 1; if(threadIdx.x < (len >> 1)) { if(_max[threadIdx.x + skip] > _max[threadIdx.x]) { _max[threadIdx.x] = _max[threadIdx.x + skip]; } } len = (len + 1) >> 1; } __syncthreads(); float max = _max[0]; __syncthreads(); float* _sum = _share + blockDim.x; _sum[threadIdx.x] = 0.0; for(int tid = 0; tid < cols; tid += blockDim.x) { int id = tid + threadIdx.x; if(id < cols) { float ex = __expf(sp[id] - max); _sum[threadIdx.x] += ex; } } __syncthreads(); len = blockDim.x; while(len != 1) { __syncthreads(); int skip = (len + 1) >> 1; if(threadIdx.x < (len >> 1)) _sum[threadIdx.x] += _sum[threadIdx.x + skip]; len = (len + 1) >> 1; } __syncthreads(); // cross-entropy for(int tid = 0; tid < cols; tid += blockDim.x) { int id = tid + threadIdx.x; if(id < cols) { float sub = (float)(id == (int)pick[j]); so[id] += adj[j] * (__expf(sp[id] - max) / _sum[0] - sub); } } } } } void CrossEntropyPickBackward(Tensor out, Tensor adj, Tensor a, Tensor pick) { cudaSetDevice(out->getDevice()); int rows = out->shape().elements() / out->shape().back(); int cols = out->shape().back(); int blocks = std::min(MAX_BLOCKS, (int)rows); int threads = std::min(MAX_THREADS, (int)cols); int shared = sizeof(float) * threads * 2; gCrossEntropyPickBackward<<<blocks, threads, shared>>>( out->data(), out->shape(), adj->data(), a->data(), pick->data()); } float L2Norm(Tensor in) { using namespace functional; cudaSetDevice(in->getDevice()); int size = in->shape().elements(); int threads = std::min(MAX_THREADS, size); int blocks = std::min(MAX_BLOCKS, size / threads + (size % threads != 0)); uint8_t* data; cudaMalloc(&data, blocks * sizeof(float)); Tensor out(new TensorBase( New<MemoryPiece>(data, blocks * sizeof(float)), {1, blocks}, in->getDevice())); ReduceAll(_1 * _1, out, in); float dataCpu = sqrtf(out->get(0)); out.reset(); cudaFree(data); return dataCpu; } __global__ void gAtt(float* out, const float* va, const float* ctx, const float* state, int m, // total rows (batch x time x beam) int k, // depth int b, // batch size int t // time of ctx ) { int rows = m; int cols = k; for(int bid = 0; bid < rows; bid += gridDim.x) { int j = bid + blockIdx.x; if(j < rows) { const float* vaRow = va; const float* ctxRow = ctx + (j % (b * t)) * cols; const float* stateRow = state + ((j / (b * t)) * b + j % b) * cols; extern __shared__ float _share[]; float* _sum = _share + blockDim.x; _sum[threadIdx.x] = 0.0; for(int tid = 0; tid < cols; tid += blockDim.x) { int id = tid + threadIdx.x; if(id < cols) { float z = ctxRow[id] + stateRow[id]; float ex = tanhf(z) * vaRow[id]; _sum[threadIdx.x] += ex; } } __syncthreads(); int len = blockDim.x; while(len != 1) { __syncthreads(); int skip = (len + 1) >> 1; if(threadIdx.x < (len >> 1)) _sum[threadIdx.x] += _sum[threadIdx.x + skip]; len = (len + 1) >> 1; } __syncthreads(); out[j] = _sum[0]; __syncthreads(); } } } void Att(Tensor out, Tensor va, Tensor context, Tensor state) { cudaSetDevice(out->getDevice()); size_t m = out->shape().elements() / out->shape().back(); size_t k = context->shape()[-1]; size_t b = context->shape()[-2]; size_t t = context->shape()[-3]; 
int blocks = std::min(MAX_BLOCKS, (int)m); int threads = std::min(MAX_THREADS, (int)k); int shared = sizeof(float) * threads * 2; gAtt<<<blocks, threads, shared>>>(out->data(), va->data(), context->data(), state->data(), m, k, b, t); } __global__ void gAttBack(float* gVa, float* gContext, float* gState, const float* va, const float* context, const float* state, const float* adj, int m, // rows int k, // cols int n // batch size ) { int rows = m; int cols = k; for(int bid = 0; bid < m; bid += gridDim.x) { int j = bid + blockIdx.x; if(j < rows) { float* gcRow = gContext + j * cols; float* gsRow = gState + (j % n) * cols; const float* cRow = context + j * cols; const float* sRow = state + (j % n) * cols; for(int tid = 0; tid < cols; tid += blockDim.x) { int id = tid + threadIdx.x; if(id < cols) { float z = cRow[id] + sRow[id]; float t = tanhf(z); float r = va[id] * (1.f - t * t); gcRow[id] += r * adj[j]; gsRow[id] += r * adj[j]; atomicAdd(gVa + id, t * adj[j]); } } } } } void AttBack(Tensor gVa, Tensor gContext, Tensor gState, Tensor va, Tensor context, Tensor state, Tensor adj) { cudaSetDevice(adj->getDevice()); size_t m = adj->shape().elements() / adj->shape().back(); size_t dims = context->shape().size(); size_t k = context->shape()[dims - 1]; size_t n = context->shape()[dims - 2]; int blocks = std::min(MAX_BLOCKS, (int)n); int threads = std::min(MAX_THREADS, (int)k); gAttBack<<<blocks, threads>>>(gVa->data(), gContext->data(), gState->data(), va->data(), context->data(), state->data(), adj->data(), m, k, n); } __global__ void gLNormalization(float* out, const float* in, const float* alpha, const float* beta, int rows, int cols, float eps = 1e-9) { extern __shared__ float _share[]; for(int bid = 0; bid < rows; bid += gridDim.x) { int j = bid + blockIdx.x; if(j < rows) { float* so = out + j * cols; const float* sp = in + j * cols; float* _sum = _share + blockDim.x; _sum[threadIdx.x] = 0.0f; for(int tid = 0; tid < cols; tid += blockDim.x) { int id = tid + threadIdx.x; if(id < cols) { _sum[threadIdx.x] += sp[id]; } } __syncthreads(); int len = blockDim.x; while(len != 1) { __syncthreads(); int skip = (len + 1) >> 1; if(threadIdx.x < (len >> 1)) { _sum[threadIdx.x] += _sum[threadIdx.x + skip]; } len = (len + 1) >> 1; } __syncthreads(); float mean = _sum[0] / cols; __syncthreads(); float* _sqSum = _share + blockDim.x; _sqSum[threadIdx.x] = 0.0; for(int tid = 0; tid < cols; tid += blockDim.x) { int id = tid + threadIdx.x; if(id < cols) { float ex = sp[id] - mean; _sqSum[threadIdx.x] += ex * ex; } } __syncthreads(); len = blockDim.x; while(len != 1) { __syncthreads(); int skip = (len + 1) >> 1; if(threadIdx.x < (len >> 1)) _sqSum[threadIdx.x] += _sqSum[threadIdx.x + skip]; len = (len + 1) >> 1; } __syncthreads(); float sigma = sqrtf(eps + (_sqSum[0] / cols)); __syncthreads(); for(int tid = 0; tid < cols; tid += blockDim.x) { int id = tid + threadIdx.x; if(id < cols) { float t = alpha[id] * ((sp[id] - mean) / sigma); if(beta != nullptr) t += beta[id]; so[id] = t; } } } } } void LayerNormalization(Tensor out, Tensor in, Tensor gamma, Tensor beta, float eps) { cudaSetDevice(out->getDevice()); int rows = in->shape().elements() / in->shape().back(); int cols = in->shape().back(); int blocks = std::min(MAX_BLOCKS, (int)rows); int threads = std::min(MAX_THREADS, (int)cols); int shared = 2 * threads * sizeof(float); gLNormalization<<<blocks, threads, shared>>>(out->data(), in->data(), gamma->data(), beta ? 
beta->data() : nullptr, rows, cols, eps); } __global__ void gLayerNormalizationGrad(float* gradX, float* gradGamma, float* gradBeta, float* adj, float* y, float* x, float* gamma, float* beta, int rows, int cols, float eps = 1e-9) { extern __shared__ float shared[]; for(int bid = 0; bid < rows; bid += gridDim.x) { int j = bid + blockIdx.x; if(j < rows) { float* sum_adj = shared; float* sum_adj_x = shared + blockDim.x; float* sum_x = shared + 2 * blockDim.x; float* sum_sqr = shared + 3 * blockDim.x; const float* xRow = x + j * cols; const float* yRow = y + j * cols; const float* adjRow = adj + j * cols; float* gradXRow = gradX + j * cols; sum_x[threadIdx.x] = 0.0f; sum_adj[threadIdx.x] = 0.0f; sum_adj_x[threadIdx.x] = 0.0f; sum_sqr[threadIdx.x] = 0.0f; for(int tid = 0; tid < cols; tid += blockDim.x) { int id = tid + threadIdx.x; if(id < cols) { sum_x[threadIdx.x] += xRow[id]; sum_adj_x[threadIdx.x] += adjRow[id] * (yRow[id] - ((beta) ? beta[id] : 0)) / gamma[id]; sum_adj[threadIdx.x] += adjRow[id]; } } __syncthreads(); int len = blockDim.x; while(len != 1) { __syncthreads(); int skip = (len + 1) >> 1; if(threadIdx.x < (len >> 1)) { sum_x[threadIdx.x] += sum_x[threadIdx.x + skip]; sum_adj[threadIdx.x] += sum_adj[threadIdx.x + skip]; sum_adj_x[threadIdx.x] += sum_adj_x[threadIdx.x + skip]; } len = (len + 1) >> 1; } __syncthreads(); float mean = sum_x[0] / cols; __syncthreads(); for(int tid = 0; tid < cols; tid += blockDim.x) { int id = tid + threadIdx.x; if(id < cols) { float ex = xRow[id] - mean; sum_sqr[threadIdx.x] += ex * ex; } } __syncthreads(); len = blockDim.x; while(len != 1) { __syncthreads(); int skip = (len + 1) >> 1; if(threadIdx.x < (len >> 1)) sum_sqr[threadIdx.x] += sum_sqr[threadIdx.x + skip]; len = (len + 1) >> 1; } __syncthreads(); float sigma = sqrtf(eps + (sum_sqr[0] / cols)); __syncthreads(); for(int tid = 0; tid < cols; tid += blockDim.x) { int id = tid + threadIdx.x; if(id < cols) { float grad_x = 0.0f; float x_hat = (yRow[id] - ((beta) ? beta[id] : 0)) / gamma[id]; grad_x += cols * adjRow[id]; grad_x -= sum_adj[0]; grad_x -= sum_adj_x[0] * x_hat; grad_x /= (cols * sigma); float valX = gamma[id] * grad_x; float sign = (0.f < valX) - (valX < 0.f); valX = fabs(valX) > 1000 ? sign * 1000 : valX; gradXRow[id] += valX; atomicAdd(gradGamma + id, adjRow[id] * x_hat); if(beta) { atomicAdd(gradBeta + id, adjRow[id]); } } } } } } void LayerNormalizationGrad(Tensor gradX, Tensor gradGamma, Tensor gradBeta, Tensor adj, Tensor y, Tensor x, Tensor gamma, Tensor beta, float eps) { cudaSetDevice(adj->getDevice()); int rows = y->shape().elements() / y->shape().back(); int cols = y->shape().back(); int threads = std::min(MAX_THREADS, cols); int blocks = std::min(MAX_BLOCKS, rows); int shared = sizeof(float) * threads * 4; gLayerNormalizationGrad<<<blocks, threads, shared>>>( gradX->data(), gradGamma->data(), (gradBeta) ? gradBeta->data() : nullptr, adj->data(), y->data(), x->data(), gamma->data(), (beta) ? 
beta->data() : nullptr, rows, cols, eps); } __global__ void gShift(float* out, const float* in, int length, int offset) { for(int bid = 0; bid < length; bid += blockDim.x * gridDim.x) { int index = bid + blockDim.x * blockIdx.x + threadIdx.x; if(index < length) { if(index - offset < 0 || index - offset >= length) out[index] = 0; else out[index] = in[index - offset]; } } } void Shift(Tensor out, Tensor in, Shape shift, bool invert) { UTIL_THROW_IF2(in->shape().size() != shift.size(), "bad dimensions"); int offset = 0; for(int i = 0; i < shift.size(); ++i) offset += in->shape().stride(i) * shift[i]; if(invert) offset = -offset; cudaSetDevice(out->getDevice()); int length = out->shape().elements(); int threads = std::min(MAX_THREADS, length); int blocks = std::min(MAX_BLOCKS, length / threads + (length % threads != 0)); gShift<<<blocks, threads>>>(out->data(), in->data(), length, offset); } __global__ void gSetSparse(float* out, const size_t* indices, const float* values, int length) { for(int bid = 0; bid < length; bid += blockDim.x * gridDim.x) { int index = bid + blockDim.x * blockIdx.x + threadIdx.x; if(index < length) { out[indices[index]] = values[index]; } } } void SetSparse(float* out, const std::vector<size_t>& indices, const std::vector<float>& values) { int length = indices.size(); int threads = std::min(MAX_THREADS, length); int blocks = std::min(MAX_BLOCKS, length / threads + (length % threads != 0)); size_t* d_indices; CUDA_CHECK(cudaMalloc(&d_indices, length * sizeof(size_t))); CUDA_CHECK(cudaMemcpy(d_indices, indices.data(), length * sizeof(size_t), cudaMemcpyHostToDevice)); float* d_values; CUDA_CHECK(cudaMalloc(&d_values, length * sizeof(float))); CUDA_CHECK(cudaMemcpy( d_values, values.data(), length * sizeof(float), cudaMemcpyHostToDevice)); gSetSparse<<<blocks, threads>>>(out, d_indices, d_values, length); cudaFree(d_indices); cudaFree(d_values); } /******************************************************************************/ __global__ void gLSTMCellForward(float* out, const float* cell, const float* xW, const float* sU, const float* b, const float* mask, size_t rows, size_t cols) { for(int bid = 0; bid < rows; bid += gridDim.x) { int j = bid + blockIdx.x; if(j < rows) { float m = !mask || mask[j]; float* rowOut = out + j * cols; const float* rowCell = cell + j * cols; const float* xWrow = xW + j * cols * 4; const float* sUrow = sU + j * cols * 4; for(int tid = 0; tid < cols; tid += blockDim.x) { int i = tid + threadIdx.x; if(i < cols) { float gf = stableLogit(xWrow[i] + sUrow[i] + b[i]); int k = i + cols; float gi = stableLogit(xWrow[k] + sUrow[k] + b[k]); int l = i + 2 * cols; float gc = tanhf(xWrow[l] + sUrow[l] + b[l]); float cout = gf * rowCell[i] + gi * gc; rowOut[i] = m * cout + (1 - m) * rowCell[i]; } } } } } void LSTMCellForward(Tensor out, std::vector<Tensor> inputs) { cudaSetDevice(out->getDevice()); int rows = out->shape().elements() / out->shape().back(); int cols = out->shape().back(); int blocks = std::min(MAX_BLOCKS, rows); int threads = std::min(MAX_THREADS, cols); gLSTMCellForward<<<blocks, threads>>>( out->data(), // output inputs[0]->data(), // cell state inputs[1]->data(), // xW inputs[2]->data(), // sU inputs[3]->data(), // b inputs.size() > 4 ? 
inputs[4]->data() : 0, // mask rows, cols); } __global__ void gLSTMOutputForward(float* out, const float* cell, const float* xW, const float* sU, const float* b, size_t rows, size_t cols) { for(int bid = 0; bid < rows; bid += gridDim.x) { int j = bid + blockIdx.x; if(j < rows) { float* rowOut = out + j * cols; const float* rowCell = cell + j * cols; const float* xWrow = xW + j * cols * 4; const float* sUrow = sU + j * cols * 4; for(int tid = 0; tid < cols; tid += blockDim.x) { int i = tid + threadIdx.x; if(i < cols) { int k = i + 3 * cols; float go = stableLogit(xWrow[k] + sUrow[k] + b[k]); rowOut[i] = go * tanhf(rowCell[i]); } } } } } void LSTMOutputForward(Tensor out, std::vector<Tensor> inputs) { cudaSetDevice(out->getDevice()); int rows = out->shape().elements() / out->shape().back(); int cols = out->shape().back(); int blocks = std::min(MAX_BLOCKS, rows); int threads = std::min(MAX_THREADS, cols); gLSTMOutputForward<<<blocks, threads>>>(out->data(), // output inputs[0]->data(), // cell state inputs[1]->data(), // xW inputs[2]->data(), // sU inputs[3]->data(), // b rows, cols); } __global__ void gLSTMCellBackward(float* outCell, float* outXW, float* outSU, float* outB, const float* cell, const float* xW, const float* sU, const float* b, const float* mask, const float* adj, size_t rows, size_t cols) { for(int bid = 0; bid < rows; bid += gridDim.x) { int j = bid + blockIdx.x; if(j < rows) { float m = !mask || mask[j]; float* rowOutCell = outCell + j * cols; float* rowOutXW = outXW + j * cols * 4; float* rowOutSU = outSU + j * cols * 4; const float* rowCell = cell + j * cols; const float* xWrow = xW + j * cols * 4; const float* sUrow = sU + j * cols * 4; const float* rowAdj = adj + j * cols; for(int tid = 0; tid < cols; tid += blockDim.x) { int i = tid + threadIdx.x; if(i < cols) { float gf = stableLogit(xWrow[i] + sUrow[i] + b[i]); int k = i + cols; float gi = stableLogit(xWrow[k] + sUrow[k] + b[k]); int l = i + 2 * cols; float gc = tanhf(xWrow[l] + sUrow[l] + b[l]); float adj = rowAdj[i]; // dc/dc_{t-1} if(outCell) rowOutCell[i] += (m * gf - m + 1) * adj; // dc/d(b_f) = dc/d(xW_f) ... float dcdxf = m * rowCell[i] * gf * (1 - gf) * adj; if(outXW) rowOutXW[i] += dcdxf; if(outSU) rowOutSU[i] += dcdxf; if(outB) atomicAdd(outB + i, dcdxf); // dc/d(b_i) ... float dcdb_i = m * gc * gi * (1 - gi) * adj; if(outXW) rowOutXW[k] += dcdb_i; if(outSU) rowOutSU[k] += dcdb_i; if(outB) atomicAdd(outB + k, dcdb_i); // dc/d(b_c) ... float dcdxc = m * gi * (1 - gc * gc) * adj; if(outXW) rowOutXW[l] += dcdxc; if(outSU) rowOutSU[l] += dcdxc; if(outB) atomicAdd(outB + l, dcdxc); } } } } } void LSTMCellBackward(std::vector<Tensor> outputs, std::vector<Tensor> inputs, Tensor adj) { cudaSetDevice(adj->getDevice()); int rows = adj->shape().elements() / adj->shape().back(); int cols = adj->shape().back(); int blocks = std::min(MAX_BLOCKS, rows); int threads = std::min(MAX_THREADS, cols); gLSTMCellBackward<<<blocks, threads>>>( outputs[0] ? outputs[0]->data() : 0, // state - adj outputs[1] ? outputs[1]->data() : 0, // xW - adj outputs[2] ? outputs[2]->data() : 0, // sU - adj outputs[3] ? outputs[3]->data() : 0, // b - adj inputs[0]->data(), // state inputs[1]->data(), // xW inputs[2]->data(), // sU inputs[3]->data(), // b inputs.size() > 4 ? 
inputs[4]->data() : 0, // mask adj->data(), rows, cols); } __global__ void gLSTMOutputBackward(float* outCell, float* outXW, float* outSU, float* outB, const float* cell, const float* xW, const float* sU, const float* b, const float* adj, size_t rows, size_t cols) { for(int bid = 0; bid < rows; bid += gridDim.x) { int j = bid + blockIdx.x; if(j < rows) { float* rowOutCell = outCell + j * cols; float* rowOutXW = outXW + j * cols * 4; float* rowOutSU = outSU + j * cols * 4; const float* rowCell = cell + j * cols; const float* xWrow = xW + j * cols * 4; const float* sUrow = sU + j * cols * 4; const float* rowAdj = adj + j * cols; for(int tid = 0; tid < cols; tid += blockDim.x) { int i = tid + threadIdx.x; if(i < cols) { int k = i + 3 * cols; float go = stableLogit(xWrow[k] + sUrow[k] + b[k]); float t = tanhf(rowCell[i]); float adj = rowAdj[i]; // dc/dc_{t-1} if(outCell) rowOutCell[i] += go * (1 - t * t) * adj; // dc/d(b_o) = dc/d(xW_f) ... float dcdxo = t * go * (1 - go) * adj; if(outXW) rowOutXW[k] += dcdxo; if(outSU) rowOutSU[k] += dcdxo; if(outB) atomicAdd(outB + k, dcdxo); } } } } } void LSTMOutputBackward(std::vector<Tensor> outputs, std::vector<Tensor> inputs, Tensor adj) { cudaSetDevice(adj->getDevice()); int rows = adj->shape().elements() / adj->shape().back(); int cols = adj->shape().back(); int blocks = std::min(MAX_BLOCKS, rows); int threads = std::min(MAX_THREADS, cols); gLSTMOutputBackward<<<blocks, threads>>>( outputs[0] ? outputs[0]->data() : 0, // state - adj outputs[1] ? outputs[1]->data() : 0, // xW - adj outputs[2] ? outputs[2]->data() : 0, // sU - adj outputs[3] ? outputs[3]->data() : 0, // b - adj inputs[0]->data(), // state inputs[1]->data(), // xW inputs[2]->data(), // sU inputs[3]->data(), // b adj->data(), rows, cols); } __global__ void gHighwayForward(float* out, const float* in1, const float* in2, const float* t, size_t length) { for(int bid = 0; bid < length; bid += blockDim.x * gridDim.x) { int index = bid + blockDim.x * blockIdx.x + threadIdx.x; if(index < length) { float sigma = stableLogit(t[index]); out[index] = in1[index] * sigma + in2[index] * (1.f - sigma); } } } void HighwayForward(Tensor out, const Tensor in1, const Tensor in2, const Tensor t) { cudaSetDevice(out->getDevice()); int length = out->shape().elements(); int threads = std::min(MAX_THREADS, length); int blocks = std::min(MAX_BLOCKS, length / threads + (length % threads != 0)); gHighwayForward<<<blocks, threads>>>( out->data(), in1->data(), in2->data(), t->data(), length); } __global__ void gHighwayBackward(float* out1, float* out2, float* outt, const float* in1, const float* in2, const float* t, const float* adj, size_t length) { for(int bid = 0; bid < length; bid += blockDim.x * gridDim.x) { int index = bid + blockDim.x * blockIdx.x + threadIdx.x; if(index < length) { float sigma = stableLogit(t[index]); out1[index] = sigma * adj[index]; out2[index] = (1.f - sigma) * adj[index]; outt[index] = sigma * (1.f - sigma) * (in1[index] - in2[index]) * adj[index]; } } } void HighwayBackward(Tensor out1, Tensor out2, Tensor outt, const Tensor in1, const Tensor in2, const Tensor t, const Tensor adj) { cudaSetDevice(out1->getDevice()); int length = out1->shape().elements(); int threads = std::min(MAX_THREADS, length); int blocks = std::min(MAX_BLOCKS, length / threads + (length % threads != 0)); gHighwayBackward<<<blocks, threads>>>(out1->data(), out2->data(), outt->data(), in1->data(), in2->data(), t->data(), adj->data(), length); } __global__ void gMaxPoolingForward(float* out, int outRows, int outCols, 
float* in, int inRows, int inCols, float* mask, int numKernels, int maskCols, int width, int lastWidth) { int tid = threadIdx.x + blockIdx.x * blockDim.x; if (tid >= outRows * outCols) return; int rowId = tid / outRows; int colId = tid % outRows; float* b = in + (rowId * inCols) + (colId * width); float* localMask = mask + (rowId / numKernels) * maskCols + colId * width; if (colId == outRows - 1) { width = lastWidth; } float currentMax = b[0] * localMask[0]; for (int i = 1; i < width; ++i) { if (b[i] * localMask[i] > currentMax) { currentMax = b[i] * localMask[i]; } } out[rowId + (colId * outCols)] = currentMax; } void PoolingWithMaskingForward(Tensor out, Tensor in, Tensor mask, int width, bool isEven) { int n = out->shape().elements(); int threads = std::min(n, MAX_THREADS); int blocks = n / threads + (n % threads != 0); Shape& inShape = in->shape(); int inRows = inShape[0] * inShape[1]; int inCols = inShape[2]; Shape& outShape = out->shape(); int outRows = outShape[2]; int outCols = outShape[0] * outShape[1]; int lastWidth = ((inCols - isEven) % width == 0) ? width : (inCols - isEven) % width; gMaxPoolingForward<<<blocks, threads>>>( out->data(), outRows, outCols, in->data(), inRows, inCols, mask->data(), outShape[1], mask->shape()[2], width, lastWidth); } __global__ void gMaxPoolingBackward(float* adj, int adjRows, int adjCols, float* in, float* adjIn, int inRows, int inCols, float* mask, int numKernels, int maskCols, int width, int lastWidth) { int tid = threadIdx.x + blockIdx.x * blockDim.x; if (tid >= adjRows * adjCols) return; int rowId = tid / adjRows; int colId = tid % adjRows; float* b = in + (rowId * inCols) + (colId * width); if (colId == adjRows - 1) { width = lastWidth; } float* localMask = mask + (rowId / numKernels) * maskCols + colId * width; size_t currentMaxIdx = 0; for (int i = 1; i < width; ++i) { if (b[i] * localMask[i] > b[currentMaxIdx] * localMask[currentMaxIdx]) { currentMaxIdx = i; } } adjIn[(rowId * inCols) + (colId * width) + currentMaxIdx] += adj[rowId + (colId * adjCols)]; } void PoolingWithMaskingBackward(Tensor adj, Tensor adjIn, Tensor in, Tensor mask, int width, bool isEven) { int n = adj->shape().elements(); int threads = std::min(n, 512); int blocks = n / threads + (n % threads != 0); Shape& inShape = in->shape(); int inRows = inShape[0] * inShape[1]; int inCols = inShape[2]; Shape& adjShape = adj->shape(); int adjRows = adjShape[2]; int adjCols = adjShape[0] * adjShape[1]; int lastWidth = ((inCols - isEven) % width == 0) ? width : (inCols - isEven) % width; gMaxPoolingBackward<<<blocks, threads>>>( adj->data(), adjRows, adjCols, in->data(), adjIn->data(), inRows, inCols, mask->data(), adjShape[1], mask->shape()[2], width, lastWidth); } } // namespace marian
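The kernels in the Marian file above (gLNormalization, gLayerNormalizationGrad, gAtt, the LSTM kernels) share one launch convention: blocks grid-stride over rows, threads grid-stride over columns, and per-row sums go through a shared-memory tree reduction whose size is passed as the third launch argument (e.g. shared = 2 * threads * sizeof(float) for gLNormalization). Below is a minimal illustrative sketch of that pattern, reduced to computing per-row means; the kernel name gRowMean and its launch are hypothetical and are not part of the Marian sources.

__global__ void gRowMean(float* out, const float* in, int rows, int cols) {
  extern __shared__ float _sum[];  // one float per thread, sized at launch
  for(int bid = 0; bid < rows; bid += gridDim.x) {
    int j = bid + blockIdx.x;      // row handled by this block in this pass
    if(j < rows) {
      const float* sp = in + j * cols;
      _sum[threadIdx.x] = 0.0f;
      for(int tid = 0; tid < cols; tid += blockDim.x) {
        int id = tid + threadIdx.x;
        if(id < cols)
          _sum[threadIdx.x] += sp[id];
      }
      __syncthreads();
      int len = blockDim.x;        // same shared-memory tree reduction as gLNormalization
      while(len != 1) {
        __syncthreads();
        int skip = (len + 1) >> 1;
        if(threadIdx.x < (len >> 1))
          _sum[threadIdx.x] += _sum[threadIdx.x + skip];
        len = (len + 1) >> 1;
      }
      __syncthreads();
      if(threadIdx.x == 0)
        out[j] = _sum[0] / cols;
    }
  }
}

// Hypothetical host-side launch, mirroring the convention used in the file above:
//   int blocks  = std::min(MAX_BLOCKS, rows);
//   int threads = std::min(MAX_THREADS, cols);
//   gRowMean<<<blocks, threads, sizeof(float) * threads>>>(out, in, rows, cols);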
01ffb72ce168c0c9316def1714f0ca54c50145ee.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #pragma once #include "multiply.cuh" namespace pbw { namespace cuda { namespace kernels { namespace pointwise { __global__ void multiply(const float* __restrict__ A, const float* __restrict__ B, float* __restrict__ C, const size_t size) { const size_t grid = blockDim.x * blockIdx.x + threadIdx.x; const size_t stride = blockDim.x * gridDim.x; for (size_t index = grid; index < size; index += stride) { const float val_a = A[index]; const float val_b = B[index]; C[index] = val_a * val_b; } } __global__ void multiply(const double* __restrict__ A, const double* __restrict__ B, double* __restrict__ C, const size_t size) { const size_t grid = blockDim.x * blockIdx.x + threadIdx.x; const size_t stride = blockDim.x * gridDim.x; for (size_t index = grid; index < size; index += stride) { const double val_a = A[index]; const double val_b = B[index]; C[index] = val_a * val_b; } } } } } }
01ffb72ce168c0c9316def1714f0ca54c50145ee.cu
#pragma once #include "multiply.cuh" namespace pbw { namespace cuda { namespace kernels { namespace pointwise { __global__ void multiply(const float* __restrict__ A, const float* __restrict__ B, float* __restrict__ C, const size_t size) { const size_t grid = blockDim.x * blockIdx.x + threadIdx.x; const size_t stride = blockDim.x * gridDim.x; for (size_t index = grid; index < size; index += stride) { const float val_a = A[index]; const float val_b = B[index]; C[index] = val_a * val_b; } } __global__ void multiply(const double* __restrict__ A, const double* __restrict__ B, double* __restrict__ C, const size_t size) { const size_t grid = blockDim.x * blockIdx.x + threadIdx.x; const size_t stride = blockDim.x * gridDim.x; for (size_t index = grid; index < size; index += stride) { const double val_a = A[index]; const double val_b = B[index]; C[index] = val_a * val_b; } } } } } }
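A possible host-side driver for the grid-stride multiply kernels above is sketched here. It is illustrative only and not part of the repository: the problem size and launch geometry are arbitrary, and it assumes the float overload declared in multiply.cuh is visible at the call site.

#include <cuda_runtime.h>
#include <vector>
#include "multiply.cuh"   // assumed to declare the kernels shown above

int main() {
  const size_t n = 1 << 20;
  std::vector<float> a(n, 2.0f), b(n, 3.0f), c(n, 0.0f);
  float *dA = nullptr, *dB = nullptr, *dC = nullptr;
  cudaMalloc(&dA, n * sizeof(float));
  cudaMalloc(&dB, n * sizeof(float));
  cudaMalloc(&dC, n * sizeof(float));
  cudaMemcpy(dA, a.data(), n * sizeof(float), cudaMemcpyHostToDevice);
  cudaMemcpy(dB, b.data(), n * sizeof(float), cudaMemcpyHostToDevice);
  const int threads = 256;                          // the grid-stride loop tolerates any grid size
  const int blocks = (int)((n + threads - 1) / threads);
  pbw::cuda::kernels::pointwise::multiply<<<blocks, threads>>>(dA, dB, dC, n);
  cudaMemcpy(c.data(), dC, n * sizeof(float), cudaMemcpyDeviceToHost);
  cudaFree(dA); cudaFree(dB); cudaFree(dC);
  return 0;                                         // c[i] == 6.0f for every i
}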
408e1112c78edc61db43fea897360a204460ad95.hip
// !!! This is a file automatically generated by hipify!!! #include "THHUNN.h" #include "common.h" #include "im2col.h" void THNN_CudaSpatialConvolutionMM_updateOutput(THCState *state, THCudaTensor *input, THCudaTensor *output, THCudaTensor *weight, THCudaTensor *bias, THCudaTensor *columns, THCudaTensor *ones, int kW, int kH, int dW, int dH, int padW, int padH) { THCUNN_assertSameGPU(state, 5, input, output, weight, columns, ones); if (bias) { THCUNN_assertSameGPU(state, 2, weight, bias); } THArgCheck(input->nDimension == 3 || input->nDimension == 4, 2, "3D or 4D (batch mode) tensor is expected"); THArgCheck(weight->nDimension == 2, 4, "weight tensor must be 2D (nOutputPlane,nInputPlane*kH*kW)"); THArgCheck(!bias || weight->size[0] == bias->size[0], 4, "nOutputPlane mismatch in weight and bias"); THArgCheck(kW > 0 && kH > 0, 8, "kernel size should be greater than zero"); THArgCheck(dW > 0 && dH > 0, 10, "stride should be greater than zero"); // Params: int nInputPlane = weight->size[1]/(kH*kW); int nOutputPlane = weight->size[0]; int batch = 1; if (input->nDimension == 3) { THArgCheck(input->size[0] == nInputPlane, 2, "input channels and nInputPlane dont match"); // Force batch batch = 0; THCudaTensor_resize4d(state, input, 1, input->size[0], input->size[1], input->size[2]); } else { THArgCheck(input->size[1] == nInputPlane, 2, "input channels and nInputPlane dont match"); } long inputWidth = input->size[3]; long inputHeight = input->size[2]; long outputWidth = (inputWidth + 2*padW - kW) / dW + 1; long outputHeight = (inputHeight + 2*padH - kH) / dH + 1; if (outputWidth < 1 || outputHeight < 1) THError("Given input size: (%dx%dx%d). Calculated output size: (%dx%dx%d). Output size is too small", nInputPlane,inputHeight,inputWidth,nOutputPlane,outputHeight,outputWidth); // Batch size + input planes long batchSize = input->size[0]; // Resize output THCudaTensor_resize4d(state, output, batchSize, nOutputPlane, outputHeight, outputWidth); // Resize temporary columns THCudaTensor_resize2d(state, columns, nInputPlane*kW*kH, outputHeight*outputWidth); // Define a buffer of ones, for bias accumulation // Note: this buffer can be shared with other modules, it only ever gets increased, // and always contains ones. if (ones->nDimension != 2 || ones->size[0]*ones->size[1] < outputHeight*outputWidth) { // Resize plane and fill with ones... 
THCudaTensor_resize2d(state, ones, outputHeight, outputWidth); THCudaTensor_fill(state, ones, 1); } // Helpers THCudaTensor *input_n = THCudaTensor_new(state); THCudaTensor *output_n = THCudaTensor_new(state); // For each elt in batch, do: for (int elt = 0; elt < batchSize; elt ++) { // Matrix mulitply per output: THCudaTensor_select(state, input_n, input, 0, elt); THCudaTensor_select(state, output_n, output, 0, elt); // Do Bias first: // M,N,K are dims of matrix A and B // (see http://docs.nvidia.com/cuda/cublas/#cublas-lt-t-gt-gemm) long m_ = nOutputPlane; long n_ = outputHeight * outputWidth; long k_ = 1; // Do GEMM (note: this is a bit confusing because gemm assumes column-major matrices) if (bias) { THCudaBlas_Sgemm( state, 't', 'n', n_, m_, k_, 1, THCudaTensor_data(state, ones), k_, THCudaTensor_data(state, bias), k_, 0, THCudaTensor_data(state, output_n), n_ ); } else { THCudaTensor_zero(state, output_n); } // Extract columns: im2col( THCState_getCurrentStream(state), THCudaTensor_data(state, input_n), nInputPlane, inputHeight, inputWidth, kH, kW, padH, padW, dH, dW, 1, 1, THCudaTensor_data(state, columns) ); // M,N,K are dims of matrix A and B // (see http://docs.nvidia.com/cuda/cublas/#cublas-lt-t-gt-gemm) long m = nOutputPlane; long n = columns->size[1]; long k = nInputPlane*kH*kW; // Do GEMM (note: this is a bit confusing because gemm assumes column-major matrices) THCudaBlas_Sgemm( state, 'n', 'n', n, m, k, 1, THCudaTensor_data(state, columns), n, THCudaTensor_data(state, weight), k, 1, THCudaTensor_data(state, output_n), n ); } // Free THCudaTensor_free(state, input_n); THCudaTensor_free(state, output_n); // Resize output if (batch == 0) { THCudaTensor_resize3d(state, output, nOutputPlane, outputHeight, outputWidth); THCudaTensor_resize3d(state, input, nInputPlane, inputHeight, inputWidth); } } void THNN_CudaSpatialConvolutionMM_updateGradInput(THCState *state, THCudaTensor *input, THCudaTensor *gradOutput, THCudaTensor *gradInput, THCudaTensor *weight, THCudaTensor *gradColumns, THCudaTensor *ones, int kW, int kH, int dW, int dH, int padW, int padH) { THCUNN_assertSameGPU(state, 5, input, gradOutput, weight, gradColumns, gradInput); THArgCheck(input->nDimension == 3 || input->nDimension == 4, 2, "3D or 4D (batch mode) tensor is expected"); THArgCheck(weight->nDimension == 2, 4, "weight tensor must be 2D (nOutputPlane,nInputPlane*kH*kW)"); THArgCheck(kW > 0 && kH > 0, 9, "kernel size should be greater than zero"); THArgCheck(dW > 0 && dH > 0, 11, "stride should be greater than zero"); // Params int nInputPlane = weight->size[1]/(kW*kH); int nOutputPlane = weight->size[0]; int batch = 1; if (input->nDimension == 3) { // Force batch batch = 0; THCudaTensor_resize4d(state, input, 1, input->size[0], input->size[1], input->size[2]); THCudaTensor_resize4d(state, gradOutput, 1, gradOutput->size[0], gradOutput->size[1], gradOutput->size[2]); } long inputWidth = input->size[3]; long inputHeight = input->size[2]; long outputWidth = (inputWidth + 2*padW - kW) / dW + 1; long outputHeight = (inputHeight + 2*padH - kH) / dH + 1; // Batch size + input planes long batchSize = input->size[0]; // Resize output THCudaTensor_resize4d(state, gradInput, batchSize, nInputPlane, inputHeight, inputWidth); // Resize temporary columns THCudaTensor_resize2d(state, gradColumns, nInputPlane*kW*kH, outputHeight*outputWidth); // Helpers THCudaTensor *gradInput_n = THCudaTensor_new(state); THCudaTensor *gradOutput_n = THCudaTensor_new(state); // For each elt in batch, do: for (int elt = 0; elt < batchSize; 
elt ++) { // Matrix mulitply per sample: THCudaTensor_select(state, gradInput_n, gradInput, 0, elt); THCudaTensor_select(state, gradOutput_n, gradOutput, 0, elt); // M,N,K are dims of matrix A and B // (see http://docs.nvidia.com/cuda/cublas/#cublas-lt-t-gt-gemm) long m = nInputPlane*kW*kH; long n = gradColumns->size[1]; long k = nOutputPlane; // Do GEMM (note: this is a bit confusing because gemm assumes column-major matrices) THCudaBlas_Sgemm( state, 'n', 't', n, m, k, 1, THCudaTensor_data(state, gradOutput_n), n, THCudaTensor_data(state, weight), m, 0, THCudaTensor_data(state, gradColumns), n ); // Unpack columns back into input: col2im( THCState_getCurrentStream(state), THCudaTensor_data(state, gradColumns), nInputPlane, inputHeight, inputWidth, kH, kW, padH, padW, dH, dW, 1, 1, THCudaTensor_data(state, gradInput_n) ); } // Free THCudaTensor_free(state, gradInput_n); THCudaTensor_free(state, gradOutput_n); // Resize output if (batch == 0) { THCudaTensor_resize3d(state, gradOutput, nOutputPlane, outputHeight, outputWidth); THCudaTensor_resize3d(state, input, nInputPlane, inputHeight, inputWidth); THCudaTensor_resize3d(state, gradInput, nInputPlane, inputHeight, inputWidth); } } void THNN_CudaSpatialConvolutionMM_accGradParameters(THCState *state, THCudaTensor *input, THCudaTensor *gradOutput, THCudaTensor *gradWeight, THCudaTensor *gradBias, THCudaTensor *columns, THCudaTensor *ones, int kW, int kH, int dW, int dH, int padW, int padH, float scale) { THCUNN_assertSameGPU(state, 5, input, gradOutput, gradWeight, columns, ones); if (gradBias) { THCUNN_assertSameGPU(state, 2, gradWeight, gradBias); } THArgCheck(input->nDimension == 3 || input->nDimension == 4, 2, "3D or 4D (batch mode) tensor is expected"); THArgCheck(gradWeight->nDimension == 2, 4, "gradWeight tensor must be 2D (nOutputPlane,nInputPlane*kH*kW)"); THArgCheck(!gradBias || gradWeight->size[0] == gradBias->size[0], 4, "nOutputPlane mismatch in gradWeight and gradBias"); THArgCheck(kW > 0 && kH > 0, 8, "kernel size should be greater than zero"); THArgCheck(dW > 0 && dH > 0, 10, "stride should be greater than zero"); // Params int nInputPlane = gradWeight->size[1]/(kW*kH); int nOutputPlane = gradWeight->size[0]; int batch = 1; if (input->nDimension == 3) { // Force batch batch = 0; THCudaTensor_resize4d(state, input, 1, input->size[0], input->size[1], input->size[2]); THCudaTensor_resize4d(state, gradOutput, 1, gradOutput->size[0], gradOutput->size[1], gradOutput->size[2]); } long inputWidth = input->size[3]; long inputHeight = input->size[2]; long outputWidth = (inputWidth + 2*padW - kW) / dW + 1; long outputHeight = (inputHeight + 2*padH - kH) / dH + 1; // Batch size + input planes long batchSize = input->size[0]; // Define a buffer of ones, for bias accumulation if (ones->nDimension != 2 || ones->size[0]*ones->size[1] < outputHeight*outputWidth) { // Resize plane and fill with ones... 
THCudaTensor_resize2d(state, ones, outputHeight, outputWidth); THCudaTensor_fill(state, ones, 1); } // Resize temporary columns THCudaTensor_resize2d(state, columns, nInputPlane*kW*kH, outputHeight*outputWidth); // Helpers THCudaTensor *input_n = THCudaTensor_new(state); THCudaTensor *gradOutput_n = THCudaTensor_new(state); // For each elt in batch, do: for (int elt = 0; elt < batchSize; elt ++) { // Matrix mulitply per output: THCudaTensor_select(state, input_n, input, 0, elt); THCudaTensor_select(state, gradOutput_n, gradOutput, 0, elt); // Extract columns: im2col( THCState_getCurrentStream(state), THCudaTensor_data(state, input_n), nInputPlane, inputHeight, inputWidth, kH, kW, padH, padW, dH, dW, 1, 1, THCudaTensor_data(state, columns) ); // M,N,K are dims of matrix A and B // (see http://docs.nvidia.com/cuda/cublas/#cublas-lt-t-gt-gemm) long m = nOutputPlane; long n = nInputPlane*kW*kH; long k = columns->size[1]; // Do GEMM (note: this is a bit confusing because gemm assumes column-major matrices) THCudaBlas_Sgemm( state, 't', 'n', n, m, k, scale, THCudaTensor_data(state, columns), k, THCudaTensor_data(state, gradOutput_n), k, 1, THCudaTensor_data(state, gradWeight), n ); // Do Bias: // M,N,K are dims of matrix A and B // (see http://docs.nvidia.com/cuda/cublas/#cublas-lt-t-gt-gemm) long m_ = nOutputPlane; long k_ = outputHeight * outputWidth; // Do GEMV (note: this is a bit confusing because gemv assumes column-major matrices) if (gradBias) { THCudaBlas_Sgemv( state, 't', k_, m_, scale, THCudaTensor_data(state, gradOutput_n), k_, THCudaTensor_data(state, ones), 1, 1, THCudaTensor_data(state, gradBias), 1 ); } } // Free THCudaTensor_free(state, input_n); THCudaTensor_free(state, gradOutput_n); // Resize if (batch == 0) { THCudaTensor_resize3d(state, gradOutput, nOutputPlane, outputHeight, outputWidth); THCudaTensor_resize3d(state, input, nInputPlane, inputHeight, inputWidth); } }
408e1112c78edc61db43fea897360a204460ad95.cu
#include "THCUNN.h" #include "common.h" #include "im2col.h" void THNN_CudaSpatialConvolutionMM_updateOutput(THCState *state, THCudaTensor *input, THCudaTensor *output, THCudaTensor *weight, THCudaTensor *bias, THCudaTensor *columns, THCudaTensor *ones, int kW, int kH, int dW, int dH, int padW, int padH) { THCUNN_assertSameGPU(state, 5, input, output, weight, columns, ones); if (bias) { THCUNN_assertSameGPU(state, 2, weight, bias); } THArgCheck(input->nDimension == 3 || input->nDimension == 4, 2, "3D or 4D (batch mode) tensor is expected"); THArgCheck(weight->nDimension == 2, 4, "weight tensor must be 2D (nOutputPlane,nInputPlane*kH*kW)"); THArgCheck(!bias || weight->size[0] == bias->size[0], 4, "nOutputPlane mismatch in weight and bias"); THArgCheck(kW > 0 && kH > 0, 8, "kernel size should be greater than zero"); THArgCheck(dW > 0 && dH > 0, 10, "stride should be greater than zero"); // Params: int nInputPlane = weight->size[1]/(kH*kW); int nOutputPlane = weight->size[0]; int batch = 1; if (input->nDimension == 3) { THArgCheck(input->size[0] == nInputPlane, 2, "input channels and nInputPlane dont match"); // Force batch batch = 0; THCudaTensor_resize4d(state, input, 1, input->size[0], input->size[1], input->size[2]); } else { THArgCheck(input->size[1] == nInputPlane, 2, "input channels and nInputPlane dont match"); } long inputWidth = input->size[3]; long inputHeight = input->size[2]; long outputWidth = (inputWidth + 2*padW - kW) / dW + 1; long outputHeight = (inputHeight + 2*padH - kH) / dH + 1; if (outputWidth < 1 || outputHeight < 1) THError("Given input size: (%dx%dx%d). Calculated output size: (%dx%dx%d). Output size is too small", nInputPlane,inputHeight,inputWidth,nOutputPlane,outputHeight,outputWidth); // Batch size + input planes long batchSize = input->size[0]; // Resize output THCudaTensor_resize4d(state, output, batchSize, nOutputPlane, outputHeight, outputWidth); // Resize temporary columns THCudaTensor_resize2d(state, columns, nInputPlane*kW*kH, outputHeight*outputWidth); // Define a buffer of ones, for bias accumulation // Note: this buffer can be shared with other modules, it only ever gets increased, // and always contains ones. if (ones->nDimension != 2 || ones->size[0]*ones->size[1] < outputHeight*outputWidth) { // Resize plane and fill with ones... 
THCudaTensor_resize2d(state, ones, outputHeight, outputWidth); THCudaTensor_fill(state, ones, 1); } // Helpers THCudaTensor *input_n = THCudaTensor_new(state); THCudaTensor *output_n = THCudaTensor_new(state); // For each elt in batch, do: for (int elt = 0; elt < batchSize; elt ++) { // Matrix mulitply per output: THCudaTensor_select(state, input_n, input, 0, elt); THCudaTensor_select(state, output_n, output, 0, elt); // Do Bias first: // M,N,K are dims of matrix A and B // (see http://docs.nvidia.com/cuda/cublas/#cublas-lt-t-gt-gemm) long m_ = nOutputPlane; long n_ = outputHeight * outputWidth; long k_ = 1; // Do GEMM (note: this is a bit confusing because gemm assumes column-major matrices) if (bias) { THCudaBlas_Sgemm( state, 't', 'n', n_, m_, k_, 1, THCudaTensor_data(state, ones), k_, THCudaTensor_data(state, bias), k_, 0, THCudaTensor_data(state, output_n), n_ ); } else { THCudaTensor_zero(state, output_n); } // Extract columns: im2col( THCState_getCurrentStream(state), THCudaTensor_data(state, input_n), nInputPlane, inputHeight, inputWidth, kH, kW, padH, padW, dH, dW, 1, 1, THCudaTensor_data(state, columns) ); // M,N,K are dims of matrix A and B // (see http://docs.nvidia.com/cuda/cublas/#cublas-lt-t-gt-gemm) long m = nOutputPlane; long n = columns->size[1]; long k = nInputPlane*kH*kW; // Do GEMM (note: this is a bit confusing because gemm assumes column-major matrices) THCudaBlas_Sgemm( state, 'n', 'n', n, m, k, 1, THCudaTensor_data(state, columns), n, THCudaTensor_data(state, weight), k, 1, THCudaTensor_data(state, output_n), n ); } // Free THCudaTensor_free(state, input_n); THCudaTensor_free(state, output_n); // Resize output if (batch == 0) { THCudaTensor_resize3d(state, output, nOutputPlane, outputHeight, outputWidth); THCudaTensor_resize3d(state, input, nInputPlane, inputHeight, inputWidth); } } void THNN_CudaSpatialConvolutionMM_updateGradInput(THCState *state, THCudaTensor *input, THCudaTensor *gradOutput, THCudaTensor *gradInput, THCudaTensor *weight, THCudaTensor *gradColumns, THCudaTensor *ones, int kW, int kH, int dW, int dH, int padW, int padH) { THCUNN_assertSameGPU(state, 5, input, gradOutput, weight, gradColumns, gradInput); THArgCheck(input->nDimension == 3 || input->nDimension == 4, 2, "3D or 4D (batch mode) tensor is expected"); THArgCheck(weight->nDimension == 2, 4, "weight tensor must be 2D (nOutputPlane,nInputPlane*kH*kW)"); THArgCheck(kW > 0 && kH > 0, 9, "kernel size should be greater than zero"); THArgCheck(dW > 0 && dH > 0, 11, "stride should be greater than zero"); // Params int nInputPlane = weight->size[1]/(kW*kH); int nOutputPlane = weight->size[0]; int batch = 1; if (input->nDimension == 3) { // Force batch batch = 0; THCudaTensor_resize4d(state, input, 1, input->size[0], input->size[1], input->size[2]); THCudaTensor_resize4d(state, gradOutput, 1, gradOutput->size[0], gradOutput->size[1], gradOutput->size[2]); } long inputWidth = input->size[3]; long inputHeight = input->size[2]; long outputWidth = (inputWidth + 2*padW - kW) / dW + 1; long outputHeight = (inputHeight + 2*padH - kH) / dH + 1; // Batch size + input planes long batchSize = input->size[0]; // Resize output THCudaTensor_resize4d(state, gradInput, batchSize, nInputPlane, inputHeight, inputWidth); // Resize temporary columns THCudaTensor_resize2d(state, gradColumns, nInputPlane*kW*kH, outputHeight*outputWidth); // Helpers THCudaTensor *gradInput_n = THCudaTensor_new(state); THCudaTensor *gradOutput_n = THCudaTensor_new(state); // For each elt in batch, do: for (int elt = 0; elt < batchSize; 
elt ++) { // Matrix mulitply per sample: THCudaTensor_select(state, gradInput_n, gradInput, 0, elt); THCudaTensor_select(state, gradOutput_n, gradOutput, 0, elt); // M,N,K are dims of matrix A and B // (see http://docs.nvidia.com/cuda/cublas/#cublas-lt-t-gt-gemm) long m = nInputPlane*kW*kH; long n = gradColumns->size[1]; long k = nOutputPlane; // Do GEMM (note: this is a bit confusing because gemm assumes column-major matrices) THCudaBlas_Sgemm( state, 'n', 't', n, m, k, 1, THCudaTensor_data(state, gradOutput_n), n, THCudaTensor_data(state, weight), m, 0, THCudaTensor_data(state, gradColumns), n ); // Unpack columns back into input: col2im( THCState_getCurrentStream(state), THCudaTensor_data(state, gradColumns), nInputPlane, inputHeight, inputWidth, kH, kW, padH, padW, dH, dW, 1, 1, THCudaTensor_data(state, gradInput_n) ); } // Free THCudaTensor_free(state, gradInput_n); THCudaTensor_free(state, gradOutput_n); // Resize output if (batch == 0) { THCudaTensor_resize3d(state, gradOutput, nOutputPlane, outputHeight, outputWidth); THCudaTensor_resize3d(state, input, nInputPlane, inputHeight, inputWidth); THCudaTensor_resize3d(state, gradInput, nInputPlane, inputHeight, inputWidth); } } void THNN_CudaSpatialConvolutionMM_accGradParameters(THCState *state, THCudaTensor *input, THCudaTensor *gradOutput, THCudaTensor *gradWeight, THCudaTensor *gradBias, THCudaTensor *columns, THCudaTensor *ones, int kW, int kH, int dW, int dH, int padW, int padH, float scale) { THCUNN_assertSameGPU(state, 5, input, gradOutput, gradWeight, columns, ones); if (gradBias) { THCUNN_assertSameGPU(state, 2, gradWeight, gradBias); } THArgCheck(input->nDimension == 3 || input->nDimension == 4, 2, "3D or 4D (batch mode) tensor is expected"); THArgCheck(gradWeight->nDimension == 2, 4, "gradWeight tensor must be 2D (nOutputPlane,nInputPlane*kH*kW)"); THArgCheck(!gradBias || gradWeight->size[0] == gradBias->size[0], 4, "nOutputPlane mismatch in gradWeight and gradBias"); THArgCheck(kW > 0 && kH > 0, 8, "kernel size should be greater than zero"); THArgCheck(dW > 0 && dH > 0, 10, "stride should be greater than zero"); // Params int nInputPlane = gradWeight->size[1]/(kW*kH); int nOutputPlane = gradWeight->size[0]; int batch = 1; if (input->nDimension == 3) { // Force batch batch = 0; THCudaTensor_resize4d(state, input, 1, input->size[0], input->size[1], input->size[2]); THCudaTensor_resize4d(state, gradOutput, 1, gradOutput->size[0], gradOutput->size[1], gradOutput->size[2]); } long inputWidth = input->size[3]; long inputHeight = input->size[2]; long outputWidth = (inputWidth + 2*padW - kW) / dW + 1; long outputHeight = (inputHeight + 2*padH - kH) / dH + 1; // Batch size + input planes long batchSize = input->size[0]; // Define a buffer of ones, for bias accumulation if (ones->nDimension != 2 || ones->size[0]*ones->size[1] < outputHeight*outputWidth) { // Resize plane and fill with ones... 
THCudaTensor_resize2d(state, ones, outputHeight, outputWidth); THCudaTensor_fill(state, ones, 1); } // Resize temporary columns THCudaTensor_resize2d(state, columns, nInputPlane*kW*kH, outputHeight*outputWidth); // Helpers THCudaTensor *input_n = THCudaTensor_new(state); THCudaTensor *gradOutput_n = THCudaTensor_new(state); // For each elt in batch, do: for (int elt = 0; elt < batchSize; elt ++) { // Matrix mulitply per output: THCudaTensor_select(state, input_n, input, 0, elt); THCudaTensor_select(state, gradOutput_n, gradOutput, 0, elt); // Extract columns: im2col( THCState_getCurrentStream(state), THCudaTensor_data(state, input_n), nInputPlane, inputHeight, inputWidth, kH, kW, padH, padW, dH, dW, 1, 1, THCudaTensor_data(state, columns) ); // M,N,K are dims of matrix A and B // (see http://docs.nvidia.com/cuda/cublas/#cublas-lt-t-gt-gemm) long m = nOutputPlane; long n = nInputPlane*kW*kH; long k = columns->size[1]; // Do GEMM (note: this is a bit confusing because gemm assumes column-major matrices) THCudaBlas_Sgemm( state, 't', 'n', n, m, k, scale, THCudaTensor_data(state, columns), k, THCudaTensor_data(state, gradOutput_n), k, 1, THCudaTensor_data(state, gradWeight), n ); // Do Bias: // M,N,K are dims of matrix A and B // (see http://docs.nvidia.com/cuda/cublas/#cublas-lt-t-gt-gemm) long m_ = nOutputPlane; long k_ = outputHeight * outputWidth; // Do GEMV (note: this is a bit confusing because gemv assumes column-major matrices) if (gradBias) { THCudaBlas_Sgemv( state, 't', k_, m_, scale, THCudaTensor_data(state, gradOutput_n), k_, THCudaTensor_data(state, ones), 1, 1, THCudaTensor_data(state, gradBias), 1 ); } } // Free THCudaTensor_free(state, input_n); THCudaTensor_free(state, gradOutput_n); // Resize if (batch == 0) { THCudaTensor_resize3d(state, gradOutput, nOutputPlane, outputHeight, outputWidth); THCudaTensor_resize3d(state, input, nInputPlane, inputHeight, inputWidth); } }
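For reference, the shape bookkeeping behind the im2col + GEMM formulation in the file above can be captured in a small host-side helper. This is an illustrative sketch with a hypothetical name, not a THCUNN API; it only restates the formulas already used in updateOutput, updateGradInput, and accGradParameters.

// Hypothetical helper: reproduces the shape arithmetic of SpatialConvolutionMM.
struct ConvMMShapes {
  long outputHeight;
  long outputWidth;
  long columnsRows;   // rows of the im2col buffer
  long columnsCols;   // columns of the im2col buffer
};

static ConvMMShapes convMMShapes(long nInputPlane, long inputHeight, long inputWidth,
                                 int kH, int kW, int dH, int dW, int padH, int padW) {
  ConvMMShapes s;
  s.outputHeight = (inputHeight + 2 * padH - kH) / dH + 1;  // same formula as in the code above
  s.outputWidth  = (inputWidth  + 2 * padW - kW) / dW + 1;
  s.columnsRows  = nInputPlane * kW * kH;                   // one row per (plane, kernel offset)
  s.columnsCols  = s.outputHeight * s.outputWidth;          // one column per output position
  return s;
}
// The forward GEMM multiplies the (nOutputPlane x nInputPlane*kH*kW) weight matrix by this
// (columnsRows x columnsCols) buffer, giving nOutputPlane x outputHeight*outputWidth values
// per batch element, which matches the output tensor resized in updateOutput.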
19d34989e633af3ad5b3fc145cebfefb6cf165d4.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <cmath> #include <vector> #include <iostream> #include "TimerGuard.h" __global__ void matMulVecKernel(float* resultVec, float* mat, float* vec, size_t n, size_t m) { size_t idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx < n) { float t = 0.0; for (size_t i = 0; i < m; ++i) { t = t + mat[idx * m + i] * vec[i]; } resultVec[idx] = t; } } void matMulVecGPU(float* h_res, float* h_mat, float* h_vec, size_t n, size_t m) { float* d_mat; hipError_t err; if ((err = hipMalloc((void**)&d_mat, n * m * sizeof(float))) != hipSuccess) { std::cerr << "cannot alloc mem for mat\n"; std::exit(1); } if((err = hipMemcpy(d_mat, h_mat, n * m * sizeof(float), hipMemcpyHostToDevice)) != hipSuccess) { std::cerr << "cannot memcpy mat to device\n"; std::exit(1); } float* d_vec; hipMalloc((void**)&d_vec, m * sizeof(float)); hipMemcpy(d_vec, h_vec, m * sizeof(float), hipMemcpyHostToDevice); float* d_res; hipMalloc((void**)&d_res, n * sizeof(float)); { TimerGuard tg("only the kernel: "); hipLaunchKernelGGL(( matMulVecKernel), dim3(::ceil(n/1024.0)), dim3(1024), 0, 0, d_res, d_mat, d_vec, n, m); std::cerr << "nonsense\n"; } hipMemcpy(h_res, d_res, n * sizeof(float), hipMemcpyDeviceToHost); hipFree(d_mat); hipFree(d_vec); hipFree(d_res); } void matMulVecCPU(float* h_res, float* h_mat, float* h_vec, size_t n, size_t m) { for (size_t i = 0; i < n; ++i) { h_res[i] = 0.0; for (size_t j = 0; j < m; ++j) { h_res[i] += h_mat[i * m + j] * h_vec[j]; } } } template <typename Cont> bool nearlyEq(Cont const& c1, Cont const& c2) // requires c1.size() c1[] { for (size_t i = 0; i < c1.size(); ++i) { if (std::abs(c1[i] - c2[i]) > 0.01) return false; } return true; } int main() { constexpr size_t n = 20000, m = 50000; std::vector<float> mat(n * m, 1.1f); std::vector<float> vec(m, 2.2f); std::vector<float> res1(n, 0.0f); std::vector<float> res2(n, 0.0f); { TimerGuard tg("gpu: "); matMulVecGPU(res1.data(), mat.data(), vec.data(), n, m); } { TimerGuard tg("cpu: "); matMulVecCPU(res2.data(), mat.data(), vec.data(), n, m); } if (!nearlyEq(res1, res2)) { std::cout << "result not eq\n"; } }
19d34989e633af3ad5b3fc145cebfefb6cf165d4.cu
#include <cmath> #include <vector> #include <iostream> #include "TimerGuard.h" __global__ void matMulVecKernel(float* resultVec, float* mat, float* vec, size_t n, size_t m) { size_t idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx < n) { float t = 0.0; for (size_t i = 0; i < m; ++i) { t = t + mat[idx * m + i] * vec[i]; } resultVec[idx] = t; } } void matMulVecGPU(float* h_res, float* h_mat, float* h_vec, size_t n, size_t m) { float* d_mat; cudaError_t err; if ((err = cudaMalloc((void**)&d_mat, n * m * sizeof(float))) != cudaSuccess) { std::cerr << "cannot alloc mem for mat\n"; std::exit(1); } if((err = cudaMemcpy(d_mat, h_mat, n * m * sizeof(float), cudaMemcpyHostToDevice)) != cudaSuccess) { std::cerr << "cannot memcpy mat to device\n"; std::exit(1); } float* d_vec; cudaMalloc((void**)&d_vec, m * sizeof(float)); cudaMemcpy(d_vec, h_vec, m * sizeof(float), cudaMemcpyHostToDevice); float* d_res; cudaMalloc((void**)&d_res, n * sizeof(float)); { TimerGuard tg("only the kernel: "); matMulVecKernel<<<std::ceil(n/1024.0), 1024>>>(d_res, d_mat, d_vec, n, m); std::cerr << "nonsense\n"; } cudaMemcpy(h_res, d_res, n * sizeof(float), cudaMemcpyDeviceToHost); cudaFree(d_mat); cudaFree(d_vec); cudaFree(d_res); } void matMulVecCPU(float* h_res, float* h_mat, float* h_vec, size_t n, size_t m) { for (size_t i = 0; i < n; ++i) { h_res[i] = 0.0; for (size_t j = 0; j < m; ++j) { h_res[i] += h_mat[i * m + j] * h_vec[j]; } } } template <typename Cont> bool nearlyEq(Cont const& c1, Cont const& c2) // requires c1.size() c1[] { for (size_t i = 0; i < c1.size(); ++i) { if (std::abs(c1[i] - c2[i]) > 0.01) return false; } return true; } int main() { constexpr size_t n = 20000, m = 50000; std::vector<float> mat(n * m, 1.1f); std::vector<float> vec(m, 2.2f); std::vector<float> res1(n, 0.0f); std::vector<float> res2(n, 0.0f); { TimerGuard tg("gpu: "); matMulVecGPU(res1.data(), mat.data(), vec.data(), n, m); } { TimerGuard tg("cpu: "); matMulVecCPU(res2.data(), mat.data(), vec.data(), n, m); } if (!nearlyEq(res1, res2)) { std::cout << "result not eq\n"; } }
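The driver above checks cudaMalloc and the first cudaMemcpy, but not the kernel launch itself. One way to surface launch and execution errors, shown here as a hedged sketch rather than a change to the original file, is to query cudaGetLastError right after the launch and cudaDeviceSynchronize afterwards:

// Hypothetical error-checking variant of the launch inside matMulVecGPU.
matMulVecKernel<<<(unsigned)std::ceil(n / 1024.0), 1024>>>(d_res, d_mat, d_vec, n, m);
cudaError_t launchErr = cudaGetLastError();        // invalid-configuration / launch errors
if (launchErr != cudaSuccess) {
  std::cerr << "kernel launch failed: " << cudaGetErrorString(launchErr) << '\n';
  std::exit(1);
}
cudaError_t execErr = cudaDeviceSynchronize();     // errors raised while the kernel runs
if (execErr != cudaSuccess) {
  std::cerr << "kernel execution failed: " << cudaGetErrorString(execErr) << '\n';
  std::exit(1);
}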
1a029b3becff100c72de9b774d2d774855e26e40.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" /* * JCudaVec - Vector operations for JCuda * http://www.jcuda.org * * Copyright (c) 2013-2015 Marco Hutter - http://www.jcuda.org */ extern "C" //=== Vector arithmetic ====================================================== extern "C" extern "C" extern "C" extern "C" extern "C" //=== Vector-and-scalar arithmetic =========================================== extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" //=== Vector comparison ====================================================== extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" //=== Vector-and-scalar comparison =========================================== extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" //=== Vector math (one argument) ============================================= // Calculate the arc cosine of the input argument. extern "C" // Calculate the nonnegative arc hyperbolic cosine of the input argument. extern "C" // Calculate the arc sine of the input argument. extern "C" // Calculate the arc hyperbolic sine of the input argument. extern "C" // Calculate the arc tangent of the input argument. extern "C" // Calculate the arc hyperbolic tangent of the input argument. extern "C" // Calculate the cube root of the input argument. extern "C" // Calculate ceiling of the input argument. extern "C" // Calculate the cosine of the input argument. extern "C" // Calculate the hyperbolic cosine of the input argument. extern "C" // Calculate the cosine of the input argument p . extern "C" // Calculate the complementary error function of the input argument. extern "C" // Calculate the inverse complementary error function of the input argument. extern "C" // Calculate the scaled complementary error function of the input argument. extern "C" // Calculate the error function of the input argument. extern "C" // Calculate the inverse error function of the input argument. extern "C" // Calculate the base 10 exponential of the input argument. extern "C" // Calculate the base 2 exponential of the input argument. extern "C" // Calculate the base e exponential of the input argument. extern "C" // Calculate the base e exponential of the input argument, minus 1. extern "C" // Calculate the absolute value of its argument. extern "C" // Calculate the largest integer less than or equal to x. extern "C" // Calculate the value of the Bessel function of the first kind of order 0 for the input argument. extern "C" // Calculate the value of the Bessel function of the first kind of order 1 for the input argument. extern "C" // Calculate the natural logarithm of the absolute value of the gamma function of the input argument. extern "C" // Calculate the base 10 logarithm of the input argument. extern "C" // Calculate the value of l o g e ( 1 + x ) . extern "C" // Calculate the base 2 logarithm of the input argument. extern "C" // Calculate the floating point representation of the exponent of the input argument. extern "C" // Calculate the natural logarithm of the input argument. extern "C" // Calculate the standard normal cumulative distribution function. extern "C" // Calculate the inverse of the standard normal cumulative distribution function. extern "C" // Calculate reciprocal cube root function. extern "C" // Round input to nearest integer value in floating-point. extern "C" // Round to nearest integer value in floating-point. 
extern "C" // Calculate the reciprocal of the square root of the input argument. extern "C" // Calculate the sine of the input argument. extern "C" // Calculate the hyperbolic sine of the input argument. extern "C" // Calculate the sine of the input argument p . extern "C" // Calculate the square root of the input argument. extern "C" // Calculate the tangent of the input argument. extern "C" // Calculate the hyperbolic tangent of the input argument. extern "C" // Calculate the gamma function of the input argument. extern "C" // Truncate input argument to the integral part. extern "C" // Calculate the value of the Bessel function of the second kind of order 0 for the input argument. extern "C" // Calculate the value of the Bessel function of the second kind of order 1 for the input argument. extern "C" //=== Vector math (two arguments) ============================================ // Create value with given magnitude, copying sign of second value. extern "C" // Compute the positive difference between x and y. extern "C" // Divide two floating point values. extern "C" // Determine the maximum numeric value of the arguments. extern "C" // Determine the minimum numeric value of the arguments. extern "C" // Calculate the floating-point remainder of x / y. extern "C" // Calculate the square root of the sum of squares of two arguments. extern "C" // Return next representable single-precision floating-point value afer argument. extern "C" // Calculate the value of first argument to the power of second argument. extern "C" // Compute single-precision floating-point remainder. extern "C" __global__ void vec_cosf (size_t n, float *result, float *x) { int id = threadIdx.x + blockIdx.x * blockDim.x; if (id < n) { result[id] = cosf(x[id]); } }
1a029b3becff100c72de9b774d2d774855e26e40.cu
#include "includes.h" /* * JCudaVec - Vector operations for JCuda * http://www.jcuda.org * * Copyright (c) 2013-2015 Marco Hutter - http://www.jcuda.org */ extern "C" //=== Vector arithmetic ====================================================== extern "C" extern "C" extern "C" extern "C" extern "C" //=== Vector-and-scalar arithmetic =========================================== extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" //=== Vector comparison ====================================================== extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" //=== Vector-and-scalar comparison =========================================== extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" //=== Vector math (one argument) ============================================= // Calculate the arc cosine of the input argument. extern "C" // Calculate the nonnegative arc hyperbolic cosine of the input argument. extern "C" // Calculate the arc sine of the input argument. extern "C" // Calculate the arc hyperbolic sine of the input argument. extern "C" // Calculate the arc tangent of the input argument. extern "C" // Calculate the arc hyperbolic tangent of the input argument. extern "C" // Calculate the cube root of the input argument. extern "C" // Calculate ceiling of the input argument. extern "C" // Calculate the cosine of the input argument. extern "C" // Calculate the hyperbolic cosine of the input argument. extern "C" // Calculate the cosine of the input argument × p . extern "C" // Calculate the complementary error function of the input argument. extern "C" // Calculate the inverse complementary error function of the input argument. extern "C" // Calculate the scaled complementary error function of the input argument. extern "C" // Calculate the error function of the input argument. extern "C" // Calculate the inverse error function of the input argument. extern "C" // Calculate the base 10 exponential of the input argument. extern "C" // Calculate the base 2 exponential of the input argument. extern "C" // Calculate the base e exponential of the input argument. extern "C" // Calculate the base e exponential of the input argument, minus 1. extern "C" // Calculate the absolute value of its argument. extern "C" // Calculate the largest integer less than or equal to x. extern "C" // Calculate the value of the Bessel function of the first kind of order 0 for the input argument. extern "C" // Calculate the value of the Bessel function of the first kind of order 1 for the input argument. extern "C" // Calculate the natural logarithm of the absolute value of the gamma function of the input argument. extern "C" // Calculate the base 10 logarithm of the input argument. extern "C" // Calculate the value of l o g e ( 1 + x ) . extern "C" // Calculate the base 2 logarithm of the input argument. extern "C" // Calculate the floating point representation of the exponent of the input argument. extern "C" // Calculate the natural logarithm of the input argument. extern "C" // Calculate the standard normal cumulative distribution function. extern "C" // Calculate the inverse of the standard normal cumulative distribution function. extern "C" // Calculate reciprocal cube root function. extern "C" // Round input to nearest integer value in floating-point. extern "C" // Round to nearest integer value in floating-point. extern "C" // Calculate the reciprocal of the square root of the input argument. extern "C" // Calculate the sine of the input argument. 
extern "C" // Calculate the hyperbolic sine of the input argument. extern "C" // Calculate the sine of the input argument × p . extern "C" // Calculate the square root of the input argument. extern "C" // Calculate the tangent of the input argument. extern "C" // Calculate the hyperbolic tangent of the input argument. extern "C" // Calculate the gamma function of the input argument. extern "C" // Truncate input argument to the integral part. extern "C" // Calculate the value of the Bessel function of the second kind of order 0 for the input argument. extern "C" // Calculate the value of the Bessel function of the second kind of order 1 for the input argument. extern "C" //=== Vector math (two arguments) ============================================ // Create value with given magnitude, copying sign of second value. extern "C" // Compute the positive difference between x and y. extern "C" // Divide two floating point values. extern "C" // Determine the maximum numeric value of the arguments. extern "C" // Determine the minimum numeric value of the arguments. extern "C" // Calculate the floating-point remainder of x / y. extern "C" // Calculate the square root of the sum of squares of two arguments. extern "C" // Return next representable single-precision floating-point value afer argument. extern "C" // Calculate the value of first argument to the power of second argument. extern "C" // Compute single-precision floating-point remainder. extern "C" __global__ void vec_cosf (size_t n, float *result, float *x) { int id = threadIdx.x + blockIdx.x * blockDim.x; if (id < n) { result[id] = cosf(x[id]); } }
df50617e70a6404ec1e84af8924cd19cec466649.hip
// !!! This is a file automatically generated by hipify!!! /** ExaTN: Tensor Runtime: Tensor network executor: NVIDIA cuQuantum REVISION: 2023/05/15 Copyright (C) 2018-2023 Dmitry Lyakh Copyright (C) 2018-2022 Oak Ridge National Laboratory (UT-Battelle) Copyright (C) 2022-2023 NVIDIA Corporation SPDX-License-Identifier: BSD-3-Clause **/ #ifdef CUQUANTUM #include <cutensornet.h> #include <cutensor.h> #include <hip/hip_runtime.h> #ifdef MPI_ENABLED #include "mpi.h" #endif #include <algorithm> #include <vector> #include <unordered_map> #include <numeric> #include <type_traits> #include <cstdint> #include <complex> #include <iostream> #include "byte_packet.h" #include "talshxx.hpp" #include "timers.hpp" #include "cuquantum_executor.hpp" #define HANDLE_CUDA_ERROR(x) \ { const auto err = x; \ if( err != hipSuccess ) \ { printf("#ERROR(cuquantum_executor): %s in line %d\n", hipGetErrorString(err), __LINE__); fflush(stdout); std::abort(); } \ }; #define HANDLE_CTN_ERROR(x) \ { const auto err = x; \ if( err != CUTENSORNET_STATUS_SUCCESS ) \ { printf("#ERROR(cuquantum_executor): %s in line %d\n", cutensornetGetErrorString(err), __LINE__); fflush(stdout); std::abort(); } \ }; namespace exatn { // Cached cutensornet contraction path (inside TensorNetwork): struct TensorNetworkPathCutn { cutensornetContractionOptimizerInfo_t path_cutn; bool initialized = false; void initialize(const cutensornetHandle_t handle, const cutensornetNetworkDescriptor_t network) { HANDLE_CTN_ERROR(cutensornetCreateContractionOptimizerInfo(handle,network,&path_cutn)); initialized = true; } ~TensorNetworkPathCutn() { if(initialized) cutensornetDestroyContractionOptimizerInfo(path_cutn); initialized = false; } }; namespace runtime { /** Retrieves a state of cutensornetContractionOptimizerInfo_t as a plain byte packet. **/ void getCutensornetContractionOptimizerInfoState(cutensornetHandle_t & handle, //cuTensorNet handle cutensornetContractionOptimizerInfo_t & info, //in: cutensornetContractionOptimizerInfo_t object BytePacket * info_state); //out: state of the object as a plain byte packet /** Sets a state of cutensornetContractionOptimizerInfo_t from a plain byte packet. **/ void setCutensornetContractionOptimizerInfoState(cutensornetHandle_t & handle, //cuTensorNet handle cutensornetContractionOptimizerInfo_t & info, //out: cutensornetContractionOptimizerInfo_t object BytePacket * info_state); //in: state of the object as a plain byte packet #ifdef MPI_ENABLED /** Broadcasts a cutensornetContractionOptimizerInfo_t to all MPI processes. 
**/ void broadcastCutensornetContractionOptimizerInfo(cutensornetHandle_t & handle, //cuTensorNet handle cutensornetContractionOptimizerInfo_t & info, //in: cutensornetContractionOptimizerInfo_t object MPICommProxy & communicator); //in: MPI communicator #endif /** Tensor descriptor (inside a tensor network) **/ struct TensorDescriptor { std::vector<int64_t> extents; //tensor dimension extents std::vector<int64_t> strides; //tensor dimension strides (optional) hipDataType data_type; //tensor element data type std::size_t volume = 0; //tensor body volume std::size_t size = 0; //tensor body size (bytes) void * src_ptr = nullptr; //non-owning pointer to the tensor body source image std::vector<void*> dst_ptr; //non-owning pointer to the tensor body destination image on each GPU }; /** Tensor network processing request **/ struct TensorNetworkReq { TensorNetworkQueue::ExecStat exec_status = TensorNetworkQueue::ExecStat::None; //tensor network execution status int num_procs = 0; //total number of executing processes int proc_id = -1; //id of the current executing process #ifdef MPI_ENABLED MPICommProxy comm; //MPI communicator over executing processes #endif int64_t num_slices = 0; std::shared_ptr<numerics::TensorNetwork> network; //original tensor network specification std::unordered_map<numerics::TensorHashType, TensorDescriptor> tensor_descriptors; //tensor descriptors (shape, volume, data type, body) std::unordered_map<unsigned int, std::vector<int32_t>> tensor_modes; //indices associated with tensor dimensions (key = original tensor id) std::unordered_map<int32_t, int64_t> mode_extents; //extent of each registered tensor mode (mode --> extent) int32_t * num_modes_in = nullptr; int64_t ** extents_in = nullptr; int64_t ** strides_in = nullptr; int32_t ** modes_in = nullptr; uint32_t * alignments_in = nullptr; cutensornetTensorQualifiers_t * qualifiers_in = nullptr; std::vector<void**> gpu_data_in; //vector of owning arrays of non-owning pointers to the input tensor bodies on each GPU int32_t num_modes_out; int64_t * extents_out = nullptr; //non-owning int64_t * strides_out = nullptr; int32_t * modes_out = nullptr; //non-owning uint32_t alignment_out; std::vector<void*> gpu_data_out; //vector of non-owning pointers to the output tensor body on each GPU std::vector<void*> gpu_workspace; //vector of non-owning pointers to the work space on each GPU std::vector<uint64_t> gpu_worksize; //work space size on each GPU std::vector<void*> memory_window_ptr; //end of the GPU memory segment allocated for the tensors on each GPU cutensornetNetworkDescriptor_t net_descriptor; cutensornetContractionOptimizerConfig_t opt_config; std::shared_ptr<TensorNetworkPathCutn> opt_info; std::vector<cutensornetWorkspaceDescriptor_t> workspace_descriptor; //for each GPU std::vector<cutensornetContractionPlan_t> comp_plan; //for each GPU hipDataType data_type; cutensornetComputeType_t compute_type; std::vector<hipStream_t> gpu_stream; //CUDA stream on each GPU std::vector<hipEvent_t> gpu_data_in_start; //event on each GPU std::vector<hipEvent_t> gpu_data_in_finish; //event on each GPU std::vector<hipEvent_t> gpu_compute_start; //event on each GPU std::vector<hipEvent_t> gpu_compute_finish; //event on each GPU std::vector<hipEvent_t> gpu_data_out_finish; //event on each GPU double prepare_start; double prepare_finish; ~TensorNetworkReq() { for(auto & stream: gpu_stream) hipStreamSynchronize(stream); for(auto & event: gpu_data_out_finish) hipEventDestroy(event); for(auto & event: gpu_compute_finish) 
hipEventDestroy(event); for(auto & event: gpu_compute_start) hipEventDestroy(event); for(auto & event: gpu_data_in_finish) hipEventDestroy(event); for(auto & event: gpu_data_in_start) hipEventDestroy(event); for(auto & stream: gpu_stream) hipStreamDestroy(stream); for(auto & plan: comp_plan) cutensornetDestroyContractionPlan(plan); for(auto & ws_descr: workspace_descriptor) cutensornetDestroyWorkspaceDescriptor(ws_descr); cutensornetDestroyContractionOptimizerConfig(opt_config); cutensornetDestroyNetworkDescriptor(net_descriptor); //if(modes_out != nullptr) delete [] modes_out; if(strides_out != nullptr) delete [] strides_out; //if(extents_out != nullptr) delete [] extents_out; for(auto & data_in: gpu_data_in) if(data_in != nullptr) delete [] data_in; if(qualifiers_in != nullptr) delete [] qualifiers_in; if(alignments_in != nullptr) delete [] alignments_in; if(modes_in != nullptr) delete [] modes_in; if(strides_in != nullptr) delete [] strides_in; if(extents_in != nullptr) delete [] extents_in; if(num_modes_in != nullptr) delete [] num_modes_in; } }; CuQuantumExecutor::CuQuantumExecutor(TensorImplFunc tensor_data_access_func, unsigned int pipeline_depth, unsigned int num_processes, unsigned int process_rank): tensor_data_access_func_(std::move(tensor_data_access_func)), pipe_depth_(pipeline_depth), num_processes_(num_processes), process_rank_(process_rank), flops_(0.0) { static_assert(std::is_same<cutensornetHandle_t,void*>::value,"#FATAL(exatn::runtime::CuQuantumExecutor): cutensornetHandle_t != (void*)"); const size_t version = cutensornetGetVersion(); /*if(process_rank_ == 0){ std::cout << "#DEBUG(exatn::runtime::CuQuantumExecutor): cuTensorNet backend version " << version << std::endl; std::cout << "#DEBUG(exatn::runtime::CuQuantumExecutor): Total number of processes = " << num_processes_ << std::endl; }*/ int num_gpus = 0; auto error_code = talshDeviceCount(DEV_NVIDIA_GPU,&num_gpus); assert(error_code == TALSH_SUCCESS); for(int i = 0; i < num_gpus; ++i){ if(talshDeviceState(i,DEV_NVIDIA_GPU) >= DEV_ON){ gpu_attr_.emplace_back(std::make_pair(i,DeviceAttr{})); gpu_attr_.back().second.pipe_level = 0; gpu_attr_.back().second.workspace_ptr = talsh::getDeviceBufferBasePtr(DEV_NVIDIA_GPU,i); assert(reinterpret_cast<std::size_t>(gpu_attr_.back().second.workspace_ptr) % MEM_ALIGNMENT == 0); gpu_attr_.back().second.buffer_size = talsh::getDeviceMaxBufferSize(DEV_NVIDIA_GPU,i); std::size_t wrk_size = (std::size_t)(static_cast<float>(gpu_attr_.back().second.buffer_size) * WORKSPACE_FRACTION); wrk_size -= wrk_size % MEM_ALIGNMENT; gpu_attr_.back().second.workspace_size = wrk_size; gpu_attr_.back().second.buffer_size -= wrk_size; gpu_attr_.back().second.buffer_size -= gpu_attr_.back().second.buffer_size % MEM_ALIGNMENT; gpu_attr_.back().second.buffer_ptr = (void*)(((char*)(gpu_attr_.back().second.workspace_ptr)) + wrk_size); mem_pool_.emplace_back(LinearMemoryPool(gpu_attr_.back().second.buffer_ptr, gpu_attr_.back().second.buffer_size,MEM_ALIGNMENT)); } } //if(process_rank_ == 0) // std::cout << "#DEBUG(exatn::runtime::CuQuantumExecutor): Number of available GPUs = " << gpu_attr_.size() << std::endl; if(gpu_attr_.empty()){ fatal_error("#FATAL(exatn::runtime::CuQuantumExecutor): cuQuantum backend requires at least one NVIDIA GPU per MPI process!\n"); } for(const auto & gpu: gpu_attr_){ HANDLE_CUDA_ERROR(hipSetDevice(gpu.first)); HANDLE_CTN_ERROR(cutensornetCreate((cutensornetHandle_t*)(&gpu.second.cutn_handle))); } /*if(process_rank_ == 0){ std::cout << "#DEBUG(exatn::runtime::CuQuantumExecutor): 
Created cuTensorNet contexts for all available GPUs" << std::endl; std::cout << "#DEBUG(exatn::runtime::CuQuantumExecutor): GPU configuration:\n"; for(const auto & gpu: gpu_attr_){ std::cout << " GPU #" << gpu.first << ": wrk_ptr = " << gpu.second.workspace_ptr << ", size = " << gpu.second.workspace_size << "; buf_ptr = " << gpu.second.buffer_ptr << ", size = " << gpu.second.buffer_size << std::endl; } }*/ } CuQuantumExecutor::~CuQuantumExecutor() { sync(); for(const auto & gpu: gpu_attr_){ HANDLE_CUDA_ERROR(hipSetDevice(gpu.first)); HANDLE_CTN_ERROR(cutensornetDestroy((cutensornetHandle_t)(gpu.second.cutn_handle))); } //if(process_rank_ == 0) // std::cout << "#DEBUG(exatn::runtime::CuQuantumExecutor): Destroyed cuTensorNet contexts for all available GPUs" << std::endl; /*std::cout << "#MSG(exatn::cuQuantum): Statistics across all GPU devices:\n"; std::cout << " Number of Flops processed: " << flops_ << std::endl; std::cout << "#END_MSG\n";*/ gpu_attr_.clear(); } #ifdef MPI_ENABLED TensorNetworkQueue::ExecStat CuQuantumExecutor::execute(std::shared_ptr<numerics::TensorNetwork> network, unsigned int num_processes, unsigned int process_rank, const MPICommProxy & communicator, const TensorOpExecHandle exec_handle) #else TensorNetworkQueue::ExecStat CuQuantumExecutor::execute(std::shared_ptr<numerics::TensorNetwork> network, unsigned int num_processes, unsigned int process_rank, const TensorOpExecHandle exec_handle) #endif { assert(network); TensorNetworkQueue::ExecStat exec_stat = TensorNetworkQueue::ExecStat::None; auto res = active_networks_.emplace(std::make_pair(exec_handle, new TensorNetworkReq{})); if(res.second){ auto tn_req = res.first->second; tn_req->network = network; tn_req->exec_status = TensorNetworkQueue::ExecStat::Idle; tn_req->num_procs = num_processes; tn_req->proc_id = process_rank; #ifdef MPI_ENABLED tn_req->comm = communicator; #endif parseTensorNetwork(tn_req); //still Idle loadTensors(tn_req); //Idle --> Loading if(tn_req->exec_status == TensorNetworkQueue::ExecStat::Loading){ planExecution(tn_req); //Loading --> Planning (while loading data) if(tn_req->exec_status == TensorNetworkQueue::ExecStat::Planning){ contractTensorNetwork(tn_req); //Planning --> Executing } } exec_stat = tn_req->exec_status; }else{ std::cout << "#WARNING(exatn::runtime::CuQuantumExecutor): execute: Repeated tensor network submission detected!\n"; } return exec_stat; } TensorNetworkQueue::ExecStat CuQuantumExecutor::sync(const TensorOpExecHandle exec_handle, int * error_code, int64_t * num_slices, std::vector<ExecutionTimings> * timings) { *error_code = 0; TensorNetworkQueue::ExecStat exec_stat = TensorNetworkQueue::ExecStat::None; auto iter = active_networks_.find(exec_handle); if(iter != active_networks_.end()){ auto tn_req = iter->second; if(tn_req->exec_status == TensorNetworkQueue::ExecStat::Executing){ testCompletion(tn_req); //Executing --> Completed }else{ if(tn_req->exec_status == TensorNetworkQueue::ExecStat::Idle) loadTensors(tn_req); //Idle --> Loading if(tn_req->exec_status == TensorNetworkQueue::ExecStat::Loading) planExecution(tn_req); //Loading --> Planning (while loading data) if(tn_req->exec_status == TensorNetworkQueue::ExecStat::Planning) contractTensorNetwork(tn_req); //Planning --> Executing } exec_stat = tn_req->exec_status; if(exec_stat == TensorNetworkQueue::ExecStat::Completed){ if(num_slices != nullptr) *num_slices = tn_req->num_slices; if(timings != nullptr){ const int num_gpus = gpu_attr_.size(); (*timings).resize(num_gpus); for(int gpu = 0; gpu < num_gpus; ++gpu){ 
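//For each GPU, convert the recorded timing data into per-stage results: 'prepare' comes from the host wall-clock timer (seconds converted to ms), data_in spans the input-transfer start/finish events, compute spans the compute start/finish events, and data_out spans compute-finish to data-out-finish.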
(*timings)[gpu].prepare = (tn_req->prepare_finish - tn_req->prepare_start) * 1000.0; //ms HANDLE_CUDA_ERROR(hipEventElapsedTime(&((*timings)[gpu].data_in), tn_req->gpu_data_in_start[gpu],tn_req->gpu_data_in_finish[gpu])); HANDLE_CUDA_ERROR(hipEventElapsedTime(&((*timings)[gpu].data_out), tn_req->gpu_compute_finish[gpu],tn_req->gpu_data_out_finish[gpu])); HANDLE_CUDA_ERROR(hipEventElapsedTime(&((*timings)[gpu].compute), tn_req->gpu_compute_start[gpu],tn_req->gpu_compute_finish[gpu])); } } } tn_req.reset(); if(exec_stat == TensorNetworkQueue::ExecStat::Completed) active_networks_.erase(iter); } return exec_stat; } void CuQuantumExecutor::sync() { while(!active_networks_.empty()){ for(auto iter = active_networks_.begin(); iter != active_networks_.end(); ++iter){ int error_code = 0; const auto exec_stat = sync(iter->first,&error_code); assert(error_code == 0); if(exec_stat == TensorNetworkQueue::ExecStat::Completed) break; } } return; } static hipDataType getCudaDataType(const TensorElementType elem_type) { hipDataType cuda_data_type; switch(elem_type){ case TensorElementType::REAL32: cuda_data_type = HIP_R_32F; break; case TensorElementType::REAL64: cuda_data_type = HIP_R_64F; break; case TensorElementType::COMPLEX32: cuda_data_type = HIP_C_32F; break; case TensorElementType::COMPLEX64: cuda_data_type = HIP_C_64F; break; default: assert(false); } return cuda_data_type; } static cutensornetComputeType_t getCutensorComputeType(const TensorElementType elem_type) { cutensornetComputeType_t cutensor_data_type; switch(elem_type){ case TensorElementType::REAL32: cutensor_data_type = CUTENSORNET_COMPUTE_32F; break; case TensorElementType::REAL64: cutensor_data_type = CUTENSORNET_COMPUTE_64F; break; case TensorElementType::COMPLEX32: cutensor_data_type = CUTENSORNET_COMPUTE_32F; break; case TensorElementType::COMPLEX64: cutensor_data_type = CUTENSORNET_COMPUTE_64F; break; default: assert(false); } return cutensor_data_type; } void CuQuantumExecutor::acquireWorkspace(unsigned int dev, void ** workspace_ptr, uint64_t * workspace_size) { assert(dev < gpu_attr_.size()); auto & dev_attr = gpu_attr_[dev].second; *workspace_size = dev_attr.workspace_size / pipe_depth_; *workspace_ptr = (void*)((char*)(dev_attr.workspace_ptr) + ((*workspace_size) * dev_attr.pipe_level)); dev_attr.pipe_level = (++(dev_attr.pipe_level)) % pipe_depth_; return; } void CuQuantumExecutor::parseTensorNetwork(std::shared_ptr<TensorNetworkReq> tn_req) { const int num_gpus = gpu_attr_.size(); const auto & net = *(tn_req->network); const int32_t num_input_tensors = net.getNumTensors(); tn_req->num_modes_in = new int32_t[num_input_tensors]; tn_req->extents_in = new int64_t*[num_input_tensors]; tn_req->strides_in = new int64_t*[num_input_tensors]; tn_req->modes_in = new int32_t*[num_input_tensors]; tn_req->alignments_in = new uint32_t[num_input_tensors]; tn_req->qualifiers_in = new cutensornetTensorQualifiers_t[num_input_tensors]; tn_req->gpu_data_in.resize(num_gpus,nullptr); for(auto & data_in: tn_req->gpu_data_in) data_in = new void*[num_input_tensors]; tn_req->gpu_data_out.resize(num_gpus,nullptr); tn_req->gpu_workspace.resize(num_gpus,nullptr); tn_req->gpu_worksize.resize(num_gpus,0); for(unsigned int i = 0; i < num_input_tensors; ++i) tn_req->strides_in[i] = NULL; for(unsigned int i = 0; i < num_input_tensors; ++i) tn_req->alignments_in[i] = MEM_ALIGNMENT; tn_req->strides_out = NULL; tn_req->alignment_out = MEM_ALIGNMENT; if(logging_ > 0){ std::cout << "#INFO(exatn::runtime::CuQuantumExecutor): Dumping tensor network " << 
net.getName() << " {\n"; } std::vector<int32_t> output_tensor_modes(net.getTensor(0)->getRank()); int32_t mode_id = 0, tens_num = 0; for(auto iter = net.cbegin(); iter != net.cend(); ++iter){ const auto tens_id = iter->first; const auto & tens = iter->second; const auto tens_hash = tens.getTensor()->getTensorHash(); const auto tens_vol = tens.getTensor()->getVolume(); const auto tens_rank = tens.getRank(); const auto tens_type = tens.getElementType(); if(tens_type == TensorElementType::VOID){ std::cout << "#ERROR(exatn::runtime::CuQuantumExecutor): Network tensor #" << tens_id << " has not been allocated typed storage yet!\n"; std::abort(); } const auto & tens_legs = tens.getTensorLegs(); const auto & tens_dims = tens.getDimExtents(); auto res0 = tn_req->tensor_descriptors.emplace(std::make_pair(tens_hash,TensorDescriptor{})); if(res0.second){ auto & descr = res0.first->second; descr.extents.resize(tens_rank); for(unsigned int i = 0; i < tens_rank; ++i) descr.extents[i] = tens_dims[i]; descr.data_type = getCudaDataType(tens_type); descr.volume = tens_vol; descr.src_ptr = tensor_data_access_func_(*(tens.getTensor()),DEV_HOST,0,&(descr.size)); //`Assuming tensor body is on Host assert(descr.src_ptr != nullptr); } auto res1 = tn_req->tensor_modes.emplace(std::make_pair(tens_id,std::vector<int32_t>(tens_rank))); assert(res1.second); for(unsigned int i = 0; i < tens_rank; ++i){ const auto other_tens_id = tens_legs[i].getTensorId(); const auto other_tens_leg_id = tens_legs[i].getDimensionId(); auto other_tens_iter = tn_req->tensor_modes.find(other_tens_id); if(other_tens_iter == tn_req->tensor_modes.end()){ res1.first->second[i] = ++mode_id; auto new_mode = tn_req->mode_extents.emplace(std::make_pair(mode_id,tens_dims[i])); }else{ res1.first->second[i] = other_tens_iter->second[other_tens_leg_id]; } } if(tens_id == 0){ //output tensor tn_req->num_modes_out = tens_rank; tn_req->extents_out = res0.first->second.extents.data(); tn_req->modes_out = res1.first->second.data(); }else{ //input tensors tn_req->num_modes_in[tens_num] = tens_rank; tn_req->extents_in[tens_num] = res0.first->second.extents.data(); tn_req->modes_in[tens_num] = res1.first->second.data(); tn_req->qualifiers_in[tens_num].isConjugate = static_cast<int32_t>(tens.isComplexConjugated()); tn_req->qualifiers_in[tens_num].isConstant = static_cast<int32_t>(!tens.isOptimizable()); ++tens_num; } if(logging_ > 0){ if(tens_id == 0) { for(unsigned int i = 0; i < tens_rank; ++i) output_tensor_modes[i] = res1.first->second[i]; }else{ for(unsigned int i = 0; i < tens_rank; ++i) std::cout << " " << res1.first->second[i]; std::cout << " |"; for(unsigned int i = 0; i < tens_rank; ++i) std::cout << " " << tens_dims[i]; std::cout << std::endl; } } } if(logging_ > 0){ std::cout << "---" << std::endl; for(const auto & md: output_tensor_modes) std::cout << " " << md; std::cout << std::endl << "}" << std::endl << std::flush; } const auto tens_elem_type = net.getTensorElementType(); tn_req->data_type = getCudaDataType(tens_elem_type); tn_req->compute_type = getCutensorComputeType(tens_elem_type); //Create the GPU execution plan, stream and events on each GPU: tn_req->workspace_descriptor.resize(num_gpus); tn_req->comp_plan.resize(num_gpus); tn_req->gpu_stream.resize(num_gpus); tn_req->gpu_data_in_start.resize(num_gpus); tn_req->gpu_data_in_finish.resize(num_gpus); tn_req->gpu_compute_start.resize(num_gpus); tn_req->gpu_compute_finish.resize(num_gpus); tn_req->gpu_data_out_finish.resize(num_gpus); for(int gpu = 0; gpu < num_gpus; ++gpu){ const auto gpu_id 
= gpu_attr_[gpu].first; HANDLE_CUDA_ERROR(hipSetDevice(gpu_id)); HANDLE_CUDA_ERROR(hipStreamCreate(&(tn_req->gpu_stream[gpu]))); HANDLE_CUDA_ERROR(hipEventCreate(&(tn_req->gpu_data_in_start[gpu]))); HANDLE_CUDA_ERROR(hipEventCreate(&(tn_req->gpu_data_in_finish[gpu]))); HANDLE_CUDA_ERROR(hipEventCreate(&(tn_req->gpu_compute_start[gpu]))); HANDLE_CUDA_ERROR(hipEventCreate(&(tn_req->gpu_compute_finish[gpu]))); HANDLE_CUDA_ERROR(hipEventCreate(&(tn_req->gpu_data_out_finish[gpu]))); } //Create the cuTensorNet tensor network descriptor (not GPU specific): HANDLE_CTN_ERROR(cutensornetCreateNetworkDescriptor(gpu_attr_[0].second.cutn_handle,num_input_tensors, tn_req->num_modes_in,tn_req->extents_in,tn_req->strides_in,tn_req->modes_in,tn_req->qualifiers_in, tn_req->num_modes_out,tn_req->extents_out,tn_req->strides_out,tn_req->modes_out, tn_req->data_type,tn_req->compute_type,&(tn_req->net_descriptor))); return; } void CuQuantumExecutor::loadTensors(std::shared_ptr<TensorNetworkReq> tn_req) { const auto out_tens_hash = tn_req->network->getTensor(0)->getTensorHash(); //Load tensors to all GPUs: const int num_gpus = gpu_attr_.size(); for(int gpu = 0; gpu < num_gpus; ++gpu){ const auto gpu_id = gpu_attr_[gpu].first; HANDLE_CUDA_ERROR(hipSetDevice(gpu_id)); void * prev_front = mem_pool_[gpu].getFront(); bool success = true; //Acquire device memory: for(auto & descr: tn_req->tensor_descriptors){ void * dev_ptr = mem_pool_[gpu].acquireMemory(descr.second.size); success = (dev_ptr != nullptr); if(!success) break; descr.second.dst_ptr.emplace_back(dev_ptr); } if(success){ //Initiate data transfers: HANDLE_CUDA_ERROR(hipEventRecord(tn_req->gpu_data_in_start[gpu],tn_req->gpu_stream[gpu])); for(auto & descr: tn_req->tensor_descriptors){ if(descr.first == out_tens_hash){ //output tensor: Set to 0 HANDLE_CUDA_ERROR(hipMemsetAsync(descr.second.dst_ptr.back(),0,descr.second.size,tn_req->gpu_stream[gpu])); }else{ //input tensors: Copy from their original locations /*std::cout << "#DEBUG(exatn::CuQuantumExecutor): loadTensors: " << descr.second.dst_ptr.back() << " " << descr.second.src_ptr << " " << descr.second.size << std::endl << std::flush; //debug*/ HANDLE_CUDA_ERROR(hipMemcpyAsync(descr.second.dst_ptr.back(),descr.second.src_ptr, descr.second.size,hipMemcpyDefault,tn_req->gpu_stream[gpu])); } } HANDLE_CUDA_ERROR(hipEventRecord(tn_req->gpu_data_in_finish[gpu],tn_req->gpu_stream[gpu])); tn_req->memory_window_ptr.emplace_back(mem_pool_[gpu].getFront()); auto & net = *(tn_req->network); int32_t tens_num = 0; for(auto iter = net.cbegin(); iter != net.cend(); ++iter){ const auto tens_id = iter->first; const auto & tens = iter->second; const auto tens_hash = tens.getTensor()->getTensorHash(); auto descr = tn_req->tensor_descriptors.find(tens_hash); void * dev_ptr = descr->second.dst_ptr.back(); if(tens_id == 0){ tn_req->gpu_data_out[gpu] = dev_ptr; }else{ tn_req->gpu_data_in[gpu][tens_num++] = dev_ptr; } } }else{ //no enough memory currently //Restore previous memory front: mem_pool_[gpu].restorePreviousFront(prev_front); return; } } tn_req->exec_status = TensorNetworkQueue::ExecStat::Loading; return; } void getCutensornetContractionOptimizerInfoState(cutensornetHandle_t & handle, cutensornetContractionOptimizerInfo_t & info, BytePacket * info_state) { cutensornetContractionPath_t contr_path{0,nullptr}; HANDLE_CTN_ERROR(cutensornetContractionOptimizerInfoGetAttribute(handle, info, CUTENSORNET_CONTRACTION_OPTIMIZER_INFO_PATH, &contr_path,sizeof(contr_path))); assert(contr_path.numContractions >= 0); 
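//Pack the optimizer state into the byte packet: first the number of pairwise contractions, then each (first,second) node pair of the contraction path, followed by the number of sliced modes and each (slicedMode,slicedExtent) pair of the slicing configuration.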
appendToBytePacket(info_state,contr_path.numContractions); if(contr_path.numContractions > 0){ contr_path.data = new cutensornetNodePair_t[contr_path.numContractions]; assert(contr_path.data != nullptr); HANDLE_CTN_ERROR(cutensornetContractionOptimizerInfoGetAttribute(handle, info, CUTENSORNET_CONTRACTION_OPTIMIZER_INFO_PATH, &contr_path,sizeof(contr_path))); for(int32_t i = 0; i < contr_path.numContractions; ++i){ appendToBytePacket(info_state,contr_path.data[i].first); appendToBytePacket(info_state,contr_path.data[i].second); } int32_t num_sliced_modes = 0; HANDLE_CTN_ERROR(cutensornetContractionOptimizerInfoGetAttribute(handle, info, CUTENSORNET_CONTRACTION_OPTIMIZER_INFO_NUM_SLICED_MODES, &num_sliced_modes,sizeof(num_sliced_modes))); assert(num_sliced_modes >= 0); appendToBytePacket(info_state,num_sliced_modes); if(num_sliced_modes > 0){ cutensornetSlicingConfig_t sliced_modes{0, new cutensornetSliceInfoPair_t[num_sliced_modes]}; assert(sliced_modes.data != nullptr); HANDLE_CTN_ERROR(cutensornetContractionOptimizerInfoGetAttribute(handle, info, CUTENSORNET_CONTRACTION_OPTIMIZER_INFO_SLICING_CONFIG, &sliced_modes,sizeof(sliced_modes))); assert(sliced_modes.numSlicedModes == num_sliced_modes); for(int32_t i = 0; i < num_sliced_modes; ++i){ appendToBytePacket(info_state,sliced_modes.data[i].slicedMode); appendToBytePacket(info_state,sliced_modes.data[i].slicedExtent); } delete [] sliced_modes.data; /*std::vector<int32_t> sliced_modes(num_sliced_modes); HANDLE_CTN_ERROR(cutensornetContractionOptimizerInfoGetAttribute(handle, info, CUTENSORNET_CONTRACTION_OPTIMIZER_INFO_SLICED_MODE, sliced_modes.data(),sliced_modes.size()*sizeof(int32_t))); for(int32_t i = 0; i < num_sliced_modes; ++i){ appendToBytePacket(info_state,sliced_modes[i]); } std::vector<int64_t> sliced_extents(num_sliced_modes); HANDLE_CTN_ERROR(cutensornetContractionOptimizerInfoGetAttribute(handle, info, CUTENSORNET_CONTRACTION_OPTIMIZER_INFO_SLICED_EXTENT, sliced_extents.data(),sliced_extents.size()*sizeof(int64_t))); for(int32_t i = 0; i < num_sliced_modes; ++i){ appendToBytePacket(info_state,sliced_extents[i]); }*/ } delete [] contr_path.data; } return; } void setCutensornetContractionOptimizerInfoState(cutensornetHandle_t & handle, cutensornetContractionOptimizerInfo_t & info, BytePacket * info_state) { cutensornetContractionPath_t contr_path{0,nullptr}; HANDLE_CTN_ERROR(cutensornetContractionOptimizerInfoGetAttribute(handle, info, CUTENSORNET_CONTRACTION_OPTIMIZER_INFO_PATH, &contr_path,sizeof(contr_path))); assert(contr_path.numContractions >= 0); int32_t num_contractions = 0; extractFromBytePacket(info_state,num_contractions); assert(num_contractions == contr_path.numContractions); if(contr_path.numContractions > 0){ contr_path.data = new cutensornetNodePair_t[contr_path.numContractions]; assert(contr_path.data != nullptr); int32_t first, second; for(int32_t i = 0; i < contr_path.numContractions; ++i){ extractFromBytePacket(info_state,first); extractFromBytePacket(info_state,second); contr_path.data[i].first = first; contr_path.data[i].second = second; } HANDLE_CTN_ERROR(cutensornetContractionOptimizerInfoSetAttribute(handle, info, CUTENSORNET_CONTRACTION_OPTIMIZER_INFO_PATH, &contr_path,sizeof(contr_path))); int32_t num_sliced_modes = 0; extractFromBytePacket(info_state,num_sliced_modes); assert(num_sliced_modes >= 0); if(num_sliced_modes > 0){ cutensornetSlicingConfig_t sliced_modes{static_cast<uint32_t>(num_sliced_modes), new cutensornetSliceInfoPair_t[num_sliced_modes]}; assert(sliced_modes.data != nullptr); 
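//Restore the slicing configuration: read back each (slicedMode,slicedExtent) pair from the byte packet and push the reconstructed configuration into the optimizer info via CUTENSORNET_CONTRACTION_OPTIMIZER_INFO_SLICING_CONFIG.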
for(int32_t i = 0; i < num_sliced_modes; ++i){ extractFromBytePacket(info_state,sliced_modes.data[i].slicedMode); extractFromBytePacket(info_state,sliced_modes.data[i].slicedExtent); } HANDLE_CTN_ERROR(cutensornetContractionOptimizerInfoSetAttribute(handle, info, CUTENSORNET_CONTRACTION_OPTIMIZER_INFO_SLICING_CONFIG, &sliced_modes,sizeof(sliced_modes))); delete [] sliced_modes.data; } /*HANDLE_CTN_ERROR(cutensornetContractionOptimizerInfoSetAttribute(handle, info, CUTENSORNET_CONTRACTION_OPTIMIZER_INFO_NUM_SLICED_MODES, &num_sliced_modes,sizeof(num_sliced_modes))); if(num_sliced_modes > 0){ std::vector<int32_t> sliced_modes(num_sliced_modes); for(int32_t i = 0; i < num_sliced_modes; ++i){ extractFromBytePacket(info_state,sliced_modes[i]); } HANDLE_CTN_ERROR(cutensornetContractionOptimizerInfoSetAttribute(handle, info, CUTENSORNET_CONTRACTION_OPTIMIZER_INFO_SLICED_MODE, sliced_modes.data(),sliced_modes.size()*sizeof(int32_t))); std::vector<int64_t> sliced_extents(num_sliced_modes); for(int32_t i = 0; i < num_sliced_modes; ++i){ extractFromBytePacket(info_state,sliced_extents[i]); } HANDLE_CTN_ERROR(cutensornetContractionOptimizerInfoSetAttribute(handle, info, CUTENSORNET_CONTRACTION_OPTIMIZER_INFO_SLICED_EXTENT, sliced_extents.data(),sliced_extents.size()*sizeof(int64_t))); }*/ delete [] contr_path.data; } return; } #ifdef MPI_ENABLED void broadcastCutensornetContractionOptimizerInfo(cutensornetHandle_t & handle, cutensornetContractionOptimizerInfo_t & info, MPICommProxy & communicator) { double flops = 0.0; HANDLE_CTN_ERROR(cutensornetContractionOptimizerInfoGetAttribute(handle, info, CUTENSORNET_CONTRACTION_OPTIMIZER_INFO_FLOP_COUNT, &flops,sizeof(flops))); assert(flops >= 0.0); auto & mpi_comm = communicator.getRef<MPI_Comm>(); int my_rank = -1; auto errc = MPI_Comm_rank(mpi_comm, &my_rank); assert(errc == MPI_SUCCESS); struct {double flop_count; int mpi_rank;} my_flop{flops,my_rank}, best_flop{0.0,-1}; errc = MPI_Allreduce((void*)(&my_flop),(void*)(&best_flop),1,MPI_DOUBLE_INT,MPI_MINLOC,mpi_comm); assert(errc == MPI_SUCCESS); cutensornetContractionPath_t contr_path{0,nullptr}; HANDLE_CTN_ERROR(cutensornetContractionOptimizerInfoGetAttribute(handle, info, CUTENSORNET_CONTRACTION_OPTIMIZER_INFO_PATH, &contr_path,sizeof(contr_path))); assert(contr_path.numContractions >= 0); if(contr_path.numContractions > 0){ contr_path.data = new cutensornetNodePair_t[contr_path.numContractions]; assert(contr_path.data != nullptr); HANDLE_CTN_ERROR(cutensornetContractionOptimizerInfoGetAttribute(handle, info, CUTENSORNET_CONTRACTION_OPTIMIZER_INFO_PATH, &contr_path,sizeof(contr_path))); int32_t num_sliced_modes = 0; HANDLE_CTN_ERROR(cutensornetContractionOptimizerInfoGetAttribute(handle, info, CUTENSORNET_CONTRACTION_OPTIMIZER_INFO_NUM_SLICED_MODES, &num_sliced_modes,sizeof(num_sliced_modes))); std::size_t packet_capacity = ((sizeof(int32_t) * 2 * contr_path.numContractions) + ((sizeof(int32_t) + sizeof(int64_t)) * num_sliced_modes)) * 2 + 1024; //upper bound BytePacket packet; initBytePacket(&packet,packet_capacity); if(my_rank == best_flop.mpi_rank){ getCutensornetContractionOptimizerInfoState(handle,info,&packet); } int packet_size = packet.size_bytes; errc = MPI_Bcast((void*)(&packet_size),1,MPI_INT,best_flop.mpi_rank,mpi_comm); assert(errc == MPI_SUCCESS); if(my_rank != best_flop.mpi_rank){ packet.size_bytes = packet_size; } errc = MPI_Bcast(packet.base_addr,packet_size,MPI_CHAR,best_flop.mpi_rank,mpi_comm); assert(errc == MPI_SUCCESS); if(my_rank != best_flop.mpi_rank){ 
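//Ranks other than the owner of the cheapest (minimal Flop count) path deserialize the broadcast state into their local optimizer info, so all processes contract with the same path and slicing.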
setCutensornetContractionOptimizerInfoState(handle,info,&packet); } destroyBytePacket(&packet); delete [] contr_path.data; } return; } #endif void CuQuantumExecutor::planExecution(std::shared_ptr<TensorNetworkReq> tn_req) { //Configure tensor network contraction on all GPUs: tn_req->prepare_start = Timer::timeInSecHR(); const int num_gpus = gpu_attr_.size(); for(int gpu = 0; gpu < num_gpus; ++gpu){ const int gpu_id = gpu_attr_[gpu].first; HANDLE_CUDA_ERROR(hipSetDevice(gpu_id)); acquireWorkspace(gpu,&(tn_req->gpu_workspace[gpu]),&(tn_req->gpu_worksize[gpu])); } const auto min_gpu_workspace_size = *(std::min_element(tn_req->gpu_worksize.cbegin(),tn_req->gpu_worksize.cend())); for(int gpu = 0; gpu < num_gpus; ++gpu){ const int gpu_id = gpu_attr_[gpu].first; HANDLE_CUDA_ERROR(hipSetDevice(gpu_id)); if(gpu == 0){ //tensor network contraction path needs to be computed only once const int32_t min_slices = tn_req->num_procs * num_gpus; //ensure parallelism HANDLE_CTN_ERROR(cutensornetCreateContractionOptimizerConfig(gpu_attr_[gpu].second.cutn_handle,&(tn_req->opt_config))); HANDLE_CTN_ERROR(cutensornetContractionOptimizerConfigSetAttribute(gpu_attr_[gpu].second.cutn_handle,tn_req->opt_config, CUTENSORNET_CONTRACTION_OPTIMIZER_CONFIG_SLICER_MIN_SLICES, &min_slices,sizeof(min_slices))); const cutensornetOptimizerCost_t cost_func = CUTENSORNET_OPTIMIZER_COST_TIME; HANDLE_CTN_ERROR(cutensornetContractionOptimizerConfigSetAttribute(gpu_attr_[gpu].second.cutn_handle,tn_req->opt_config, CUTENSORNET_CONTRACTION_OPTIMIZER_CONFIG_COST_FUNCTION_OBJECTIVE, &cost_func,sizeof(cost_func))); const int32_t hyper_samples = (int(MIN_HYPER_SAMPLES) - 1) / (tn_req->num_procs) + 1; HANDLE_CTN_ERROR(cutensornetContractionOptimizerConfigSetAttribute(gpu_attr_[gpu].second.cutn_handle,tn_req->opt_config, CUTENSORNET_CONTRACTION_OPTIMIZER_CONFIG_HYPER_NUM_SAMPLES, &hyper_samples,sizeof(hyper_samples))); const int32_t reconfig_iter = RECONFIG_ITERATIONS; HANDLE_CTN_ERROR(cutensornetContractionOptimizerConfigSetAttribute(gpu_attr_[gpu].second.cutn_handle,tn_req->opt_config, CUTENSORNET_CONTRACTION_OPTIMIZER_CONFIG_RECONFIG_NUM_ITERATIONS, &reconfig_iter,sizeof(reconfig_iter))); const int32_t reconfig_leaves = RECONFIG_LEAVES; HANDLE_CTN_ERROR(cutensornetContractionOptimizerConfigSetAttribute(gpu_attr_[gpu].second.cutn_handle,tn_req->opt_config, CUTENSORNET_CONTRACTION_OPTIMIZER_CONFIG_RECONFIG_NUM_LEAVES, &reconfig_leaves,sizeof(reconfig_leaves))); const int32_t rnd_seed = (tn_req->proc_id + 1); HANDLE_CTN_ERROR(cutensornetContractionOptimizerConfigSetAttribute(gpu_attr_[gpu].second.cutn_handle,tn_req->opt_config, CUTENSORNET_CONTRACTION_OPTIMIZER_CONFIG_SEED, &rnd_seed,sizeof(rnd_seed))); auto cached_path = tn_req->network->getCuTensorNetPath(); if(cached_path){ tn_req->opt_info = cached_path; }else{ tn_req->opt_info = std::make_shared<TensorNetworkPathCutn>(); tn_req->opt_info->initialize(gpu_attr_[gpu].second.cutn_handle,tn_req->net_descriptor); HANDLE_CTN_ERROR(cutensornetContractionOptimize(gpu_attr_[gpu].second.cutn_handle, tn_req->net_descriptor,tn_req->opt_config, min_gpu_workspace_size,tn_req->opt_info->path_cutn)); tn_req->network->setCuTensorNetPath(tn_req->opt_info); } #ifdef MPI_ENABLED if(tn_req->num_procs > 1){ broadcastCutensornetContractionOptimizerInfo(gpu_attr_[gpu].second.cutn_handle,tn_req->opt_info->path_cutn,tn_req->comm); } #endif double flops = 0.0; HANDLE_CTN_ERROR(cutensornetContractionOptimizerInfoGetAttribute(gpu_attr_[gpu].second.cutn_handle, tn_req->opt_info->path_cutn, 
CUTENSORNET_CONTRACTION_OPTIMIZER_INFO_FLOP_COUNT, &flops,sizeof(flops))); assert(flops >= 0.0); const double total_flops = flops * 0.5 * tensorElementTypeOpFactor(tn_req->network->getTensorElementType()); flops_ += ((flops * 0.5) / static_cast<double>(tn_req->num_procs)) * //assuming uniform work distribution (not true in general) tensorElementTypeOpFactor(tn_req->network->getTensorElementType()); //std::cout << "#INFO(exatn::CuQuantumExecutor): Path found for network " << tn_req->network->getTensorNetworkHash() // << " with total Flop count = " << std::scientific << (total_flops/1e9) << " Gflop" << std::endl; //debug tn_req->num_slices = 0; HANDLE_CTN_ERROR(cutensornetContractionOptimizerInfoGetAttribute(gpu_attr_[gpu].second.cutn_handle, tn_req->opt_info->path_cutn, CUTENSORNET_CONTRACTION_OPTIMIZER_INFO_NUM_SLICES, &(tn_req->num_slices),sizeof(tn_req->num_slices))); assert(tn_req->num_slices > 0); } HANDLE_CTN_ERROR(cutensornetCreateWorkspaceDescriptor(gpu_attr_[gpu].second.cutn_handle,&(tn_req->workspace_descriptor[gpu]))); HANDLE_CTN_ERROR(cutensornetWorkspaceComputeSizes(gpu_attr_[gpu].second.cutn_handle, tn_req->net_descriptor,tn_req->opt_info->path_cutn, tn_req->workspace_descriptor[gpu])); uint64_t required_workspace_size = 0; HANDLE_CTN_ERROR(cutensornetWorkspaceGetSize(gpu_attr_[gpu].second.cutn_handle, tn_req->workspace_descriptor[gpu], CUTENSORNET_WORKSIZE_PREF_MIN, CUTENSORNET_MEMSPACE_DEVICE, &required_workspace_size)); if(required_workspace_size > tn_req->gpu_worksize[gpu]){ fatal_error("#ERROR(exatn::CuQuantumExecutor::planExecution): Insufficient work space on GPU "+std::to_string(gpu)+"!\n"); } HANDLE_CTN_ERROR(cutensornetWorkspaceSet(gpu_attr_[gpu].second.cutn_handle, tn_req->workspace_descriptor[gpu], CUTENSORNET_MEMSPACE_DEVICE, tn_req->gpu_workspace[gpu],tn_req->gpu_worksize[gpu])); HANDLE_CTN_ERROR(cutensornetCreateContractionPlan(gpu_attr_[gpu].second.cutn_handle, tn_req->net_descriptor,tn_req->opt_info->path_cutn, tn_req->workspace_descriptor[gpu],&(tn_req->comp_plan[gpu]))); } tn_req->prepare_finish = Timer::timeInSecHR(); tn_req->exec_status = TensorNetworkQueue::ExecStat::Planning; return; } void accumulateOutputOnHost(TensorElementType elem_type, void * out_ptr, void * tmp_ptr, std::size_t vol) { auto accumulate = [](auto * ptr0, const auto * ptr1, auto count){ #pragma omp parallel for schedule(guided) shared(count,ptr0,ptr1) for(std::size_t i = 0; i < count; ++i) ptr0[i] += ptr1[i]; return; }; switch(elem_type){ case TensorElementType::REAL32: accumulate(static_cast<float*>(out_ptr),static_cast<float*>(tmp_ptr),vol); break; case TensorElementType::REAL64: accumulate(static_cast<double*>(out_ptr),static_cast<double*>(tmp_ptr),vol); break; case TensorElementType::COMPLEX32: accumulate(static_cast<std::complex<float>*>(out_ptr),static_cast<std::complex<float>*>(tmp_ptr),vol); break; case TensorElementType::COMPLEX64: accumulate(static_cast<std::complex<double>*>(out_ptr),static_cast<std::complex<double>*>(tmp_ptr),vol); break; default: assert(false); } return; } void CuQuantumExecutor::contractTensorNetwork(std::shared_ptr<TensorNetworkReq> tn_req) { //Execute the contraction plans on all GPUs: const int num_gpus = gpu_attr_.size(); const int64_t total_gpus = tn_req->num_procs * num_gpus; for(int gpu = 0; gpu < num_gpus; ++gpu){ HANDLE_CUDA_ERROR(hipEventRecord(tn_req->gpu_compute_start[gpu],tn_req->gpu_stream[gpu])); } for(int64_t slice_base_id = tn_req->proc_id * num_gpus; slice_base_id < tn_req->num_slices; slice_base_id += total_gpus){ const int64_t 
slice_end = ::min(tn_req->num_slices,(int64_t)(slice_base_id + num_gpus)); for(int64_t slice_id = slice_base_id; slice_id < slice_end; ++slice_id){ const int gpu = static_cast<int>(slice_id - slice_base_id); HANDLE_CTN_ERROR(cutensornetContraction(gpu_attr_[gpu].second.cutn_handle, tn_req->comp_plan[gpu], tn_req->gpu_data_in[gpu],tn_req->gpu_data_out[gpu], tn_req->workspace_descriptor[gpu], slice_id,tn_req->gpu_stream[gpu])); } } for(int gpu = 0; gpu < num_gpus; ++gpu){ HANDLE_CUDA_ERROR(hipEventRecord(tn_req->gpu_compute_finish[gpu],tn_req->gpu_stream[gpu])); } //Retrieve the output tensor from all GPUs and perform reduction: auto output_tensor = tn_req->network->getTensor(0); const auto out_elem_type = output_tensor->getElementType(); const auto out_elem_size = TensorElementTypeSize(out_elem_type); const auto output_hash = output_tensor->getTensorHash(); auto iter = tn_req->tensor_descriptors.find(output_hash); assert(iter != tn_req->tensor_descriptors.cend()); const auto & descr = iter->second; if(num_gpus > 1){ //`Blocking solution for output reduction (temporary) const auto dev_id = talshFlatDevId(DEV_HOST,0); void * host_out_tens = nullptr; auto errc = mem_allocate(dev_id,descr.size,YEP,&host_out_tens); if(errc != 0 || host_out_tens == nullptr){ fatal_error("#ERROR(exatn::CuQuantumExecutor::contractTensorNetwork): Insufficient memory space in the Host buffer!\n"); } struct ReductionBuf{void * tmp_ptr; void * out_ptr; void * gpu_ptr; std::size_t vol;}; assert(MEM_ALIGNMENT % out_elem_size == 0); const auto vol0 = (descr.volume / 2) - ((descr.volume / 2) % (MEM_ALIGNMENT / out_elem_size)); const auto vol1 = descr.volume - vol0; std::vector<ReductionBuf> red_buf = {ReductionBuf{host_out_tens, descr.src_ptr, nullptr, vol0}, ReductionBuf{(void*)(((char*)host_out_tens)+(vol0*out_elem_size)), (void*)(((char*)descr.src_ptr)+(vol0*out_elem_size)), nullptr, vol1}}; bool first_iteration = true; for(int gpu = 0; gpu < num_gpus; ++gpu){ red_buf[0].gpu_ptr = descr.dst_ptr[gpu]; red_buf[1].gpu_ptr = (void*)(((char*)descr.dst_ptr[gpu])+(vol0*out_elem_size)); for(int part = 0; part < 2; ++part){ HANDLE_CUDA_ERROR(hipMemcpyAsync(red_buf[part].tmp_ptr,red_buf[part].gpu_ptr, red_buf[part].vol*out_elem_size,hipMemcpyDefault,tn_req->gpu_stream[gpu])); if(first_iteration){ HANDLE_CUDA_ERROR(hipEventRecord(tn_req->gpu_data_out_finish[gpu],tn_req->gpu_stream[gpu])); first_iteration = false; }else{ if(part == 0){ HANDLE_CUDA_ERROR(hipEventSynchronize(tn_req->gpu_data_out_finish[gpu-1])); }else{ HANDLE_CUDA_ERROR(hipEventSynchronize(tn_req->gpu_data_out_finish[gpu])); } HANDLE_CUDA_ERROR(hipEventRecord(tn_req->gpu_data_out_finish[gpu],tn_req->gpu_stream[gpu])); const auto other_part = 1 - part; accumulateOutputOnHost(out_elem_type,red_buf[other_part].out_ptr,red_buf[other_part].tmp_ptr,red_buf[other_part].vol); } } } errc = mem_free(dev_id,&host_out_tens); if(errc != 0){ fatal_error("#ERROR(exatn::CuQuantumExecutor::contractTensorNetwork): Unable to free a temporary Host buffer entry!\n"); } }else{ HANDLE_CUDA_ERROR(hipMemcpyAsync(descr.src_ptr,descr.dst_ptr[0], descr.size,hipMemcpyDefault,tn_req->gpu_stream[0])); HANDLE_CUDA_ERROR(hipEventRecord(tn_req->gpu_data_out_finish[0],tn_req->gpu_stream[0])); } tn_req->exec_status = TensorNetworkQueue::ExecStat::Executing; return; } void CuQuantumExecutor::testCompletion(std::shared_ptr<TensorNetworkReq> tn_req) { //Test completion on all GPUs: bool all_completed = true; const int num_gpus = gpu_attr_.size(); for(int gpu = 0; gpu < num_gpus; ++gpu){ const auto gpu_id 
= gpu_attr_[gpu].first; HANDLE_CUDA_ERROR(hipSetDevice(gpu_id)); hipError_t cuda_error = hipEventQuery(tn_req->gpu_data_out_finish[gpu]); if(cuda_error != hipErrorNotReady){ if(tn_req->memory_window_ptr[gpu] != nullptr){ mem_pool_[gpu].releaseMemory(tn_req->memory_window_ptr[gpu]); tn_req->memory_window_ptr[gpu] = nullptr; } }else{ all_completed = false; } } if(all_completed){ #ifdef MPI_ENABLED //Global output reduction across all participating MPI processes: if(tn_req->num_procs > 1){ const auto out_elem_type = tn_req->network->getTensor(0)->getElementType(); const auto out_tens_hash = tn_req->network->getTensor(0)->getTensorHash(); const auto & descr = tn_req->tensor_descriptors[out_tens_hash]; auto & mpi_comm = tn_req->comm.getRef<MPI_Comm>(); int errc = MPI_SUCCESS; switch(out_elem_type){ case TensorElementType::REAL32: errc = MPI_Allreduce(MPI_IN_PLACE,descr.src_ptr,descr.volume,MPI_FLOAT,MPI_SUM,mpi_comm); break; case TensorElementType::REAL64: errc = MPI_Allreduce(MPI_IN_PLACE,descr.src_ptr,descr.volume,MPI_DOUBLE,MPI_SUM,mpi_comm); break; case TensorElementType::COMPLEX32: errc = MPI_Allreduce(MPI_IN_PLACE,descr.src_ptr,descr.volume,MPI_CXX_FLOAT_COMPLEX,MPI_SUM,mpi_comm); break; case TensorElementType::COMPLEX64: errc = MPI_Allreduce(MPI_IN_PLACE,descr.src_ptr,descr.volume,MPI_CXX_DOUBLE_COMPLEX,MPI_SUM,mpi_comm); break; default: fatal_error("#ERROR(exatn::CuQuantumExecutor::testCompletion): Invalid tensor element type!"); } assert(errc == MPI_SUCCESS); } #endif tn_req->exec_status = TensorNetworkQueue::ExecStat::Completed; } return; } ExecutionTimings ExecutionTimings::computeAverage(const std::vector<ExecutionTimings> & timings) { ExecutionTimings result_timings = std::accumulate(timings.cbegin(),timings.cend(), ExecutionTimings{0.0f,0.0f,0.0f,0.0f}, [](const ExecutionTimings & first, const ExecutionTimings & second){ return ExecutionTimings{first.prepare + second.prepare, first.data_in + second.data_in, first.data_out + second.data_out, first.compute + second.compute}; }); if(!timings.empty()){ const float num_elems = static_cast<float>(timings.size()); result_timings.prepare /= num_elems; result_timings.data_in /= num_elems; result_timings.data_out /= num_elems; result_timings.compute /= num_elems; } return std::move(result_timings); } ExecutionTimings ExecutionTimings::computeWorst(const std::vector<ExecutionTimings> & timings) { ExecutionTimings result_timings{0.0f,0.0f,0.0f,0.0f}; if(!timings.empty()){ result_timings = std::accumulate(timings.cbegin()+1,timings.cend(), timings[0], [](const ExecutionTimings & first, const ExecutionTimings & second){ return ExecutionTimings{::max(first.prepare,second.prepare), ::max(first.data_in,second.data_in), ::max(first.data_out,second.data_out), ::max(first.compute,second.compute)}; }); } return std::move(result_timings); } ExecutionTimings ExecutionTimings::computeBest(const std::vector<ExecutionTimings> & timings) { ExecutionTimings result_timings{0.0f,0.0f,0.0f,0.0f}; if(!timings.empty()){ result_timings = std::accumulate(timings.cbegin()+1,timings.cend(), timings[0], [](const ExecutionTimings & first, const ExecutionTimings & second){ return ExecutionTimings{::min(first.prepare,second.prepare), ::min(first.data_in,second.data_in), ::min(first.data_out,second.data_out), ::min(first.compute,second.compute)}; }); } return std::move(result_timings); } int CuQuantumExecutor::logging_ {0}; } //namespace runtime } //namespace exatn #endif //CUQUANTUM
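// --- Editor's note: illustrative sketch, not part of the original sources. ---
// The self-contained snippet below only restates, with hypothetical names
// (demoSliceOwners), the slice-distribution rule used in
// CuQuantumExecutor::contractTensorNetwork(): process p starts at slice
// p*num_gpus and advances by num_procs*num_gpus, assigning one consecutive
// slice per local GPU in each round; slices beyond num_slices are skipped.
#include <cstdint>
#include <vector>

// Returns the slice ids contracted by GPU `gpu` of process `proc_id`.
static std::vector<int64_t> demoSliceOwners(int64_t num_slices, int num_procs,
                                            int num_gpus, int proc_id, int gpu)
{
  std::vector<int64_t> slices;
  const int64_t total_gpus = static_cast<int64_t>(num_procs) * num_gpus;
  for(int64_t base = static_cast<int64_t>(proc_id) * num_gpus;
      base < num_slices; base += total_gpus){
    const int64_t slice_id = base + gpu; //one consecutive slice per local GPU
    if(slice_id < num_slices) slices.push_back(slice_id);
  }
  return slices;
}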
df50617e70a6404ec1e84af8924cd19cec466649.cu
/** ExaTN: Tensor Runtime: Tensor network executor: NVIDIA cuQuantum REVISION: 2023/05/15 Copyright (C) 2018-2023 Dmitry Lyakh Copyright (C) 2018-2022 Oak Ridge National Laboratory (UT-Battelle) Copyright (C) 2022-2023 NVIDIA Corporation SPDX-License-Identifier: BSD-3-Clause **/ #ifdef CUQUANTUM #include <cutensornet.h> #include <cutensor.h> #include <cuda_runtime.h> #ifdef MPI_ENABLED #include "mpi.h" #endif #include <algorithm> #include <vector> #include <unordered_map> #include <numeric> #include <type_traits> #include <cstdint> #include <complex> #include <iostream> #include "byte_packet.h" #include "talshxx.hpp" #include "timers.hpp" #include "cuquantum_executor.hpp" #define HANDLE_CUDA_ERROR(x) \ { const auto err = x; \ if( err != cudaSuccess ) \ { printf("#ERROR(cuquantum_executor): %s in line %d\n", cudaGetErrorString(err), __LINE__); fflush(stdout); std::abort(); } \ }; #define HANDLE_CTN_ERROR(x) \ { const auto err = x; \ if( err != CUTENSORNET_STATUS_SUCCESS ) \ { printf("#ERROR(cuquantum_executor): %s in line %d\n", cutensornetGetErrorString(err), __LINE__); fflush(stdout); std::abort(); } \ }; namespace exatn { // Cached cutensornet contraction path (inside TensorNetwork): struct TensorNetworkPathCutn { cutensornetContractionOptimizerInfo_t path_cutn; bool initialized = false; void initialize(const cutensornetHandle_t handle, const cutensornetNetworkDescriptor_t network) { HANDLE_CTN_ERROR(cutensornetCreateContractionOptimizerInfo(handle,network,&path_cutn)); initialized = true; } ~TensorNetworkPathCutn() { if(initialized) cutensornetDestroyContractionOptimizerInfo(path_cutn); initialized = false; } }; namespace runtime { /** Retrieves a state of cutensornetContractionOptimizerInfo_t as a plain byte packet. **/ void getCutensornetContractionOptimizerInfoState(cutensornetHandle_t & handle, //cuTensorNet handle cutensornetContractionOptimizerInfo_t & info, //in: cutensornetContractionOptimizerInfo_t object BytePacket * info_state); //out: state of the object as a plain byte packet /** Sets a state of cutensornetContractionOptimizerInfo_t from a plain byte packet. **/ void setCutensornetContractionOptimizerInfoState(cutensornetHandle_t & handle, //cuTensorNet handle cutensornetContractionOptimizerInfo_t & info, //out: cutensornetContractionOptimizerInfo_t object BytePacket * info_state); //in: state of the object as a plain byte packet #ifdef MPI_ENABLED /** Broadcasts a cutensornetContractionOptimizerInfo_t to all MPI processes. 
**/ void broadcastCutensornetContractionOptimizerInfo(cutensornetHandle_t & handle, //cuTensorNet handle cutensornetContractionOptimizerInfo_t & info, //in: cutensornetContractionOptimizerInfo_t object MPICommProxy & communicator); //in: MPI communicator #endif /** Tensor descriptor (inside a tensor network) **/ struct TensorDescriptor { std::vector<int64_t> extents; //tensor dimension extents std::vector<int64_t> strides; //tensor dimension strides (optional) cudaDataType_t data_type; //tensor element data type std::size_t volume = 0; //tensor body volume std::size_t size = 0; //tensor body size (bytes) void * src_ptr = nullptr; //non-owning pointer to the tensor body source image std::vector<void*> dst_ptr; //non-owning pointer to the tensor body destination image on each GPU }; /** Tensor network processing request **/ struct TensorNetworkReq { TensorNetworkQueue::ExecStat exec_status = TensorNetworkQueue::ExecStat::None; //tensor network execution status int num_procs = 0; //total number of executing processes int proc_id = -1; //id of the current executing process #ifdef MPI_ENABLED MPICommProxy comm; //MPI communicator over executing processes #endif int64_t num_slices = 0; std::shared_ptr<numerics::TensorNetwork> network; //original tensor network specification std::unordered_map<numerics::TensorHashType, TensorDescriptor> tensor_descriptors; //tensor descriptors (shape, volume, data type, body) std::unordered_map<unsigned int, std::vector<int32_t>> tensor_modes; //indices associated with tensor dimensions (key = original tensor id) std::unordered_map<int32_t, int64_t> mode_extents; //extent of each registered tensor mode (mode --> extent) int32_t * num_modes_in = nullptr; int64_t ** extents_in = nullptr; int64_t ** strides_in = nullptr; int32_t ** modes_in = nullptr; uint32_t * alignments_in = nullptr; cutensornetTensorQualifiers_t * qualifiers_in = nullptr; std::vector<void**> gpu_data_in; //vector of owning arrays of non-owning pointers to the input tensor bodies on each GPU int32_t num_modes_out; int64_t * extents_out = nullptr; //non-owning int64_t * strides_out = nullptr; int32_t * modes_out = nullptr; //non-owning uint32_t alignment_out; std::vector<void*> gpu_data_out; //vector of non-owning pointers to the output tensor body on each GPU std::vector<void*> gpu_workspace; //vector of non-owning pointers to the work space on each GPU std::vector<uint64_t> gpu_worksize; //work space size on each GPU std::vector<void*> memory_window_ptr; //end of the GPU memory segment allocated for the tensors on each GPU cutensornetNetworkDescriptor_t net_descriptor; cutensornetContractionOptimizerConfig_t opt_config; std::shared_ptr<TensorNetworkPathCutn> opt_info; std::vector<cutensornetWorkspaceDescriptor_t> workspace_descriptor; //for each GPU std::vector<cutensornetContractionPlan_t> comp_plan; //for each GPU cudaDataType_t data_type; cutensornetComputeType_t compute_type; std::vector<cudaStream_t> gpu_stream; //CUDA stream on each GPU std::vector<cudaEvent_t> gpu_data_in_start; //event on each GPU std::vector<cudaEvent_t> gpu_data_in_finish; //event on each GPU std::vector<cudaEvent_t> gpu_compute_start; //event on each GPU std::vector<cudaEvent_t> gpu_compute_finish; //event on each GPU std::vector<cudaEvent_t> gpu_data_out_finish; //event on each GPU double prepare_start; double prepare_finish; ~TensorNetworkReq() { for(auto & stream: gpu_stream) cudaStreamSynchronize(stream); for(auto & event: gpu_data_out_finish) cudaEventDestroy(event); for(auto & event: gpu_compute_finish) 
cudaEventDestroy(event); for(auto & event: gpu_compute_start) cudaEventDestroy(event); for(auto & event: gpu_data_in_finish) cudaEventDestroy(event); for(auto & event: gpu_data_in_start) cudaEventDestroy(event); for(auto & stream: gpu_stream) cudaStreamDestroy(stream); for(auto & plan: comp_plan) cutensornetDestroyContractionPlan(plan); for(auto & ws_descr: workspace_descriptor) cutensornetDestroyWorkspaceDescriptor(ws_descr); cutensornetDestroyContractionOptimizerConfig(opt_config); cutensornetDestroyNetworkDescriptor(net_descriptor); //if(modes_out != nullptr) delete [] modes_out; if(strides_out != nullptr) delete [] strides_out; //if(extents_out != nullptr) delete [] extents_out; for(auto & data_in: gpu_data_in) if(data_in != nullptr) delete [] data_in; if(qualifiers_in != nullptr) delete [] qualifiers_in; if(alignments_in != nullptr) delete [] alignments_in; if(modes_in != nullptr) delete [] modes_in; if(strides_in != nullptr) delete [] strides_in; if(extents_in != nullptr) delete [] extents_in; if(num_modes_in != nullptr) delete [] num_modes_in; } }; CuQuantumExecutor::CuQuantumExecutor(TensorImplFunc tensor_data_access_func, unsigned int pipeline_depth, unsigned int num_processes, unsigned int process_rank): tensor_data_access_func_(std::move(tensor_data_access_func)), pipe_depth_(pipeline_depth), num_processes_(num_processes), process_rank_(process_rank), flops_(0.0) { static_assert(std::is_same<cutensornetHandle_t,void*>::value,"#FATAL(exatn::runtime::CuQuantumExecutor): cutensornetHandle_t != (void*)"); const size_t version = cutensornetGetVersion(); /*if(process_rank_ == 0){ std::cout << "#DEBUG(exatn::runtime::CuQuantumExecutor): cuTensorNet backend version " << version << std::endl; std::cout << "#DEBUG(exatn::runtime::CuQuantumExecutor): Total number of processes = " << num_processes_ << std::endl; }*/ int num_gpus = 0; auto error_code = talshDeviceCount(DEV_NVIDIA_GPU,&num_gpus); assert(error_code == TALSH_SUCCESS); for(int i = 0; i < num_gpus; ++i){ if(talshDeviceState(i,DEV_NVIDIA_GPU) >= DEV_ON){ gpu_attr_.emplace_back(std::make_pair(i,DeviceAttr{})); gpu_attr_.back().second.pipe_level = 0; gpu_attr_.back().second.workspace_ptr = talsh::getDeviceBufferBasePtr(DEV_NVIDIA_GPU,i); assert(reinterpret_cast<std::size_t>(gpu_attr_.back().second.workspace_ptr) % MEM_ALIGNMENT == 0); gpu_attr_.back().second.buffer_size = talsh::getDeviceMaxBufferSize(DEV_NVIDIA_GPU,i); std::size_t wrk_size = (std::size_t)(static_cast<float>(gpu_attr_.back().second.buffer_size) * WORKSPACE_FRACTION); wrk_size -= wrk_size % MEM_ALIGNMENT; gpu_attr_.back().second.workspace_size = wrk_size; gpu_attr_.back().second.buffer_size -= wrk_size; gpu_attr_.back().second.buffer_size -= gpu_attr_.back().second.buffer_size % MEM_ALIGNMENT; gpu_attr_.back().second.buffer_ptr = (void*)(((char*)(gpu_attr_.back().second.workspace_ptr)) + wrk_size); mem_pool_.emplace_back(LinearMemoryPool(gpu_attr_.back().second.buffer_ptr, gpu_attr_.back().second.buffer_size,MEM_ALIGNMENT)); } } //if(process_rank_ == 0) // std::cout << "#DEBUG(exatn::runtime::CuQuantumExecutor): Number of available GPUs = " << gpu_attr_.size() << std::endl; if(gpu_attr_.empty()){ fatal_error("#FATAL(exatn::runtime::CuQuantumExecutor): cuQuantum backend requires at least one NVIDIA GPU per MPI process!\n"); } for(const auto & gpu: gpu_attr_){ HANDLE_CUDA_ERROR(cudaSetDevice(gpu.first)); HANDLE_CTN_ERROR(cutensornetCreate((cutensornetHandle_t*)(&gpu.second.cutn_handle))); } /*if(process_rank_ == 0){ std::cout << 
"#DEBUG(exatn::runtime::CuQuantumExecutor): Created cuTensorNet contexts for all available GPUs" << std::endl; std::cout << "#DEBUG(exatn::runtime::CuQuantumExecutor): GPU configuration:\n"; for(const auto & gpu: gpu_attr_){ std::cout << " GPU #" << gpu.first << ": wrk_ptr = " << gpu.second.workspace_ptr << ", size = " << gpu.second.workspace_size << "; buf_ptr = " << gpu.second.buffer_ptr << ", size = " << gpu.second.buffer_size << std::endl; } }*/ } CuQuantumExecutor::~CuQuantumExecutor() { sync(); for(const auto & gpu: gpu_attr_){ HANDLE_CUDA_ERROR(cudaSetDevice(gpu.first)); HANDLE_CTN_ERROR(cutensornetDestroy((cutensornetHandle_t)(gpu.second.cutn_handle))); } //if(process_rank_ == 0) // std::cout << "#DEBUG(exatn::runtime::CuQuantumExecutor): Destroyed cuTensorNet contexts for all available GPUs" << std::endl; /*std::cout << "#MSG(exatn::cuQuantum): Statistics across all GPU devices:\n"; std::cout << " Number of Flops processed: " << flops_ << std::endl; std::cout << "#END_MSG\n";*/ gpu_attr_.clear(); } #ifdef MPI_ENABLED TensorNetworkQueue::ExecStat CuQuantumExecutor::execute(std::shared_ptr<numerics::TensorNetwork> network, unsigned int num_processes, unsigned int process_rank, const MPICommProxy & communicator, const TensorOpExecHandle exec_handle) #else TensorNetworkQueue::ExecStat CuQuantumExecutor::execute(std::shared_ptr<numerics::TensorNetwork> network, unsigned int num_processes, unsigned int process_rank, const TensorOpExecHandle exec_handle) #endif { assert(network); TensorNetworkQueue::ExecStat exec_stat = TensorNetworkQueue::ExecStat::None; auto res = active_networks_.emplace(std::make_pair(exec_handle, new TensorNetworkReq{})); if(res.second){ auto tn_req = res.first->second; tn_req->network = network; tn_req->exec_status = TensorNetworkQueue::ExecStat::Idle; tn_req->num_procs = num_processes; tn_req->proc_id = process_rank; #ifdef MPI_ENABLED tn_req->comm = communicator; #endif parseTensorNetwork(tn_req); //still Idle loadTensors(tn_req); //Idle --> Loading if(tn_req->exec_status == TensorNetworkQueue::ExecStat::Loading){ planExecution(tn_req); //Loading --> Planning (while loading data) if(tn_req->exec_status == TensorNetworkQueue::ExecStat::Planning){ contractTensorNetwork(tn_req); //Planning --> Executing } } exec_stat = tn_req->exec_status; }else{ std::cout << "#WARNING(exatn::runtime::CuQuantumExecutor): execute: Repeated tensor network submission detected!\n"; } return exec_stat; } TensorNetworkQueue::ExecStat CuQuantumExecutor::sync(const TensorOpExecHandle exec_handle, int * error_code, int64_t * num_slices, std::vector<ExecutionTimings> * timings) { *error_code = 0; TensorNetworkQueue::ExecStat exec_stat = TensorNetworkQueue::ExecStat::None; auto iter = active_networks_.find(exec_handle); if(iter != active_networks_.end()){ auto tn_req = iter->second; if(tn_req->exec_status == TensorNetworkQueue::ExecStat::Executing){ testCompletion(tn_req); //Executing --> Completed }else{ if(tn_req->exec_status == TensorNetworkQueue::ExecStat::Idle) loadTensors(tn_req); //Idle --> Loading if(tn_req->exec_status == TensorNetworkQueue::ExecStat::Loading) planExecution(tn_req); //Loading --> Planning (while loading data) if(tn_req->exec_status == TensorNetworkQueue::ExecStat::Planning) contractTensorNetwork(tn_req); //Planning --> Executing } exec_stat = tn_req->exec_status; if(exec_stat == TensorNetworkQueue::ExecStat::Completed){ if(num_slices != nullptr) *num_slices = tn_req->num_slices; if(timings != nullptr){ const int num_gpus = gpu_attr_.size(); 
(*timings).resize(num_gpus); for(int gpu = 0; gpu < num_gpus; ++gpu){ (*timings)[gpu].prepare = (tn_req->prepare_finish - tn_req->prepare_start) * 1000.0; //ms HANDLE_CUDA_ERROR(cudaEventElapsedTime(&((*timings)[gpu].data_in), tn_req->gpu_data_in_start[gpu],tn_req->gpu_data_in_finish[gpu])); HANDLE_CUDA_ERROR(cudaEventElapsedTime(&((*timings)[gpu].data_out), tn_req->gpu_compute_finish[gpu],tn_req->gpu_data_out_finish[gpu])); HANDLE_CUDA_ERROR(cudaEventElapsedTime(&((*timings)[gpu].compute), tn_req->gpu_compute_start[gpu],tn_req->gpu_compute_finish[gpu])); } } } tn_req.reset(); if(exec_stat == TensorNetworkQueue::ExecStat::Completed) active_networks_.erase(iter); } return exec_stat; } void CuQuantumExecutor::sync() { while(!active_networks_.empty()){ for(auto iter = active_networks_.begin(); iter != active_networks_.end(); ++iter){ int error_code = 0; const auto exec_stat = sync(iter->first,&error_code); assert(error_code == 0); if(exec_stat == TensorNetworkQueue::ExecStat::Completed) break; } } return; } static cudaDataType_t getCudaDataType(const TensorElementType elem_type) { cudaDataType_t cuda_data_type; switch(elem_type){ case TensorElementType::REAL32: cuda_data_type = CUDA_R_32F; break; case TensorElementType::REAL64: cuda_data_type = CUDA_R_64F; break; case TensorElementType::COMPLEX32: cuda_data_type = CUDA_C_32F; break; case TensorElementType::COMPLEX64: cuda_data_type = CUDA_C_64F; break; default: assert(false); } return cuda_data_type; } static cutensornetComputeType_t getCutensorComputeType(const TensorElementType elem_type) { cutensornetComputeType_t cutensor_data_type; switch(elem_type){ case TensorElementType::REAL32: cutensor_data_type = CUTENSORNET_COMPUTE_32F; break; case TensorElementType::REAL64: cutensor_data_type = CUTENSORNET_COMPUTE_64F; break; case TensorElementType::COMPLEX32: cutensor_data_type = CUTENSORNET_COMPUTE_32F; break; case TensorElementType::COMPLEX64: cutensor_data_type = CUTENSORNET_COMPUTE_64F; break; default: assert(false); } return cutensor_data_type; } void CuQuantumExecutor::acquireWorkspace(unsigned int dev, void ** workspace_ptr, uint64_t * workspace_size) { assert(dev < gpu_attr_.size()); auto & dev_attr = gpu_attr_[dev].second; *workspace_size = dev_attr.workspace_size / pipe_depth_; *workspace_ptr = (void*)((char*)(dev_attr.workspace_ptr) + ((*workspace_size) * dev_attr.pipe_level)); dev_attr.pipe_level = (++(dev_attr.pipe_level)) % pipe_depth_; return; } void CuQuantumExecutor::parseTensorNetwork(std::shared_ptr<TensorNetworkReq> tn_req) { const int num_gpus = gpu_attr_.size(); const auto & net = *(tn_req->network); const int32_t num_input_tensors = net.getNumTensors(); tn_req->num_modes_in = new int32_t[num_input_tensors]; tn_req->extents_in = new int64_t*[num_input_tensors]; tn_req->strides_in = new int64_t*[num_input_tensors]; tn_req->modes_in = new int32_t*[num_input_tensors]; tn_req->alignments_in = new uint32_t[num_input_tensors]; tn_req->qualifiers_in = new cutensornetTensorQualifiers_t[num_input_tensors]; tn_req->gpu_data_in.resize(num_gpus,nullptr); for(auto & data_in: tn_req->gpu_data_in) data_in = new void*[num_input_tensors]; tn_req->gpu_data_out.resize(num_gpus,nullptr); tn_req->gpu_workspace.resize(num_gpus,nullptr); tn_req->gpu_worksize.resize(num_gpus,0); for(unsigned int i = 0; i < num_input_tensors; ++i) tn_req->strides_in[i] = NULL; for(unsigned int i = 0; i < num_input_tensors; ++i) tn_req->alignments_in[i] = MEM_ALIGNMENT; tn_req->strides_out = NULL; tn_req->alignment_out = MEM_ALIGNMENT; if(logging_ > 0){ std::cout << 
"#INFO(exatn::runtime::CuQuantumExecutor): Dumping tensor network " << net.getName() << " {\n"; } std::vector<int32_t> output_tensor_modes(net.getTensor(0)->getRank()); int32_t mode_id = 0, tens_num = 0; for(auto iter = net.cbegin(); iter != net.cend(); ++iter){ const auto tens_id = iter->first; const auto & tens = iter->second; const auto tens_hash = tens.getTensor()->getTensorHash(); const auto tens_vol = tens.getTensor()->getVolume(); const auto tens_rank = tens.getRank(); const auto tens_type = tens.getElementType(); if(tens_type == TensorElementType::VOID){ std::cout << "#ERROR(exatn::runtime::CuQuantumExecutor): Network tensor #" << tens_id << " has not been allocated typed storage yet!\n"; std::abort(); } const auto & tens_legs = tens.getTensorLegs(); const auto & tens_dims = tens.getDimExtents(); auto res0 = tn_req->tensor_descriptors.emplace(std::make_pair(tens_hash,TensorDescriptor{})); if(res0.second){ auto & descr = res0.first->second; descr.extents.resize(tens_rank); for(unsigned int i = 0; i < tens_rank; ++i) descr.extents[i] = tens_dims[i]; descr.data_type = getCudaDataType(tens_type); descr.volume = tens_vol; descr.src_ptr = tensor_data_access_func_(*(tens.getTensor()),DEV_HOST,0,&(descr.size)); //`Assuming tensor body is on Host assert(descr.src_ptr != nullptr); } auto res1 = tn_req->tensor_modes.emplace(std::make_pair(tens_id,std::vector<int32_t>(tens_rank))); assert(res1.second); for(unsigned int i = 0; i < tens_rank; ++i){ const auto other_tens_id = tens_legs[i].getTensorId(); const auto other_tens_leg_id = tens_legs[i].getDimensionId(); auto other_tens_iter = tn_req->tensor_modes.find(other_tens_id); if(other_tens_iter == tn_req->tensor_modes.end()){ res1.first->second[i] = ++mode_id; auto new_mode = tn_req->mode_extents.emplace(std::make_pair(mode_id,tens_dims[i])); }else{ res1.first->second[i] = other_tens_iter->second[other_tens_leg_id]; } } if(tens_id == 0){ //output tensor tn_req->num_modes_out = tens_rank; tn_req->extents_out = res0.first->second.extents.data(); tn_req->modes_out = res1.first->second.data(); }else{ //input tensors tn_req->num_modes_in[tens_num] = tens_rank; tn_req->extents_in[tens_num] = res0.first->second.extents.data(); tn_req->modes_in[tens_num] = res1.first->second.data(); tn_req->qualifiers_in[tens_num].isConjugate = static_cast<int32_t>(tens.isComplexConjugated()); tn_req->qualifiers_in[tens_num].isConstant = static_cast<int32_t>(!tens.isOptimizable()); ++tens_num; } if(logging_ > 0){ if(tens_id == 0) { for(unsigned int i = 0; i < tens_rank; ++i) output_tensor_modes[i] = res1.first->second[i]; }else{ for(unsigned int i = 0; i < tens_rank; ++i) std::cout << " " << res1.first->second[i]; std::cout << " |"; for(unsigned int i = 0; i < tens_rank; ++i) std::cout << " " << tens_dims[i]; std::cout << std::endl; } } } if(logging_ > 0){ std::cout << "---" << std::endl; for(const auto & md: output_tensor_modes) std::cout << " " << md; std::cout << std::endl << "}" << std::endl << std::flush; } const auto tens_elem_type = net.getTensorElementType(); tn_req->data_type = getCudaDataType(tens_elem_type); tn_req->compute_type = getCutensorComputeType(tens_elem_type); //Create the GPU execution plan, stream and events on each GPU: tn_req->workspace_descriptor.resize(num_gpus); tn_req->comp_plan.resize(num_gpus); tn_req->gpu_stream.resize(num_gpus); tn_req->gpu_data_in_start.resize(num_gpus); tn_req->gpu_data_in_finish.resize(num_gpus); tn_req->gpu_compute_start.resize(num_gpus); tn_req->gpu_compute_finish.resize(num_gpus); 
tn_req->gpu_data_out_finish.resize(num_gpus); for(int gpu = 0; gpu < num_gpus; ++gpu){ const auto gpu_id = gpu_attr_[gpu].first; HANDLE_CUDA_ERROR(cudaSetDevice(gpu_id)); HANDLE_CUDA_ERROR(cudaStreamCreate(&(tn_req->gpu_stream[gpu]))); HANDLE_CUDA_ERROR(cudaEventCreate(&(tn_req->gpu_data_in_start[gpu]))); HANDLE_CUDA_ERROR(cudaEventCreate(&(tn_req->gpu_data_in_finish[gpu]))); HANDLE_CUDA_ERROR(cudaEventCreate(&(tn_req->gpu_compute_start[gpu]))); HANDLE_CUDA_ERROR(cudaEventCreate(&(tn_req->gpu_compute_finish[gpu]))); HANDLE_CUDA_ERROR(cudaEventCreate(&(tn_req->gpu_data_out_finish[gpu]))); } //Create the cuTensorNet tensor network descriptor (not GPU specific): HANDLE_CTN_ERROR(cutensornetCreateNetworkDescriptor(gpu_attr_[0].second.cutn_handle,num_input_tensors, tn_req->num_modes_in,tn_req->extents_in,tn_req->strides_in,tn_req->modes_in,tn_req->qualifiers_in, tn_req->num_modes_out,tn_req->extents_out,tn_req->strides_out,tn_req->modes_out, tn_req->data_type,tn_req->compute_type,&(tn_req->net_descriptor))); return; } void CuQuantumExecutor::loadTensors(std::shared_ptr<TensorNetworkReq> tn_req) { const auto out_tens_hash = tn_req->network->getTensor(0)->getTensorHash(); //Load tensors to all GPUs: const int num_gpus = gpu_attr_.size(); for(int gpu = 0; gpu < num_gpus; ++gpu){ const auto gpu_id = gpu_attr_[gpu].first; HANDLE_CUDA_ERROR(cudaSetDevice(gpu_id)); void * prev_front = mem_pool_[gpu].getFront(); bool success = true; //Acquire device memory: for(auto & descr: tn_req->tensor_descriptors){ void * dev_ptr = mem_pool_[gpu].acquireMemory(descr.second.size); success = (dev_ptr != nullptr); if(!success) break; descr.second.dst_ptr.emplace_back(dev_ptr); } if(success){ //Initiate data transfers: HANDLE_CUDA_ERROR(cudaEventRecord(tn_req->gpu_data_in_start[gpu],tn_req->gpu_stream[gpu])); for(auto & descr: tn_req->tensor_descriptors){ if(descr.first == out_tens_hash){ //output tensor: Set to 0 HANDLE_CUDA_ERROR(cudaMemsetAsync(descr.second.dst_ptr.back(),0,descr.second.size,tn_req->gpu_stream[gpu])); }else{ //input tensors: Copy from their original locations /*std::cout << "#DEBUG(exatn::CuQuantumExecutor): loadTensors: " << descr.second.dst_ptr.back() << " " << descr.second.src_ptr << " " << descr.second.size << std::endl << std::flush; //debug*/ HANDLE_CUDA_ERROR(cudaMemcpyAsync(descr.second.dst_ptr.back(),descr.second.src_ptr, descr.second.size,cudaMemcpyDefault,tn_req->gpu_stream[gpu])); } } HANDLE_CUDA_ERROR(cudaEventRecord(tn_req->gpu_data_in_finish[gpu],tn_req->gpu_stream[gpu])); tn_req->memory_window_ptr.emplace_back(mem_pool_[gpu].getFront()); auto & net = *(tn_req->network); int32_t tens_num = 0; for(auto iter = net.cbegin(); iter != net.cend(); ++iter){ const auto tens_id = iter->first; const auto & tens = iter->second; const auto tens_hash = tens.getTensor()->getTensorHash(); auto descr = tn_req->tensor_descriptors.find(tens_hash); void * dev_ptr = descr->second.dst_ptr.back(); if(tens_id == 0){ tn_req->gpu_data_out[gpu] = dev_ptr; }else{ tn_req->gpu_data_in[gpu][tens_num++] = dev_ptr; } } }else{ //no enough memory currently //Restore previous memory front: mem_pool_[gpu].restorePreviousFront(prev_front); return; } } tn_req->exec_status = TensorNetworkQueue::ExecStat::Loading; return; } void getCutensornetContractionOptimizerInfoState(cutensornetHandle_t & handle, cutensornetContractionOptimizerInfo_t & info, BytePacket * info_state) { cutensornetContractionPath_t contr_path{0,nullptr}; HANDLE_CTN_ERROR(cutensornetContractionOptimizerInfoGetAttribute(handle, info, 
CUTENSORNET_CONTRACTION_OPTIMIZER_INFO_PATH, &contr_path,sizeof(contr_path))); assert(contr_path.numContractions >= 0); appendToBytePacket(info_state,contr_path.numContractions); if(contr_path.numContractions > 0){ contr_path.data = new cutensornetNodePair_t[contr_path.numContractions]; assert(contr_path.data != nullptr); HANDLE_CTN_ERROR(cutensornetContractionOptimizerInfoGetAttribute(handle, info, CUTENSORNET_CONTRACTION_OPTIMIZER_INFO_PATH, &contr_path,sizeof(contr_path))); for(int32_t i = 0; i < contr_path.numContractions; ++i){ appendToBytePacket(info_state,contr_path.data[i].first); appendToBytePacket(info_state,contr_path.data[i].second); } int32_t num_sliced_modes = 0; HANDLE_CTN_ERROR(cutensornetContractionOptimizerInfoGetAttribute(handle, info, CUTENSORNET_CONTRACTION_OPTIMIZER_INFO_NUM_SLICED_MODES, &num_sliced_modes,sizeof(num_sliced_modes))); assert(num_sliced_modes >= 0); appendToBytePacket(info_state,num_sliced_modes); if(num_sliced_modes > 0){ cutensornetSlicingConfig_t sliced_modes{0, new cutensornetSliceInfoPair_t[num_sliced_modes]}; assert(sliced_modes.data != nullptr); HANDLE_CTN_ERROR(cutensornetContractionOptimizerInfoGetAttribute(handle, info, CUTENSORNET_CONTRACTION_OPTIMIZER_INFO_SLICING_CONFIG, &sliced_modes,sizeof(sliced_modes))); assert(sliced_modes.numSlicedModes == num_sliced_modes); for(int32_t i = 0; i < num_sliced_modes; ++i){ appendToBytePacket(info_state,sliced_modes.data[i].slicedMode); appendToBytePacket(info_state,sliced_modes.data[i].slicedExtent); } delete [] sliced_modes.data; /*std::vector<int32_t> sliced_modes(num_sliced_modes); HANDLE_CTN_ERROR(cutensornetContractionOptimizerInfoGetAttribute(handle, info, CUTENSORNET_CONTRACTION_OPTIMIZER_INFO_SLICED_MODE, sliced_modes.data(),sliced_modes.size()*sizeof(int32_t))); for(int32_t i = 0; i < num_sliced_modes; ++i){ appendToBytePacket(info_state,sliced_modes[i]); } std::vector<int64_t> sliced_extents(num_sliced_modes); HANDLE_CTN_ERROR(cutensornetContractionOptimizerInfoGetAttribute(handle, info, CUTENSORNET_CONTRACTION_OPTIMIZER_INFO_SLICED_EXTENT, sliced_extents.data(),sliced_extents.size()*sizeof(int64_t))); for(int32_t i = 0; i < num_sliced_modes; ++i){ appendToBytePacket(info_state,sliced_extents[i]); }*/ } delete [] contr_path.data; } return; } void setCutensornetContractionOptimizerInfoState(cutensornetHandle_t & handle, cutensornetContractionOptimizerInfo_t & info, BytePacket * info_state) { cutensornetContractionPath_t contr_path{0,nullptr}; HANDLE_CTN_ERROR(cutensornetContractionOptimizerInfoGetAttribute(handle, info, CUTENSORNET_CONTRACTION_OPTIMIZER_INFO_PATH, &contr_path,sizeof(contr_path))); assert(contr_path.numContractions >= 0); int32_t num_contractions = 0; extractFromBytePacket(info_state,num_contractions); assert(num_contractions == contr_path.numContractions); if(contr_path.numContractions > 0){ contr_path.data = new cutensornetNodePair_t[contr_path.numContractions]; assert(contr_path.data != nullptr); int32_t first, second; for(int32_t i = 0; i < contr_path.numContractions; ++i){ extractFromBytePacket(info_state,first); extractFromBytePacket(info_state,second); contr_path.data[i].first = first; contr_path.data[i].second = second; } HANDLE_CTN_ERROR(cutensornetContractionOptimizerInfoSetAttribute(handle, info, CUTENSORNET_CONTRACTION_OPTIMIZER_INFO_PATH, &contr_path,sizeof(contr_path))); int32_t num_sliced_modes = 0; extractFromBytePacket(info_state,num_sliced_modes); assert(num_sliced_modes >= 0); if(num_sliced_modes > 0){ cutensornetSlicingConfig_t 
sliced_modes{static_cast<uint32_t>(num_sliced_modes), new cutensornetSliceInfoPair_t[num_sliced_modes]}; assert(sliced_modes.data != nullptr); for(int32_t i = 0; i < num_sliced_modes; ++i){ extractFromBytePacket(info_state,sliced_modes.data[i].slicedMode); extractFromBytePacket(info_state,sliced_modes.data[i].slicedExtent); } HANDLE_CTN_ERROR(cutensornetContractionOptimizerInfoSetAttribute(handle, info, CUTENSORNET_CONTRACTION_OPTIMIZER_INFO_SLICING_CONFIG, &sliced_modes,sizeof(sliced_modes))); delete [] sliced_modes.data; } /*HANDLE_CTN_ERROR(cutensornetContractionOptimizerInfoSetAttribute(handle, info, CUTENSORNET_CONTRACTION_OPTIMIZER_INFO_NUM_SLICED_MODES, &num_sliced_modes,sizeof(num_sliced_modes))); if(num_sliced_modes > 0){ std::vector<int32_t> sliced_modes(num_sliced_modes); for(int32_t i = 0; i < num_sliced_modes; ++i){ extractFromBytePacket(info_state,sliced_modes[i]); } HANDLE_CTN_ERROR(cutensornetContractionOptimizerInfoSetAttribute(handle, info, CUTENSORNET_CONTRACTION_OPTIMIZER_INFO_SLICED_MODE, sliced_modes.data(),sliced_modes.size()*sizeof(int32_t))); std::vector<int64_t> sliced_extents(num_sliced_modes); for(int32_t i = 0; i < num_sliced_modes; ++i){ extractFromBytePacket(info_state,sliced_extents[i]); } HANDLE_CTN_ERROR(cutensornetContractionOptimizerInfoSetAttribute(handle, info, CUTENSORNET_CONTRACTION_OPTIMIZER_INFO_SLICED_EXTENT, sliced_extents.data(),sliced_extents.size()*sizeof(int64_t))); }*/ delete [] contr_path.data; } return; } #ifdef MPI_ENABLED void broadcastCutensornetContractionOptimizerInfo(cutensornetHandle_t & handle, cutensornetContractionOptimizerInfo_t & info, MPICommProxy & communicator) { double flops = 0.0; HANDLE_CTN_ERROR(cutensornetContractionOptimizerInfoGetAttribute(handle, info, CUTENSORNET_CONTRACTION_OPTIMIZER_INFO_FLOP_COUNT, &flops,sizeof(flops))); assert(flops >= 0.0); auto & mpi_comm = communicator.getRef<MPI_Comm>(); int my_rank = -1; auto errc = MPI_Comm_rank(mpi_comm, &my_rank); assert(errc == MPI_SUCCESS); struct {double flop_count; int mpi_rank;} my_flop{flops,my_rank}, best_flop{0.0,-1}; errc = MPI_Allreduce((void*)(&my_flop),(void*)(&best_flop),1,MPI_DOUBLE_INT,MPI_MINLOC,mpi_comm); assert(errc == MPI_SUCCESS); cutensornetContractionPath_t contr_path{0,nullptr}; HANDLE_CTN_ERROR(cutensornetContractionOptimizerInfoGetAttribute(handle, info, CUTENSORNET_CONTRACTION_OPTIMIZER_INFO_PATH, &contr_path,sizeof(contr_path))); assert(contr_path.numContractions >= 0); if(contr_path.numContractions > 0){ contr_path.data = new cutensornetNodePair_t[contr_path.numContractions]; assert(contr_path.data != nullptr); HANDLE_CTN_ERROR(cutensornetContractionOptimizerInfoGetAttribute(handle, info, CUTENSORNET_CONTRACTION_OPTIMIZER_INFO_PATH, &contr_path,sizeof(contr_path))); int32_t num_sliced_modes = 0; HANDLE_CTN_ERROR(cutensornetContractionOptimizerInfoGetAttribute(handle, info, CUTENSORNET_CONTRACTION_OPTIMIZER_INFO_NUM_SLICED_MODES, &num_sliced_modes,sizeof(num_sliced_modes))); std::size_t packet_capacity = ((sizeof(int32_t) * 2 * contr_path.numContractions) + ((sizeof(int32_t) + sizeof(int64_t)) * num_sliced_modes)) * 2 + 1024; //upper bound BytePacket packet; initBytePacket(&packet,packet_capacity); if(my_rank == best_flop.mpi_rank){ getCutensornetContractionOptimizerInfoState(handle,info,&packet); } int packet_size = packet.size_bytes; errc = MPI_Bcast((void*)(&packet_size),1,MPI_INT,best_flop.mpi_rank,mpi_comm); assert(errc == MPI_SUCCESS); if(my_rank != best_flop.mpi_rank){ packet.size_bytes = packet_size; } errc = 
MPI_Bcast(packet.base_addr,packet_size,MPI_CHAR,best_flop.mpi_rank,mpi_comm); assert(errc == MPI_SUCCESS); if(my_rank != best_flop.mpi_rank){ setCutensornetContractionOptimizerInfoState(handle,info,&packet); } destroyBytePacket(&packet); delete [] contr_path.data; } return; } #endif void CuQuantumExecutor::planExecution(std::shared_ptr<TensorNetworkReq> tn_req) { //Configure tensor network contraction on all GPUs: tn_req->prepare_start = Timer::timeInSecHR(); const int num_gpus = gpu_attr_.size(); for(int gpu = 0; gpu < num_gpus; ++gpu){ const int gpu_id = gpu_attr_[gpu].first; HANDLE_CUDA_ERROR(cudaSetDevice(gpu_id)); acquireWorkspace(gpu,&(tn_req->gpu_workspace[gpu]),&(tn_req->gpu_worksize[gpu])); } const auto min_gpu_workspace_size = *(std::min_element(tn_req->gpu_worksize.cbegin(),tn_req->gpu_worksize.cend())); for(int gpu = 0; gpu < num_gpus; ++gpu){ const int gpu_id = gpu_attr_[gpu].first; HANDLE_CUDA_ERROR(cudaSetDevice(gpu_id)); if(gpu == 0){ //tensor network contraction path needs to be computed only once const int32_t min_slices = tn_req->num_procs * num_gpus; //ensure parallelism HANDLE_CTN_ERROR(cutensornetCreateContractionOptimizerConfig(gpu_attr_[gpu].second.cutn_handle,&(tn_req->opt_config))); HANDLE_CTN_ERROR(cutensornetContractionOptimizerConfigSetAttribute(gpu_attr_[gpu].second.cutn_handle,tn_req->opt_config, CUTENSORNET_CONTRACTION_OPTIMIZER_CONFIG_SLICER_MIN_SLICES, &min_slices,sizeof(min_slices))); const cutensornetOptimizerCost_t cost_func = CUTENSORNET_OPTIMIZER_COST_TIME; HANDLE_CTN_ERROR(cutensornetContractionOptimizerConfigSetAttribute(gpu_attr_[gpu].second.cutn_handle,tn_req->opt_config, CUTENSORNET_CONTRACTION_OPTIMIZER_CONFIG_COST_FUNCTION_OBJECTIVE, &cost_func,sizeof(cost_func))); const int32_t hyper_samples = (int(MIN_HYPER_SAMPLES) - 1) / (tn_req->num_procs) + 1; HANDLE_CTN_ERROR(cutensornetContractionOptimizerConfigSetAttribute(gpu_attr_[gpu].second.cutn_handle,tn_req->opt_config, CUTENSORNET_CONTRACTION_OPTIMIZER_CONFIG_HYPER_NUM_SAMPLES, &hyper_samples,sizeof(hyper_samples))); const int32_t reconfig_iter = RECONFIG_ITERATIONS; HANDLE_CTN_ERROR(cutensornetContractionOptimizerConfigSetAttribute(gpu_attr_[gpu].second.cutn_handle,tn_req->opt_config, CUTENSORNET_CONTRACTION_OPTIMIZER_CONFIG_RECONFIG_NUM_ITERATIONS, &reconfig_iter,sizeof(reconfig_iter))); const int32_t reconfig_leaves = RECONFIG_LEAVES; HANDLE_CTN_ERROR(cutensornetContractionOptimizerConfigSetAttribute(gpu_attr_[gpu].second.cutn_handle,tn_req->opt_config, CUTENSORNET_CONTRACTION_OPTIMIZER_CONFIG_RECONFIG_NUM_LEAVES, &reconfig_leaves,sizeof(reconfig_leaves))); const int32_t rnd_seed = (tn_req->proc_id + 1); HANDLE_CTN_ERROR(cutensornetContractionOptimizerConfigSetAttribute(gpu_attr_[gpu].second.cutn_handle,tn_req->opt_config, CUTENSORNET_CONTRACTION_OPTIMIZER_CONFIG_SEED, &rnd_seed,sizeof(rnd_seed))); auto cached_path = tn_req->network->getCuTensorNetPath(); if(cached_path){ tn_req->opt_info = cached_path; }else{ tn_req->opt_info = std::make_shared<TensorNetworkPathCutn>(); tn_req->opt_info->initialize(gpu_attr_[gpu].second.cutn_handle,tn_req->net_descriptor); HANDLE_CTN_ERROR(cutensornetContractionOptimize(gpu_attr_[gpu].second.cutn_handle, tn_req->net_descriptor,tn_req->opt_config, min_gpu_workspace_size,tn_req->opt_info->path_cutn)); tn_req->network->setCuTensorNetPath(tn_req->opt_info); } #ifdef MPI_ENABLED if(tn_req->num_procs > 1){ broadcastCutensornetContractionOptimizerInfo(gpu_attr_[gpu].second.cutn_handle,tn_req->opt_info->path_cutn,tn_req->comm); } #endif double flops = 0.0; 
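// Query the optimizer's estimated FLOP count for the chosen contraction path;
// below it is rescaled by the element-type operation factor and divided by the
// number of participating processes before being accumulated into flops_.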
HANDLE_CTN_ERROR(cutensornetContractionOptimizerInfoGetAttribute(gpu_attr_[gpu].second.cutn_handle, tn_req->opt_info->path_cutn, CUTENSORNET_CONTRACTION_OPTIMIZER_INFO_FLOP_COUNT, &flops,sizeof(flops))); assert(flops >= 0.0); const double total_flops = flops * 0.5 * tensorElementTypeOpFactor(tn_req->network->getTensorElementType()); flops_ += ((flops * 0.5) / static_cast<double>(tn_req->num_procs)) * //assuming uniform work distribution (not true in general) tensorElementTypeOpFactor(tn_req->network->getTensorElementType()); //std::cout << "#INFO(exatn::CuQuantumExecutor): Path found for network " << tn_req->network->getTensorNetworkHash() // << " with total Flop count = " << std::scientific << (total_flops/1e9) << " Gflop" << std::endl; //debug tn_req->num_slices = 0; HANDLE_CTN_ERROR(cutensornetContractionOptimizerInfoGetAttribute(gpu_attr_[gpu].second.cutn_handle, tn_req->opt_info->path_cutn, CUTENSORNET_CONTRACTION_OPTIMIZER_INFO_NUM_SLICES, &(tn_req->num_slices),sizeof(tn_req->num_slices))); assert(tn_req->num_slices > 0); } HANDLE_CTN_ERROR(cutensornetCreateWorkspaceDescriptor(gpu_attr_[gpu].second.cutn_handle,&(tn_req->workspace_descriptor[gpu]))); HANDLE_CTN_ERROR(cutensornetWorkspaceComputeSizes(gpu_attr_[gpu].second.cutn_handle, tn_req->net_descriptor,tn_req->opt_info->path_cutn, tn_req->workspace_descriptor[gpu])); uint64_t required_workspace_size = 0; HANDLE_CTN_ERROR(cutensornetWorkspaceGetSize(gpu_attr_[gpu].second.cutn_handle, tn_req->workspace_descriptor[gpu], CUTENSORNET_WORKSIZE_PREF_MIN, CUTENSORNET_MEMSPACE_DEVICE, &required_workspace_size)); if(required_workspace_size > tn_req->gpu_worksize[gpu]){ fatal_error("#ERROR(exatn::CuQuantumExecutor::planExecution): Insufficient work space on GPU "+std::to_string(gpu)+"!\n"); } HANDLE_CTN_ERROR(cutensornetWorkspaceSet(gpu_attr_[gpu].second.cutn_handle, tn_req->workspace_descriptor[gpu], CUTENSORNET_MEMSPACE_DEVICE, tn_req->gpu_workspace[gpu],tn_req->gpu_worksize[gpu])); HANDLE_CTN_ERROR(cutensornetCreateContractionPlan(gpu_attr_[gpu].second.cutn_handle, tn_req->net_descriptor,tn_req->opt_info->path_cutn, tn_req->workspace_descriptor[gpu],&(tn_req->comp_plan[gpu]))); } tn_req->prepare_finish = Timer::timeInSecHR(); tn_req->exec_status = TensorNetworkQueue::ExecStat::Planning; return; } void accumulateOutputOnHost(TensorElementType elem_type, void * out_ptr, void * tmp_ptr, std::size_t vol) { auto accumulate = [](auto * ptr0, const auto * ptr1, auto count){ #pragma omp parallel for schedule(guided) shared(count,ptr0,ptr1) for(std::size_t i = 0; i < count; ++i) ptr0[i] += ptr1[i]; return; }; switch(elem_type){ case TensorElementType::REAL32: accumulate(static_cast<float*>(out_ptr),static_cast<float*>(tmp_ptr),vol); break; case TensorElementType::REAL64: accumulate(static_cast<double*>(out_ptr),static_cast<double*>(tmp_ptr),vol); break; case TensorElementType::COMPLEX32: accumulate(static_cast<std::complex<float>*>(out_ptr),static_cast<std::complex<float>*>(tmp_ptr),vol); break; case TensorElementType::COMPLEX64: accumulate(static_cast<std::complex<double>*>(out_ptr),static_cast<std::complex<double>*>(tmp_ptr),vol); break; default: assert(false); } return; } void CuQuantumExecutor::contractTensorNetwork(std::shared_ptr<TensorNetworkReq> tn_req) { //Execute the contraction plans on all GPUs: const int num_gpus = gpu_attr_.size(); const int64_t total_gpus = tn_req->num_procs * num_gpus; for(int gpu = 0; gpu < num_gpus; ++gpu){ HANDLE_CUDA_ERROR(cudaEventRecord(tn_req->gpu_compute_start[gpu],tn_req->gpu_stream[gpu])); } for(int64_t 
slice_base_id = tn_req->proc_id * num_gpus; slice_base_id < tn_req->num_slices; slice_base_id += total_gpus){ const int64_t slice_end = std::min(tn_req->num_slices,(int64_t)(slice_base_id + num_gpus)); for(int64_t slice_id = slice_base_id; slice_id < slice_end; ++slice_id){ const int gpu = static_cast<int>(slice_id - slice_base_id); HANDLE_CTN_ERROR(cutensornetContraction(gpu_attr_[gpu].second.cutn_handle, tn_req->comp_plan[gpu], tn_req->gpu_data_in[gpu],tn_req->gpu_data_out[gpu], tn_req->workspace_descriptor[gpu], slice_id,tn_req->gpu_stream[gpu])); } } for(int gpu = 0; gpu < num_gpus; ++gpu){ HANDLE_CUDA_ERROR(cudaEventRecord(tn_req->gpu_compute_finish[gpu],tn_req->gpu_stream[gpu])); } //Retrieve the output tensor from all GPUs and perform reduction: auto output_tensor = tn_req->network->getTensor(0); const auto out_elem_type = output_tensor->getElementType(); const auto out_elem_size = TensorElementTypeSize(out_elem_type); const auto output_hash = output_tensor->getTensorHash(); auto iter = tn_req->tensor_descriptors.find(output_hash); assert(iter != tn_req->tensor_descriptors.cend()); const auto & descr = iter->second; if(num_gpus > 1){ //`Blocking solution for output reduction (temporary) const auto dev_id = talshFlatDevId(DEV_HOST,0); void * host_out_tens = nullptr; auto errc = mem_allocate(dev_id,descr.size,YEP,&host_out_tens); if(errc != 0 || host_out_tens == nullptr){ fatal_error("#ERROR(exatn::CuQuantumExecutor::contractTensorNetwork): Insufficient memory space in the Host buffer!\n"); } struct ReductionBuf{void * tmp_ptr; void * out_ptr; void * gpu_ptr; std::size_t vol;}; assert(MEM_ALIGNMENT % out_elem_size == 0); const auto vol0 = (descr.volume / 2) - ((descr.volume / 2) % (MEM_ALIGNMENT / out_elem_size)); const auto vol1 = descr.volume - vol0; std::vector<ReductionBuf> red_buf = {ReductionBuf{host_out_tens, descr.src_ptr, nullptr, vol0}, ReductionBuf{(void*)(((char*)host_out_tens)+(vol0*out_elem_size)), (void*)(((char*)descr.src_ptr)+(vol0*out_elem_size)), nullptr, vol1}}; bool first_iteration = true; for(int gpu = 0; gpu < num_gpus; ++gpu){ red_buf[0].gpu_ptr = descr.dst_ptr[gpu]; red_buf[1].gpu_ptr = (void*)(((char*)descr.dst_ptr[gpu])+(vol0*out_elem_size)); for(int part = 0; part < 2; ++part){ HANDLE_CUDA_ERROR(cudaMemcpyAsync(red_buf[part].tmp_ptr,red_buf[part].gpu_ptr, red_buf[part].vol*out_elem_size,cudaMemcpyDefault,tn_req->gpu_stream[gpu])); if(first_iteration){ HANDLE_CUDA_ERROR(cudaEventRecord(tn_req->gpu_data_out_finish[gpu],tn_req->gpu_stream[gpu])); first_iteration = false; }else{ if(part == 0){ HANDLE_CUDA_ERROR(cudaEventSynchronize(tn_req->gpu_data_out_finish[gpu-1])); }else{ HANDLE_CUDA_ERROR(cudaEventSynchronize(tn_req->gpu_data_out_finish[gpu])); } HANDLE_CUDA_ERROR(cudaEventRecord(tn_req->gpu_data_out_finish[gpu],tn_req->gpu_stream[gpu])); const auto other_part = 1 - part; accumulateOutputOnHost(out_elem_type,red_buf[other_part].out_ptr,red_buf[other_part].tmp_ptr,red_buf[other_part].vol); } } } errc = mem_free(dev_id,&host_out_tens); if(errc != 0){ fatal_error("#ERROR(exatn::CuQuantumExecutor::contractTensorNetwork): Unable to free a temporary Host buffer entry!\n"); } }else{ HANDLE_CUDA_ERROR(cudaMemcpyAsync(descr.src_ptr,descr.dst_ptr[0], descr.size,cudaMemcpyDefault,tn_req->gpu_stream[0])); HANDLE_CUDA_ERROR(cudaEventRecord(tn_req->gpu_data_out_finish[0],tn_req->gpu_stream[0])); } tn_req->exec_status = TensorNetworkQueue::ExecStat::Executing; return; } void CuQuantumExecutor::testCompletion(std::shared_ptr<TensorNetworkReq> tn_req) { //Test completion 
on all GPUs: bool all_completed = true; const int num_gpus = gpu_attr_.size(); for(int gpu = 0; gpu < num_gpus; ++gpu){ const auto gpu_id = gpu_attr_[gpu].first; HANDLE_CUDA_ERROR(cudaSetDevice(gpu_id)); cudaError_t cuda_error = cudaEventQuery(tn_req->gpu_data_out_finish[gpu]); if(cuda_error != cudaErrorNotReady){ if(tn_req->memory_window_ptr[gpu] != nullptr){ mem_pool_[gpu].releaseMemory(tn_req->memory_window_ptr[gpu]); tn_req->memory_window_ptr[gpu] = nullptr; } }else{ all_completed = false; } } if(all_completed){ #ifdef MPI_ENABLED //Global output reduction across all participating MPI processes: if(tn_req->num_procs > 1){ const auto out_elem_type = tn_req->network->getTensor(0)->getElementType(); const auto out_tens_hash = tn_req->network->getTensor(0)->getTensorHash(); const auto & descr = tn_req->tensor_descriptors[out_tens_hash]; auto & mpi_comm = tn_req->comm.getRef<MPI_Comm>(); int errc = MPI_SUCCESS; switch(out_elem_type){ case TensorElementType::REAL32: errc = MPI_Allreduce(MPI_IN_PLACE,descr.src_ptr,descr.volume,MPI_FLOAT,MPI_SUM,mpi_comm); break; case TensorElementType::REAL64: errc = MPI_Allreduce(MPI_IN_PLACE,descr.src_ptr,descr.volume,MPI_DOUBLE,MPI_SUM,mpi_comm); break; case TensorElementType::COMPLEX32: errc = MPI_Allreduce(MPI_IN_PLACE,descr.src_ptr,descr.volume,MPI_CXX_FLOAT_COMPLEX,MPI_SUM,mpi_comm); break; case TensorElementType::COMPLEX64: errc = MPI_Allreduce(MPI_IN_PLACE,descr.src_ptr,descr.volume,MPI_CXX_DOUBLE_COMPLEX,MPI_SUM,mpi_comm); break; default: fatal_error("#ERROR(exatn::CuQuantumExecutor::testCompletion): Invalid tensor element type!"); } assert(errc == MPI_SUCCESS); } #endif tn_req->exec_status = TensorNetworkQueue::ExecStat::Completed; } return; } ExecutionTimings ExecutionTimings::computeAverage(const std::vector<ExecutionTimings> & timings) { ExecutionTimings result_timings = std::accumulate(timings.cbegin(),timings.cend(), ExecutionTimings{0.0f,0.0f,0.0f,0.0f}, [](const ExecutionTimings & first, const ExecutionTimings & second){ return ExecutionTimings{first.prepare + second.prepare, first.data_in + second.data_in, first.data_out + second.data_out, first.compute + second.compute}; }); if(!timings.empty()){ const float num_elems = static_cast<float>(timings.size()); result_timings.prepare /= num_elems; result_timings.data_in /= num_elems; result_timings.data_out /= num_elems; result_timings.compute /= num_elems; } return std::move(result_timings); } ExecutionTimings ExecutionTimings::computeWorst(const std::vector<ExecutionTimings> & timings) { ExecutionTimings result_timings{0.0f,0.0f,0.0f,0.0f}; if(!timings.empty()){ result_timings = std::accumulate(timings.cbegin()+1,timings.cend(), timings[0], [](const ExecutionTimings & first, const ExecutionTimings & second){ return ExecutionTimings{std::max(first.prepare,second.prepare), std::max(first.data_in,second.data_in), std::max(first.data_out,second.data_out), std::max(first.compute,second.compute)}; }); } return std::move(result_timings); } ExecutionTimings ExecutionTimings::computeBest(const std::vector<ExecutionTimings> & timings) { ExecutionTimings result_timings{0.0f,0.0f,0.0f,0.0f}; if(!timings.empty()){ result_timings = std::accumulate(timings.cbegin()+1,timings.cend(), timings[0], [](const ExecutionTimings & first, const ExecutionTimings & second){ return ExecutionTimings{std::min(first.prepare,second.prepare), std::min(first.data_in,second.data_in), std::min(first.data_out,second.data_out), std::min(first.compute,second.compute)}; }); } return std::move(result_timings); } int 
CuQuantumExecutor::logging_ {0};

} //namespace runtime

} //namespace exatn

#endif //CUQUANTUM
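// The stage timings returned through ExecutionTimings above are gathered with
// pairs of CUDA events recorded on each GPU's stream (data-in, compute and
// data-out) and read back with cudaEventElapsedTime. A minimal sketch of that
// pattern, assuming a hypothetical do_stage_work() helper that is not part of
// CuQuantumExecutor:

#include <cuda_runtime.h>

static float time_stage_ms(cudaStream_t stream, void (*do_stage_work)(cudaStream_t))
{
  cudaEvent_t start, finish;
  cudaEventCreate(&start);
  cudaEventCreate(&finish);
  cudaEventRecord(start, stream);   // mark the beginning of the stage on the stream
  do_stage_work(stream);            // enqueue the stage's asynchronous work
  cudaEventRecord(finish, stream);  // mark the end of the stage
  cudaEventSynchronize(finish);     // wait until the recorded work has completed
  float ms = 0.0f;
  cudaEventElapsedTime(&ms, start, finish); // elapsed GPU time in milliseconds
  cudaEventDestroy(start);
  cudaEventDestroy(finish);
  return ms;
}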
10496e9fcdab8e816cc5f060412dee67b5490e1d.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2021 Quim Aguado * * Permission is hereby granted, free of charge, to any person obtaining a copy of * this software and associated documentation files (the "Software"), to deal in * the Software without restriction, including without limitation the rights to * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of * the Software, and to permit persons to whom the Software is furnished to do so, * subject to the following conditions: * * The above copyright notice and this permission notice shall be included in all * copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ #include <fstream> #include <iostream> #include <cstdlib> #include <cstring> #include <chrono> #include <thrust/host_vector.h> #include <thrust/device_vector.h> #include <nvbio/basic/types.h> #include <nvbio/basic/vector.h> #include <nvbio/alignment/alignment.h> #include <nvbio/alignment/alignment_base.h> #include <nvbio/alignment/batched.h> #define TIMER_INIT std::chrono::steady_clock::time_point bm_timer_begin; \ std::chrono::steady_clock::time_point bm_timer_end; #define TIMER_START bm_timer_begin = std::chrono::steady_clock::now(); #define TIMER_STOP bm_timer_end = std::chrono::steady_clock::now(); #define TIMER_MS std::chrono::duration_cast<std::chrono::milliseconds> (bm_timer_end - bm_timer_begin).count() #define ALPHABET_SIZE 4 const uint32_t BAND_LEN = 31; const char *USAGE_STR = "Usage:\n" "nvbio-benchmark <file> <max_seq_len> <num_alignments> " "<batch_size=50000>"; class Sequences { public: size_t seq_len; size_t num_alignments; char* sequences_buffer; int* sequences_len; Sequences (char* filepath, int seq_len, int num_alignments) :\ seq_len(seq_len), num_alignments(num_alignments) { std::cout << "Sequences object:" << std::endl << "\tFile: " << filepath << std::endl << "\tSequence length: " << seq_len << std::endl << "\tNumber of alignments: " << num_alignments << std::endl; std::size_t seq_bytes_to_alloc = ((size_t)num_alignments * (size_t)seq_len * 2L); std::cout << "Allocating " << (seq_bytes_to_alloc / (1 << 20)) << "MiB of memory to store the sequences" << std::endl; try { this->sequences_buffer = new char[seq_bytes_to_alloc]; } catch (std::bad_alloc & exception) { std::cerr << "bad_alloc detected: " << exception.what(); exit(-1); } memset(this->sequences_buffer, 0, seq_bytes_to_alloc); this->sequences_len = new int[(size_t)num_alignments * 2L]; std::ifstream file(filepath, std::ios::binary | std::ios::ate); if (file.fail()) { std::cerr << "Could not open file: \"" << filepath << "\"" << std::endl; // TODO exit(-1); } std::streamsize size = file.tellg(); file.seekg(0, std::ios::beg); TIMER_INIT TIMER_START std::string line; size_t sequences_read = 0; while(std::getline(file, line) && sequences_read < (num_alignments*2)) { strncpy(this->get_sequence(sequences_read), // +1 to avoid the initial > and < line.c_str() + 1, seq_len); this->sequences_len[sequences_read] = line.length() - 1; sequences_read++; } TIMER_STOP std::cout << "Read " << 
sequences_read << " sequences in " << TIMER_MS << "ms." << std::endl; }; ~Sequences () { delete [] this->sequences_buffer; delete [] this->sequences_len; } char* get_sequence(size_t n) const { #ifdef DEBUG // Only for debug purposes if (n >= this->num_alignments*2) { std::cout << "Trying to read too far... n=" << n << std::endl; return 0; } #endif return this->sequences_buffer + (this->seq_len * n); } }; // Function based on the example in the NVidia blog, but adapted: // https://developer.nvidia.com/blog/accelerating-bioinformatics-nvbio/ // Time is returned in ms void batch_alignment_test (const Sequences &sequences, const size_t batch_offset, const uint32_t batch_size, double *time) { using namespace nvbio; // build two concatenated string sets, one for the patterns, // containing a concatenated sequence of strings of 100 // characters each, and one for the texts, // containing 200 characters each const uint32 n_strings = batch_size; const uint32 pattern_len = sequences.seq_len; const uint32 text_len = sequences.seq_len; #ifdef DEBUG std::cerr << "Batch size: " << n_strings << std::endl << "Pattern length: " << pattern_len << std::endl << "Text length: " << text_len << std::endl; #endif // setup the strings on the host nvbio::vector<host_tag, uint8> h_pattern(n_strings * pattern_len); nvbio::vector<host_tag, uint8> h_text(n_strings * text_len); // Copy patterns for this batch for (uint32 i = 0; i < n_strings; i++) { //std::cout << "copying pattern "<< i << std::endl; memcpy((void*)&h_pattern[i * pattern_len], sequences.get_sequence(batch_offset + i*2), sequences.seq_len); } // Copy texts for this batch for (uint32 i = 0; i < n_strings; i++) { memcpy((void*)&h_text[i * text_len], sequences.get_sequence(batch_offset + (i+1)*2), sequences.seq_len); } TIMER_INIT TIMER_START // copy the strings storage to the device nvbio::vector<device_tag, uint8> d_pattern( h_pattern ); nvbio::vector<device_tag, uint8> d_text( h_text ); // allocate two vectors representing the string offsets nvbio::vector<device_tag, uint32> d_pattern_offsets( n_strings+1 ); nvbio::vector<device_tag, uint32> d_text_offsets( n_strings+1 ); // prepare the string offsets using Thrust's sequence() // function, setting up the offset of pattern i as i * pattern_len, // and the offset of text i as i * text_len thrust::sequence( d_pattern_offsets.begin(), d_pattern_offsets.end(), 0u, pattern_len ); thrust::sequence( d_text_offsets.begin(), d_text_offsets.end(), 0u, text_len ); // prepare a vector of alignment sinks nvbio::vector<device_tag, aln::BestSink<uint32> > sinks( n_strings ); // and execute the batch alignment, on a GPU device aln::batch_banded_alignment_score<BAND_LEN>( aln::make_edit_distance_aligner <aln::GLOBAL, aln::MyersTag<ALPHABET_SIZE> >(), make_concatenated_string_set( n_strings, d_pattern.begin(), d_pattern_offsets.begin() ), make_concatenated_string_set( n_strings, d_text.begin(), d_text_offsets.begin() ), sinks.begin(), aln::DeviceThreadScheduler(), sequences.seq_len, sequences.seq_len ); TIMER_STOP *time += TIMER_MS; } int main (int argc, char* argv[]) { char* filepath; size_t batch_size = 50000; int seq_size = 0; int num_alignments; if (argc >= 4) { filepath = argv[1]; seq_size = std::atoi(argv[2]); num_alignments = std::atoi(argv[3]); } if (argc == 5) { batch_size = std::atoi(argv[4]); } if (argc < 4 || argc > 5) { std::cerr << USAGE_STR << std::endl; return EXIT_FAILURE; } if (batch_size > num_alignments) { std::cerr << "Batch size can not be bigger than the number of alignments" << "\nChanging batch size 
to " << num_alignments << std::endl; batch_size = num_alignments; } else { std::cout << "Batch size set to " << batch_size << std::endl; } Sequences sequences(filepath, seq_size, num_alignments); // Total time in milliseconds double total_time = 0; size_t alignments_computed = 0; int cnt = 0; while (alignments_computed < num_alignments) { size_t curr_batch_size = ::min(batch_size, num_alignments - alignments_computed); batch_alignment_test (sequences, alignments_computed, curr_batch_size, &total_time); std::cerr << "Batch " << cnt++ << " executed." << std::endl; alignments_computed += batch_size; } std::cout << "Executed " << num_alignments << " alignments in " << total_time << "ms." << std::endl << "Performance: " << (double)((num_alignments * (uint64_t)(seq_size*seq_size)) / (total_time/1000)) / 1000000000 << " GCUPs" << std::endl; return 0; }
10496e9fcdab8e816cc5f060412dee67b5490e1d.cu
/* * Copyright (c) 2021 Quim Aguado * * Permission is hereby granted, free of charge, to any person obtaining a copy of * this software and associated documentation files (the "Software"), to deal in * the Software without restriction, including without limitation the rights to * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of * the Software, and to permit persons to whom the Software is furnished to do so, * subject to the following conditions: * * The above copyright notice and this permission notice shall be included in all * copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ #include <fstream> #include <iostream> #include <cstdlib> #include <cstring> #include <chrono> #include <thrust/host_vector.h> #include <thrust/device_vector.h> #include <nvbio/basic/types.h> #include <nvbio/basic/vector.h> #include <nvbio/alignment/alignment.h> #include <nvbio/alignment/alignment_base.h> #include <nvbio/alignment/batched.h> #define TIMER_INIT std::chrono::steady_clock::time_point bm_timer_begin; \ std::chrono::steady_clock::time_point bm_timer_end; #define TIMER_START bm_timer_begin = std::chrono::steady_clock::now(); #define TIMER_STOP bm_timer_end = std::chrono::steady_clock::now(); #define TIMER_MS std::chrono::duration_cast<std::chrono::milliseconds> (bm_timer_end - bm_timer_begin).count() #define ALPHABET_SIZE 4 const uint32_t BAND_LEN = 31; const char *USAGE_STR = "Usage:\n" "nvbio-benchmark <file> <max_seq_len> <num_alignments> " "<batch_size=50000>"; class Sequences { public: size_t seq_len; size_t num_alignments; char* sequences_buffer; int* sequences_len; Sequences (char* filepath, int seq_len, int num_alignments) :\ seq_len(seq_len), num_alignments(num_alignments) { std::cout << "Sequences object:" << std::endl << "\tFile: " << filepath << std::endl << "\tSequence length: " << seq_len << std::endl << "\tNumber of alignments: " << num_alignments << std::endl; std::size_t seq_bytes_to_alloc = ((size_t)num_alignments * (size_t)seq_len * 2L); std::cout << "Allocating " << (seq_bytes_to_alloc / (1 << 20)) << "MiB of memory to store the sequences" << std::endl; try { this->sequences_buffer = new char[seq_bytes_to_alloc]; } catch (std::bad_alloc & exception) { std::cerr << "bad_alloc detected: " << exception.what(); exit(-1); } memset(this->sequences_buffer, 0, seq_bytes_to_alloc); this->sequences_len = new int[(size_t)num_alignments * 2L]; std::ifstream file(filepath, std::ios::binary | std::ios::ate); if (file.fail()) { std::cerr << "Could not open file: \"" << filepath << "\"" << std::endl; // TODO exit(-1); } std::streamsize size = file.tellg(); file.seekg(0, std::ios::beg); TIMER_INIT TIMER_START std::string line; size_t sequences_read = 0; while(std::getline(file, line) && sequences_read < (num_alignments*2)) { strncpy(this->get_sequence(sequences_read), // +1 to avoid the initial > and < line.c_str() + 1, seq_len); this->sequences_len[sequences_read] = line.length() - 1; sequences_read++; } TIMER_STOP std::cout << "Read " << sequences_read << " sequences in " << TIMER_MS << "ms." 
<< std::endl; }; ~Sequences () { delete [] this->sequences_buffer; delete [] this->sequences_len; } char* get_sequence(size_t n) const { #ifdef DEBUG // Only for debug purposes if (n >= this->num_alignments*2) { std::cout << "Trying to read too far... n=" << n << std::endl; return 0; } #endif return this->sequences_buffer + (this->seq_len * n); } }; // Function based on the example in the NVidia blog, but adapted: // https://developer.nvidia.com/blog/accelerating-bioinformatics-nvbio/ // Time is returned in ms void batch_alignment_test (const Sequences &sequences, const size_t batch_offset, const uint32_t batch_size, double *time) { using namespace nvbio; // build two concatenated string sets, one for the patterns, // containing a concatenated sequence of strings of 100 // characters each, and one for the texts, // containing 200 characters each const uint32 n_strings = batch_size; const uint32 pattern_len = sequences.seq_len; const uint32 text_len = sequences.seq_len; #ifdef DEBUG std::cerr << "Batch size: " << n_strings << std::endl << "Pattern length: " << pattern_len << std::endl << "Text length: " << text_len << std::endl; #endif // setup the strings on the host nvbio::vector<host_tag, uint8> h_pattern(n_strings * pattern_len); nvbio::vector<host_tag, uint8> h_text(n_strings * text_len); // Copy patterns for this batch for (uint32 i = 0; i < n_strings; i++) { //std::cout << "copying pattern "<< i << std::endl; memcpy((void*)&h_pattern[i * pattern_len], sequences.get_sequence(batch_offset + i*2), sequences.seq_len); } // Copy texts for this batch for (uint32 i = 0; i < n_strings; i++) { memcpy((void*)&h_text[i * text_len], sequences.get_sequence(batch_offset + (i+1)*2), sequences.seq_len); } TIMER_INIT TIMER_START // copy the strings storage to the device nvbio::vector<device_tag, uint8> d_pattern( h_pattern ); nvbio::vector<device_tag, uint8> d_text( h_text ); // allocate two vectors representing the string offsets nvbio::vector<device_tag, uint32> d_pattern_offsets( n_strings+1 ); nvbio::vector<device_tag, uint32> d_text_offsets( n_strings+1 ); // prepare the string offsets using Thrust's sequence() // function, setting up the offset of pattern i as i * pattern_len, // and the offset of text i as i * text_len thrust::sequence( d_pattern_offsets.begin(), d_pattern_offsets.end(), 0u, pattern_len ); thrust::sequence( d_text_offsets.begin(), d_text_offsets.end(), 0u, text_len ); // prepare a vector of alignment sinks nvbio::vector<device_tag, aln::BestSink<uint32> > sinks( n_strings ); // and execute the batch alignment, on a GPU device aln::batch_banded_alignment_score<BAND_LEN>( aln::make_edit_distance_aligner <aln::GLOBAL, aln::MyersTag<ALPHABET_SIZE> >(), make_concatenated_string_set( n_strings, d_pattern.begin(), d_pattern_offsets.begin() ), make_concatenated_string_set( n_strings, d_text.begin(), d_text_offsets.begin() ), sinks.begin(), aln::DeviceThreadScheduler(), sequences.seq_len, sequences.seq_len ); TIMER_STOP *time += TIMER_MS; } int main (int argc, char* argv[]) { char* filepath; size_t batch_size = 50000; int seq_size = 0; int num_alignments; if (argc >= 4) { filepath = argv[1]; seq_size = std::atoi(argv[2]); num_alignments = std::atoi(argv[3]); } if (argc == 5) { batch_size = std::atoi(argv[4]); } if (argc < 4 || argc > 5) { std::cerr << USAGE_STR << std::endl; return EXIT_FAILURE; } if (batch_size > num_alignments) { std::cerr << "Batch size can not be bigger than the number of alignments" << "\nChanging batch size to " << num_alignments << std::endl; batch_size = 
num_alignments; } else { std::cout << "Batch size set to " << batch_size << std::endl; } Sequences sequences(filepath, seq_size, num_alignments); // Total time in milliseconds double total_time = 0; size_t alignments_computed = 0; int cnt = 0; while (alignments_computed < num_alignments) { size_t curr_batch_size = std::min(batch_size, num_alignments - alignments_computed); batch_alignment_test (sequences, alignments_computed, curr_batch_size, &total_time); std::cerr << "Batch " << cnt++ << " executed." << std::endl; alignments_computed += batch_size; } std::cout << "Executed " << num_alignments << " alignments in " << total_time << "ms." << std::endl << "Performance: " << (double)((num_alignments * (uint64_t)(seq_size*seq_size)) / (total_time/1000)) / 1000000000 << " GCUPs" << std::endl; return 0; }
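// Apart from the hipify banner comment, the .hip variant of this benchmark above
// appears to differ from this CUDA source only in the batching loop, where
// std::min was rewritten as ::min; the thrust and nvbio calls are left unchanged.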
28e851f2f26ceee5b3f28b322416a5e57faa9669.hip
// !!! This is a file automatically generated by hipify!!!
#if !MEGDNN_TEGRA_X1
// generated by gen_cuda_conv_bias_kern_impls.py
// ignore warning of cutlass
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#include "src/cuda/conv_bias/int8/conv_bias_int8_implicit_gemm_cutlass_wrapper.cuinl"

using LayoutSrc = cutlass::layout::TensorNCxHWx<32>;
using LayoutFilter = cutlass::layout::TensorCxRSKx<32>;
using ThreadBlockShape = cutlass::gemm::GemmShape<64, 128, 64>;
using WarpShape = cutlass::gemm::GemmShape<32, 64, 64>;
using InstructionShape = cutlass::gemm::GemmShape<8, 8, 16>;
using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombinationClamp<
        int8_t, 8, int32_t, int32_t, float>;
using Convolution = cutlass::convolution::device::Convolution<
        int8_t, LayoutSrc, int8_t, LayoutFilter, int8_t, LayoutSrc, int32_t,
        LayoutSrc, int32_t, cutlass::convolution::ConvType::kConvolution,
        cutlass::arch::OpClassTensorOp, cutlass::arch::Sm75, ThreadBlockShape,
        WarpShape, InstructionShape, EpilogueOp,
        cutlass::convolution::threadblock::ConvolutionNCxHWxThreadblockSwizzle<
                cutlass::convolution::ConvType::kConvolution>,
        2, 16, 16, true>;
template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>(
        const int8_t* d_src, const int8_t* d_filter, const int32_t* d_bias,
        const int8_t* d_z, int8_t* d_dst, int* workspace,
        typename Convolution::ConvolutionParameter const& conv_param,
        typename Convolution::EpilogueOutputOp::Params const& epilogue,
        hipStream_t stream);
#pragma GCC diagnostic pop
#endif
28e851f2f26ceee5b3f28b322416a5e57faa9669.cu
#if !MEGDNN_TEGRA_X1
// generated by gen_cuda_conv_bias_kern_impls.py
// ignore warning of cutlass
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#include "src/cuda/conv_bias/int8/conv_bias_int8_implicit_gemm_cutlass_wrapper.cuinl"

using LayoutSrc = cutlass::layout::TensorNCxHWx<32>;
using LayoutFilter = cutlass::layout::TensorCxRSKx<32>;
using ThreadBlockShape = cutlass::gemm::GemmShape<64, 128, 64>;
using WarpShape = cutlass::gemm::GemmShape<32, 64, 64>;
using InstructionShape = cutlass::gemm::GemmShape<8, 8, 16>;
using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombinationClamp<
        int8_t, 8, int32_t, int32_t, float>;
using Convolution = cutlass::convolution::device::Convolution<
        int8_t, LayoutSrc, int8_t, LayoutFilter, int8_t, LayoutSrc, int32_t,
        LayoutSrc, int32_t, cutlass::convolution::ConvType::kConvolution,
        cutlass::arch::OpClassTensorOp, cutlass::arch::Sm75, ThreadBlockShape,
        WarpShape, InstructionShape, EpilogueOp,
        cutlass::convolution::threadblock::ConvolutionNCxHWxThreadblockSwizzle<
                cutlass::convolution::ConvType::kConvolution>,
        2, 16, 16, true>;
template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>(
        const int8_t* d_src, const int8_t* d_filter, const int32_t* d_bias,
        const int8_t* d_z, int8_t* d_dst, int* workspace,
        typename Convolution::ConvolutionParameter const& conv_param,
        typename Convolution::EpilogueOutputOp::Params const& epilogue,
        cudaStream_t stream);
#pragma GCC diagnostic pop
#endif
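// Apart from the hipify banner, the HIP variant of this generated instantiation
// file differs from this CUDA source only in the stream type of the explicit
// template instantiation: hipStream_t in place of cudaStream_t.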
f3e930a5a6e163d5b52b715a68310c03fdbb13ca.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #ifdef GRAVITY #ifdef CUFFT #include "potential_CUFFT_3D.h" #include"../global_cuda.h" #include "../io.h" #include<iostream> Potential_CUFFT_3D::Potential_CUFFT_3D( void ){} void Potential_CUFFT_3D::Initialize( Real Lx, Real Ly, Real Lz, Real x_min, Real y_min, Real z_min, int nx, int ny, int nz, int nx_real, int ny_real, int nz_real, Real dx_real, Real dy_real, Real dz_real){ // Lbox_x = Lx; Lbox_y = Ly; Lbox_z = Lz; nx_total = nx; ny_total = ny; nz_total = nz; nx_local = nx_real; ny_local = ny_real; nz_local = nz_real; dx = dx_real; dy = dy_real; dz = dz_real; n_cells_local = nx_local*ny_local*nz_local; n_cells_total = nx_total*ny_total*nz_total; chprintf( " Using Poisson Solver: CUFFT\n"); chprintf( " CUFFT: L[ %f %f %f ] N[ %d %d %d ] dx[ %f %f %f ]\n", Lbox_x, Lbox_y, Lbox_z, nx_local, ny_local, nz_local, dx, dy, dz ); chprintf( " CUFFT: Allocating memory...\n"); AllocateMemory_CPU(); AllocateMemory_GPU(); chprintf( " CUFFT: Creating FFT plan...\n"); hipfftPlan3d( &plan_cufft_fwd, nz_local, ny_local, nx_local, HIPFFT_Z2Z); hipfftPlan3d( &plan_cufft_bwd, nz_local, ny_local, nx_local, HIPFFT_Z2Z); chprintf( " CUFFT: Computing K for Gravity Green Funtion\n"); hipMalloc( (void**)&F.G_d, n_cells_local*sizeof(Real)); Get_K_for_Green_function(); threads_per_block = 512; blocks_per_grid = (( n_cells_local - 1 ) / threads_per_block) + 1; chprintf( " CUFFT: Using %d threads and %d blocks for applying G funtion: %d \n", threads_per_block, blocks_per_grid, threads_per_block*blocks_per_grid); } void Potential_CUFFT_3D::AllocateMemory_CPU( void ){ F.output_h = (Complex_cufft *) malloc(n_cells_local*sizeof(Complex_cufft)); F.G_h = (Real *) malloc(n_cells_local*sizeof(Real_cufft)); } void Potential_CUFFT_3D::AllocateMemory_GPU( void ){ hipMalloc( (void**)&F.input_real_d, n_cells_local*sizeof(Real_cufft)); hipMalloc( (void**)&F.input_d, n_cells_local*sizeof(Complex_cufft)); hipMalloc( (void**)&F.transform_d, n_cells_local*sizeof(Complex_cufft)); hipMalloc( (void**)&F.output_d, n_cells_local*sizeof(Complex_cufft)); hipMalloc( (void**)&F.G_d, n_cells_local*sizeof(Real_cufft)); CudaCheckError(); } void Potential_CUFFT_3D::FreeMemory_GPU( void ){ hipFree( F.input_real_d ); hipFree( F.input_d ); hipFree( F.output_d ); hipFree( F.transform_d ); hipFree( F.G_d ); CudaCheckError(); } void Potential_CUFFT_3D::Reset( void ){ // chprintf("Reset CUFFT\n"); free( F.output_h ); free( F.G_h ); FreeMemory_GPU(); } void Potential_CUFFT_3D::Get_K_for_Green_function( void){ Real kx, ky, kz, Gx, Gy, Gz, G; int id; for (int k=0; k<nz_local; k++){ kz = 2*M_PI*k/nz_local; Gz = sin( kz/2 ); for (int j=0; j<ny_local; j++){ ky = 2*M_PI*j/ny_local; Gy = sin( ky/2 ); for ( int i=0; i<nx_local; i++){ id = i + j*nx_local + k*nx_local*ny_local; kx = 2*M_PI*i/nx_local; Gx = sin( kx/2 ); G = -1 / ( Gx*Gx + Gy*Gy + Gz*Gz ) * dx * dx / 4 ; if ( id == 0 ) G = 1; F.G_h[id] = G; // F.G_h[id] = 0.1; } } } hipMemcpy( F.G_d, F.G_h, n_cells_local*sizeof(Real), hipMemcpyHostToDevice ); CudaCheckError(); } __global__ void Copy_Input_Kernel( int n_cells, Real *input_h, Complex_cufft *input_d, Real Grav_Constant, Real dens_avrg, Real current_a ){ int t_id = threadIdx.x + blockIdx.x*blockDim.x; if ( t_id < n_cells ){ #ifdef COSMOLOGY input_d[t_id].x = 4 * M_PI * Grav_Constant * ( input_h[t_id] - dens_avrg ) / current_a; #else input_d[t_id].x = 4 * M_PI * Grav_Constant * input_h[t_id]; #endif input_d[t_id].y = 0.0; } } void Potential_CUFFT_3D::Copy_Input( Real 
*input_density, Real Grav_Constant, Real dens_avrg, Real current_a ){ hipMemcpy( F.input_real_d, input_density, n_cells_local*sizeof(Real_cufft), hipMemcpyHostToDevice ); hipLaunchKernelGGL(( Copy_Input_Kernel), dim3(blocks_per_grid), dim3(threads_per_block), 0, 0, n_cells_local, F.input_real_d, F.input_d, Grav_Constant, dens_avrg, current_a ); } void Potential_CUFFT_3D::Copy_Output( Real *output_potential ){ hipMemcpy( F.output_h, F.output_d, n_cells_local*sizeof(Complex_cufft), hipMemcpyDeviceToHost ); int id, id_pot; int i, k, j; for (k=0; k<nz_local; k++) { for (j=0; j<ny_local; j++) { for (i=0; i<nx_local; i++) { id = i + j*nx_local + k*nx_local*ny_local; id_pot = (i+N_GHOST_POTENTIAL) + (j+N_GHOST_POTENTIAL)*(nx_local+2*N_GHOST_POTENTIAL) + (k+N_GHOST_POTENTIAL)*(nx_local+2*N_GHOST_POTENTIAL)*(ny_local+2*N_GHOST_POTENTIAL); output_potential[id_pot] = F.output_h[id].x / n_cells_local; } } } } __global__ void Apply_G_Funtion( int n_cells, Complex_cufft *transform, Real *G ){ int t_id = threadIdx.x + blockIdx.x*blockDim.x; Real G_val; if ( t_id < n_cells ){ G_val = G[t_id]; if ( t_id == 0 ) G_val = 1.0; transform[t_id].x *= G_val; transform[t_id].y *= G_val; if ( t_id == 0 ){ transform[t_id].x = 0; transform[t_id].y = 0; } } } Real Potential_CUFFT_3D::Get_Potential( Real *input_density, Real *output_potential, Real Grav_Constant, Real dens_avrg, Real current_a ){ // hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start); // // AllocateMemory_GPU(); Copy_Input( input_density, Grav_Constant, dens_avrg, current_a ); hipfftExecZ2Z( plan_cufft_fwd, F.input_d, F.transform_d, HIPFFT_FORWARD ); hipLaunchKernelGGL(( Apply_G_Funtion), dim3(blocks_per_grid), dim3(threads_per_block), 0, 0, n_cells_local, F.transform_d, F.G_d ); hipfftExecZ2Z( plan_cufft_bwd, F.transform_d, F.output_d, HIPFFT_BACKWARD ); Copy_Output( output_potential ); // // FreeMemory_GPU(); // hipEventRecord(stop); hipEventSynchronize(stop); float milliseconds = 0; hipEventElapsedTime(&milliseconds, start, stop); // chprintf( " CUFFT: Potential Time = %f msecs\n", milliseconds); // return (Real) milliseconds; return 0; } #endif //POTENTIAL_CUFFT #endif //GRAVITY
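// The kernel launches in this HIP port go through hipLaunchKernelGGL, which is
// hipify's rewrite of the CUDA triple-chevron syntax used in the .cu version
// below. A minimal sketch of the correspondence with a hypothetical kernel
// (not part of Potential_CUFFT_3D):

#include <hip/hip_runtime.h>

__global__ void scale_kernel(float *data, int n, float s){
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) data[i] *= s;
}

void launch_scale(float *d_data, int n, float s){
  int threads = 256;
  int blocks = (n + threads - 1) / threads;
  // HIP form: kernel, grid, block, dynamic shared-memory bytes, stream, then the kernel arguments.
  hipLaunchKernelGGL(scale_kernel, dim3(blocks), dim3(threads), 0, 0, d_data, n, s);
  // Equivalent CUDA form: scale_kernel<<<blocks, threads, 0, 0>>>(d_data, n, s);
}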
f3e930a5a6e163d5b52b715a68310c03fdbb13ca.cu
#ifdef GRAVITY #ifdef CUFFT #include "potential_CUFFT_3D.h" #include"../global_cuda.h" #include "../io.h" #include<iostream> Potential_CUFFT_3D::Potential_CUFFT_3D( void ){} void Potential_CUFFT_3D::Initialize( Real Lx, Real Ly, Real Lz, Real x_min, Real y_min, Real z_min, int nx, int ny, int nz, int nx_real, int ny_real, int nz_real, Real dx_real, Real dy_real, Real dz_real){ // Lbox_x = Lx; Lbox_y = Ly; Lbox_z = Lz; nx_total = nx; ny_total = ny; nz_total = nz; nx_local = nx_real; ny_local = ny_real; nz_local = nz_real; dx = dx_real; dy = dy_real; dz = dz_real; n_cells_local = nx_local*ny_local*nz_local; n_cells_total = nx_total*ny_total*nz_total; chprintf( " Using Poisson Solver: CUFFT\n"); chprintf( " CUFFT: L[ %f %f %f ] N[ %d %d %d ] dx[ %f %f %f ]\n", Lbox_x, Lbox_y, Lbox_z, nx_local, ny_local, nz_local, dx, dy, dz ); chprintf( " CUFFT: Allocating memory...\n"); AllocateMemory_CPU(); AllocateMemory_GPU(); chprintf( " CUFFT: Creating FFT plan...\n"); cufftPlan3d( &plan_cufft_fwd, nz_local, ny_local, nx_local, CUFFT_Z2Z); cufftPlan3d( &plan_cufft_bwd, nz_local, ny_local, nx_local, CUFFT_Z2Z); chprintf( " CUFFT: Computing K for Gravity Green Funtion\n"); cudaMalloc( (void**)&F.G_d, n_cells_local*sizeof(Real)); Get_K_for_Green_function(); threads_per_block = 512; blocks_per_grid = (( n_cells_local - 1 ) / threads_per_block) + 1; chprintf( " CUFFT: Using %d threads and %d blocks for applying G funtion: %d \n", threads_per_block, blocks_per_grid, threads_per_block*blocks_per_grid); } void Potential_CUFFT_3D::AllocateMemory_CPU( void ){ F.output_h = (Complex_cufft *) malloc(n_cells_local*sizeof(Complex_cufft)); F.G_h = (Real *) malloc(n_cells_local*sizeof(Real_cufft)); } void Potential_CUFFT_3D::AllocateMemory_GPU( void ){ cudaMalloc( (void**)&F.input_real_d, n_cells_local*sizeof(Real_cufft)); cudaMalloc( (void**)&F.input_d, n_cells_local*sizeof(Complex_cufft)); cudaMalloc( (void**)&F.transform_d, n_cells_local*sizeof(Complex_cufft)); cudaMalloc( (void**)&F.output_d, n_cells_local*sizeof(Complex_cufft)); cudaMalloc( (void**)&F.G_d, n_cells_local*sizeof(Real_cufft)); CudaCheckError(); } void Potential_CUFFT_3D::FreeMemory_GPU( void ){ cudaFree( F.input_real_d ); cudaFree( F.input_d ); cudaFree( F.output_d ); cudaFree( F.transform_d ); cudaFree( F.G_d ); CudaCheckError(); } void Potential_CUFFT_3D::Reset( void ){ // chprintf("Reset CUFFT\n"); free( F.output_h ); free( F.G_h ); FreeMemory_GPU(); } void Potential_CUFFT_3D::Get_K_for_Green_function( void){ Real kx, ky, kz, Gx, Gy, Gz, G; int id; for (int k=0; k<nz_local; k++){ kz = 2*M_PI*k/nz_local; Gz = sin( kz/2 ); for (int j=0; j<ny_local; j++){ ky = 2*M_PI*j/ny_local; Gy = sin( ky/2 ); for ( int i=0; i<nx_local; i++){ id = i + j*nx_local + k*nx_local*ny_local; kx = 2*M_PI*i/nx_local; Gx = sin( kx/2 ); G = -1 / ( Gx*Gx + Gy*Gy + Gz*Gz ) * dx * dx / 4 ; if ( id == 0 ) G = 1; F.G_h[id] = G; // F.G_h[id] = 0.1; } } } cudaMemcpy( F.G_d, F.G_h, n_cells_local*sizeof(Real), cudaMemcpyHostToDevice ); CudaCheckError(); } __global__ void Copy_Input_Kernel( int n_cells, Real *input_h, Complex_cufft *input_d, Real Grav_Constant, Real dens_avrg, Real current_a ){ int t_id = threadIdx.x + blockIdx.x*blockDim.x; if ( t_id < n_cells ){ #ifdef COSMOLOGY input_d[t_id].x = 4 * M_PI * Grav_Constant * ( input_h[t_id] - dens_avrg ) / current_a; #else input_d[t_id].x = 4 * M_PI * Grav_Constant * input_h[t_id]; #endif input_d[t_id].y = 0.0; } } void Potential_CUFFT_3D::Copy_Input( Real *input_density, Real Grav_Constant, Real dens_avrg, Real current_a ){ 
cudaMemcpy( F.input_real_d, input_density, n_cells_local*sizeof(Real_cufft), cudaMemcpyHostToDevice ); Copy_Input_Kernel<<<blocks_per_grid, threads_per_block>>>( n_cells_local, F.input_real_d, F.input_d, Grav_Constant, dens_avrg, current_a ); } void Potential_CUFFT_3D::Copy_Output( Real *output_potential ){ cudaMemcpy( F.output_h, F.output_d, n_cells_local*sizeof(Complex_cufft), cudaMemcpyDeviceToHost ); int id, id_pot; int i, k, j; for (k=0; k<nz_local; k++) { for (j=0; j<ny_local; j++) { for (i=0; i<nx_local; i++) { id = i + j*nx_local + k*nx_local*ny_local; id_pot = (i+N_GHOST_POTENTIAL) + (j+N_GHOST_POTENTIAL)*(nx_local+2*N_GHOST_POTENTIAL) + (k+N_GHOST_POTENTIAL)*(nx_local+2*N_GHOST_POTENTIAL)*(ny_local+2*N_GHOST_POTENTIAL); output_potential[id_pot] = F.output_h[id].x / n_cells_local; } } } } __global__ void Apply_G_Funtion( int n_cells, Complex_cufft *transform, Real *G ){ int t_id = threadIdx.x + blockIdx.x*blockDim.x; Real G_val; if ( t_id < n_cells ){ G_val = G[t_id]; if ( t_id == 0 ) G_val = 1.0; transform[t_id].x *= G_val; transform[t_id].y *= G_val; if ( t_id == 0 ){ transform[t_id].x = 0; transform[t_id].y = 0; } } } Real Potential_CUFFT_3D::Get_Potential( Real *input_density, Real *output_potential, Real Grav_Constant, Real dens_avrg, Real current_a ){ // cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start); // // AllocateMemory_GPU(); Copy_Input( input_density, Grav_Constant, dens_avrg, current_a ); cufftExecZ2Z( plan_cufft_fwd, F.input_d, F.transform_d, CUFFT_FORWARD ); Apply_G_Funtion<<<blocks_per_grid, threads_per_block>>>( n_cells_local, F.transform_d, F.G_d ); cufftExecZ2Z( plan_cufft_bwd, F.transform_d, F.output_d, CUFFT_INVERSE ); Copy_Output( output_potential ); // // FreeMemory_GPU(); // cudaEventRecord(stop); cudaEventSynchronize(stop); float milliseconds = 0; cudaEventElapsedTime(&milliseconds, start, stop); // chprintf( " CUFFT: Potential Time = %f msecs\n", milliseconds); // return (Real) milliseconds; return 0; } #endif //POTENTIAL_CUFFT #endif //GRAVITY
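// Note on the scaling in Copy_Output above: cuFFT computes unnormalized
// transforms, so a CUFFT_FORWARD pass followed by CUFFT_INVERSE multiplies the
// data by the number of transformed elements; dividing each output value by
// n_cells_local restores the correct potential. In the HIP port above the same
// backward transform appears as hipfftExecZ2Z with HIPFFT_BACKWARD.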
25a737a7f9173da1549bd92bcab6a0b6ed8b60e3.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /*%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % by: Alireza Ahmadi % % University of Bonn- MSc Robotics & Geodetic Engineering% % [email protected] % % AlirezaAhmadi.xyz % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%*/ // optimizations... // 1. jacobianT matricescan be generate in kernel at the smae time #include "nonRigidICP.h" // #define DEBUG // #define DEBUG_DefGraph // #define LOG_EN #define LOG_MAX 100 namespace DynaMap{ namespace solver{ nonRigidICP::nonRigidICP(void){} nonRigidICP::~nonRigidICP(void){ Free(); } void nonRigidICP::init(geometry::defGraph &warpField, Properties &_Prop, int problemSize){ alpha = 0; beta = 0; n = 0; m = 0; k = 0; prevError = 10; Mdata = problemSize; Prop = _Prop; alpha = 0; beta = 0; n = 0; m = 0; k = 0; // std::cout << "Solver init.... " << std::endl; } void nonRigidICP::Free(void){ hipDeviceSynchronize(); hipFree(cloud->points); hipFree(cloud->normals); hipFree(cloud); } /*************************************************************************************************/ __device__ math::dualQuat nonRigidICP::getPDQGradient(math::dualQuat& dq, int paramID){ // get dq of neighbour with ID of neighbourNum and extract its SE3 from // get Euler Angles from dq math::EulerAngles euAngles = dq.getEulerAngles(); // get translation vector from dq float3 trans = dq.getTranslation(); // change i-th (paramID) in +EPSILLON value switch(paramID){ case 0:{ // roll -> x euAngles.roll += EPSILLON; break; }case 1:{ // pitch -> y euAngles.pitch += EPSILLON; break; }case 2:{ // yaw -> z euAngles.yaw += EPSILLON; break; }case 3:{ // trans -> x trans.x += EPSILLON; break; }case 4:{ // trans -> y trans.y += EPSILLON; break; }case 5:{ // trans -> z trans.z += EPSILLON; break; } } // get dq form again math::dualQuat pDQ(euAngles, trans); return pDQ; } __device__ math::dualQuat nonRigidICP::getNDQGradient(math::dualQuat& dq, int paramID){ // get Euler Angles from dq math::EulerAngles euAngles = dq.getEulerAngles(); // get translation vector from dq float3 trans = dq.getTranslation(); // change i-th (paramID) in +EPSILLON value switch(paramID){ case 0:{ // roll -> x euAngles.roll -= EPSILLON; break; }case 1:{ // pitch -> y euAngles.pitch -= EPSILLON; break; }case 2:{ // yaw -> z euAngles.yaw -= EPSILLON; break; }case 3:{ // trans -> x trans.x -= EPSILLON; break; }case 4:{ // trans -> y trans.y -= EPSILLON; break; }case 5:{ // trans -> z trans.z -= EPSILLON; break; } } // get dq form again math::dualQuat nDQ(euAngles, trans); return nDQ; } __device__ float nonRigidICP::getDQGradient(math::dualQuat *subWarpField, float* subWarpFiledWeights, blender::dqBlender &blender, int nodeIndex, int paramID, float3 normalAtSrc, float3 vertexPose){ // for(int j=0; j< KNN; j++){ // printf("id: %d, %f", j,subWarpFiledWeights[j]); // } math::dualQuat backupDQ = subWarpField[nodeIndex]; // get dq of neighbour with ID of neighbourNum and extract its SE3 from math::EulerAngles euAngles = backupDQ.getEulerAngles(); // get translation vector from dq float3 trans = backupDQ.getTranslation(); // change i-th (paramID) in -EPSILLON value switch(paramID){ case 0:{ // roll -> x euAngles.roll -= EPSILLON; break; }case 1:{ // pitch -> y euAngles.pitch -= EPSILLON; break; }case 2:{ // yaw -> z euAngles.yaw -= EPSILLON; break; }case 3:{ // trans -> x trans.x -= EPSILLON; break; }case 4:{ // trans -> y trans.y -= EPSILLON; break; }case 5:{ // trans -> z trans.z -= EPSILLON; break; } } // get dq 
form again subWarpField[nodeIndex] = math::dualQuat(euAngles, trans); // blend vertex with k neighbour float3 blendedNDQ = blender.blendVertexPose(subWarpField, subWarpFiledWeights, vertexPose); // change i-th (paramID) in +EPSILLON value switch(paramID){ case 0:{ // roll -> x euAngles.roll += 2* EPSILLON; break; }case 1:{ // pitch -> y euAngles.pitch += 2* EPSILLON; break; }case 2:{ // yaw -> z euAngles.yaw += 2* EPSILLON; break; }case 3:{ // trans -> x trans.x += 2* EPSILLON; break; }case 4:{ // trans -> y trans.y += 2* EPSILLON; break; }case 5:{ // trans -> z trans.z += 2* EPSILLON; break; } } // get dq form again subWarpField[nodeIndex] = math::dualQuat(euAngles, trans); // blend vertex with k neighbour float3 blendedPDQ = blender.blendVertexPose(subWarpField, subWarpFiledWeights, vertexPose); // get numerical derivative w.r.t the changed parameter float nGradient = distance(blendedPDQ , blendedNDQ)/(2 * EPSILLON); // reload back the original value of warpfiled DQ subWarpField[nodeIndex] = backupDQ; // if(nGradient != 0.0f)printf("%d, %f \n",pixelID, nGradient); // rerun the gradient w.r.t i-th paramete (paramID) return nGradient; } __device__ void nonRigidICP::updateDataJacobianBlock(Jacobian *_Jacob, math::dualQuat *subWarpField, float* subWarpFiledWeights, blender::dqBlender &blender, int ObsNum, int ObsID, int nodeID, int nodeIndex, float3 srcVertexPose, float3 srcVertexNormal, float3 dstVertexPose){ int m = ObsNum; int id = (nodeID * m * 6) + ObsID; // Filling Jacobian Blocks for(int paramID = 0; paramID < 6; paramID++){ int jacobIndex = paramID * m + id; _Jacob[jacobIndex] = getDQGradient(subWarpField, subWarpFiledWeights, blender, nodeIndex, paramID, srcVertexNormal, srcVertexPose); // if(_Jacob[jacobIndex] != 0.0f)printf("Jacob: %d, %f \n",jacobIndex, _Jacob[jacobIndex]); } } // builds Data term Jacobian on depth image __global__ void buildDataJacbianKernel(nonRigidICP &nonRigidSolver, geometry::defGraph &warpField, blender::dqBlender &blender, float *targetdepth, float *sourcedepth, geometry::PointCloudXYZ &cloud, rgbdSensor sensor, float4x4 cuPose, Jacobian *Jacob, Jacobian *residuals) { int index = blockIdx.x * blockDim.x + threadIdx.x; int stride = blockDim.x * gridDim.x; int size = sensor.rows * sensor.cols; // for each pixel in predicted image pixelID = idx // node Id is different that vertex ID in mesh for (int idx = index; idx < size; idx += stride) { if(sourcedepth[idx] > 2.0 || sourcedepth[idx] < 0.03 || targetdepth[idx] > 2.0 || targetdepth[idx] < 0.03 || cloud.normals[idx] == make_float3(0,0,0)) continue; // project predicted image pixels to 3D-space float3 vc = getPoint3d(idx, sourcedepth[idx], sensor); // todo... make sure transform is correct float3 nc = cloud.normals[idx]; // todo ....update normals.... // get the corresponding vertex in live-frame depth image float3 vl = cuPose * getPoint3d(idx, targetdepth[idx], sensor); // todo... 
make sure transform is correct // update residuals residuals[idx] = DynaMap::dot(nc, (vc - vl)); // fill jacobian blocks w.r.t K nodes affecting spceific pixels on the depth map math::dualQuat subWarpField[KNN]; float subWarpFiledWeights[KNN] = {0.0f}; for(int nodeIndex = 0; nodeIndex < KNN; nodeIndex++){ // form J (update Ji block in each thread) int nodeID = 0; if(warpField.KDTREE){ nodeID = idx * warpField.nNum + nodeIndex; }else{ nodeID = idx * warpField.nodeNum + nodeIndex; } subWarpField[nodeIndex] = warpField.nodes[nodeID].dq; subWarpFiledWeights[nodeIndex] = warpField.visibleNWeights[nodeID]; // for(int j=0; j< KNN; j++){ // printf("id: %d, %f", j, subWarpFiledWeights[j]); // } } for(int nodeIndex = 0; nodeIndex < KNN; nodeIndex++){ int tmpID = 0; if(warpField.KDTREE){ tmpID = idx * warpField.nNum + nodeIndex; }else{ tmpID = idx * warpField.nodeNum + nodeIndex; } int nodeID = warpField.visibleNodeIds[tmpID]; // form J (update Ji block in each thread) // nonRigidSolver.updateDataJacobianBlock(Jacob, // output Jaboian (in each call one block gets updated) // subWarpField, // sub-WarpField (Deformation Graph) // subWarpFiledWeights, // blender, // current blending status // sensor.rows * sensor.cols, // Observations Num // idx, // pixel ID // nodeID, // nodeIndex, // near graphNode ID (in query) // vc, // vertex of current pixel // nc, // normal at current vertex position // vl); // corresponding vertex in live-frame } } } // builds Data term Jacobian on target mesh __global__ void buildDataJacbianKernel(nonRigidICP &nonRigidSolver, geometry::defGraph &warpField, blender::dqBlender &blender, geometry::MeshSTD &targetMesh, float4x4 cuPose, Jacobian *Jacob, Jacobian *residuals) { int index = blockIdx.x * blockDim.x + threadIdx.x; int stride = blockDim.x * gridDim.x; int size = targetMesh.verticesNum; // for each vertex target mesh // node Id is different that vertex ID in mesh for (int idx = index; idx < size; idx += stride) { // invoke 3D position and normal of vertex in source mesh float3 vc = warpField.defGraphMesh.vertices[idx].position; // todo ....camera pose update ?.... float3 nc = warpField.defGraphMesh.vertices[idx].normal; // todo ....update normals ?.... // get the corresponding vertex in target mesh // todo... 
need correspondance // (for now as we now they are same) also ActiveNodesDistances and Ids can be used float3 vl = targetMesh.vertices[idx].position; // update residuals residuals[idx] = DynaMap::dot(nc, (vc - vl)); // pick K nearest neighbour dual-quaternions and weights from main graph math::dualQuat subWarpField[KNN]; float subWarpFiledWeights[KNN] = {0.0f}; for(int nodeIndex = 0; nodeIndex < KNN; nodeIndex++){ // form J (update Ji block in each thread) int nodeID = 0; if(warpField.KDTREE){ nodeID = idx * warpField.nNum + nodeIndex; }else{ nodeID = idx * warpField.nodeNum + nodeIndex; } subWarpField[nodeIndex] = warpField.nodes[nodeID].dq; subWarpFiledWeights[nodeIndex] = warpField.visibleNWeights[nodeID]; } // fill jacobian blocks w.r.t K nodes affecting spceific pixels on the depth map for(int nodeIndex = 0; nodeIndex < KNN; nodeIndex++){ int tmpID = 0; if(warpField.KDTREE){ tmpID = idx * warpField.nNum + nodeIndex; }else{ tmpID = idx * warpField.nodeNum + nodeIndex; } int nodeID = warpField.visibleNodeIds[tmpID]; // form J (update Ji block in each thread) nonRigidSolver.updateDataJacobianBlock(Jacob, // output Jaboian (in each call one block gets updated) subWarpField, // sub-WarpField (Deformation Graph) subWarpFiledWeights, blender, // current blending status targetMesh.verticesNum, // Observations Num idx, // Observation ID -> pixelID or vertexID nodeID, nodeIndex, // near graphNode ID (in query) vc, // vertex of current pixel nc, // normal at current vertex position vl); // corresponding vertex in live-frame } } } /*************************************************************************************************/ __device__ void nonRigidICP::updateRegJacobianBlock(Jacobian *_Jacob, geometry::defGraph &warpField, int nodeID){ // gets position of current neighbor node float3 di = warpField.nodes[nodeID].vertex.position; // get number of entier nodes in deformation graph int n = warpField.nodeNum; int id = (6 * 3 * n * nodeID) + 3 * nodeID; // Jacobian Blocks for main node // First row _Jacob[0 * 3 * warpField.nodeNum + id] = 0.0f; _Jacob[1 * 3 * warpField.nodeNum + id] = di.z; _Jacob[2 * 3 * warpField.nodeNum + id] = -di.y; _Jacob[3 * 3 * warpField.nodeNum + id] = 1.0f; _Jacob[4 * 3 * warpField.nodeNum + id] = 0.0f; _Jacob[5 * 3 * warpField.nodeNum + id] = 0.0f; // Second row _Jacob[0 * 3 * warpField.nodeNum + id + 1] = -di.z; _Jacob[1 * 3 * warpField.nodeNum + id + 1] = 0.0f; _Jacob[2 * 3 * warpField.nodeNum + id + 1] = di.x; _Jacob[3 * 3 * warpField.nodeNum + id + 1] = 0.0f; _Jacob[4 * 3 * warpField.nodeNum + id + 1] = 1.0f; _Jacob[5 * 3 * warpField.nodeNum + id + 1] = 0.0f; // Third row _Jacob[0 * 3 * warpField.nodeNum + id + 2] = di.y; _Jacob[1 * 3 * warpField.nodeNum + id + 2] = -di.x; _Jacob[2 * 3 * warpField.nodeNum + id + 2] = 0.0f; _Jacob[3 * 3 * warpField.nodeNum + id + 2] = 0.0f; _Jacob[4 * 3 * warpField.nodeNum + id + 2] = 0.0f; _Jacob[5 * 3 * warpField.nodeNum + id + 2] = 1.0f; for(int n = 0; n < KNN; n++){ // gets the ID of the neighbor list for current node int neighborID = warpField.nodes[nodeID].nIds[n]; // if(nodeID == 0)printf("NodeID: %d, nID: %d \n", nodeID, neighborID); // gets position of selected neighbor node of currect node float3 dj = warpField.nodes[neighborID].vertex.position; // id of neighbour ???!!! 
// printf("%d, %d, %d, %d, %d\n",(id + 0 * n + 0 + nodeID), (id + 0 * n + 0 + neighborID), neighborID, id, NodeNeighbor); int id = (6 * 3 * warpField.nodeNum * neighborID) + nodeID * 3 ; //********************************************************// // Jacobian Blocks for each node neighbour // First row _Jacob[0 * 3 * warpField.nodeNum + id] = 0; _Jacob[1 * 3 * warpField.nodeNum + id] = dj.z; _Jacob[2 * 3 * warpField.nodeNum + id] = -dj.y; _Jacob[3 * 3 * warpField.nodeNum + id] = 1; _Jacob[4 * 3 * warpField.nodeNum + id] = 0; _Jacob[5 * 3 * warpField.nodeNum + id] = 0; // Second row _Jacob[0 * 3 * warpField.nodeNum + id + 1] = -dj.z; _Jacob[1 * 3 * warpField.nodeNum + id + 1] = 0; _Jacob[2 * 3 * warpField.nodeNum + id + 1] = dj.x; _Jacob[3 * 3 * warpField.nodeNum + id + 1] = 0; _Jacob[4 * 3 * warpField.nodeNum + id + 1] = 1; _Jacob[5 * 3 * warpField.nodeNum + id + 1] = 0; // Third row _Jacob[0 * 3 * warpField.nodeNum + id + 2] = dj.y; _Jacob[1 * 3 * warpField.nodeNum + id + 2] = -dj.x; _Jacob[2 * 3 * warpField.nodeNum + id + 2] = 0; _Jacob[3 * 3 * warpField.nodeNum + id + 2] = 0; _Jacob[4 * 3 * warpField.nodeNum + id + 2] = 0; _Jacob[5 * 3 * warpField.nodeNum + id + 2] = 1; } } // computes and returns Regularization residuals for each Node of derGraph specified with nodeID __device__ float3 nonRigidICP::getRegResiduals(geometry::defGraph &warpField, int nodeID){ float3 result; // vertex of neigbhour node j // float3 vi = warpField.nodes[nodeID].vertex.position; // Transformation of target node i float4x4 Ti = warpField.nodes[nodeID].dq.getTransformation(); for(int cnt = 0; cnt < KNN; cnt++){ // gets the neigbhour id j of target node i int neighborID = warpField.nodes[nodeID].nIds[cnt]; // vertex of neigbhour node j float3 vj = warpField.nodes[neighborID].vertex.position; // Transformation of neigbhour node j float4x4 Tj = warpField.nodes[neighborID].dq.getTransformation(); // weight of neigbhour node j float wij = fmax(warpField.nodes[nodeID].nWeights[cnt], warpField.nodes[neighborID].nWeights[cnt]); // todo... Huber Penalty should be add too ... result += wij * make_float3((Ti * make_float4(vj, 1.0f)) - (Tj * make_float4(vj, 1.0f))); // if(nodeID == 0)printf("%f, %f, %f \n", result.x, result.y, result.z); } return result; } // builds Regularization term Jacobian __global__ void buildRegJacbianKernel(nonRigidICP &nonRigidSolver, geometry::defGraph &warpField, Jacobian *Jacob, Jacobian *residuals) { int index = blockIdx.x * blockDim.x + threadIdx.x; int stride = blockDim.x * gridDim.x; int size = warpField.nodeNum; // for each node nodeID = idx // node Id is different that vertex ID in mesh for (int idx = index; idx < size; idx += stride) { nonRigidSolver.updateRegJacobianBlock(Jacob, warpField, idx); // todo... 
needs to be checked float3 residual = nonRigidSolver.getRegResiduals(warpField, idx); residuals[3 * idx ] = residual.x; residuals[3 * idx + 1] = residual.y; residuals[3 * idx + 2] = residual.z; // printf("Residuals ->> %d: res: %f\n",idx, residuals[idx]); } } __global__ void initIdentityGPU(float *matrix, int numR, int numC, float scalar) { int index = blockIdx.x * blockDim.x + threadIdx.x; int stride = blockDim.x * gridDim.x; int size = numR * numC; for (int idx = index; idx < size; idx += stride) { int x = static_cast<float>(idx / numC); int y = static_cast<float>(idx - numC * x); if(y < numR && x < numC) { if(x == y) matrix[idx] = scalar; else matrix[idx] = 0; } } } /*************************************************************************************************/ // build Jabocians and Non-Linear system and Solve LU factorization for graph to live-frame image void nonRigidICP::solve(geometry::defGraph &warpField, blender::dqBlender &blender, pyramid &targetImage, pyramid &sourceImage, Eigen::Matrix4f pose, rgbdSensor sensor){ float4x4 cuPose = float4x4(pose.data()).getTranspose(); /***************** 2D neighbour update ********************/ #ifdef DEBUG_DefGraph warpField.writeDefGraphToFile("../logs/DefGraph.txt"); #endif // todo ... update active graph nodes beforehand in gl_render // finds K nearest Active defGraph nodes on the visible scene and computes associated weights warpField.updateActiveNeighbourNodes(targetImage, sensor, cuPose); #ifdef DEBUG_DefGraph warpField.writeDefGraphToFile("../logs/DefGraph_activeNodes.txt", Mdata, 1); #endif for(int pylvl = 2; pylvl < 3; pylvl++) { // int scale = Prop.lvlScale[pylvl]; // initializing kernel papameters for (iterationNum = 1 ; iterationNum < Prop.lvlIterationNum[pylvl]; iterationNum++) { float4x4 cuPose = float4x4(pose.data()).getTranspose(); // sourceImage.getNormalsfromDepthImage(normals); hipMallocManaged(&cloud, sizeof(geometry::PointCloudXYZ)); hipMallocManaged(&cloud->points, sizeof(geometry::PointXYZ) * Mdata); hipMallocManaged(&cloud->normals, sizeof(geometry::NormalXYZ) * Mdata); targetImage.getPointCloudXYZ(*cloud, 1); targetImage.getNormalsfromVertices(*cloud); /******************** JD^T * JD ***************************/ /************** rData -> data Residuals *******************/ hipMallocManaged(&dataJacob, sizeof(Jacobian) * warpField.nodeNum * 6 * Mdata); hipMallocManaged(&rData, sizeof(Jacobian) * Mdata); hipDeviceSynchronize(); // Reseting Jacobin values hipMemset(rData, 0, sizeof(Jacobian) * Mdata); hipMemset(dataJacob, 0, sizeof(Jacobian) * warpField.nodeNum * 6 * Mdata); hipDeviceSynchronize(); #ifdef LOG_EN std::cout << "* dataJ" << std::endl; std::cout << "* rData" << std::endl; #endif int threads_per_block = 64; int thread_blocks =(Mdata + threads_per_block - 1) / threads_per_block; hipLaunchKernelGGL(( buildDataJacbianKernel), dim3(thread_blocks), dim3(threads_per_block), 0, 0, *this, warpField, blender, targetImage.depth, sourceImage.depth, *cloud, targetImage.sensor, cuPose, dataJacob, rData); hipDeviceSynchronize(); // write dataJacob into file for debug #ifndef DEBUG m = Mdata; n = 6 * warpField.nodeNum; writeMatrixToTxt("../logs/dataJacob.txt", dataJacob, sizeof(Jacobian), m, n, 1, 0, LOG_MAX); #endif // write rData into file for debug #ifndef DEBUG m = Mdata; n = 1; writeMatrixToTxt("../logs/rData.txt", rData, sizeof(Jacobian), m, n, 1, 0, LOG_MAX); #endif /******************** JR^T * JR ***************************/ /************* rReg -> data Residuals *********************/ hipMallocManaged(&regJacob, 
sizeof(Jacobian) * 3 * warpField.nodeNum * 6 * warpField.nodeNum); hipMallocManaged(&rReg, sizeof(Jacobian) * 3 * warpField.nodeNum); hipDeviceSynchronize(); // Reseting Jacobin values hipMemset(rReg, 0, sizeof(Jacobian) * 3 * warpField.nodeNum); hipMemset(regJacob, 0, sizeof(Jacobian) * 3 * warpField.nodeNum * 6 * warpField.nodeNum); hipDeviceSynchronize(); #ifdef LOG_EN std::cout << "* regJ" << std::endl; std::cout << "* rReg" << std::endl; #endif threads_per_block = 64; thread_blocks =(warpField.nodeNum + threads_per_block - 1) / threads_per_block; // std::cout << "<<< buildRegJacbianKernel >>> threadBlocks: "<< thread_blocks << // ", threadPerBlock: " << threads_per_block << // ", NODE_NUM: " << warpField.nodeNum << // ", visibleNodesNum: " << warpField.visibleNodesNum << // std::endl; // build Jacobian fro Regularization term in Error Function hipLaunchKernelGGL(( buildRegJacbianKernel), dim3(thread_blocks), dim3(threads_per_block), 0, 0, *this, warpField, regJacob, rReg); hipDeviceSynchronize(); // write regJacob into file for debug #ifdef DEBUG m = 3 * warpField.nodeNum; n = 6 * warpField.nodeNum; writeMatrixToTxt("../logs/regJacob.txt", regJacob, sizeof(Jacobian), m, n, 1, 0, LOG_MAX); #endif // write rReg into file for debug #ifdef DEBUG m = 3 * warpField.nodeNum; n = 1; writeMatrixToTxt("../logs/rReg.txt", rReg, sizeof(Jacobian), m, n, 1, 0, LOG_MAX); #endif #ifdef LOG_EN std::cout << "buildLinearSystem... " << std::endl; #endif buildLinearSystem(warpField); #ifdef LOG_EN std::cout << "Error evaluation ... " << std::endl; #endif currError = 0.0f; for(int cnt = 0; cnt < warpField.nodeNum; cnt++){ if(isnan(rData[cnt]))continue; currError += pow(rData[cnt], 2); } for(int cnt = 0; cnt < warpField.nodeNum * 3; cnt++){ if(isnan(rReg[cnt]))continue; currError += LAMBDA * pow(rReg[cnt], 2); } float change = prevError - currError; std::cout << "iteration: " << iterationNum << ", Error: " << currError << ", change: " << change << std::endl; if(change >= Prop.minIncrement){ prevError = currError; blender.blendMesh(warpField.defGraphMesh, warpField); }else if(change <= 0.0f){ std::cout << " ****** Break-out !!!! ****** "<< std::endl; break; }else{ std::cout << " ICP done ..."<< std::endl; break; } hipFree(rData); hipFree(rReg); } } } // build Jabocians and Non-Linear system and Solve LU factorization for graph to live-frame Mesh void nonRigidICP::solve(geometry::defGraph &warpField, blender::dqBlender &blender, geometry::MeshSTD &targetMesh, Eigen::Matrix4f pose){ float4x4 cuPose = float4x4(pose.data()).getTranspose(); /***************** 2D neighbour update ********************/ #ifdef DEBUG_DefGraph warpField.writeDefGraphToFile("../logs/DefGraph.txt"); #endif // todo ... update active graph nodes beforehand in gl_render // finds K nearest Active defGraph nodes on the visible scene and computes associated weights warpField.updateActiveNeighbourNodes(targetMesh, cuPose); #ifdef DEBUG_DefGraph warpField.writeDefGraphToFile("../logs/DefGraph_activeNodes.txt",targetMesh.verticesNum,1); #endif // maybe pyramid can be used too... 
for(int pylvl = 2; pylvl < 3; pylvl++) { //int scale = Prop.lvlScale[pylvl]; // place to add hierarcical levels of mesh (generating on place) // initializing kernel papameters for (iterationNum = 1 ; iterationNum < Prop.lvlIterationNum[pylvl]; iterationNum++) { /******************** JD^T * JD ***************************/ /************** rData -> data Residuals *******************/ hipMallocManaged(&rData, sizeof(Jacobian) * Mdata); hipMallocManaged(&dataJacob, sizeof(Jacobian) * warpField.nodeNum * 6 * Mdata); hipDeviceSynchronize(); // Reseting Jacobin values hipMemset(rData, 0, sizeof(Jacobian) * Mdata); hipMemset(dataJacob, 0, sizeof(Jacobian) * warpField.nodeNum * 6 * Mdata); hipDeviceSynchronize(); #ifdef LOG_EN std::cout << "* dataJ" << std::endl; std::cout << "* rData" << std::endl; #endif int threads_per_block = 64; int thread_blocks =(Mdata + threads_per_block - 1) / threads_per_block; // std::cout << "<<< buildDataJacbianKernel >>> threadBlocks: "<< thread_blocks << // ", threadPerBlock: " << threads_per_block << // ", NODE_NUM: " << warpField.nodeNum << // ", visibleNodesNum: " << warpField.visibleNodesNum << // std::endl; // build Jacobian for data term in Error Fucntion // for(int cnt=0;cnt<warpField.nodeNum; cnt++){ // std::cout << "index: " << cnt << ", " << warpField.nodes[cnt].dq << std::endl; // } // for(int cnt = 0; cnt < targetMesh.verticesNum; cnt ++){ // for(int j =0; j<warpField.nNum; j++){ // int nidx = cnt * warpField.nNum + j; // printf("id: %d, j:%d, %d, %f, %f \n", cnt, j , warpField.visibleNodeIds[nidx], warpField.visibleNDistances[nidx], warpField.visibleNWeights[nidx]); // } // } hipLaunchKernelGGL(( buildDataJacbianKernel), dim3(thread_blocks), dim3(threads_per_block), 0, 0, *this, warpField, blender, targetMesh, cuPose, dataJacob, rData); hipDeviceSynchronize(); // write dataJacob into file for debug #ifdef DEBUG m = Mdata; n = 6 * warpField.nodeNum; writeMatrixToTxt("../logs/dataJacob.txt", dataJacob, sizeof(Jacobian), m, n, 1, 0, m); #endif // write rData into file for debug #ifdef DEBUG m = Mdata; n = 1; writeMatrixToTxt("../logs/rData.txt", rData, sizeof(Jacobian), m, n, 1, 0, m); #endif /******************** JR^T * JR ***************************/ /************* rReg -> data Residuals *********************/ hipMallocManaged(&regJacob, sizeof(Jacobian) * 3 * warpField.nodeNum * 6 * warpField.nodeNum); hipMallocManaged(&rReg, sizeof(Jacobian) * 3 * warpField.nodeNum); hipDeviceSynchronize(); // Reseting Jacobin values hipMemset(rReg, 0, sizeof(Jacobian) * 3 * warpField.nodeNum); hipMemset(regJacob, 0, sizeof(Jacobian) * 3 * warpField.nodeNum * 6 * warpField.nodeNum); hipDeviceSynchronize(); #ifdef LOG_EN std::cout << "* regJ" << std::endl; std::cout << "* rReg" << std::endl; #endif threads_per_block = 64; thread_blocks =(warpField.nodeNum + threads_per_block - 1) / threads_per_block; // std::cout << "<<< buildRegJacbianKernel >>> threadBlocks: "<< thread_blocks << // ", threadPerBlock: " << threads_per_block << // ", NODE_NUM: " << warpField.nodeNum << // ", visibleNodesNum: " << warpField.visibleNodesNum << // std::endl; // build Jacobian fro Regularization term in Error Function hipLaunchKernelGGL(( buildRegJacbianKernel), dim3(thread_blocks), dim3(threads_per_block), 0, 0, *this, warpField, regJacob, rReg); hipDeviceSynchronize(); // write regJacob into file for debug #ifdef DEBUG m = 3 * warpField.nodeNum; n = 6 * warpField.nodeNum; writeMatrixToTxt("../logs/regJacob.txt", regJacob, sizeof(Jacobian), m, n, 1, 0, m); #endif // write rReg into file for 
debug #ifdef DEBUG m = 3 * warpField.nodeNum; n = 1; writeMatrixToTxt("../logs/rReg.txt", rReg, sizeof(Jacobian), m, n, 1, 0, m); #endif #ifdef LOG_EN std::cout << "buildLinearSystem... " << std::endl; #endif buildLinearSystem(warpField); #ifdef LOG_EN std::cout << "Error evaluation ... " << std::endl; #endif currError = 0.0f; for(int cnt = 0; cnt < warpField.nodeNum; cnt++){ if(isnan(rData[cnt]))continue; currError += pow(rData[cnt], 2); } for(int cnt = 0; cnt < warpField.nodeNum * 3; cnt++){ if(isnan(rReg[cnt]))continue; currError += LAMBDA * pow(rReg[cnt], 2); } float change = prevError - currError; std::cout << "iteration: " << iterationNum << ", Error: " << currError << ", change: " << change << std::endl; if(change >= Prop.minIncrement){ prevError = currError; blender.blendMesh(warpField.defGraphMesh, warpField); }else if(change <= 0.0f){ std::cout << " ****** Break-out !!!! ****** "<< std::endl; break; }else{ std::cout << " ICP done ..."<< std::endl; break; } } } } // build Jabocians and Non-Linear system and Solve LU factorization Common steps void nonRigidICP::buildLinearSystem(geometry::defGraph &warpField){ /******************** JD^T * JD ***********************/ hipMallocManaged(&JdTJd, sizeof(Jacobian) * 6 * warpField.nodeNum * 6 * warpField.nodeNum); hipDeviceSynchronize(); hipMemset(JdTJd, 0, sizeof(Jacobian) * 6 * warpField.nodeNum * 6 * warpField.nodeNum); hipDeviceSynchronize(); #ifdef LOG_EN std::cout << "* JD^T * JD" << std::endl; #endif m = 6 * warpField.nodeNum; n = Mdata; cuBlasMatrixMulTrans(dataJacob, dataJacob, JdTJd, m, n, m); // write JdTJd into file for debug #ifdef DEBUG writeMatrixToTxt("../logs/JdTJd.txt", JdTJd, sizeof(Jacobian), m, m, 1, 0, LOG_MAX); #endif /******************** bData ***************************/ // from b = J^T * r hipMallocManaged(&bData, sizeof(Jacobian) * 6 * warpField.nodeNum); hipDeviceSynchronize(); hipMemset(bData, 0, sizeof(Jacobian) * 6 * warpField.nodeNum); hipDeviceSynchronize(); #ifdef LOG_EN std::cout << "* bData" << std::endl; #endif m = 6 * warpField.nodeNum; k = Mdata; n = 1; cuBlasMatrixMulTrans(dataJacob, rData, bData, m, k, n); // write bData into file for debug #ifdef DEBUG writeMatrixToTxt("../logs/bData.txt", bData, sizeof(Jacobian), m, n, 1, 0, LOG_MAX); #endif hipFree(dataJacob); /******************** JR^T * JR ***********************/ hipMallocManaged(&JrTJr, sizeof(Jacobian) * 6 * warpField.nodeNum * 6 * warpField.nodeNum); hipDeviceSynchronize(); hipMemset(JrTJr, 0, sizeof(Jacobian) * 6 * warpField.nodeNum * 6 * warpField.nodeNum); hipDeviceSynchronize(); #ifdef LOG_EN std::cout << "* JR^T * JR" << std::endl; #endif alpha = 1.0; beta = 0.0; m = 6 * warpField.nodeNum; n = 3 * warpField.nodeNum; cuBlasMatrixMulTrans(regJacob, regJacob, JrTJr, m, n, m); // write JrTJr into file for debug #ifdef DEBUG writeMatrixToTxt("../logs/JrTJr.txt", JrTJr, sizeof(Jacobian), m, m, 1, 0, LOG_MAX); #endif /******************** bReg ***************************/ // from b = J^T * r hipMallocManaged(&bReg, sizeof(Jacobian) * 6 * warpField.nodeNum); hipDeviceSynchronize(); hipMemset(bReg, 0, sizeof(Jacobian) * 6 * warpField.nodeNum); hipDeviceSynchronize(); #ifdef LOG_EN std::cout << "* bReg" << std::endl; #endif m = 6 * warpField.nodeNum; k = 3 * warpField.nodeNum; n = 1; cuBlasMatrixMulTrans(regJacob, rReg, bReg, m, k, n); // write bReg into file for debug #ifdef DEBUG writeMatrixToTxt("../logs/bReg.txt", bReg, sizeof(Jacobian), m, n, 1, 0, LOG_MAX); #endif hipFree(regJacob); /******************** A ***************************/ // 
A = JT * J #ifdef LOG_EN std::cout << "* JT * J" << std::endl; #endif hipMallocManaged(&JTJ, sizeof(Jacobian) * 6 * warpField.nodeNum * 6 * warpField.nodeNum); hipDeviceSynchronize(); hipMemset(JTJ, 0, sizeof(Jacobian) * 6 * warpField.nodeNum * 6 * warpField.nodeNum); hipDeviceSynchronize(); // J^T * J = JD^T * JD + LAMBDA * JReg^T * JReg alpha = 1.0f; beta = LAMBDA; m = 6 * warpField.nodeNum; cuBlasMatrixSum(alpha, JdTJd, beta, JrTJr, JTJ, m); // write JTJ into file for debug #ifdef DEBUG writeMatrixToTxt("../logs/JTJ.txt", JTJ, sizeof(Jacobian), m, m, 1, 0, LOG_MAX); #endif hipFree(JdTJd); hipFree(JrTJr); /******************** b ***************************/ // b = bData + LAMBDA * bReg #ifdef LOG_EN std::cout << "* b" << std::endl; #endif hipMallocManaged(&b, sizeof(Jacobian) * 6 * warpField.nodeNum); hipDeviceSynchronize(); hipMemset(b, 0, sizeof(Jacobian) * 6 * warpField.nodeNum); hipDeviceSynchronize(); alpha = -1.0f; beta = LAMBDA; m = 6 * warpField.nodeNum; n = 1; cuBlasVectorSum(alpha, bData, beta, bReg, b, m); // write b into file for debug #ifdef DEBUG writeMatrixToTxt("../logs/b.txt", b, sizeof(Jacobian), m, n, 1, 0, LOG_MAX); #endif hipFree(bData); hipFree(bReg); /****************Solve Non-Linear System***********/ #ifdef LOG_EN std::cout << "Solve Linear system ... " << std::endl; #endif hipMallocManaged(&result, sizeof(Jacobian) * 6 * warpField.nodeNum); hipDeviceSynchronize(); hipMemset(result, 0, sizeof(Jacobian) * 6 * warpField.nodeNum); // todo... correct size hipDeviceSynchronize(); solveSparceLinearSystem(warpField); // // solveLinearSystem(warpField); // // cuSolveLinearSystem(warpField); hipFree(JTJ); hipFree(b); hipFree(result); } void nonRigidICP::cuSolveLinearSystem(geometry::defGraph &warpField){ /******************** cuSolver **************************/ // create Solver handler hipsolverDnCreate(&cuSolverHandle); m = 6 * warpField.nodeNum; n = 6 * warpField.nodeNum; int Lwork = 0; hipsolverDnSgetrf_bufferSize(cuSolverHandle, m, n, JTJ, m, &Lwork); std::cout << "* bufferSize ... , Lwork: " << Lwork << std::endl; // set up workspace float* d_workspace; int* d_ipiv, *d_info; int h_info = 0; int lda = m; hipMalloc(&d_workspace, Lwork * sizeof(float)); hipMalloc(&d_ipiv, min(m,n) * sizeof(int)); hipMalloc(&d_info, sizeof(int)); hipDeviceSynchronize(); // decomposition std::cout << "* LU Decomposition of A ... "<< std::endl; cuSolverStatus = hipsolverDnSgetrf(cuSolverHandle, m, n, JTJ, lda, d_workspace, d_ipiv, d_info); hipMemcpy(&h_info, d_info, sizeof(int), hipMemcpyDeviceToHost); if(cuSolverStatus != CUSOLVER_STATUS_SUCCESS) { std::cerr<<"failed to LU, info = "<<h_info<<std::endl; } else { std::cerr<<"done LU, info = "<<h_info<<std::endl; } // solve int ldb = n; std::cout << "* Solving op(A)x = b " << std::endl; cuSolverStatus = hipsolverDnSgetrs(cuSolverHandle, HIPBLAS_OP_N, n, 1, JTJ, n, d_ipiv, b, ldb, d_info); hipMemcpy(&h_info, d_info, sizeof(int), hipMemcpyDeviceToHost); hipDeviceSynchronize(); if(cuSolverStatus != CUSOLVER_STATUS_SUCCESS) { std::cerr<<"failed to solve, info = "<<h_info<<std::endl; } else { std::cerr<<"solved, info = "<<h_info<<std::endl; } if (d_info ) { hipFree(d_info); } if (d_workspace) { hipFree(d_workspace); } if (d_ipiv ) { hipFree(d_ipiv);} const float _alfa = 1.0f; const float _beta = 1.0f; m = 6 * warpField.nodeNum; n = 1; hipMemset(result, 0, sizeof(Jacobian) * 6 * warpField.nodeNum); // todo.... result value is not correct!!! 
hipDeviceSynchronize(); cuBlasVectorSum(_alfa, result, _beta, b, result, m); warpField.updateNodesDQ(result); // write result into file for debug #ifdef DEBUG writeMatrixToTxt("../logs/result.txt", result, sizeof(Jacobian), m, n, 1, 0, m); #endif } void nonRigidICP::solveLinearSystem(geometry::defGraph &warpField){ /******************** Eigen Chol. Decomposition**************************/ m = 6 * warpField.nodeNum; n = 6 * warpField.nodeNum; Jacobian *h_JTJ = new Jacobian[m * n]; Jacobian *h_JTr = new Jacobian[m * 1]; memset(h_JTJ, 0, sizeof(Jacobian) * m * n); memset(h_JTr, 0, sizeof(Jacobian) * m * 1); hipMemcpy(h_JTJ, JTJ, sizeof(Jacobian) * m * n, hipMemcpyDeviceToHost); hipMemcpy(h_JTr, b, sizeof(Jacobian) * m * 1, hipMemcpyDeviceToHost); hipDeviceSynchronize(); Eigen::Matrix<double, Eigen::Dynamic, Eigen::Dynamic> A; A = (Eigen::Map<Eigen::Matrix<float, 6*NODE_NUM, 6*NODE_NUM> >(h_JTJ)).cast <double> (); Eigen::Matrix<double, Eigen::Dynamic, Eigen::Dynamic> _b; _b = (Eigen::Map<Eigen::Matrix<float, 6*NODE_NUM, 1> >(h_JTr)).cast <double> (); double scaling = 1 / A.maxCoeff(); _b *= scaling; A *= scaling; float alfa = Prop.regularization * A.maxCoeff(); A = A + alfa * Eigen::MatrixXd::Identity(6*NODE_NUM, 6*NODE_NUM) * 1/iterationNum; // std::cout << "Eigen Solve LTVL" << std::endl; Eigen::Matrix<double, 6*NODE_NUM, 1> increment = A.ldlt().solve(_b); Jacobian *h_increment = new Jacobian[m * 1]; memset(h_increment, 0, sizeof(Jacobian) * m * 1); for(int cnt=0; cnt<m; cnt++){ h_increment[cnt] = increment[cnt]; } const float _alfa = 1.0f; const float _beta = 1.0f; m = 6 * warpField.nodeNum; n = 1; hipMemset(result, 0, sizeof(Jacobian) * m * n); // todo.... result value is not correct!!! hipMemcpy(result, h_increment, sizeof(Jacobian) * m * n, hipMemcpyHostToDevice); hipDeviceSynchronize(); // std::cout << "apply increments ... " << std::endl; cuBlasVectorSum(_alfa, result, _beta, b, result, m); // std::cout << "updating WarpField ... " << std::endl; warpField.updateNodesDQ(b); // std::cout << "update done ... 
" << std::endl; // write result into file for debug #ifdef DEBUG writeMatrixToTxt("../logs/result.txt", result, sizeof(Jacobian), m, n, 1, 0, m); #endif } void nonRigidICP::solveSparceLinearSystem(geometry::defGraph &warpField){ // // --- Initialize cuSPARSE hipsparseHandle_t handle; hipsparseCreate(&handle); const int Nrows = 6 * warpField.nodeNum; // --- Number of rows const int Ncols = 6 * warpField.nodeNum; // --- Number of columns const int N = Nrows; float *identityMat; hipMalloc(&identityMat, Nrows * Ncols * sizeof(float)); hipMemset(identityMat, 0, Nrows * Ncols * sizeof(float)); hipDeviceSynchronize(); int threads_per_block = 64; int thread_blocks =(Nrows * Ncols + threads_per_block - 1) / threads_per_block; hipLaunchKernelGGL(( initIdentityGPU) , dim3(thread_blocks), dim3(threads_per_block), 0, 0, identityMat, Nrows, Ncols, Prop.regularization/iterationNum); hipDeviceSynchronize(); alpha = 1.0f; beta = 1.0f; cuBlasVectorSum(alpha, JTJ, beta, identityMat, JTJ, Ncols); hipDeviceSynchronize(); // --- Descriptor for sparse matrix A hipsparseMatDescr_t descrA; hipsparseCreateMatDescr(&descrA); hipsparseSetMatType(descrA, HIPSPARSE_MATRIX_TYPE_GENERAL); hipsparseSetMatIndexBase(descrA, HIPSPARSE_INDEX_BASE_ZERO); int nnz = 0; // --- Number of nonzero elements in dense matrix const int lda = Nrows; // --- Leading dimension of dense matrix // --- Device side number of nonzero elements per row int *d_nnzPerVector; hipMalloc(&d_nnzPerVector, Nrows * sizeof(*d_nnzPerVector)); hipMemset(d_nnzPerVector, 0, Nrows * sizeof(*d_nnzPerVector)); hipsparseSnnz(handle, HIPSPARSE_DIRECTION_ROW, Nrows, Ncols, descrA, JTJ, lda, d_nnzPerVector, &nnz); // --- Host side number of nonzero elements per row int *h_nnzPerVector = (int *)malloc(Nrows * sizeof(*h_nnzPerVector)); hipMemcpy(h_nnzPerVector, d_nnzPerVector, Nrows * sizeof(*h_nnzPerVector), hipMemcpyDeviceToHost); // printf("Number of nonzero elements in dense matrix = %i\n\n", nnz); // for (int i = 0; i < Nrows; ++i) printf("Number of nonzero elements in row %i = %i \n", i, h_nnzPerVector[i]); // printf("\n"); // // --- Device side dense matrix float *d_A; hipMalloc(&d_A, nnz * sizeof(*d_A)); int *d_A_RowIndices; hipMalloc(&d_A_RowIndices, (Nrows + 1) * sizeof(*d_A_RowIndices)); int *d_A_ColIndices; hipMalloc(&d_A_ColIndices, nnz * sizeof(*d_A_ColIndices)); hipsparseSdense2csr(handle, Nrows, Ncols, descrA, JTJ, lda, d_nnzPerVector, d_A, d_A_RowIndices, d_A_ColIndices); // --- Host side dense matrix float *h_A = (float *)malloc(nnz * sizeof(*h_A)); int *h_A_RowIndices = (int *)malloc((Nrows + 1) * sizeof(*h_A_RowIndices)); int *h_A_ColIndices = (int *)malloc(nnz * sizeof(*h_A_ColIndices)); hipMemcpy(h_A, d_A, nnz*sizeof(*h_A), hipMemcpyDeviceToHost); hipMemcpy(h_A_RowIndices, d_A_RowIndices, (Nrows + 1) * sizeof(*h_A_RowIndices), hipMemcpyDeviceToHost); hipMemcpy(h_A_ColIndices, d_A_ColIndices, nnz * sizeof(*h_A_ColIndices), hipMemcpyDeviceToHost); // for (int i = 0; i < nnz; ++i) printf("A[%i] = %.0f ", i, h_A[i]); printf("\n"); // for (int i = 0; i < (Nrows + 1); ++i) printf("h_A_RowIndices[%i] = %i \n", i, h_A_RowIndices[i]); printf("\n"); // for (int i = 0; i < nnz; ++i) printf("h_A_ColIndices[%i] = %i \n", i, h_A_ColIndices[i]); // --- Allocating and defining dense host and device data vectors float *h_b = (float *)malloc(Nrows * sizeof(float)); hipMemcpy(h_b, b, Nrows * sizeof(float), hipMemcpyDeviceToHost); // --- Allocating the host and device side result vector float *h_x = (float *)malloc(Ncols * sizeof(float)); float *d_x; hipMalloc(&d_x, 
Ncols * sizeof(float)); // --- CUDA solver initialization cusolverSpHandle_t solver_handle; cusolverSpCreate(&solver_handle); // --- Using Cholesky factorization int singularity; cusolverSpScsrlsvcholHost(solver_handle, N, nnz, descrA, h_A, h_A_RowIndices, h_A_ColIndices, h_b, 0.000001, 0, h_x, &singularity); hipMemcpy(b, h_b, Nrows * sizeof(float), hipMemcpyHostToDevice); // printf("Showing the results...\n"); // for (int i = 0; i < N; i++) printf("%f\n", h_x[i]); Jacobian *h_increment = new Jacobian[m * 1]; memset(h_increment, 0, sizeof(Jacobian) * m * 1); const float _alfa = 1.0f; const float _beta = 1.0f; hipMemset(result, 0, sizeof(Jacobian) * 6 * warpField.nodeNum); // todo.... result value is not correct!!! hipDeviceSynchronize(); cuBlasVectorSum(_alfa, result, _beta, b, result, N); warpField.updateNodesDQ(result); // write result into file for debug #ifdef DEBUG writeMatrixToTxt("../logs/result.txt", result, sizeof(Jacobian), N, N, 1, 0, N); #endif hipFree(d_nnzPerVector); hipFree(d_A); hipFree(d_A_RowIndices); hipFree(d_A_ColIndices); hipFree(d_x); hipFree(identityMat); delete[] h_nnzPerVector; delete[] h_A; delete[] h_A_RowIndices; delete[] h_A_ColIndices; delete[] h_b; delete[] h_x; } /*************************************************************************************************/ // Multiply the arrays A and B on GPU and save the result in C // Calculate: C = op ( A ) * op ( B ) + C // (m X k) * (k X n) = (m X n) void nonRigidICP::cuBlasMatrixMul(const float *A, const float *B, float *C, const int m, const int k, const int n) { int lda = m, ldb = k, ldc = m; const float alf = 1; const float bet = 0; const float *alpha = &alf; const float *beta = &bet; // Create a handle for CUBLAS hipblasHandle_t handle; hipblasCreate(&handle); // Do the actual multiplication hipblasSgemm(handle, HIPBLAS_OP_N, HIPBLAS_OP_N, m, n, k, alpha, A, lda, B, ldb, beta, C, ldc); // Destroy the handle hipblasDestroy(handle); } // Multiply the arrays A and B on GPU and save the result in C // Calculate: C = op ( A )T * op ( B ) + C // (m X k) * (k X n) = (m X n) void nonRigidICP::cuBlasMatrixMulTrans(const float *A, const float *B, float *C, const int m, const int k, const int n) { int lda = k, ldb = k, ldc = m; const float alf = 1; const float bet = 0; const float *alpha = &alf; const float *beta = &bet; // Create a handle for CUBLAS hipblasHandle_t handle; hipblasCreate(&handle); // Do the actual multiplication hipblasSgemm(handle, HIPBLAS_OP_T, HIPBLAS_OP_N, m, n, k, alpha, A, lda, B, ldb, beta, C, ldc); // Destroy the handle hipblasDestroy(handle); } // Multiply the arrays A and B on GPU and save the result in C // Calculate: C = op ( A ) + op ( B ) // (m X k) * (k X n) = (m X n) void nonRigidICP::cuBlasMatrixSum(const float alf, const float *A, const float bet, const float *B, float *C, const int m){ int lda=m,ldb=m,ldc=m; int n = m; const float *alpha = &alf; const float *beta = &bet; // Create a handle for CUBLAS hipblasHandle_t handle; hipblasCreate(&handle); // Do the Transpose hipblasSgeam(handle, HIPBLAS_OP_N, HIPBLAS_OP_N, m, n, alpha, A, lda, beta, B, ldb, C, ldc); // Destroy the handle hipblasDestroy(handle); } // Multiply the arrays A and B on GPU and save the result in C // Calculate: C = op ( A ) + op ( B ) // (m X k) * (k X n) = (m X n) void nonRigidICP::cuBlasMatrixSumTrans(const float alf, const float *A, const float bet, const float *B, float *C, const int m){ int lda=m,ldb=m,ldc=m; int n = m; const float *alpha = &alf; const float *beta = &bet; // Create a handle for CUBLAS 
hipblasHandle_t handle; hipblasCreate(&handle); // Do the transposed summation hipblasSgeam(handle, HIPBLAS_OP_T, HIPBLAS_OP_T, m, n, alpha, A, lda, beta, B, ldb, C, ldc); // Destroy the handle hipblasDestroy(handle); } // Sum vectors A and B on GPU and save the result in C // Calculate: C = op ( A ) + op ( B ) // (m X 1) + (m X 1) = (m X 1) void nonRigidICP::cuBlasVectorSum(const float alf, const float *A, const float bet, const float *B, float *C, const int m){ int n =1; int lda=m,ldb=m,ldc=m; const float *alpha = &alf; const float *beta = &bet; // Create a handle for CUBLAS hipblasHandle_t handle; hipblasCreate(&handle); // Do the actual summation hipblasSgeam(handle, HIPBLAS_OP_N, HIPBLAS_OP_N, m, n, alpha, A, lda, beta, B, ldb, C, ldc); // Destroy the handle hipblasDestroy(handle); } // Returns the transpose of A -> C, alpha = 1, beta = 0, op(A) = A^T // Calculate: C = op ( A ) // A(m * n) void nonRigidICP::cuBlasMatrixTrans(const float *A, float *C, const int m, const int n){ int lda=n,ldb=m,ldc=m; const float alf = 1; const float bet = 0; const float *alpha = &alf; const float *beta = &bet; // Create a handle for CUBLAS hipblasHandle_t handle; hipblasCreate(&handle); // Do the Transpose hipblasSgeam(handle, HIPBLAS_OP_T, HIPBLAS_OP_N, m, n, alpha, A, lda, beta, A, ldb, C, ldc); // Destroy the handle hipblasDestroy(handle); } /*************************************************************************************************/ void nonRigidICP::writeMatrixToTxt(const char* fileName, float *matrix, size_t size, int rows, int cols, int mode, int from, int to){ float *h_matrix = new float[rows * cols]; memset(h_matrix, 0, size * rows * cols); hipMemcpy(h_matrix, matrix, size * rows * cols, hipMemcpyDeviceToHost); hipDeviceSynchronize(); std::ofstream file(fileName,std::ios::ate); if (file.is_open()){ file << fileName <<", rows: " << rows << ", cols: " << cols << std::endl; file << "{ " << std::endl; for (size_t i = from; i < to; i++){ file << "[ " << i ; for (size_t j = 0; j < cols; j++){ if(mode == 0){ // row major file << std::fixed << std::setprecision(6) << " " << h_matrix[i * cols + j]; }else if(mode ==1){ // column major file << std::fixed << std::setprecision(6) << " " << h_matrix[i + rows * j]; } } file << " ]"<< std::endl; } file << " }"<< std::endl; file.close(); } else std::cout << "Unable to open file"; delete[] h_matrix; } /*************************************************************************************************/ } // end namespace solver } // end namespace DynaMap
25a737a7f9173da1549bd92bcab6a0b6ed8b60e3.cu
/*%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % by: Alireza Ahmadi % % University of Bonn- MSc Robotics & Geodetic Engineering% % [email protected] % % AlirezaAhmadi.xyz % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%*/ // optimizations... // 1. jacobianT matricescan be generate in kernel at the smae time #include "nonRigidICP.h" // #define DEBUG // #define DEBUG_DefGraph // #define LOG_EN #define LOG_MAX 100 namespace DynaMap{ namespace solver{ nonRigidICP::nonRigidICP(void){} nonRigidICP::~nonRigidICP(void){ Free(); } void nonRigidICP::init(geometry::defGraph &warpField, Properties &_Prop, int problemSize){ alpha = 0; beta = 0; n = 0; m = 0; k = 0; prevError = 10; Mdata = problemSize; Prop = _Prop; alpha = 0; beta = 0; n = 0; m = 0; k = 0; // std::cout << "Solver init.... " << std::endl; } void nonRigidICP::Free(void){ cudaDeviceSynchronize(); cudaFree(cloud->points); cudaFree(cloud->normals); cudaFree(cloud); } /*************************************************************************************************/ __device__ math::dualQuat nonRigidICP::getPDQGradient(math::dualQuat& dq, int paramID){ // get dq of neighbour with ID of neighbourNum and extract its SE3 from // get Euler Angles from dq math::EulerAngles euAngles = dq.getEulerAngles(); // get translation vector from dq float3 trans = dq.getTranslation(); // change i-th (paramID) in +EPSILLON value switch(paramID){ case 0:{ // roll -> x euAngles.roll += EPSILLON; break; }case 1:{ // pitch -> y euAngles.pitch += EPSILLON; break; }case 2:{ // yaw -> z euAngles.yaw += EPSILLON; break; }case 3:{ // trans -> x trans.x += EPSILLON; break; }case 4:{ // trans -> y trans.y += EPSILLON; break; }case 5:{ // trans -> z trans.z += EPSILLON; break; } } // get dq form again math::dualQuat pDQ(euAngles, trans); return pDQ; } __device__ math::dualQuat nonRigidICP::getNDQGradient(math::dualQuat& dq, int paramID){ // get Euler Angles from dq math::EulerAngles euAngles = dq.getEulerAngles(); // get translation vector from dq float3 trans = dq.getTranslation(); // change i-th (paramID) in +EPSILLON value switch(paramID){ case 0:{ // roll -> x euAngles.roll -= EPSILLON; break; }case 1:{ // pitch -> y euAngles.pitch -= EPSILLON; break; }case 2:{ // yaw -> z euAngles.yaw -= EPSILLON; break; }case 3:{ // trans -> x trans.x -= EPSILLON; break; }case 4:{ // trans -> y trans.y -= EPSILLON; break; }case 5:{ // trans -> z trans.z -= EPSILLON; break; } } // get dq form again math::dualQuat nDQ(euAngles, trans); return nDQ; } __device__ float nonRigidICP::getDQGradient(math::dualQuat *subWarpField, float* subWarpFiledWeights, blender::dqBlender &blender, int nodeIndex, int paramID, float3 normalAtSrc, float3 vertexPose){ // for(int j=0; j< KNN; j++){ // printf("id: %d, %f", j,subWarpFiledWeights[j]); // } math::dualQuat backupDQ = subWarpField[nodeIndex]; // get dq of neighbour with ID of neighbourNum and extract its SE3 from math::EulerAngles euAngles = backupDQ.getEulerAngles(); // get translation vector from dq float3 trans = backupDQ.getTranslation(); // change i-th (paramID) in -EPSILLON value switch(paramID){ case 0:{ // roll -> x euAngles.roll -= EPSILLON; break; }case 1:{ // pitch -> y euAngles.pitch -= EPSILLON; break; }case 2:{ // yaw -> z euAngles.yaw -= EPSILLON; break; }case 3:{ // trans -> x trans.x -= EPSILLON; break; }case 4:{ // trans -> y trans.y -= EPSILLON; break; }case 5:{ // trans -> z trans.z -= EPSILLON; break; } } // get dq form again subWarpField[nodeIndex] = math::dualQuat(euAngles, trans); // blend vertex 
with k neighbour float3 blendedNDQ = blender.blendVertexPose(subWarpField, subWarpFiledWeights, vertexPose); // change i-th (paramID) in +EPSILLON value switch(paramID){ case 0:{ // roll -> x euAngles.roll += 2* EPSILLON; break; }case 1:{ // pitch -> y euAngles.pitch += 2* EPSILLON; break; }case 2:{ // yaw -> z euAngles.yaw += 2* EPSILLON; break; }case 3:{ // trans -> x trans.x += 2* EPSILLON; break; }case 4:{ // trans -> y trans.y += 2* EPSILLON; break; }case 5:{ // trans -> z trans.z += 2* EPSILLON; break; } } // get dq form again subWarpField[nodeIndex] = math::dualQuat(euAngles, trans); // blend vertex with k neighbour float3 blendedPDQ = blender.blendVertexPose(subWarpField, subWarpFiledWeights, vertexPose); // get numerical derivative w.r.t the changed parameter float nGradient = distance(blendedPDQ , blendedNDQ)/(2 * EPSILLON); // reload back the original value of warpfiled DQ subWarpField[nodeIndex] = backupDQ; // if(nGradient != 0.0f)printf("%d, %f \n",pixelID, nGradient); // rerun the gradient w.r.t i-th paramete (paramID) return nGradient; } __device__ void nonRigidICP::updateDataJacobianBlock(Jacobian *_Jacob, math::dualQuat *subWarpField, float* subWarpFiledWeights, blender::dqBlender &blender, int ObsNum, int ObsID, int nodeID, int nodeIndex, float3 srcVertexPose, float3 srcVertexNormal, float3 dstVertexPose){ int m = ObsNum; int id = (nodeID * m * 6) + ObsID; // Filling Jacobian Blocks for(int paramID = 0; paramID < 6; paramID++){ int jacobIndex = paramID * m + id; _Jacob[jacobIndex] = getDQGradient(subWarpField, subWarpFiledWeights, blender, nodeIndex, paramID, srcVertexNormal, srcVertexPose); // if(_Jacob[jacobIndex] != 0.0f)printf("Jacob: %d, %f \n",jacobIndex, _Jacob[jacobIndex]); } } // builds Data term Jacobian on depth image __global__ void buildDataJacbianKernel(nonRigidICP &nonRigidSolver, geometry::defGraph &warpField, blender::dqBlender &blender, float *targetdepth, float *sourcedepth, geometry::PointCloudXYZ &cloud, rgbdSensor sensor, float4x4 cuPose, Jacobian *Jacob, Jacobian *residuals) { int index = blockIdx.x * blockDim.x + threadIdx.x; int stride = blockDim.x * gridDim.x; int size = sensor.rows * sensor.cols; // for each pixel in predicted image pixelID = idx // node Id is different that vertex ID in mesh for (int idx = index; idx < size; idx += stride) { if(sourcedepth[idx] > 2.0 || sourcedepth[idx] < 0.03 || targetdepth[idx] > 2.0 || targetdepth[idx] < 0.03 || cloud.normals[idx] == make_float3(0,0,0)) continue; // project predicted image pixels to 3D-space float3 vc = getPoint3d(idx, sourcedepth[idx], sensor); // todo... make sure transform is correct float3 nc = cloud.normals[idx]; // todo ....update normals.... // get the corresponding vertex in live-frame depth image float3 vl = cuPose * getPoint3d(idx, targetdepth[idx], sensor); // todo... 
make sure transform is correct // update residuals residuals[idx] = DynaMap::dot(nc, (vc - vl)); // fill jacobian blocks w.r.t K nodes affecting spceific pixels on the depth map math::dualQuat subWarpField[KNN]; float subWarpFiledWeights[KNN] = {0.0f}; for(int nodeIndex = 0; nodeIndex < KNN; nodeIndex++){ // form J (update Ji block in each thread) int nodeID = 0; if(warpField.KDTREE){ nodeID = idx * warpField.nNum + nodeIndex; }else{ nodeID = idx * warpField.nodeNum + nodeIndex; } subWarpField[nodeIndex] = warpField.nodes[nodeID].dq; subWarpFiledWeights[nodeIndex] = warpField.visibleNWeights[nodeID]; // for(int j=0; j< KNN; j++){ // printf("id: %d, %f", j, subWarpFiledWeights[j]); // } } for(int nodeIndex = 0; nodeIndex < KNN; nodeIndex++){ int tmpID = 0; if(warpField.KDTREE){ tmpID = idx * warpField.nNum + nodeIndex; }else{ tmpID = idx * warpField.nodeNum + nodeIndex; } int nodeID = warpField.visibleNodeIds[tmpID]; // form J (update Ji block in each thread) // nonRigidSolver.updateDataJacobianBlock(Jacob, // output Jaboian (in each call one block gets updated) // subWarpField, // sub-WarpField (Deformation Graph) // subWarpFiledWeights, // blender, // current blending status // sensor.rows * sensor.cols, // Observations Num // idx, // pixel ID // nodeID, // nodeIndex, // near graphNode ID (in query) // vc, // vertex of current pixel // nc, // normal at current vertex position // vl); // corresponding vertex in live-frame } } } // builds Data term Jacobian on target mesh __global__ void buildDataJacbianKernel(nonRigidICP &nonRigidSolver, geometry::defGraph &warpField, blender::dqBlender &blender, geometry::MeshSTD &targetMesh, float4x4 cuPose, Jacobian *Jacob, Jacobian *residuals) { int index = blockIdx.x * blockDim.x + threadIdx.x; int stride = blockDim.x * gridDim.x; int size = targetMesh.verticesNum; // for each vertex target mesh // node Id is different that vertex ID in mesh for (int idx = index; idx < size; idx += stride) { // invoke 3D position and normal of vertex in source mesh float3 vc = warpField.defGraphMesh.vertices[idx].position; // todo ....camera pose update ?.... float3 nc = warpField.defGraphMesh.vertices[idx].normal; // todo ....update normals ?.... // get the corresponding vertex in target mesh // todo... 
need correspondance // (for now as we now they are same) also ActiveNodesDistances and Ids can be used float3 vl = targetMesh.vertices[idx].position; // update residuals residuals[idx] = DynaMap::dot(nc, (vc - vl)); // pick K nearest neighbour dual-quaternions and weights from main graph math::dualQuat subWarpField[KNN]; float subWarpFiledWeights[KNN] = {0.0f}; for(int nodeIndex = 0; nodeIndex < KNN; nodeIndex++){ // form J (update Ji block in each thread) int nodeID = 0; if(warpField.KDTREE){ nodeID = idx * warpField.nNum + nodeIndex; }else{ nodeID = idx * warpField.nodeNum + nodeIndex; } subWarpField[nodeIndex] = warpField.nodes[nodeID].dq; subWarpFiledWeights[nodeIndex] = warpField.visibleNWeights[nodeID]; } // fill jacobian blocks w.r.t K nodes affecting spceific pixels on the depth map for(int nodeIndex = 0; nodeIndex < KNN; nodeIndex++){ int tmpID = 0; if(warpField.KDTREE){ tmpID = idx * warpField.nNum + nodeIndex; }else{ tmpID = idx * warpField.nodeNum + nodeIndex; } int nodeID = warpField.visibleNodeIds[tmpID]; // form J (update Ji block in each thread) nonRigidSolver.updateDataJacobianBlock(Jacob, // output Jaboian (in each call one block gets updated) subWarpField, // sub-WarpField (Deformation Graph) subWarpFiledWeights, blender, // current blending status targetMesh.verticesNum, // Observations Num idx, // Observation ID -> pixelID or vertexID nodeID, nodeIndex, // near graphNode ID (in query) vc, // vertex of current pixel nc, // normal at current vertex position vl); // corresponding vertex in live-frame } } } /*************************************************************************************************/ __device__ void nonRigidICP::updateRegJacobianBlock(Jacobian *_Jacob, geometry::defGraph &warpField, int nodeID){ // gets position of current neighbor node float3 di = warpField.nodes[nodeID].vertex.position; // get number of entier nodes in deformation graph int n = warpField.nodeNum; int id = (6 * 3 * n * nodeID) + 3 * nodeID; // Jacobian Blocks for main node // First row _Jacob[0 * 3 * warpField.nodeNum + id] = 0.0f; _Jacob[1 * 3 * warpField.nodeNum + id] = di.z; _Jacob[2 * 3 * warpField.nodeNum + id] = -di.y; _Jacob[3 * 3 * warpField.nodeNum + id] = 1.0f; _Jacob[4 * 3 * warpField.nodeNum + id] = 0.0f; _Jacob[5 * 3 * warpField.nodeNum + id] = 0.0f; // Second row _Jacob[0 * 3 * warpField.nodeNum + id + 1] = -di.z; _Jacob[1 * 3 * warpField.nodeNum + id + 1] = 0.0f; _Jacob[2 * 3 * warpField.nodeNum + id + 1] = di.x; _Jacob[3 * 3 * warpField.nodeNum + id + 1] = 0.0f; _Jacob[4 * 3 * warpField.nodeNum + id + 1] = 1.0f; _Jacob[5 * 3 * warpField.nodeNum + id + 1] = 0.0f; // Third row _Jacob[0 * 3 * warpField.nodeNum + id + 2] = di.y; _Jacob[1 * 3 * warpField.nodeNum + id + 2] = -di.x; _Jacob[2 * 3 * warpField.nodeNum + id + 2] = 0.0f; _Jacob[3 * 3 * warpField.nodeNum + id + 2] = 0.0f; _Jacob[4 * 3 * warpField.nodeNum + id + 2] = 0.0f; _Jacob[5 * 3 * warpField.nodeNum + id + 2] = 1.0f; for(int n = 0; n < KNN; n++){ // gets the ID of the neighbor list for current node int neighborID = warpField.nodes[nodeID].nIds[n]; // if(nodeID == 0)printf("NodeID: %d, nID: %d \n", nodeID, neighborID); // gets position of selected neighbor node of currect node float3 dj = warpField.nodes[neighborID].vertex.position; // id of neighbour ???!!! 
// printf("%d, %d, %d, %d, %d\n",(id + 0 * n + 0 + nodeID), (id + 0 * n + 0 + neighborID), neighborID, id, NodeNeighbor); int id = (6 * 3 * warpField.nodeNum * neighborID) + nodeID * 3 ; //********************************************************// // Jacobian Blocks for each node neighbour // First row _Jacob[0 * 3 * warpField.nodeNum + id] = 0; _Jacob[1 * 3 * warpField.nodeNum + id] = dj.z; _Jacob[2 * 3 * warpField.nodeNum + id] = -dj.y; _Jacob[3 * 3 * warpField.nodeNum + id] = 1; _Jacob[4 * 3 * warpField.nodeNum + id] = 0; _Jacob[5 * 3 * warpField.nodeNum + id] = 0; // Second row _Jacob[0 * 3 * warpField.nodeNum + id + 1] = -dj.z; _Jacob[1 * 3 * warpField.nodeNum + id + 1] = 0; _Jacob[2 * 3 * warpField.nodeNum + id + 1] = dj.x; _Jacob[3 * 3 * warpField.nodeNum + id + 1] = 0; _Jacob[4 * 3 * warpField.nodeNum + id + 1] = 1; _Jacob[5 * 3 * warpField.nodeNum + id + 1] = 0; // Third row _Jacob[0 * 3 * warpField.nodeNum + id + 2] = dj.y; _Jacob[1 * 3 * warpField.nodeNum + id + 2] = -dj.x; _Jacob[2 * 3 * warpField.nodeNum + id + 2] = 0; _Jacob[3 * 3 * warpField.nodeNum + id + 2] = 0; _Jacob[4 * 3 * warpField.nodeNum + id + 2] = 0; _Jacob[5 * 3 * warpField.nodeNum + id + 2] = 1; } } // computes and returns Regularization residuals for each Node of derGraph specified with nodeID __device__ float3 nonRigidICP::getRegResiduals(geometry::defGraph &warpField, int nodeID){ float3 result; // vertex of neigbhour node j // float3 vi = warpField.nodes[nodeID].vertex.position; // Transformation of target node i float4x4 Ti = warpField.nodes[nodeID].dq.getTransformation(); for(int cnt = 0; cnt < KNN; cnt++){ // gets the neigbhour id j of target node i int neighborID = warpField.nodes[nodeID].nIds[cnt]; // vertex of neigbhour node j float3 vj = warpField.nodes[neighborID].vertex.position; // Transformation of neigbhour node j float4x4 Tj = warpField.nodes[neighborID].dq.getTransformation(); // weight of neigbhour node j float wij = fmax(warpField.nodes[nodeID].nWeights[cnt], warpField.nodes[neighborID].nWeights[cnt]); // todo... Huber Penalty should be add too ... result += wij * make_float3((Ti * make_float4(vj, 1.0f)) - (Tj * make_float4(vj, 1.0f))); // if(nodeID == 0)printf("%f, %f, %f \n", result.x, result.y, result.z); } return result; } // builds Regularization term Jacobian __global__ void buildRegJacbianKernel(nonRigidICP &nonRigidSolver, geometry::defGraph &warpField, Jacobian *Jacob, Jacobian *residuals) { int index = blockIdx.x * blockDim.x + threadIdx.x; int stride = blockDim.x * gridDim.x; int size = warpField.nodeNum; // for each node nodeID = idx // node Id is different that vertex ID in mesh for (int idx = index; idx < size; idx += stride) { nonRigidSolver.updateRegJacobianBlock(Jacob, warpField, idx); // todo... 
needs to be checked float3 residual = nonRigidSolver.getRegResiduals(warpField, idx); residuals[3 * idx ] = residual.x; residuals[3 * idx + 1] = residual.y; residuals[3 * idx + 2] = residual.z; // printf("Residuals ->> %d: res: %f\n",idx, residuals[idx]); } } __global__ void initIdentityGPU(float *matrix, int numR, int numC, float scalar) { int index = blockIdx.x * blockDim.x + threadIdx.x; int stride = blockDim.x * gridDim.x; int size = numR * numC; for (int idx = index; idx < size; idx += stride) { int x = static_cast<float>(idx / numC); int y = static_cast<float>(idx - numC * x); if(y < numR && x < numC) { if(x == y) matrix[idx] = scalar; else matrix[idx] = 0; } } } /*************************************************************************************************/ // build Jabocians and Non-Linear system and Solve LU factorization for graph to live-frame image void nonRigidICP::solve(geometry::defGraph &warpField, blender::dqBlender &blender, pyramid &targetImage, pyramid &sourceImage, Eigen::Matrix4f pose, rgbdSensor sensor){ float4x4 cuPose = float4x4(pose.data()).getTranspose(); /***************** 2D neighbour update ********************/ #ifdef DEBUG_DefGraph warpField.writeDefGraphToFile("../logs/DefGraph.txt"); #endif // todo ... update active graph nodes beforehand in gl_render // finds K nearest Active defGraph nodes on the visible scene and computes associated weights warpField.updateActiveNeighbourNodes(targetImage, sensor, cuPose); #ifdef DEBUG_DefGraph warpField.writeDefGraphToFile("../logs/DefGraph_activeNodes.txt", Mdata, 1); #endif for(int pylvl = 2; pylvl < 3; pylvl++) { // int scale = Prop.lvlScale[pylvl]; // initializing kernel papameters for (iterationNum = 1 ; iterationNum < Prop.lvlIterationNum[pylvl]; iterationNum++) { float4x4 cuPose = float4x4(pose.data()).getTranspose(); // sourceImage.getNormalsfromDepthImage(normals); cudaMallocManaged(&cloud, sizeof(geometry::PointCloudXYZ)); cudaMallocManaged(&cloud->points, sizeof(geometry::PointXYZ) * Mdata); cudaMallocManaged(&cloud->normals, sizeof(geometry::NormalXYZ) * Mdata); targetImage.getPointCloudXYZ(*cloud, 1); targetImage.getNormalsfromVertices(*cloud); /******************** JD^T * JD ***************************/ /************** rData -> data Residuals *******************/ cudaMallocManaged(&dataJacob, sizeof(Jacobian) * warpField.nodeNum * 6 * Mdata); cudaMallocManaged(&rData, sizeof(Jacobian) * Mdata); cudaDeviceSynchronize(); // Reseting Jacobin values cudaMemset(rData, 0, sizeof(Jacobian) * Mdata); cudaMemset(dataJacob, 0, sizeof(Jacobian) * warpField.nodeNum * 6 * Mdata); cudaDeviceSynchronize(); #ifdef LOG_EN std::cout << "* dataJ" << std::endl; std::cout << "* rData" << std::endl; #endif int threads_per_block = 64; int thread_blocks =(Mdata + threads_per_block - 1) / threads_per_block; buildDataJacbianKernel<<<thread_blocks, threads_per_block>>>(*this, warpField, blender, targetImage.depth, sourceImage.depth, *cloud, targetImage.sensor, cuPose, dataJacob, rData); cudaDeviceSynchronize(); // write dataJacob into file for debug #ifndef DEBUG m = Mdata; n = 6 * warpField.nodeNum; writeMatrixToTxt("../logs/dataJacob.txt", dataJacob, sizeof(Jacobian), m, n, 1, 0, LOG_MAX); #endif // write rData into file for debug #ifndef DEBUG m = Mdata; n = 1; writeMatrixToTxt("../logs/rData.txt", rData, sizeof(Jacobian), m, n, 1, 0, LOG_MAX); #endif /******************** JR^T * JR ***************************/ /************* rReg -> data Residuals *********************/ cudaMallocManaged(&regJacob, sizeof(Jacobian) * 3 * 
warpField.nodeNum * 6 * warpField.nodeNum); cudaMallocManaged(&rReg, sizeof(Jacobian) * 3 * warpField.nodeNum); cudaDeviceSynchronize(); // Reseting Jacobin values cudaMemset(rReg, 0, sizeof(Jacobian) * 3 * warpField.nodeNum); cudaMemset(regJacob, 0, sizeof(Jacobian) * 3 * warpField.nodeNum * 6 * warpField.nodeNum); cudaDeviceSynchronize(); #ifdef LOG_EN std::cout << "* regJ" << std::endl; std::cout << "* rReg" << std::endl; #endif threads_per_block = 64; thread_blocks =(warpField.nodeNum + threads_per_block - 1) / threads_per_block; // std::cout << "<<< buildRegJacbianKernel >>> threadBlocks: "<< thread_blocks << // ", threadPerBlock: " << threads_per_block << // ", NODE_NUM: " << warpField.nodeNum << // ", visibleNodesNum: " << warpField.visibleNodesNum << // std::endl; // build Jacobian fro Regularization term in Error Function buildRegJacbianKernel<<<thread_blocks, threads_per_block>>>(*this, warpField, regJacob, rReg); cudaDeviceSynchronize(); // write regJacob into file for debug #ifdef DEBUG m = 3 * warpField.nodeNum; n = 6 * warpField.nodeNum; writeMatrixToTxt("../logs/regJacob.txt", regJacob, sizeof(Jacobian), m, n, 1, 0, LOG_MAX); #endif // write rReg into file for debug #ifdef DEBUG m = 3 * warpField.nodeNum; n = 1; writeMatrixToTxt("../logs/rReg.txt", rReg, sizeof(Jacobian), m, n, 1, 0, LOG_MAX); #endif #ifdef LOG_EN std::cout << "buildLinearSystem... " << std::endl; #endif buildLinearSystem(warpField); #ifdef LOG_EN std::cout << "Error evaluation ... " << std::endl; #endif currError = 0.0f; for(int cnt = 0; cnt < warpField.nodeNum; cnt++){ if(isnan(rData[cnt]))continue; currError += pow(rData[cnt], 2); } for(int cnt = 0; cnt < warpField.nodeNum * 3; cnt++){ if(isnan(rReg[cnt]))continue; currError += LAMBDA * pow(rReg[cnt], 2); } float change = prevError - currError; std::cout << "iteration: " << iterationNum << ", Error: " << currError << ", change: " << change << std::endl; if(change >= Prop.minIncrement){ prevError = currError; blender.blendMesh(warpField.defGraphMesh, warpField); }else if(change <= 0.0f){ std::cout << " ****** Break-out !!!! ****** "<< std::endl; break; }else{ std::cout << " ICP done ..."<< std::endl; break; } cudaFree(rData); cudaFree(rReg); } } } // build Jabocians and Non-Linear system and Solve LU factorization for graph to live-frame Mesh void nonRigidICP::solve(geometry::defGraph &warpField, blender::dqBlender &blender, geometry::MeshSTD &targetMesh, Eigen::Matrix4f pose){ float4x4 cuPose = float4x4(pose.data()).getTranspose(); /***************** 2D neighbour update ********************/ #ifdef DEBUG_DefGraph warpField.writeDefGraphToFile("../logs/DefGraph.txt"); #endif // todo ... update active graph nodes beforehand in gl_render // finds K nearest Active defGraph nodes on the visible scene and computes associated weights warpField.updateActiveNeighbourNodes(targetMesh, cuPose); #ifdef DEBUG_DefGraph warpField.writeDefGraphToFile("../logs/DefGraph_activeNodes.txt",targetMesh.verticesNum,1); #endif // maybe pyramid can be used too... 
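/* ----------------------------------------------------------------------------
   Illustrative sketch (hypothetical, not part of the original solver): both
   solve() overloads evaluate the same combined energy after each Gauss-Newton
   step, E = ||rData||^2 + LAMBDA * ||rReg||^2, and stop once the decrease
   falls below Prop.minIncrement or the energy grows. A compact host-side
   version of that evaluation would be:

     double evalEnergy(const float* rData, int nData,
                       const float* rReg,  int nReg, float lambda) {
       double e = 0.0;
       for (int i = 0; i < nData; ++i)
         if (!isnan(rData[i])) e += double(rData[i]) * rData[i];
       for (int j = 0; j < nReg; ++j)
         if (!isnan(rReg[j])) e += lambda * double(rReg[j]) * rReg[j];
       return e;
     }

   Note: the per-iteration loops above sum only the first nodeNum entries of
   rData even though Mdata data residuals are written; if the full data term
   is intended, nData should be Mdata (and 3 * nodeNum for rReg, as above).
   -------------------------------------------------------------------------- */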
for(int pylvl = 2; pylvl < 3; pylvl++) { //int scale = Prop.lvlScale[pylvl]; // place to add hierarcical levels of mesh (generating on place) // initializing kernel papameters for (iterationNum = 1 ; iterationNum < Prop.lvlIterationNum[pylvl]; iterationNum++) { /******************** JD^T * JD ***************************/ /************** rData -> data Residuals *******************/ cudaMallocManaged(&rData, sizeof(Jacobian) * Mdata); cudaMallocManaged(&dataJacob, sizeof(Jacobian) * warpField.nodeNum * 6 * Mdata); cudaDeviceSynchronize(); // Reseting Jacobin values cudaMemset(rData, 0, sizeof(Jacobian) * Mdata); cudaMemset(dataJacob, 0, sizeof(Jacobian) * warpField.nodeNum * 6 * Mdata); cudaDeviceSynchronize(); #ifdef LOG_EN std::cout << "* dataJ" << std::endl; std::cout << "* rData" << std::endl; #endif int threads_per_block = 64; int thread_blocks =(Mdata + threads_per_block - 1) / threads_per_block; // std::cout << "<<< buildDataJacbianKernel >>> threadBlocks: "<< thread_blocks << // ", threadPerBlock: " << threads_per_block << // ", NODE_NUM: " << warpField.nodeNum << // ", visibleNodesNum: " << warpField.visibleNodesNum << // std::endl; // build Jacobian for data term in Error Fucntion // for(int cnt=0;cnt<warpField.nodeNum; cnt++){ // std::cout << "index: " << cnt << ", " << warpField.nodes[cnt].dq << std::endl; // } // for(int cnt = 0; cnt < targetMesh.verticesNum; cnt ++){ // for(int j =0; j<warpField.nNum; j++){ // int nidx = cnt * warpField.nNum + j; // printf("id: %d, j:%d, %d, %f, %f \n", cnt, j , warpField.visibleNodeIds[nidx], warpField.visibleNDistances[nidx], warpField.visibleNWeights[nidx]); // } // } buildDataJacbianKernel<<<thread_blocks, threads_per_block>>>(*this, warpField, blender, targetMesh, cuPose, dataJacob, rData); cudaDeviceSynchronize(); // write dataJacob into file for debug #ifdef DEBUG m = Mdata; n = 6 * warpField.nodeNum; writeMatrixToTxt("../logs/dataJacob.txt", dataJacob, sizeof(Jacobian), m, n, 1, 0, m); #endif // write rData into file for debug #ifdef DEBUG m = Mdata; n = 1; writeMatrixToTxt("../logs/rData.txt", rData, sizeof(Jacobian), m, n, 1, 0, m); #endif /******************** JR^T * JR ***************************/ /************* rReg -> data Residuals *********************/ cudaMallocManaged(&regJacob, sizeof(Jacobian) * 3 * warpField.nodeNum * 6 * warpField.nodeNum); cudaMallocManaged(&rReg, sizeof(Jacobian) * 3 * warpField.nodeNum); cudaDeviceSynchronize(); // Reseting Jacobin values cudaMemset(rReg, 0, sizeof(Jacobian) * 3 * warpField.nodeNum); cudaMemset(regJacob, 0, sizeof(Jacobian) * 3 * warpField.nodeNum * 6 * warpField.nodeNum); cudaDeviceSynchronize(); #ifdef LOG_EN std::cout << "* regJ" << std::endl; std::cout << "* rReg" << std::endl; #endif threads_per_block = 64; thread_blocks =(warpField.nodeNum + threads_per_block - 1) / threads_per_block; // std::cout << "<<< buildRegJacbianKernel >>> threadBlocks: "<< thread_blocks << // ", threadPerBlock: " << threads_per_block << // ", NODE_NUM: " << warpField.nodeNum << // ", visibleNodesNum: " << warpField.visibleNodesNum << // std::endl; // build Jacobian fro Regularization term in Error Function buildRegJacbianKernel<<<thread_blocks, threads_per_block>>>(*this, warpField, regJacob, rReg); cudaDeviceSynchronize(); // write regJacob into file for debug #ifdef DEBUG m = 3 * warpField.nodeNum; n = 6 * warpField.nodeNum; writeMatrixToTxt("../logs/regJacob.txt", regJacob, sizeof(Jacobian), m, n, 1, 0, m); #endif // write rReg into file for debug #ifdef DEBUG m = 3 * warpField.nodeNum; n = 1; 
writeMatrixToTxt("../logs/rReg.txt", rReg, sizeof(Jacobian), m, n, 1, 0, m); #endif #ifdef LOG_EN std::cout << "buildLinearSystem... " << std::endl; #endif buildLinearSystem(warpField); #ifdef LOG_EN std::cout << "Error evaluation ... " << std::endl; #endif currError = 0.0f; for(int cnt = 0; cnt < warpField.nodeNum; cnt++){ if(isnan(rData[cnt]))continue; currError += pow(rData[cnt], 2); } for(int cnt = 0; cnt < warpField.nodeNum * 3; cnt++){ if(isnan(rReg[cnt]))continue; currError += LAMBDA * pow(rReg[cnt], 2); } float change = prevError - currError; std::cout << "iteration: " << iterationNum << ", Error: " << currError << ", change: " << change << std::endl; if(change >= Prop.minIncrement){ prevError = currError; blender.blendMesh(warpField.defGraphMesh, warpField); }else if(change <= 0.0f){ std::cout << " ****** Break-out !!!! ****** "<< std::endl; break; }else{ std::cout << " ICP done ..."<< std::endl; break; } } } } // build Jabocians and Non-Linear system and Solve LU factorization Common steps void nonRigidICP::buildLinearSystem(geometry::defGraph &warpField){ /******************** JD^T * JD ***********************/ cudaMallocManaged(&JdTJd, sizeof(Jacobian) * 6 * warpField.nodeNum * 6 * warpField.nodeNum); cudaDeviceSynchronize(); cudaMemset(JdTJd, 0, sizeof(Jacobian) * 6 * warpField.nodeNum * 6 * warpField.nodeNum); cudaDeviceSynchronize(); #ifdef LOG_EN std::cout << "* JD^T * JD" << std::endl; #endif m = 6 * warpField.nodeNum; n = Mdata; cuBlasMatrixMulTrans(dataJacob, dataJacob, JdTJd, m, n, m); // write JdTJd into file for debug #ifdef DEBUG writeMatrixToTxt("../logs/JdTJd.txt", JdTJd, sizeof(Jacobian), m, m, 1, 0, LOG_MAX); #endif /******************** bData ***************************/ // from b = J^T * r cudaMallocManaged(&bData, sizeof(Jacobian) * 6 * warpField.nodeNum); cudaDeviceSynchronize(); cudaMemset(bData, 0, sizeof(Jacobian) * 6 * warpField.nodeNum); cudaDeviceSynchronize(); #ifdef LOG_EN std::cout << "* bData" << std::endl; #endif m = 6 * warpField.nodeNum; k = Mdata; n = 1; cuBlasMatrixMulTrans(dataJacob, rData, bData, m, k, n); // write bData into file for debug #ifdef DEBUG writeMatrixToTxt("../logs/bData.txt", bData, sizeof(Jacobian), m, n, 1, 0, LOG_MAX); #endif cudaFree(dataJacob); /******************** JR^T * JR ***********************/ cudaMallocManaged(&JrTJr, sizeof(Jacobian) * 6 * warpField.nodeNum * 6 * warpField.nodeNum); cudaDeviceSynchronize(); cudaMemset(JrTJr, 0, sizeof(Jacobian) * 6 * warpField.nodeNum * 6 * warpField.nodeNum); cudaDeviceSynchronize(); #ifdef LOG_EN std::cout << "* JR^T * JR" << std::endl; #endif alpha = 1.0; beta = 0.0; m = 6 * warpField.nodeNum; n = 3 * warpField.nodeNum; cuBlasMatrixMulTrans(regJacob, regJacob, JrTJr, m, n, m); // write JrTJr into file for debug #ifdef DEBUG writeMatrixToTxt("../logs/JrTJr.txt", JrTJr, sizeof(Jacobian), m, m, 1, 0, LOG_MAX); #endif /******************** bReg ***************************/ // from b = J^T * r cudaMallocManaged(&bReg, sizeof(Jacobian) * 6 * warpField.nodeNum); cudaDeviceSynchronize(); cudaMemset(bReg, 0, sizeof(Jacobian) * 6 * warpField.nodeNum); cudaDeviceSynchronize(); #ifdef LOG_EN std::cout << "* bReg" << std::endl; #endif m = 6 * warpField.nodeNum; k = 3 * warpField.nodeNum; n = 1; cuBlasMatrixMulTrans(regJacob, rReg, bReg, m, k, n); // write bReg into file for debug #ifdef DEBUG writeMatrixToTxt("../logs/bReg.txt", bReg, sizeof(Jacobian), m, n, 1, 0, LOG_MAX); #endif cudaFree(regJacob); /******************** A ***************************/ // A = JT * J #ifdef LOG_EN std::cout 
<< "* JT * J" << std::endl; #endif cudaMallocManaged(&JTJ, sizeof(Jacobian) * 6 * warpField.nodeNum * 6 * warpField.nodeNum); cudaDeviceSynchronize(); cudaMemset(JTJ, 0, sizeof(Jacobian) * 6 * warpField.nodeNum * 6 * warpField.nodeNum); cudaDeviceSynchronize(); // J^T * J = JD^T * JD + LAMBDA * JReg^T * JReg alpha = 1.0f; beta = LAMBDA; m = 6 * warpField.nodeNum; cuBlasMatrixSum(alpha, JdTJd, beta, JrTJr, JTJ, m); // write JTJ into file for debug #ifdef DEBUG writeMatrixToTxt("../logs/JTJ.txt", JTJ, sizeof(Jacobian), m, m, 1, 0, LOG_MAX); #endif cudaFree(JdTJd); cudaFree(JrTJr); /******************** b ***************************/ // b = bData + LAMBDA * bReg #ifdef LOG_EN std::cout << "* b" << std::endl; #endif cudaMallocManaged(&b, sizeof(Jacobian) * 6 * warpField.nodeNum); cudaDeviceSynchronize(); cudaMemset(b, 0, sizeof(Jacobian) * 6 * warpField.nodeNum); cudaDeviceSynchronize(); alpha = -1.0f; beta = LAMBDA; m = 6 * warpField.nodeNum; n = 1; cuBlasVectorSum(alpha, bData, beta, bReg, b, m); // write b into file for debug #ifdef DEBUG writeMatrixToTxt("../logs/b.txt", b, sizeof(Jacobian), m, n, 1, 0, LOG_MAX); #endif cudaFree(bData); cudaFree(bReg); /****************Solve Non-Linear System***********/ #ifdef LOG_EN std::cout << "Solve Linear system ... " << std::endl; #endif cudaMallocManaged(&result, sizeof(Jacobian) * 6 * warpField.nodeNum); cudaDeviceSynchronize(); cudaMemset(result, 0, sizeof(Jacobian) * 6 * warpField.nodeNum); // todo... correct size cudaDeviceSynchronize(); solveSparceLinearSystem(warpField); // // solveLinearSystem(warpField); // // cuSolveLinearSystem(warpField); cudaFree(JTJ); cudaFree(b); cudaFree(result); } void nonRigidICP::cuSolveLinearSystem(geometry::defGraph &warpField){ /******************** cuSolver **************************/ // create Solver handler cusolverDnCreate(&cuSolverHandle); m = 6 * warpField.nodeNum; n = 6 * warpField.nodeNum; int Lwork = 0; cusolverDnSgetrf_bufferSize(cuSolverHandle, m, n, JTJ, m, &Lwork); std::cout << "* bufferSize ... , Lwork: " << Lwork << std::endl; // set up workspace float* d_workspace; int* d_ipiv, *d_info; int h_info = 0; int lda = m; cudaMalloc(&d_workspace, Lwork * sizeof(float)); cudaMalloc(&d_ipiv, min(m,n) * sizeof(int)); cudaMalloc(&d_info, sizeof(int)); cudaDeviceSynchronize(); // decomposition std::cout << "* LU Decomposition of A ... "<< std::endl; cuSolverStatus = cusolverDnSgetrf(cuSolverHandle, m, n, JTJ, lda, d_workspace, d_ipiv, d_info); cudaMemcpy(&h_info, d_info, sizeof(int), cudaMemcpyDeviceToHost); if(cuSolverStatus != CUSOLVER_STATUS_SUCCESS) { std::cerr<<"failed to LU, info = "<<h_info<<std::endl; } else { std::cerr<<"done LU, info = "<<h_info<<std::endl; } // solve int ldb = n; std::cout << "* Solving op(A)x = b " << std::endl; cuSolverStatus = cusolverDnSgetrs(cuSolverHandle, CUBLAS_OP_N, n, 1, JTJ, n, d_ipiv, b, ldb, d_info); cudaMemcpy(&h_info, d_info, sizeof(int), cudaMemcpyDeviceToHost); cudaDeviceSynchronize(); if(cuSolverStatus != CUSOLVER_STATUS_SUCCESS) { std::cerr<<"failed to solve, info = "<<h_info<<std::endl; } else { std::cerr<<"solved, info = "<<h_info<<std::endl; } if (d_info ) { cudaFree(d_info); } if (d_workspace) { cudaFree(d_workspace); } if (d_ipiv ) { cudaFree(d_ipiv);} const float _alfa = 1.0f; const float _beta = 1.0f; m = 6 * warpField.nodeNum; n = 1; cudaMemset(result, 0, sizeof(Jacobian) * 6 * warpField.nodeNum); // todo.... result value is not correct!!! 
cudaDeviceSynchronize(); cuBlasVectorSum(_alfa, result, _beta, b, result, m); warpField.updateNodesDQ(result); // write result into file for debug #ifdef DEBUG writeMatrixToTxt("../logs/result.txt", result, sizeof(Jacobian), m, n, 1, 0, m); #endif } void nonRigidICP::solveLinearSystem(geometry::defGraph &warpField){ /******************** Eigen Chol. Decomposition**************************/ m = 6 * warpField.nodeNum; n = 6 * warpField.nodeNum; Jacobian *h_JTJ = new Jacobian[m * n]; Jacobian *h_JTr = new Jacobian[m * 1]; memset(h_JTJ, 0, sizeof(Jacobian) * m * n); memset(h_JTr, 0, sizeof(Jacobian) * m * 1); cudaMemcpy(h_JTJ, JTJ, sizeof(Jacobian) * m * n, cudaMemcpyDeviceToHost); cudaMemcpy(h_JTr, b, sizeof(Jacobian) * m * 1, cudaMemcpyDeviceToHost); cudaDeviceSynchronize(); Eigen::Matrix<double, Eigen::Dynamic, Eigen::Dynamic> A; A = (Eigen::Map<Eigen::Matrix<float, 6*NODE_NUM, 6*NODE_NUM> >(h_JTJ)).cast <double> (); Eigen::Matrix<double, Eigen::Dynamic, Eigen::Dynamic> _b; _b = (Eigen::Map<Eigen::Matrix<float, 6*NODE_NUM, 1> >(h_JTr)).cast <double> (); double scaling = 1 / A.maxCoeff(); _b *= scaling; A *= scaling; float alfa = Prop.regularization * A.maxCoeff(); A = A + alfa * Eigen::MatrixXd::Identity(6*NODE_NUM, 6*NODE_NUM) * 1/iterationNum; // std::cout << "Eigen Solve LTVL" << std::endl; Eigen::Matrix<double, 6*NODE_NUM, 1> increment = A.ldlt().solve(_b); Jacobian *h_increment = new Jacobian[m * 1]; memset(h_increment, 0, sizeof(Jacobian) * m * 1); for(int cnt=0; cnt<m; cnt++){ h_increment[cnt] = increment[cnt]; } const float _alfa = 1.0f; const float _beta = 1.0f; m = 6 * warpField.nodeNum; n = 1; cudaMemset(result, 0, sizeof(Jacobian) * m * n); // todo.... result value is not correct!!! cudaMemcpy(result, h_increment, sizeof(Jacobian) * m * n, cudaMemcpyHostToDevice); cudaDeviceSynchronize(); // std::cout << "apply increments ... " << std::endl; cuBlasVectorSum(_alfa, result, _beta, b, result, m); // std::cout << "updating WarpField ... " << std::endl; warpField.updateNodesDQ(b); // std::cout << "update done ... 
" << std::endl; // write result into file for debug #ifdef DEBUG writeMatrixToTxt("../logs/result.txt", result, sizeof(Jacobian), m, n, 1, 0, m); #endif } void nonRigidICP::solveSparceLinearSystem(geometry::defGraph &warpField){ // // --- Initialize cuSPARSE cusparseHandle_t handle; cusparseCreate(&handle); const int Nrows = 6 * warpField.nodeNum; // --- Number of rows const int Ncols = 6 * warpField.nodeNum; // --- Number of columns const int N = Nrows; float *identityMat; cudaMalloc(&identityMat, Nrows * Ncols * sizeof(float)); cudaMemset(identityMat, 0, Nrows * Ncols * sizeof(float)); cudaDeviceSynchronize(); int threads_per_block = 64; int thread_blocks =(Nrows * Ncols + threads_per_block - 1) / threads_per_block; initIdentityGPU <<<thread_blocks, threads_per_block>>>(identityMat, Nrows, Ncols, Prop.regularization/iterationNum); cudaDeviceSynchronize(); alpha = 1.0f; beta = 1.0f; cuBlasVectorSum(alpha, JTJ, beta, identityMat, JTJ, Ncols); cudaDeviceSynchronize(); // --- Descriptor for sparse matrix A cusparseMatDescr_t descrA; cusparseCreateMatDescr(&descrA); cusparseSetMatType(descrA, CUSPARSE_MATRIX_TYPE_GENERAL); cusparseSetMatIndexBase(descrA, CUSPARSE_INDEX_BASE_ZERO); int nnz = 0; // --- Number of nonzero elements in dense matrix const int lda = Nrows; // --- Leading dimension of dense matrix // --- Device side number of nonzero elements per row int *d_nnzPerVector; cudaMalloc(&d_nnzPerVector, Nrows * sizeof(*d_nnzPerVector)); cudaMemset(d_nnzPerVector, 0, Nrows * sizeof(*d_nnzPerVector)); cusparseSnnz(handle, CUSPARSE_DIRECTION_ROW, Nrows, Ncols, descrA, JTJ, lda, d_nnzPerVector, &nnz); // --- Host side number of nonzero elements per row int *h_nnzPerVector = (int *)malloc(Nrows * sizeof(*h_nnzPerVector)); cudaMemcpy(h_nnzPerVector, d_nnzPerVector, Nrows * sizeof(*h_nnzPerVector), cudaMemcpyDeviceToHost); // printf("Number of nonzero elements in dense matrix = %i\n\n", nnz); // for (int i = 0; i < Nrows; ++i) printf("Number of nonzero elements in row %i = %i \n", i, h_nnzPerVector[i]); // printf("\n"); // // --- Device side dense matrix float *d_A; cudaMalloc(&d_A, nnz * sizeof(*d_A)); int *d_A_RowIndices; cudaMalloc(&d_A_RowIndices, (Nrows + 1) * sizeof(*d_A_RowIndices)); int *d_A_ColIndices; cudaMalloc(&d_A_ColIndices, nnz * sizeof(*d_A_ColIndices)); cusparseSdense2csr(handle, Nrows, Ncols, descrA, JTJ, lda, d_nnzPerVector, d_A, d_A_RowIndices, d_A_ColIndices); // --- Host side dense matrix float *h_A = (float *)malloc(nnz * sizeof(*h_A)); int *h_A_RowIndices = (int *)malloc((Nrows + 1) * sizeof(*h_A_RowIndices)); int *h_A_ColIndices = (int *)malloc(nnz * sizeof(*h_A_ColIndices)); cudaMemcpy(h_A, d_A, nnz*sizeof(*h_A), cudaMemcpyDeviceToHost); cudaMemcpy(h_A_RowIndices, d_A_RowIndices, (Nrows + 1) * sizeof(*h_A_RowIndices), cudaMemcpyDeviceToHost); cudaMemcpy(h_A_ColIndices, d_A_ColIndices, nnz * sizeof(*h_A_ColIndices), cudaMemcpyDeviceToHost); // for (int i = 0; i < nnz; ++i) printf("A[%i] = %.0f ", i, h_A[i]); printf("\n"); // for (int i = 0; i < (Nrows + 1); ++i) printf("h_A_RowIndices[%i] = %i \n", i, h_A_RowIndices[i]); printf("\n"); // for (int i = 0; i < nnz; ++i) printf("h_A_ColIndices[%i] = %i \n", i, h_A_ColIndices[i]); // --- Allocating and defining dense host and device data vectors float *h_b = (float *)malloc(Nrows * sizeof(float)); cudaMemcpy(h_b, b, Nrows * sizeof(float), cudaMemcpyDeviceToHost); // --- Allocating the host and device side result vector float *h_x = (float *)malloc(Ncols * sizeof(float)); float *d_x; cudaMalloc(&d_x, Ncols * sizeof(float)); // 
--- CUDA solver initialization cusolverSpHandle_t solver_handle; cusolverSpCreate(&solver_handle); // --- Using Cholesky factorization int singularity; cusolverSpScsrlsvcholHost(solver_handle, N, nnz, descrA, h_A, h_A_RowIndices, h_A_ColIndices, h_b, 0.000001, 0, h_x, &singularity); cudaMemcpy(b, h_b, Nrows * sizeof(float), cudaMemcpyHostToDevice); // printf("Showing the results...\n"); // for (int i = 0; i < N; i++) printf("%f\n", h_x[i]); Jacobian *h_increment = new Jacobian[m * 1]; memset(h_increment, 0, sizeof(Jacobian) * m * 1); const float _alfa = 1.0f; const float _beta = 1.0f; cudaMemset(result, 0, sizeof(Jacobian) * 6 * warpField.nodeNum); // todo.... result value is not correct!!! cudaDeviceSynchronize(); cuBlasVectorSum(_alfa, result, _beta, b, result, N); warpField.updateNodesDQ(result); // write result into file for debug #ifdef DEBUG writeMatrixToTxt("../logs/result.txt", result, sizeof(Jacobian), N, N, 1, 0, N); #endif cudaFree(d_nnzPerVector); cudaFree(d_A); cudaFree(d_A_RowIndices); cudaFree(d_A_ColIndices); cudaFree(d_x); cudaFree(identityMat); delete[] h_nnzPerVector; delete[] h_A; delete[] h_A_RowIndices; delete[] h_A_ColIndices; delete[] h_b; delete[] h_x; } /*************************************************************************************************/ // Multiply the arrays A and B on GPU and save the result in C // Calculate: C = α op ( A ) * op ( B ) + β C // (m X k) * (k X n) = (m X n) void nonRigidICP::cuBlasMatrixMul(const float *A, const float *B, float *C, const int m, const int k, const int n) { int lda = m, ldb = k, ldc = m; const float alf = 1; const float bet = 0; const float *alpha = &alf; const float *beta = &bet; // Create a handle for CUBLAS cublasHandle_t handle; cublasCreate(&handle); // Do the actual multiplication cublasSgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, m, n, k, alpha, A, lda, B, ldb, beta, C, ldc); // Destroy the handle cublasDestroy(handle); } // Multiply the arrays A and B on GPU and save the result in C // Calculate: C = α op ( A )T * op ( B ) + β C // (m X k) * (k X n) = (m X n) void nonRigidICP::cuBlasMatrixMulTrans(const float *A, const float *B, float *C, const int m, const int k, const int n) { int lda = k, ldb = k, ldc = m; const float alf = 1; const float bet = 0; const float *alpha = &alf; const float *beta = &bet; // Create a handle for CUBLAS cublasHandle_t handle; cublasCreate(&handle); // Do the actual multiplication cublasSgemm(handle, CUBLAS_OP_T, CUBLAS_OP_N, m, n, k, alpha, A, lda, B, ldb, beta, C, ldc); // Destroy the handle cublasDestroy(handle); } // Multiply the arrays A and B on GPU and save the result in C // Calculate: C = α op ( A ) + β op ( B ) // (m X k) * (k X n) = (m X n) void nonRigidICP::cuBlasMatrixSum(const float alf, const float *A, const float bet, const float *B, float *C, const int m){ int lda=m,ldb=m,ldc=m; int n = m; const float *alpha = &alf; const float *beta = &bet; // Create a handle for CUBLAS cublasHandle_t handle; cublasCreate(&handle); // Do the Transpose cublasSgeam(handle, CUBLAS_OP_N, CUBLAS_OP_N, m, n, alpha, A, lda, beta, B, ldb, C, ldc); // Destroy the handle cublasDestroy(handle); } // Multiply the arrays A and B on GPU and save the result in C // Calculate: C = α op ( A ) + β op ( B ) // (m X k) * (k X n) = (m X n) void nonRigidICP::cuBlasMatrixSumTrans(const float alf, const float *A, const float bet, const float *B, float *C, const int m){ int lda=m,ldb=m,ldc=m; int n = m; const float *alpha = &alf; const float *beta = &bet; // Create a handle for CUBLAS cublasHandle_t handle; 
cublasCreate(&handle); // Do the Transpose cublasSgeam(handle, CUBLAS_OP_T, CUBLAS_OP_T, m, n, alpha, A, lda, beta, B, ldb, C, ldc); // Destroy the handle cublasDestroy(handle); } // Sum Vector A and B on GPU and save the result in C // Calculate: C = α op ( A ) + β op ( B ) // (m X k) * (k X n) = (m X n) void nonRigidICP::cuBlasVectorSum(const float alf, const float *A, const float bet, const float *B, float *C, const int m){ int n =1; int lda=m,ldb=m,ldc=m; const float *alpha = &alf; const float *beta = &bet; // Create a handle for CUBLAS cublasHandle_t handle; cublasCreate(&handle); // Do the Transpose cublasSgeam(handle, CUBLAS_OP_N, CUBLAS_OP_N, m, n, alpha, A, lda, beta, B, ldb, C, ldc); // Destroy the handle cublasDestroy(handle); } // Returns the Transpose of A -> C , α =1, β =0, op(A)= AT // Calculate: C = α op ( A ) + β op ( B ) // A(m * n) void nonRigidICP::cuBlasMatrixTrans(const float *A, float *C, const int m, const int n){ int lda=n,ldb=m,ldc=m; const float alf = 1; const float bet = 0; const float *alpha = &alf; const float *beta = &bet; // Create a handle for CUBLAS cublasHandle_t handle; cublasCreate(&handle); // Do the Transpose cublasSgeam(handle, CUBLAS_OP_T, CUBLAS_OP_N, m, n, alpha, A, lda, beta, A, ldb, C, ldc); // Destroy the handle cublasDestroy(handle); } /*************************************************************************************************/ void nonRigidICP::writeMatrixToTxt(const char* fileName, float *matrix, size_t size, int rows, int cols, int mode, int from, int to){ float *h_matrix = new float[rows * cols]; memset(h_matrix, 0, size * rows * cols); cudaMemcpy(h_matrix, matrix, size * rows * cols, cudaMemcpyDeviceToHost); cudaDeviceSynchronize(); std::ofstream file(fileName,std::ios::ate); if (file.is_open()){ file << fileName <<", rows: " << rows << ", cols: " << cols << std::endl; file << "{ " << std::endl; for (size_t i = from; i < to; i++){ file << "[ " << i ; for (size_t j = 0; j < cols; j++){ if(mode == 0){ // row major file << std::fixed << std::setprecision(6) << " " << matrix[i * cols + j]; }else if(mode ==1){ // column major file << std::fixed << std::setprecision(6) << " " << matrix[i + rows * j]; } } file << " ]"<< std::endl; } file << " }"<< std::endl; file.close(); } else std::cout << "Unable to open file"; delete[] h_matrix; } /*************************************************************************************************/ } // end namespace solver } // end namespace DynaMap
9782cdc10305e4050a18d4f3b288b12aaf723bb8.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /****************************************************************************** * Copyright 2020 The Apollo Authors. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. *****************************************************************************/ /* Copyright (c) 2018 Anakin Authors, Inc. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include "modules/perception/inference/migraphx/plugins/dfmb_psroi_align_plugin.h" #include "modules/perception/inference/migraphx/plugins/kernels.h" namespace apollo { namespace perception { namespace inference { template <typename Dtype> __global__ void DFMBPSROIAlignForward( const int nthreads, const Dtype *bottom_data, const Dtype heat_map_a, const Dtype heat_map_b, const Dtype pad_ratio, const int batch_size, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const Dtype *bottom_rois, const Dtype *bottom_trans, const bool no_trans, const Dtype trans_std, const int sample_per_part, const int output_channel, const int group_height, const int group_width, const int part_height, const int part_width, const int num_classes, const int channels_each_class, Dtype *top_data) { CUDA_KERNEL_LOOP(index, nthreads) { // The output is in order (n, ctop, ph, pw) int pw = index % pooled_width; int ph = (index / pooled_width) % pooled_height; int ctop = (index / pooled_width / pooled_height) % output_channel; int n = index / pooled_width / pooled_height / output_channel; // [start, end) interval for spatial sampling const Dtype *offset_bottom_rois = bottom_rois + n * 5; int roi_batch_ind = offset_bottom_rois[0]; if (roi_batch_ind < 0 || roi_batch_ind >= batch_size) { continue; } Dtype pad_w = (offset_bottom_rois[3] - offset_bottom_rois[1] + 1) * pad_ratio; Dtype pad_h = (offset_bottom_rois[4] - offset_bottom_rois[2] + 1) * pad_ratio; Dtype roi_start_w = (offset_bottom_rois[1] - pad_w - heat_map_b) / heat_map_a; Dtype roi_start_h = (offset_bottom_rois[2] - pad_h - heat_map_b) / heat_map_a; Dtype roi_end_w = (offset_bottom_rois[3] + pad_w - heat_map_b) / heat_map_a; Dtype roi_end_h = (offset_bottom_rois[4] + pad_h - heat_map_b) / heat_map_a; // Force too small ROIs to be 1x1 Dtype roi_height = max(roi_end_h - roi_start_h, 0.1); Dtype roi_width = max(roi_end_w - roi_start_w, 0.1); // avoid 0 // Compute w and h at bottom Dtype bin_size_h = roi_height / (Dtype)pooled_height; Dtype bin_size_w = roi_width / (Dtype)pooled_width; Dtype sub_bin_size_h = 
bin_size_h / (Dtype)sample_per_part; Dtype sub_bin_size_w = bin_size_w / (Dtype)sample_per_part; int part_h = floor((Dtype)ph) / pooled_height * part_height; int part_w = floor((Dtype)pw) / pooled_width * part_width; int class_id = ctop / channels_each_class; Dtype trans_x = no_trans ? (Dtype)0 : bottom_trans[(((n * num_classes + class_id) * 2) * part_height + part_h) * part_width + part_w] * trans_std; Dtype trans_y = no_trans ? (Dtype)0 : bottom_trans[(((n * num_classes + class_id) * 2 + 1) * part_height + part_h) * part_width + part_w] * trans_std; int hstart = (Dtype)ph * bin_size_h + roi_start_h + trans_y * roi_height; int wstart = (Dtype)pw * bin_size_w + roi_start_w + trans_x * roi_width; Dtype sum = 0; int count = 0; int gh = floor((Dtype)ph * group_height / pooled_height); int gw = floor((Dtype)pw * group_width / pooled_width); gh = min(max(gh, 0), group_height - 1); gw = min(max(gw, 0), group_width - 1); const Dtype *offset_bottom_data = bottom_data + (roi_batch_ind * channels) * height * width; for (int ih = 0; ih < sample_per_part; ++ih) { for (int iw = 0; iw < sample_per_part; ++iw) { Dtype w = wstart + (iw + 0.5) * sub_bin_size_w; Dtype h = hstart + (ih + 0.5) * sub_bin_size_h; // bilinear interpolation if (w <= -1 || w >= width || h <= -1 || h >= height) { continue; } int c = (ctop * group_height + gh) * group_width + gw; int x1 = floor(w); int x2 = ceil(w); int y1 = floor(h); int y2 = ceil(h); Dtype dist_x = (Dtype)w - x1; Dtype dist_y = (Dtype)h - y1; const Dtype *data = offset_bottom_data + c * height * width; Dtype value11 = (x1 >= 0 && x1 < width && y1 >= 0 && y1 < height) ? data[y1 * width + x1] : Dtype(0.0); Dtype value12 = (x1 >= 0 && x1 < width && y2 >= 0 && y2 < height) ? data[y2 * width + x1] : Dtype(0.0); Dtype value21 = (x2 >= 0 && x2 < width && y1 >= 0 && y1 < height) ? data[y1 * width + x2] : Dtype(0.0); Dtype value22 = (x2 >= 0 && x2 < width && y2 >= 0 && y2 < height) ? data[y2 * width + x2] : Dtype(0.0); Dtype value = (1 - dist_x) * (1 - dist_y) * value11 + (1 - dist_x) * dist_y * value12 + dist_x * (1 - dist_y) * value21 + dist_x * dist_y * value22; sum += value; count++; } } top_data[index] = count == 0 ? (Dtype)0 : sum / count; } } int DFMBPSROIAlignPlugin::enqueue(int batchSize, const void *const *inputs, void **outputs, void *workspace, hipStream_t stream) { const float *bottom_data = reinterpret_cast<const float *>(inputs[0]); const float *bottom_rois = reinterpret_cast<const float *>(inputs[1]); const float *bottom_trans = no_trans_ ? nullptr : reinterpret_cast<const float *>(inputs[2]); float *top_data = reinterpret_cast<float *>(outputs[0]); int channels_each_class = no_trans_ ? output_channel_ : output_channel_ / num_classes_; BASE_GPU_CHECK( hipMemsetAsync(top_data, 0, output_size_ * sizeof(float), stream)); BASE_GPU_CHECK(hipDeviceSynchronize()); int block_size = (output_size_ - 1) / thread_size_ + 1; hipLaunchKernelGGL(( DFMBPSROIAlignForward), dim3(block_size), dim3(thread_size_), 0, stream, output_size_, bottom_data, heat_map_a_, heat_map_b_, pad_ratio_, batchSize, channels_, height_, width_, pooled_height_, pooled_width_, bottom_rois, bottom_trans, no_trans_, trans_std_, sample_per_part_, output_channel_, group_height_, group_width_, part_height_, part_width_, num_classes_, channels_each_class, top_data); return 0; } } // namespace inference } // namespace perception } // namespace apollo
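// -----------------------------------------------------------------------------
// Illustrative sketch (hypothetical helper, not part of the plugin): a
// host-side reference for the bilinear sample computed inside
// DFMBPSROIAlignForward. It reads a single channel (height x width, row-major)
// at a fractional (h, w) and treats out-of-range neighbours as zero, exactly
// like the value11..value22 terms above. Useful for unit-testing the kernel on
// a handful of sample points.
#include <cmath>  // std::floor / std::ceil, used by this sketch only

static float BilinearSampleZeroPad(const float* data, int height, int width,
                                   float h, float w) {
  const int x1 = static_cast<int>(std::floor(w));
  const int x2 = static_cast<int>(std::ceil(w));
  const int y1 = static_cast<int>(std::floor(h));
  const int y2 = static_cast<int>(std::ceil(h));
  const float dist_x = w - static_cast<float>(x1);
  const float dist_y = h - static_cast<float>(y1);
  auto at = [&](int y, int x) -> float {
    return (x >= 0 && x < width && y >= 0 && y < height) ? data[y * width + x]
                                                         : 0.f;
  };
  return (1.f - dist_x) * (1.f - dist_y) * at(y1, x1) +
         (1.f - dist_x) * dist_y * at(y2, x1) +
         dist_x * (1.f - dist_y) * at(y1, x2) +
         dist_x * dist_y * at(y2, x2);
}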
9782cdc10305e4050a18d4f3b288b12aaf723bb8.cu
/****************************************************************************** * Copyright 2020 The Apollo Authors. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. *****************************************************************************/ /* Copyright (c) 2018 Anakin Authors, Inc. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include "modules/perception/inference/migraphx/plugins/dfmb_psroi_align_plugin.h" #include "modules/perception/inference/migraphx/plugins/kernels.h" namespace apollo { namespace perception { namespace inference { template <typename Dtype> __global__ void DFMBPSROIAlignForward( const int nthreads, const Dtype *bottom_data, const Dtype heat_map_a, const Dtype heat_map_b, const Dtype pad_ratio, const int batch_size, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const Dtype *bottom_rois, const Dtype *bottom_trans, const bool no_trans, const Dtype trans_std, const int sample_per_part, const int output_channel, const int group_height, const int group_width, const int part_height, const int part_width, const int num_classes, const int channels_each_class, Dtype *top_data) { CUDA_KERNEL_LOOP(index, nthreads) { // The output is in order (n, ctop, ph, pw) int pw = index % pooled_width; int ph = (index / pooled_width) % pooled_height; int ctop = (index / pooled_width / pooled_height) % output_channel; int n = index / pooled_width / pooled_height / output_channel; // [start, end) interval for spatial sampling const Dtype *offset_bottom_rois = bottom_rois + n * 5; int roi_batch_ind = offset_bottom_rois[0]; if (roi_batch_ind < 0 || roi_batch_ind >= batch_size) { continue; } Dtype pad_w = (offset_bottom_rois[3] - offset_bottom_rois[1] + 1) * pad_ratio; Dtype pad_h = (offset_bottom_rois[4] - offset_bottom_rois[2] + 1) * pad_ratio; Dtype roi_start_w = (offset_bottom_rois[1] - pad_w - heat_map_b) / heat_map_a; Dtype roi_start_h = (offset_bottom_rois[2] - pad_h - heat_map_b) / heat_map_a; Dtype roi_end_w = (offset_bottom_rois[3] + pad_w - heat_map_b) / heat_map_a; Dtype roi_end_h = (offset_bottom_rois[4] + pad_h - heat_map_b) / heat_map_a; // Force too small ROIs to be 1x1 Dtype roi_height = max(roi_end_h - roi_start_h, 0.1); Dtype roi_width = max(roi_end_w - roi_start_w, 0.1); // avoid 0 // Compute w and h at bottom Dtype bin_size_h = roi_height / (Dtype)pooled_height; Dtype bin_size_w = roi_width / (Dtype)pooled_width; Dtype sub_bin_size_h = bin_size_h / (Dtype)sample_per_part; Dtype sub_bin_size_w = bin_size_w / 
(Dtype)sample_per_part; int part_h = floor((Dtype)ph) / pooled_height * part_height; int part_w = floor((Dtype)pw) / pooled_width * part_width; int class_id = ctop / channels_each_class; Dtype trans_x = no_trans ? (Dtype)0 : bottom_trans[(((n * num_classes + class_id) * 2) * part_height + part_h) * part_width + part_w] * trans_std; Dtype trans_y = no_trans ? (Dtype)0 : bottom_trans[(((n * num_classes + class_id) * 2 + 1) * part_height + part_h) * part_width + part_w] * trans_std; int hstart = (Dtype)ph * bin_size_h + roi_start_h + trans_y * roi_height; int wstart = (Dtype)pw * bin_size_w + roi_start_w + trans_x * roi_width; Dtype sum = 0; int count = 0; int gh = floor((Dtype)ph * group_height / pooled_height); int gw = floor((Dtype)pw * group_width / pooled_width); gh = min(max(gh, 0), group_height - 1); gw = min(max(gw, 0), group_width - 1); const Dtype *offset_bottom_data = bottom_data + (roi_batch_ind * channels) * height * width; for (int ih = 0; ih < sample_per_part; ++ih) { for (int iw = 0; iw < sample_per_part; ++iw) { Dtype w = wstart + (iw + 0.5) * sub_bin_size_w; Dtype h = hstart + (ih + 0.5) * sub_bin_size_h; // bilinear interpolation if (w <= -1 || w >= width || h <= -1 || h >= height) { continue; } int c = (ctop * group_height + gh) * group_width + gw; int x1 = floor(w); int x2 = ceil(w); int y1 = floor(h); int y2 = ceil(h); Dtype dist_x = (Dtype)w - x1; Dtype dist_y = (Dtype)h - y1; const Dtype *data = offset_bottom_data + c * height * width; Dtype value11 = (x1 >= 0 && x1 < width && y1 >= 0 && y1 < height) ? data[y1 * width + x1] : Dtype(0.0); Dtype value12 = (x1 >= 0 && x1 < width && y2 >= 0 && y2 < height) ? data[y2 * width + x1] : Dtype(0.0); Dtype value21 = (x2 >= 0 && x2 < width && y1 >= 0 && y1 < height) ? data[y1 * width + x2] : Dtype(0.0); Dtype value22 = (x2 >= 0 && x2 < width && y2 >= 0 && y2 < height) ? data[y2 * width + x2] : Dtype(0.0); Dtype value = (1 - dist_x) * (1 - dist_y) * value11 + (1 - dist_x) * dist_y * value12 + dist_x * (1 - dist_y) * value21 + dist_x * dist_y * value22; sum += value; count++; } } top_data[index] = count == 0 ? (Dtype)0 : sum / count; } } int DFMBPSROIAlignPlugin::enqueue(int batchSize, const void *const *inputs, void **outputs, void *workspace, cudaStream_t stream) { const float *bottom_data = reinterpret_cast<const float *>(inputs[0]); const float *bottom_rois = reinterpret_cast<const float *>(inputs[1]); const float *bottom_trans = no_trans_ ? nullptr : reinterpret_cast<const float *>(inputs[2]); float *top_data = reinterpret_cast<float *>(outputs[0]); int channels_each_class = no_trans_ ? output_channel_ : output_channel_ / num_classes_; BASE_GPU_CHECK( cudaMemsetAsync(top_data, 0, output_size_ * sizeof(float), stream)); BASE_GPU_CHECK(cudaDeviceSynchronize()); int block_size = (output_size_ - 1) / thread_size_ + 1; DFMBPSROIAlignForward<<<block_size, thread_size_, 0, stream>>>( output_size_, bottom_data, heat_map_a_, heat_map_b_, pad_ratio_, batchSize, channels_, height_, width_, pooled_height_, pooled_width_, bottom_rois, bottom_trans, no_trans_, trans_std_, sample_per_part_, output_channel_, group_height_, group_width_, part_height_, part_width_, num_classes_, channels_each_class, top_data); return 0; } } // namespace inference } // namespace perception } // namespace apollo
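// -----------------------------------------------------------------------------
// Illustrative sketch (hypothetical helpers, not part of the plugin): the flat
// output index that DFMBPSROIAlignForward decodes is laid out as
// (n, ctop, ph, pw) with pw changing fastest. The pair below mirrors that
// decoding and its inverse, which is convenient when checking top_data on the
// host; the struct and function names are illustrative only.
struct PsRoiAlignIndex {
  int n, ctop, ph, pw;
};

static PsRoiAlignIndex DecomposeIndex(int index, int pooled_height,
                                      int pooled_width, int output_channel) {
  PsRoiAlignIndex out;
  out.pw = index % pooled_width;
  out.ph = (index / pooled_width) % pooled_height;
  out.ctop = (index / pooled_width / pooled_height) % output_channel;
  out.n = index / pooled_width / pooled_height / output_channel;
  return out;
}

static int ComposeIndex(const PsRoiAlignIndex& i, int pooled_height,
                        int pooled_width, int output_channel) {
  return ((i.n * output_channel + i.ctop) * pooled_height + i.ph) *
             pooled_width +
         i.pw;
}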
ea5442c671c433aef174391525fbbaf8ddcc4541.hip
// !!! This is a file automatically generated by hipify!!! //Training of the CNN is done using Keras. After training for 10 epochs, the obtained accuracy on the training data set is 99.70 and on the test data set is 99.14. //This model implements the following layes in order- 2DConvolution---->Maxpooling---->2D Convolution---->Maxpooling---->Fully_connected layer---->Fully_connected layer. //The image is a 28*28 greyscale image. The specifications of the layers are as follows: //Layer_0: Convolution: 32 3*3 kernels with no padding and 1 stride. //Layer_1: Maxpooling: 2*2 filters with with no padding and 1 stride. //Layer_2: Convolution: 64 3*3 kernels with no padding and 1 stride. //Layer_3: Maxpooling: 2*2 filters with with no padding and 1 stride. //Layer_4: Flattening //Layer_5: Fully connected / dense layer with 1024 output units. //Layer_6: Dropout (done during training only). //Layer_7: Fully connected / dense layer with 10 output units. //All arrays and matrices are designed to be row ordered in this implementation. #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include<stdlib.h> #include<math.h> //Kernel that does convolution. This convolution is done by each thread identifying that patch or portion of the image that it is responsible for its result and does the multiplication and addition of it's patche's values with the suitable kernel. //The depth of the output image is the number of kernels. __global__ void convolution_kernel(int h, int w, int d, double* gpu_in, int k_h, int k_w, int k_d, double* kernel_weights, double* kernel_biases, int num_kernels, int op_h, int op_w, int op_d, double* gpu_out) { //Identifying threads by their IDs. int row = blockDim.y*blockIdx.y + threadIdx.y; int col = blockDim.x*blockIdx.x + threadIdx.x; int deep = blockDim.z *blockIdx.z + threadIdx.z; //Return if thread out of bounds if (row >= op_h || col >= op_w || deep >= op_d) return; double out=0.0; int kernel_pointer = 0; //Each thread/each output node identifies the corresponding element in the matrix that it is responsible to multiply-add. for (int depth_pointer = 0; depth_pointer < k_d; depth_pointer++) { for (int row_pointer = 0; row_pointer < k_h; row_pointer++) { for (int column_pointer = 0; column_pointer < k_w; column_pointer++) { out += gpu_in[((row*w + col) + row_pointer * w + column_pointer + h * w*depth_pointer)] * kernel_weights[kernel_pointer + deep * k_h*k_w*k_d]; kernel_pointer++; } } } //Bias addition and relu activation. One bias is applied to one output image layer, since one bias is applicable to one kernel. //Relu activation : relu(a)=max(0,a). If the value is less than 0 then it becomes 0, else it is retained. if (out + kernel_biases[deep] < 0.0) gpu_out[row*op_w + col + deep * op_h*op_w] = 0.0l; else gpu_out[row*op_w + col + deep * op_h*op_w] = out + kernel_biases[deep]; } //Kernel that does maxpooling. __global__ void maxpool_kernel(int h, int w, int d, double* gpu_in, int pool_height, int pool_width, int op_h, int op_w, int op_d, double* gpu_out) { int row = blockDim.y*blockIdx.y + threadIdx.y; int col = blockDim.x*blockIdx.x + threadIdx.x; int deep = blockDim.z *blockIdx.z + threadIdx.z; if (row >= op_h || col >= op_w || deep >= op_d) return; double out; double max; int kernel_pointer = 0; //The maximum is chosen to be the first element of the pool filter. 
max = gpu_in[(deep*w*h) + (row*pool_height)*w + (col*pool_width)]; //We follow throgh all the elements within the filter's size and look for the maximum element and the corresponding maximum element becomes the thread's value. for (int row_pointer = 0; row_pointer < pool_height; row_pointer++) { for (int column_pointer = 0; column_pointer < pool_width; column_pointer++) { if (gpu_in[(deep*w*h) + (row*pool_height)*w + (col*pool_width) + (row_pointer*w) + (column_pointer)] > max) max = gpu_in[(deep*w*h) + (row*pool_height)*w + (col*pool_width) + (row_pointer*w) + (column_pointer)]; } } gpu_out[deep*op_w*op_h + row * op_w + col] = max; } //This kernel implements the fully connected layers. __global__ void dense_kernel(int num_input, int num_output, double* gpu_in, double* weights, double* biases, double* gpu_out, int num_classes) { int tid = blockDim.x*blockIdx.x + threadIdx.x; if (tid >= num_output) return; double sum = 0.0l; //The weights are extracted from Keras such that all the weights to one output node appears together, followed by weights to the next node and so on. //Thus, each output node will be a multiply add of adjacent weight values with the input nodes. for (int count = 0; count < num_input; count++) { sum += gpu_in[count] * weights[tid*num_input + count]; } sum += biases[tid]; //Activation: If the layer is the final layer, then don't do anything, otherwise relu activation max(0,value) is taken. if ((num_output) != num_classes) { if (sum < 0.0) { sum = 0.0l; } } gpu_out[tid] = sum; } int main() { //-------------------------------Reading all the weights and biases and the original image----------------------// //File pointers to all the weights and biases and the image. FILE * pFileImg; FILE * pFileW0; FILE * pFileB0; FILE * pFileW2; FILE * pFileB2; FILE * pFileDW5; FILE * pFileDB5; FILE * pFileDW7; FILE * pFileDB7; //Note: The weights are pulled out after training the mnist digit recognition dataset on keras with handwritten digits 0-9. The images are greysvale and hence to start with they have only one channel. //Weights are pulled out and inputted into the respective arrays. //Pulling out image values double* img_arr = (double *)malloc(28 * 28 * sizeof(double)); pFileImg = fopen("/home/meghanap/Image_RO.txt", "r"); if (pFileImg == NULL) { fputs("File error", stderr); exit(1); } for (int i = 0; i < 784; i++) { fscanf(pFileImg, "%lf", &img_arr[i]); } //Pulling out kernel weights for first conv layer. double* W0_arr = (double *)malloc(288 * sizeof(double)); pFileW0 = fopen("/home/meghanap/W0_RO.txt", "r"); if (pFileW0 == NULL) { fputs("File error", stderr); exit(1); } for (int i = 0; i < 288; i++) { fscanf(pFileW0, "%lf", &W0_arr[i]); } //Pulling out kernel biases for first conv layer. double* B0_arr = (double *)malloc(32 * sizeof(double)); pFileB0 = fopen("/home/meghanap/B0.txt", "r"); if (pFileB0 == NULL) { fputs("File error", stderr); exit(1); } for (int i = 0; i < 32; i++) { fscanf(pFileB0, "%lf", &B0_arr[i]); } //Pulling out kernel weights for second conv layer. double* W2_arr = (double *)malloc(18432 * sizeof(double)); pFileW2 = fopen("/home/meghanap/W2_RO.txt", "r"); if (pFileW2 == NULL) { fputs("File error", stderr); exit(1); } for (int i = 0; i < 18432; i++) { fscanf(pFileW2, "%lf", &W2_arr[i]); } //Pulling out kernel biases for second conv layer. 
double* B2_arr = (double *)malloc(64 * sizeof(double)); pFileB2 = fopen("/home/meghanap/B2.txt", "r"); if (pFileB2 == NULL) { fputs("File error", stderr); exit(1); } for (int i = 0; i < 64; i++) { fscanf(pFileB2, "%lf", &B2_arr[i]); } //Pulling out weights for first fully connected layer. double* DW5_arr = (double *)malloc(1638400 * sizeof(double)); pFileDW5 = fopen("/home/meghanap/DW5_RO.txt", "r"); if (pFileDW5 == NULL) { fputs("File error", stderr); exit(1); } for (int i = 0; i < 1638400; i++) { fscanf(pFileDW5, "%lf", &DW5_arr[i]); } //Pulling out biases for first fully connected layer. double* DB5_arr = (double *)malloc(1024 * sizeof(double)); pFileDB5 = fopen("/home/meghanap/DB5.txt", "r"); if (pFileDB5 == NULL) { fputs("File error", stderr); exit(1); } for (int i = 0; i < 1024; i++) { fscanf(pFileDB5, "%lf", &DB5_arr[i]); } //Pulling out weights for second fully connected layer. double* DW7_arr = (double *)malloc(10240 * sizeof(double)); pFileDW7 = fopen("/home/meghanap/DW7_RO.txt", "r"); if (pFileDW7 == NULL) { fputs("File error", stderr); exit(1); } for (int i = 0; i < 10240; i++) { fscanf(pFileDW7, "%lf", &DW7_arr[i]); } //Pulling out biases for second fully connected layer. double* DB7_arr = (double *)malloc(10 * sizeof(double)); pFileDB7 = fopen("/home/meghanap/DB7.txt", "r"); if (pFileDB7 == NULL) { fputs("File error", stderr); exit(1); } for (int i = 0; i < 10; i++) { fscanf(pFileDB7, "%lf", &DB7_arr[i]); } //-------------------------------------Reading done------------------------------------------------// int number_of_classes = 10; hipDeviceProp_t prop; hipGetDeviceProperties(&prop, 0); int max_threads_per_block = prop.maxThreadsPerBlock; //Convolution kernel preparation. int input_image_height = 28; int input_image_width = 28; int input_image_depth = 1; int kernel_height = 3; int kernel_width = 3; int kernel_depth = 1; int no_of_kernels = 32; int output_image_height = input_image_height - kernel_height + 1; int output_image_width = input_image_width - kernel_width + 1; int output_image_depth = no_of_kernels; //Defined 3 D blocks with z_threads=no_of_kernels and x_threads*y_threads*z_threads=max_threads_per_block. So, if x_threads=y_threads, then x_threads=sqrt(max_threads_per_block/z_threads). //Defined 2 D grids. int z_threads = no_of_kernels; int x_threads = sqrt(max_threads_per_block / z_threads); int y_threads = x_threads; dim3 blockdim0(x_threads, y_threads, z_threads); dim3 griddim0(output_image_width / x_threads, output_image_height / y_threads, 1); //Copying the image into GPU. double *gpu_img0; hipMalloc((void **)&gpu_img0, 28 * 28 * sizeof(double)); hipMemcpy(gpu_img0, img_arr, 28 * 28 * sizeof(double), hipMemcpyHostToDevice); //Copying the kernel weights into GPU. double *kernel_weights0; hipMalloc((void **)&kernel_weights0, 3 * 3 * 32 * sizeof(double)); hipMemcpy(kernel_weights0, W0_arr, 3 * 3 * 32 * sizeof(double), hipMemcpyHostToDevice); //Copying kernel biases into GPU. double *kernel_biases0; hipMalloc((void **)&kernel_biases0, 32 * sizeof(double)); hipMemcpy(kernel_biases0, B0_arr, 32 * sizeof(double), hipMemcpyHostToDevice); //Creating output array inside GPU. 
double *gpu_out0; hipMalloc((void **)&gpu_out0, 26 * 26 * 32 * sizeof(double)); convolution_kernel << <griddim0, blockdim0 >> > (input_image_height, input_image_width, input_image_depth, gpu_img0, kernel_height, kernel_width, kernel_depth, kernel_weights0, kernel_biases0, no_of_kernels, output_image_height, output_image_width, output_image_depth, gpu_out0); double* layer_0 = (double *)malloc(26 * 26 * 32 * sizeof(double)); hipMemcpy(layer_0, gpu_out0, 26 * 26 * 32 * sizeof(double), hipMemcpyDeviceToHost); //***layer_0 is the output from the first layer. //Free all the unnecessary things from the GPU to make space for the next kernel. hipFree(gpu_img0); hipFree(kernel_weights0); hipFree(kernel_biases0); hipFree(gpu_out0); //Maxpooling layer kernel preparation. int pool_height = 3; int pool_width = 3; input_image_height = output_image_height; input_image_width = output_image_width; input_image_depth = output_image_depth; z_threads = input_image_depth; x_threads = sqrt(max_threads_per_block / z_threads); y_threads = x_threads; output_image_height = (input_image_height - (input_image_height % pool_height)) / pool_height; output_image_width = (input_image_width - (input_image_width % pool_width)) / pool_width; output_image_depth = input_image_depth; dim3 blockdim1(x_threads, y_threads, z_threads); dim3 griddim1(output_image_width / x_threads, output_image_height / y_threads, 1); //Copying the previous output into GPU. double *gpu_in1; hipMalloc((void **)&gpu_in1, input_image_height*input_image_width*input_image_depth * sizeof(double)); hipMemcpy(gpu_in1, layer_0, input_image_height*input_image_width*input_image_depth * sizeof(double), hipMemcpyHostToDevice); //Creating output array inside GPU. double *gpu_out1; hipMalloc((void **)&gpu_out1, output_image_height*output_image_width*output_image_depth * sizeof(double)); maxpool_kernel << <griddim1, blockdim1 >> > (input_image_height, input_image_width, input_image_depth, gpu_in1, pool_height, pool_width, output_image_height, output_image_width, output_image_depth, gpu_out1); double* layer_1 = (double *)malloc(output_image_height*output_image_width*output_image_depth * sizeof(double)); hipMemcpy(layer_1, gpu_out1, output_image_height*output_image_width*output_image_depth * sizeof(double), hipMemcpyDeviceToHost); //**layer 1 is the output. hipFree(gpu_in1); hipFree(gpu_out1); //Convolution layer preparation. input_image_height = output_image_height; input_image_width = output_image_width; input_image_depth = output_image_depth; kernel_height = 3; kernel_width = 3; kernel_depth = 32; no_of_kernels = 64; output_image_height = input_image_height - kernel_height + 1; output_image_width = input_image_width - kernel_width + 1; output_image_depth = no_of_kernels; //Defined 3 D blocks with z_threads=no_of_kernels and x_threads*y_threads*z_threads=max_threads_per_block. So, if x_threads=y_threads, then x_threads=sqrt(max_threads_per_block/z_threads). //Defined 2 D grids. z_threads = no_of_kernels; x_threads = sqrt(max_threads_per_block / z_threads); y_threads = x_threads; dim3 blockdim2(x_threads, y_threads, z_threads); dim3 griddim2(output_image_width / x_threads, output_image_height / y_threads, 1); //Copying input into GPU. double* gpu_in2; hipMalloc((void**)&gpu_in2, input_image_height*input_image_width *input_image_depth * sizeof(double)); hipMemcpy(gpu_in2, layer_1, input_image_height*input_image_width *input_image_depth * sizeof(double), hipMemcpyHostToDevice); //Copying kernels weights into GPU. 
double *kernel_weights2; hipMalloc((void **)&kernel_weights2, kernel_height*kernel_width*kernel_depth*no_of_kernels * sizeof(double)); hipMemcpy(kernel_weights2, W2_arr, kernel_height*kernel_width*kernel_depth*no_of_kernels * sizeof(double), hipMemcpyHostToDevice); //Copying kernel biases into GPU. double *kernel_biases2; hipMalloc((void **)&kernel_biases2, no_of_kernels * sizeof(double)); hipMemcpy(kernel_biases2, B2_arr, no_of_kernels * sizeof(double), hipMemcpyHostToDevice); //Creating output array inside GPU. double *gpu_out2; hipMalloc((void **)&gpu_out2, output_image_height*output_image_width*output_image_depth * sizeof(double)); convolution_kernel << <griddim2, blockdim2 >> > (input_image_height, input_image_width, input_image_depth, gpu_in2, kernel_height, kernel_width, kernel_depth, kernel_weights2, kernel_biases2, no_of_kernels, output_image_height, output_image_width, output_image_depth, gpu_out2); double* layer_2 = (double *)malloc(output_image_height*output_image_width*output_image_depth * sizeof(double)); hipMemcpy(layer_2, gpu_out2, output_image_height*output_image_width*output_image_depth * sizeof(double), hipMemcpyDeviceToHost); //**Layer 2 is the output. hipFree(gpu_in2); hipFree(gpu_out2); hipFree(kernel_weights2); hipFree(kernel_biases2); //Maxpooling layer. pool_height = 3; pool_width = 3; input_image_height = output_image_height; input_image_width = output_image_width; input_image_depth = output_image_depth; z_threads = input_image_depth; x_threads = sqrt(max_threads_per_block / z_threads); y_threads = x_threads; int excess_w = input_image_height % pool_height; int excess_h = input_image_height % pool_height; output_image_height = (input_image_height) / pool_height; output_image_width = (input_image_width) / pool_width; output_image_depth = input_image_depth; dim3 blockdim3(x_threads, y_threads, z_threads); dim3 griddim3(output_image_width / x_threads, output_image_height / y_threads, 1); //Copying the previous output into GPU. double *gpu_in3; hipMalloc((void **)&gpu_in3, input_image_height*input_image_width*input_image_depth * sizeof(double)); hipMemcpy(gpu_in3, layer_2, input_image_height*input_image_width*input_image_depth * sizeof(double), hipMemcpyHostToDevice); //Creating output array inside GPU. double *gpu_out3; hipMalloc((void **)&gpu_out3, output_image_height*output_image_width*output_image_depth * sizeof(double)); maxpool_kernel << <griddim3, blockdim3 >> > (input_image_height, input_image_width, input_image_depth, gpu_in3, pool_height, pool_width, output_image_height, output_image_width, output_image_depth, gpu_out3); double* layer_3 = (double *)malloc(output_image_height*output_image_width*output_image_depth * sizeof(double)); hipMemcpy(layer_3, gpu_out3, output_image_height*output_image_width*output_image_depth * sizeof(double), hipMemcpyDeviceToHost); //**layer 1 is the output. hipFree(gpu_in3); hipFree(gpu_out3); //Flattening in the CPU itself. //The idea is to apply the same kind of C major flattening that keras does to the elements coming in from the second pooling layer. //The array coming in consists of rows of each sheet arranged side by side followed by the rows of the next sheet and so on. Jumbling up that order to stick with keras type flattening which is the C-major ordering consisting of z-axis changing fastest, follwed by column and then row changing. 
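/* ----------------------------------------------------------------------------
   Illustrative example (hypothetical values, not from the original program):
   for a 2 x 2 pooled output with 2 channels (in_h = in_w = in_d = 2), layer_3
   is stored channel-major, index = d*H*W + h*W + w, and the loop below
   rewrites it channel-fastest, index = (h*W + w)*D + d, i.e. the ordering
   described in the comment above:

     layer_3   : c0(0,0) c0(0,1) c0(1,0) c0(1,1) c1(0,0) c1(0,1) c1(1,0) c1(1,1)
     flattened : c0(0,0) c1(0,0) c0(0,1) c1(0,1) c0(1,0) c1(1,0) c0(1,1) c1(1,1)
   -------------------------------------------------------------------------- */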
int in_h = output_image_height; int in_w = output_image_width; int in_d = output_image_depth; int image_pointer; int channel_pointer; int k = 0; double* flattened = (double *)malloc(in_h*in_w*in_d * sizeof(double)); for (image_pointer = 0; image_pointer < in_h*in_w; image_pointer++) { for (channel_pointer = 0; channel_pointer < in_d; channel_pointer++) { flattened[k] = layer_3[image_pointer + channel_pointer * in_h*in_w]; k++; } } //Fully connected/Dense layer. int input_layer_nodes = output_image_height * output_image_width*output_image_depth; int output_layer_nodes = 1024; double *gpu_in5; hipMalloc((void **)&gpu_in5, input_layer_nodes * sizeof(double)); hipMemcpy(gpu_in5, flattened, input_layer_nodes * sizeof(double), hipMemcpyHostToDevice); double *FC_weights5; hipMalloc((void **)&FC_weights5, input_layer_nodes *output_layer_nodes * sizeof(double)); hipMemcpy(FC_weights5, DW5_arr, input_layer_nodes *output_layer_nodes * sizeof(double), hipMemcpyHostToDevice); double *FC_biases5; hipMalloc((void **)&FC_biases5, output_layer_nodes * sizeof(double)); hipMemcpy(FC_biases5, DB5_arr, output_layer_nodes * sizeof(double), hipMemcpyHostToDevice); double *gpu_out5; hipMalloc((void **)&gpu_out5, output_layer_nodes * sizeof(double)); dim3 blocksize5(max_threads_per_block, 1, 1); dim3 gridsize5(output_layer_nodes / max_threads_per_block, 1, 1); dense_kernel << <gridsize5, blocksize5 >> > (input_layer_nodes, output_layer_nodes, gpu_in5, FC_weights5, FC_biases5, gpu_out5, number_of_classes); //**layer5 is the output. double* layer_5 = (double *)malloc(output_layer_nodes * sizeof(double)); hipMemcpy(layer_5, gpu_out5, output_layer_nodes * sizeof(double), hipMemcpyDeviceToHost); hipFree(gpu_in5); hipFree(gpu_out5); //Fully connected/dense layer. input_layer_nodes = output_layer_nodes; output_layer_nodes = number_of_classes; double *gpu_in7; hipMalloc((void **)&gpu_in7, input_layer_nodes * sizeof(double)); hipMemcpy(gpu_in7, layer_5, input_layer_nodes * sizeof(double), hipMemcpyHostToDevice); double *FC_weights7; hipMalloc((void **)&FC_weights7, input_layer_nodes *output_layer_nodes * sizeof(double)); hipMemcpy(FC_weights7, DW7_arr, input_layer_nodes *output_layer_nodes * sizeof(double), hipMemcpyHostToDevice); double *FC_biases7; hipMalloc((void **)&FC_biases7, output_layer_nodes * sizeof(double)); hipMemcpy(FC_biases7, DB7_arr, output_layer_nodes * sizeof(double), hipMemcpyHostToDevice); double *gpu_out7; hipMalloc((void **)&gpu_out7, output_layer_nodes * sizeof(double)); dim3 blocksize7(max_threads_per_block, 1, 1); dim3 gridsize7(output_layer_nodes / max_threads_per_block, 1, 1); dense_kernel << <gridsize7, blocksize7 >> > (input_layer_nodes, output_layer_nodes, gpu_in7, FC_weights7, FC_biases7, gpu_out7, number_of_classes); double* layer_7 = (double *)malloc(output_layer_nodes * sizeof(double)); hipMemcpy(layer_7, gpu_out7, output_layer_nodes * sizeof(double), hipMemcpyDeviceToHost); //**layer7 is the output. hipFree(gpu_in7); hipFree(gpu_out7); //Softmax of the output layer. int op_layer_size = number_of_classes; int i; double sum = 0.0; for (i = 0; i < op_layer_size; i++) { sum += exp(layer_7[i]); } double max = layer_7[0] / sum; int max_no = 0; for (i = 0; i < op_layer_size; i++) { if ((layer_7[i] / sum) > max) { max_no = i; } } printf("\n The written predicted digit is %d\n", max_no); }
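// -----------------------------------------------------------------------------
// Illustrative sketch (hypothetical helpers, not part of the original program):
// two details in main() above deserve a check. (1) Grid sizes are formed with
// truncating division; on a device where maxThreadsPerBlock is 1024 this gives
// gridsize7 = 10 / 1024 = 0 blocks for the final dense layer, and griddim0's
// 26 / 5 = 5 blocks leave the last output column unvisited. The usual launch
// pattern is a ceiling division. (2) The final argmax never updates `max`, so
// it reports the last class whose score exceeds class 0 rather than the true
// maximum; softmax is monotonic, so the argmax can be taken on the raw scores.
static inline int ceilDiv(int n, int block) { return (n + block - 1) / block; }

static int argmaxScores(const double* scores, int count) {
	int best = 0;
	for (int i = 1; i < count; i++) {
		if (scores[i] > scores[best]) best = i; // track the running maximum
	}
	return best;
}
// Usage sketch with the names from main() above:
//   dim3 gridsize7(ceilDiv(output_layer_nodes, max_threads_per_block), 1, 1);
//   int predicted = argmaxScores(layer_7, number_of_classes);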
ea5442c671c433aef174391525fbbaf8ddcc4541.cu
//Training of the CNN is done using Keras. After training for 10 epochs, the obtained accuracy is 99.70% on the training data set and 99.14% on the test data set. //This model implements the following layers in order: 2D Convolution---->Maxpooling---->2D Convolution---->Maxpooling---->Fully_connected layer---->Fully_connected layer. //The image is a 28*28 greyscale image. The specifications of the layers are as follows: //Layer_0: Convolution: 32 3*3 kernels with no padding and 1 stride. //Layer_1: Maxpooling: 2*2 filters with no padding and a stride equal to the filter size. //Layer_2: Convolution: 64 3*3 kernels with no padding and 1 stride. //Layer_3: Maxpooling: 2*2 filters with no padding and a stride equal to the filter size. //Layer_4: Flattening //Layer_5: Fully connected / dense layer with 1024 output units. //Layer_6: Dropout (done during training only). //Layer_7: Fully connected / dense layer with 10 output units. //All arrays and matrices are designed to be row ordered in this implementation. #include "cuda_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include<stdlib.h> #include<math.h> //Kernel that does convolution. Each thread identifies the patch of the image it is responsible for and multiply-adds that patch's values with the corresponding kernel weights. //The depth of the output image is the number of kernels. __global__ void convolution_kernel(int h, int w, int d, double* gpu_in, int k_h, int k_w, int k_d, double* kernel_weights, double* kernel_biases, int num_kernels, int op_h, int op_w, int op_d, double* gpu_out) { //Identifying threads by their IDs. int row = blockDim.y*blockIdx.y + threadIdx.y; int col = blockDim.x*blockIdx.x + threadIdx.x; int deep = blockDim.z *blockIdx.z + threadIdx.z; //Return if thread out of bounds if (row >= op_h || col >= op_w || deep >= op_d) return; double out=0.0; int kernel_pointer = 0; //Each thread/each output node identifies the corresponding element in the matrix that it is responsible to multiply-add. for (int depth_pointer = 0; depth_pointer < k_d; depth_pointer++) { for (int row_pointer = 0; row_pointer < k_h; row_pointer++) { for (int column_pointer = 0; column_pointer < k_w; column_pointer++) { out += gpu_in[((row*w + col) + row_pointer * w + column_pointer + h * w*depth_pointer)] * kernel_weights[kernel_pointer + deep * k_h*k_w*k_d]; kernel_pointer++; } } } //Bias addition and ReLU activation. One bias is applied to one output image layer, since one bias is applicable to one kernel. //ReLU activation: relu(a)=max(0,a). If the value is less than 0 then it becomes 0, else it is retained. if (out + kernel_biases[deep] < 0.0) gpu_out[row*op_w + col + deep * op_h*op_w] = 0.0l; else gpu_out[row*op_w + col + deep * op_h*op_w] = out + kernel_biases[deep]; } //Kernel that does maxpooling. __global__ void maxpool_kernel(int h, int w, int d, double* gpu_in, int pool_height, int pool_width, int op_h, int op_w, int op_d, double* gpu_out) { int row = blockDim.y*blockIdx.y + threadIdx.y; int col = blockDim.x*blockIdx.x + threadIdx.x; int deep = blockDim.z *blockIdx.z + threadIdx.z; if (row >= op_h || col >= op_w || deep >= op_d) return; double out; double max; int kernel_pointer = 0; //The maximum is chosen to be the first element of the pool filter. max = gpu_in[(deep*w*h) + (row*pool_height)*w + (col*pool_width)]; //We step through all the elements within the filter's window, look for the maximum element, and that maximum becomes the thread's value. 
for (int row_pointer = 0; row_pointer < pool_height; row_pointer++) { for (int column_pointer = 0; column_pointer < pool_width; column_pointer++) { if (gpu_in[(deep*w*h) + (row*pool_height)*w + (col*pool_width) + (row_pointer*w) + (column_pointer)] > max) max = gpu_in[(deep*w*h) + (row*pool_height)*w + (col*pool_width) + (row_pointer*w) + (column_pointer)]; } } gpu_out[deep*op_w*op_h + row * op_w + col] = max; } //This kernel implements the fully connected layers. __global__ void dense_kernel(int num_input, int num_output, double* gpu_in, double* weights, double* biases, double* gpu_out, int num_classes) { int tid = blockDim.x*blockIdx.x + threadIdx.x; if (tid >= num_output) return; double sum = 0.0l; //The weights are extracted from Keras such that all the weights to one output node appears together, followed by weights to the next node and so on. //Thus, each output node will be a multiply add of adjacent weight values with the input nodes. for (int count = 0; count < num_input; count++) { sum += gpu_in[count] * weights[tid*num_input + count]; } sum += biases[tid]; //Activation: If the layer is the final layer, then don't do anything, otherwise relu activation max(0,value) is taken. if ((num_output) != num_classes) { if (sum < 0.0) { sum = 0.0l; } } gpu_out[tid] = sum; } int main() { //-------------------------------Reading all the weights and biases and the original image----------------------// //File pointers to all the weights and biases and the image. FILE * pFileImg; FILE * pFileW0; FILE * pFileB0; FILE * pFileW2; FILE * pFileB2; FILE * pFileDW5; FILE * pFileDB5; FILE * pFileDW7; FILE * pFileDB7; //Note: The weights are pulled out after training the mnist digit recognition dataset on keras with handwritten digits 0-9. The images are greysvale and hence to start with they have only one channel. //Weights are pulled out and inputted into the respective arrays. //Pulling out image values double* img_arr = (double *)malloc(28 * 28 * sizeof(double)); pFileImg = fopen("/home/meghanap/Image_RO.txt", "r"); if (pFileImg == NULL) { fputs("File error", stderr); exit(1); } for (int i = 0; i < 784; i++) { fscanf(pFileImg, "%lf", &img_arr[i]); } //Pulling out kernel weights for first conv layer. double* W0_arr = (double *)malloc(288 * sizeof(double)); pFileW0 = fopen("/home/meghanap/W0_RO.txt", "r"); if (pFileW0 == NULL) { fputs("File error", stderr); exit(1); } for (int i = 0; i < 288; i++) { fscanf(pFileW0, "%lf", &W0_arr[i]); } //Pulling out kernel biases for first conv layer. double* B0_arr = (double *)malloc(32 * sizeof(double)); pFileB0 = fopen("/home/meghanap/B0.txt", "r"); if (pFileB0 == NULL) { fputs("File error", stderr); exit(1); } for (int i = 0; i < 32; i++) { fscanf(pFileB0, "%lf", &B0_arr[i]); } //Pulling out kernel weights for second conv layer. double* W2_arr = (double *)malloc(18432 * sizeof(double)); pFileW2 = fopen("/home/meghanap/W2_RO.txt", "r"); if (pFileW2 == NULL) { fputs("File error", stderr); exit(1); } for (int i = 0; i < 18432; i++) { fscanf(pFileW2, "%lf", &W2_arr[i]); } //Pulling out kernel biases for second conv layer. double* B2_arr = (double *)malloc(64 * sizeof(double)); pFileB2 = fopen("/home/meghanap/B2.txt", "r"); if (pFileB2 == NULL) { fputs("File error", stderr); exit(1); } for (int i = 0; i < 64; i++) { fscanf(pFileB2, "%lf", &B2_arr[i]); } //Pulling out weights for first fully connected layer. 
double* DW5_arr = (double *)malloc(1638400 * sizeof(double)); pFileDW5 = fopen("/home/meghanap/DW5_RO.txt", "r"); if (pFileDW5 == NULL) { fputs("File error", stderr); exit(1); } for (int i = 0; i < 1638400; i++) { fscanf(pFileDW5, "%lf", &DW5_arr[i]); } //Pulling out biases for first fully connected layer. double* DB5_arr = (double *)malloc(1024 * sizeof(double)); pFileDB5 = fopen("/home/meghanap/DB5.txt", "r"); if (pFileDB5 == NULL) { fputs("File error", stderr); exit(1); } for (int i = 0; i < 1024; i++) { fscanf(pFileDB5, "%lf", &DB5_arr[i]); } //Pulling out weights for second fully connected layer. double* DW7_arr = (double *)malloc(10240 * sizeof(double)); pFileDW7 = fopen("/home/meghanap/DW7_RO.txt", "r"); if (pFileDW7 == NULL) { fputs("File error", stderr); exit(1); } for (int i = 0; i < 10240; i++) { fscanf(pFileDW7, "%lf", &DW7_arr[i]); } //Pulling out biases for second fully connected layer. double* DB7_arr = (double *)malloc(10 * sizeof(double)); pFileDB7 = fopen("/home/meghanap/DB7.txt", "r"); if (pFileDB7 == NULL) { fputs("File error", stderr); exit(1); } for (int i = 0; i < 10; i++) { fscanf(pFileDB7, "%lf", &DB7_arr[i]); } //-------------------------------------Reading done------------------------------------------------// int number_of_classes = 10; cudaDeviceProp prop; cudaGetDeviceProperties(&prop, 0); int max_threads_per_block = prop.maxThreadsPerBlock; //Convolution kernel preparation. int input_image_height = 28; int input_image_width = 28; int input_image_depth = 1; int kernel_height = 3; int kernel_width = 3; int kernel_depth = 1; int no_of_kernels = 32; int output_image_height = input_image_height - kernel_height + 1; int output_image_width = input_image_width - kernel_width + 1; int output_image_depth = no_of_kernels; //Defined 3 D blocks with z_threads=no_of_kernels and x_threads*y_threads*z_threads=max_threads_per_block. So, if x_threads=y_threads, then x_threads=sqrt(max_threads_per_block/z_threads). //Defined 2 D grids. int z_threads = no_of_kernels; int x_threads = sqrt(max_threads_per_block / z_threads); int y_threads = x_threads; dim3 blockdim0(x_threads, y_threads, z_threads); dim3 griddim0(output_image_width / x_threads, output_image_height / y_threads, 1); //Copying the image into GPU. double *gpu_img0; cudaMalloc((void **)&gpu_img0, 28 * 28 * sizeof(double)); cudaMemcpy(gpu_img0, img_arr, 28 * 28 * sizeof(double), cudaMemcpyHostToDevice); //Copying the kernel weights into GPU. double *kernel_weights0; cudaMalloc((void **)&kernel_weights0, 3 * 3 * 32 * sizeof(double)); cudaMemcpy(kernel_weights0, W0_arr, 3 * 3 * 32 * sizeof(double), cudaMemcpyHostToDevice); //Copying kernel biases into GPU. double *kernel_biases0; cudaMalloc((void **)&kernel_biases0, 32 * sizeof(double)); cudaMemcpy(kernel_biases0, B0_arr, 32 * sizeof(double), cudaMemcpyHostToDevice); //Creating output array inside GPU. double *gpu_out0; cudaMalloc((void **)&gpu_out0, 26 * 26 * 32 * sizeof(double)); convolution_kernel << <griddim0, blockdim0 >> > (input_image_height, input_image_width, input_image_depth, gpu_img0, kernel_height, kernel_width, kernel_depth, kernel_weights0, kernel_biases0, no_of_kernels, output_image_height, output_image_width, output_image_depth, gpu_out0); double* layer_0 = (double *)malloc(26 * 26 * 32 * sizeof(double)); cudaMemcpy(layer_0, gpu_out0, 26 * 26 * 32 * sizeof(double), cudaMemcpyDeviceToHost); //***layer_0 is the output from the first layer. //Free all the unnecessary things from the GPU to make space for the next kernel. 
cudaFree(gpu_img0); cudaFree(kernel_weights0); cudaFree(kernel_biases0); cudaFree(gpu_out0); //Maxpooling layer kernel preparation. int pool_height = 3; int pool_width = 3; input_image_height = output_image_height; input_image_width = output_image_width; input_image_depth = output_image_depth; z_threads = input_image_depth; x_threads = sqrt(max_threads_per_block / z_threads); y_threads = x_threads; output_image_height = (input_image_height - (input_image_height % pool_height)) / pool_height; output_image_width = (input_image_width - (input_image_width % pool_width)) / pool_width; output_image_depth = input_image_depth; dim3 blockdim1(x_threads, y_threads, z_threads); dim3 griddim1(output_image_width / x_threads, output_image_height / y_threads, 1); //Copying the previous output into GPU. double *gpu_in1; cudaMalloc((void **)&gpu_in1, input_image_height*input_image_width*input_image_depth * sizeof(double)); cudaMemcpy(gpu_in1, layer_0, input_image_height*input_image_width*input_image_depth * sizeof(double), cudaMemcpyHostToDevice); //Creating output array inside GPU. double *gpu_out1; cudaMalloc((void **)&gpu_out1, output_image_height*output_image_width*output_image_depth * sizeof(double)); maxpool_kernel << <griddim1, blockdim1 >> > (input_image_height, input_image_width, input_image_depth, gpu_in1, pool_height, pool_width, output_image_height, output_image_width, output_image_depth, gpu_out1); double* layer_1 = (double *)malloc(output_image_height*output_image_width*output_image_depth * sizeof(double)); cudaMemcpy(layer_1, gpu_out1, output_image_height*output_image_width*output_image_depth * sizeof(double), cudaMemcpyDeviceToHost); //**layer 1 is the output. cudaFree(gpu_in1); cudaFree(gpu_out1); //Convolution layer preparation. input_image_height = output_image_height; input_image_width = output_image_width; input_image_depth = output_image_depth; kernel_height = 3; kernel_width = 3; kernel_depth = 32; no_of_kernels = 64; output_image_height = input_image_height - kernel_height + 1; output_image_width = input_image_width - kernel_width + 1; output_image_depth = no_of_kernels; //Defined 3 D blocks with z_threads=no_of_kernels and x_threads*y_threads*z_threads=max_threads_per_block. So, if x_threads=y_threads, then x_threads=sqrt(max_threads_per_block/z_threads). //Defined 2 D grids. z_threads = no_of_kernels; x_threads = sqrt(max_threads_per_block / z_threads); y_threads = x_threads; dim3 blockdim2(x_threads, y_threads, z_threads); dim3 griddim2(output_image_width / x_threads, output_image_height / y_threads, 1); //Copying input into GPU. double* gpu_in2; cudaMalloc((void**)&gpu_in2, input_image_height*input_image_width *input_image_depth * sizeof(double)); cudaMemcpy(gpu_in2, layer_1, input_image_height*input_image_width *input_image_depth * sizeof(double), cudaMemcpyHostToDevice); //Copying kernels weights into GPU. double *kernel_weights2; cudaMalloc((void **)&kernel_weights2, kernel_height*kernel_width*kernel_depth*no_of_kernels * sizeof(double)); cudaMemcpy(kernel_weights2, W2_arr, kernel_height*kernel_width*kernel_depth*no_of_kernels * sizeof(double), cudaMemcpyHostToDevice); //Copying kernel biases into GPU. double *kernel_biases2; cudaMalloc((void **)&kernel_biases2, no_of_kernels * sizeof(double)); cudaMemcpy(kernel_biases2, B2_arr, no_of_kernels * sizeof(double), cudaMemcpyHostToDevice); //Creating output array inside GPU. 
double *gpu_out2; cudaMalloc((void **)&gpu_out2, output_image_height*output_image_width*output_image_depth * sizeof(double)); convolution_kernel << <griddim2, blockdim2 >> > (input_image_height, input_image_width, input_image_depth, gpu_in2, kernel_height, kernel_width, kernel_depth, kernel_weights2, kernel_biases2, no_of_kernels, output_image_height, output_image_width, output_image_depth, gpu_out2); double* layer_2 = (double *)malloc(output_image_height*output_image_width*output_image_depth * sizeof(double)); cudaMemcpy(layer_2, gpu_out2, output_image_height*output_image_width*output_image_depth * sizeof(double), cudaMemcpyDeviceToHost); //**Layer 2 is the output. cudaFree(gpu_in2); cudaFree(gpu_out2); cudaFree(kernel_weights2); cudaFree(kernel_biases2); //Maxpooling layer. pool_height = 3; pool_width = 3; input_image_height = output_image_height; input_image_width = output_image_width; input_image_depth = output_image_depth; z_threads = input_image_depth; x_threads = sqrt(max_threads_per_block / z_threads); y_threads = x_threads; int excess_w = input_image_width % pool_width; int excess_h = input_image_height % pool_height; output_image_height = (input_image_height) / pool_height; output_image_width = (input_image_width) / pool_width; output_image_depth = input_image_depth; dim3 blockdim3(x_threads, y_threads, z_threads); dim3 griddim3(output_image_width / x_threads, output_image_height / y_threads, 1); //Copying the previous output into GPU. double *gpu_in3; cudaMalloc((void **)&gpu_in3, input_image_height*input_image_width*input_image_depth * sizeof(double)); cudaMemcpy(gpu_in3, layer_2, input_image_height*input_image_width*input_image_depth * sizeof(double), cudaMemcpyHostToDevice); //Creating output array inside GPU. double *gpu_out3; cudaMalloc((void **)&gpu_out3, output_image_height*output_image_width*output_image_depth * sizeof(double)); maxpool_kernel << <griddim3, blockdim3 >> > (input_image_height, input_image_width, input_image_depth, gpu_in3, pool_height, pool_width, output_image_height, output_image_width, output_image_depth, gpu_out3); double* layer_3 = (double *)malloc(output_image_height*output_image_width*output_image_depth * sizeof(double)); cudaMemcpy(layer_3, gpu_out3, output_image_height*output_image_width*output_image_depth * sizeof(double), cudaMemcpyDeviceToHost); //**layer_3 is the output. cudaFree(gpu_in3); cudaFree(gpu_out3); //Flattening in the CPU itself. //The idea is to apply the same kind of C-major flattening that Keras does to the elements coming in from the second pooling layer. //The array coming in consists of the rows of each sheet arranged side by side, followed by the rows of the next sheet and so on. That order is rearranged here to match the Keras-style flattening, i.e. the C-major ordering in which the z-axis (channel) changes fastest, followed by the column and then the row. int in_h = output_image_height; int in_w = output_image_width; int in_d = output_image_depth; int image_pointer; int channel_pointer; int k = 0; double* flattened = (double *)malloc(in_h*in_w*in_d * sizeof(double)); for (image_pointer = 0; image_pointer < in_h*in_w; image_pointer++) { for (channel_pointer = 0; channel_pointer < in_d; channel_pointer++) { flattened[k] = layer_3[image_pointer + channel_pointer * in_h*in_w]; k++; } } //Fully connected/Dense layer. 
int input_layer_nodes = output_image_height * output_image_width*output_image_depth; int output_layer_nodes = 1024; double *gpu_in5; cudaMalloc((void **)&gpu_in5, input_layer_nodes * sizeof(double)); cudaMemcpy(gpu_in5, flattened, input_layer_nodes * sizeof(double), cudaMemcpyHostToDevice); double *FC_weights5; cudaMalloc((void **)&FC_weights5, input_layer_nodes *output_layer_nodes * sizeof(double)); cudaMemcpy(FC_weights5, DW5_arr, input_layer_nodes *output_layer_nodes * sizeof(double), cudaMemcpyHostToDevice); double *FC_biases5; cudaMalloc((void **)&FC_biases5, output_layer_nodes * sizeof(double)); cudaMemcpy(FC_biases5, DB5_arr, output_layer_nodes * sizeof(double), cudaMemcpyHostToDevice); double *gpu_out5; cudaMalloc((void **)&gpu_out5, output_layer_nodes * sizeof(double)); dim3 blocksize5(max_threads_per_block, 1, 1); dim3 gridsize5((output_layer_nodes + max_threads_per_block - 1) / max_threads_per_block, 1, 1); //Ceiling division so that every output node gets a thread. dense_kernel << <gridsize5, blocksize5 >> > (input_layer_nodes, output_layer_nodes, gpu_in5, FC_weights5, FC_biases5, gpu_out5, number_of_classes); //**layer5 is the output. double* layer_5 = (double *)malloc(output_layer_nodes * sizeof(double)); cudaMemcpy(layer_5, gpu_out5, output_layer_nodes * sizeof(double), cudaMemcpyDeviceToHost); cudaFree(gpu_in5); cudaFree(gpu_out5); //Fully connected/dense layer. input_layer_nodes = output_layer_nodes; output_layer_nodes = number_of_classes; double *gpu_in7; cudaMalloc((void **)&gpu_in7, input_layer_nodes * sizeof(double)); cudaMemcpy(gpu_in7, layer_5, input_layer_nodes * sizeof(double), cudaMemcpyHostToDevice); double *FC_weights7; cudaMalloc((void **)&FC_weights7, input_layer_nodes *output_layer_nodes * sizeof(double)); cudaMemcpy(FC_weights7, DW7_arr, input_layer_nodes *output_layer_nodes * sizeof(double), cudaMemcpyHostToDevice); double *FC_biases7; cudaMalloc((void **)&FC_biases7, output_layer_nodes * sizeof(double)); cudaMemcpy(FC_biases7, DB7_arr, output_layer_nodes * sizeof(double), cudaMemcpyHostToDevice); double *gpu_out7; cudaMalloc((void **)&gpu_out7, output_layer_nodes * sizeof(double)); dim3 blocksize7(max_threads_per_block, 1, 1); dim3 gridsize7((output_layer_nodes + max_threads_per_block - 1) / max_threads_per_block, 1, 1); //Ceiling division: output_layer_nodes (10) is smaller than max_threads_per_block, so plain integer division would give a grid size of 0 and the kernel would never run. dense_kernel << <gridsize7, blocksize7 >> > (input_layer_nodes, output_layer_nodes, gpu_in7, FC_weights7, FC_biases7, gpu_out7, number_of_classes); double* layer_7 = (double *)malloc(output_layer_nodes * sizeof(double)); cudaMemcpy(layer_7, gpu_out7, output_layer_nodes * sizeof(double), cudaMemcpyDeviceToHost); //**layer7 is the output. cudaFree(gpu_in7); cudaFree(gpu_out7); //Softmax of the output layer. int op_layer_size = number_of_classes; int i; double sum = 0.0; for (i = 0; i < op_layer_size; i++) { sum += exp(layer_7[i]); } double max = exp(layer_7[0]) / sum; int max_no = 0; for (i = 0; i < op_layer_size; i++) { double prob = exp(layer_7[i]) / sum; if (prob > max) { max = prob; max_no = i; } } printf("\n The written predicted digit is %d\n", max_no); }
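
A minimal standalone sketch (not part of the file above) of ceiling-division grid sizing: the convolution and maxpool kernels already guard with "if (row >= op_h || col >= op_w || deep >= op_d) return;", so rounding the grid up instead of truncating guarantees every output element gets a thread even when the output extent is not a multiple of the block extent. The dimensions used here are hypothetical.

#include <cstdio>

// Round a/b up to the next integer.
static unsigned int ceil_div(unsigned int a, unsigned int b) { return (a + b - 1) / b; }

int main() {
    // hypothetical output extent and block extent
    unsigned int output_w = 26, output_h = 26, x_threads = 5, y_threads = 5;
    // 26 / 5 would truncate to 5 blocks and leave the last output row/column uncomputed;
    // ceil_div gives 6 blocks and the kernel's bounds check discards the extra threads.
    printf("grid = %u x %u blocks of %u x %u threads\n",
           ceil_div(output_w, x_threads), ceil_div(output_h, y_threads), x_threads, y_threads);
    return 0;
}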
5071892853ed5da544a9da4ef9001ddbc7f3cbaf.hip
// !!! This is a file automatically generated by hipify!!! #include "../include/cuda_kernel.cuh" #include "../include/device_resource_manager.cuh" #include "../include/time_manager.hpp" #include <cassert> #include <cstdio> #include <ctime> #include <memory> #include <utility> #include <hip/hip_runtime.h> #include <hip/hip_runtime_api.h> namespace sudoku { namespace kernel { namespace { constexpr unsigned kBlocks = 512; constexpr unsigned kThreadsPerBlock = 128; constexpr unsigned kIterations = 24; #define SetNthBit(number, n) ((number) |= (1ul << (n))) #define ClearNthBit(number, n) ((number) &= ~(1ul << (n))) #define GetNthBit(number, n) (((number) >> (n)) & 1u) #define gpuErrchk(ans) \ { gpuAssert((ans), __FILE__, __LINE__); } inline void gpuAssert(hipError_t code, const char *file, int line, bool abort = true) { if (code != hipSuccess) { fprintf(stderr, "GPUassert: %s %s %d\n", hipGetErrorString(code), file, line); if (abort) exit(code); } } __device__ bool NotInRow(Board::FieldValue *board, int row) { uint16_t st = 0; bool ret = true; Board::FieldValue v; for (int i = 0; i < Board::kBoardSize; i++) { v = board[Board::kBoardSize * row + i]; ret = v <= Board::kBoardSize && ret; ret = !(v && GetNthBit(st, v)) && ret; SetNthBit(st, v); } return ret; } __device__ bool NotInCol(Board::FieldValue *board, int col) { uint16_t st = 0; bool ret = true; Board::FieldValue v; for (int i = 0; i < Board::kBoardSize; i++) { v = board[Board::kBoardSize * i + col]; ret = v <= Board::kBoardSize && ret; ret = !(v && GetNthBit(st, v)) && ret; SetNthBit(st, v); } return ret; } __device__ bool NotInBox(Board::FieldValue *board, int row, int col) { row -= row % Board::kQuadrantSize; col -= col % Board::kQuadrantSize; uint16_t st = 0; bool ret = true; auto pom_y = row - row % Board::kQuadrantSize; auto pom_x = col - col % Board::kQuadrantSize; Board::FieldValue v; for (int i = 0; i < Board::kQuadrantSize; ++i) for (int j = 0; j < Board::kQuadrantSize; ++j) { v = board[(pom_y + i) * Board::kBoardSize + pom_x + j]; ret = v <= Board::kBoardSize && ret; ret = !(v && GetNthBit(st, v)) && ret; SetNthBit(st, v); } return ret; } __device__ bool IsValid(Board::FieldValue *board, int row, int col) { return NotInRow(board, row) && NotInCol(board, col) && NotInBox(board, row, col); } __global__ void Generator(Board::FieldValue *old_boards, int *old_boards_count, Board::FieldValue *new_boards, int *new_boards_count, unsigned char *empty_fields, unsigned char *empty_fields_count, Board::FieldValue *solved_board, int *solved_board_mutex) { __shared__ Board::FieldValue s_current_boards[kThreadsPerBlock * Board::kBoardSize * Board::kBoardSize]; auto *my_board = s_current_boards + threadIdx.x * Board::kBoardSize * Board::kBoardSize; for (int index = blockIdx.x * kThreadsPerBlock; index < *old_boards_count; index += kThreadsPerBlock * kBlocks) { __syncthreads(); if (*solved_board_mutex) return; for (int i = 0; i < Board::kBoardSize * Board::kBoardSize; ++i) { auto j = i * kThreadsPerBlock + index * Board::kBoardSize * Board::kBoardSize + threadIdx.x; s_current_boards[i * kThreadsPerBlock + threadIdx.x] = j < *old_boards_count * Board::kBoardSize * Board::kBoardSize ? 
old_boards[j] : 1; } __syncthreads(); for (int i = 0; i < Board::kBoardSize * Board::kBoardSize; ++i) { if (!my_board[i]) { auto row = i / Board::kBoardSize; auto col = i % Board::kBoardSize; for (int j = 1; j <= Board::kBoardSize; ++j) { my_board[i] = j; if (IsValid(my_board, row, col)) { auto pos = atomicAdd(new_boards_count, 1); if (pos < deviceResourceManager::kNBoards) { unsigned char empty_index = static_cast<unsigned char>(-1); for (int k = 0; k < Board::kBoardSize * Board::kBoardSize; ++k) { if (!(new_boards[pos * Board::kBoardSize * Board::kBoardSize + k] = my_board[k])) empty_fields[++empty_index + pos * Board::kBoardSize * Board::kBoardSize] = k; } empty_fields_count[pos] = empty_index + 1; } else { atomicMin(new_boards_count, deviceResourceManager::kNBoards); return; } } } goto NOT_SOLVED; } } if (threadIdx.x + index < *old_boards_count) { atomicCAS(solved_board_mutex, 0, blockIdx.x * blockDim.x + threadIdx.x); if (*solved_board_mutex == blockIdx.x * kThreadsPerBlock + threadIdx.x) for (int i = 0; i < Board::kBoardSize * Board::kBoardSize; ++i) solved_board[i] = my_board[i]; } NOT_SOLVED:; } } __device__ uint16_t GetPossibleValues( Board::FieldValue board[Board::kBoardSize][Board::kBoardSize]) { uint16_t free = 0x03ff; for (int i = 0; i < Board::kBoardSize; ++i) { ClearNthBit(free, board[threadIdx.y][i]); ClearNthBit(free, board[i][threadIdx.x]); } auto pom_x = threadIdx.x - threadIdx.x % Board::kQuadrantSize; auto pom_y = threadIdx.y - threadIdx.y % Board::kQuadrantSize; for (int i = 0; i < Board::kQuadrantSize; ++i) for (int j = 0; j < Board::kQuadrantSize; ++j) ClearNthBit(free, board[pom_y + (threadIdx.y + j) % Board::kQuadrantSize] [pom_x + (threadIdx.x + i) % Board::kQuadrantSize]); return free >> 1; } __device__ bool Solve(Board::FieldValue *board, uint8_t *empty_fields, uint8_t empty_fields_count) { unsigned char empty_index = 0; auto field = empty_fields[empty_index]; auto row = field / Board::kBoardSize; auto col = field % Board::kBoardSize; while (empty_index < empty_fields_count) { ++board[field]; if (IsValid(board, row, col)) { field = empty_fields[++empty_index]; row = field / Board::kBoardSize; col = field % Board::kBoardSize; } else { if (board[field] >= Board::kBoardSize) { board[field] = 0; field = empty_fields[--empty_index]; row = field / Board::kBoardSize; col = field % Board::kBoardSize; } } } return empty_index == empty_fields_count; } __global__ void Backtracker(Board::FieldValue *old_boards, int *old_boards_count, uint8_t *empty_fields, uint8_t *empty_fields_count, Board::FieldValue *solved_board, int *solved_board_mutex) { for (int index = blockIdx.x * kThreadsPerBlock + threadIdx.x; index < *old_boards_count; index += kThreadsPerBlock * kBlocks) { if (*solved_board_mutex) return; auto index_mul = index * Board::kBoardSize * Board::kBoardSize; if (Solve(old_boards + index_mul, empty_fields + index_mul, empty_fields_count[index])) { atomicCAS(solved_board_mutex, 0, blockIdx.x * kThreadsPerBlock + threadIdx.x); if (*solved_board_mutex != blockIdx.x * kThreadsPerBlock + threadIdx.x) return; for (int i = 0; i < Board::kBoardSize * Board::kBoardSize; ++i) solved_board[i] = old_boards[index_mul + i]; } } } __global__ void Simplificator(Board::FieldValue *old_boards, int *old_boards_count, Board::FieldValue *new_boards, int *new_boards_count) { __shared__ Board::FieldValue s_board[Board::kBoardSize][Board::kBoardSize]; __shared__ int pos; pos = 0; for (int index = blockIdx.x; index < *old_boards_count; index += kBlocks) { __syncthreads(); bool active = 
!(s_board[threadIdx.y][threadIdx.x] = (old_boards + index * Board::kBoardSize * Board::kBoardSize)[Board::kBoardSize * threadIdx.y + threadIdx.x]); __syncthreads(); auto pv = GetPossibleValues(s_board); auto nelems = __popc(pv); while (__syncthreads_or(active && nelems == 1)) { if (active && nelems == 1) { s_board[threadIdx.y][threadIdx.x] = __ffs(pv); active = false; } __syncthreads(); if (active) { pv = GetPossibleValues(s_board); nelems = __popc(pv); } } if (__syncthreads_or(active && nelems == 0)) continue; if (__syncthreads_and( IsValid(reinterpret_cast<Board::FieldValue *>(s_board), threadIdx.y, threadIdx.x))) { if (threadIdx.x + threadIdx.y == 0) pos = atomicAdd(new_boards_count, 1); __syncthreads(); (new_boards + pos * Board::kBoardSize * Board::kBoardSize)[Board::kBoardSize * threadIdx.y + threadIdx.x] = s_board[threadIdx.y][threadIdx.x]; } } } class ScopedCudaEvent { public: ScopedCudaEvent() { gpuErrchk(hipEventCreate(&event_)); } ~ScopedCudaEvent() { hipEventDestroy(event_); } hipEvent_t Get() { return event_; } void Record() { gpuErrchk(hipEventRecord(event_)); } void Sync() { gpuErrchk(hipEventSynchronize(event_)); } private: hipEvent_t event_; ScopedCudaEvent(ScopedCudaEvent const &) = delete; ScopedCudaEvent &operator=(ScopedCudaEvent const &) = delete; }; class ScopedCudaStream { public: ScopedCudaStream() { gpuErrchk(hipStreamCreate(&stream_)); } ~ScopedCudaStream() { hipStreamDestroy(stream_); } hipStream_t Get() { return stream_; } void Sync() { gpuErrchk(hipStreamSynchronize(stream_)); } private: hipStream_t stream_; ScopedCudaStream(ScopedCudaStream const &) = delete; ScopedCudaStream &operator=(ScopedCudaStream const &) = delete; }; } // namespace std::vector<Board::FieldValue> Run(std::vector<Board::FieldValue> const &board) { Board::FieldValue *d_old_boards = deviceResourceManager::GetOldBoards(); int *d_old_boards_count = deviceResourceManager::GetOldBoardsCount(); Board::FieldValue *d_new_boards = deviceResourceManager::GetNewBoards(); int *d_new_boards_count = deviceResourceManager::GetNewBoardsCount(); Board::FieldValue *d_solved_board = deviceResourceManager::GetSolvedBoard(); int *d_solved_board_mutex = deviceResourceManager::GetSolvedBoardMutex(); uint8_t *d_empty_fields = deviceResourceManager::GetEmptyFields(); uint8_t *d_empty_fields_count = deviceResourceManager::GetEmptyFieldsCount(); ScopedCudaStream kernel_stream; ScopedCudaStream old_boards_set_stream, new_boards_set_stream, empty_fields_set_stream, empty_fields_count_set_stream; ScopedCudaEvent start, stop; start.Record(); gpuErrchk(hipMemsetAsync(d_old_boards, 0, deviceResourceManager::kNBoards * Board::kBoardSize * Board::kBoardSize * sizeof(Board::FieldValue), old_boards_set_stream.Get())); gpuErrchk(hipMemsetAsync(d_new_boards, 0, deviceResourceManager::kNBoards * Board::kBoardSize * Board::kBoardSize * sizeof(Board::FieldValue), new_boards_set_stream.Get())); gpuErrchk(hipMemsetAsync(d_empty_fields, 0, deviceResourceManager::kNBoards * Board::kBoardSize * Board::kBoardSize * sizeof(uint8_t), empty_fields_set_stream.Get())); gpuErrchk(hipMemsetAsync(d_empty_fields_count, 0, deviceResourceManager::kNBoards * sizeof(uint8_t), empty_fields_count_set_stream.Get())); std::unique_ptr<int> one(new int(1)); gpuErrchk(hipMemcpy(d_old_boards_count, one.get(), sizeof(int), hipMemcpyHostToDevice)); gpuErrchk(hipMemcpy(d_old_boards, board.data(), Board::kBoardSize * Board::kBoardSize * sizeof(Board::FieldValue), hipMemcpyHostToDevice)); gpuErrchk(hipMemset(d_solved_board, 0, Board::kBoardSize * 
Board::kBoardSize * sizeof(Board::FieldValue))); gpuErrchk(hipMemset(d_solved_board_mutex, 0, sizeof(int))); old_boards_set_stream.Sync(); new_boards_set_stream.Sync(); empty_fields_set_stream.Sync(); empty_fields_count_set_stream.Sync(); gpuErrchk(hipMemset(d_new_boards_count, 0, sizeof(int))); hipLaunchKernelGGL(( Generator), dim3(kBlocks), dim3(kThreadsPerBlock), 0, kernel_stream.Get(), d_old_boards, d_old_boards_count, d_new_boards, d_new_boards_count, d_empty_fields, d_empty_fields_count, d_solved_board, d_solved_board_mutex); std::swap(d_old_boards, d_new_boards); std::swap(d_old_boards_count, d_new_boards_count); kernel_stream.Sync(); for (int i = 0; i < kIterations; ++i) { gpuErrchk(hipMemset(d_new_boards_count, 0, sizeof(int))); hipLaunchKernelGGL(( Simplificator), dim3(kBlocks), dim3(dim3(Board::kBoardSize, Board::kBoardSize)), 0, kernel_stream.Get(), d_old_boards, d_old_boards_count, d_new_boards, d_new_boards_count); std::swap(d_old_boards, d_new_boards); std::swap(d_old_boards_count, d_new_boards_count); kernel_stream.Sync(); gpuErrchk(hipMemset(d_new_boards_count, 0, sizeof(int))); hipLaunchKernelGGL(( Generator), dim3(kBlocks), dim3(kThreadsPerBlock), 0, kernel_stream.Get(), d_old_boards, d_old_boards_count, d_new_boards, d_new_boards_count, d_empty_fields, d_empty_fields_count, d_solved_board, d_solved_board_mutex); std::swap(d_old_boards, d_new_boards); std::swap(d_old_boards_count, d_new_boards_count); kernel_stream.Sync(); } int solved = 0; gpuErrchk(hipMemcpy(&solved, d_solved_board_mutex, sizeof(int), hipMemcpyDeviceToHost)); if (!solved) { hipLaunchKernelGGL(( Backtracker), dim3(kBlocks), dim3(kThreadsPerBlock), 0, kernel_stream.Get(), d_old_boards, d_old_boards_count, d_empty_fields, d_empty_fields_count, d_solved_board, d_solved_board_mutex); kernel_stream.Sync(); } std::unique_ptr<Board::FieldValue[]> ret( new Board::FieldValue[Board::kBoardSize * Board::kBoardSize]); gpuErrchk(hipMemcpy(ret.get(), d_solved_board, Board::kBoardSize * Board::kBoardSize * sizeof(Board::FieldValue), hipMemcpyDeviceToHost)); stop.Record(); stop.Sync(); float ms = 0; gpuErrchk(hipEventElapsedTime(&ms, start.Get(), stop.Get())); timeManager::AddTimeElapsed(ms); return {ret.get(), ret.get() + Board::kBoardSize * Board::kBoardSize}; } } // namespace kernel } // namespace sudoku
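
A minimal standalone host-side sketch (not from the file above) of the same bitmask idea the validity kernels use: set bit v for every value v seen in a sudoku row, shift out bit 0 (the empty-cell marker, analogous to the free >> 1 in GetPossibleValues), and the popcount of the mask gives the number of distinct values already placed. The example row is hypothetical.

#include <cstdint>
#include <cstdio>

int main() {
    int row[9] = {5, 3, 0, 0, 7, 0, 0, 0, 0};              // one sudoku row, 0 = empty
    uint16_t seen = 0;
    for (int i = 0; i < 9; ++i) seen |= (uint16_t)(1u << row[i]); // mark every value that appears
    seen >>= 1;                                             // drop bit 0, which only marks empty cells
    int placed = __builtin_popcount(seen);                  // GCC/Clang builtin: distinct values in the row
    printf("%d values placed, %d distinct values remain available for the empty cells\n",
           placed, 9 - placed);
    return 0;
}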
5071892853ed5da544a9da4ef9001ddbc7f3cbaf.cu
#include "../include/cuda_kernel.cuh" #include "../include/device_resource_manager.cuh" #include "../include/time_manager.hpp" #include <cassert> #include <cstdio> #include <ctime> #include <memory> #include <utility> #include <cuda.h> #include <cuda_runtime_api.h> namespace sudoku { namespace kernel { namespace { constexpr unsigned kBlocks = 512; constexpr unsigned kThreadsPerBlock = 128; constexpr unsigned kIterations = 24; #define SetNthBit(number, n) ((number) |= (1ul << (n))) #define ClearNthBit(number, n) ((number) &= ~(1ul << (n))) #define GetNthBit(number, n) (((number) >> (n)) & 1u) #define gpuErrchk(ans) \ { gpuAssert((ans), __FILE__, __LINE__); } inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort = true) { if (code != cudaSuccess) { fprintf(stderr, "GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line); if (abort) exit(code); } } __device__ bool NotInRow(Board::FieldValue *board, int row) { uint16_t st = 0; bool ret = true; Board::FieldValue v; for (int i = 0; i < Board::kBoardSize; i++) { v = board[Board::kBoardSize * row + i]; ret = v <= Board::kBoardSize && ret; ret = !(v && GetNthBit(st, v)) && ret; SetNthBit(st, v); } return ret; } __device__ bool NotInCol(Board::FieldValue *board, int col) { uint16_t st = 0; bool ret = true; Board::FieldValue v; for (int i = 0; i < Board::kBoardSize; i++) { v = board[Board::kBoardSize * i + col]; ret = v <= Board::kBoardSize && ret; ret = !(v && GetNthBit(st, v)) && ret; SetNthBit(st, v); } return ret; } __device__ bool NotInBox(Board::FieldValue *board, int row, int col) { row -= row % Board::kQuadrantSize; col -= col % Board::kQuadrantSize; uint16_t st = 0; bool ret = true; auto pom_y = row - row % Board::kQuadrantSize; auto pom_x = col - col % Board::kQuadrantSize; Board::FieldValue v; for (int i = 0; i < Board::kQuadrantSize; ++i) for (int j = 0; j < Board::kQuadrantSize; ++j) { v = board[(pom_y + i) * Board::kBoardSize + pom_x + j]; ret = v <= Board::kBoardSize && ret; ret = !(v && GetNthBit(st, v)) && ret; SetNthBit(st, v); } return ret; } __device__ bool IsValid(Board::FieldValue *board, int row, int col) { return NotInRow(board, row) && NotInCol(board, col) && NotInBox(board, row, col); } __global__ void Generator(Board::FieldValue *old_boards, int *old_boards_count, Board::FieldValue *new_boards, int *new_boards_count, unsigned char *empty_fields, unsigned char *empty_fields_count, Board::FieldValue *solved_board, int *solved_board_mutex) { __shared__ Board::FieldValue s_current_boards[kThreadsPerBlock * Board::kBoardSize * Board::kBoardSize]; auto *my_board = s_current_boards + threadIdx.x * Board::kBoardSize * Board::kBoardSize; for (int index = blockIdx.x * kThreadsPerBlock; index < *old_boards_count; index += kThreadsPerBlock * kBlocks) { __syncthreads(); if (*solved_board_mutex) return; for (int i = 0; i < Board::kBoardSize * Board::kBoardSize; ++i) { auto j = i * kThreadsPerBlock + index * Board::kBoardSize * Board::kBoardSize + threadIdx.x; s_current_boards[i * kThreadsPerBlock + threadIdx.x] = j < *old_boards_count * Board::kBoardSize * Board::kBoardSize ? 
old_boards[j] : 1; } __syncthreads(); for (int i = 0; i < Board::kBoardSize * Board::kBoardSize; ++i) { if (!my_board[i]) { auto row = i / Board::kBoardSize; auto col = i % Board::kBoardSize; for (int j = 1; j <= Board::kBoardSize; ++j) { my_board[i] = j; if (IsValid(my_board, row, col)) { auto pos = atomicAdd(new_boards_count, 1); if (pos < deviceResourceManager::kNBoards) { unsigned char empty_index = static_cast<unsigned char>(-1); for (int k = 0; k < Board::kBoardSize * Board::kBoardSize; ++k) { if (!(new_boards[pos * Board::kBoardSize * Board::kBoardSize + k] = my_board[k])) empty_fields[++empty_index + pos * Board::kBoardSize * Board::kBoardSize] = k; } empty_fields_count[pos] = empty_index + 1; } else { atomicMin(new_boards_count, deviceResourceManager::kNBoards); return; } } } goto NOT_SOLVED; } } if (threadIdx.x + index < *old_boards_count) { atomicCAS(solved_board_mutex, 0, blockIdx.x * blockDim.x + threadIdx.x); if (*solved_board_mutex == blockIdx.x * kThreadsPerBlock + threadIdx.x) for (int i = 0; i < Board::kBoardSize * Board::kBoardSize; ++i) solved_board[i] = my_board[i]; } NOT_SOLVED:; } } __device__ uint16_t GetPossibleValues( Board::FieldValue board[Board::kBoardSize][Board::kBoardSize]) { uint16_t free = 0x03ff; for (int i = 0; i < Board::kBoardSize; ++i) { ClearNthBit(free, board[threadIdx.y][i]); ClearNthBit(free, board[i][threadIdx.x]); } auto pom_x = threadIdx.x - threadIdx.x % Board::kQuadrantSize; auto pom_y = threadIdx.y - threadIdx.y % Board::kQuadrantSize; for (int i = 0; i < Board::kQuadrantSize; ++i) for (int j = 0; j < Board::kQuadrantSize; ++j) ClearNthBit(free, board[pom_y + (threadIdx.y + j) % Board::kQuadrantSize] [pom_x + (threadIdx.x + i) % Board::kQuadrantSize]); return free >> 1; } __device__ bool Solve(Board::FieldValue *board, uint8_t *empty_fields, uint8_t empty_fields_count) { unsigned char empty_index = 0; auto field = empty_fields[empty_index]; auto row = field / Board::kBoardSize; auto col = field % Board::kBoardSize; while (empty_index < empty_fields_count) { ++board[field]; if (IsValid(board, row, col)) { field = empty_fields[++empty_index]; row = field / Board::kBoardSize; col = field % Board::kBoardSize; } else { if (board[field] >= Board::kBoardSize) { board[field] = 0; field = empty_fields[--empty_index]; row = field / Board::kBoardSize; col = field % Board::kBoardSize; } } } return empty_index == empty_fields_count; } __global__ void Backtracker(Board::FieldValue *old_boards, int *old_boards_count, uint8_t *empty_fields, uint8_t *empty_fields_count, Board::FieldValue *solved_board, int *solved_board_mutex) { for (int index = blockIdx.x * kThreadsPerBlock + threadIdx.x; index < *old_boards_count; index += kThreadsPerBlock * kBlocks) { if (*solved_board_mutex) return; auto index_mul = index * Board::kBoardSize * Board::kBoardSize; if (Solve(old_boards + index_mul, empty_fields + index_mul, empty_fields_count[index])) { atomicCAS(solved_board_mutex, 0, blockIdx.x * kThreadsPerBlock + threadIdx.x); if (*solved_board_mutex != blockIdx.x * kThreadsPerBlock + threadIdx.x) return; for (int i = 0; i < Board::kBoardSize * Board::kBoardSize; ++i) solved_board[i] = old_boards[index_mul + i]; } } } __global__ void Simplificator(Board::FieldValue *old_boards, int *old_boards_count, Board::FieldValue *new_boards, int *new_boards_count) { __shared__ Board::FieldValue s_board[Board::kBoardSize][Board::kBoardSize]; __shared__ int pos; pos = 0; for (int index = blockIdx.x; index < *old_boards_count; index += kBlocks) { __syncthreads(); bool active = 
!(s_board[threadIdx.y][threadIdx.x] = (old_boards + index * Board::kBoardSize * Board::kBoardSize)[Board::kBoardSize * threadIdx.y + threadIdx.x]); __syncthreads(); auto pv = GetPossibleValues(s_board); auto nelems = __popc(pv); while (__syncthreads_or(active && nelems == 1)) { if (active && nelems == 1) { s_board[threadIdx.y][threadIdx.x] = __ffs(pv); active = false; } __syncthreads(); if (active) { pv = GetPossibleValues(s_board); nelems = __popc(pv); } } if (__syncthreads_or(active && nelems == 0)) continue; if (__syncthreads_and( IsValid(reinterpret_cast<Board::FieldValue *>(s_board), threadIdx.y, threadIdx.x))) { if (threadIdx.x + threadIdx.y == 0) pos = atomicAdd(new_boards_count, 1); __syncthreads(); (new_boards + pos * Board::kBoardSize * Board::kBoardSize)[Board::kBoardSize * threadIdx.y + threadIdx.x] = s_board[threadIdx.y][threadIdx.x]; } } } class ScopedCudaEvent { public: ScopedCudaEvent() { gpuErrchk(cudaEventCreate(&event_)); } ~ScopedCudaEvent() { cudaEventDestroy(event_); } cudaEvent_t Get() { return event_; } void Record() { gpuErrchk(cudaEventRecord(event_)); } void Sync() { gpuErrchk(cudaEventSynchronize(event_)); } private: cudaEvent_t event_; ScopedCudaEvent(ScopedCudaEvent const &) = delete; ScopedCudaEvent &operator=(ScopedCudaEvent const &) = delete; }; class ScopedCudaStream { public: ScopedCudaStream() { gpuErrchk(cudaStreamCreate(&stream_)); } ~ScopedCudaStream() { cudaStreamDestroy(stream_); } cudaStream_t Get() { return stream_; } void Sync() { gpuErrchk(cudaStreamSynchronize(stream_)); } private: cudaStream_t stream_; ScopedCudaStream(ScopedCudaStream const &) = delete; ScopedCudaStream &operator=(ScopedCudaStream const &) = delete; }; } // namespace std::vector<Board::FieldValue> Run(std::vector<Board::FieldValue> const &board) { Board::FieldValue *d_old_boards = deviceResourceManager::GetOldBoards(); int *d_old_boards_count = deviceResourceManager::GetOldBoardsCount(); Board::FieldValue *d_new_boards = deviceResourceManager::GetNewBoards(); int *d_new_boards_count = deviceResourceManager::GetNewBoardsCount(); Board::FieldValue *d_solved_board = deviceResourceManager::GetSolvedBoard(); int *d_solved_board_mutex = deviceResourceManager::GetSolvedBoardMutex(); uint8_t *d_empty_fields = deviceResourceManager::GetEmptyFields(); uint8_t *d_empty_fields_count = deviceResourceManager::GetEmptyFieldsCount(); ScopedCudaStream kernel_stream; ScopedCudaStream old_boards_set_stream, new_boards_set_stream, empty_fields_set_stream, empty_fields_count_set_stream; ScopedCudaEvent start, stop; start.Record(); gpuErrchk(cudaMemsetAsync(d_old_boards, 0, deviceResourceManager::kNBoards * Board::kBoardSize * Board::kBoardSize * sizeof(Board::FieldValue), old_boards_set_stream.Get())); gpuErrchk(cudaMemsetAsync(d_new_boards, 0, deviceResourceManager::kNBoards * Board::kBoardSize * Board::kBoardSize * sizeof(Board::FieldValue), new_boards_set_stream.Get())); gpuErrchk(cudaMemsetAsync(d_empty_fields, 0, deviceResourceManager::kNBoards * Board::kBoardSize * Board::kBoardSize * sizeof(uint8_t), empty_fields_set_stream.Get())); gpuErrchk(cudaMemsetAsync(d_empty_fields_count, 0, deviceResourceManager::kNBoards * sizeof(uint8_t), empty_fields_count_set_stream.Get())); std::unique_ptr<int> one(new int(1)); gpuErrchk(cudaMemcpy(d_old_boards_count, one.get(), sizeof(int), cudaMemcpyHostToDevice)); gpuErrchk(cudaMemcpy(d_old_boards, board.data(), Board::kBoardSize * Board::kBoardSize * sizeof(Board::FieldValue), cudaMemcpyHostToDevice)); gpuErrchk(cudaMemset(d_solved_board, 0, 
Board::kBoardSize * Board::kBoardSize * sizeof(Board::FieldValue))); gpuErrchk(cudaMemset(d_solved_board_mutex, 0, sizeof(int))); old_boards_set_stream.Sync(); new_boards_set_stream.Sync(); empty_fields_set_stream.Sync(); empty_fields_count_set_stream.Sync(); gpuErrchk(cudaMemset(d_new_boards_count, 0, sizeof(int))); Generator<<<kBlocks, kThreadsPerBlock, 0, kernel_stream.Get()>>>( d_old_boards, d_old_boards_count, d_new_boards, d_new_boards_count, d_empty_fields, d_empty_fields_count, d_solved_board, d_solved_board_mutex); std::swap(d_old_boards, d_new_boards); std::swap(d_old_boards_count, d_new_boards_count); kernel_stream.Sync(); for (int i = 0; i < kIterations; ++i) { gpuErrchk(cudaMemset(d_new_boards_count, 0, sizeof(int))); Simplificator<<<kBlocks, dim3(Board::kBoardSize, Board::kBoardSize), 0, kernel_stream.Get()>>>(d_old_boards, d_old_boards_count, d_new_boards, d_new_boards_count); std::swap(d_old_boards, d_new_boards); std::swap(d_old_boards_count, d_new_boards_count); kernel_stream.Sync(); gpuErrchk(cudaMemset(d_new_boards_count, 0, sizeof(int))); Generator<<<kBlocks, kThreadsPerBlock, 0, kernel_stream.Get()>>>( d_old_boards, d_old_boards_count, d_new_boards, d_new_boards_count, d_empty_fields, d_empty_fields_count, d_solved_board, d_solved_board_mutex); std::swap(d_old_boards, d_new_boards); std::swap(d_old_boards_count, d_new_boards_count); kernel_stream.Sync(); } int solved = 0; gpuErrchk(cudaMemcpy(&solved, d_solved_board_mutex, sizeof(int), cudaMemcpyDeviceToHost)); if (!solved) { Backtracker<<<kBlocks, kThreadsPerBlock, 0, kernel_stream.Get()>>>( d_old_boards, d_old_boards_count, d_empty_fields, d_empty_fields_count, d_solved_board, d_solved_board_mutex); kernel_stream.Sync(); } std::unique_ptr<Board::FieldValue[]> ret( new Board::FieldValue[Board::kBoardSize * Board::kBoardSize]); gpuErrchk(cudaMemcpy(ret.get(), d_solved_board, Board::kBoardSize * Board::kBoardSize * sizeof(Board::FieldValue), cudaMemcpyDeviceToHost)); stop.Record(); stop.Sync(); float ms = 0; gpuErrchk(cudaEventElapsedTime(&ms, start.Get(), stop.Get())); timeManager::AddTimeElapsed(ms); return {ret.get(), ret.get() + Board::kBoardSize * Board::kBoardSize}; } } // namespace kernel } // namespace sudoku
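
A minimal standalone sketch (not from the file above) of the bare CUDA event-timing pattern that the ScopedCudaEvent wrapper encapsulates: create two events, record them around the work, synchronize on the stop event, then read the elapsed milliseconds. Error checking is omitted for brevity and the kernel is a placeholder.

#include <cstdio>
#include <cuda_runtime.h>

__global__ void busy_kernel() { }                // placeholder work

int main() {
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start);                      // timestamp before the work
    busy_kernel<<<1, 1>>>();
    cudaEventRecord(stop);                       // timestamp after the work
    cudaEventSynchronize(stop);                  // wait until the stop event has completed
    float ms = 0.0f;
    cudaEventElapsedTime(&ms, start, stop);      // milliseconds between the two events
    printf("kernel took %.3f ms\n", ms);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    return 0;
}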
1ad2bee2ec104f2dd34e9f2d7686364a1f624a11.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "vptree.cuh" void createVPTree(float *dataset, int n, int d){ float *d_dataset = NULL; float *d_distances = NULL; float *d_indexes = NULL; float *d_tree = NULL; float *d_vp_points = NULL; unsigned long length = n * d; unsigned long tree_depth = log2(n); unsigned long tree_size = n * tree_depth; unsigned long vp_points_size = n - 1; // transfer dataset to device hipMalloc(&d_tree, tree_size * sizeof(float)); hipMalloc(&d_dataset, length * sizeof(float)); // keeps the dataset hipMalloc(&d_distances, n * sizeof(float)); // keeps the current distance results in each level hipMalloc(&d_indexes, n * sizeof(float)); // index map so we dont need to swap the whole element - only their indexes hipMalloc(&d_vp_points, vp_points_size * sizeof(float)); hipMemcpy(d_dataset, dataset, length * sizeof(float), hipMemcpyHostToDevice); initIndexes(d_indexes, n); for(int i = 1; i <= n/2; i <<= 1){ parallelDistance(d_distances, d_dataset, d_indexes, &d_vp_points[i-1], n, d, i); // calculate distances copyIndexes(&d_tree[(unsigned int)log2(i) * n], d_indexes, n); // store indexes in tree level bitonic(d_distances, d_indexes, n, DESCENDING, i); // sorted in descending order so as to suffle the vantage point } #ifdef GLOBAL_SYNCHRONIZATION hipError_t errSync = hipGetLastError(); hipError_t errAsync = hipDeviceSynchronize(); if (errSync != hipSuccess) cout << "Sync kernel error: " << hipGetErrorString(errSync) << " in vp tree" << endl; if (errAsync != hipSuccess) cout << "Sync kernel error: " << hipGetErrorString(errAsync) << " in vp tree" << endl; #endif hipFree(d_tree); hipFree(d_dataset); hipFree(d_distances); hipFree(d_indexes); hipFree(d_vp_points); } void initIndexes(float *d, int n){ dim3 block(512, 1); dim3 grid(CEIL_DIV(n, 512), 1); hipLaunchKernelGGL(( cudaInitIndexes), dim3(grid), dim3(block), 0, 0, d, n); hipError_t errSync = hipGetLastError(); hipError_t errAsync = hipDeviceSynchronize(); if (errSync != hipSuccess){ cout << "Sync kernel error: " << hipGetErrorString(errSync) << " in init indexing reduce: " << endl; exit(SYNCH_CUDA_ERROR); } if (errAsync != hipSuccess){ cout << "Sync kernel error: " << hipGetErrorString(errSync) << " in init indexing: " << endl; exit(ASYNC_CUDA_ERROR); } } void copyIndexes(float *dest, float *source, int n){ dim3 block(512, 1); dim3 grid(CEIL_DIV(n, 512), 1); hipLaunchKernelGGL(( cudaCopyIndexes), dim3(grid), dim3(block), 0, 0, dest, source, n); hipError_t errSync = hipGetLastError(); hipError_t errAsync = hipDeviceSynchronize(); if (errSync != hipSuccess){ cout << "Sync kernel error: " << hipGetErrorString(errSync) << " in init indexing reduce: " << endl; exit(SYNCH_CUDA_ERROR); } if (errAsync != hipSuccess){ cout << "Sync kernel error: " << hipGetErrorString(errSync) << " in init indexing: " << endl; exit(ASYNC_CUDA_ERROR); } } __global__ void cudaInitIndexes(float *d, int n){ int thread_id = blockIdx.x * blockDim.x + threadIdx.x; if(thread_id < n){ d[thread_id] = thread_id; } } __global__ void cudaCopyIndexes(float *dest, float *source, int n){ int thread_id = blockIdx.x * blockDim.x + threadIdx.x; if(thread_id < n){ dest[thread_id] = source[thread_id]; } }
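
A minimal standalone host-side sketch (not from the file above) of the index-map idea the comments describe: sort an array of indexes by a distance key instead of swapping the full elements. The distances here are hypothetical.

#include <algorithm>
#include <cstdio>
#include <vector>

int main() {
    // hypothetical distances of four points from the current vantage point
    std::vector<float> distance = {3.2f, 0.5f, 2.7f, 1.1f};
    std::vector<int> index = {0, 1, 2, 3};
    // reorder only the indexes by their distance (descending, as in the kernels above)
    std::sort(index.begin(), index.end(),
              [&](int a, int b) { return distance[a] > distance[b]; });
    for (int i : index) printf("point %d at distance %.1f\n", i, distance[i]);
    return 0;
}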
1ad2bee2ec104f2dd34e9f2d7686364a1f624a11.cu
#include "vptree.cuh" void createVPTree(float *dataset, int n, int d){ float *d_dataset = NULL; float *d_distances = NULL; float *d_indexes = NULL; float *d_tree = NULL; float *d_vp_points = NULL; unsigned long length = n * d; unsigned long tree_depth = log2(n); unsigned long tree_size = n * tree_depth; unsigned long vp_points_size = n - 1; // transfer dataset to device cudaMalloc(&d_tree, tree_size * sizeof(float)); cudaMalloc(&d_dataset, length * sizeof(float)); // keeps the dataset cudaMalloc(&d_distances, n * sizeof(float)); // keeps the current distance results in each level cudaMalloc(&d_indexes, n * sizeof(float)); // index map so we dont need to swap the whole element - only their indexes cudaMalloc(&d_vp_points, vp_points_size * sizeof(float)); cudaMemcpy(d_dataset, dataset, length * sizeof(float), cudaMemcpyHostToDevice); initIndexes(d_indexes, n); for(int i = 1; i <= n/2; i <<= 1){ parallelDistance(d_distances, d_dataset, d_indexes, &d_vp_points[i-1], n, d, i); // calculate distances copyIndexes(&d_tree[(unsigned int)log2(i) * n], d_indexes, n); // store indexes in tree level bitonic(d_distances, d_indexes, n, DESCENDING, i); // sorted in descending order so as to suffle the vantage point } #ifdef GLOBAL_SYNCHRONIZATION cudaError_t errSync = cudaGetLastError(); cudaError_t errAsync = cudaDeviceSynchronize(); if (errSync != cudaSuccess) cout << "Sync kernel error: " << cudaGetErrorString(errSync) << " in vp tree" << endl; if (errAsync != cudaSuccess) cout << "Sync kernel error: " << cudaGetErrorString(errAsync) << " in vp tree" << endl; #endif cudaFree(d_tree); cudaFree(d_dataset); cudaFree(d_distances); cudaFree(d_indexes); cudaFree(d_vp_points); } void initIndexes(float *d, int n){ dim3 block(512, 1); dim3 grid(CEIL_DIV(n, 512), 1); cudaInitIndexes<<<grid, block>>>(d, n); cudaError_t errSync = cudaGetLastError(); cudaError_t errAsync = cudaDeviceSynchronize(); if (errSync != cudaSuccess){ cout << "Sync kernel error: " << cudaGetErrorString(errSync) << " in init indexing reduce: " << endl; exit(SYNCH_CUDA_ERROR); } if (errAsync != cudaSuccess){ cout << "Sync kernel error: " << cudaGetErrorString(errSync) << " in init indexing: " << endl; exit(ASYNC_CUDA_ERROR); } } void copyIndexes(float *dest, float *source, int n){ dim3 block(512, 1); dim3 grid(CEIL_DIV(n, 512), 1); cudaCopyIndexes<<<grid, block>>>(dest, source, n); cudaError_t errSync = cudaGetLastError(); cudaError_t errAsync = cudaDeviceSynchronize(); if (errSync != cudaSuccess){ cout << "Sync kernel error: " << cudaGetErrorString(errSync) << " in init indexing reduce: " << endl; exit(SYNCH_CUDA_ERROR); } if (errAsync != cudaSuccess){ cout << "Sync kernel error: " << cudaGetErrorString(errSync) << " in init indexing: " << endl; exit(ASYNC_CUDA_ERROR); } } __global__ void cudaInitIndexes(float *d, int n){ int thread_id = blockIdx.x * blockDim.x + threadIdx.x; if(thread_id < n){ d[thread_id] = thread_id; } } __global__ void cudaCopyIndexes(float *dest, float *source, int n){ int thread_id = blockIdx.x * blockDim.x + threadIdx.x; if(thread_id < n){ dest[thread_id] = source[thread_id]; } }
ff70371605acdd2452ce5c035b01996bb2305142.hip
// !!! This is a file automatically generated by hipify!!! #include <cstdio> #include <cstdlib> #include <hip/hip_runtime.h> #include <quda_internal.h> #include <tune_quda.h> #include <gauge_field.h> #include <gauge_field_order.h> #include <quda_matrix.h> #include <index_helper.cuh> #include <cassert> /** @file clover_deriv_quda.cu @brief This kernel has been a bit of a pain to optimize since it is excessively register bound. To reduce register pressure we use shared memory to help offload some of this pressure. Annoyingly, the optimal approach for CUDA 8.0 is not the same as CUDA 7.5, so implementation is compiler version dependent. The CUDA 8.0 optimal code runs 10x slower on 7.5, though the 7.5 code runs fine on 8.0. CUDA >= 8.0 - Used shared memory for force accumulator matrix - Template mu / nu to prevent register spilling of indexing arrays - Force the computation routine to inline CUDA <= 7.5 - Used shared memory for force accumulator matrix - Keep mu/nu dynamic and use shared memory to store indexing arrays - Do not inline computation routine For the shared-memory dynamic indexing arrays, we use chars, since the array is 4-d, a 4-d coordinate can be stored in a single word which means that we will not have to worry about bank conflicts, and the shared array can be passed to the usual indexing routines (getCoordsExtended and linkIndexShift) with no code changes. This strategy works as long as each local lattice coordinate is less than 256. */ #if (TORCH_HIP_VERSION < 8000) #define DYNAMIC_MU_NU #endif // Use shared memory for the force accumulator matrix #define SHARED_ACCUMULATOR #ifdef DYNAMIC_MU_NU // When using dynamic mu/nu indexing, to avoid local spills use shared // memory for the per thread indexing arrays. // FIXME for reasons I don't understand, the shared array breaks in multi-GPU mode //#define SHARED_ARRAY #endif // DYNAMIC_MU_NU namespace quda { #ifdef SHARED_ACCUMULATOR #define DECLARE_LINK(U) \ extern __shared__ int s[]; \ real *U = (real*)s; \ { \ const int tid = (threadIdx.z*blockDim.y + threadIdx.y)*blockDim.x + threadIdx.x; \ const int block = blockDim.x * blockDim.y * blockDim.z; \ for (int i=0; i<18; i++) force[i*block + tid] = 0.0; \ } #define LINK real* template <typename real, typename Link> __device__ inline void axpy(real a, const real *x, Link &y) { const int tid = (threadIdx.z*blockDim.y + threadIdx.y)*blockDim.x + threadIdx.x; const int block = blockDim.x * blockDim.y * blockDim.z; #pragma unroll for (int i=0; i<9; i++) { y.data[i] += a * complex<real>(x[(2*i+0)*block + tid], x[(2*i+1)*block + tid]); } } template <typename real, typename Link> __device__ inline void operator+=(real *y, const Link &x) { const int tid = (threadIdx.z*blockDim.y + threadIdx.y)*blockDim.x + threadIdx.x; const int block = blockDim.x * blockDim.y * blockDim.z; #pragma unroll for (int i=0; i<9; i++) { y[(2*i+0)*block + tid] += x.data[i].real(); y[(2*i+1)*block + tid] += x.data[i].imag(); } } template <typename real, typename Link> __device__ inline void operator-=(real *y, const Link &x) { const int tid = (threadIdx.z*blockDim.y + threadIdx.y)*blockDim.x + threadIdx.x; const int block = blockDim.x * blockDim.y * blockDim.z; #pragma unroll for (int i=0; i<9; i++) { y[(2*i+0)*block + tid] -= x.data[i].real(); y[(2*i+1)*block + tid] -= x.data[i].imag(); } } #else #define DECLARE_LINK(U) Link U; #define LINK Link & template <typename real, typename Link> __device__ inline void axpy(real a, const Link &x, Link &y) { y += a*x; } #endif #if defined(SHARED_ARRAY) && 
defined(SHARED_ACCUMULATOR) #define DECLARE_ARRAY(d, idx) \ unsigned char *d; \ { \ extern __shared__ int s[]; \ int tid = (threadIdx.z*blockDim.y + threadIdx.y)*blockDim.x + threadIdx.x; \ int block = blockDim.x*blockDim.y*blockDim.z; \ int offset = 18*block*sizeof(real)/sizeof(int) + idx*block + tid; \ s[offset] = 0; \ d = (unsigned char*)&s[offset]; \ } #elif defined(SHARED_ARRAY) #error Cannot use SHARED_ARRAY with SHARED_ACCUMULATOR #else #define DECLARE_ARRAY(d, idx) \ int d[4] = {0, 0, 0, 0}; #endif #ifdef GPU_CLOVER_DIRAC template<class Float, typename Force, typename Gauge, typename Oprod> struct CloverDerivArg { int X[4]; int E[4]; int border[4]; Float coeff; int parity; int volumeCB; Force force; Gauge gauge; Oprod oprod; CloverDerivArg(const Force& force, const Gauge& gauge, const Oprod& oprod, const int *X_, const int *E_, double coeff, int parity) : coeff(coeff), parity(parity), volumeCB(force.volumeCB), force(force), gauge(gauge), oprod(oprod) { for(int dir=0; dir<4; ++dir) { this->X[dir] = X_[dir]; this->E[dir] = E_[dir]; this->border[dir] = (E_[dir] - X_[dir])/2; } } }; #ifdef DYNAMIC_MU_NU template <typename real, typename Arg, typename Link> __device__ void computeForce(LINK force, Arg &arg, int xIndex, int yIndex, int mu, int nu) { #else template <typename real, typename Arg, int mu, int nu, typename Link> __device__ __forceinline__ void computeForce(LINK force, Arg &arg, int xIndex, int yIndex) { #endif int otherparity = (1-arg.parity); const int tidx = mu > nu ? (mu-1)*mu/2 + nu : (nu-1)*nu/2 + mu; if (yIndex == 0) { // do "this" force DECLARE_ARRAY(x, 1); getCoordsExtended(x, xIndex, arg.X, arg.parity, arg.border); // U[mu](x) U[nu](x+mu) U[*mu](x+nu) U[*nu](x) Oprod(x) { DECLARE_ARRAY(d,0); // load U(x)_(+mu) Link U1 = arg.gauge(mu, linkIndexShift(x, d, arg.E), arg.parity); // load U(x+mu)_(+nu) d[mu]++; Link U2 = arg.gauge(nu, linkIndexShift(x, d, arg.E), otherparity); d[mu]--; // load U(x+nu)_(+mu) d[nu]++; Link U3 = arg.gauge(mu, linkIndexShift(x, d, arg.E), otherparity); d[nu]--; // load U(x)_(+nu) Link U4 = arg.gauge(nu, linkIndexShift(x, d, arg.E), arg.parity); // load Oprod Link Oprod1 = arg.oprod(tidx, linkIndexShift(x, d, arg.E), arg.parity); if (nu < mu) force -= U1*U2*conj(U3)*conj(U4)*Oprod1; else force += U1*U2*conj(U3)*conj(U4)*Oprod1; d[mu]++; d[nu]++; Link Oprod2 = arg.oprod(tidx, linkIndexShift(x, d, arg.E), arg.parity); d[mu]--; d[nu]--; if (nu < mu) force -= U1*U2*Oprod2*conj(U3)*conj(U4); else force += U1*U2*Oprod2*conj(U3)*conj(U4); } { DECLARE_ARRAY(d,0); // load U(x-nu)(+nu) d[nu]--; Link U1 = arg.gauge(nu, linkIndexShift(x, d, arg.E), otherparity); d[nu]++; // load U(x-nu)(+mu) d[nu]--; Link U2 = arg.gauge(mu, linkIndexShift(x, d, arg.E), otherparity); d[nu]++; // load U(x+mu-nu)(nu) d[mu]++; d[nu]--; Link U3 = arg.gauge(nu, linkIndexShift(x, d, arg.E), arg.parity); d[mu]--; d[nu]++; // load U(x)_(+mu) Link U4 = arg.gauge(mu, linkIndexShift(x, d, arg.E), arg.parity); d[mu]++; d[nu]--; Link Oprod1 = arg.oprod(tidx, linkIndexShift(x, d, arg.E), arg.parity); d[mu]--; d[nu]++; if (nu < mu) force += conj(U1)*U2*Oprod1*U3*conj(U4); else force -= conj(U1)*U2*Oprod1*U3*conj(U4); Link Oprod4 = arg.oprod(tidx, linkIndexShift(x, d, arg.E), arg.parity); if (nu < mu) force += Oprod4*conj(U1)*U2*U3*conj(U4); else force -= Oprod4*conj(U1)*U2*U3*conj(U4); } } else { // else do other force DECLARE_ARRAY(y, 1); getCoordsExtended(y, xIndex, arg.X, otherparity, arg.border); { DECLARE_ARRAY(d,0); // load U(x)_(+mu) Link U1 = arg.gauge(mu, linkIndexShift(y, d, 
arg.E), otherparity); // load U(x+mu)_(+nu) d[mu]++; Link U2 = arg.gauge(nu, linkIndexShift(y, d, arg.E), arg.parity); d[mu]--; // load U(x+nu)_(+mu) d[nu]++; Link U3 = arg.gauge(mu, linkIndexShift(y, d, arg.E), arg.parity); d[nu]--; // load U(x)_(+nu) Link U4 = arg.gauge(nu, linkIndexShift(y, d, arg.E), otherparity); // load opposite parity Oprod d[nu]++; Link Oprod3 = arg.oprod(tidx, linkIndexShift(y, d, arg.E), arg.parity); d[nu]--; if (nu < mu) force -= U1*U2*conj(U3)*Oprod3*conj(U4); else force += U1*U2*conj(U3)*Oprod3*conj(U4); // load Oprod(x+mu) d[mu]++; Link Oprod4 = arg.oprod(tidx, linkIndexShift(y, d, arg.E), arg.parity); d[mu]--; if (nu < mu) force -= U1*Oprod4*U2*conj(U3)*conj(U4); else force += U1*Oprod4*U2*conj(U3)*conj(U4); } // Lower leaf // U[nu*](x-nu) U[mu](x-nu) U[nu](x+mu-nu) Oprod(x+mu) U[*mu](x) { DECLARE_ARRAY(d,0); // load U(x-nu)(+nu) d[nu]--; Link U1 = arg.gauge(nu, linkIndexShift(y, d, arg.E), arg.parity); d[nu]++; // load U(x-nu)(+mu) d[nu]--; Link U2 = arg.gauge(mu, linkIndexShift(y, d, arg.E), arg.parity); d[nu]++; // load U(x+mu-nu)(nu) d[mu]++; d[nu]--; Link U3 = arg.gauge(nu, linkIndexShift(y, d, arg.E), otherparity); d[mu]--; d[nu]++; // load U(x)_(+mu) Link U4 = arg.gauge(mu, linkIndexShift(y, d, arg.E), otherparity); // load Oprod(x+mu) d[mu]++; Link Oprod1 = arg.oprod(tidx, linkIndexShift(y, d, arg.E), arg.parity); d[mu]--; if (nu < mu) force += conj(U1)*U2*U3*Oprod1*conj(U4); else force -= conj(U1)*U2*U3*Oprod1*conj(U4); d[nu]--; Link Oprod2 = arg.oprod(tidx, linkIndexShift(y, d, arg.E), arg.parity); d[nu]++; if (nu < mu) force += conj(U1)*Oprod2*U2*U3*conj(U4); else force -= conj(U1)*Oprod2*U2*U3*conj(U4); } } } template<typename real, typename Arg> __global__ void cloverDerivativeKernel(Arg arg) { int index = threadIdx.x + blockIdx.x*blockDim.x; if (index >= arg.volumeCB) return; // y index determines whether we're updating arg.parity or (1-arg.parity) int yIndex = threadIdx.y + blockIdx.y*blockDim.y; if (yIndex >= 2) return; // mu index is mapped from z thread index int mu = threadIdx.z + blockIdx.z*blockDim.z; if (mu >= 4) return; typedef complex<real> Complex; typedef Matrix<Complex,3> Link; DECLARE_LINK(force); #ifdef DYNAMIC_MU_NU for (int nu=0; nu<4; nu++) { if (mu==nu) continue; computeForce<real,Arg,Link>(force, arg, index, yIndex, mu, nu); } #else switch(mu) { case 0: computeForce<real,Arg,0,1,Link>(force, arg, index, yIndex); computeForce<real,Arg,0,2,Link>(force, arg, index, yIndex); computeForce<real,Arg,0,3,Link>(force, arg, index, yIndex); break; case 1: computeForce<real,Arg,1,0,Link>(force, arg, index, yIndex); computeForce<real,Arg,1,3,Link>(force, arg, index, yIndex); computeForce<real,Arg,1,2,Link>(force, arg, index, yIndex); break; case 2: computeForce<real,Arg,2,3,Link>(force, arg, index, yIndex); computeForce<real,Arg,2,0,Link>(force, arg, index, yIndex); computeForce<real,Arg,2,1,Link>(force, arg, index, yIndex); break; case 3: computeForce<real,Arg,3,2,Link>(force, arg, index, yIndex); computeForce<real,Arg,3,1,Link>(force, arg, index, yIndex); computeForce<real,Arg,3,0,Link>(force, arg, index, yIndex); break; } #endif // Write to array Link F; arg.force.load((real*)(F.data), index, mu, yIndex == 0 ? arg.parity : 1-arg.parity); axpy(arg.coeff, force, F); arg.force.save((real*)(F.data), index, mu, yIndex == 0 ? 
arg.parity : 1-arg.parity); return; } // cloverDerivativeKernel template<typename Float, typename Arg> class CloverDerivative : public TunableVectorY { private: Arg arg; const GaugeField &meta; #if defined(SHARED_ACCUMULATOR) && defined(SHARED_ARRAY) unsigned int sharedBytesPerThread() const { return 18*sizeof(Float) + 8; } #elif defined(SHARED_ACCUMULATOR) unsigned int sharedBytesPerThread() const { return 18*sizeof(Float); } #else unsigned int sharedBytesPerThread() const { return 0; } #endif unsigned int sharedBytesPerBlock(const TuneParam &) const { return 0; } unsigned int minThreads() const { return arg.volumeCB; } bool tuneGridDim() const { return false; } // don't tune the grid dimension public: CloverDerivative(const Arg &arg, const GaugeField &meta) : TunableVectorY(2), arg(arg), meta(meta) { writeAuxString("threads=%d,prec=%lu,fstride=%d,gstride=%d,ostride=%d", arg.volumeCB,sizeof(Float),arg.force.stride, arg.gauge.stride,arg.oprod.stride); } virtual ~CloverDerivative() {} void apply(const hipStream_t &stream){ TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity()); hipLaunchKernelGGL(( cloverDerivativeKernel<Float>), dim3(tp.grid),dim3(tp.block),tp.shared_bytes, 0, arg); } // apply bool advanceBlockDim(TuneParam &param) const { dim3 block = param.block; dim3 grid = param.grid; bool rtn = TunableVectorY::advanceBlockDim(param); param.block.z = block.z; param.grid.z = grid.z; if (!rtn) { if (param.block.z < 4) { param.block.z++; param.grid.z = (4 + param.block.z - 1) / param.block.z; rtn = true; } else { param.block.z = 1; param.grid.z = 4; rtn = false; } } return rtn; } void initTuneParam(TuneParam &param) const { TunableVectorY::initTuneParam(param); param.block.y = 1; param.block.z = 1; param.grid.y = 2; param.grid.z = 4; } void defaultTuneParam(TuneParam &param) const { initTuneParam(param); } // The force field is updated so we must preserve its initial state void preTune() { arg.force.save(); } void postTune(){ arg.force.load(); } long long flops() const { return 16 * 198 * 3 * 4 * 2 * (long long)arg.volumeCB; } long long bytes() const { return ((8*arg.gauge.Bytes() + 4*arg.oprod.Bytes())*3 + 2*arg.force.Bytes()) * 4 * 2 * arg.volumeCB; } TuneKey tuneKey() const { return TuneKey(meta.VolString(), typeid(*this).name(), aux); } }; template<typename Float> void cloverDerivative(cudaGaugeField &force, cudaGaugeField &gauge, cudaGaugeField &oprod, double coeff, int parity) { if (oprod.Reconstruct() != QUDA_RECONSTRUCT_NO) errorQuda("Force field does not support reconstruction"); if (force.Order() != oprod.Order()) errorQuda("Force and Oprod orders must match"); if (force.Reconstruct() != QUDA_RECONSTRUCT_NO) errorQuda("Force field does not support reconstruction"); if (force.Order() == QUDA_FLOAT2_GAUGE_ORDER){ typedef gauge::FloatNOrder<Float, 18, 2, 18> F; typedef gauge::FloatNOrder<Float, 18, 2, 18> O; if (gauge.isNative()) { if (gauge.Reconstruct() == QUDA_RECONSTRUCT_NO) { typedef typename gauge_mapper<Float,QUDA_RECONSTRUCT_NO>::type G; typedef CloverDerivArg<Float,F,G,O> Arg; Arg arg(F(force), G(gauge), O(oprod), force.X(), oprod.X(), coeff, parity); CloverDerivative<Float, Arg> deriv(arg, gauge); deriv.apply(0); #if 0 } else if (gauge.Reconstruct() == QUDA_RECONSTRUCT_12) { typedef typename gauge_mapper<Float,QUDA_RECONSTRUCT_12>::type G; typedef CloverDerivArg<Float,F,G,O> Arg; Arg arg(F(force), G(gauge), O(oprod), force.X(), oprod.X(), coeff, parity); CloverDerivative<Float, Arg> deriv(arg, gauge); deriv.apply(0); #endif } else { errorQuda("Reconstruction type 
%d not supported",gauge.Reconstruct()); } } else { errorQuda("Gauge order %d not supported", gauge.Order()); } } else { errorQuda("Force order %d not supported", force.Order()); } // force / oprod order qudaDeviceSynchronize(); } #endif // GPU_CLOVER void cloverDerivative(cudaGaugeField &force, cudaGaugeField &gauge, cudaGaugeField &oprod, double coeff, QudaParity parity) { #ifdef GPU_CLOVER_DIRAC assert(oprod.Geometry() == QUDA_TENSOR_GEOMETRY); assert(force.Geometry() == QUDA_VECTOR_GEOMETRY); for (int d=0; d<4; d++) { if (oprod.X()[d] != gauge.X()[d]) errorQuda("Incompatible extended dimensions d=%d gauge=%d oprod=%d", d, gauge.X()[d], oprod.X()[d]); } int device_parity = (parity == QUDA_EVEN_PARITY) ? 0 : 1; if(force.Precision() == QUDA_DOUBLE_PRECISION){ cloverDerivative<double>(force, gauge, oprod, coeff, device_parity); #if 0 } else if (force.Precision() == QUDA_SINGLE_PRECISION){ cloverDerivative<float>(force, gauge, oprod, coeff, device_parity); #endif } else { errorQuda("Precision %d not supported", force.Precision()); } return; #else errorQuda("Clover has not been built"); #endif } } // namespace quda
ff70371605acdd2452ce5c035b01996bb2305142.cu
#include <cstdio> #include <cstdlib> #include <cuda.h> #include <quda_internal.h> #include <tune_quda.h> #include <gauge_field.h> #include <gauge_field_order.h> #include <quda_matrix.h> #include <index_helper.cuh> #include <cassert> /** @file clover_deriv_quda.cu @brief This kernel has been a bit of a pain to optimize since it is excessively register bound. To reduce register pressure we use shared memory to help offload some of this pressure. Annoyingly, the optimal approach for CUDA 8.0 is not the same as CUDA 7.5, so implementation is compiler version dependent. The CUDA 8.0 optimal code runs 10x slower on 7.5, though the 7.5 code runs fine on 8.0. CUDA >= 8.0 - Used shared memory for force accumulator matrix - Template mu / nu to prevent register spilling of indexing arrays - Force the computation routine to inline CUDA <= 7.5 - Used shared memory for force accumulator matrix - Keep mu/nu dynamic and use shared memory to store indexing arrays - Do not inline computation routine For the shared-memory dynamic indexing arrays, we use chars, since the array is 4-d, a 4-d coordinate can be stored in a single word which means that we will not have to worry about bank conflicts, and the shared array can be passed to the usual indexing routines (getCoordsExtended and linkIndexShift) with no code changes. This strategy works as long as each local lattice coordinate is less than 256. */ #if (CUDA_VERSION < 8000) #define DYNAMIC_MU_NU #endif // Use shared memory for the force accumulator matrix #define SHARED_ACCUMULATOR #ifdef DYNAMIC_MU_NU // When using dynamic mu/nu indexing, to avoid local spills use shared // memory for the per thread indexing arrays. // FIXME for reasons I don't understand, the shared array breaks in multi-GPU mode //#define SHARED_ARRAY #endif // DYNAMIC_MU_NU namespace quda { #ifdef SHARED_ACCUMULATOR #define DECLARE_LINK(U) \ extern __shared__ int s[]; \ real *U = (real*)s; \ { \ const int tid = (threadIdx.z*blockDim.y + threadIdx.y)*blockDim.x + threadIdx.x; \ const int block = blockDim.x * blockDim.y * blockDim.z; \ for (int i=0; i<18; i++) force[i*block + tid] = 0.0; \ } #define LINK real* template <typename real, typename Link> __device__ inline void axpy(real a, const real *x, Link &y) { const int tid = (threadIdx.z*blockDim.y + threadIdx.y)*blockDim.x + threadIdx.x; const int block = blockDim.x * blockDim.y * blockDim.z; #pragma unroll for (int i=0; i<9; i++) { y.data[i] += a * complex<real>(x[(2*i+0)*block + tid], x[(2*i+1)*block + tid]); } } template <typename real, typename Link> __device__ inline void operator+=(real *y, const Link &x) { const int tid = (threadIdx.z*blockDim.y + threadIdx.y)*blockDim.x + threadIdx.x; const int block = blockDim.x * blockDim.y * blockDim.z; #pragma unroll for (int i=0; i<9; i++) { y[(2*i+0)*block + tid] += x.data[i].real(); y[(2*i+1)*block + tid] += x.data[i].imag(); } } template <typename real, typename Link> __device__ inline void operator-=(real *y, const Link &x) { const int tid = (threadIdx.z*blockDim.y + threadIdx.y)*blockDim.x + threadIdx.x; const int block = blockDim.x * blockDim.y * blockDim.z; #pragma unroll for (int i=0; i<9; i++) { y[(2*i+0)*block + tid] -= x.data[i].real(); y[(2*i+1)*block + tid] -= x.data[i].imag(); } } #else #define DECLARE_LINK(U) Link U; #define LINK Link & template <typename real, typename Link> __device__ inline void axpy(real a, const Link &x, Link &y) { y += a*x; } #endif #if defined(SHARED_ARRAY) && defined(SHARED_ACCUMULATOR) #define DECLARE_ARRAY(d, idx) \ unsigned char *d; \ { \ extern 
__shared__ int s[]; \ int tid = (threadIdx.z*blockDim.y + threadIdx.y)*blockDim.x + threadIdx.x; \ int block = blockDim.x*blockDim.y*blockDim.z; \ int offset = 18*block*sizeof(real)/sizeof(int) + idx*block + tid; \ s[offset] = 0; \ d = (unsigned char*)&s[offset]; \ } #elif defined(SHARED_ARRAY) #error Cannot use SHARED_ARRAY with SHARED_ACCUMULATOR #else #define DECLARE_ARRAY(d, idx) \ int d[4] = {0, 0, 0, 0}; #endif #ifdef GPU_CLOVER_DIRAC template<class Float, typename Force, typename Gauge, typename Oprod> struct CloverDerivArg { int X[4]; int E[4]; int border[4]; Float coeff; int parity; int volumeCB; Force force; Gauge gauge; Oprod oprod; CloverDerivArg(const Force& force, const Gauge& gauge, const Oprod& oprod, const int *X_, const int *E_, double coeff, int parity) : coeff(coeff), parity(parity), volumeCB(force.volumeCB), force(force), gauge(gauge), oprod(oprod) { for(int dir=0; dir<4; ++dir) { this->X[dir] = X_[dir]; this->E[dir] = E_[dir]; this->border[dir] = (E_[dir] - X_[dir])/2; } } }; #ifdef DYNAMIC_MU_NU template <typename real, typename Arg, typename Link> __device__ void computeForce(LINK force, Arg &arg, int xIndex, int yIndex, int mu, int nu) { #else template <typename real, typename Arg, int mu, int nu, typename Link> __device__ __forceinline__ void computeForce(LINK force, Arg &arg, int xIndex, int yIndex) { #endif int otherparity = (1-arg.parity); const int tidx = mu > nu ? (mu-1)*mu/2 + nu : (nu-1)*nu/2 + mu; if (yIndex == 0) { // do "this" force DECLARE_ARRAY(x, 1); getCoordsExtended(x, xIndex, arg.X, arg.parity, arg.border); // U[mu](x) U[nu](x+mu) U[*mu](x+nu) U[*nu](x) Oprod(x) { DECLARE_ARRAY(d,0); // load U(x)_(+mu) Link U1 = arg.gauge(mu, linkIndexShift(x, d, arg.E), arg.parity); // load U(x+mu)_(+nu) d[mu]++; Link U2 = arg.gauge(nu, linkIndexShift(x, d, arg.E), otherparity); d[mu]--; // load U(x+nu)_(+mu) d[nu]++; Link U3 = arg.gauge(mu, linkIndexShift(x, d, arg.E), otherparity); d[nu]--; // load U(x)_(+nu) Link U4 = arg.gauge(nu, linkIndexShift(x, d, arg.E), arg.parity); // load Oprod Link Oprod1 = arg.oprod(tidx, linkIndexShift(x, d, arg.E), arg.parity); if (nu < mu) force -= U1*U2*conj(U3)*conj(U4)*Oprod1; else force += U1*U2*conj(U3)*conj(U4)*Oprod1; d[mu]++; d[nu]++; Link Oprod2 = arg.oprod(tidx, linkIndexShift(x, d, arg.E), arg.parity); d[mu]--; d[nu]--; if (nu < mu) force -= U1*U2*Oprod2*conj(U3)*conj(U4); else force += U1*U2*Oprod2*conj(U3)*conj(U4); } { DECLARE_ARRAY(d,0); // load U(x-nu)(+nu) d[nu]--; Link U1 = arg.gauge(nu, linkIndexShift(x, d, arg.E), otherparity); d[nu]++; // load U(x-nu)(+mu) d[nu]--; Link U2 = arg.gauge(mu, linkIndexShift(x, d, arg.E), otherparity); d[nu]++; // load U(x+mu-nu)(nu) d[mu]++; d[nu]--; Link U3 = arg.gauge(nu, linkIndexShift(x, d, arg.E), arg.parity); d[mu]--; d[nu]++; // load U(x)_(+mu) Link U4 = arg.gauge(mu, linkIndexShift(x, d, arg.E), arg.parity); d[mu]++; d[nu]--; Link Oprod1 = arg.oprod(tidx, linkIndexShift(x, d, arg.E), arg.parity); d[mu]--; d[nu]++; if (nu < mu) force += conj(U1)*U2*Oprod1*U3*conj(U4); else force -= conj(U1)*U2*Oprod1*U3*conj(U4); Link Oprod4 = arg.oprod(tidx, linkIndexShift(x, d, arg.E), arg.parity); if (nu < mu) force += Oprod4*conj(U1)*U2*U3*conj(U4); else force -= Oprod4*conj(U1)*U2*U3*conj(U4); } } else { // else do other force DECLARE_ARRAY(y, 1); getCoordsExtended(y, xIndex, arg.X, otherparity, arg.border); { DECLARE_ARRAY(d,0); // load U(x)_(+mu) Link U1 = arg.gauge(mu, linkIndexShift(y, d, arg.E), otherparity); // load U(x+mu)_(+nu) d[mu]++; Link U2 = arg.gauge(nu, linkIndexShift(y, 
d, arg.E), arg.parity); d[mu]--; // load U(x+nu)_(+mu) d[nu]++; Link U3 = arg.gauge(mu, linkIndexShift(y, d, arg.E), arg.parity); d[nu]--; // load U(x)_(+nu) Link U4 = arg.gauge(nu, linkIndexShift(y, d, arg.E), otherparity); // load opposite parity Oprod d[nu]++; Link Oprod3 = arg.oprod(tidx, linkIndexShift(y, d, arg.E), arg.parity); d[nu]--; if (nu < mu) force -= U1*U2*conj(U3)*Oprod3*conj(U4); else force += U1*U2*conj(U3)*Oprod3*conj(U4); // load Oprod(x+mu) d[mu]++; Link Oprod4 = arg.oprod(tidx, linkIndexShift(y, d, arg.E), arg.parity); d[mu]--; if (nu < mu) force -= U1*Oprod4*U2*conj(U3)*conj(U4); else force += U1*Oprod4*U2*conj(U3)*conj(U4); } // Lower leaf // U[nu*](x-nu) U[mu](x-nu) U[nu](x+mu-nu) Oprod(x+mu) U[*mu](x) { DECLARE_ARRAY(d,0); // load U(x-nu)(+nu) d[nu]--; Link U1 = arg.gauge(nu, linkIndexShift(y, d, arg.E), arg.parity); d[nu]++; // load U(x-nu)(+mu) d[nu]--; Link U2 = arg.gauge(mu, linkIndexShift(y, d, arg.E), arg.parity); d[nu]++; // load U(x+mu-nu)(nu) d[mu]++; d[nu]--; Link U3 = arg.gauge(nu, linkIndexShift(y, d, arg.E), otherparity); d[mu]--; d[nu]++; // load U(x)_(+mu) Link U4 = arg.gauge(mu, linkIndexShift(y, d, arg.E), otherparity); // load Oprod(x+mu) d[mu]++; Link Oprod1 = arg.oprod(tidx, linkIndexShift(y, d, arg.E), arg.parity); d[mu]--; if (nu < mu) force += conj(U1)*U2*U3*Oprod1*conj(U4); else force -= conj(U1)*U2*U3*Oprod1*conj(U4); d[nu]--; Link Oprod2 = arg.oprod(tidx, linkIndexShift(y, d, arg.E), arg.parity); d[nu]++; if (nu < mu) force += conj(U1)*Oprod2*U2*U3*conj(U4); else force -= conj(U1)*Oprod2*U2*U3*conj(U4); } } } template<typename real, typename Arg> __global__ void cloverDerivativeKernel(Arg arg) { int index = threadIdx.x + blockIdx.x*blockDim.x; if (index >= arg.volumeCB) return; // y index determines whether we're updating arg.parity or (1-arg.parity) int yIndex = threadIdx.y + blockIdx.y*blockDim.y; if (yIndex >= 2) return; // mu index is mapped from z thread index int mu = threadIdx.z + blockIdx.z*blockDim.z; if (mu >= 4) return; typedef complex<real> Complex; typedef Matrix<Complex,3> Link; DECLARE_LINK(force); #ifdef DYNAMIC_MU_NU for (int nu=0; nu<4; nu++) { if (mu==nu) continue; computeForce<real,Arg,Link>(force, arg, index, yIndex, mu, nu); } #else switch(mu) { case 0: computeForce<real,Arg,0,1,Link>(force, arg, index, yIndex); computeForce<real,Arg,0,2,Link>(force, arg, index, yIndex); computeForce<real,Arg,0,3,Link>(force, arg, index, yIndex); break; case 1: computeForce<real,Arg,1,0,Link>(force, arg, index, yIndex); computeForce<real,Arg,1,3,Link>(force, arg, index, yIndex); computeForce<real,Arg,1,2,Link>(force, arg, index, yIndex); break; case 2: computeForce<real,Arg,2,3,Link>(force, arg, index, yIndex); computeForce<real,Arg,2,0,Link>(force, arg, index, yIndex); computeForce<real,Arg,2,1,Link>(force, arg, index, yIndex); break; case 3: computeForce<real,Arg,3,2,Link>(force, arg, index, yIndex); computeForce<real,Arg,3,1,Link>(force, arg, index, yIndex); computeForce<real,Arg,3,0,Link>(force, arg, index, yIndex); break; } #endif // Write to array Link F; arg.force.load((real*)(F.data), index, mu, yIndex == 0 ? arg.parity : 1-arg.parity); axpy(arg.coeff, force, F); arg.force.save((real*)(F.data), index, mu, yIndex == 0 ? 
arg.parity : 1-arg.parity); return; } // cloverDerivativeKernel template<typename Float, typename Arg> class CloverDerivative : public TunableVectorY { private: Arg arg; const GaugeField &meta; #if defined(SHARED_ACCUMULATOR) && defined(SHARED_ARRAY) unsigned int sharedBytesPerThread() const { return 18*sizeof(Float) + 8; } #elif defined(SHARED_ACCUMULATOR) unsigned int sharedBytesPerThread() const { return 18*sizeof(Float); } #else unsigned int sharedBytesPerThread() const { return 0; } #endif unsigned int sharedBytesPerBlock(const TuneParam &) const { return 0; } unsigned int minThreads() const { return arg.volumeCB; } bool tuneGridDim() const { return false; } // don't tune the grid dimension public: CloverDerivative(const Arg &arg, const GaugeField &meta) : TunableVectorY(2), arg(arg), meta(meta) { writeAuxString("threads=%d,prec=%lu,fstride=%d,gstride=%d,ostride=%d", arg.volumeCB,sizeof(Float),arg.force.stride, arg.gauge.stride,arg.oprod.stride); } virtual ~CloverDerivative() {} void apply(const cudaStream_t &stream){ TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity()); cloverDerivativeKernel<Float><<<tp.grid,tp.block,tp.shared_bytes>>>(arg); } // apply bool advanceBlockDim(TuneParam &param) const { dim3 block = param.block; dim3 grid = param.grid; bool rtn = TunableVectorY::advanceBlockDim(param); param.block.z = block.z; param.grid.z = grid.z; if (!rtn) { if (param.block.z < 4) { param.block.z++; param.grid.z = (4 + param.block.z - 1) / param.block.z; rtn = true; } else { param.block.z = 1; param.grid.z = 4; rtn = false; } } return rtn; } void initTuneParam(TuneParam &param) const { TunableVectorY::initTuneParam(param); param.block.y = 1; param.block.z = 1; param.grid.y = 2; param.grid.z = 4; } void defaultTuneParam(TuneParam &param) const { initTuneParam(param); } // The force field is updated so we must preserve its initial state void preTune() { arg.force.save(); } void postTune(){ arg.force.load(); } long long flops() const { return 16 * 198 * 3 * 4 * 2 * (long long)arg.volumeCB; } long long bytes() const { return ((8*arg.gauge.Bytes() + 4*arg.oprod.Bytes())*3 + 2*arg.force.Bytes()) * 4 * 2 * arg.volumeCB; } TuneKey tuneKey() const { return TuneKey(meta.VolString(), typeid(*this).name(), aux); } }; template<typename Float> void cloverDerivative(cudaGaugeField &force, cudaGaugeField &gauge, cudaGaugeField &oprod, double coeff, int parity) { if (oprod.Reconstruct() != QUDA_RECONSTRUCT_NO) errorQuda("Force field does not support reconstruction"); if (force.Order() != oprod.Order()) errorQuda("Force and Oprod orders must match"); if (force.Reconstruct() != QUDA_RECONSTRUCT_NO) errorQuda("Force field does not support reconstruction"); if (force.Order() == QUDA_FLOAT2_GAUGE_ORDER){ typedef gauge::FloatNOrder<Float, 18, 2, 18> F; typedef gauge::FloatNOrder<Float, 18, 2, 18> O; if (gauge.isNative()) { if (gauge.Reconstruct() == QUDA_RECONSTRUCT_NO) { typedef typename gauge_mapper<Float,QUDA_RECONSTRUCT_NO>::type G; typedef CloverDerivArg<Float,F,G,O> Arg; Arg arg(F(force), G(gauge), O(oprod), force.X(), oprod.X(), coeff, parity); CloverDerivative<Float, Arg> deriv(arg, gauge); deriv.apply(0); #if 0 } else if (gauge.Reconstruct() == QUDA_RECONSTRUCT_12) { typedef typename gauge_mapper<Float,QUDA_RECONSTRUCT_12>::type G; typedef CloverDerivArg<Float,F,G,O> Arg; Arg arg(F(force), G(gauge), O(oprod), force.X(), oprod.X(), coeff, parity); CloverDerivative<Float, Arg> deriv(arg, gauge); deriv.apply(0); #endif } else { errorQuda("Reconstruction type %d not 
supported",gauge.Reconstruct()); } } else { errorQuda("Gauge order %d not supported", gauge.Order()); } } else { errorQuda("Force order %d not supported", force.Order()); } // force / oprod order qudaDeviceSynchronize(); } #endif // GPU_CLOVER void cloverDerivative(cudaGaugeField &force, cudaGaugeField &gauge, cudaGaugeField &oprod, double coeff, QudaParity parity) { #ifdef GPU_CLOVER_DIRAC assert(oprod.Geometry() == QUDA_TENSOR_GEOMETRY); assert(force.Geometry() == QUDA_VECTOR_GEOMETRY); for (int d=0; d<4; d++) { if (oprod.X()[d] != gauge.X()[d]) errorQuda("Incompatible extended dimensions d=%d gauge=%d oprod=%d", d, gauge.X()[d], oprod.X()[d]); } int device_parity = (parity == QUDA_EVEN_PARITY) ? 0 : 1; if(force.Precision() == QUDA_DOUBLE_PRECISION){ cloverDerivative<double>(force, gauge, oprod, coeff, device_parity); #if 0 } else if (force.Precision() == QUDA_SINGLE_PRECISION){ cloverDerivative<float>(force, gauge, oprod, coeff, device_parity); #endif } else { errorQuda("Precision %d not supported", force.Precision()); } return; #else errorQuda("Clover has not been built"); #endif } } // namespace quda
386e72f8a62d96d872304f2e18e73862a5f1a060.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // advanced encryption standard // author: karl malbrain, [email protected] /* This work, including the source code, documentation and related data, is placed into the public domain. The orginal author is Karl Malbrain. THIS SOFTWARE IS PROVIDED AS-IS WITHOUT WARRANTY OF ANY KIND, NOT EVEN THE IMPLIED WARRANTY OF MERCHANTABILITY. THE AUTHOR OF THIS SOFTWARE, ASSUMES _NO_ RESPONSIBILITY FOR ANY CONSEQUENCE RESULTING FROM THE USE, MODIFICATION, OR REDISTRIBUTION OF THIS SOFTWARE. */ typedef unsigned char uchar; #include <string.h> #include <memory.h> #include <iostream> using namespace std; // AES only supports Nb=4 #define Nb 4 // number of columns in the state & expanded key #define Nk 4 // number of columns in a key #define Nr 10 // number of rounds in encryption const size_t BUF_SIZE = 1000000000; __device__ uchar Sbox[256] = { // forward s-box 0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5, 0x30, 0x01, 0x67, 0x2b, 0xfe, 0xd7, 0xab, 0x76, 0xca, 0x82, 0xc9, 0x7d, 0xfa, 0x59, 0x47, 0xf0, 0xad, 0xd4, 0xa2, 0xaf, 0x9c, 0xa4, 0x72, 0xc0, 0xb7, 0xfd, 0x93, 0x26, 0x36, 0x3f, 0xf7, 0xcc, 0x34, 0xa5, 0xe5, 0xf1, 0x71, 0xd8, 0x31, 0x15, 0x04, 0xc7, 0x23, 0xc3, 0x18, 0x96, 0x05, 0x9a, 0x07, 0x12, 0x80, 0xe2, 0xeb, 0x27, 0xb2, 0x75, 0x09, 0x83, 0x2c, 0x1a, 0x1b, 0x6e, 0x5a, 0xa0, 0x52, 0x3b, 0xd6, 0xb3, 0x29, 0xe3, 0x2f, 0x84, 0x53, 0xd1, 0x00, 0xed, 0x20, 0xfc, 0xb1, 0x5b, 0x6a, 0xcb, 0xbe, 0x39, 0x4a, 0x4c, 0x58, 0xcf, 0xd0, 0xef, 0xaa, 0xfb, 0x43, 0x4d, 0x33, 0x85, 0x45, 0xf9, 0x02, 0x7f, 0x50, 0x3c, 0x9f, 0xa8, 0x51, 0xa3, 0x40, 0x8f, 0x92, 0x9d, 0x38, 0xf5, 0xbc, 0xb6, 0xda, 0x21, 0x10, 0xff, 0xf3, 0xd2, 0xcd, 0x0c, 0x13, 0xec, 0x5f, 0x97, 0x44, 0x17, 0xc4, 0xa7, 0x7e, 0x3d, 0x64, 0x5d, 0x19, 0x73, 0x60, 0x81, 0x4f, 0xdc, 0x22, 0x2a, 0x90, 0x88, 0x46, 0xee, 0xb8, 0x14, 0xde, 0x5e, 0x0b, 0xdb, 0xe0, 0x32, 0x3a, 0x0a, 0x49, 0x06, 0x24, 0x5c, 0xc2, 0xd3, 0xac, 0x62, 0x91, 0x95, 0xe4, 0x79, 0xe7, 0xc8, 0x37, 0x6d, 0x8d, 0xd5, 0x4e, 0xa9, 0x6c, 0x56, 0xf4, 0xea, 0x65, 0x7a, 0xae, 0x08, 0xba, 0x78, 0x25, 0x2e, 0x1c, 0xa6, 0xb4, 0xc6, 0xe8, 0xdd, 0x74, 0x1f, 0x4b, 0xbd, 0x8b, 0x8a, 0x70, 0x3e, 0xb5, 0x66, 0x48, 0x03, 0xf6, 0x0e, 0x61, 0x35, 0x57, 0xb9, 0x86, 0xc1, 0x1d, 0x9e, 0xe1, 0xf8, 0x98, 0x11, 0x69, 0xd9, 0x8e, 0x94, 0x9b, 0x1e, 0x87, 0xe9, 0xce, 0x55, 0x28, 0xdf, 0x8c, 0xa1, 0x89, 0x0d, 0xbf, 0xe6, 0x42, 0x68, 0x41, 0x99, 0x2d, 0x0f, 0xb0, 0x54, 0xbb, 0x16}; uchar cpuSbox[256] = { // forward s-box 0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5, 0x30, 0x01, 0x67, 0x2b, 0xfe, 0xd7, 0xab, 0x76, 0xca, 0x82, 0xc9, 0x7d, 0xfa, 0x59, 0x47, 0xf0, 0xad, 0xd4, 0xa2, 0xaf, 0x9c, 0xa4, 0x72, 0xc0, 0xb7, 0xfd, 0x93, 0x26, 0x36, 0x3f, 0xf7, 0xcc, 0x34, 0xa5, 0xe5, 0xf1, 0x71, 0xd8, 0x31, 0x15, 0x04, 0xc7, 0x23, 0xc3, 0x18, 0x96, 0x05, 0x9a, 0x07, 0x12, 0x80, 0xe2, 0xeb, 0x27, 0xb2, 0x75, 0x09, 0x83, 0x2c, 0x1a, 0x1b, 0x6e, 0x5a, 0xa0, 0x52, 0x3b, 0xd6, 0xb3, 0x29, 0xe3, 0x2f, 0x84, 0x53, 0xd1, 0x00, 0xed, 0x20, 0xfc, 0xb1, 0x5b, 0x6a, 0xcb, 0xbe, 0x39, 0x4a, 0x4c, 0x58, 0xcf, 0xd0, 0xef, 0xaa, 0xfb, 0x43, 0x4d, 0x33, 0x85, 0x45, 0xf9, 0x02, 0x7f, 0x50, 0x3c, 0x9f, 0xa8, 0x51, 0xa3, 0x40, 0x8f, 0x92, 0x9d, 0x38, 0xf5, 0xbc, 0xb6, 0xda, 0x21, 0x10, 0xff, 0xf3, 0xd2, 0xcd, 0x0c, 0x13, 0xec, 0x5f, 0x97, 0x44, 0x17, 0xc4, 0xa7, 0x7e, 0x3d, 0x64, 0x5d, 0x19, 0x73, 0x60, 0x81, 0x4f, 0xdc, 0x22, 0x2a, 0x90, 0x88, 0x46, 0xee, 0xb8, 0x14, 0xde, 0x5e, 0x0b, 0xdb, 0xe0, 0x32, 0x3a, 0x0a, 0x49, 0x06, 0x24, 0x5c, 0xc2, 0xd3, 0xac, 
0x62, 0x91, 0x95, 0xe4, 0x79, 0xe7, 0xc8, 0x37, 0x6d, 0x8d, 0xd5, 0x4e, 0xa9, 0x6c, 0x56, 0xf4, 0xea, 0x65, 0x7a, 0xae, 0x08, 0xba, 0x78, 0x25, 0x2e, 0x1c, 0xa6, 0xb4, 0xc6, 0xe8, 0xdd, 0x74, 0x1f, 0x4b, 0xbd, 0x8b, 0x8a, 0x70, 0x3e, 0xb5, 0x66, 0x48, 0x03, 0xf6, 0x0e, 0x61, 0x35, 0x57, 0xb9, 0x86, 0xc1, 0x1d, 0x9e, 0xe1, 0xf8, 0x98, 0x11, 0x69, 0xd9, 0x8e, 0x94, 0x9b, 0x1e, 0x87, 0xe9, 0xce, 0x55, 0x28, 0xdf, 0x8c, 0xa1, 0x89, 0x0d, 0xbf, 0xe6, 0x42, 0x68, 0x41, 0x99, 0x2d, 0x0f, 0xb0, 0x54, 0xbb, 0x16}; __device__ uchar InvSbox[256] = { // inverse s-box 0x52, 0x09, 0x6a, 0xd5, 0x30, 0x36, 0xa5, 0x38, 0xbf, 0x40, 0xa3, 0x9e, 0x81, 0xf3, 0xd7, 0xfb, 0x7c, 0xe3, 0x39, 0x82, 0x9b, 0x2f, 0xff, 0x87, 0x34, 0x8e, 0x43, 0x44, 0xc4, 0xde, 0xe9, 0xcb, 0x54, 0x7b, 0x94, 0x32, 0xa6, 0xc2, 0x23, 0x3d, 0xee, 0x4c, 0x95, 0x0b, 0x42, 0xfa, 0xc3, 0x4e, 0x08, 0x2e, 0xa1, 0x66, 0x28, 0xd9, 0x24, 0xb2, 0x76, 0x5b, 0xa2, 0x49, 0x6d, 0x8b, 0xd1, 0x25, 0x72, 0xf8, 0xf6, 0x64, 0x86, 0x68, 0x98, 0x16, 0xd4, 0xa4, 0x5c, 0xcc, 0x5d, 0x65, 0xb6, 0x92, 0x6c, 0x70, 0x48, 0x50, 0xfd, 0xed, 0xb9, 0xda, 0x5e, 0x15, 0x46, 0x57, 0xa7, 0x8d, 0x9d, 0x84, 0x90, 0xd8, 0xab, 0x00, 0x8c, 0xbc, 0xd3, 0x0a, 0xf7, 0xe4, 0x58, 0x05, 0xb8, 0xb3, 0x45, 0x06, 0xd0, 0x2c, 0x1e, 0x8f, 0xca, 0x3f, 0x0f, 0x02, 0xc1, 0xaf, 0xbd, 0x03, 0x01, 0x13, 0x8a, 0x6b, 0x3a, 0x91, 0x11, 0x41, 0x4f, 0x67, 0xdc, 0xea, 0x97, 0xf2, 0xcf, 0xce, 0xf0, 0xb4, 0xe6, 0x73, 0x96, 0xac, 0x74, 0x22, 0xe7, 0xad, 0x35, 0x85, 0xe2, 0xf9, 0x37, 0xe8, 0x1c, 0x75, 0xdf, 0x6e, 0x47, 0xf1, 0x1a, 0x71, 0x1d, 0x29, 0xc5, 0x89, 0x6f, 0xb7, 0x62, 0x0e, 0xaa, 0x18, 0xbe, 0x1b, 0xfc, 0x56, 0x3e, 0x4b, 0xc6, 0xd2, 0x79, 0x20, 0x9a, 0xdb, 0xc0, 0xfe, 0x78, 0xcd, 0x5a, 0xf4, 0x1f, 0xdd, 0xa8, 0x33, 0x88, 0x07, 0xc7, 0x31, 0xb1, 0x12, 0x10, 0x59, 0x27, 0x80, 0xec, 0x5f, 0x60, 0x51, 0x7f, 0xa9, 0x19, 0xb5, 0x4a, 0x0d, 0x2d, 0xe5, 0x7a, 0x9f, 0x93, 0xc9, 0x9c, 0xef, 0xa0, 0xe0, 0x3b, 0x4d, 0xae, 0x2a, 0xf5, 0xb0, 0xc8, 0xeb, 0xbb, 0x3c, 0x83, 0x53, 0x99, 0x61, 0x17, 0x2b, 0x04, 0x7e, 0xba, 0x77, 0xd6, 0x26, 0xe1, 0x69, 0x14, 0x63, 0x55, 0x21, 0x0c, 0x7d}; // combined Xtimes2[Sbox[]] __device__ uchar Xtime2Sbox[256] = { 0xc6, 0xf8, 0xee, 0xf6, 0xff, 0xd6, 0xde, 0x91, 0x60, 0x02, 0xce, 0x56, 0xe7, 0xb5, 0x4d, 0xec, 0x8f, 0x1f, 0x89, 0xfa, 0xef, 0xb2, 0x8e, 0xfb, 0x41, 0xb3, 0x5f, 0x45, 0x23, 0x53, 0xe4, 0x9b, 0x75, 0xe1, 0x3d, 0x4c, 0x6c, 0x7e, 0xf5, 0x83, 0x68, 0x51, 0xd1, 0xf9, 0xe2, 0xab, 0x62, 0x2a, 0x08, 0x95, 0x46, 0x9d, 0x30, 0x37, 0x0a, 0x2f, 0x0e, 0x24, 0x1b, 0xdf, 0xcd, 0x4e, 0x7f, 0xea, 0x12, 0x1d, 0x58, 0x34, 0x36, 0xdc, 0xb4, 0x5b, 0xa4, 0x76, 0xb7, 0x7d, 0x52, 0xdd, 0x5e, 0x13, 0xa6, 0xb9, 0x00, 0xc1, 0x40, 0xe3, 0x79, 0xb6, 0xd4, 0x8d, 0x67, 0x72, 0x94, 0x98, 0xb0, 0x85, 0xbb, 0xc5, 0x4f, 0xed, 0x86, 0x9a, 0x66, 0x11, 0x8a, 0xe9, 0x04, 0xfe, 0xa0, 0x78, 0x25, 0x4b, 0xa2, 0x5d, 0x80, 0x05, 0x3f, 0x21, 0x70, 0xf1, 0x63, 0x77, 0xaf, 0x42, 0x20, 0xe5, 0xfd, 0xbf, 0x81, 0x18, 0x26, 0xc3, 0xbe, 0x35, 0x88, 0x2e, 0x93, 0x55, 0xfc, 0x7a, 0xc8, 0xba, 0x32, 0xe6, 0xc0, 0x19, 0x9e, 0xa3, 0x44, 0x54, 0x3b, 0x0b, 0x8c, 0xc7, 0x6b, 0x28, 0xa7, 0xbc, 0x16, 0xad, 0xdb, 0x64, 0x74, 0x14, 0x92, 0x0c, 0x48, 0xb8, 0x9f, 0xbd, 0x43, 0xc4, 0x39, 0x31, 0xd3, 0xf2, 0xd5, 0x8b, 0x6e, 0xda, 0x01, 0xb1, 0x9c, 0x49, 0xd8, 0xac, 0xf3, 0xcf, 0xca, 0xf4, 0x47, 0x10, 0x6f, 0xf0, 0x4a, 0x5c, 0x38, 0x57, 0x73, 0x97, 0xcb, 0xa1, 0xe8, 0x3e, 0x96, 0x61, 0x0d, 0x0f, 0xe0, 0x7c, 0x71, 0xcc, 0x90, 0x06, 0xf7, 0x1c, 0xc2, 0x6a, 0xae, 0x69, 0x17, 0x99, 0x3a, 0x27, 0xd9, 0xeb, 0x2b, 0x22, 0xd2, 0xa9, 0x07, 
0x33, 0x2d, 0x3c, 0x15, 0xc9, 0x87, 0xaa, 0x50, 0xa5, 0x03, 0x59, 0x09, 0x1a, 0x65, 0xd7, 0x84, 0xd0, 0x82, 0x29, 0x5a, 0x1e, 0x7b, 0xa8, 0x6d, 0x2c }; // combined Xtimes3[Sbox[]] __device__ uchar Xtime3Sbox[256] = { 0xa5, 0x84, 0x99, 0x8d, 0x0d, 0xbd, 0xb1, 0x54, 0x50, 0x03, 0xa9, 0x7d, 0x19, 0x62, 0xe6, 0x9a, 0x45, 0x9d, 0x40, 0x87, 0x15, 0xeb, 0xc9, 0x0b, 0xec, 0x67, 0xfd, 0xea, 0xbf, 0xf7, 0x96, 0x5b, 0xc2, 0x1c, 0xae, 0x6a, 0x5a, 0x41, 0x02, 0x4f, 0x5c, 0xf4, 0x34, 0x08, 0x93, 0x73, 0x53, 0x3f, 0x0c, 0x52, 0x65, 0x5e, 0x28, 0xa1, 0x0f, 0xb5, 0x09, 0x36, 0x9b, 0x3d, 0x26, 0x69, 0xcd, 0x9f, 0x1b, 0x9e, 0x74, 0x2e, 0x2d, 0xb2, 0xee, 0xfb, 0xf6, 0x4d, 0x61, 0xce, 0x7b, 0x3e, 0x71, 0x97, 0xf5, 0x68, 0x00, 0x2c, 0x60, 0x1f, 0xc8, 0xed, 0xbe, 0x46, 0xd9, 0x4b, 0xde, 0xd4, 0xe8, 0x4a, 0x6b, 0x2a, 0xe5, 0x16, 0xc5, 0xd7, 0x55, 0x94, 0xcf, 0x10, 0x06, 0x81, 0xf0, 0x44, 0xba, 0xe3, 0xf3, 0xfe, 0xc0, 0x8a, 0xad, 0xbc, 0x48, 0x04, 0xdf, 0xc1, 0x75, 0x63, 0x30, 0x1a, 0x0e, 0x6d, 0x4c, 0x14, 0x35, 0x2f, 0xe1, 0xa2, 0xcc, 0x39, 0x57, 0xf2, 0x82, 0x47, 0xac, 0xe7, 0x2b, 0x95, 0xa0, 0x98, 0xd1, 0x7f, 0x66, 0x7e, 0xab, 0x83, 0xca, 0x29, 0xd3, 0x3c, 0x79, 0xe2, 0x1d, 0x76, 0x3b, 0x56, 0x4e, 0x1e, 0xdb, 0x0a, 0x6c, 0xe4, 0x5d, 0x6e, 0xef, 0xa6, 0xa8, 0xa4, 0x37, 0x8b, 0x32, 0x43, 0x59, 0xb7, 0x8c, 0x64, 0xd2, 0xe0, 0xb4, 0xfa, 0x07, 0x25, 0xaf, 0x8e, 0xe9, 0x18, 0xd5, 0x88, 0x6f, 0x72, 0x24, 0xf1, 0xc7, 0x51, 0x23, 0x7c, 0x9c, 0x21, 0xdd, 0xdc, 0x86, 0x85, 0x90, 0x42, 0xc4, 0xaa, 0xd8, 0x05, 0x01, 0x12, 0xa3, 0x5f, 0xf9, 0xd0, 0x91, 0x58, 0x27, 0xb9, 0x38, 0x13, 0xb3, 0x33, 0xbb, 0x70, 0x89, 0xa7, 0xb6, 0x22, 0x92, 0x20, 0x49, 0xff, 0x78, 0x7a, 0x8f, 0xf8, 0x80, 0x17, 0xda, 0x31, 0xc6, 0xb8, 0xc3, 0xb0, 0x77, 0x11, 0xcb, 0xfc, 0xd6, 0x3a }; // modular multiplication tables // based on: // Xtime2[x] = (x & 0x80 ? 
0x1b : 0) ^ (x + x) // Xtime3[x] = x^Xtime2[x]; __device__ uchar Xtime2[256] = { 0x00, 0x02, 0x04, 0x06, 0x08, 0x0a, 0x0c, 0x0e, 0x10, 0x12, 0x14, 0x16, 0x18, 0x1a, 0x1c, 0x1e, 0x20, 0x22, 0x24, 0x26, 0x28, 0x2a, 0x2c, 0x2e, 0x30, 0x32, 0x34, 0x36, 0x38, 0x3a, 0x3c, 0x3e, 0x40, 0x42, 0x44, 0x46, 0x48, 0x4a, 0x4c, 0x4e, 0x50, 0x52, 0x54, 0x56, 0x58, 0x5a, 0x5c, 0x5e, 0x60, 0x62, 0x64, 0x66, 0x68, 0x6a, 0x6c, 0x6e, 0x70, 0x72, 0x74, 0x76, 0x78, 0x7a, 0x7c, 0x7e, 0x80, 0x82, 0x84, 0x86, 0x88, 0x8a, 0x8c, 0x8e, 0x90, 0x92, 0x94, 0x96, 0x98, 0x9a, 0x9c, 0x9e, 0xa0, 0xa2, 0xa4, 0xa6, 0xa8, 0xaa, 0xac, 0xae, 0xb0, 0xb2, 0xb4, 0xb6, 0xb8, 0xba, 0xbc, 0xbe, 0xc0, 0xc2, 0xc4, 0xc6, 0xc8, 0xca, 0xcc, 0xce, 0xd0, 0xd2, 0xd4, 0xd6, 0xd8, 0xda, 0xdc, 0xde, 0xe0, 0xe2, 0xe4, 0xe6, 0xe8, 0xea, 0xec, 0xee, 0xf0, 0xf2, 0xf4, 0xf6, 0xf8, 0xfa, 0xfc, 0xfe, 0x1b, 0x19, 0x1f, 0x1d, 0x13, 0x11, 0x17, 0x15, 0x0b, 0x09, 0x0f, 0x0d, 0x03, 0x01, 0x07, 0x05, 0x3b, 0x39, 0x3f, 0x3d, 0x33, 0x31, 0x37, 0x35, 0x2b, 0x29, 0x2f, 0x2d, 0x23, 0x21, 0x27, 0x25, 0x5b, 0x59, 0x5f, 0x5d, 0x53, 0x51, 0x57, 0x55, 0x4b, 0x49, 0x4f, 0x4d, 0x43, 0x41, 0x47, 0x45, 0x7b, 0x79, 0x7f, 0x7d, 0x73, 0x71, 0x77, 0x75, 0x6b, 0x69, 0x6f, 0x6d, 0x63, 0x61, 0x67, 0x65, 0x9b, 0x99, 0x9f, 0x9d, 0x93, 0x91, 0x97, 0x95, 0x8b, 0x89, 0x8f, 0x8d, 0x83, 0x81, 0x87, 0x85, 0xbb, 0xb9, 0xbf, 0xbd, 0xb3, 0xb1, 0xb7, 0xb5, 0xab, 0xa9, 0xaf, 0xad, 0xa3, 0xa1, 0xa7, 0xa5, 0xdb, 0xd9, 0xdf, 0xdd, 0xd3, 0xd1, 0xd7, 0xd5, 0xcb, 0xc9, 0xcf, 0xcd, 0xc3, 0xc1, 0xc7, 0xc5, 0xfb, 0xf9, 0xff, 0xfd, 0xf3, 0xf1, 0xf7, 0xf5, 0xeb, 0xe9, 0xef, 0xed, 0xe3, 0xe1, 0xe7, 0xe5}; __device__ uchar Xtime9[256] = { 0x00, 0x09, 0x12, 0x1b, 0x24, 0x2d, 0x36, 0x3f, 0x48, 0x41, 0x5a, 0x53, 0x6c, 0x65, 0x7e, 0x77, 0x90, 0x99, 0x82, 0x8b, 0xb4, 0xbd, 0xa6, 0xaf, 0xd8, 0xd1, 0xca, 0xc3, 0xfc, 0xf5, 0xee, 0xe7, 0x3b, 0x32, 0x29, 0x20, 0x1f, 0x16, 0x0d, 0x04, 0x73, 0x7a, 0x61, 0x68, 0x57, 0x5e, 0x45, 0x4c, 0xab, 0xa2, 0xb9, 0xb0, 0x8f, 0x86, 0x9d, 0x94, 0xe3, 0xea, 0xf1, 0xf8, 0xc7, 0xce, 0xd5, 0xdc, 0x76, 0x7f, 0x64, 0x6d, 0x52, 0x5b, 0x40, 0x49, 0x3e, 0x37, 0x2c, 0x25, 0x1a, 0x13, 0x08, 0x01, 0xe6, 0xef, 0xf4, 0xfd, 0xc2, 0xcb, 0xd0, 0xd9, 0xae, 0xa7, 0xbc, 0xb5, 0x8a, 0x83, 0x98, 0x91, 0x4d, 0x44, 0x5f, 0x56, 0x69, 0x60, 0x7b, 0x72, 0x05, 0x0c, 0x17, 0x1e, 0x21, 0x28, 0x33, 0x3a, 0xdd, 0xd4, 0xcf, 0xc6, 0xf9, 0xf0, 0xeb, 0xe2, 0x95, 0x9c, 0x87, 0x8e, 0xb1, 0xb8, 0xa3, 0xaa, 0xec, 0xe5, 0xfe, 0xf7, 0xc8, 0xc1, 0xda, 0xd3, 0xa4, 0xad, 0xb6, 0xbf, 0x80, 0x89, 0x92, 0x9b, 0x7c, 0x75, 0x6e, 0x67, 0x58, 0x51, 0x4a, 0x43, 0x34, 0x3d, 0x26, 0x2f, 0x10, 0x19, 0x02, 0x0b, 0xd7, 0xde, 0xc5, 0xcc, 0xf3, 0xfa, 0xe1, 0xe8, 0x9f, 0x96, 0x8d, 0x84, 0xbb, 0xb2, 0xa9, 0xa0, 0x47, 0x4e, 0x55, 0x5c, 0x63, 0x6a, 0x71, 0x78, 0x0f, 0x06, 0x1d, 0x14, 0x2b, 0x22, 0x39, 0x30, 0x9a, 0x93, 0x88, 0x81, 0xbe, 0xb7, 0xac, 0xa5, 0xd2, 0xdb, 0xc0, 0xc9, 0xf6, 0xff, 0xe4, 0xed, 0x0a, 0x03, 0x18, 0x11, 0x2e, 0x27, 0x3c, 0x35, 0x42, 0x4b, 0x50, 0x59, 0x66, 0x6f, 0x74, 0x7d, 0xa1, 0xa8, 0xb3, 0xba, 0x85, 0x8c, 0x97, 0x9e, 0xe9, 0xe0, 0xfb, 0xf2, 0xcd, 0xc4, 0xdf, 0xd6, 0x31, 0x38, 0x23, 0x2a, 0x15, 0x1c, 0x07, 0x0e, 0x79, 0x70, 0x6b, 0x62, 0x5d, 0x54, 0x4f, 0x46}; __device__ uchar XtimeB[256] = { 0x00, 0x0b, 0x16, 0x1d, 0x2c, 0x27, 0x3a, 0x31, 0x58, 0x53, 0x4e, 0x45, 0x74, 0x7f, 0x62, 0x69, 0xb0, 0xbb, 0xa6, 0xad, 0x9c, 0x97, 0x8a, 0x81, 0xe8, 0xe3, 0xfe, 0xf5, 0xc4, 0xcf, 0xd2, 0xd9, 0x7b, 0x70, 0x6d, 0x66, 0x57, 0x5c, 0x41, 0x4a, 0x23, 0x28, 0x35, 0x3e, 0x0f, 0x04, 0x19, 0x12, 0xcb, 0xc0, 0xdd, 0xd6, 0xe7, 0xec, 0xf1, 
0xfa, 0x93, 0x98, 0x85, 0x8e, 0xbf, 0xb4, 0xa9, 0xa2, 0xf6, 0xfd, 0xe0, 0xeb, 0xda, 0xd1, 0xcc, 0xc7, 0xae, 0xa5, 0xb8, 0xb3, 0x82, 0x89, 0x94, 0x9f, 0x46, 0x4d, 0x50, 0x5b, 0x6a, 0x61, 0x7c, 0x77, 0x1e, 0x15, 0x08, 0x03, 0x32, 0x39, 0x24, 0x2f, 0x8d, 0x86, 0x9b, 0x90, 0xa1, 0xaa, 0xb7, 0xbc, 0xd5, 0xde, 0xc3, 0xc8, 0xf9, 0xf2, 0xef, 0xe4, 0x3d, 0x36, 0x2b, 0x20, 0x11, 0x1a, 0x07, 0x0c, 0x65, 0x6e, 0x73, 0x78, 0x49, 0x42, 0x5f, 0x54, 0xf7, 0xfc, 0xe1, 0xea, 0xdb, 0xd0, 0xcd, 0xc6, 0xaf, 0xa4, 0xb9, 0xb2, 0x83, 0x88, 0x95, 0x9e, 0x47, 0x4c, 0x51, 0x5a, 0x6b, 0x60, 0x7d, 0x76, 0x1f, 0x14, 0x09, 0x02, 0x33, 0x38, 0x25, 0x2e, 0x8c, 0x87, 0x9a, 0x91, 0xa0, 0xab, 0xb6, 0xbd, 0xd4, 0xdf, 0xc2, 0xc9, 0xf8, 0xf3, 0xee, 0xe5, 0x3c, 0x37, 0x2a, 0x21, 0x10, 0x1b, 0x06, 0x0d, 0x64, 0x6f, 0x72, 0x79, 0x48, 0x43, 0x5e, 0x55, 0x01, 0x0a, 0x17, 0x1c, 0x2d, 0x26, 0x3b, 0x30, 0x59, 0x52, 0x4f, 0x44, 0x75, 0x7e, 0x63, 0x68, 0xb1, 0xba, 0xa7, 0xac, 0x9d, 0x96, 0x8b, 0x80, 0xe9, 0xe2, 0xff, 0xf4, 0xc5, 0xce, 0xd3, 0xd8, 0x7a, 0x71, 0x6c, 0x67, 0x56, 0x5d, 0x40, 0x4b, 0x22, 0x29, 0x34, 0x3f, 0x0e, 0x05, 0x18, 0x13, 0xca, 0xc1, 0xdc, 0xd7, 0xe6, 0xed, 0xf0, 0xfb, 0x92, 0x99, 0x84, 0x8f, 0xbe, 0xb5, 0xa8, 0xa3}; __device__ uchar XtimeD[256] = { 0x00, 0x0d, 0x1a, 0x17, 0x34, 0x39, 0x2e, 0x23, 0x68, 0x65, 0x72, 0x7f, 0x5c, 0x51, 0x46, 0x4b, 0xd0, 0xdd, 0xca, 0xc7, 0xe4, 0xe9, 0xfe, 0xf3, 0xb8, 0xb5, 0xa2, 0xaf, 0x8c, 0x81, 0x96, 0x9b, 0xbb, 0xb6, 0xa1, 0xac, 0x8f, 0x82, 0x95, 0x98, 0xd3, 0xde, 0xc9, 0xc4, 0xe7, 0xea, 0xfd, 0xf0, 0x6b, 0x66, 0x71, 0x7c, 0x5f, 0x52, 0x45, 0x48, 0x03, 0x0e, 0x19, 0x14, 0x37, 0x3a, 0x2d, 0x20, 0x6d, 0x60, 0x77, 0x7a, 0x59, 0x54, 0x43, 0x4e, 0x05, 0x08, 0x1f, 0x12, 0x31, 0x3c, 0x2b, 0x26, 0xbd, 0xb0, 0xa7, 0xaa, 0x89, 0x84, 0x93, 0x9e, 0xd5, 0xd8, 0xcf, 0xc2, 0xe1, 0xec, 0xfb, 0xf6, 0xd6, 0xdb, 0xcc, 0xc1, 0xe2, 0xef, 0xf8, 0xf5, 0xbe, 0xb3, 0xa4, 0xa9, 0x8a, 0x87, 0x90, 0x9d, 0x06, 0x0b, 0x1c, 0x11, 0x32, 0x3f, 0x28, 0x25, 0x6e, 0x63, 0x74, 0x79, 0x5a, 0x57, 0x40, 0x4d, 0xda, 0xd7, 0xc0, 0xcd, 0xee, 0xe3, 0xf4, 0xf9, 0xb2, 0xbf, 0xa8, 0xa5, 0x86, 0x8b, 0x9c, 0x91, 0x0a, 0x07, 0x10, 0x1d, 0x3e, 0x33, 0x24, 0x29, 0x62, 0x6f, 0x78, 0x75, 0x56, 0x5b, 0x4c, 0x41, 0x61, 0x6c, 0x7b, 0x76, 0x55, 0x58, 0x4f, 0x42, 0x09, 0x04, 0x13, 0x1e, 0x3d, 0x30, 0x27, 0x2a, 0xb1, 0xbc, 0xab, 0xa6, 0x85, 0x88, 0x9f, 0x92, 0xd9, 0xd4, 0xc3, 0xce, 0xed, 0xe0, 0xf7, 0xfa, 0xb7, 0xba, 0xad, 0xa0, 0x83, 0x8e, 0x99, 0x94, 0xdf, 0xd2, 0xc5, 0xc8, 0xeb, 0xe6, 0xf1, 0xfc, 0x67, 0x6a, 0x7d, 0x70, 0x53, 0x5e, 0x49, 0x44, 0x0f, 0x02, 0x15, 0x18, 0x3b, 0x36, 0x21, 0x2c, 0x0c, 0x01, 0x16, 0x1b, 0x38, 0x35, 0x22, 0x2f, 0x64, 0x69, 0x7e, 0x73, 0x50, 0x5d, 0x4a, 0x47, 0xdc, 0xd1, 0xc6, 0xcb, 0xe8, 0xe5, 0xf2, 0xff, 0xb4, 0xb9, 0xae, 0xa3, 0x80, 0x8d, 0x9a, 0x97}; __device__ uchar XtimeE[256] = { 0x00, 0x0e, 0x1c, 0x12, 0x38, 0x36, 0x24, 0x2a, 0x70, 0x7e, 0x6c, 0x62, 0x48, 0x46, 0x54, 0x5a, 0xe0, 0xee, 0xfc, 0xf2, 0xd8, 0xd6, 0xc4, 0xca, 0x90, 0x9e, 0x8c, 0x82, 0xa8, 0xa6, 0xb4, 0xba, 0xdb, 0xd5, 0xc7, 0xc9, 0xe3, 0xed, 0xff, 0xf1, 0xab, 0xa5, 0xb7, 0xb9, 0x93, 0x9d, 0x8f, 0x81, 0x3b, 0x35, 0x27, 0x29, 0x03, 0x0d, 0x1f, 0x11, 0x4b, 0x45, 0x57, 0x59, 0x73, 0x7d, 0x6f, 0x61, 0xad, 0xa3, 0xb1, 0xbf, 0x95, 0x9b, 0x89, 0x87, 0xdd, 0xd3, 0xc1, 0xcf, 0xe5, 0xeb, 0xf9, 0xf7, 0x4d, 0x43, 0x51, 0x5f, 0x75, 0x7b, 0x69, 0x67, 0x3d, 0x33, 0x21, 0x2f, 0x05, 0x0b, 0x19, 0x17, 0x76, 0x78, 0x6a, 0x64, 0x4e, 0x40, 0x52, 0x5c, 0x06, 0x08, 0x1a, 0x14, 0x3e, 0x30, 0x22, 0x2c, 0x96, 0x98, 0x8a, 0x84, 0xae, 0xa0, 0xb2, 0xbc, 0xe6, 0xe8, 0xfa, 0xf4, 
0xde, 0xd0, 0xc2, 0xcc, 0x41, 0x4f, 0x5d, 0x53, 0x79, 0x77, 0x65, 0x6b, 0x31, 0x3f, 0x2d, 0x23, 0x09, 0x07, 0x15, 0x1b, 0xa1, 0xaf, 0xbd, 0xb3, 0x99, 0x97, 0x85, 0x8b, 0xd1, 0xdf, 0xcd, 0xc3, 0xe9, 0xe7, 0xf5, 0xfb, 0x9a, 0x94, 0x86, 0x88, 0xa2, 0xac, 0xbe, 0xb0, 0xea, 0xe4, 0xf6, 0xf8, 0xd2, 0xdc, 0xce, 0xc0, 0x7a, 0x74, 0x66, 0x68, 0x42, 0x4c, 0x5e, 0x50, 0x0a, 0x04, 0x16, 0x18, 0x32, 0x3c, 0x2e, 0x20, 0xec, 0xe2, 0xf0, 0xfe, 0xd4, 0xda, 0xc8, 0xc6, 0x9c, 0x92, 0x80, 0x8e, 0xa4, 0xaa, 0xb8, 0xb6, 0x0c, 0x02, 0x10, 0x1e, 0x34, 0x3a, 0x28, 0x26, 0x7c, 0x72, 0x60, 0x6e, 0x44, 0x4a, 0x58, 0x56, 0x37, 0x39, 0x2b, 0x25, 0x0f, 0x01, 0x13, 0x1d, 0x47, 0x49, 0x5b, 0x55, 0x7f, 0x71, 0x63, 0x6d, 0xd7, 0xd9, 0xcb, 0xc5, 0xef, 0xe1, 0xf3, 0xfd, 0xa7, 0xa9, 0xbb, 0xb5, 0x9f, 0x91, 0x83, 0x8d}; // exchanges columns in each of 4 rows // row0 - unchanged, row1- shifted left 1, // row2 - shifted left 2 and row3 - shifted left 3 __device__ void ShiftRows (uchar *state) { uchar tmp; // just substitute row 0 state[0] = Sbox[state[0]], state[4] = Sbox[state[4]]; state[8] = Sbox[state[8]], state[12] = Sbox[state[12]]; // rotate row 1 tmp = Sbox[state[1]], state[1] = Sbox[state[5]]; state[5] = Sbox[state[9]], state[9] = Sbox[state[13]], state[13] = tmp; // rotate row 2 tmp = Sbox[state[2]], state[2] = Sbox[state[10]], state[10] = tmp; tmp = Sbox[state[6]], state[6] = Sbox[state[14]], state[14] = tmp; // rotate row 3 tmp = Sbox[state[15]], state[15] = Sbox[state[11]]; state[11] = Sbox[state[7]], state[7] = Sbox[state[3]], state[3] = tmp; } // restores columns in each of 4 rows // row0 - unchanged, row1- shifted right 1, // row2 - shifted right 2 and row3 - shifted right 3 __device__ void InvShiftRows (uchar *state) { uchar tmp; // restore row 0 state[0] = InvSbox[state[0]], state[4] = InvSbox[state[4]]; state[8] = InvSbox[state[8]], state[12] = InvSbox[state[12]]; // restore row 1 tmp = InvSbox[state[13]], state[13] = InvSbox[state[9]]; state[9] = InvSbox[state[5]], state[5] = InvSbox[state[1]], state[1] = tmp; // restore row 2 tmp = InvSbox[state[2]], state[2] = InvSbox[state[10]], state[10] = tmp; tmp = InvSbox[state[6]], state[6] = InvSbox[state[14]], state[14] = tmp; // restore row 3 tmp = InvSbox[state[3]], state[3] = InvSbox[state[7]]; state[7] = InvSbox[state[11]], state[11] = InvSbox[state[15]], state[15] = tmp; } // recombine and mix each row in a column __device__ void MixSubColumns (uchar *state) { uchar tmp[4 * Nb]; // mixing column 0 tmp[0] = Xtime2Sbox[state[0]] ^ Xtime3Sbox[state[5]] ^ Sbox[state[10]] ^ Sbox[state[15]]; tmp[1] = Sbox[state[0]] ^ Xtime2Sbox[state[5]] ^ Xtime3Sbox[state[10]] ^ Sbox[state[15]]; tmp[2] = Sbox[state[0]] ^ Sbox[state[5]] ^ Xtime2Sbox[state[10]] ^ Xtime3Sbox[state[15]]; tmp[3] = Xtime3Sbox[state[0]] ^ Sbox[state[5]] ^ Sbox[state[10]] ^ Xtime2Sbox[state[15]]; // mixing column 1 tmp[4] = Xtime2Sbox[state[4]] ^ Xtime3Sbox[state[9]] ^ Sbox[state[14]] ^ Sbox[state[3]]; tmp[5] = Sbox[state[4]] ^ Xtime2Sbox[state[9]] ^ Xtime3Sbox[state[14]] ^ Sbox[state[3]]; tmp[6] = Sbox[state[4]] ^ Sbox[state[9]] ^ Xtime2Sbox[state[14]] ^ Xtime3Sbox[state[3]]; tmp[7] = Xtime3Sbox[state[4]] ^ Sbox[state[9]] ^ Sbox[state[14]] ^ Xtime2Sbox[state[3]]; // mixing column 2 tmp[8] = Xtime2Sbox[state[8]] ^ Xtime3Sbox[state[13]] ^ Sbox[state[2]] ^ Sbox[state[7]]; tmp[9] = Sbox[state[8]] ^ Xtime2Sbox[state[13]] ^ Xtime3Sbox[state[2]] ^ Sbox[state[7]]; tmp[10] = Sbox[state[8]] ^ Sbox[state[13]] ^ Xtime2Sbox[state[2]] ^ Xtime3Sbox[state[7]]; tmp[11] = Xtime3Sbox[state[8]] ^ Sbox[state[13]] ^ 
Sbox[state[2]] ^ Xtime2Sbox[state[7]]; // mixing column 3 tmp[12] = Xtime2Sbox[state[12]] ^ Xtime3Sbox[state[1]] ^ Sbox[state[6]] ^ Sbox[state[11]]; tmp[13] = Sbox[state[12]] ^ Xtime2Sbox[state[1]] ^ Xtime3Sbox[state[6]] ^ Sbox[state[11]]; tmp[14] = Sbox[state[12]] ^ Sbox[state[1]] ^ Xtime2Sbox[state[6]] ^ Xtime3Sbox[state[11]]; tmp[15] = Xtime3Sbox[state[12]] ^ Sbox[state[1]] ^ Sbox[state[6]] ^ Xtime2Sbox[state[11]]; memcpy (state, tmp, sizeof(tmp)); } // restore and un-mix each row in a column __device__ void InvMixSubColumns (uchar *state) { uchar tmp[4 * Nb]; int i; // restore column 0 tmp[0] = XtimeE[state[0]] ^ XtimeB[state[1]] ^ XtimeD[state[2]] ^ Xtime9[state[3]]; tmp[5] = Xtime9[state[0]] ^ XtimeE[state[1]] ^ XtimeB[state[2]] ^ XtimeD[state[3]]; tmp[10] = XtimeD[state[0]] ^ Xtime9[state[1]] ^ XtimeE[state[2]] ^ XtimeB[state[3]]; tmp[15] = XtimeB[state[0]] ^ XtimeD[state[1]] ^ Xtime9[state[2]] ^ XtimeE[state[3]]; // restore column 1 tmp[4] = XtimeE[state[4]] ^ XtimeB[state[5]] ^ XtimeD[state[6]] ^ Xtime9[state[7]]; tmp[9] = Xtime9[state[4]] ^ XtimeE[state[5]] ^ XtimeB[state[6]] ^ XtimeD[state[7]]; tmp[14] = XtimeD[state[4]] ^ Xtime9[state[5]] ^ XtimeE[state[6]] ^ XtimeB[state[7]]; tmp[3] = XtimeB[state[4]] ^ XtimeD[state[5]] ^ Xtime9[state[6]] ^ XtimeE[state[7]]; // restore column 2 tmp[8] = XtimeE[state[8]] ^ XtimeB[state[9]] ^ XtimeD[state[10]] ^ Xtime9[state[11]]; tmp[13] = Xtime9[state[8]] ^ XtimeE[state[9]] ^ XtimeB[state[10]] ^ XtimeD[state[11]]; tmp[2] = XtimeD[state[8]] ^ Xtime9[state[9]] ^ XtimeE[state[10]] ^ XtimeB[state[11]]; tmp[7] = XtimeB[state[8]] ^ XtimeD[state[9]] ^ Xtime9[state[10]] ^ XtimeE[state[11]]; // restore column 3 tmp[12] = XtimeE[state[12]] ^ XtimeB[state[13]] ^ XtimeD[state[14]] ^ Xtime9[state[15]]; tmp[1] = Xtime9[state[12]] ^ XtimeE[state[13]] ^ XtimeB[state[14]] ^ XtimeD[state[15]]; tmp[6] = XtimeD[state[12]] ^ Xtime9[state[13]] ^ XtimeE[state[14]] ^ XtimeB[state[15]]; tmp[11] = XtimeB[state[12]] ^ XtimeD[state[13]] ^ Xtime9[state[14]] ^ XtimeE[state[15]]; for( i=0; i < 4 * Nb; i++ ) state[i] = InvSbox[tmp[i]]; } // encrypt/decrypt columns of the key // n.b. you can replace this with // byte-wise xor if you wish. 
__device__ void AddRoundKey (unsigned *state, unsigned *key) { int idx; for( idx = 0; idx < 4; idx++ ) state[idx] ^= key[idx]; } uchar Rcon[11] = { 0x00, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x1b, 0x36}; // produce Nb bytes for each round void ExpandKey (uchar *key, uchar *expkey) { uchar tmp0, tmp1, tmp2, tmp3, tmp4; unsigned idx; memcpy (expkey, key, Nk * 4); for( idx = Nk; idx < Nb * (Nr + 1); idx++ ) { tmp0 = expkey[4*idx - 4]; tmp1 = expkey[4*idx - 3]; tmp2 = expkey[4*idx - 2]; tmp3 = expkey[4*idx - 1]; if( !(idx % Nk) ) { tmp4 = tmp3; tmp3 = cpuSbox[tmp0]; tmp0 = cpuSbox[tmp1] ^ Rcon[idx/Nk]; tmp1 = cpuSbox[tmp2]; tmp2 = cpuSbox[tmp4]; } else if( Nk > 6 && idx % Nk == 4 ) { tmp0 = cpuSbox[tmp0]; tmp1 = cpuSbox[tmp1]; tmp2 = cpuSbox[tmp2]; tmp3 = cpuSbox[tmp3]; } expkey[4*idx+0] = expkey[4*idx - 4*Nk + 0] ^ tmp0; expkey[4*idx+1] = expkey[4*idx - 4*Nk + 1] ^ tmp1; expkey[4*idx+2] = expkey[4*idx - 4*Nk + 2] ^ tmp2; expkey[4*idx+3] = expkey[4*idx - 4*Nk + 3] ^ tmp3; } } // encrypt one 128 bit block /*void Encrypt (uchar *in, uchar *expkey, uchar *out) { uchar state[Nb * 4]; unsigned round; memcpy (state, in, Nb * 4); AddRoundKey ((unsigned *)state, (unsigned *)expkey); for( round = 1; round < Nr + 1; round++ ) { if( round < Nr ) MixSubColumns (state); else ShiftRows (state); AddRoundKey ((unsigned *)state, (unsigned *)expkey + round * Nb); } memcpy (out, state, sizeof(state)); }*/ /*void Decrypt (uchar *in, uchar *expkey, uchar *out) { uchar state[Nb * 4]; unsigned round; memcpy (state, in, sizeof(state)); AddRoundKey ((unsigned *)state, (unsigned *)expkey + Nr * Nb); InvShiftRows(state); for( round = Nr; round--; ) { AddRoundKey ((unsigned *)state, (unsigned *)expkey + round * Nb); if( round ) InvMixSubColumns (state); } memcpy (out, state, sizeof(state)); }*/ #include <stdio.h> #include <fcntl.h> uchar* in = (uchar *) malloc(BUF_SIZE); uchar key[] = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f}; uchar* out = (uchar *) malloc(BUF_SIZE); #ifndef unix void rd_clock (__int64 *ans) { unsigned dwLow, dwHigh; __asm { rdtsc mov dwLow, eax mov dwHigh, edx } *ans = (__int64)dwHigh << 32 | (__int64)dwLow; } #else typedef long long __int64; void rd_clock (__int64 *ans) { unsigned long long dwBoth; __asm__ volatile(".byte 0x0f, 0x31" : "=A"(dwBoth)); *ans = dwBoth; } #endif inline unsigned long int monotonicTime(void) { const unsigned long int NS_PER_SEC = 1000 * 1000 * 1000; struct timespec now; clock_gettime(CLOCK_MONOTONIC, &now); return now.tv_sec * NS_PER_SEC + now.tv_nsec; } /*void decrypt (char *mykey, char *name) { uchar expkey[4 * Nb * (Nr + 1)]; FILE *fd = fopen (name, "rb"); int ch, idx = 0; strncpy ((char *) key, mykey, sizeof(key)); ExpandKey (key, expkey); while( ch = getc(fd), ch != EOF ) { in[idx++] = ch; if( idx % 16 ) continue; Decrypt (in, expkey, out); for( idx = 0; idx < 16; idx++ ) putchar (out[idx]); idx = 0; } }*/ __global__ void gpuEncrypt(uchar* in, uchar* expkey, uchar *out, int idx) { uchar state[4 * Nb]; // array of 16 //memcpy (state, in, Nb * 4); //int numOfBlockKey = idx/16; //threadIdx.x*16 for(int i = 16*threadIdx.x+ 16*blockIdx.x*blockDim.x; i < idx; i += 16*blockDim.x*gridDim.x) { //printf("testing gpuIn %d", in[i]); memcpy (state, &in[i], Nb * 4); //threadIdx.x*16+ 16*blockIdx.x*blockDim.x; 16 is number of data that each thread handles AddRoundKey ((unsigned *)state, (unsigned *)expkey); for( unsigned 
round = 1; round < Nr + 1; round++ ) { if( round < Nr ) MixSubColumns (state); else ShiftRows (state); AddRoundKey ((unsigned *)state, (unsigned *)expkey + round * Nb); } memcpy (&out[i], state, sizeof(state)); //16*blockIdx.x*blockDim.x+threadIdx.x*16 } //printf("testing gpuOut %d", out[0]); } void encrypt (char *mykey, char *name) { uchar expkey[4 * Nb * (Nr + 1)]; FILE *fd = fopen (name, "rb"); int ch, idx = 0; strncpy ((char *)key, mykey, sizeof(key)); ExpandKey (key, expkey); while( ch = getc(fd), ch != EOF ) // insert all cha into in[] arr { if(idx == BUF_SIZE) { cout <<"File is Too Big!"<<endl; exit(0); } in[idx++] = ch; } if(idx > 0 ) while(idx % 16 != 0) in[idx++] = 0; else return; //allocate Memory at CPU uchar* gpuIn; uchar* gpuExpkey; uchar* gpuOut; hipMalloc(&gpuIn, BUF_SIZE*sizeof(uchar)); hipMalloc(&gpuOut, BUF_SIZE*sizeof(uchar)); hipMalloc(&gpuExpkey, 4 * Nb * (Nr + 1)*sizeof(uchar)); //copy data from host to device hipMemcpy(gpuIn, in, BUF_SIZE*sizeof(uchar), hipMemcpyHostToDevice); hipMemcpy(gpuExpkey, expkey, 4 * Nb * (Nr + 1)*sizeof(uchar), hipMemcpyHostToDevice); // Cuda running hipLaunchKernelGGL(( gpuEncrypt), dim3(14),dim3(256), 0, 0, gpuIn, gpuExpkey, gpuOut, idx); // copy to out from device to host hipMemcpy(out, gpuOut, BUF_SIZE * sizeof(uchar) , hipMemcpyDeviceToHost); for(int i = 0; i < idx; i++) putchar(out[i]); } uchar expkey[4 * Nb * (Nr + 1)]; void mrandom (int, char *); unsigned xrandom (void); int aescycles () { __int64 start, end; int t; do { rd_clock(&start); //Encrypt (in, expkey, out); rd_clock (&end); t = end - start; } while( t<= 0 || t>= 4000); return t; } int bestx (int b, int loops) { int bestx = 0, bestxt = 0; int x, xt, i, j; for( x = 0; x < 256; x++ ) { xt = 0; for( i = 0; i < loops; i++ ) { for( j = 0; j < 16; j++ ) in[j] = xrandom() >> 16; in[b] = x; xt += aescycles(); xt += aescycles(); xt += aescycles(); xt += aescycles(); xt += aescycles(); } if( xt > bestxt ) bestx = x, bestxt = xt; } return bestx; } int main (int argc, char *argv[]) { #ifndef unix extern int __cdecl _setmode (int, int); _setmode (_fileno(stdout), _O_BINARY); #endif switch( argv[1][0] ) { //case 'c': certify(); break; case 'e': encrypt(argv[2], argv[3]); break; //case 'd': decrypt(argv[2], argv[3]); break;; //case 's': sample(); break; } } /* * The package generates far better random numbers than a linear * congruential generator. The random number generation technique * is a linear feedback shift register approach. In this approach, * the least significant bit of all the numbers in the RandTbl table * will act as a linear feedback shift register, and will have period * of approximately 2^96 - 1. * */ #define RAND_order (7 * sizeof(unsigned)) #define RAND_size (96 * sizeof(unsigned)) uchar RandTbl[RAND_size + RAND_order]; int RandHead = 0; /* * random: x**96 + x**7 + x**6 + x**4 + x**3 + x**2 + 1 * * The basic operation is to add to the number at the head index * the XOR sum of the lower order terms in the polynomial. * Then the index is advanced to the next location cyclically * in the table. The value returned is the sum generated. 
* */ unsigned xrandom () { register unsigned fact; if( (RandHead -= sizeof(unsigned)) < 0 ) { RandHead = RAND_size - sizeof(unsigned); memcpy (RandTbl + RAND_size, RandTbl, RAND_order); } fact = *(unsigned *)(RandTbl + RandHead + 7 * sizeof(unsigned)); fact ^= *(unsigned *)(RandTbl + RandHead + 6 * sizeof(unsigned)); fact ^= *(unsigned *)(RandTbl + RandHead + 4 * sizeof(unsigned)); fact ^= *(unsigned *)(RandTbl + RandHead + 3 * sizeof(unsigned)); fact ^= *(unsigned *)(RandTbl + RandHead + 2 * sizeof(unsigned)); return *(unsigned *)(RandTbl + RandHead) += fact; } /* * mrandom: * Initialize the random number generator based on the given seed. * */ void mrandom (int len, char *ptr) { unsigned short rand = *ptr; int idx, bit = len * 4; memset (RandTbl, 0, sizeof(RandTbl)); RandHead = 0; while( rand *= 20077, rand += 11, bit-- ) if( ptr[bit >> 2] & (1 << (bit & 3)) ) for (idx = 0; idx < 5; idx++) { rand *= 20077, rand += 11; RandTbl[rand % 96 << 2] ^= 1; } for( idx = 0; idx < 96 * 63; idx++ ) xrandom (); }
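// gpuEncrypt above covers the padded input with a grid-stride loop over
// 16-byte AES blocks: each thread starts at its own block and advances by
// 16*blockDim.x*gridDim.x bytes, so the fixed <<<14,256>>> launch in encrypt()
// handles any buffer length. The sketch below isolates just that indexing
// pattern; the kernel name is hypothetical and the per-block body is a
// placeholder rather than the AES rounds.
#include <cuda_runtime.h>

__global__ void perBlock(unsigned char *buf, int nbytes)
{
  const int start  = 16 * (blockIdx.x * blockDim.x + threadIdx.x);
  const int stride = 16 * blockDim.x * gridDim.x;

  for (int i = start; i < nbytes; i += stride) {
    // one 16-byte block per iteration; real code would run the AES rounds here
    for (int b = 0; b < 16; ++b) buf[i + b] ^= 0xff;  // placeholder work
  }
}

int main()
{
  const int nbytes = 1 << 20;                 // a multiple of 16, as encrypt() guarantees
  unsigned char *d_buf;
  cudaMalloc(&d_buf, nbytes);
  cudaMemset(d_buf, 0, nbytes);
  perBlock<<<14, 256>>>(d_buf, nbytes);       // same grid shape the file uses
  cudaDeviceSynchronize();
  cudaFree(d_buf);
  return 0;
}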
386e72f8a62d96d872304f2e18e73862a5f1a060.cu
// advanced encryption standard // author: karl malbrain, [email protected] /* This work, including the source code, documentation and related data, is placed into the public domain. The orginal author is Karl Malbrain. THIS SOFTWARE IS PROVIDED AS-IS WITHOUT WARRANTY OF ANY KIND, NOT EVEN THE IMPLIED WARRANTY OF MERCHANTABILITY. THE AUTHOR OF THIS SOFTWARE, ASSUMES _NO_ RESPONSIBILITY FOR ANY CONSEQUENCE RESULTING FROM THE USE, MODIFICATION, OR REDISTRIBUTION OF THIS SOFTWARE. */ typedef unsigned char uchar; #include <string.h> #include <memory.h> #include <iostream> using namespace std; // AES only supports Nb=4 #define Nb 4 // number of columns in the state & expanded key #define Nk 4 // number of columns in a key #define Nr 10 // number of rounds in encryption const size_t BUF_SIZE = 1000000000; __device__ uchar Sbox[256] = { // forward s-box 0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5, 0x30, 0x01, 0x67, 0x2b, 0xfe, 0xd7, 0xab, 0x76, 0xca, 0x82, 0xc9, 0x7d, 0xfa, 0x59, 0x47, 0xf0, 0xad, 0xd4, 0xa2, 0xaf, 0x9c, 0xa4, 0x72, 0xc0, 0xb7, 0xfd, 0x93, 0x26, 0x36, 0x3f, 0xf7, 0xcc, 0x34, 0xa5, 0xe5, 0xf1, 0x71, 0xd8, 0x31, 0x15, 0x04, 0xc7, 0x23, 0xc3, 0x18, 0x96, 0x05, 0x9a, 0x07, 0x12, 0x80, 0xe2, 0xeb, 0x27, 0xb2, 0x75, 0x09, 0x83, 0x2c, 0x1a, 0x1b, 0x6e, 0x5a, 0xa0, 0x52, 0x3b, 0xd6, 0xb3, 0x29, 0xe3, 0x2f, 0x84, 0x53, 0xd1, 0x00, 0xed, 0x20, 0xfc, 0xb1, 0x5b, 0x6a, 0xcb, 0xbe, 0x39, 0x4a, 0x4c, 0x58, 0xcf, 0xd0, 0xef, 0xaa, 0xfb, 0x43, 0x4d, 0x33, 0x85, 0x45, 0xf9, 0x02, 0x7f, 0x50, 0x3c, 0x9f, 0xa8, 0x51, 0xa3, 0x40, 0x8f, 0x92, 0x9d, 0x38, 0xf5, 0xbc, 0xb6, 0xda, 0x21, 0x10, 0xff, 0xf3, 0xd2, 0xcd, 0x0c, 0x13, 0xec, 0x5f, 0x97, 0x44, 0x17, 0xc4, 0xa7, 0x7e, 0x3d, 0x64, 0x5d, 0x19, 0x73, 0x60, 0x81, 0x4f, 0xdc, 0x22, 0x2a, 0x90, 0x88, 0x46, 0xee, 0xb8, 0x14, 0xde, 0x5e, 0x0b, 0xdb, 0xe0, 0x32, 0x3a, 0x0a, 0x49, 0x06, 0x24, 0x5c, 0xc2, 0xd3, 0xac, 0x62, 0x91, 0x95, 0xe4, 0x79, 0xe7, 0xc8, 0x37, 0x6d, 0x8d, 0xd5, 0x4e, 0xa9, 0x6c, 0x56, 0xf4, 0xea, 0x65, 0x7a, 0xae, 0x08, 0xba, 0x78, 0x25, 0x2e, 0x1c, 0xa6, 0xb4, 0xc6, 0xe8, 0xdd, 0x74, 0x1f, 0x4b, 0xbd, 0x8b, 0x8a, 0x70, 0x3e, 0xb5, 0x66, 0x48, 0x03, 0xf6, 0x0e, 0x61, 0x35, 0x57, 0xb9, 0x86, 0xc1, 0x1d, 0x9e, 0xe1, 0xf8, 0x98, 0x11, 0x69, 0xd9, 0x8e, 0x94, 0x9b, 0x1e, 0x87, 0xe9, 0xce, 0x55, 0x28, 0xdf, 0x8c, 0xa1, 0x89, 0x0d, 0xbf, 0xe6, 0x42, 0x68, 0x41, 0x99, 0x2d, 0x0f, 0xb0, 0x54, 0xbb, 0x16}; uchar cpuSbox[256] = { // forward s-box 0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5, 0x30, 0x01, 0x67, 0x2b, 0xfe, 0xd7, 0xab, 0x76, 0xca, 0x82, 0xc9, 0x7d, 0xfa, 0x59, 0x47, 0xf0, 0xad, 0xd4, 0xa2, 0xaf, 0x9c, 0xa4, 0x72, 0xc0, 0xb7, 0xfd, 0x93, 0x26, 0x36, 0x3f, 0xf7, 0xcc, 0x34, 0xa5, 0xe5, 0xf1, 0x71, 0xd8, 0x31, 0x15, 0x04, 0xc7, 0x23, 0xc3, 0x18, 0x96, 0x05, 0x9a, 0x07, 0x12, 0x80, 0xe2, 0xeb, 0x27, 0xb2, 0x75, 0x09, 0x83, 0x2c, 0x1a, 0x1b, 0x6e, 0x5a, 0xa0, 0x52, 0x3b, 0xd6, 0xb3, 0x29, 0xe3, 0x2f, 0x84, 0x53, 0xd1, 0x00, 0xed, 0x20, 0xfc, 0xb1, 0x5b, 0x6a, 0xcb, 0xbe, 0x39, 0x4a, 0x4c, 0x58, 0xcf, 0xd0, 0xef, 0xaa, 0xfb, 0x43, 0x4d, 0x33, 0x85, 0x45, 0xf9, 0x02, 0x7f, 0x50, 0x3c, 0x9f, 0xa8, 0x51, 0xa3, 0x40, 0x8f, 0x92, 0x9d, 0x38, 0xf5, 0xbc, 0xb6, 0xda, 0x21, 0x10, 0xff, 0xf3, 0xd2, 0xcd, 0x0c, 0x13, 0xec, 0x5f, 0x97, 0x44, 0x17, 0xc4, 0xa7, 0x7e, 0x3d, 0x64, 0x5d, 0x19, 0x73, 0x60, 0x81, 0x4f, 0xdc, 0x22, 0x2a, 0x90, 0x88, 0x46, 0xee, 0xb8, 0x14, 0xde, 0x5e, 0x0b, 0xdb, 0xe0, 0x32, 0x3a, 0x0a, 0x49, 0x06, 0x24, 0x5c, 0xc2, 0xd3, 0xac, 0x62, 0x91, 0x95, 0xe4, 0x79, 0xe7, 0xc8, 0x37, 0x6d, 0x8d, 0xd5, 0x4e, 0xa9, 0x6c, 0x56, 
0xf4, 0xea, 0x65, 0x7a, 0xae, 0x08, 0xba, 0x78, 0x25, 0x2e, 0x1c, 0xa6, 0xb4, 0xc6, 0xe8, 0xdd, 0x74, 0x1f, 0x4b, 0xbd, 0x8b, 0x8a, 0x70, 0x3e, 0xb5, 0x66, 0x48, 0x03, 0xf6, 0x0e, 0x61, 0x35, 0x57, 0xb9, 0x86, 0xc1, 0x1d, 0x9e, 0xe1, 0xf8, 0x98, 0x11, 0x69, 0xd9, 0x8e, 0x94, 0x9b, 0x1e, 0x87, 0xe9, 0xce, 0x55, 0x28, 0xdf, 0x8c, 0xa1, 0x89, 0x0d, 0xbf, 0xe6, 0x42, 0x68, 0x41, 0x99, 0x2d, 0x0f, 0xb0, 0x54, 0xbb, 0x16}; __device__ uchar InvSbox[256] = { // inverse s-box 0x52, 0x09, 0x6a, 0xd5, 0x30, 0x36, 0xa5, 0x38, 0xbf, 0x40, 0xa3, 0x9e, 0x81, 0xf3, 0xd7, 0xfb, 0x7c, 0xe3, 0x39, 0x82, 0x9b, 0x2f, 0xff, 0x87, 0x34, 0x8e, 0x43, 0x44, 0xc4, 0xde, 0xe9, 0xcb, 0x54, 0x7b, 0x94, 0x32, 0xa6, 0xc2, 0x23, 0x3d, 0xee, 0x4c, 0x95, 0x0b, 0x42, 0xfa, 0xc3, 0x4e, 0x08, 0x2e, 0xa1, 0x66, 0x28, 0xd9, 0x24, 0xb2, 0x76, 0x5b, 0xa2, 0x49, 0x6d, 0x8b, 0xd1, 0x25, 0x72, 0xf8, 0xf6, 0x64, 0x86, 0x68, 0x98, 0x16, 0xd4, 0xa4, 0x5c, 0xcc, 0x5d, 0x65, 0xb6, 0x92, 0x6c, 0x70, 0x48, 0x50, 0xfd, 0xed, 0xb9, 0xda, 0x5e, 0x15, 0x46, 0x57, 0xa7, 0x8d, 0x9d, 0x84, 0x90, 0xd8, 0xab, 0x00, 0x8c, 0xbc, 0xd3, 0x0a, 0xf7, 0xe4, 0x58, 0x05, 0xb8, 0xb3, 0x45, 0x06, 0xd0, 0x2c, 0x1e, 0x8f, 0xca, 0x3f, 0x0f, 0x02, 0xc1, 0xaf, 0xbd, 0x03, 0x01, 0x13, 0x8a, 0x6b, 0x3a, 0x91, 0x11, 0x41, 0x4f, 0x67, 0xdc, 0xea, 0x97, 0xf2, 0xcf, 0xce, 0xf0, 0xb4, 0xe6, 0x73, 0x96, 0xac, 0x74, 0x22, 0xe7, 0xad, 0x35, 0x85, 0xe2, 0xf9, 0x37, 0xe8, 0x1c, 0x75, 0xdf, 0x6e, 0x47, 0xf1, 0x1a, 0x71, 0x1d, 0x29, 0xc5, 0x89, 0x6f, 0xb7, 0x62, 0x0e, 0xaa, 0x18, 0xbe, 0x1b, 0xfc, 0x56, 0x3e, 0x4b, 0xc6, 0xd2, 0x79, 0x20, 0x9a, 0xdb, 0xc0, 0xfe, 0x78, 0xcd, 0x5a, 0xf4, 0x1f, 0xdd, 0xa8, 0x33, 0x88, 0x07, 0xc7, 0x31, 0xb1, 0x12, 0x10, 0x59, 0x27, 0x80, 0xec, 0x5f, 0x60, 0x51, 0x7f, 0xa9, 0x19, 0xb5, 0x4a, 0x0d, 0x2d, 0xe5, 0x7a, 0x9f, 0x93, 0xc9, 0x9c, 0xef, 0xa0, 0xe0, 0x3b, 0x4d, 0xae, 0x2a, 0xf5, 0xb0, 0xc8, 0xeb, 0xbb, 0x3c, 0x83, 0x53, 0x99, 0x61, 0x17, 0x2b, 0x04, 0x7e, 0xba, 0x77, 0xd6, 0x26, 0xe1, 0x69, 0x14, 0x63, 0x55, 0x21, 0x0c, 0x7d}; // combined Xtimes2[Sbox[]] __device__ uchar Xtime2Sbox[256] = { 0xc6, 0xf8, 0xee, 0xf6, 0xff, 0xd6, 0xde, 0x91, 0x60, 0x02, 0xce, 0x56, 0xe7, 0xb5, 0x4d, 0xec, 0x8f, 0x1f, 0x89, 0xfa, 0xef, 0xb2, 0x8e, 0xfb, 0x41, 0xb3, 0x5f, 0x45, 0x23, 0x53, 0xe4, 0x9b, 0x75, 0xe1, 0x3d, 0x4c, 0x6c, 0x7e, 0xf5, 0x83, 0x68, 0x51, 0xd1, 0xf9, 0xe2, 0xab, 0x62, 0x2a, 0x08, 0x95, 0x46, 0x9d, 0x30, 0x37, 0x0a, 0x2f, 0x0e, 0x24, 0x1b, 0xdf, 0xcd, 0x4e, 0x7f, 0xea, 0x12, 0x1d, 0x58, 0x34, 0x36, 0xdc, 0xb4, 0x5b, 0xa4, 0x76, 0xb7, 0x7d, 0x52, 0xdd, 0x5e, 0x13, 0xa6, 0xb9, 0x00, 0xc1, 0x40, 0xe3, 0x79, 0xb6, 0xd4, 0x8d, 0x67, 0x72, 0x94, 0x98, 0xb0, 0x85, 0xbb, 0xc5, 0x4f, 0xed, 0x86, 0x9a, 0x66, 0x11, 0x8a, 0xe9, 0x04, 0xfe, 0xa0, 0x78, 0x25, 0x4b, 0xa2, 0x5d, 0x80, 0x05, 0x3f, 0x21, 0x70, 0xf1, 0x63, 0x77, 0xaf, 0x42, 0x20, 0xe5, 0xfd, 0xbf, 0x81, 0x18, 0x26, 0xc3, 0xbe, 0x35, 0x88, 0x2e, 0x93, 0x55, 0xfc, 0x7a, 0xc8, 0xba, 0x32, 0xe6, 0xc0, 0x19, 0x9e, 0xa3, 0x44, 0x54, 0x3b, 0x0b, 0x8c, 0xc7, 0x6b, 0x28, 0xa7, 0xbc, 0x16, 0xad, 0xdb, 0x64, 0x74, 0x14, 0x92, 0x0c, 0x48, 0xb8, 0x9f, 0xbd, 0x43, 0xc4, 0x39, 0x31, 0xd3, 0xf2, 0xd5, 0x8b, 0x6e, 0xda, 0x01, 0xb1, 0x9c, 0x49, 0xd8, 0xac, 0xf3, 0xcf, 0xca, 0xf4, 0x47, 0x10, 0x6f, 0xf0, 0x4a, 0x5c, 0x38, 0x57, 0x73, 0x97, 0xcb, 0xa1, 0xe8, 0x3e, 0x96, 0x61, 0x0d, 0x0f, 0xe0, 0x7c, 0x71, 0xcc, 0x90, 0x06, 0xf7, 0x1c, 0xc2, 0x6a, 0xae, 0x69, 0x17, 0x99, 0x3a, 0x27, 0xd9, 0xeb, 0x2b, 0x22, 0xd2, 0xa9, 0x07, 0x33, 0x2d, 0x3c, 0x15, 0xc9, 0x87, 0xaa, 0x50, 0xa5, 0x03, 0x59, 0x09, 0x1a, 0x65, 0xd7, 
0x84, 0xd0, 0x82, 0x29, 0x5a, 0x1e, 0x7b, 0xa8, 0x6d, 0x2c }; // combined Xtimes3[Sbox[]] __device__ uchar Xtime3Sbox[256] = { 0xa5, 0x84, 0x99, 0x8d, 0x0d, 0xbd, 0xb1, 0x54, 0x50, 0x03, 0xa9, 0x7d, 0x19, 0x62, 0xe6, 0x9a, 0x45, 0x9d, 0x40, 0x87, 0x15, 0xeb, 0xc9, 0x0b, 0xec, 0x67, 0xfd, 0xea, 0xbf, 0xf7, 0x96, 0x5b, 0xc2, 0x1c, 0xae, 0x6a, 0x5a, 0x41, 0x02, 0x4f, 0x5c, 0xf4, 0x34, 0x08, 0x93, 0x73, 0x53, 0x3f, 0x0c, 0x52, 0x65, 0x5e, 0x28, 0xa1, 0x0f, 0xb5, 0x09, 0x36, 0x9b, 0x3d, 0x26, 0x69, 0xcd, 0x9f, 0x1b, 0x9e, 0x74, 0x2e, 0x2d, 0xb2, 0xee, 0xfb, 0xf6, 0x4d, 0x61, 0xce, 0x7b, 0x3e, 0x71, 0x97, 0xf5, 0x68, 0x00, 0x2c, 0x60, 0x1f, 0xc8, 0xed, 0xbe, 0x46, 0xd9, 0x4b, 0xde, 0xd4, 0xe8, 0x4a, 0x6b, 0x2a, 0xe5, 0x16, 0xc5, 0xd7, 0x55, 0x94, 0xcf, 0x10, 0x06, 0x81, 0xf0, 0x44, 0xba, 0xe3, 0xf3, 0xfe, 0xc0, 0x8a, 0xad, 0xbc, 0x48, 0x04, 0xdf, 0xc1, 0x75, 0x63, 0x30, 0x1a, 0x0e, 0x6d, 0x4c, 0x14, 0x35, 0x2f, 0xe1, 0xa2, 0xcc, 0x39, 0x57, 0xf2, 0x82, 0x47, 0xac, 0xe7, 0x2b, 0x95, 0xa0, 0x98, 0xd1, 0x7f, 0x66, 0x7e, 0xab, 0x83, 0xca, 0x29, 0xd3, 0x3c, 0x79, 0xe2, 0x1d, 0x76, 0x3b, 0x56, 0x4e, 0x1e, 0xdb, 0x0a, 0x6c, 0xe4, 0x5d, 0x6e, 0xef, 0xa6, 0xa8, 0xa4, 0x37, 0x8b, 0x32, 0x43, 0x59, 0xb7, 0x8c, 0x64, 0xd2, 0xe0, 0xb4, 0xfa, 0x07, 0x25, 0xaf, 0x8e, 0xe9, 0x18, 0xd5, 0x88, 0x6f, 0x72, 0x24, 0xf1, 0xc7, 0x51, 0x23, 0x7c, 0x9c, 0x21, 0xdd, 0xdc, 0x86, 0x85, 0x90, 0x42, 0xc4, 0xaa, 0xd8, 0x05, 0x01, 0x12, 0xa3, 0x5f, 0xf9, 0xd0, 0x91, 0x58, 0x27, 0xb9, 0x38, 0x13, 0xb3, 0x33, 0xbb, 0x70, 0x89, 0xa7, 0xb6, 0x22, 0x92, 0x20, 0x49, 0xff, 0x78, 0x7a, 0x8f, 0xf8, 0x80, 0x17, 0xda, 0x31, 0xc6, 0xb8, 0xc3, 0xb0, 0x77, 0x11, 0xcb, 0xfc, 0xd6, 0x3a }; // modular multiplication tables // based on: // Xtime2[x] = (x & 0x80 ? 0x1b : 0) ^ (x + x) // Xtime3[x] = x^Xtime2[x]; __device__ uchar Xtime2[256] = { 0x00, 0x02, 0x04, 0x06, 0x08, 0x0a, 0x0c, 0x0e, 0x10, 0x12, 0x14, 0x16, 0x18, 0x1a, 0x1c, 0x1e, 0x20, 0x22, 0x24, 0x26, 0x28, 0x2a, 0x2c, 0x2e, 0x30, 0x32, 0x34, 0x36, 0x38, 0x3a, 0x3c, 0x3e, 0x40, 0x42, 0x44, 0x46, 0x48, 0x4a, 0x4c, 0x4e, 0x50, 0x52, 0x54, 0x56, 0x58, 0x5a, 0x5c, 0x5e, 0x60, 0x62, 0x64, 0x66, 0x68, 0x6a, 0x6c, 0x6e, 0x70, 0x72, 0x74, 0x76, 0x78, 0x7a, 0x7c, 0x7e, 0x80, 0x82, 0x84, 0x86, 0x88, 0x8a, 0x8c, 0x8e, 0x90, 0x92, 0x94, 0x96, 0x98, 0x9a, 0x9c, 0x9e, 0xa0, 0xa2, 0xa4, 0xa6, 0xa8, 0xaa, 0xac, 0xae, 0xb0, 0xb2, 0xb4, 0xb6, 0xb8, 0xba, 0xbc, 0xbe, 0xc0, 0xc2, 0xc4, 0xc6, 0xc8, 0xca, 0xcc, 0xce, 0xd0, 0xd2, 0xd4, 0xd6, 0xd8, 0xda, 0xdc, 0xde, 0xe0, 0xe2, 0xe4, 0xe6, 0xe8, 0xea, 0xec, 0xee, 0xf0, 0xf2, 0xf4, 0xf6, 0xf8, 0xfa, 0xfc, 0xfe, 0x1b, 0x19, 0x1f, 0x1d, 0x13, 0x11, 0x17, 0x15, 0x0b, 0x09, 0x0f, 0x0d, 0x03, 0x01, 0x07, 0x05, 0x3b, 0x39, 0x3f, 0x3d, 0x33, 0x31, 0x37, 0x35, 0x2b, 0x29, 0x2f, 0x2d, 0x23, 0x21, 0x27, 0x25, 0x5b, 0x59, 0x5f, 0x5d, 0x53, 0x51, 0x57, 0x55, 0x4b, 0x49, 0x4f, 0x4d, 0x43, 0x41, 0x47, 0x45, 0x7b, 0x79, 0x7f, 0x7d, 0x73, 0x71, 0x77, 0x75, 0x6b, 0x69, 0x6f, 0x6d, 0x63, 0x61, 0x67, 0x65, 0x9b, 0x99, 0x9f, 0x9d, 0x93, 0x91, 0x97, 0x95, 0x8b, 0x89, 0x8f, 0x8d, 0x83, 0x81, 0x87, 0x85, 0xbb, 0xb9, 0xbf, 0xbd, 0xb3, 0xb1, 0xb7, 0xb5, 0xab, 0xa9, 0xaf, 0xad, 0xa3, 0xa1, 0xa7, 0xa5, 0xdb, 0xd9, 0xdf, 0xdd, 0xd3, 0xd1, 0xd7, 0xd5, 0xcb, 0xc9, 0xcf, 0xcd, 0xc3, 0xc1, 0xc7, 0xc5, 0xfb, 0xf9, 0xff, 0xfd, 0xf3, 0xf1, 0xf7, 0xf5, 0xeb, 0xe9, 0xef, 0xed, 0xe3, 0xe1, 0xe7, 0xe5}; __device__ uchar Xtime9[256] = { 0x00, 0x09, 0x12, 0x1b, 0x24, 0x2d, 0x36, 0x3f, 0x48, 0x41, 0x5a, 0x53, 0x6c, 0x65, 0x7e, 0x77, 0x90, 0x99, 0x82, 0x8b, 0xb4, 0xbd, 0xa6, 0xaf, 0xd8, 0xd1, 0xca, 
0xc3, 0xfc, 0xf5, 0xee, 0xe7, 0x3b, 0x32, 0x29, 0x20, 0x1f, 0x16, 0x0d, 0x04, 0x73, 0x7a, 0x61, 0x68, 0x57, 0x5e, 0x45, 0x4c, 0xab, 0xa2, 0xb9, 0xb0, 0x8f, 0x86, 0x9d, 0x94, 0xe3, 0xea, 0xf1, 0xf8, 0xc7, 0xce, 0xd5, 0xdc, 0x76, 0x7f, 0x64, 0x6d, 0x52, 0x5b, 0x40, 0x49, 0x3e, 0x37, 0x2c, 0x25, 0x1a, 0x13, 0x08, 0x01, 0xe6, 0xef, 0xf4, 0xfd, 0xc2, 0xcb, 0xd0, 0xd9, 0xae, 0xa7, 0xbc, 0xb5, 0x8a, 0x83, 0x98, 0x91, 0x4d, 0x44, 0x5f, 0x56, 0x69, 0x60, 0x7b, 0x72, 0x05, 0x0c, 0x17, 0x1e, 0x21, 0x28, 0x33, 0x3a, 0xdd, 0xd4, 0xcf, 0xc6, 0xf9, 0xf0, 0xeb, 0xe2, 0x95, 0x9c, 0x87, 0x8e, 0xb1, 0xb8, 0xa3, 0xaa, 0xec, 0xe5, 0xfe, 0xf7, 0xc8, 0xc1, 0xda, 0xd3, 0xa4, 0xad, 0xb6, 0xbf, 0x80, 0x89, 0x92, 0x9b, 0x7c, 0x75, 0x6e, 0x67, 0x58, 0x51, 0x4a, 0x43, 0x34, 0x3d, 0x26, 0x2f, 0x10, 0x19, 0x02, 0x0b, 0xd7, 0xde, 0xc5, 0xcc, 0xf3, 0xfa, 0xe1, 0xe8, 0x9f, 0x96, 0x8d, 0x84, 0xbb, 0xb2, 0xa9, 0xa0, 0x47, 0x4e, 0x55, 0x5c, 0x63, 0x6a, 0x71, 0x78, 0x0f, 0x06, 0x1d, 0x14, 0x2b, 0x22, 0x39, 0x30, 0x9a, 0x93, 0x88, 0x81, 0xbe, 0xb7, 0xac, 0xa5, 0xd2, 0xdb, 0xc0, 0xc9, 0xf6, 0xff, 0xe4, 0xed, 0x0a, 0x03, 0x18, 0x11, 0x2e, 0x27, 0x3c, 0x35, 0x42, 0x4b, 0x50, 0x59, 0x66, 0x6f, 0x74, 0x7d, 0xa1, 0xa8, 0xb3, 0xba, 0x85, 0x8c, 0x97, 0x9e, 0xe9, 0xe0, 0xfb, 0xf2, 0xcd, 0xc4, 0xdf, 0xd6, 0x31, 0x38, 0x23, 0x2a, 0x15, 0x1c, 0x07, 0x0e, 0x79, 0x70, 0x6b, 0x62, 0x5d, 0x54, 0x4f, 0x46}; __device__ uchar XtimeB[256] = { 0x00, 0x0b, 0x16, 0x1d, 0x2c, 0x27, 0x3a, 0x31, 0x58, 0x53, 0x4e, 0x45, 0x74, 0x7f, 0x62, 0x69, 0xb0, 0xbb, 0xa6, 0xad, 0x9c, 0x97, 0x8a, 0x81, 0xe8, 0xe3, 0xfe, 0xf5, 0xc4, 0xcf, 0xd2, 0xd9, 0x7b, 0x70, 0x6d, 0x66, 0x57, 0x5c, 0x41, 0x4a, 0x23, 0x28, 0x35, 0x3e, 0x0f, 0x04, 0x19, 0x12, 0xcb, 0xc0, 0xdd, 0xd6, 0xe7, 0xec, 0xf1, 0xfa, 0x93, 0x98, 0x85, 0x8e, 0xbf, 0xb4, 0xa9, 0xa2, 0xf6, 0xfd, 0xe0, 0xeb, 0xda, 0xd1, 0xcc, 0xc7, 0xae, 0xa5, 0xb8, 0xb3, 0x82, 0x89, 0x94, 0x9f, 0x46, 0x4d, 0x50, 0x5b, 0x6a, 0x61, 0x7c, 0x77, 0x1e, 0x15, 0x08, 0x03, 0x32, 0x39, 0x24, 0x2f, 0x8d, 0x86, 0x9b, 0x90, 0xa1, 0xaa, 0xb7, 0xbc, 0xd5, 0xde, 0xc3, 0xc8, 0xf9, 0xf2, 0xef, 0xe4, 0x3d, 0x36, 0x2b, 0x20, 0x11, 0x1a, 0x07, 0x0c, 0x65, 0x6e, 0x73, 0x78, 0x49, 0x42, 0x5f, 0x54, 0xf7, 0xfc, 0xe1, 0xea, 0xdb, 0xd0, 0xcd, 0xc6, 0xaf, 0xa4, 0xb9, 0xb2, 0x83, 0x88, 0x95, 0x9e, 0x47, 0x4c, 0x51, 0x5a, 0x6b, 0x60, 0x7d, 0x76, 0x1f, 0x14, 0x09, 0x02, 0x33, 0x38, 0x25, 0x2e, 0x8c, 0x87, 0x9a, 0x91, 0xa0, 0xab, 0xb6, 0xbd, 0xd4, 0xdf, 0xc2, 0xc9, 0xf8, 0xf3, 0xee, 0xe5, 0x3c, 0x37, 0x2a, 0x21, 0x10, 0x1b, 0x06, 0x0d, 0x64, 0x6f, 0x72, 0x79, 0x48, 0x43, 0x5e, 0x55, 0x01, 0x0a, 0x17, 0x1c, 0x2d, 0x26, 0x3b, 0x30, 0x59, 0x52, 0x4f, 0x44, 0x75, 0x7e, 0x63, 0x68, 0xb1, 0xba, 0xa7, 0xac, 0x9d, 0x96, 0x8b, 0x80, 0xe9, 0xe2, 0xff, 0xf4, 0xc5, 0xce, 0xd3, 0xd8, 0x7a, 0x71, 0x6c, 0x67, 0x56, 0x5d, 0x40, 0x4b, 0x22, 0x29, 0x34, 0x3f, 0x0e, 0x05, 0x18, 0x13, 0xca, 0xc1, 0xdc, 0xd7, 0xe6, 0xed, 0xf0, 0xfb, 0x92, 0x99, 0x84, 0x8f, 0xbe, 0xb5, 0xa8, 0xa3}; __device__ uchar XtimeD[256] = { 0x00, 0x0d, 0x1a, 0x17, 0x34, 0x39, 0x2e, 0x23, 0x68, 0x65, 0x72, 0x7f, 0x5c, 0x51, 0x46, 0x4b, 0xd0, 0xdd, 0xca, 0xc7, 0xe4, 0xe9, 0xfe, 0xf3, 0xb8, 0xb5, 0xa2, 0xaf, 0x8c, 0x81, 0x96, 0x9b, 0xbb, 0xb6, 0xa1, 0xac, 0x8f, 0x82, 0x95, 0x98, 0xd3, 0xde, 0xc9, 0xc4, 0xe7, 0xea, 0xfd, 0xf0, 0x6b, 0x66, 0x71, 0x7c, 0x5f, 0x52, 0x45, 0x48, 0x03, 0x0e, 0x19, 0x14, 0x37, 0x3a, 0x2d, 0x20, 0x6d, 0x60, 0x77, 0x7a, 0x59, 0x54, 0x43, 0x4e, 0x05, 0x08, 0x1f, 0x12, 0x31, 0x3c, 0x2b, 0x26, 0xbd, 0xb0, 0xa7, 0xaa, 0x89, 0x84, 0x93, 0x9e, 0xd5, 0xd8, 0xcf, 0xc2, 0xe1, 0xec, 0xfb, 0xf6, 
0xd6, 0xdb, 0xcc, 0xc1, 0xe2, 0xef, 0xf8, 0xf5, 0xbe, 0xb3, 0xa4, 0xa9, 0x8a, 0x87, 0x90, 0x9d, 0x06, 0x0b, 0x1c, 0x11, 0x32, 0x3f, 0x28, 0x25, 0x6e, 0x63, 0x74, 0x79, 0x5a, 0x57, 0x40, 0x4d, 0xda, 0xd7, 0xc0, 0xcd, 0xee, 0xe3, 0xf4, 0xf9, 0xb2, 0xbf, 0xa8, 0xa5, 0x86, 0x8b, 0x9c, 0x91, 0x0a, 0x07, 0x10, 0x1d, 0x3e, 0x33, 0x24, 0x29, 0x62, 0x6f, 0x78, 0x75, 0x56, 0x5b, 0x4c, 0x41, 0x61, 0x6c, 0x7b, 0x76, 0x55, 0x58, 0x4f, 0x42, 0x09, 0x04, 0x13, 0x1e, 0x3d, 0x30, 0x27, 0x2a, 0xb1, 0xbc, 0xab, 0xa6, 0x85, 0x88, 0x9f, 0x92, 0xd9, 0xd4, 0xc3, 0xce, 0xed, 0xe0, 0xf7, 0xfa, 0xb7, 0xba, 0xad, 0xa0, 0x83, 0x8e, 0x99, 0x94, 0xdf, 0xd2, 0xc5, 0xc8, 0xeb, 0xe6, 0xf1, 0xfc, 0x67, 0x6a, 0x7d, 0x70, 0x53, 0x5e, 0x49, 0x44, 0x0f, 0x02, 0x15, 0x18, 0x3b, 0x36, 0x21, 0x2c, 0x0c, 0x01, 0x16, 0x1b, 0x38, 0x35, 0x22, 0x2f, 0x64, 0x69, 0x7e, 0x73, 0x50, 0x5d, 0x4a, 0x47, 0xdc, 0xd1, 0xc6, 0xcb, 0xe8, 0xe5, 0xf2, 0xff, 0xb4, 0xb9, 0xae, 0xa3, 0x80, 0x8d, 0x9a, 0x97}; __device__ uchar XtimeE[256] = { 0x00, 0x0e, 0x1c, 0x12, 0x38, 0x36, 0x24, 0x2a, 0x70, 0x7e, 0x6c, 0x62, 0x48, 0x46, 0x54, 0x5a, 0xe0, 0xee, 0xfc, 0xf2, 0xd8, 0xd6, 0xc4, 0xca, 0x90, 0x9e, 0x8c, 0x82, 0xa8, 0xa6, 0xb4, 0xba, 0xdb, 0xd5, 0xc7, 0xc9, 0xe3, 0xed, 0xff, 0xf1, 0xab, 0xa5, 0xb7, 0xb9, 0x93, 0x9d, 0x8f, 0x81, 0x3b, 0x35, 0x27, 0x29, 0x03, 0x0d, 0x1f, 0x11, 0x4b, 0x45, 0x57, 0x59, 0x73, 0x7d, 0x6f, 0x61, 0xad, 0xa3, 0xb1, 0xbf, 0x95, 0x9b, 0x89, 0x87, 0xdd, 0xd3, 0xc1, 0xcf, 0xe5, 0xeb, 0xf9, 0xf7, 0x4d, 0x43, 0x51, 0x5f, 0x75, 0x7b, 0x69, 0x67, 0x3d, 0x33, 0x21, 0x2f, 0x05, 0x0b, 0x19, 0x17, 0x76, 0x78, 0x6a, 0x64, 0x4e, 0x40, 0x52, 0x5c, 0x06, 0x08, 0x1a, 0x14, 0x3e, 0x30, 0x22, 0x2c, 0x96, 0x98, 0x8a, 0x84, 0xae, 0xa0, 0xb2, 0xbc, 0xe6, 0xe8, 0xfa, 0xf4, 0xde, 0xd0, 0xc2, 0xcc, 0x41, 0x4f, 0x5d, 0x53, 0x79, 0x77, 0x65, 0x6b, 0x31, 0x3f, 0x2d, 0x23, 0x09, 0x07, 0x15, 0x1b, 0xa1, 0xaf, 0xbd, 0xb3, 0x99, 0x97, 0x85, 0x8b, 0xd1, 0xdf, 0xcd, 0xc3, 0xe9, 0xe7, 0xf5, 0xfb, 0x9a, 0x94, 0x86, 0x88, 0xa2, 0xac, 0xbe, 0xb0, 0xea, 0xe4, 0xf6, 0xf8, 0xd2, 0xdc, 0xce, 0xc0, 0x7a, 0x74, 0x66, 0x68, 0x42, 0x4c, 0x5e, 0x50, 0x0a, 0x04, 0x16, 0x18, 0x32, 0x3c, 0x2e, 0x20, 0xec, 0xe2, 0xf0, 0xfe, 0xd4, 0xda, 0xc8, 0xc6, 0x9c, 0x92, 0x80, 0x8e, 0xa4, 0xaa, 0xb8, 0xb6, 0x0c, 0x02, 0x10, 0x1e, 0x34, 0x3a, 0x28, 0x26, 0x7c, 0x72, 0x60, 0x6e, 0x44, 0x4a, 0x58, 0x56, 0x37, 0x39, 0x2b, 0x25, 0x0f, 0x01, 0x13, 0x1d, 0x47, 0x49, 0x5b, 0x55, 0x7f, 0x71, 0x63, 0x6d, 0xd7, 0xd9, 0xcb, 0xc5, 0xef, 0xe1, 0xf3, 0xfd, 0xa7, 0xa9, 0xbb, 0xb5, 0x9f, 0x91, 0x83, 0x8d}; // exchanges columns in each of 4 rows // row0 - unchanged, row1- shifted left 1, // row2 - shifted left 2 and row3 - shifted left 3 __device__ void ShiftRows (uchar *state) { uchar tmp; // just substitute row 0 state[0] = Sbox[state[0]], state[4] = Sbox[state[4]]; state[8] = Sbox[state[8]], state[12] = Sbox[state[12]]; // rotate row 1 tmp = Sbox[state[1]], state[1] = Sbox[state[5]]; state[5] = Sbox[state[9]], state[9] = Sbox[state[13]], state[13] = tmp; // rotate row 2 tmp = Sbox[state[2]], state[2] = Sbox[state[10]], state[10] = tmp; tmp = Sbox[state[6]], state[6] = Sbox[state[14]], state[14] = tmp; // rotate row 3 tmp = Sbox[state[15]], state[15] = Sbox[state[11]]; state[11] = Sbox[state[7]], state[7] = Sbox[state[3]], state[3] = tmp; } // restores columns in each of 4 rows // row0 - unchanged, row1- shifted right 1, // row2 - shifted right 2 and row3 - shifted right 3 __device__ void InvShiftRows (uchar *state) { uchar tmp; // restore row 0 state[0] = InvSbox[state[0]], state[4] = InvSbox[state[4]]; 
state[8] = InvSbox[state[8]], state[12] = InvSbox[state[12]]; // restore row 1 tmp = InvSbox[state[13]], state[13] = InvSbox[state[9]]; state[9] = InvSbox[state[5]], state[5] = InvSbox[state[1]], state[1] = tmp; // restore row 2 tmp = InvSbox[state[2]], state[2] = InvSbox[state[10]], state[10] = tmp; tmp = InvSbox[state[6]], state[6] = InvSbox[state[14]], state[14] = tmp; // restore row 3 tmp = InvSbox[state[3]], state[3] = InvSbox[state[7]]; state[7] = InvSbox[state[11]], state[11] = InvSbox[state[15]], state[15] = tmp; } // recombine and mix each row in a column __device__ void MixSubColumns (uchar *state) { uchar tmp[4 * Nb]; // mixing column 0 tmp[0] = Xtime2Sbox[state[0]] ^ Xtime3Sbox[state[5]] ^ Sbox[state[10]] ^ Sbox[state[15]]; tmp[1] = Sbox[state[0]] ^ Xtime2Sbox[state[5]] ^ Xtime3Sbox[state[10]] ^ Sbox[state[15]]; tmp[2] = Sbox[state[0]] ^ Sbox[state[5]] ^ Xtime2Sbox[state[10]] ^ Xtime3Sbox[state[15]]; tmp[3] = Xtime3Sbox[state[0]] ^ Sbox[state[5]] ^ Sbox[state[10]] ^ Xtime2Sbox[state[15]]; // mixing column 1 tmp[4] = Xtime2Sbox[state[4]] ^ Xtime3Sbox[state[9]] ^ Sbox[state[14]] ^ Sbox[state[3]]; tmp[5] = Sbox[state[4]] ^ Xtime2Sbox[state[9]] ^ Xtime3Sbox[state[14]] ^ Sbox[state[3]]; tmp[6] = Sbox[state[4]] ^ Sbox[state[9]] ^ Xtime2Sbox[state[14]] ^ Xtime3Sbox[state[3]]; tmp[7] = Xtime3Sbox[state[4]] ^ Sbox[state[9]] ^ Sbox[state[14]] ^ Xtime2Sbox[state[3]]; // mixing column 2 tmp[8] = Xtime2Sbox[state[8]] ^ Xtime3Sbox[state[13]] ^ Sbox[state[2]] ^ Sbox[state[7]]; tmp[9] = Sbox[state[8]] ^ Xtime2Sbox[state[13]] ^ Xtime3Sbox[state[2]] ^ Sbox[state[7]]; tmp[10] = Sbox[state[8]] ^ Sbox[state[13]] ^ Xtime2Sbox[state[2]] ^ Xtime3Sbox[state[7]]; tmp[11] = Xtime3Sbox[state[8]] ^ Sbox[state[13]] ^ Sbox[state[2]] ^ Xtime2Sbox[state[7]]; // mixing column 3 tmp[12] = Xtime2Sbox[state[12]] ^ Xtime3Sbox[state[1]] ^ Sbox[state[6]] ^ Sbox[state[11]]; tmp[13] = Sbox[state[12]] ^ Xtime2Sbox[state[1]] ^ Xtime3Sbox[state[6]] ^ Sbox[state[11]]; tmp[14] = Sbox[state[12]] ^ Sbox[state[1]] ^ Xtime2Sbox[state[6]] ^ Xtime3Sbox[state[11]]; tmp[15] = Xtime3Sbox[state[12]] ^ Sbox[state[1]] ^ Sbox[state[6]] ^ Xtime2Sbox[state[11]]; memcpy (state, tmp, sizeof(tmp)); } // restore and un-mix each row in a column __device__ void InvMixSubColumns (uchar *state) { uchar tmp[4 * Nb]; int i; // restore column 0 tmp[0] = XtimeE[state[0]] ^ XtimeB[state[1]] ^ XtimeD[state[2]] ^ Xtime9[state[3]]; tmp[5] = Xtime9[state[0]] ^ XtimeE[state[1]] ^ XtimeB[state[2]] ^ XtimeD[state[3]]; tmp[10] = XtimeD[state[0]] ^ Xtime9[state[1]] ^ XtimeE[state[2]] ^ XtimeB[state[3]]; tmp[15] = XtimeB[state[0]] ^ XtimeD[state[1]] ^ Xtime9[state[2]] ^ XtimeE[state[3]]; // restore column 1 tmp[4] = XtimeE[state[4]] ^ XtimeB[state[5]] ^ XtimeD[state[6]] ^ Xtime9[state[7]]; tmp[9] = Xtime9[state[4]] ^ XtimeE[state[5]] ^ XtimeB[state[6]] ^ XtimeD[state[7]]; tmp[14] = XtimeD[state[4]] ^ Xtime9[state[5]] ^ XtimeE[state[6]] ^ XtimeB[state[7]]; tmp[3] = XtimeB[state[4]] ^ XtimeD[state[5]] ^ Xtime9[state[6]] ^ XtimeE[state[7]]; // restore column 2 tmp[8] = XtimeE[state[8]] ^ XtimeB[state[9]] ^ XtimeD[state[10]] ^ Xtime9[state[11]]; tmp[13] = Xtime9[state[8]] ^ XtimeE[state[9]] ^ XtimeB[state[10]] ^ XtimeD[state[11]]; tmp[2] = XtimeD[state[8]] ^ Xtime9[state[9]] ^ XtimeE[state[10]] ^ XtimeB[state[11]]; tmp[7] = XtimeB[state[8]] ^ XtimeD[state[9]] ^ Xtime9[state[10]] ^ XtimeE[state[11]]; // restore column 3 tmp[12] = XtimeE[state[12]] ^ XtimeB[state[13]] ^ XtimeD[state[14]] ^ Xtime9[state[15]]; tmp[1] = Xtime9[state[12]] ^ XtimeE[state[13]] ^ 
XtimeB[state[14]] ^ XtimeD[state[15]]; tmp[6] = XtimeD[state[12]] ^ Xtime9[state[13]] ^ XtimeE[state[14]] ^ XtimeB[state[15]]; tmp[11] = XtimeB[state[12]] ^ XtimeD[state[13]] ^ Xtime9[state[14]] ^ XtimeE[state[15]]; for( i=0; i < 4 * Nb; i++ ) state[i] = InvSbox[tmp[i]]; } // encrypt/decrypt columns of the key // n.b. you can replace this with // byte-wise xor if you wish. __device__ void AddRoundKey (unsigned *state, unsigned *key) { int idx; for( idx = 0; idx < 4; idx++ ) state[idx] ^= key[idx]; } uchar Rcon[11] = { 0x00, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x1b, 0x36}; // produce Nb bytes for each round void ExpandKey (uchar *key, uchar *expkey) { uchar tmp0, tmp1, tmp2, tmp3, tmp4; unsigned idx; memcpy (expkey, key, Nk * 4); for( idx = Nk; idx < Nb * (Nr + 1); idx++ ) { tmp0 = expkey[4*idx - 4]; tmp1 = expkey[4*idx - 3]; tmp2 = expkey[4*idx - 2]; tmp3 = expkey[4*idx - 1]; if( !(idx % Nk) ) { tmp4 = tmp3; tmp3 = cpuSbox[tmp0]; tmp0 = cpuSbox[tmp1] ^ Rcon[idx/Nk]; tmp1 = cpuSbox[tmp2]; tmp2 = cpuSbox[tmp4]; } else if( Nk > 6 && idx % Nk == 4 ) { tmp0 = cpuSbox[tmp0]; tmp1 = cpuSbox[tmp1]; tmp2 = cpuSbox[tmp2]; tmp3 = cpuSbox[tmp3]; } expkey[4*idx+0] = expkey[4*idx - 4*Nk + 0] ^ tmp0; expkey[4*idx+1] = expkey[4*idx - 4*Nk + 1] ^ tmp1; expkey[4*idx+2] = expkey[4*idx - 4*Nk + 2] ^ tmp2; expkey[4*idx+3] = expkey[4*idx - 4*Nk + 3] ^ tmp3; } } // encrypt one 128 bit block /*void Encrypt (uchar *in, uchar *expkey, uchar *out) { uchar state[Nb * 4]; unsigned round; memcpy (state, in, Nb * 4); AddRoundKey ((unsigned *)state, (unsigned *)expkey); for( round = 1; round < Nr + 1; round++ ) { if( round < Nr ) MixSubColumns (state); else ShiftRows (state); AddRoundKey ((unsigned *)state, (unsigned *)expkey + round * Nb); } memcpy (out, state, sizeof(state)); }*/ /*void Decrypt (uchar *in, uchar *expkey, uchar *out) { uchar state[Nb * 4]; unsigned round; memcpy (state, in, sizeof(state)); AddRoundKey ((unsigned *)state, (unsigned *)expkey + Nr * Nb); InvShiftRows(state); for( round = Nr; round--; ) { AddRoundKey ((unsigned *)state, (unsigned *)expkey + round * Nb); if( round ) InvMixSubColumns (state); } memcpy (out, state, sizeof(state)); }*/ #include <stdio.h> #include <fcntl.h> uchar* in = (uchar *) malloc(BUF_SIZE); uchar key[] = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f}; uchar* out = (uchar *) malloc(BUF_SIZE); #ifndef unix void rd_clock (__int64 *ans) { unsigned dwLow, dwHigh; __asm { rdtsc mov dwLow, eax mov dwHigh, edx } *ans = (__int64)dwHigh << 32 | (__int64)dwLow; } #else typedef long long __int64; void rd_clock (__int64 *ans) { unsigned long long dwBoth; __asm__ volatile(".byte 0x0f, 0x31" : "=A"(dwBoth)); *ans = dwBoth; } #endif inline unsigned long int monotonicTime(void) { const unsigned long int NS_PER_SEC = 1000 * 1000 * 1000; struct timespec now; clock_gettime(CLOCK_MONOTONIC, &now); return now.tv_sec * NS_PER_SEC + now.tv_nsec; } /*void decrypt (char *mykey, char *name) { uchar expkey[4 * Nb * (Nr + 1)]; FILE *fd = fopen (name, "rb"); int ch, idx = 0; strncpy ((char *) key, mykey, sizeof(key)); ExpandKey (key, expkey); while( ch = getc(fd), ch != EOF ) { in[idx++] = ch; if( idx % 16 ) continue; Decrypt (in, expkey, out); for( idx = 0; idx < 16; idx++ ) putchar (out[idx]); idx = 0; } }*/ __global__ void gpuEncrypt(uchar* in, uchar* expkey, uchar *out, int idx) { uchar state[4 * Nb]; // array of 16 //memcpy (state, in, Nb * 
4); //int numOfBlockKey = idx/16; //threadIdx.x*16 for(int i = 16*threadIdx.x+ 16*blockIdx.x*blockDim.x; i < idx; i += 16*blockDim.x*gridDim.x) { //printf("testing gpuIn %d", in[i]); memcpy (state, &in[i], Nb * 4); //threadIdx.x*16+ 16*blockIdx.x*blockDim.x; 16 is number of data that each thread handles AddRoundKey ((unsigned *)state, (unsigned *)expkey); for( unsigned round = 1; round < Nr + 1; round++ ) { if( round < Nr ) MixSubColumns (state); else ShiftRows (state); AddRoundKey ((unsigned *)state, (unsigned *)expkey + round * Nb); } memcpy (&out[i], state, sizeof(state)); //16*blockIdx.x*blockDim.x+threadIdx.x*16 } //printf("testing gpuOut %d", out[0]); } void encrypt (char *mykey, char *name) { uchar expkey[4 * Nb * (Nr + 1)]; FILE *fd = fopen (name, "rb"); int ch, idx = 0; strncpy ((char *)key, mykey, sizeof(key)); ExpandKey (key, expkey); while( ch = getc(fd), ch != EOF ) // insert all cha into in[] arr { if(idx == BUF_SIZE) { cout <<"File is Too Big!"<<endl; exit(0); } in[idx++] = ch; } if(idx > 0 ) while(idx % 16 != 0) in[idx++] = 0; else return; //allocate Memory at CPU uchar* gpuIn; uchar* gpuExpkey; uchar* gpuOut; cudaMalloc(&gpuIn, BUF_SIZE*sizeof(uchar)); cudaMalloc(&gpuOut, BUF_SIZE*sizeof(uchar)); cudaMalloc(&gpuExpkey, 4 * Nb * (Nr + 1)*sizeof(uchar)); //copy data from host to device cudaMemcpy(gpuIn, in, BUF_SIZE*sizeof(uchar), cudaMemcpyHostToDevice); cudaMemcpy(gpuExpkey, expkey, 4 * Nb * (Nr + 1)*sizeof(uchar), cudaMemcpyHostToDevice); // Cuda running gpuEncrypt<<<14,256>>>(gpuIn, gpuExpkey, gpuOut, idx); // copy to out from device to host cudaMemcpy(out, gpuOut, BUF_SIZE * sizeof(uchar) , cudaMemcpyDeviceToHost); for(int i = 0; i < idx; i++) putchar(out[i]); } uchar expkey[4 * Nb * (Nr + 1)]; void mrandom (int, char *); unsigned xrandom (void); int aescycles () { __int64 start, end; int t; do { rd_clock(&start); //Encrypt (in, expkey, out); rd_clock (&end); t = end - start; } while( t<= 0 || t>= 4000); return t; } int bestx (int b, int loops) { int bestx = 0, bestxt = 0; int x, xt, i, j; for( x = 0; x < 256; x++ ) { xt = 0; for( i = 0; i < loops; i++ ) { for( j = 0; j < 16; j++ ) in[j] = xrandom() >> 16; in[b] = x; xt += aescycles(); xt += aescycles(); xt += aescycles(); xt += aescycles(); xt += aescycles(); } if( xt > bestxt ) bestx = x, bestxt = xt; } return bestx; } int main (int argc, char *argv[]) { #ifndef unix extern int __cdecl _setmode (int, int); _setmode (_fileno(stdout), _O_BINARY); #endif switch( argv[1][0] ) { //case 'c': certify(); break; case 'e': encrypt(argv[2], argv[3]); break; //case 'd': decrypt(argv[2], argv[3]); break;; //case 's': sample(); break; } } /* * The package generates far better random numbers than a linear * congruential generator. The random number generation technique * is a linear feedback shift register approach. In this approach, * the least significant bit of all the numbers in the RandTbl table * will act as a linear feedback shift register, and will have period * of approximately 2^96 - 1. * */ #define RAND_order (7 * sizeof(unsigned)) #define RAND_size (96 * sizeof(unsigned)) uchar RandTbl[RAND_size + RAND_order]; int RandHead = 0; /* * random: x**96 + x**7 + x**6 + x**4 + x**3 + x**2 + 1 * * The basic operation is to add to the number at the head index * the XOR sum of the lower order terms in the polynomial. * Then the index is advanced to the next location cyclically * in the table. The value returned is the sum generated. 
* */ unsigned xrandom () { register unsigned fact; if( (RandHead -= sizeof(unsigned)) < 0 ) { RandHead = RAND_size - sizeof(unsigned); memcpy (RandTbl + RAND_size, RandTbl, RAND_order); } fact = *(unsigned *)(RandTbl + RandHead + 7 * sizeof(unsigned)); fact ^= *(unsigned *)(RandTbl + RandHead + 6 * sizeof(unsigned)); fact ^= *(unsigned *)(RandTbl + RandHead + 4 * sizeof(unsigned)); fact ^= *(unsigned *)(RandTbl + RandHead + 3 * sizeof(unsigned)); fact ^= *(unsigned *)(RandTbl + RandHead + 2 * sizeof(unsigned)); return *(unsigned *)(RandTbl + RandHead) += fact; } /* * mrandom: * Initialize the random number generator based on the given seed. * */ void mrandom (int len, char *ptr) { unsigned short rand = *ptr; int idx, bit = len * 4; memset (RandTbl, 0, sizeof(RandTbl)); RandHead = 0; while( rand *= 20077, rand += 11, bit-- ) if( ptr[bit >> 2] & (1 << (bit & 3)) ) for (idx = 0; idx < 5; idx++) { rand *= 20077, rand += 11; RandTbl[rand % 96 << 2] ^= 1; } for( idx = 0; idx < 96 * 63; idx++ ) xrandom (); }
2ecb29fdd5eb16ed37db5777692338ce393b63b3.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 1.5.0-beta3) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date July 2014 @precisions normal d -> s */ #include "common_magma.h" #include "commonblas_d.h" /* * daxpy computes c += alpha*b, where b and c are 16-element vectors. */ static __device__ void daxpy( double alpha, const double* __restrict__ b, double* __restrict__ c ) { c[0] += alpha * b[0]; c[1] += alpha * b[1]; c[2] += alpha * b[2]; c[3] += alpha * b[3]; c[4] += alpha * b[4]; c[5] += alpha * b[5]; c[6] += alpha * b[6]; c[7] += alpha * b[7]; c[8] += alpha * b[8]; c[9] += alpha * b[9]; c[10] += alpha * b[10]; c[11] += alpha * b[11]; c[12] += alpha * b[12]; c[13] += alpha * b[13]; c[14] += alpha * b[14]; c[15] += alpha * b[15]; } /** Purpose: -------- This routine computes C = alpha * A^T*B^T + beta * C B is put into shared memory Parameters Used: blk_M=64 blk_N=16 blk_K=16 nthd_x=16 nthd_y=4 This kernel is for matrices divisible by the corresponding blocking sizes. @ingroup magma_dblas3 ********************************************************************/ __global__ void dgemm_kernel_T_T_64_16_16_16_4_special( double* __restrict__ C, const double* __restrict__ A, const double* __restrict__ B, int m, int n, int k, int lda, int ldb, int ldc, double alpha, double beta ) { __shared__ double Bb[16][17]; const int tx = threadIdx.x; const int ty = threadIdx.y; int iby = ((blockIdx.y + blockIdx.x) % (n/16))*16; const int idt = ty * 16 + tx; int ibx = blockIdx.x * 64+idt; //int iby = blockIdx.y * 16; A += ibx; B += tx + __mul24(iby+ty, ldb); C += __mul24(ibx, ldc) + iby; const double *Bend = B + k; double Cb[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; do { double Ab[4] = {A[0], A[lda], A[2*lda], A[3*lda]}; Bb[tx][ty+0 ] = B[0*ldb]; Bb[tx][ty+4 ] = B[4*ldb]; Bb[tx][ty+8 ] = B[8*ldb]; Bb[tx][ty+12] = B[12*ldb]; __syncthreads(); A += 4 * lda; daxpy( Ab[0], &Bb[0][0], Cb ); Ab[0] = A[0*lda]; daxpy( Ab[1], &Bb[1][0], Cb ); Ab[1] = A[1*lda]; daxpy( Ab[2], &Bb[2][0], Cb ); Ab[2] = A[2*lda]; daxpy( Ab[3], &Bb[3][0], Cb ); Ab[3] = A[3*lda]; A += 4 * lda; daxpy( Ab[0], &Bb[4][0], Cb ); Ab[0] = A[0*lda]; daxpy( Ab[1], &Bb[5][0], Cb ); Ab[1] = A[1*lda]; daxpy( Ab[2], &Bb[6][0], Cb ); Ab[2] = A[2*lda]; daxpy( Ab[3], &Bb[7][0], Cb ); Ab[3] = A[3*lda]; A += 4 * lda; daxpy( Ab[0], &Bb[8][0], Cb ); Ab[0] = A[0*lda]; daxpy( Ab[1], &Bb[9][0], Cb ); Ab[1] = A[1*lda]; daxpy( Ab[2], &Bb[10][0], Cb ); Ab[2] = A[2*lda]; daxpy( Ab[3], &Bb[11][0], Cb ); Ab[3] = A[3*lda]; A += 4 * lda; daxpy( Ab[0], &Bb[12][0], Cb ); daxpy( Ab[1], &Bb[13][0], Cb ); daxpy( Ab[2], &Bb[14][0], Cb ); daxpy( Ab[3], &Bb[15][0], Cb ); B += 16; __syncthreads(); } while (B < Bend); #pragma unroll 16 for(int i = 0; i < 16; i++) { C[i] = alpha * Cb[i] + beta * C[i]; } } extern "C" void magmablas_dgemm_T_T_64_16_16_16_4_special( double *C, const double *A, const double *B, magma_int_t m, magma_int_t n, magma_int_t k, magma_int_t lda, magma_int_t ldb, magma_int_t ldc, double alpha, double beta ) { dim3 threads( 16, 4 ); dim3 grid( m/64, n/16 ); hipLaunchKernelGGL(( dgemm_kernel_T_T_64_16_16_16_4_special), dim3(grid), dim3(threads), 0, magma_stream , C, A, B, m, n, k, lda, ldb, ldc, alpha, beta ); }
2ecb29fdd5eb16ed37db5777692338ce393b63b3.cu
/* -- MAGMA (version 1.5.0-beta3) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date July 2014 @precisions normal d -> s */ #include "common_magma.h" #include "commonblas_d.h" /* * daxpy computes c += alpha*b, where b and c are 16-element vectors. */ static __device__ void daxpy( double alpha, const double* __restrict__ b, double* __restrict__ c ) { c[0] += alpha * b[0]; c[1] += alpha * b[1]; c[2] += alpha * b[2]; c[3] += alpha * b[3]; c[4] += alpha * b[4]; c[5] += alpha * b[5]; c[6] += alpha * b[6]; c[7] += alpha * b[7]; c[8] += alpha * b[8]; c[9] += alpha * b[9]; c[10] += alpha * b[10]; c[11] += alpha * b[11]; c[12] += alpha * b[12]; c[13] += alpha * b[13]; c[14] += alpha * b[14]; c[15] += alpha * b[15]; } /** Purpose: -------- This routine computes C = alpha * A^T*B^T + beta * C B is put into shared memory Parameters Used: blk_M=64 blk_N=16 blk_K=16 nthd_x=16 nthd_y=4 This kernel is for matrices divisible by the corresponding blocking sizes. @ingroup magma_dblas3 ********************************************************************/ __global__ void dgemm_kernel_T_T_64_16_16_16_4_special( double* __restrict__ C, const double* __restrict__ A, const double* __restrict__ B, int m, int n, int k, int lda, int ldb, int ldc, double alpha, double beta ) { __shared__ double Bb[16][17]; const int tx = threadIdx.x; const int ty = threadIdx.y; int iby = ((blockIdx.y + blockIdx.x) % (n/16))*16; const int idt = ty * 16 + tx; int ibx = blockIdx.x * 64+idt; //int iby = blockIdx.y * 16; A += ibx; B += tx + __mul24(iby+ty, ldb); C += __mul24(ibx, ldc) + iby; const double *Bend = B + k; double Cb[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; do { double Ab[4] = {A[0], A[lda], A[2*lda], A[3*lda]}; Bb[tx][ty+0 ] = B[0*ldb]; Bb[tx][ty+4 ] = B[4*ldb]; Bb[tx][ty+8 ] = B[8*ldb]; Bb[tx][ty+12] = B[12*ldb]; __syncthreads(); A += 4 * lda; daxpy( Ab[0], &Bb[0][0], Cb ); Ab[0] = A[0*lda]; daxpy( Ab[1], &Bb[1][0], Cb ); Ab[1] = A[1*lda]; daxpy( Ab[2], &Bb[2][0], Cb ); Ab[2] = A[2*lda]; daxpy( Ab[3], &Bb[3][0], Cb ); Ab[3] = A[3*lda]; A += 4 * lda; daxpy( Ab[0], &Bb[4][0], Cb ); Ab[0] = A[0*lda]; daxpy( Ab[1], &Bb[5][0], Cb ); Ab[1] = A[1*lda]; daxpy( Ab[2], &Bb[6][0], Cb ); Ab[2] = A[2*lda]; daxpy( Ab[3], &Bb[7][0], Cb ); Ab[3] = A[3*lda]; A += 4 * lda; daxpy( Ab[0], &Bb[8][0], Cb ); Ab[0] = A[0*lda]; daxpy( Ab[1], &Bb[9][0], Cb ); Ab[1] = A[1*lda]; daxpy( Ab[2], &Bb[10][0], Cb ); Ab[2] = A[2*lda]; daxpy( Ab[3], &Bb[11][0], Cb ); Ab[3] = A[3*lda]; A += 4 * lda; daxpy( Ab[0], &Bb[12][0], Cb ); daxpy( Ab[1], &Bb[13][0], Cb ); daxpy( Ab[2], &Bb[14][0], Cb ); daxpy( Ab[3], &Bb[15][0], Cb ); B += 16; __syncthreads(); } while (B < Bend); #pragma unroll 16 for(int i = 0; i < 16; i++) { C[i] = alpha * Cb[i] + beta * C[i]; } } extern "C" void magmablas_dgemm_T_T_64_16_16_16_4_special( double *C, const double *A, const double *B, magma_int_t m, magma_int_t n, magma_int_t k, magma_int_t lda, magma_int_t ldb, magma_int_t ldc, double alpha, double beta ) { dim3 threads( 16, 4 ); dim3 grid( m/64, n/16 ); dgemm_kernel_T_T_64_16_16_16_4_special<<< grid, threads, 0, magma_stream >>> ( C, A, B, m, n, k, lda, ldb, ldc, alpha, beta ); }
castFloat4.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /*--------------------------------------------------------------------------*\ Copyright (c) 2008-2010, Danny Ruijters. All rights reserved. http://www.dannyruijters.nl/cubicinterpolation/ This file is part of CUDA Cubic B-Spline Interpolation (CI). Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of the copyright holders nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. The views and conclusions contained in the software and documentation are those of the authors and should not be interpreted as representing official policies, either expressed or implied. \*--------------------------------------------------------------------------*/ #ifndef _CAST_FLOAT4_H_ #define _CAST_FLOAT4_H_ #include "memcpy.cu" //-------------------------------------------------------------------------- // Declare the interleaved copu CUDA kernel //-------------------------------------------------------------------------- template<class T> __global__ void CopyCastInterleaved(uchar* destination, const T* source, uint pitch, uint width) { uint2 index = make_uint2( __umul24(blockIdx.x, blockDim.x) + threadIdx.x, __umul24(blockIdx.y, blockDim.y) + threadIdx.y); uint index3 = 3 * (index.y * width + index.x); float4* dest = (float4*)(destination + index.y * pitch) + index.x; float mult = 1.0f / Multiplier<T>(); *dest = make_float4( mult * (float)source[index3], mult * (float)source[index3+1], mult * (float)source[index3+2], 1.0f); } //-------------------------------------------------------------------------- // Declare the typecast templated function // This function can be called directly in C++ programs //-------------------------------------------------------------------------- //! Allocate GPU memory and copy a voxel volume from CPU to GPU memory //! and cast it to the normalized floating point format //! @return the pointer to the GPU copy of the voxel volume //! @param host pointer to the voxel volume in CPU (host) memory //! @param width volume width in number of voxels //! @param height volume height in number of voxels //! 
@param depth volume depth in number of voxels template<class T> extern hipPitchedPtr CastVolumeHost3ToDevice4(const T* host, uint width, uint height, uint depth) { hipPitchedPtr device = {0}; const hipExtent extent = make_hipExtent(width * sizeof(float4), height, depth); CUDA_SAFE_CALL(hipMalloc3D(&device, extent)); const size_t pitchedBytesPerSlice = device.pitch * device.ysize; T* temp = 0; const uint voxelsPerSlice = width * height; const size_t nrOfBytesTemp = voxelsPerSlice * 3 * sizeof(T); CUDA_SAFE_CALL(hipMalloc((void**)&temp, nrOfBytesTemp)); uint dimX = min(PowTwoDivider(width), 64); dim3 dimBlock(dimX, min(PowTwoDivider(height), 512 / dimX)); dim3 dimGrid(width / dimBlock.x, height / dimBlock.y); size_t offsetHost = 0; size_t offsetDevice = 0; for (uint slice = 0; slice < depth; slice++) { CUDA_SAFE_CALL(hipMemcpy(temp, host + offsetHost, nrOfBytesTemp, hipMemcpyHostToDevice)); hipLaunchKernelGGL(( CopyCastInterleaved<T>), dim3(dimGrid), dim3(dimBlock), 0, 0, (uchar*)device.ptr + offsetDevice, temp, (uint)device.pitch, width); CUT_CHECK_ERROR("Cast kernel failed"); offsetHost += voxelsPerSlice; offsetDevice += pitchedBytesPerSlice; } CUDA_SAFE_CALL(hipFree(temp)); //free the temp GPU volume return device; } #endif //_CAST_FLOAT4_H_
castFloat4.cu
/*--------------------------------------------------------------------------*\ Copyright (c) 2008-2010, Danny Ruijters. All rights reserved. http://www.dannyruijters.nl/cubicinterpolation/ This file is part of CUDA Cubic B-Spline Interpolation (CI). Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of the copyright holders nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. The views and conclusions contained in the software and documentation are those of the authors and should not be interpreted as representing official policies, either expressed or implied. \*--------------------------------------------------------------------------*/ #ifndef _CAST_FLOAT4_H_ #define _CAST_FLOAT4_H_ #include "memcpy.cu" //-------------------------------------------------------------------------- // Declare the interleaved copu CUDA kernel //-------------------------------------------------------------------------- template<class T> __global__ void CopyCastInterleaved(uchar* destination, const T* source, uint pitch, uint width) { uint2 index = make_uint2( __umul24(blockIdx.x, blockDim.x) + threadIdx.x, __umul24(blockIdx.y, blockDim.y) + threadIdx.y); uint index3 = 3 * (index.y * width + index.x); float4* dest = (float4*)(destination + index.y * pitch) + index.x; float mult = 1.0f / Multiplier<T>(); *dest = make_float4( mult * (float)source[index3], mult * (float)source[index3+1], mult * (float)source[index3+2], 1.0f); } //-------------------------------------------------------------------------- // Declare the typecast templated function // This function can be called directly in C++ programs //-------------------------------------------------------------------------- //! Allocate GPU memory and copy a voxel volume from CPU to GPU memory //! and cast it to the normalized floating point format //! @return the pointer to the GPU copy of the voxel volume //! @param host pointer to the voxel volume in CPU (host) memory //! @param width volume width in number of voxels //! @param height volume height in number of voxels //! 
@param depth volume depth in number of voxels template<class T> extern cudaPitchedPtr CastVolumeHost3ToDevice4(const T* host, uint width, uint height, uint depth) { cudaPitchedPtr device = {0}; const cudaExtent extent = make_cudaExtent(width * sizeof(float4), height, depth); CUDA_SAFE_CALL(cudaMalloc3D(&device, extent)); const size_t pitchedBytesPerSlice = device.pitch * device.ysize; T* temp = 0; const uint voxelsPerSlice = width * height; const size_t nrOfBytesTemp = voxelsPerSlice * 3 * sizeof(T); CUDA_SAFE_CALL(cudaMalloc((void**)&temp, nrOfBytesTemp)); uint dimX = min(PowTwoDivider(width), 64); dim3 dimBlock(dimX, min(PowTwoDivider(height), 512 / dimX)); dim3 dimGrid(width / dimBlock.x, height / dimBlock.y); size_t offsetHost = 0; size_t offsetDevice = 0; for (uint slice = 0; slice < depth; slice++) { CUDA_SAFE_CALL(cudaMemcpy(temp, host + offsetHost, nrOfBytesTemp, cudaMemcpyHostToDevice)); CopyCastInterleaved<T><<<dimGrid, dimBlock>>>((uchar*)device.ptr + offsetDevice, temp, (uint)device.pitch, width); CUT_CHECK_ERROR("Cast kernel failed"); offsetHost += voxelsPerSlice; offsetDevice += pitchedBytesPerSlice; } CUDA_SAFE_CALL(cudaFree(temp)); //free the temp GPU volume return device; } #endif //_CAST_FLOAT4_H_
1e79242c7b40640b713a3aa43efc4020da70117a.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" __global__ void reduceUnrolling (int *g_idata, int *g_odata, unsigned int n, unsigned int q) // added parameter q: number of data blocks each thread block handles { // set thread ID unsigned int tid = threadIdx.x; unsigned int idx = blockIdx.x * blockDim.x * q + threadIdx.x; // idx adapted for q-way unrolling // unrolling: each thread first sums q elements spaced blockDim.x apart if (idx + blockDim.x*(q-1) < n) { for (int i=1; i<q; i++) { g_idata[idx] += g_idata[idx + blockDim.x*i]; } } __syncthreads(); // in-place reduction in global memory for (int stride = blockDim.x / 2; stride > 0; stride /= 2) { if (tid < stride) { g_idata[idx] += g_idata[idx + stride]; } // synchronize within threadblock __syncthreads(); } // write result for this block to global mem if (tid == 0) g_odata[blockIdx.x] = g_idata[idx]; }
1e79242c7b40640b713a3aa43efc4020da70117a.cu
#include "includes.h" __global__ void reduceUnrolling (int *g_idata, int *g_odata, unsigned int n, unsigned int q) // added parameter q: number of data blocks each thread block handles { // set thread ID unsigned int tid = threadIdx.x; unsigned int idx = blockIdx.x * blockDim.x * q + threadIdx.x; // idx adapted for q-way unrolling // unrolling: each thread first sums q elements spaced blockDim.x apart if (idx + blockDim.x*(q-1) < n) { for (int i=1; i<q; i++) { g_idata[idx] += g_idata[idx + blockDim.x*i]; } } __syncthreads(); // in-place reduction in global memory for (int stride = blockDim.x / 2; stride > 0; stride /= 2) { if (tid < stride) { g_idata[idx] += g_idata[idx + stride]; } // synchronize within threadblock __syncthreads(); } // write result for this block to global mem if (tid == 0) g_odata[blockIdx.x] = g_idata[idx]; }
4ba1f599f1cf84f1d6425e5f144ea93aab8aa37f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // // kernel.cu // // Created by Arya Mazaheri on 01/12/2018. // #include <iostream> #include <algorithm> #include <cmath> #include "ppm.h" using namespace std; /*********** Gray Scale Filter *********/ /** * Converts a given 24bpp image into 8bpp grayscale using the GPU. */ __global__ void cuda_grayscale(int width, int height, BYTE *image, BYTE *image_out) { //TODO (9 pt): implement grayscale filter kernel } // 1D Gaussian kernel array values of a fixed size (make sure the number > filter size d) //TODO: Define the cGaussian array on the constant memory (2 pt) void cuda_updateGaussian(int r, double sd) { float fGaussian[64]; for (int i = 0; i < 2*r +1 ; i++) { float x = i - r; fGaussian[i] = expf(-(x*x) / (2 * sd*sd)); } //TODO: Copy computed fGaussian to the cGaussian on device memory (2 pts) hipMemcpyToSymbol(cGaussian, /* TODO */); } //TODO: implement cuda_gaussian() kernel (3 pts) /*********** Bilateral Filter *********/ // Parallel (GPU) Bilateral filter kernel __global__ void cuda_bilateral_filter(BYTE* input, BYTE* output, int width, int height, int r, double sI, double sS) { //TODO: implement bilateral filter kernel (9 pts) } void gpu_pipeline(const Image & input, Image & output, int r, double sI, double sS) { // Events to calculate gpu run time hipEvent_t start, stop; float time; hipEventCreate(&start); hipEventCreate(&stop); // GPU related variables BYTE *d_input = NULL; BYTE *d_image_out[2] = {0}; //temporary output buffers on gpu device int image_size = input.cols*input.rows; int suggested_blockSize; // The launch configurator returned block size int suggested_minGridSize; // The minimum grid size needed to achieve the maximum occupancy for a full device launch // ******* Grayscale kernel launch ************* //Creating the block size for grayscaling kernel hipOccupancyMaxPotentialBlockSize( &suggested_minGridSize, &suggested_blockSize, cuda_grayscale); int block_dim_x, block_dim_y; block_dim_x = block_dim_y = (int) sqrt(suggested_blockSize); dim3 gray_block(/* TODO */); // 2 pts //TODO: Calculate grid size to cover the whole image - 2 pts // Allocate the intermediate image buffers for each step Image img_out(input.cols, input.rows, 1, "P5"); for (int i = 0; i < 2; i++) { //TODO: allocate memory on the device (2 pts) //TODO: intialize allocated memory on device to zero (2 pts) } //copy input image to device //TODO: Allocate memory on device for input image (2 pts) //TODO: Copy input image into the device memory (2 pts) hipEventRecord(start, 0); // start timer // Convert input image to grayscale //TODO: Launch cuda_grayscale() (2 pts) hipEventRecord(stop, 0); // stop timer hipEventSynchronize(stop); // Calculate and print kernel run time hipEventElapsedTime(&time, start, stop); cout << "GPU Grayscaling time: " << time << " (ms)\n"; cout << "Launched blocks of size " << gray_block.x * gray_block.y << endl; //TODO: transfer image from device to the main memory for saving onto the disk (2 pts) savePPM(img_out, "image_gpu_gray.ppm"); // ******* Bilateral filter kernel launch ************* //Creating the block size for grayscaling kernel hipOccupancyMaxPotentialBlockSize( &suggested_minGridSize, &suggested_blockSize, cuda_bilateral_filter); block_dim_x = block_dim_y = (int) sqrt(suggested_blockSize); dim3 bilateral_block(/* TODO */); // 2 pts //TODO: Calculate grid size to cover the whole image - 2pts // Create gaussain 1d array cuda_updateGaussian(r,sS); hipEventRecord(start, 0); // start timer 
//TODO: Launch cuda_bilateral_filter() (2 pts) hipEventRecord(stop, 0); // stop timer hipEventSynchronize(stop); // Calculate and print kernel run time hipEventElapsedTime(&time, start, stop); cout << "GPU Bilateral Filter time: " << time << " (ms)\n"; cout << "Launched blocks of size " << bilateral_block.x * bilateral_block.y << endl; // Copy output from device to host //TODO: transfer image from device to the main memory for saving onto the disk (2 pts) // ************** Finalization, cleaning up ************ // Free GPU variables //TODO: Free device allocated memory (3 pts) }
4ba1f599f1cf84f1d6425e5f144ea93aab8aa37f.cu
// // kernel.cu // // Created by Arya Mazaheri on 01/12/2018. // #include <iostream> #include <algorithm> #include <cmath> #include "ppm.h" using namespace std; /*********** Gray Scale Filter *********/ /** * Converts a given 24bpp image into 8bpp grayscale using the GPU. */ __global__ void cuda_grayscale(int width, int height, BYTE *image, BYTE *image_out) { //TODO (9 pt): implement grayscale filter kernel } // 1D Gaussian kernel array values of a fixed size (make sure the number > filter size d) //TODO: Define the cGaussian array on the constant memory (2 pt) void cuda_updateGaussian(int r, double sd) { float fGaussian[64]; for (int i = 0; i < 2*r +1 ; i++) { float x = i - r; fGaussian[i] = expf(-(x*x) / (2 * sd*sd)); } //TODO: Copy computed fGaussian to the cGaussian on device memory (2 pts) cudaMemcpyToSymbol(cGaussian, /* TODO */); } //TODO: implement cuda_gaussian() kernel (3 pts) /*********** Bilateral Filter *********/ // Parallel (GPU) Bilateral filter kernel __global__ void cuda_bilateral_filter(BYTE* input, BYTE* output, int width, int height, int r, double sI, double sS) { //TODO: implement bilateral filter kernel (9 pts) } void gpu_pipeline(const Image & input, Image & output, int r, double sI, double sS) { // Events to calculate gpu run time cudaEvent_t start, stop; float time; cudaEventCreate(&start); cudaEventCreate(&stop); // GPU related variables BYTE *d_input = NULL; BYTE *d_image_out[2] = {0}; //temporary output buffers on gpu device int image_size = input.cols*input.rows; int suggested_blockSize; // The launch configurator returned block size int suggested_minGridSize; // The minimum grid size needed to achieve the maximum occupancy for a full device launch // ******* Grayscale kernel launch ************* //Creating the block size for grayscaling kernel cudaOccupancyMaxPotentialBlockSize( &suggested_minGridSize, &suggested_blockSize, cuda_grayscale); int block_dim_x, block_dim_y; block_dim_x = block_dim_y = (int) sqrt(suggested_blockSize); dim3 gray_block(/* TODO */); // 2 pts //TODO: Calculate grid size to cover the whole image - 2 pts // Allocate the intermediate image buffers for each step Image img_out(input.cols, input.rows, 1, "P5"); for (int i = 0; i < 2; i++) { //TODO: allocate memory on the device (2 pts) //TODO: intialize allocated memory on device to zero (2 pts) } //copy input image to device //TODO: Allocate memory on device for input image (2 pts) //TODO: Copy input image into the device memory (2 pts) cudaEventRecord(start, 0); // start timer // Convert input image to grayscale //TODO: Launch cuda_grayscale() (2 pts) cudaEventRecord(stop, 0); // stop timer cudaEventSynchronize(stop); // Calculate and print kernel run time cudaEventElapsedTime(&time, start, stop); cout << "GPU Grayscaling time: " << time << " (ms)\n"; cout << "Launched blocks of size " << gray_block.x * gray_block.y << endl; //TODO: transfer image from device to the main memory for saving onto the disk (2 pts) savePPM(img_out, "image_gpu_gray.ppm"); // ******* Bilateral filter kernel launch ************* //Creating the block size for grayscaling kernel cudaOccupancyMaxPotentialBlockSize( &suggested_minGridSize, &suggested_blockSize, cuda_bilateral_filter); block_dim_x = block_dim_y = (int) sqrt(suggested_blockSize); dim3 bilateral_block(/* TODO */); // 2 pts //TODO: Calculate grid size to cover the whole image - 2pts // Create gaussain 1d array cuda_updateGaussian(r,sS); cudaEventRecord(start, 0); // start timer //TODO: Launch cuda_bilateral_filter() (2 pts) cudaEventRecord(stop, 0); // 
stop timer cudaEventSynchronize(stop); // Calculate and print kernel run time cudaEventElapsedTime(&time, start, stop); cout << "GPU Bilateral Filter time: " << time << " (ms)\n"; cout << "Launched blocks of size " << bilateral_block.x * bilateral_block.y << endl; // Copy output from device to host //TODO: transfer image from device to the main memory for saving onto the disk (2 pts) // ************** Finalization, cleaning up ************ // Free GPU variables //TODO: Free device allocated memory (3 pts) }
96634800e0191ff1e036a4bd475f20e049314141.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * CUDArrays is a library for easy multi-GPU program development. * * The MIT License (MIT) * * Copyright (c) 2013-2015 Barcelona Supercomputing Center and * University of Illinois * * Developed by: Javier Cabezas <[email protected]> * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN * THE SOFTWARE. */ #include <cstdio> #include <cudarrays/static_array.hpp> using namespace cudarrays; template <typename T> __host__ __device__ void test_single(T &A) { for (unsigned i = 0; i < A.template dim<0>(); ++i) { for (unsigned j = 0; j < A.template dim<1>(); ++j) { A(i, j) = 1; } } A(1, 1) = 3; for (unsigned i = 0; i < A.template dim<0>(); ++i) { for (unsigned j = 0; j < A.template dim<1>(); ++j) { printf("%d ", A(i, j)); } printf("\n"); } } __global__ void test_kernel_single() { static_array<int [3][3]> A; test_single(A); } static constexpr size_t BLOCK_X = 3; static constexpr size_t BLOCK_Y = 3; __global__ void test_kernel_shared() { static_array<int [BLOCK_Y][BLOCK_X], memory_space::shared, layout::rmo, align<4>> A; bool first = threadIdx.y == 0 && threadIdx.x == 0; bool central = threadIdx.y == 1 && threadIdx.x == 1; int value = central? 3: 1; A(threadIdx.y, threadIdx.x) = value; __syncthreads(); if (first) for (unsigned i = 0; i < A.dim<0>(); ++i) { for (unsigned j = 0; j < A.dim<1>(); ++j) { printf("%d ", A(i, j)); } printf("\n"); } } int main() { static_array<int [3][3], memory_space::local, layout::rmo, align<1024, 2>> A; printf("Host\n"); test_single(A); printf("Device\n"); hipLaunchKernelGGL(( test_kernel_single), dim3(1), dim3(1), 0, 0, ); hipDeviceSynchronize(); printf("Device __shared__\n"); hipLaunchKernelGGL(( test_kernel_shared), dim3(1), dim3(dim3(BLOCK_X, BLOCK_Y)), 0, 0, ); hipDeviceSynchronize(); return 0; }
96634800e0191ff1e036a4bd475f20e049314141.cu
/* * CUDArrays is a library for easy multi-GPU program development. * * The MIT License (MIT) * * Copyright (c) 2013-2015 Barcelona Supercomputing Center and * University of Illinois * * Developed by: Javier Cabezas <[email protected]> * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN * THE SOFTWARE. */ #include <cstdio> #include <cudarrays/static_array.hpp> using namespace cudarrays; template <typename T> __host__ __device__ void test_single(T &A) { for (unsigned i = 0; i < A.template dim<0>(); ++i) { for (unsigned j = 0; j < A.template dim<1>(); ++j) { A(i, j) = 1; } } A(1, 1) = 3; for (unsigned i = 0; i < A.template dim<0>(); ++i) { for (unsigned j = 0; j < A.template dim<1>(); ++j) { printf("%d ", A(i, j)); } printf("\n"); } } __global__ void test_kernel_single() { static_array<int [3][3]> A; test_single(A); } static constexpr size_t BLOCK_X = 3; static constexpr size_t BLOCK_Y = 3; __global__ void test_kernel_shared() { static_array<int [BLOCK_Y][BLOCK_X], memory_space::shared, layout::rmo, align<4>> A; bool first = threadIdx.y == 0 && threadIdx.x == 0; bool central = threadIdx.y == 1 && threadIdx.x == 1; int value = central? 3: 1; A(threadIdx.y, threadIdx.x) = value; __syncthreads(); if (first) for (unsigned i = 0; i < A.dim<0>(); ++i) { for (unsigned j = 0; j < A.dim<1>(); ++j) { printf("%d ", A(i, j)); } printf("\n"); } } int main() { static_array<int [3][3], memory_space::local, layout::rmo, align<1024, 2>> A; printf("Host\n"); test_single(A); printf("Device\n"); test_kernel_single<<<1, 1>>>(); cudaDeviceSynchronize(); printf("Device __shared__\n"); test_kernel_shared<<<1, dim3(BLOCK_X, BLOCK_Y)>>>(); cudaDeviceSynchronize(); return 0; }