Columns (all strings, with minimum and maximum value lengths):

hip_filename: 5 – 84
hip_content: 79 – 9.69M
cuda_filename: 4 – 83
cuda_content: 19 – 9.69M
a48fac72582d333ca1c1a6baad6fd98b5e5bd074.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/phi/kernels/sparse/conv_grad_kernel.h" #include "glog/logging.h" #include "paddle/phi/backends/gpu/gpu_context.h" #include "paddle/phi/backends/gpu/gpu_info.h" #include "paddle/phi/backends/gpu/gpu_launch_config.h" #include "paddle/phi/core/kernel_registry.h" #include "paddle/phi/core/tensor_utils.h" #include "paddle/phi/core/visit_type.h" #include "paddle/phi/kernels/funcs/blas/blas.h" #include "paddle/phi/kernels/funcs/math_function.h" #include "paddle/phi/kernels/sparse/gpu/conv.cu.h" #if defined(PADDLE_WITH_CUTLASS) && SPCONV_WITH_CUTLASS #include "paddle/phi/kernels/sparse/gpu/gather_gemm_scatter.h" #endif namespace phi { namespace sparse { extern size_t workspace_size; // rulebook[3, rulebook_len]: //[ // [kernel_index], // [in_i], // [out_i], //] // x_grad = out_grad * transpose(kernel) // kernel_grad = transpose(x) * out_grad template <typename T, typename IntT> void Conv3dCooGradGPUKernel(const GPUContext& dev_ctx, const SparseCooTensor& x, const DenseTensor& kernel, const SparseCooTensor& out, const DenseTensor& rulebook, const DenseTensor& counter, const SparseCooTensor& out_grad, const std::vector<int>& paddings, const std::vector<int>& dilations, const std::vector<int>& strides, const int groups, const bool subm, const std::string& key, SparseCooTensor* x_grad, DenseTensor* kernel_grad) { const bool is_params_freezing = kernel_grad == nullptr; const auto& kernel_dims = kernel.dims(); const bool is2D = kernel_dims.size() == 4 ? true : false; const int kernel_size = is2D ? kernel_dims[0] * kernel_dims[1] : kernel_dims[0] * kernel_dims[1] * kernel_dims[2]; const int in_channels = is2D ? kernel_dims[2] : kernel_dims[3]; const int out_channels = is2D ? 
kernel_dims[3] : kernel_dims[4]; int rulebook_len = 0; const IntT* rulebook_ptr = phi::funcs::sparse::GetRulebookPtr<IntT>( out, rulebook, key, &rulebook_len); const int* counter_ptr = phi::funcs::sparse::GetCounterPtr(out, counter, key); phi::DenseTensor in_features = phi::Empty<T>(dev_ctx, {rulebook_len, in_channels}); phi::DenseTensor d_x_features = phi::Empty<T>(dev_ctx, {rulebook_len, in_channels}); phi::DenseTensor out_grad_features = phi::Empty<T>(dev_ctx, {rulebook_len, out_channels}); T* in_features_ptr = in_features.data<T>(); T* d_x_features_ptr = d_x_features.data<T>(); T* out_grad_features_ptr = out_grad_features.data<T>(); T* d_kernel_ptr = nullptr; if (!is_params_freezing) { *kernel_grad = phi::EmptyLike<T>(dev_ctx, kernel); d_kernel_ptr = kernel_grad->data<T>(); phi::backends::gpu::GpuMemsetAsync( d_kernel_ptr, 0, sizeof(T) * kernel_grad->numel(), dev_ctx.stream()); } int half_kernel_size = kernel_size / 2; auto blas = phi::funcs::GetBlas<GPUContext, T>(dev_ctx); DenseTensor x_grad_indices = phi::EmptyLike<IntT>(dev_ctx, x.indices()); DenseTensor x_grad_values = phi::EmptyLike<T>(dev_ctx, x.values()); T* x_grad_values_ptr = x_grad_values.data<T>(); phi::backends::gpu::GpuMemsetAsync(x_grad_values_ptr, 0, sizeof(T) * x_grad_values.numel(), dev_ctx.stream()); phi::backends::gpu::GpuMemsetAsync( d_x_features_ptr, 0, sizeof(T) * d_x_features.numel(), dev_ctx.stream()); phi::Copy<GPUContext>( dev_ctx, x.indices(), dev_ctx.GetPlace(), false, &x_grad_indices); x_grad->SetMember(x_grad_indices, x_grad_values, x.dims(), true); std::vector<int> offsets(kernel_size + 1); int offset = 0, max_count = 0; for (int i = 0; i < kernel_size; i++) { offsets[i] = offset; offset += counter_ptr[i]; if (i < half_kernel_size) { max_count = ::max(max_count, counter_ptr[i]); } } offsets[kernel_size] = offset; if (subm) { phi::funcs::sparse::SubmPreProcess<T, GPUContext>(dev_ctx, x, kernel, out_grad.values(), in_channels, out_channels, half_kernel_size, kernel_grad, &x_grad_values); if (max_count == 0) { return; } } auto config = phi::backends::gpu::GetGpuLaunchConfig1D(dev_ctx, rulebook_len, 1); DenseTensor unique_value = phi::Empty<int>( dev_ctx, {static_cast<int>(x_grad->nnz() * kernel_size * 2)}); DenseTensor out_index = phi::Empty<int>(dev_ctx, {static_cast<int>(x.nnz() * 2)}); int* out_index_ptr = out_index.data<int>(); int* unique_value_ptr = unique_value.data<int>(); phi::backends::gpu::GpuMemsetAsync( out_index_ptr, 0, sizeof(int) * x.nnz() * 2, dev_ctx.stream()); #if defined(PADDLE_WITH_CUTLASS) && SPCONV_WITH_CUTLASS bool cutlass = true; if (dev_ctx.GetComputeCapability() < 80) cutlass = false; if (in_channels % 4 != 0 || out_channels % 4 != 0) cutlass = false; if (std::is_same<T, phi::dtype::float16>::value || std::is_same<T, double>::value) cutlass = false; if (!std::is_same<IntT, int32_t>::value) cutlass = false; if (!cutlass) { #endif hipLaunchKernelGGL(( GroupIndexsV2), dim3(config.block_per_grid), dim3(config.thread_per_block), 0, dev_ctx.stream(), rulebook_len, x.nnz(), kernel_size, offsets[kernel_size / 2], rulebook_ptr, out_index_ptr, unique_value_ptr); GatherV2<T, IntT>(dev_ctx, x.values().data<T>(), out_index_ptr, unique_value_ptr, x.nnz(), kernel_size, in_channels, 2, in_features_ptr); Gather<T, IntT>(dev_ctx, out_grad.values().data<T>(), rulebook_ptr + rulebook_len, rulebook_len, out_channels, out_grad_features_ptr); #if defined(PADDLE_WITH_CUTLASS) && SPCONV_WITH_CUTLASS } #endif const T* kernel_ptr = kernel.data<T>(); T* tmp_d_x_ptr = nullptr; T* tmp_d_kernel_ptr = nullptr; 
for (int i = 0; i < kernel_size; i++) { if (counter_ptr[i] <= 0 || (subm && i == half_kernel_size)) { continue; } const int M = counter_ptr[i]; const int K = in_channels; const int N = out_channels; T* tmp_in_ptr = in_features_ptr + offsets[i] * in_channels; T* tmp_out_grad_ptr = out_grad_features_ptr + offsets[i] * out_channels; const T* tmp_kernel_ptr = kernel_ptr + i * in_channels * out_channels; tmp_d_x_ptr = d_x_features_ptr + offsets[i] * in_channels; if (!is_params_freezing) { tmp_d_kernel_ptr = d_kernel_ptr + i * in_channels * out_channels; } #if defined(PADDLE_WITH_CUTLASS) && SPCONV_WITH_CUTLASS if (cutlass) { const IntT* gather_x_indices = rulebook_ptr + offsets[i]; const IntT* scatter_x_indices = rulebook_ptr + offsets[i]; const IntT* gather_out_indices = rulebook_ptr + rulebook_len + offsets[i]; const size_t key = autotune::GenKey(M / features_num_range, N, K); if (!is_params_freezing) { // call gemm: d_kernel = transpose(x) * out_grad // (in_channels, n) * (n, out_channels) static cutlass::device_memory::allocation<uint8_t> workspace( workspace_size); GatherGemmScatterDriver<80, true, false>( dev_ctx, key, x.values().data<T>(), out_grad.values().data<T>(), tmp_d_kernel_ptr, tmp_d_kernel_ptr, in_channels, out_channels, counter_ptr[i], gather_x_indices, gather_out_indices, static_cast<const IntT*>(nullptr), static_cast<const T>(1.0), static_cast<const T>(0.0), &workspace); } // call gemm: d_x = out_grad * transpose(kernel) // (n, out_channels) * (out_channels, in_channels) GatherGemmScatterDriver<80, false, true>( dev_ctx, key, out_grad.values().data<T>(), tmp_kernel_ptr, x_grad_values_ptr, x_grad_values_ptr, counter_ptr[i], in_channels, out_channels, gather_out_indices, static_cast<const IntT*>(nullptr), scatter_x_indices, static_cast<const T>(1.0), static_cast<const T>(1.0), nullptr); } else { #endif if (!is_params_freezing) { // call gemm: d_kernel = transpose(x) * out_grad // (in_channels, n) * (n, out_channels) blas.GEMM(CblasTrans, CblasNoTrans, K, N, M, static_cast<T>(1), tmp_in_ptr, tmp_out_grad_ptr, static_cast<T>(0), tmp_d_kernel_ptr); } // call gemm: d_x = out_grad * transpose(kernel) // (n, out_channels) * (out_channels, in_channels) blas.GEMM(CblasNoTrans, CblasTrans, M, K, N, static_cast<T>(1), tmp_out_grad_ptr, tmp_kernel_ptr, static_cast<T>(0), tmp_d_x_ptr); #if defined(PADDLE_WITH_CUTLASS) && SPCONV_WITH_CUTLASS } #endif } // 4. 
scatter #if defined(PADDLE_WITH_CUTLASS) && SPCONV_WITH_CUTLASS if (!cutlass) { #endif phi::funcs::sparse::ScatterV2<T>(dev_ctx, d_x_features_ptr, out_index.data<int>(), unique_value.data<int>(), x_grad->nnz(), kernel_size, in_channels, 2, x_grad_values_ptr); #if defined(PADDLE_WITH_CUTLASS) && SPCONV_WITH_CUTLASS } #endif } template <typename T, typename Context> void Conv3dCooGradKernel(const Context& dev_ctx, const SparseCooTensor& x, const DenseTensor& kernel, const SparseCooTensor& out, const DenseTensor& rulebook, const DenseTensor& counter, const SparseCooTensor& out_grad, const std::vector<int>& paddings, const std::vector<int>& dilations, const std::vector<int>& strides, const int groups, const bool subm, const std::string& key, SparseCooTensor* x_grad, DenseTensor* kernel_grad) { PD_VISIT_BASE_INTEGRAL_TYPES( x.indices().dtype(), "Conv3dCooGradGPUKernel", ([&] { Conv3dCooGradGPUKernel<T, data_t>(dev_ctx, x, kernel, out, rulebook, counter, out_grad, paddings, dilations, strides, groups, subm, key, x_grad, kernel_grad); })); } } // namespace sparse } // namespace phi PD_REGISTER_KERNEL(conv3d_coo_grad, GPU, ALL_LAYOUT, phi::sparse::Conv3dCooGradKernel, float, double, phi::dtype::float16) { kernel->InputAt(0).SetDataLayout(phi::DataLayout::SPARSE_COO); }
a48fac72582d333ca1c1a6baad6fd98b5e5bd074.cu
/* Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/phi/kernels/sparse/conv_grad_kernel.h" #include "glog/logging.h" #include "paddle/phi/backends/gpu/gpu_context.h" #include "paddle/phi/backends/gpu/gpu_info.h" #include "paddle/phi/backends/gpu/gpu_launch_config.h" #include "paddle/phi/core/kernel_registry.h" #include "paddle/phi/core/tensor_utils.h" #include "paddle/phi/core/visit_type.h" #include "paddle/phi/kernels/funcs/blas/blas.h" #include "paddle/phi/kernels/funcs/math_function.h" #include "paddle/phi/kernels/sparse/gpu/conv.cu.h" #if defined(PADDLE_WITH_CUTLASS) && SPCONV_WITH_CUTLASS #include "paddle/phi/kernels/sparse/gpu/gather_gemm_scatter.h" #endif namespace phi { namespace sparse { extern size_t workspace_size; // rulebook[3, rulebook_len]: //[ // [kernel_index], // [in_i], // [out_i], //] // x_grad = out_grad * transpose(kernel) // kernel_grad = transpose(x) * out_grad template <typename T, typename IntT> void Conv3dCooGradGPUKernel(const GPUContext& dev_ctx, const SparseCooTensor& x, const DenseTensor& kernel, const SparseCooTensor& out, const DenseTensor& rulebook, const DenseTensor& counter, const SparseCooTensor& out_grad, const std::vector<int>& paddings, const std::vector<int>& dilations, const std::vector<int>& strides, const int groups, const bool subm, const std::string& key, SparseCooTensor* x_grad, DenseTensor* kernel_grad) { const bool is_params_freezing = kernel_grad == nullptr; const auto& kernel_dims = kernel.dims(); const bool is2D = kernel_dims.size() == 4 ? true : false; const int kernel_size = is2D ? kernel_dims[0] * kernel_dims[1] : kernel_dims[0] * kernel_dims[1] * kernel_dims[2]; const int in_channels = is2D ? kernel_dims[2] : kernel_dims[3]; const int out_channels = is2D ? 
kernel_dims[3] : kernel_dims[4]; int rulebook_len = 0; const IntT* rulebook_ptr = phi::funcs::sparse::GetRulebookPtr<IntT>( out, rulebook, key, &rulebook_len); const int* counter_ptr = phi::funcs::sparse::GetCounterPtr(out, counter, key); phi::DenseTensor in_features = phi::Empty<T>(dev_ctx, {rulebook_len, in_channels}); phi::DenseTensor d_x_features = phi::Empty<T>(dev_ctx, {rulebook_len, in_channels}); phi::DenseTensor out_grad_features = phi::Empty<T>(dev_ctx, {rulebook_len, out_channels}); T* in_features_ptr = in_features.data<T>(); T* d_x_features_ptr = d_x_features.data<T>(); T* out_grad_features_ptr = out_grad_features.data<T>(); T* d_kernel_ptr = nullptr; if (!is_params_freezing) { *kernel_grad = phi::EmptyLike<T>(dev_ctx, kernel); d_kernel_ptr = kernel_grad->data<T>(); phi::backends::gpu::GpuMemsetAsync( d_kernel_ptr, 0, sizeof(T) * kernel_grad->numel(), dev_ctx.stream()); } int half_kernel_size = kernel_size / 2; auto blas = phi::funcs::GetBlas<GPUContext, T>(dev_ctx); DenseTensor x_grad_indices = phi::EmptyLike<IntT>(dev_ctx, x.indices()); DenseTensor x_grad_values = phi::EmptyLike<T>(dev_ctx, x.values()); T* x_grad_values_ptr = x_grad_values.data<T>(); phi::backends::gpu::GpuMemsetAsync(x_grad_values_ptr, 0, sizeof(T) * x_grad_values.numel(), dev_ctx.stream()); phi::backends::gpu::GpuMemsetAsync( d_x_features_ptr, 0, sizeof(T) * d_x_features.numel(), dev_ctx.stream()); phi::Copy<GPUContext>( dev_ctx, x.indices(), dev_ctx.GetPlace(), false, &x_grad_indices); x_grad->SetMember(x_grad_indices, x_grad_values, x.dims(), true); std::vector<int> offsets(kernel_size + 1); int offset = 0, max_count = 0; for (int i = 0; i < kernel_size; i++) { offsets[i] = offset; offset += counter_ptr[i]; if (i < half_kernel_size) { max_count = std::max(max_count, counter_ptr[i]); } } offsets[kernel_size] = offset; if (subm) { phi::funcs::sparse::SubmPreProcess<T, GPUContext>(dev_ctx, x, kernel, out_grad.values(), in_channels, out_channels, half_kernel_size, kernel_grad, &x_grad_values); if (max_count == 0) { return; } } auto config = phi::backends::gpu::GetGpuLaunchConfig1D(dev_ctx, rulebook_len, 1); DenseTensor unique_value = phi::Empty<int>( dev_ctx, {static_cast<int>(x_grad->nnz() * kernel_size * 2)}); DenseTensor out_index = phi::Empty<int>(dev_ctx, {static_cast<int>(x.nnz() * 2)}); int* out_index_ptr = out_index.data<int>(); int* unique_value_ptr = unique_value.data<int>(); phi::backends::gpu::GpuMemsetAsync( out_index_ptr, 0, sizeof(int) * x.nnz() * 2, dev_ctx.stream()); #if defined(PADDLE_WITH_CUTLASS) && SPCONV_WITH_CUTLASS bool cutlass = true; if (dev_ctx.GetComputeCapability() < 80) cutlass = false; if (in_channels % 4 != 0 || out_channels % 4 != 0) cutlass = false; if (std::is_same<T, phi::dtype::float16>::value || std::is_same<T, double>::value) cutlass = false; if (!std::is_same<IntT, int32_t>::value) cutlass = false; if (!cutlass) { #endif GroupIndexsV2<<<config.block_per_grid, config.thread_per_block, 0, dev_ctx.stream()>>>(rulebook_len, x.nnz(), kernel_size, offsets[kernel_size / 2], rulebook_ptr, out_index_ptr, unique_value_ptr); GatherV2<T, IntT>(dev_ctx, x.values().data<T>(), out_index_ptr, unique_value_ptr, x.nnz(), kernel_size, in_channels, 2, in_features_ptr); Gather<T, IntT>(dev_ctx, out_grad.values().data<T>(), rulebook_ptr + rulebook_len, rulebook_len, out_channels, out_grad_features_ptr); #if defined(PADDLE_WITH_CUTLASS) && SPCONV_WITH_CUTLASS } #endif const T* kernel_ptr = kernel.data<T>(); T* tmp_d_x_ptr = nullptr; T* tmp_d_kernel_ptr = nullptr; for (int i = 0; i < 
kernel_size; i++) { if (counter_ptr[i] <= 0 || (subm && i == half_kernel_size)) { continue; } const int M = counter_ptr[i]; const int K = in_channels; const int N = out_channels; T* tmp_in_ptr = in_features_ptr + offsets[i] * in_channels; T* tmp_out_grad_ptr = out_grad_features_ptr + offsets[i] * out_channels; const T* tmp_kernel_ptr = kernel_ptr + i * in_channels * out_channels; tmp_d_x_ptr = d_x_features_ptr + offsets[i] * in_channels; if (!is_params_freezing) { tmp_d_kernel_ptr = d_kernel_ptr + i * in_channels * out_channels; } #if defined(PADDLE_WITH_CUTLASS) && SPCONV_WITH_CUTLASS if (cutlass) { const IntT* gather_x_indices = rulebook_ptr + offsets[i]; const IntT* scatter_x_indices = rulebook_ptr + offsets[i]; const IntT* gather_out_indices = rulebook_ptr + rulebook_len + offsets[i]; const size_t key = autotune::GenKey(M / features_num_range, N, K); if (!is_params_freezing) { // call gemm: d_kernel = transpose(x) * out_grad // (in_channels, n) * (n, out_channels) static cutlass::device_memory::allocation<uint8_t> workspace( workspace_size); GatherGemmScatterDriver<80, true, false>( dev_ctx, key, x.values().data<T>(), out_grad.values().data<T>(), tmp_d_kernel_ptr, tmp_d_kernel_ptr, in_channels, out_channels, counter_ptr[i], gather_x_indices, gather_out_indices, static_cast<const IntT*>(nullptr), static_cast<const T>(1.0), static_cast<const T>(0.0), &workspace); } // call gemm: d_x = out_grad * transpose(kernel) // (n, out_channels) * (out_channels, in_channels) GatherGemmScatterDriver<80, false, true>( dev_ctx, key, out_grad.values().data<T>(), tmp_kernel_ptr, x_grad_values_ptr, x_grad_values_ptr, counter_ptr[i], in_channels, out_channels, gather_out_indices, static_cast<const IntT*>(nullptr), scatter_x_indices, static_cast<const T>(1.0), static_cast<const T>(1.0), nullptr); } else { #endif if (!is_params_freezing) { // call gemm: d_kernel = transpose(x) * out_grad // (in_channels, n) * (n, out_channels) blas.GEMM(CblasTrans, CblasNoTrans, K, N, M, static_cast<T>(1), tmp_in_ptr, tmp_out_grad_ptr, static_cast<T>(0), tmp_d_kernel_ptr); } // call gemm: d_x = out_grad * transpose(kernel) // (n, out_channels) * (out_channels, in_channels) blas.GEMM(CblasNoTrans, CblasTrans, M, K, N, static_cast<T>(1), tmp_out_grad_ptr, tmp_kernel_ptr, static_cast<T>(0), tmp_d_x_ptr); #if defined(PADDLE_WITH_CUTLASS) && SPCONV_WITH_CUTLASS } #endif } // 4. 
scatter #if defined(PADDLE_WITH_CUTLASS) && SPCONV_WITH_CUTLASS if (!cutlass) { #endif phi::funcs::sparse::ScatterV2<T>(dev_ctx, d_x_features_ptr, out_index.data<int>(), unique_value.data<int>(), x_grad->nnz(), kernel_size, in_channels, 2, x_grad_values_ptr); #if defined(PADDLE_WITH_CUTLASS) && SPCONV_WITH_CUTLASS } #endif } template <typename T, typename Context> void Conv3dCooGradKernel(const Context& dev_ctx, const SparseCooTensor& x, const DenseTensor& kernel, const SparseCooTensor& out, const DenseTensor& rulebook, const DenseTensor& counter, const SparseCooTensor& out_grad, const std::vector<int>& paddings, const std::vector<int>& dilations, const std::vector<int>& strides, const int groups, const bool subm, const std::string& key, SparseCooTensor* x_grad, DenseTensor* kernel_grad) { PD_VISIT_BASE_INTEGRAL_TYPES( x.indices().dtype(), "Conv3dCooGradGPUKernel", ([&] { Conv3dCooGradGPUKernel<T, data_t>(dev_ctx, x, kernel, out, rulebook, counter, out_grad, paddings, dilations, strides, groups, subm, key, x_grad, kernel_grad); })); } } // namespace sparse } // namespace phi PD_REGISTER_KERNEL(conv3d_coo_grad, GPU, ALL_LAYOUT, phi::sparse::Conv3dCooGradKernel, float, double, phi::dtype::float16) { kernel->InputAt(0).SetDataLayout(phi::DataLayout::SPARSE_COO); }
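The pair above differs only in the mechanical rewrites hipify applies: the generated .hip file prepends the hipify banner and an #include "hip/hip_runtime.h", qualifies std::max as ::max, and turns the triple-chevron kernel launch into the hipLaunchKernelGGL macro. Below is a minimal, self-contained HIP sketch of that launch rewrite; the add_one kernel, its arguments, and the launch geometry are illustrative inventions, not part of the dataset.

#include <hip/hip_runtime.h>

// Hypothetical kernel (not from the dataset): adds 1 to each element.
__global__ void add_one(float* x, int n) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) x[i] += 1.0f;
}

int main() {
  const int n = 256;
  float* d_x = nullptr;
  hipMalloc(&d_x, n * sizeof(float));
  hipMemset(d_x, 0, n * sizeof(float));
  // The CUDA file would launch with triple chevrons:
  //   add_one<<<dim3(1), dim3(n), 0, 0>>>(d_x, n);
  // hipify emits the macro form seen in the .hip files above:
  hipLaunchKernelGGL(add_one, dim3(1), dim3(n), 0, 0, d_x, n);
  hipDeviceSynchronize();
  hipFree(d_x);
  return 0;
}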
4db8239bdd0f164929ef9bf3f5cfbf471473eeea.hip
// !!! This is a file automatically generated by hipify!!! #include <iostream> #include <cstdlib> #include <cmath> #include <vector> #include <string> #include "solver.h" using namespace std; typedef unsigned char uchar; int num_train = 512, num_test = 500; int reverseInt(int n) { int bytes = 4; unsigned char ch[bytes]; for (int i = 0; i < bytes; i++) { ch[i] = (n >> i * 8) & 255; } int p = 0; for (int i = 0; i < bytes; i++) { p += (int) ch[i] << (bytes - i - 1) * 8; } return p; } void readMNIST(vector<vector<uchar> > &train_images, vector<vector<uchar> > &test_images, vector<uchar> &train_labels, vector<uchar> &test_labels) { string filename_train_images = "data/train-images.idx3-ubyte"; string filename_train_labels = "data/train-labels.idx1-ubyte"; string filename_test_images = "data/t10k-images.idx3-ubyte"; string filename_test_labels = "data/t10k-labels.idx1-ubyte"; // read train/test images for (int i = 0; i < 2; i++) { string filename; if (i == 0) filename = filename_train_images; else filename = filename_test_images; ifstream f(filename.c_str(), ios::binary); if (!f.is_open()) printf("Cannot read MNIST from %s\n", filename.c_str()); // read metadata int magic_number = 0, n_images = 0, n_rows = 0, n_cols = 0; f.read((char *) &magic_number, sizeof(magic_number)); magic_number = reverseInt(magic_number); f.read((char *) &n_images, sizeof(n_images)); n_images = reverseInt(n_images); f.read((char *) &n_rows, sizeof(n_rows)); n_rows = reverseInt(n_rows); f.read((char *) &n_cols, sizeof(n_cols)); n_cols = reverseInt(n_cols); for (int k = 0; k < n_images; k++) { vector<uchar> temp; temp.reserve(n_rows * n_cols); for (int j = 0; j < n_rows * n_cols; j++) { uchar t = 0; f.read((char *)&t, sizeof(t)); temp.push_back(t); } if (i == 0) train_images.push_back(temp); else test_images.push_back(temp); } f.close(); } // read train/test labels for (int i = 0; i < 2; i++) { string filename; if (i == 0) filename = filename_train_labels; else filename = filename_test_labels; ifstream f(filename.c_str(), ios::binary); if (!f.is_open()) printf("Cannot read MNIST from %s\n", filename.c_str()); // read metadata int magic_number = 0, n_labels = 0; f.read((char *) &magic_number, sizeof(magic_number)); magic_number = reverseInt(magic_number); f.read((char *) &n_labels, sizeof(n_labels)); n_labels = reverseInt(n_labels); for (int k = 0; k < n_labels; k++) { uchar t = 0; f.read((char *)&t, sizeof(t)); if (i == 0) train_labels.push_back(t); else test_labels.push_back(t); } f.close(); } } void printTimes(vector<float> &time, string filename); void printvDNNLag(vector<vector<float> > &fwd_vdnn_lag, vector<vector<float> > &bwd_vdnn_lag, string filename); int main(int argc, char *argv[]) { // int num_train = 100 * batch_size, num_val = batch_size; // void *X_train = malloc(num_train * input_channels * sizeof(float)); // int *y_train = (int *)malloc(num_train * sizeof(int)); // void *X_val = malloc(num_val * input_channels * sizeof(float)); // int *y_val = (int *)malloc(num_val * sizeof(int)); // for (int i = 0; i < num_train; i++) { // for (int j = 0; j < input_channels; j++) // ((float *)X_train)[i * input_channels + j] = (rand() % 1000) * 1.0 / 1000; // y_train[i] = 0; // } // for (int i = 0; i < num_val; i++) { // for (int j = 0; j < input_channels; j++) // ((float *)X_val)[i * input_channels + j] = (rand() % 1000) * 1.0 / 1000; // y_val[i] = rand() % 2; // } // int rows = 28, cols = 28, channels = 1; // vector<vector<uchar> > train_images, test_images; // vector<uchar> train_labels, test_labels; // 
readMNIST(train_images, test_images, train_labels, test_labels); // float *f_train_images, *f_train_labels, *f_test_images, *f_test_labels; float *f_train_images, *f_test_images; int *f_train_labels, *f_test_labels; int rows = 227, cols = 227, channels = 3; int input_size = rows * cols * channels; // f_train_images = (float *)malloc(num_train * input_size * sizeof(float)); // f_train_labels = (int *)malloc(num_train * sizeof(int)); checkCudaErrors(hipHostMalloc(&f_train_images, num_train * input_size * sizeof(float))); checkCudaErrors(hipHostMalloc(&f_train_labels, num_train * sizeof(int))); f_test_images = (float *)malloc(num_test * input_size * sizeof(float)); f_test_labels = (int *)malloc(num_test * sizeof(int)); float *mean_image; mean_image = (float *)malloc(input_size * sizeof(float)); for (int i = 0; i < input_size; i++) { mean_image[i] = 0; for (int k = 0; k < num_train; k++) { mean_image[i] += f_train_images[k * input_size + i]; } mean_image[i] /= num_train; } for (int i = 0; i < num_train; i++) { for (int j = 0; j < input_size; j++) { f_train_images[i * input_size + j] -= mean_image[j]; } } for (int i = 0; i < num_test; i++) { for (int j = 0; j < input_size; j++) { f_test_images[i * input_size + j] -= mean_image[j]; } } // int input_channels = rows * cols * channels * 3, hidden_channels1 = 50, hidden_channels2 = 100, output_channels = 10; // vector<LayerSpecifier> layer_specifier; // ConvDescriptor layer0; // LayerSpecifier temp; // layer0.initializeValues(1, 3, 3, 3, rows, cols, 1, 1, 1, 1); // temp.initPointer(CONV); // *((ConvDescriptor *)temp.params) = layer0; // layer_specifier.push_back(temp); // ActivationDescriptor layer0_actv; // layer0_actv.initializeValues(RELU, 3, rows, cols); // temp.initPointer(ACTV); // *((ActivationDescriptor *)temp.params) = layer0_actv; // layer_specifier.push_back(temp); // BatchNormDescriptor layer0_bn; // for (int i = 0; i < 200; i++) { // layer0_bn.initializeValues(BATCHNORM_SPATIAL, 1e-5, 0.1, 3, rows, cols); // temp.initPointer(BATCHNORM); // *((BatchNormDescriptor *)temp.params) = layer0_bn; // layer_specifier.push_back(temp); // layer0.initializeValues(3, 3, 3, 3, rows, cols, 1, 1, 1, 1); // temp.initPointer(CONV); // *((ConvDescriptor *)temp.params) = layer0; // layer_specifier.push_back(temp); // layer0_actv.initializeValues(RELU, 3, rows, cols); // temp.initPointer(ACTV); // *((ActivationDescriptor *)temp.params) = layer0_actv; // layer_specifier.push_back(temp); // } // PoolingDescriptor layer0_pool; // layer0_pool.initializeValues(3, 2, 2, rows, cols, 0, 0, 2, 2, POOLING_MAX); // temp.initPointer(POOLING); // *((PoolingDescriptor *)temp.params) = layer0_pool; // layer_specifier.push_back(temp); // layer0_bn.initializeValues(BATCHNORM_SPATIAL, 1e-5, 0.1, 3, rows / 2, cols / 2); // temp.initPointer(BATCHNORM); // *((BatchNormDescriptor *)temp.params) = layer0_bn; // layer_specifier.push_back(temp); // // DropoutDescriptor layer0_dropout; // // layer0_dropout.initializeValues(0.2, 3, rows / 2, cols / 2); // // temp.initPointer(DROPOUT); // // *((DropoutDescriptor *)temp.params) = layer0_dropout; // // layer_specifier.push_back(temp); // layer0.initializeValues(3, 3, 3, 3, rows / 2, cols / 2, 1, 1, 1, 1); // temp.initPointer(CONV); // *((ConvDescriptor *)temp.params) = layer0; // layer_specifier.push_back(temp); // layer0_actv.initializeValues(RELU, 3, rows / 2, cols / 2); // temp.initPointer(ACTV); // *((ActivationDescriptor *)temp.params) = layer0_actv; // layer_specifier.push_back(temp); // 
layer0_bn.initializeValues(BATCHNORM_SPATIAL, 1e-5, 0.1, 3, rows / 2, cols / 2); // temp.initPointer(BATCHNORM); // *((BatchNormDescriptor *)temp.params) = layer0_bn; // layer_specifier.push_back(temp); // FCDescriptor layer1; // layer1.initializeValues(input_channels, hidden_channels1); // temp.initPointer(FULLY_CONNECTED); // *((FCDescriptor *)(temp.params)) = layer1; // layer_specifier.push_back(temp); // temp.initPointer(ACTV); // ActivationDescriptor layer1_actv; // layer1_actv.initializeValues(RELU, hidden_channels1, 1, 1); // *((ActivationDescriptor *)temp.params) = layer1_actv; // layer_specifier.push_back(temp); // layer0_bn.initializeValues(BATCHNORM_PER_ACTIVATION, 1e-5, 0.1, hidden_channels1, 1, 1); // temp.initPointer(BATCHNORM); // *((BatchNormDescriptor *)temp.params) = layer0_bn; // layer_specifier.push_back(temp); // temp.initPointer(FULLY_CONNECTED); // FCDescriptor layer2; // layer2.initializeValues(hidden_channels1, output_channels); // *((FCDescriptor *)temp.params) = layer2; // layer_specifier.push_back(temp); // // temp.initPointer(FULLY_CONNECTED); // // FCDescriptor layer3; // // layer3.initializeValues(hidden_channels2, output_channels); // // *((FCDescriptor *)temp.params) = layer3; // // layer_specifier.push_back(temp); // temp.initPointer(SOFTMAX); // SoftmaxDescriptor smax; // smax.initializeValues(SOFTMAX_ACCURATE, SOFTMAX_MODE_INSTANCE, output_channels, 1, 1); // *((SoftmaxDescriptor *)(temp.params)) = smax; // layer_specifier.push_back(temp); // AlexNet vector<LayerSpecifier> layer_specifier; { ConvDescriptor layer0; layer0.initializeValues(3, 96, 11, 11, 227, 227, 0, 0, 4, 4, RELU); LayerSpecifier temp; temp.initPointer(CONV); *((ConvDescriptor *)temp.params) = layer0; layer_specifier.push_back(temp); } { PoolingDescriptor layer1; layer1.initializeValues(96, 3, 3, 55, 55, 0, 0, 2, 2, POOLING_MAX); LayerSpecifier temp; temp.initPointer(POOLING); *((PoolingDescriptor *)temp.params) = layer1; layer_specifier.push_back(temp); } { ConvDescriptor layer2; layer2.initializeValues(96, 256, 5, 5, 27, 27, 2, 2, 1, 1, RELU); LayerSpecifier temp; temp.initPointer(CONV); *((ConvDescriptor *)temp.params) = layer2; layer_specifier.push_back(temp); } { PoolingDescriptor layer3; layer3.initializeValues(256, 3, 3, 27, 27, 0, 0, 2, 2, POOLING_MAX); LayerSpecifier temp; temp.initPointer(POOLING); *((PoolingDescriptor *)temp.params) = layer3; layer_specifier.push_back(temp); } { ConvDescriptor layer4; layer4.initializeValues(256, 384, 3, 3, 13, 13, 1, 1, 1, 1, RELU); LayerSpecifier temp; temp.initPointer(CONV); *((ConvDescriptor *)temp.params) = layer4; layer_specifier.push_back(temp); } { ConvDescriptor layer5; layer5.initializeValues(384, 384, 3, 3, 13, 13, 1, 1, 1, 1, RELU); LayerSpecifier temp; temp.initPointer(CONV); *((ConvDescriptor *)temp.params) = layer5; layer_specifier.push_back(temp); } { ConvDescriptor layer6; layer6.initializeValues(384, 256, 3, 3, 13, 13, 1, 1, 1, 1, RELU); LayerSpecifier temp; temp.initPointer(CONV); *((ConvDescriptor *)temp.params) = layer6; layer_specifier.push_back(temp); } { PoolingDescriptor layer7; layer7.initializeValues(256, 3, 3, 13, 13, 0, 0, 2, 2, POOLING_MAX); LayerSpecifier temp; temp.initPointer(POOLING); *((PoolingDescriptor *)temp.params) = layer7; layer_specifier.push_back(temp); } { FCDescriptor layer8; layer8.initializeValues(9216, 4096, RELU); LayerSpecifier temp; temp.initPointer(FULLY_CONNECTED); *((FCDescriptor *)temp.params) = layer8; layer_specifier.push_back(temp); } { FCDescriptor layer9; layer9.initializeValues(4096, 
4096, RELU); LayerSpecifier temp; temp.initPointer(FULLY_CONNECTED); *((FCDescriptor *)temp.params) = layer9; layer_specifier.push_back(temp); } { FCDescriptor layer10; layer10.initializeValues(4096, 1000); LayerSpecifier temp; temp.initPointer(FULLY_CONNECTED); *((FCDescriptor *)temp.params) = layer10; layer_specifier.push_back(temp); } { SoftmaxDescriptor layer11; layer11.initializeValues(SOFTMAX_ACCURATE, SOFTMAX_MODE_INSTANCE, 1000, 1, 1); LayerSpecifier temp; temp.initPointer(SOFTMAX); *((SoftmaxDescriptor *)temp.params) = layer11; layer_specifier.push_back(temp); } vDNNConvAlgo vdnn_conv_algo = vDNN_PERFORMANCE_OPTIMAL; vDNNType vdnn_type = vDNN_DYN; string filename("vdnn_dyn"); if (argc == 3) { filename.assign("vdnn"); // argv[1] - layers to offload, argv[2] - conv algo to use if (strcmp(argv[1], "dyn") == 0) { vdnn_type = vDNN_DYN; filename.append("_dyn"); } else if (strcmp(argv[1], "conv") == 0) { vdnn_type = vDNN_CONV; filename.append("_conv"); } else if (strcmp(argv[1], "all") == 0) { vdnn_type = vDNN_ALL; filename.append("_all"); } else if (strcmp(argv[1], "alternate_conv") == 0) { vdnn_type = vDNN_ALTERNATE_CONV; filename.append("_alternate_conv"); } else { printf("invalid argument.. using vdnn dynamic\n"); filename.assign("vdnn_dyn"); } if ((strcmp(argv[1], "conv") == 0 or strcmp(argv[1], "all") == 0 or strcmp(argv[1], "alternate_conv") == 0)) { if (strcmp(argv[2], "p") == 0) { vdnn_conv_algo = vDNN_PERFORMANCE_OPTIMAL; filename.append("_p"); } else if (strcmp(argv[2], "m") == 0) { vdnn_conv_algo = vDNN_MEMORY_OPTIMAL; filename.append("_m"); } else { printf("invalid argument.. using vdnn dynamic\n"); filename.assign("vdnn_dyn"); } } } int batch_size = 256; long long dropout_seed = 1; float softmax_eps = 1e-8; float init_std_dev = 0.1; NeuralNet net(layer_specifier, DATA_FLOAT, batch_size, TENSOR_NCHW, dropout_seed, softmax_eps, init_std_dev, vdnn_type, vdnn_conv_algo, SGD); int num_epoch = 1000; double learning_rate = 1e-3; double learning_rate_decay = 0.9; Solver solver(&net, (void *)f_train_images, f_train_labels, (void *)f_train_images, f_train_labels, num_epoch, SGD, learning_rate, learning_rate_decay, num_train, num_train); vector<float> loss; vector<float> time; vector<vector<float> > fwd_vdnn_lag, bwd_vdnn_lag; solver.getTrainTime(loss, time, 100, fwd_vdnn_lag, bwd_vdnn_lag); printTimes(time, filename); printvDNNLag(fwd_vdnn_lag, bwd_vdnn_lag, filename); } void printTimes(vector<float> &time, string filename) { float mean_time = 0.0; float std_dev = 0.0; int N = time.size(); for (int i = 0; i < N; i++) { mean_time += time[i]; } mean_time /= N; for (int i = 0; i < N; i++) { std_dev += pow(time[i] - mean_time, 2); } std_dev /= N; std_dev = pow(std_dev, 0.5); cout << "Average time: " << mean_time << endl; cout << "Standard deviation: " << std_dev << endl; filename.append(".dat"); fstream f; f.open(filename.c_str(), ios_base::out); for (int i = 0; i < N; i++) { f << time[i] << endl; } f << "mean_time: " << mean_time << endl; f << "standard_deviation: " << std_dev << endl; f.close(); filename.append(".bin"); fstream f_bin; f_bin.open(filename.c_str(), ios_base::out); f_bin.write((char *)&N, sizeof(N)); for (int i = 0; i < N; i++) { f_bin.write((char *)&time[i], sizeof(time[i])); } f_bin.close(); } void printvDNNLag(vector<vector<float> > &fwd_vdnn_lag, vector<vector<float> > &bwd_vdnn_lag, string filename) { filename.append("_lag.dat"); fstream f; f.open(filename.c_str(), ios_base::out); int N = fwd_vdnn_lag.size(); for (int i = 0; i < N; i++) { for (int j = 0; j < 
fwd_vdnn_lag[i].size(); j++) { f << "fwd" << j << ": " << fwd_vdnn_lag[i][j] << endl; } for (int j = 0; j < bwd_vdnn_lag[i].size(); j++) { f << "bwd" << j << ": " << bwd_vdnn_lag[i][j] << endl; } f << endl; } f.close(); }
4db8239bdd0f164929ef9bf3f5cfbf471473eeea.cu
#include <iostream> #include <cstdlib> #include <cmath> #include <vector> #include <string> #include "solver.h" using namespace std; typedef unsigned char uchar; int num_train = 512, num_test = 500; int reverseInt(int n) { int bytes = 4; unsigned char ch[bytes]; for (int i = 0; i < bytes; i++) { ch[i] = (n >> i * 8) & 255; } int p = 0; for (int i = 0; i < bytes; i++) { p += (int) ch[i] << (bytes - i - 1) * 8; } return p; } void readMNIST(vector<vector<uchar> > &train_images, vector<vector<uchar> > &test_images, vector<uchar> &train_labels, vector<uchar> &test_labels) { string filename_train_images = "data/train-images.idx3-ubyte"; string filename_train_labels = "data/train-labels.idx1-ubyte"; string filename_test_images = "data/t10k-images.idx3-ubyte"; string filename_test_labels = "data/t10k-labels.idx1-ubyte"; // read train/test images for (int i = 0; i < 2; i++) { string filename; if (i == 0) filename = filename_train_images; else filename = filename_test_images; ifstream f(filename.c_str(), ios::binary); if (!f.is_open()) printf("Cannot read MNIST from %s\n", filename.c_str()); // read metadata int magic_number = 0, n_images = 0, n_rows = 0, n_cols = 0; f.read((char *) &magic_number, sizeof(magic_number)); magic_number = reverseInt(magic_number); f.read((char *) &n_images, sizeof(n_images)); n_images = reverseInt(n_images); f.read((char *) &n_rows, sizeof(n_rows)); n_rows = reverseInt(n_rows); f.read((char *) &n_cols, sizeof(n_cols)); n_cols = reverseInt(n_cols); for (int k = 0; k < n_images; k++) { vector<uchar> temp; temp.reserve(n_rows * n_cols); for (int j = 0; j < n_rows * n_cols; j++) { uchar t = 0; f.read((char *)&t, sizeof(t)); temp.push_back(t); } if (i == 0) train_images.push_back(temp); else test_images.push_back(temp); } f.close(); } // read train/test labels for (int i = 0; i < 2; i++) { string filename; if (i == 0) filename = filename_train_labels; else filename = filename_test_labels; ifstream f(filename.c_str(), ios::binary); if (!f.is_open()) printf("Cannot read MNIST from %s\n", filename.c_str()); // read metadata int magic_number = 0, n_labels = 0; f.read((char *) &magic_number, sizeof(magic_number)); magic_number = reverseInt(magic_number); f.read((char *) &n_labels, sizeof(n_labels)); n_labels = reverseInt(n_labels); for (int k = 0; k < n_labels; k++) { uchar t = 0; f.read((char *)&t, sizeof(t)); if (i == 0) train_labels.push_back(t); else test_labels.push_back(t); } f.close(); } } void printTimes(vector<float> &time, string filename); void printvDNNLag(vector<vector<float> > &fwd_vdnn_lag, vector<vector<float> > &bwd_vdnn_lag, string filename); int main(int argc, char *argv[]) { // int num_train = 100 * batch_size, num_val = batch_size; // void *X_train = malloc(num_train * input_channels * sizeof(float)); // int *y_train = (int *)malloc(num_train * sizeof(int)); // void *X_val = malloc(num_val * input_channels * sizeof(float)); // int *y_val = (int *)malloc(num_val * sizeof(int)); // for (int i = 0; i < num_train; i++) { // for (int j = 0; j < input_channels; j++) // ((float *)X_train)[i * input_channels + j] = (rand() % 1000) * 1.0 / 1000; // y_train[i] = 0; // } // for (int i = 0; i < num_val; i++) { // for (int j = 0; j < input_channels; j++) // ((float *)X_val)[i * input_channels + j] = (rand() % 1000) * 1.0 / 1000; // y_val[i] = rand() % 2; // } // int rows = 28, cols = 28, channels = 1; // vector<vector<uchar> > train_images, test_images; // vector<uchar> train_labels, test_labels; // readMNIST(train_images, test_images, train_labels, test_labels); // float 
*f_train_images, *f_train_labels, *f_test_images, *f_test_labels; float *f_train_images, *f_test_images; int *f_train_labels, *f_test_labels; int rows = 227, cols = 227, channels = 3; int input_size = rows * cols * channels; // f_train_images = (float *)malloc(num_train * input_size * sizeof(float)); // f_train_labels = (int *)malloc(num_train * sizeof(int)); checkCudaErrors(cudaMallocHost(&f_train_images, num_train * input_size * sizeof(float))); checkCudaErrors(cudaMallocHost(&f_train_labels, num_train * sizeof(int))); f_test_images = (float *)malloc(num_test * input_size * sizeof(float)); f_test_labels = (int *)malloc(num_test * sizeof(int)); float *mean_image; mean_image = (float *)malloc(input_size * sizeof(float)); for (int i = 0; i < input_size; i++) { mean_image[i] = 0; for (int k = 0; k < num_train; k++) { mean_image[i] += f_train_images[k * input_size + i]; } mean_image[i] /= num_train; } for (int i = 0; i < num_train; i++) { for (int j = 0; j < input_size; j++) { f_train_images[i * input_size + j] -= mean_image[j]; } } for (int i = 0; i < num_test; i++) { for (int j = 0; j < input_size; j++) { f_test_images[i * input_size + j] -= mean_image[j]; } } // int input_channels = rows * cols * channels * 3, hidden_channels1 = 50, hidden_channels2 = 100, output_channels = 10; // vector<LayerSpecifier> layer_specifier; // ConvDescriptor layer0; // LayerSpecifier temp; // layer0.initializeValues(1, 3, 3, 3, rows, cols, 1, 1, 1, 1); // temp.initPointer(CONV); // *((ConvDescriptor *)temp.params) = layer0; // layer_specifier.push_back(temp); // ActivationDescriptor layer0_actv; // layer0_actv.initializeValues(RELU, 3, rows, cols); // temp.initPointer(ACTV); // *((ActivationDescriptor *)temp.params) = layer0_actv; // layer_specifier.push_back(temp); // BatchNormDescriptor layer0_bn; // for (int i = 0; i < 200; i++) { // layer0_bn.initializeValues(BATCHNORM_SPATIAL, 1e-5, 0.1, 3, rows, cols); // temp.initPointer(BATCHNORM); // *((BatchNormDescriptor *)temp.params) = layer0_bn; // layer_specifier.push_back(temp); // layer0.initializeValues(3, 3, 3, 3, rows, cols, 1, 1, 1, 1); // temp.initPointer(CONV); // *((ConvDescriptor *)temp.params) = layer0; // layer_specifier.push_back(temp); // layer0_actv.initializeValues(RELU, 3, rows, cols); // temp.initPointer(ACTV); // *((ActivationDescriptor *)temp.params) = layer0_actv; // layer_specifier.push_back(temp); // } // PoolingDescriptor layer0_pool; // layer0_pool.initializeValues(3, 2, 2, rows, cols, 0, 0, 2, 2, POOLING_MAX); // temp.initPointer(POOLING); // *((PoolingDescriptor *)temp.params) = layer0_pool; // layer_specifier.push_back(temp); // layer0_bn.initializeValues(BATCHNORM_SPATIAL, 1e-5, 0.1, 3, rows / 2, cols / 2); // temp.initPointer(BATCHNORM); // *((BatchNormDescriptor *)temp.params) = layer0_bn; // layer_specifier.push_back(temp); // // DropoutDescriptor layer0_dropout; // // layer0_dropout.initializeValues(0.2, 3, rows / 2, cols / 2); // // temp.initPointer(DROPOUT); // // *((DropoutDescriptor *)temp.params) = layer0_dropout; // // layer_specifier.push_back(temp); // layer0.initializeValues(3, 3, 3, 3, rows / 2, cols / 2, 1, 1, 1, 1); // temp.initPointer(CONV); // *((ConvDescriptor *)temp.params) = layer0; // layer_specifier.push_back(temp); // layer0_actv.initializeValues(RELU, 3, rows / 2, cols / 2); // temp.initPointer(ACTV); // *((ActivationDescriptor *)temp.params) = layer0_actv; // layer_specifier.push_back(temp); // layer0_bn.initializeValues(BATCHNORM_SPATIAL, 1e-5, 0.1, 3, rows / 2, cols / 2); // temp.initPointer(BATCHNORM); // 
*((BatchNormDescriptor *)temp.params) = layer0_bn; // layer_specifier.push_back(temp); // FCDescriptor layer1; // layer1.initializeValues(input_channels, hidden_channels1); // temp.initPointer(FULLY_CONNECTED); // *((FCDescriptor *)(temp.params)) = layer1; // layer_specifier.push_back(temp); // temp.initPointer(ACTV); // ActivationDescriptor layer1_actv; // layer1_actv.initializeValues(RELU, hidden_channels1, 1, 1); // *((ActivationDescriptor *)temp.params) = layer1_actv; // layer_specifier.push_back(temp); // layer0_bn.initializeValues(BATCHNORM_PER_ACTIVATION, 1e-5, 0.1, hidden_channels1, 1, 1); // temp.initPointer(BATCHNORM); // *((BatchNormDescriptor *)temp.params) = layer0_bn; // layer_specifier.push_back(temp); // temp.initPointer(FULLY_CONNECTED); // FCDescriptor layer2; // layer2.initializeValues(hidden_channels1, output_channels); // *((FCDescriptor *)temp.params) = layer2; // layer_specifier.push_back(temp); // // temp.initPointer(FULLY_CONNECTED); // // FCDescriptor layer3; // // layer3.initializeValues(hidden_channels2, output_channels); // // *((FCDescriptor *)temp.params) = layer3; // // layer_specifier.push_back(temp); // temp.initPointer(SOFTMAX); // SoftmaxDescriptor smax; // smax.initializeValues(SOFTMAX_ACCURATE, SOFTMAX_MODE_INSTANCE, output_channels, 1, 1); // *((SoftmaxDescriptor *)(temp.params)) = smax; // layer_specifier.push_back(temp); // AlexNet vector<LayerSpecifier> layer_specifier; { ConvDescriptor layer0; layer0.initializeValues(3, 96, 11, 11, 227, 227, 0, 0, 4, 4, RELU); LayerSpecifier temp; temp.initPointer(CONV); *((ConvDescriptor *)temp.params) = layer0; layer_specifier.push_back(temp); } { PoolingDescriptor layer1; layer1.initializeValues(96, 3, 3, 55, 55, 0, 0, 2, 2, POOLING_MAX); LayerSpecifier temp; temp.initPointer(POOLING); *((PoolingDescriptor *)temp.params) = layer1; layer_specifier.push_back(temp); } { ConvDescriptor layer2; layer2.initializeValues(96, 256, 5, 5, 27, 27, 2, 2, 1, 1, RELU); LayerSpecifier temp; temp.initPointer(CONV); *((ConvDescriptor *)temp.params) = layer2; layer_specifier.push_back(temp); } { PoolingDescriptor layer3; layer3.initializeValues(256, 3, 3, 27, 27, 0, 0, 2, 2, POOLING_MAX); LayerSpecifier temp; temp.initPointer(POOLING); *((PoolingDescriptor *)temp.params) = layer3; layer_specifier.push_back(temp); } { ConvDescriptor layer4; layer4.initializeValues(256, 384, 3, 3, 13, 13, 1, 1, 1, 1, RELU); LayerSpecifier temp; temp.initPointer(CONV); *((ConvDescriptor *)temp.params) = layer4; layer_specifier.push_back(temp); } { ConvDescriptor layer5; layer5.initializeValues(384, 384, 3, 3, 13, 13, 1, 1, 1, 1, RELU); LayerSpecifier temp; temp.initPointer(CONV); *((ConvDescriptor *)temp.params) = layer5; layer_specifier.push_back(temp); } { ConvDescriptor layer6; layer6.initializeValues(384, 256, 3, 3, 13, 13, 1, 1, 1, 1, RELU); LayerSpecifier temp; temp.initPointer(CONV); *((ConvDescriptor *)temp.params) = layer6; layer_specifier.push_back(temp); } { PoolingDescriptor layer7; layer7.initializeValues(256, 3, 3, 13, 13, 0, 0, 2, 2, POOLING_MAX); LayerSpecifier temp; temp.initPointer(POOLING); *((PoolingDescriptor *)temp.params) = layer7; layer_specifier.push_back(temp); } { FCDescriptor layer8; layer8.initializeValues(9216, 4096, RELU); LayerSpecifier temp; temp.initPointer(FULLY_CONNECTED); *((FCDescriptor *)temp.params) = layer8; layer_specifier.push_back(temp); } { FCDescriptor layer9; layer9.initializeValues(4096, 4096, RELU); LayerSpecifier temp; temp.initPointer(FULLY_CONNECTED); *((FCDescriptor *)temp.params) = layer9; 
layer_specifier.push_back(temp); } { FCDescriptor layer10; layer10.initializeValues(4096, 1000); LayerSpecifier temp; temp.initPointer(FULLY_CONNECTED); *((FCDescriptor *)temp.params) = layer10; layer_specifier.push_back(temp); } { SoftmaxDescriptor layer11; layer11.initializeValues(SOFTMAX_ACCURATE, SOFTMAX_MODE_INSTANCE, 1000, 1, 1); LayerSpecifier temp; temp.initPointer(SOFTMAX); *((SoftmaxDescriptor *)temp.params) = layer11; layer_specifier.push_back(temp); } vDNNConvAlgo vdnn_conv_algo = vDNN_PERFORMANCE_OPTIMAL; vDNNType vdnn_type = vDNN_DYN; string filename("vdnn_dyn"); if (argc == 3) { filename.assign("vdnn"); // argv[1] - layers to offload, argv[2] - conv algo to use if (strcmp(argv[1], "dyn") == 0) { vdnn_type = vDNN_DYN; filename.append("_dyn"); } else if (strcmp(argv[1], "conv") == 0) { vdnn_type = vDNN_CONV; filename.append("_conv"); } else if (strcmp(argv[1], "all") == 0) { vdnn_type = vDNN_ALL; filename.append("_all"); } else if (strcmp(argv[1], "alternate_conv") == 0) { vdnn_type = vDNN_ALTERNATE_CONV; filename.append("_alternate_conv"); } else { printf("invalid argument.. using vdnn dynamic\n"); filename.assign("vdnn_dyn"); } if ((strcmp(argv[1], "conv") == 0 or strcmp(argv[1], "all") == 0 or strcmp(argv[1], "alternate_conv") == 0)) { if (strcmp(argv[2], "p") == 0) { vdnn_conv_algo = vDNN_PERFORMANCE_OPTIMAL; filename.append("_p"); } else if (strcmp(argv[2], "m") == 0) { vdnn_conv_algo = vDNN_MEMORY_OPTIMAL; filename.append("_m"); } else { printf("invalid argument.. using vdnn dynamic\n"); filename.assign("vdnn_dyn"); } } } int batch_size = 256; long long dropout_seed = 1; float softmax_eps = 1e-8; float init_std_dev = 0.1; NeuralNet net(layer_specifier, DATA_FLOAT, batch_size, TENSOR_NCHW, dropout_seed, softmax_eps, init_std_dev, vdnn_type, vdnn_conv_algo, SGD); int num_epoch = 1000; double learning_rate = 1e-3; double learning_rate_decay = 0.9; Solver solver(&net, (void *)f_train_images, f_train_labels, (void *)f_train_images, f_train_labels, num_epoch, SGD, learning_rate, learning_rate_decay, num_train, num_train); vector<float> loss; vector<float> time; vector<vector<float> > fwd_vdnn_lag, bwd_vdnn_lag; solver.getTrainTime(loss, time, 100, fwd_vdnn_lag, bwd_vdnn_lag); printTimes(time, filename); printvDNNLag(fwd_vdnn_lag, bwd_vdnn_lag, filename); } void printTimes(vector<float> &time, string filename) { float mean_time = 0.0; float std_dev = 0.0; int N = time.size(); for (int i = 0; i < N; i++) { mean_time += time[i]; } mean_time /= N; for (int i = 0; i < N; i++) { std_dev += pow(time[i] - mean_time, 2); } std_dev /= N; std_dev = pow(std_dev, 0.5); cout << "Average time: " << mean_time << endl; cout << "Standard deviation: " << std_dev << endl; filename.append(".dat"); fstream f; f.open(filename.c_str(), ios_base::out); for (int i = 0; i < N; i++) { f << time[i] << endl; } f << "mean_time: " << mean_time << endl; f << "standard_deviation: " << std_dev << endl; f.close(); filename.append(".bin"); fstream f_bin; f_bin.open(filename.c_str(), ios_base::out); f_bin.write((char *)&N, sizeof(N)); for (int i = 0; i < N; i++) { f_bin.write((char *)&time[i], sizeof(time[i])); } f_bin.close(); } void printvDNNLag(vector<vector<float> > &fwd_vdnn_lag, vector<vector<float> > &bwd_vdnn_lag, string filename) { filename.append("_lag.dat"); fstream f; f.open(filename.c_str(), ios_base::out); int N = fwd_vdnn_lag.size(); for (int i = 0; i < N; i++) { for (int j = 0; j < fwd_vdnn_lag[i].size(); j++) { f << "fwd" << j << ": " << fwd_vdnn_lag[i][j] << endl; } for (int j = 0; j < 
bwd_vdnn_lag[i].size(); j++) { f << "bwd" << j << ": " << bwd_vdnn_lag[i][j] << endl; } f << endl; } f.close(); }
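Apart from the hipify banner, this vDNN/AlexNet benchmark pair changes in exactly one call: the pinned host allocation cudaMallocHost becomes hipHostMalloc. A minimal sketch of that mapping follows, assuming a HIP toolchain; the buffer size is arbitrary.

#include <hip/hip_runtime.h>
#include <cstdio>

int main() {
  float* h_buf = nullptr;
  // CUDA source: cudaMallocHost(&h_buf, 1024 * sizeof(float));
  // hipify maps cudaMallocHost to hipHostMalloc; the memory is pinned,
  // so it stays host-accessible and speeds up host<->device copies.
  hipError_t err =
      hipHostMalloc(reinterpret_cast<void**>(&h_buf), 1024 * sizeof(float));
  if (err != hipSuccess) {
    printf("hipHostMalloc failed: %s\n", hipGetErrorString(err));
    return 1;
  }
  h_buf[0] = 1.0f;  // pinned memory is directly writable from the host
  hipHostFree(h_buf);
  return 0;
}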
e205edbae8df222995a788eb751a2565cbf7a6ff.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/fluid/operators/detection/sigmoid_focal_loss_op.h" #include "paddle/phi/backends/gpu/gpu_primitives.h" #include "paddle/phi/core/hostdevice.h" #include "paddle/phi/kernels/funcs/math.h" namespace paddle { namespace operators { static constexpr int kNumCUDAThreads = 512; static constexpr int kNumMaxinumNumBlocks = 4096; static inline int NumBlocks(const int N) { return ::min((N + kNumCUDAThreads - 1) / kNumCUDAThreads, kNumMaxinumNumBlocks); } template <typename T> __global__ void GPUSigmoidFocalLossForward(const T *x_data, const int *label_data, const int *fg_num_data, const T gamma, const T alpha, const int num_classes, const int limit, T *out_data) { CUDA_KERNEL_LOOP(i, limit) { T x = x_data[i]; int a = i / num_classes; // current sample int d = i % num_classes; // current class int g = label_data[a]; // target // check whether the input data is positive or negative // the target classes are in range 1-81 // and the d is in range 0-80 T c_pos = static_cast<T>(g == (d + 1)); T c_neg = static_cast<T>((g != -1) & (g != (d + 1))); T fg_num = static_cast<T>((fg_num_data[0] > 1) ? fg_num_data[0] : 1); T s_neg = (1.0 - alpha) / fg_num; T s_pos = alpha / fg_num; // p = 1. / 1. + expf(-x) T p = 1. / (1. + phi::funcs::real_exp(-x)); // (1 - p)**gamma * log(p) T term_pos = ::pow(static_cast<T>(1. - p), gamma) * phi::funcs::real_log(p > FLT_MIN ? p : FLT_MIN); // p**gamma * log(1 - p) T term_neg = ::pow(p, gamma) * (-1. * x * (x >= 0) - phi::funcs::real_log( 1. + phi::funcs::real_exp(x - 2. * x * (x >= 0)))); out_data[i] = 0.0; out_data[i] += -c_pos * term_pos * s_pos; out_data[i] += -c_neg * term_neg * s_neg; } } template <typename T> __global__ void GPUSigmoidFocalLossBackward(const T *x_data, const int *label_data, const int *fg_num_data, const T gamma, const T alpha, const int num_classes, const T *dout_data, const int limit, T *dx_data) { CUDA_KERNEL_LOOP(i, limit) { T x = x_data[i]; T dout = dout_data[i]; int a = i / num_classes; // current sample int d = i % num_classes; // current class T fg_num = static_cast<T>((fg_num_data[0] > 1) ? fg_num_data[0] : 1); T s_neg = (1.0 - alpha) / fg_num; T s_pos = alpha / fg_num; int g = label_data[a]; T c_pos = static_cast<T>(g == (d + 1)); T c_neg = static_cast<T>((g != -1) & (g != (d + 1))); T p = 1. / (1. + phi::funcs::real_exp(-x)); // (1-p)**g * (1 - p - g*p*log(p)) T term_pos = ::pow(static_cast<T>(1. - p), gamma) * (1. - p - (p * gamma * phi::funcs::real_log(p > FLT_MIN ? p : FLT_MIN))); // (p**g) * (g*(1-p)*log(1-p) - p) T term_neg = ::pow(p, gamma) * ((-1. * x * (x >= 0) - phi::funcs::real_log( 1. + phi::funcs::real_exp(x - 2. * x * (x >= 0)))) * (1. 
- p) * gamma - p); dx_data[i] = 0.0; dx_data[i] += -c_pos * s_pos * term_pos; dx_data[i] += -c_neg * s_neg * term_neg; dx_data[i] = dx_data[i] * dout; } } template <typename T, typename DeviceContext> class GPUSigmoidFocalLossKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext &context) const override { const phi::DenseTensor *X = context.Input<phi::DenseTensor>("X"); const phi::DenseTensor *Labels = context.Input<phi::DenseTensor>("Label"); const phi::DenseTensor *FgNum = context.Input<phi::DenseTensor>("FgNum"); phi::DenseTensor *Out = context.Output<phi::DenseTensor>("Out"); T gamma = static_cast<T>(context.Attr<float>("gamma")); T alpha = static_cast<T>(context.Attr<float>("alpha")); auto x_dims = X->dims(); int num_classes = static_cast<int>(x_dims[1]); auto out_data = Out->mutable_data<T>(context.GetPlace()); auto &dev_ctx = context.cuda_device_context(); int limit = Out->numel(); int blocks = NumBlocks(limit); int threads = kNumCUDAThreads; hipLaunchKernelGGL(( GPUSigmoidFocalLossForward<T>) , dim3(blocks), dim3(threads), 0, dev_ctx.stream(), X->data<T>(), Labels->data<int>(), FgNum->data<int>(), gamma, alpha, num_classes, limit, out_data); } }; template <typename T, typename DeviceContext> class GPUSigmoidFocalLossGradKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext &context) const override { const phi::DenseTensor *X = context.Input<phi::DenseTensor>("X"); const phi::DenseTensor *Labels = context.Input<phi::DenseTensor>("Label"); const phi::DenseTensor *FgNum = context.Input<phi::DenseTensor>("FgNum"); const phi::DenseTensor *dOut = context.Input<phi::DenseTensor>(framework::GradVarName("Out")); phi::DenseTensor *dX = context.Output<phi::DenseTensor>(framework::GradVarName("X")); auto dx_data = dX->mutable_data<T>(context.GetPlace()); T gamma = static_cast<T>(context.Attr<float>("gamma")); T alpha = static_cast<T>(context.Attr<float>("alpha")); auto x_dims = X->dims(); int num_classes = static_cast<int>(x_dims[1]); auto &dev_ctx = context.cuda_device_context(); int limit = dX->numel(); int blocks = NumBlocks(limit); int threads = kNumCUDAThreads; hipLaunchKernelGGL(( GPUSigmoidFocalLossBackward<T>) , dim3(blocks), dim3(threads), 0, dev_ctx.stream(), X->data<T>(), Labels->data<int>(), FgNum->data<int>(), gamma, alpha, num_classes, dOut->data<T>(), limit, dx_data); } }; } // namespace operators } // namespace paddle namespace ops = paddle::operators; PD_REGISTER_STRUCT_KERNEL(sigmoid_focal_loss, GPU, ALL_LAYOUT, ops::GPUSigmoidFocalLossKernel, float, double) {} PD_REGISTER_STRUCT_KERNEL(sigmoid_focal_loss_grad, GPU, ALL_LAYOUT, ops::GPUSigmoidFocalLossGradKernel, float, double) {}
e205edbae8df222995a788eb751a2565cbf7a6ff.cu
/* Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/fluid/operators/detection/sigmoid_focal_loss_op.h" #include "paddle/phi/backends/gpu/gpu_primitives.h" #include "paddle/phi/core/hostdevice.h" #include "paddle/phi/kernels/funcs/math.h" namespace paddle { namespace operators { static constexpr int kNumCUDAThreads = 512; static constexpr int kNumMaxinumNumBlocks = 4096; static inline int NumBlocks(const int N) { return std::min((N + kNumCUDAThreads - 1) / kNumCUDAThreads, kNumMaxinumNumBlocks); } template <typename T> __global__ void GPUSigmoidFocalLossForward(const T *x_data, const int *label_data, const int *fg_num_data, const T gamma, const T alpha, const int num_classes, const int limit, T *out_data) { CUDA_KERNEL_LOOP(i, limit) { T x = x_data[i]; int a = i / num_classes; // current sample int d = i % num_classes; // current class int g = label_data[a]; // target // check whether the input data is positive or negative // the target classes are in range 1-81 // and the d is in range 0-80 T c_pos = static_cast<T>(g == (d + 1)); T c_neg = static_cast<T>((g != -1) & (g != (d + 1))); T fg_num = static_cast<T>((fg_num_data[0] > 1) ? fg_num_data[0] : 1); T s_neg = (1.0 - alpha) / fg_num; T s_pos = alpha / fg_num; // p = 1. / 1. + expf(-x) T p = 1. / (1. + phi::funcs::real_exp(-x)); // (1 - p)**gamma * log(p) T term_pos = std::pow(static_cast<T>(1. - p), gamma) * phi::funcs::real_log(p > FLT_MIN ? p : FLT_MIN); // p**gamma * log(1 - p) T term_neg = std::pow(p, gamma) * (-1. * x * (x >= 0) - phi::funcs::real_log( 1. + phi::funcs::real_exp(x - 2. * x * (x >= 0)))); out_data[i] = 0.0; out_data[i] += -c_pos * term_pos * s_pos; out_data[i] += -c_neg * term_neg * s_neg; } } template <typename T> __global__ void GPUSigmoidFocalLossBackward(const T *x_data, const int *label_data, const int *fg_num_data, const T gamma, const T alpha, const int num_classes, const T *dout_data, const int limit, T *dx_data) { CUDA_KERNEL_LOOP(i, limit) { T x = x_data[i]; T dout = dout_data[i]; int a = i / num_classes; // current sample int d = i % num_classes; // current class T fg_num = static_cast<T>((fg_num_data[0] > 1) ? fg_num_data[0] : 1); T s_neg = (1.0 - alpha) / fg_num; T s_pos = alpha / fg_num; int g = label_data[a]; T c_pos = static_cast<T>(g == (d + 1)); T c_neg = static_cast<T>((g != -1) & (g != (d + 1))); T p = 1. / (1. + phi::funcs::real_exp(-x)); // (1-p)**g * (1 - p - g*p*log(p)) T term_pos = std::pow(static_cast<T>(1. - p), gamma) * (1. - p - (p * gamma * phi::funcs::real_log(p > FLT_MIN ? p : FLT_MIN))); // (p**g) * (g*(1-p)*log(1-p) - p) T term_neg = std::pow(p, gamma) * ((-1. * x * (x >= 0) - phi::funcs::real_log( 1. + phi::funcs::real_exp(x - 2. * x * (x >= 0)))) * (1. 
- p) * gamma - p); dx_data[i] = 0.0; dx_data[i] += -c_pos * s_pos * term_pos; dx_data[i] += -c_neg * s_neg * term_neg; dx_data[i] = dx_data[i] * dout; } } template <typename T, typename DeviceContext> class GPUSigmoidFocalLossKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext &context) const override { const phi::DenseTensor *X = context.Input<phi::DenseTensor>("X"); const phi::DenseTensor *Labels = context.Input<phi::DenseTensor>("Label"); const phi::DenseTensor *FgNum = context.Input<phi::DenseTensor>("FgNum"); phi::DenseTensor *Out = context.Output<phi::DenseTensor>("Out"); T gamma = static_cast<T>(context.Attr<float>("gamma")); T alpha = static_cast<T>(context.Attr<float>("alpha")); auto x_dims = X->dims(); int num_classes = static_cast<int>(x_dims[1]); auto out_data = Out->mutable_data<T>(context.GetPlace()); auto &dev_ctx = context.cuda_device_context(); int limit = Out->numel(); int blocks = NumBlocks(limit); int threads = kNumCUDAThreads; GPUSigmoidFocalLossForward<T> <<<blocks, threads, 0, dev_ctx.stream()>>>(X->data<T>(), Labels->data<int>(), FgNum->data<int>(), gamma, alpha, num_classes, limit, out_data); } }; template <typename T, typename DeviceContext> class GPUSigmoidFocalLossGradKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext &context) const override { const phi::DenseTensor *X = context.Input<phi::DenseTensor>("X"); const phi::DenseTensor *Labels = context.Input<phi::DenseTensor>("Label"); const phi::DenseTensor *FgNum = context.Input<phi::DenseTensor>("FgNum"); const phi::DenseTensor *dOut = context.Input<phi::DenseTensor>(framework::GradVarName("Out")); phi::DenseTensor *dX = context.Output<phi::DenseTensor>(framework::GradVarName("X")); auto dx_data = dX->mutable_data<T>(context.GetPlace()); T gamma = static_cast<T>(context.Attr<float>("gamma")); T alpha = static_cast<T>(context.Attr<float>("alpha")); auto x_dims = X->dims(); int num_classes = static_cast<int>(x_dims[1]); auto &dev_ctx = context.cuda_device_context(); int limit = dX->numel(); int blocks = NumBlocks(limit); int threads = kNumCUDAThreads; GPUSigmoidFocalLossBackward<T> <<<blocks, threads, 0, dev_ctx.stream()>>>(X->data<T>(), Labels->data<int>(), FgNum->data<int>(), gamma, alpha, num_classes, dOut->data<T>(), limit, dx_data); } }; } // namespace operators } // namespace paddle namespace ops = paddle::operators; PD_REGISTER_STRUCT_KERNEL(sigmoid_focal_loss, GPU, ALL_LAYOUT, ops::GPUSigmoidFocalLossKernel, float, double) {} PD_REGISTER_STRUCT_KERNEL(sigmoid_focal_loss_grad, GPU, ALL_LAYOUT, ops::GPUSigmoidFocalLossGradKernel, float, double) {}
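A note on launch sizing in this pair: NumBlocks caps the grid at kNumMaxinumNumBlocks = 4096 blocks, so for large limit there are fewer threads than elements, and coverage relies on CUDA_KERNEL_LOOP iterating in grid-sized strides. A sketch of that pattern (an illustrative expansion, not the verbatim Paddle macro):

__global__ void grid_stride_example(const float *in, float *out, int limit) {
    // each thread starts at its global index and advances by the total number
    // of launched threads, so any grid size still covers all limit elements
    for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < limit;
         i += blockDim.x * gridDim.x) {
        out[i] = in[i];
    }
}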
4154b3f6d6ccb14e3cd3363d6416359526f82bdd.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 1.7.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date September 2015 @generated from clag2z.cu mixed zc -> ds, Fri Sep 11 18:29:19 2015 @author Mark Gates */ #include "common_magma.h" #define BLK_X 64 #define BLK_Y 32 /* Divides matrix into ceil( m/BLK_X ) x ceil( n/BLK_Y ) blocks. Each block has BLK_X threads. Each thread loops across one row, updating BLK_Y entries. Code similar to slat2d and zlaset. */ __global__ void slag2d_kernel( int m, int n, const float *SA, int ldsa, double *A, int lda ) { int ind = blockIdx.x*BLK_X + threadIdx.x; int iby = blockIdx.y*BLK_Y; /* check if full block-column */ bool full = (iby + BLK_Y <= n); /* do only rows inside matrix */ if ( ind < m ) { A += ind + iby*lda; SA += ind + iby*ldsa; if ( full ) { // full block-column #pragma unroll for( int j=0; j < BLK_Y; ++j ) { A[j*lda] = MAGMA_D_MAKE( MAGMA_S_REAL( SA[j*ldsa] ), MAGMA_S_IMAG( SA[j*ldsa] )); } } else { // partial block-column for( int j=0; j < BLK_Y && iby+j < n; ++j ) { A[j*lda] = MAGMA_D_MAKE( MAGMA_S_REAL( SA[j*ldsa] ), MAGMA_S_IMAG( SA[j*ldsa] )); } } } } /** Purpose ------- SLAG2D_STREAM converts a single-real matrix, SA, to a double-real matrix, A. Note that while it is possible to overflow while converting from double to single, it is not possible to overflow when converting from single to double. Arguments --------- @param[in] m INTEGER The number of lines of the matrix A. M >= 0. @param[in] n INTEGER The number of columns of the matrix A. N >= 0. @param[in] SA REAL array, dimension (LDSA,N) On entry, the M-by-N coefficient matrix SA. @param[in] ldsa INTEGER The leading dimension of the array SA. LDSA >= max(1,M). @param[out] A DOUBLE PRECISION array, dimension (LDA,N) On exit, the M-by-N coefficient matrix A. @param[in] lda INTEGER The leading dimension of the array A. LDA >= max(1,M). @param[out] info INTEGER - = 0: successful exit - < 0: if INFO = -i, the i-th argument had an illegal value @param[in] queue magma_queue_t Queue to execute in. @ingroup magma_saux2 ********************************************************************/ extern "C" void magmablas_slag2d_q( magma_int_t m, magma_int_t n, magmaFloat_const_ptr SA, magma_int_t ldsa, magmaDouble_ptr A, magma_int_t lda, magma_queue_t queue, magma_int_t *info ) { *info = 0; if ( m < 0 ) *info = -1; else if ( n < 0 ) *info = -2; else if ( ldsa < max(1,m) ) *info = -4; else if ( lda < max(1,m) ) *info = -6; if (*info != 0) { magma_xerbla( __func__, -(*info) ); return; //*info; } /* quick return */ if ( m == 0 || n == 0 ) { return; } dim3 threads( BLK_X, 1 ); dim3 grid( magma_ceildiv( m, BLK_X ), magma_ceildiv( n, BLK_Y ) ); hipLaunchKernelGGL(( slag2d_kernel), dim3(grid), dim3(threads), 0, queue , m, n, SA, ldsa, A, lda ); } /** @see magmablas_slag2d_q @ingroup magma_saux2 ********************************************************************/ extern "C" void magmablas_slag2d( magma_int_t m, magma_int_t n, magmaFloat_const_ptr SA, magma_int_t ldsa, magmaDouble_ptr A, magma_int_t lda, magma_int_t *info ) { magmablas_slag2d_q( m, n, SA, ldsa, A, lda, magma_stream, info ); }
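Since both element types here are real, MAGMA_D_MAKE(real, imag) reduces to a widening float-to-double cast, so the kernel is element-wise equivalent to the column-major host loop below (a sketch; slag2d_ref is an illustrative name). On the device the same work is tiled as ceil(m/BLK_X) x ceil(n/BLK_Y) blocks of BLK_X = 64 threads, each thread owning one row of a BLK_Y = 32 column block-column:

// Host reference for the same conversion over an m x n column-major matrix
// with leading dimensions ldsa/lda; single -> double cannot overflow.
void slag2d_ref(int m, int n, const float *SA, int ldsa, double *A, int lda) {
    for (int j = 0; j < n; ++j)
        for (int i = 0; i < m; ++i)
            A[i + j * lda] = (double) SA[i + j * ldsa];
}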
4154b3f6d6ccb14e3cd3363d6416359526f82bdd.cu
/* -- MAGMA (version 1.7.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date September 2015 @generated from clag2z.cu mixed zc -> ds, Fri Sep 11 18:29:19 2015 @author Mark Gates */ #include "common_magma.h" #define BLK_X 64 #define BLK_Y 32 /* Divides matrix into ceil( m/BLK_X ) x ceil( n/BLK_Y ) blocks. Each block has BLK_X threads. Each thread loops across one row, updating BLK_Y entries. Code similar to slat2d and zlaset. */ __global__ void slag2d_kernel( int m, int n, const float *SA, int ldsa, double *A, int lda ) { int ind = blockIdx.x*BLK_X + threadIdx.x; int iby = blockIdx.y*BLK_Y; /* check if full block-column */ bool full = (iby + BLK_Y <= n); /* do only rows inside matrix */ if ( ind < m ) { A += ind + iby*lda; SA += ind + iby*ldsa; if ( full ) { // full block-column #pragma unroll for( int j=0; j < BLK_Y; ++j ) { A[j*lda] = MAGMA_D_MAKE( MAGMA_S_REAL( SA[j*ldsa] ), MAGMA_S_IMAG( SA[j*ldsa] )); } } else { // partial block-column for( int j=0; j < BLK_Y && iby+j < n; ++j ) { A[j*lda] = MAGMA_D_MAKE( MAGMA_S_REAL( SA[j*ldsa] ), MAGMA_S_IMAG( SA[j*ldsa] )); } } } } /** Purpose ------- SLAG2D_STREAM converts a single-real matrix, SA, to a double-real matrix, A. Note that while it is possible to overflow while converting from double to single, it is not possible to overflow when converting from single to double. Arguments --------- @param[in] m INTEGER The number of lines of the matrix A. M >= 0. @param[in] n INTEGER The number of columns of the matrix A. N >= 0. @param[in] SA REAL array, dimension (LDSA,N) On entry, the M-by-N coefficient matrix SA. @param[in] ldsa INTEGER The leading dimension of the array SA. LDSA >= max(1,M). @param[out] A DOUBLE PRECISION array, dimension (LDA,N) On exit, the M-by-N coefficient matrix A. @param[in] lda INTEGER The leading dimension of the array A. LDA >= max(1,M). @param[out] info INTEGER - = 0: successful exit - < 0: if INFO = -i, the i-th argument had an illegal value @param[in] queue magma_queue_t Queue to execute in. @ingroup magma_saux2 ********************************************************************/ extern "C" void magmablas_slag2d_q( magma_int_t m, magma_int_t n, magmaFloat_const_ptr SA, magma_int_t ldsa, magmaDouble_ptr A, magma_int_t lda, magma_queue_t queue, magma_int_t *info ) { *info = 0; if ( m < 0 ) *info = -1; else if ( n < 0 ) *info = -2; else if ( ldsa < max(1,m) ) *info = -4; else if ( lda < max(1,m) ) *info = -6; if (*info != 0) { magma_xerbla( __func__, -(*info) ); return; //*info; } /* quick return */ if ( m == 0 || n == 0 ) { return; } dim3 threads( BLK_X, 1 ); dim3 grid( magma_ceildiv( m, BLK_X ), magma_ceildiv( n, BLK_Y ) ); slag2d_kernel<<< grid, threads, 0, queue >>> ( m, n, SA, ldsa, A, lda ); } /** @see magmablas_slag2d_q @ingroup magma_saux2 ********************************************************************/ extern "C" void magmablas_slag2d( magma_int_t m, magma_int_t n, magmaFloat_const_ptr SA, magma_int_t ldsa, magmaDouble_ptr A, magma_int_t lda, magma_int_t *info ) { magmablas_slag2d_q( m, n, SA, ldsa, A, lda, magma_stream, info ); }
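Apart from the hipify banner and the hip_runtime.h include, this .hip/.cu pair differs only in launch syntax: hipify rewrites the triple-chevron launch into hipLaunchKernelGGL, turning the launch configuration into explicit dim3 arguments and appending the kernel arguments to the call:

    CUDA: slag2d_kernel<<< grid, threads, 0, queue >>> ( m, n, SA, ldsa, A, lda );
    HIP:  hipLaunchKernelGGL(( slag2d_kernel), dim3(grid), dim3(threads), 0, queue , m, n, SA, ldsa, A, lda );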
ccad248386a1616c3b8dc5b312072e5ea53ec603.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /** See the topology https://github.com/research-team/memristive-spinal-cord/blob/master/doc/diagram/cpg_generator_FE_paper.png Based on the NEURON repository. */ //#define LOG #include <omp.h> #include <assert.h> #include <random> #include <vector> #include <string> #include "structs.h" #include <stdexcept> #include <hiprand/hiprand_kernel.h> // for file writing #include <cstdlib> #include <iostream> #include <fstream> #include <unistd.h> #include <stdio.h> #define HANDLE_ERROR( err ) ( HandleError( err, __FILE__, __LINE__ ) ) #define PI 3.141592654f using namespace std; random_device r; default_random_engine rand_gen(r()); static void HandleError(hipError_t err, const char *file, int line) { if (err != hipSuccess) { printf("!!! %s in %s at line %d\n", hipGetErrorString(err), file, line); exit(EXIT_FAILURE); } } const double dt = 0.025; // [ms] simulation step const int cv_fr = 200; // frequency of CV int step_number; // [step] number of full cycle steps const bool EXTRACELLULAR = false; unsigned int one_step_time; int skin_time; // duration of layer 25 = 21 cm/s; 50 = 15 cm/s; 125 = 6 cm/s int slices_extensor; unsigned int slice_time = 25; double E_coef; int slices_flexor; // flexor duration (125 or 175 ms for 4pedal) double cv_coef; bool str_flag = false; unsigned int sim_time; unsigned int SIM_TIME_IN_STEPS; unsigned int NRNS_NUMBER = 0; // [id] global neuron id = number of neurons unsigned int NRNS_AND_SEGS = 0; // [id] global neuron+segs id = number of neurons with segments const int neurons_in_group = 50; // number of neurons in a group const int neurons_in_ip = 196; // number of neurons in a group // common neuron constants // normal const double V_th = -40; // [mV] voltage threshold const double V_adj = -63; // [mV] adjust voltage for -55 threshold // moto neuron constants const double amA = 0.4; // const ??? todo const double amB = 66; // const ??? todo const double amC = 5; // const ??? todo const double bmA = 0.4; // const ??? todo const double bmB = 32; // const ??? todo const double bmC = 5; // const ??? todo const double ca0 = 2; // initial calcium concentration const double R_const = 8.314472; // [k-mole] or [joule/degC] const const double F_const = 96485.34; // [faraday] or [kilocoulombs] const // muscle fiber constants // const double g_kno = 0.01; // [S/cm2] conductance of the todo // const double g_kir = 0.03; // [S/cm2] conductance of the Inwardly Rectifying Potassium K+ (Kir) channel // Boltzman steady state curve const double vhalfl = -98.92; // [mV] inactivation half-potential const double kl = 10.89; // [mV] Stegen et al. 2012 // tau_infty const double vhalft = 67.0828; // [mV] fitted //100 uM sens curr 350a, Stegen et al. 2012 const double at = 0.00610779; // [/ ms] Stegen et al. 2012 const double bt = 0.0817741; // [/ ms] Note: typo in Stegen et al. 
2012 // temperature dependence const double q10 = 1; // temperature scaling (sensitivity) const double celsius = 36; // [degC] temperature of the cell // i_membrane [mA/cm2] const int nlayer = 2; const double e_extracellular = 0; // [mV] const double xraxial = 1e9; // [MOhm/cm] // neuron parameters vector<unsigned int> vector_nrn_start_seg; vector<char> vector_models; vector<double> vector_Cm, vector_gnabar, vector_gkbar, vector_gl, vector_Ra, vector_diam, vector_length, vector_ena, vector_ek, vector_el, vector_gkrect, vector_gcaN, vector_gcaL, vector_gcak; // synaptic parameters vector<double> vector_E_ex, vector_E_inh, vector_tau_exc, vector_tau_inh1, vector_tau_inh2; // synapses varaibels vector<int> vector_syn_pre_nrn, vector_syn_post_nrn, vector_syn_delay, vector_syn_delay_timer; vector<double> vector_syn_weight; // results vector vector <GroupMetadata> saving_groups; // for debugging vector <Group> all_groups; // generators vector<unsigned int> vec_time_end, vec_nrn_id, vec_freq_in_steps, vec_spike_each_step; double *bimodal_distr_for_moto_neurons(const unsigned int nrnnumber) { int standby_percent = 70; double diameter_active = 27.0; double diameter_standby = 57.0; double* nrn_diameter = new double [nrnnumber]; random_device r1; default_random_engine generator1(r1()); normal_distribution<double> d_active(diameter_active, 3); normal_distribution<double> d_standby(diameter_standby, 6); int standby_size = (int) (nrnnumber * standby_percent / 100); int active_size = nrnnumber - standby_size; for (int i = 0; i < active_size; i++) nrn_diameter[i] = d_active(generator1); for (int i = active_size; i < nrnnumber; i++) nrn_diameter[i] = d_standby(generator1); return nrn_diameter; } // form structs of neurons global ID and groups name Group form_group(const string &group_name, int nrns_in_group = neurons_in_group, const char model = INTER, const int segs = 1) { /** * */ Group group = Group(); group.group_name = group_name; // name of a neurons group group.id_start = NRNS_NUMBER; // first ID in the group group.id_end = NRNS_NUMBER + nrns_in_group - 1; // the latest ID in the group group.group_size = nrns_in_group; // size of the neurons group group.model = model; double Cm, gnabar, gkbar, gl, Ra, ena, ek, el, diam, dx, gkrect, gcaN, gcaL, gcak, e_ex, e_inh, tau_exc, tau_inh1, tau_inh2; uniform_real_distribution<double> Cm_distr(0.3, 2.5); uniform_real_distribution<double> Cm_distr_muscle(2.5, 4.0); uniform_real_distribution<double> length_distr_muscle(2500, 3500); normal_distribution<double> moto_Cm_distr(2, 0.5); uniform_int_distribution<int> inter_diam_distr(5, 15); uniform_real_distribution<double> afferent_diam_distr(15, 35); uniform_real_distribution<double> gl_distr_MUSCLE(0.0005, 0.001); // 8, 12 uniform_real_distribution<double> tau_exc_distr_MUSCLE(0.33, 0.35); double* diameters; // if (model == MOTO) diameters = bimodal_distr_for_moto_neurons(nrns_in_group); for (int nrn = 0; nrn < nrns_in_group; nrn++) { if (model == INTER) { Cm = Cm_distr(rand_gen); gnabar = 0.1; gkbar = 0.08; gl = 0.002; Ra = 100.0; ena = 50.0; ek = -77.0; el = -70.0; diam = inter_diam_distr(rand_gen); dx = diam; e_ex = 50; e_inh = -80; tau_exc = 0.35; tau_inh1 = 0.5; tau_inh2 = 3.5; } else if (model == AFFERENTS) { Cm = 2; gnabar = 0.5; gkbar = 0.04; gl = 0.002; Ra = 200.0; ena = 50.0; ek = -90.0; el = -70.0; diam = afferent_diam_distr(rand_gen); // 10 dx = diam; e_ex = 50; e_inh = -80; tau_exc = 0.35; tau_inh1 = 0.5; tau_inh2 = 3.5; } else if (model == MOTO) { Cm = moto_Cm_distr(rand_gen); gnabar = 0.05; gl = 
0.002; Ra = 200.0; ena = 50.0; ek = -80.0; el = -70.0; diam = diameters[nrn]; dx = diam; gkrect = 0.3; gcaN = 0.05; gcaL = 0.0001; gcak = 0.3; e_ex = 50.0; e_inh = -80.0; tau_exc = 0.3; tau_inh1 = 1.0; tau_inh2 = 1.5; if (diam > 50) { gnabar = 0.1; gcaL = 0.001; gl = 0.003; gkrect = 0.2; gcak = 0.2; } } else if (model == MUSCLE) { Cm = Cm_distr_muscle(rand_gen); gnabar = 0.03; gkbar = 0.06; // gl = 0.001; gl = gl_distr_MUSCLE(rand_gen); Ra = 1.1; ena = 55.0; ek = -90.0; el = -70.0; diam = 40.0; dx = length_distr_muscle(rand_gen); e_ex = 0.0; e_inh = -80.0; tau_exc = 0.35; // tau_exc = tau_exc_distr_MUSCLE(rand_gen); tau_inh1 = 1.0; tau_inh2 = 1.0; } else if (model == GENERATOR) { // nothing } else { throw logic_error("Choose the model"); } // common properties vector_Cm.push_back(Cm); vector_gnabar.push_back(gnabar); vector_gkbar.push_back(gkbar); vector_gl.push_back(gl); vector_el.push_back(el); vector_ena.push_back(ena); vector_ek.push_back(ek); vector_Ra.push_back(Ra); vector_diam.push_back(diam); vector_length.push_back(dx); vector_gkrect.push_back(gkrect); vector_gcaN.push_back(gcaN); vector_gcaL.push_back(gcaL); vector_gcak.push_back(gcak); vector_E_ex.push_back(e_ex); vector_E_inh.push_back(e_inh); vector_tau_exc.push_back(tau_exc); vector_tau_inh1.push_back(tau_inh1); vector_tau_inh2.push_back(tau_inh2); // vector_nrn_start_seg.push_back(NRNS_AND_SEGS); NRNS_AND_SEGS += (segs + 2); vector_models.push_back(model); // void (*foo)(int); // foo = &my_int_func; } NRNS_NUMBER += nrns_in_group; printf("Formed %s IDs [%d ... %d] = %d\n", group_name.c_str(), NRNS_NUMBER - nrns_in_group, NRNS_NUMBER - 1, nrns_in_group); // for debugging all_groups.push_back(group); return group; } __host__ unsigned int ms_to_step(double ms) { return (unsigned int) (ms / dt); } __host__ double step_to_ms(int step) { return step * dt; } // copy data from host to device template<typename type> void memcpyHtD(type *host, type *gpu, unsigned int size) { HANDLE_ERROR(hipMemcpy(gpu, host, sizeof(type) * size, hipMemcpyHostToDevice)); } // copy data from device to host template<typename type> void memcpyDtH(type *gpu, type *host, unsigned int size) { HANDLE_ERROR(hipMemcpy(host, gpu, size * sizeof(type), hipMemcpyDeviceToHost)); } // init GPU array and copy data from the CPU array template<typename type> type* init_gpu_arr(type *cpu_var, unsigned int size = NRNS_AND_SEGS) { type *gpu_var; HANDLE_ERROR(hipMalloc(&gpu_var, size * sizeof(type))); memcpyHtD<type>(cpu_var, gpu_var, size); return gpu_var; } // init GPU array and copy data from the CPU vector template<typename type> type *init_gpu_arr(vector<type> &vec) { type *gpu_var; HANDLE_ERROR(hipMalloc(&gpu_var, sizeof(type) * vec.size())); memcpyHtD<type>(vec.data(), gpu_var, vec.size()); return gpu_var; } void add_generator(Group &group, double start, double end, double freq) { vec_nrn_id.push_back(group.id_start); vec_time_end.push_back(ms_to_step(end)); vec_freq_in_steps.push_back(ms_to_step(1000 / freq)); vec_spike_each_step.push_back(ms_to_step(start)); printf("start %d end %d freq %d\n", ms_to_step(start), ms_to_step(end), ms_to_step(1000 / freq)); } // convert vector to the array template<typename type> type* vec2arr(vector<type> &vec) { return vec.cpu_vector.data(); } __device__ double Exp(double volt) { return (volt < -100)? 
0 : exp(volt); } __device__ double alpham(double volt) { if (abs((volt + amB) / amC) < 1e-6) return amA * amC; return amA * (volt + amB) / (1.0 - Exp(-(volt + amB) / amC)); } __device__ double betam(double volt) { if (abs((volt + bmB) / bmC) < 1e-6) return -bmA * bmC; return -bmA * (volt + bmB) / (1.0 - Exp((volt + bmB) / bmC)); } __device__ double syn_current(Neurons* N, const Parameters* P, int nrn, double voltage) { /** * calculate synaptic current */ return N->g_exc[nrn] * (voltage - P->E_ex[nrn]) + (N->g_inh_B[nrn] - N->g_inh_A[nrn]) * (voltage - P->E_inh[nrn]); } __device__ double nrn_moto_current(States* S, const Parameters* P, Neurons* N, int nrn, int nrn_seg_index, double voltage) { /** * calculate channels current */ double iNa = P->gnabar[nrn] * pow(S->m[nrn_seg_index], 3) * S->h[nrn_seg_index] * (voltage - P->ena[nrn]); double iK = P->gkrect[nrn] * pow(S->n[nrn_seg_index], 4) * (voltage - P->ek[nrn]) + P->gcak[nrn] * pow(S->cai[nrn_seg_index], 2) / (pow(S->cai[nrn_seg_index], 2) + 0.014 * 0.014) * (voltage - P->ek[nrn]); double iL = P->gl[nrn] * (voltage - P->el[nrn]); double E_Ca = (1000.0 * R_const * 309.15 / (2.0 * F_const)) * log(ca0 / S->cai[nrn_seg_index]); S->I_Ca[nrn_seg_index] = P->gcaN[nrn] * S->mc[nrn_seg_index] * S->mc[nrn_seg_index] * S->hc[nrn_seg_index] * (voltage - E_Ca) + P->gcaL[nrn] * S->p[nrn_seg_index] * (voltage - E_Ca); return iNa + iK + iL + S->I_Ca[nrn_seg_index]; } __device__ double nrn_fastchannel_current(States* S, const Parameters* P, Neurons* N, int nrn, int nrn_seg_index, double voltage) { /** * calculate channels current */ double iNa = P->gnabar[nrn] * pow(S->m[nrn_seg_index], 3) * S->h[nrn_seg_index] * (voltage - P->ena[nrn]); double iK = P->gkbar[nrn] * pow(S->n[nrn_seg_index], 4) * (voltage - P->ek[nrn]); double iL = P->gl[nrn] * (voltage - P->el[nrn]); return iNa + iK + iL; } __device__ void recalc_synaptic(States* S, const Parameters* P, Neurons* N, int nrn) { /** * updating conductance(summed) of neurons' post-synaptic conenctions */ // exc synaptic conductance if (N->g_exc[nrn] != 0.0) { N->g_exc[nrn] -= (1.0 - exp(-dt / P->tau_exc[nrn])) * N->g_exc[nrn]; if (N->g_exc[nrn] < 1e-5) { N->g_exc[nrn] = 0.0; } } // inh1 synaptic conductance if (N->g_inh_A[nrn] != 0.0) { N->g_inh_A[nrn] -= (1.0 - exp(-dt / P->tau_inh1[nrn])) * N->g_inh_A[nrn]; if (N->g_inh_A[nrn] < 1e-5) { N->g_inh_A[nrn] = 0.0; } } // inh2 synaptic conductance if (N->g_inh_B[nrn] != 0.0) { N->g_inh_B[nrn] -= (1.0 - exp(-dt / P->tau_inh2[nrn])) * N->g_inh_B[nrn]; if (N->g_inh_B[nrn] < 1e-5) N->g_inh_B[nrn] = 0.0; } } __device__ void syn_initial(States* S, const Parameters* P, Neurons* N, int nrn) { /** * initialize tau(rise / decay time, ms) and factor(const) variables */ if (P->tau_inh1[nrn] / P->tau_inh2[nrn] > 0.9999) P->tau_inh1[nrn] = 0.9999 * P->tau_inh2[nrn]; if (P->tau_inh1[nrn] / P->tau_inh2[nrn] < 1e-9) P->tau_inh1[nrn] = P->tau_inh2[nrn] * 1e-9; // double tp = (P->tau_inh1[nrn] * P->tau_inh2[nrn]) / (P->tau_inh2[nrn] - P->tau_inh1[nrn]) * log(P->tau_inh2[nrn] / P->tau_inh1[nrn]); N->factor[nrn] = -exp(-tp / P->tau_inh1[nrn]) + exp(-tp / P->tau_inh2[nrn]); N->factor[nrn] = 1.0 / N->factor[nrn]; } __device__ void nrn_inter_initial(States* S, const Parameters* P, Neurons* N, int nrn_seg_index, double V) { /** * initialize channels, based on cropped evaluate_fct function */ double V_mem = V - V_adj; // double a = 0.32 * (13.0 - V_mem) / (exp((13.0 - V_mem) / 4.0) - 1.0); double b = 0.28 * (V_mem - 40.0) / (exp((V_mem - 40.0) / 5.0) - 1.0); S->m[nrn_seg_index] = a / (a + 
b); // m_inf // a = 0.128 * exp((17.0 - V_mem) / 18.0); b = 4.0 / (1.0 + exp((40.0 - V_mem) / 5.0)); S->h[nrn_seg_index] = a / (a + b); // h_inf // a = 0.032 * (15.0 - V_mem) / (exp((15.0 - V_mem) / 5.0) - 1.0); b = 0.5 * exp((10.0 - V_mem) / 40.0); S->n[nrn_seg_index] = a / (a + b); // n_inf } __device__ void nrn_moto_initial(States* S, const Parameters* P, Neurons* N, int nrn_seg_index, double V) { /** * initialize channels, based on cropped evaluate_fct function */ double a = alpham(V); S->m[nrn_seg_index] = a / (a + betam(V)); // m_inf S->h[nrn_seg_index] = 1.0 / (1.0 + Exp((V + 65.0) / 7.0)); // h_inf S->p[nrn_seg_index] = 1.0 / (1.0 + Exp(-(V + 55.8) / 3.7)); // p_inf S->n[nrn_seg_index] = 1.0 / (1.0 + Exp(-(V + 38.0) / 15.0)); // n_inf S->mc[nrn_seg_index] = 1.0 / (1.0 + Exp(-(V + 32.0) / 5.0)); // mc_inf S->hc[nrn_seg_index] = 1.0 / (1.0 + Exp((V + 50.0) / 5.0)); // hc_inf S->cai[nrn_seg_index] = 0.0001; } __device__ void nrn_muslce_initial(States* S, const Parameters* P, Neurons* N, int nrn_seg_index, double V) { /** * initialize channels, based on cropped evaluate_fct function */ double V_mem = V - V_adj; // m_inf double a = 0.32 * (13.0 - V_mem) / (exp((13.0 - V_mem) / 4.0) - 1.0); double b = 0.28 * (V_mem - 40.0) / (exp((V_mem - 40.0) / 5.0) - 1.0); S->m[nrn_seg_index] = a / (a + b); // h_inf a = 0.128 * exp((17.0 - V_mem) / 18.0); b = 4.0 / (1.0 + exp((40.0 - V_mem) / 5.0)); S->h[nrn_seg_index] = a / (a + b); // n_inf a = 0.032 * (15.0 - V_mem) / (exp((15.0 - V_mem) / 5.0) - 1.0); b = 0.5 * exp((10.0 - V_mem) / 40.0); S->n[nrn_seg_index] = a / (a + b); } __device__ void recalc_inter_channels(States* S, const Parameters* P, Neurons* N, int nrn_seg_index, double V) { /** * calculate new states of channels (evaluate_fct) */ // BREAKPOINT -> states -> evaluate_fct double V_mem = V - V_adj; // double a = 0.32 * (13.0 - V_mem) / (exp((13.0 - V_mem) / 4.0) - 1.0); double b = 0.28 * (V_mem - 40.0) / (exp((V_mem - 40.0) / 5.0) - 1.0); double tau = 1.0 / (a + b); double inf = a / (a + b); S->m[nrn_seg_index] += (1.0 - exp(-dt / tau)) * (inf - S->m[nrn_seg_index]); // a = 0.128 * exp((17.0 - V_mem) / 18.0); b = 4.0 / (1.0 + exp((40.0 - V_mem) / 5.0)); tau = 1.0 / (a + b); inf = a / (a + b); S->h[nrn_seg_index] += (1.0 - exp(-dt / tau)) * (inf - S->h[nrn_seg_index]); // a = 0.032 * (15.0 - V_mem) / (exp((15.0 - V_mem) / 5.0) - 1.0); b = 0.5 * exp((10.0 - V_mem) / 40.0); tau = 1.0 / (a + b); inf = a / (a + b); // states S->n[nrn_seg_index] += (1.0 - exp(-dt / tau)) * (inf - S->n[nrn_seg_index]); } __device__ void recalc_moto_channels(States* S, const Parameters* P, Neurons* N, int nrn_seg_index, double V) { /** * calculate new states of channels (evaluate_fct) */ // BREAKPOINT -> states -> evaluate_fct double a = alpham(V); double b = betam(V); // m double tau = 1.0 / (a + b); double inf = a / (a + b); S->m[nrn_seg_index] += (1.0 - exp(-dt / tau)) * (inf - S->m[nrn_seg_index]); // h tau = 30.0 / (Exp((V + 60.0) / 15.0) + Exp(-(V + 60.0) / 16.0)); inf = 1.0 / (1 + Exp((V + 65.0) / 7.0)); S->h[nrn_seg_index] += (1.0 - exp(-dt / tau)) * (inf - S->h[nrn_seg_index]); // DELAYED RECTIFIER POTASSIUM tau = 5.0 / (Exp((V + 50.0) / 40.0) + Exp(-(V + 50.0) / 50.0)); inf = 1.0 / (1.0 + Exp(-(V + 38.0) / 15.0)); S->n[nrn_seg_index] += (1.0 - exp(-dt / tau)) * (inf - S->n[nrn_seg_index]); // CALCIUM DYNAMICS N-type double mc_inf = 1.0 / (1.0 + Exp(-(V + 32.0) / 5.0)); double hc_inf = 1.0 / (1.0 + Exp((V + 50.0) / 5.0)); // CALCIUM DYNAMICS L-type tau = 400.0; inf = 1.0 / (1.0 + Exp(-(V + 55.8) / 3.7)); 
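// Note: this and every gating update in this file advance each state variable
// with the exponential Euler step s += (1 - exp(-dt / tau)) * (inf - s), which
// is the exact one-step solution of ds/dt = (inf - s) / tau, so the update
// remains stable even when dt is not small compared to tau.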
S->p[nrn_seg_index] += (1.0 - exp(-dt / tau)) * (inf - S->p[nrn_seg_index]); // states S->mc[nrn_seg_index] += (1.0 - exp(-dt / 15.0)) * (mc_inf - S->mc[nrn_seg_index]); // tau_mc = 15 S->hc[nrn_seg_index] += (1.0 - exp(-dt / 50.0)) * (hc_inf - S->hc[nrn_seg_index]); // tau_hc = 50 S->cai[nrn_seg_index] += (1.0 - exp(-dt * 0.04)) * (-0.01 * S->I_Ca[nrn_seg_index] / 0.04 - S->cai[nrn_seg_index]); } __device__ void recalc_muslce_channels(States* S, const Parameters* P, Neurons* N, int nrn_seg_index, double V) { /** * calculate new states of channels (evaluate_fct) */ // BREAKPOINT -> states -> evaluate_fct double V_mem = V - V_adj; // double a = 0.32 * (13.0 - V_mem) / (exp((13.0 - V_mem) / 4.0) - 1.0); double b = 0.28 * (V_mem - 40.0) / (exp((V_mem - 40.0) / 5.0) - 1.0); double tau = 1.0 / (a + b); double inf = a / (a + b); S->m[nrn_seg_index] += (1.0 - exp(-dt / tau)) * (inf - S->m[nrn_seg_index]); // a = 0.128 * exp((17.0 - V_mem) / 18.0); b = 4.0 / (1.0 + exp((40.0 - V_mem) / 5.0)); tau = 1.0 / (a + b); inf = a / (a + b); S->h[nrn_seg_index] += (1.0 - exp(-dt / tau)) * (inf - S->h[nrn_seg_index]); // a = 0.032 * (15.0 - V_mem) / (exp((15.0 - V_mem) / 5.0) - 1.0); b = 0.5 * exp((10.0 - V_mem) / 40.0); tau = 1.0 / (a + b); inf = a / (a + b); S->n[nrn_seg_index] += (1.0 - exp(-dt / tau)) * (inf - S->n[nrn_seg_index]); // // // double qt = pow(q10, (celsius - 33.0) / 10.0); // double linf = 1.0 / (1.0 + exp((V - vhalfl) / kl)); // l_steadystate // double taul = 1.0 / (qt * (at * exp(-V / vhalft) + bt * exp(V / vhalft))); // double alpha = 0.3 / (1.0 + exp((V + 43.0) / -5.0)); // double beta = 0.03 / (1.0 + exp((V + 80.0) / -1.0)); // double stau = 1.0 / (alpha + beta); // double sinf = alpha / (alpha + beta); // // states // S->l[nrn_seg_index] += (1.0 - exp(-dt / taul)) * (linf - S->l[nrn_seg_index]); // S->s[nrn_seg_index] += (1.0 - exp(-dt / stau)) * (sinf - S->s[nrn_seg_index]); } __device__ void nrn_rhs_ext(States* S, const Parameters* P, Neurons* N, int i1, int i3) { /** * void nrn_rhs_ext(NrnThread* _nt) */ int size = S->ext_size; const double xg[5] = {0, 1e9, 1e9, 1e9, 0}; // for (int nrn_seg = i1; nrn_seg < i3; ++nrn_seg) { for (int layer = 0; layer < nlayer; ++layer) { // zeroed at nrn_rhs before nrn_rhs_ext S->EXT_RHS[nrn_seg + layer * size] = 0; } S->EXT_RHS[nrn_seg + 0 * size] -= S->NODE_RHS[nrn_seg]; } #ifdef LOG printf("nrn_rhs_ext::EXT RHS 0 : "); for(int ii = i1; ii < i3; ++ii) printf("%g\t", S->EXT_RHS[ii]); printf("\n"); printf("nrn_rhs_ext::EXT RHS 1 : "); for(int ii = i1; ii < i3; ++ii) printf("%g\t", S->EXT_RHS[ii + size]); printf("\n"); #endif double x, dv; for (int nrn_seg = i1 + 1; nrn_seg < i3; ++nrn_seg) { #ifdef LOG printf("V: %g, %g\n", S->EXT_V[nrn_seg], S->EXT_V[nrn_seg + size]); #endif for (int layer = 0; layer < nlayer; ++layer) { dv = S->EXT_V[nrn_seg - 1 + layer * size] - S->EXT_V[nrn_seg + layer * size]; S->EXT_RHS[nrn_seg + layer * size] -= S->EXT_B[nrn_seg + layer * size] * dv; S->EXT_RHS[nrn_seg - 1 + layer * size] += S->EXT_A[nrn_seg + layer * size] * dv; } int layer = nlayer - 1; S->EXT_RHS[nrn_seg + layer * size] -= xg[nrn_seg - i1] * (S->EXT_RHS[nrn_seg + layer * size] - e_extracellular); for (--layer; layer >= 0; --layer) { /* between j and j+1 layer */ x = xg[nrn_seg - i1] * (S->EXT_V[nrn_seg + layer * size] - S->EXT_V[nrn_seg + (layer + 1) * size]); S->EXT_RHS[nrn_seg + layer * size] -= x; S->EXT_RHS[nrn_seg + (layer + 1) * size] += x; } } #ifdef LOG printf("nrn_rhs_ext::EXT RHS END 0 : "); for(int ii = i1; ii < i3; ++ii) printf("%g\t", 
S->EXT_RHS[ii]); printf("\n"); printf("nrn_rhs_ext::EXT RHS END 1 : "); for(int ii = i1; ii < i3; ++ii) printf("%g\t", S->EXT_RHS[ii + size]); printf("\n"); #endif } __device__ void nrn_setup_ext(States* S, const Parameters* P, Neurons* N, int i1, int i3) { /** * void nrn_setup_ext(NrnThread* _nt) */ double cj = 1 / dt; double cfac = 0.001 * cj; int size = S->ext_size; const double xg[5] = {0, 1e9, 1e9, 1e9, 0}; const double xc[5] = {0, 0, 0, 0, 0}; // todo find the place where it is zeroed for (int nrn_seg = i1; nrn_seg < i3; ++nrn_seg) for (int layer = 0; layer < nlayer; ++layer) S->EXT_D[nrn_seg + layer * size] = 0; // d contains all the membrane conductances (and capacitance) // i.e. (cm/dt + di/dvm - dis/dvi)*[dvi] and (dis/dvi)*[dvx] for (int nrn_seg = i1; nrn_seg < i3; ++nrn_seg) { // nde->_d only has -ELECTRODE_CURRENT contribution S->EXT_D[nrn_seg + 0 * size] += S->NODE_D[nrn_seg]; } // NEURON D 0 = [0 0.1442 0.1442 0.1442 0 ] [0 0 0 0 0] // GRAS [0 0.1442 0.1442 0.1442 0 ] [0 0 0 0 0] // series resistance, capacitance, and axial terms for (int nrn_seg = i1 + 1; nrn_seg < i3; ++nrn_seg) { // series resistance and capacitance to ground int layer = 0; while (1) { double mfac = xg[nrn_seg - i1] + xc[nrn_seg - i1] * cfac; S->EXT_D[nrn_seg + layer * size] += mfac; layer += 1; if (layer == nlayer) break; S->EXT_D[nrn_seg + layer * size] += mfac; } // axial connections for (layer = 0; layer < nlayer; ++layer) { S->EXT_D[nrn_seg + layer * size] -= S->EXT_B[nrn_seg + layer * size]; S->EXT_D[nrn_seg - 1 + layer * size] -= S->EXT_A[nrn_seg + layer * size]; } } // NEURON D[0] = [2e-08 1e+09 1e+09 1e+09 2e-08 ] GRAS [2e-08 1e+09 1e+09 1e+09 2e-08] // NEURON D[1] = [2e-08 2e+09 2e+09 2e+09 2e-08 ] GRAS [2e-08 2e+09 2e+09 2e+09 2e-08] } __device__ void nrn_update_2d(States* S, const Parameters* P, Neurons* N, int i1, int i3) { /** * void nrn_update_2d(NrnThread* nt) * update has already been called so modify nd->v based on dvi we only need to * update extracellular nodes and base the corresponding nd->v on dvm (dvm = dvi - dvx) */ // final voltage updating int size = S->ext_size; for (int nrn_seg = i1; nrn_seg < i3; ++nrn_seg) for (int layer = 0; layer < nlayer; ++layer) S->EXT_V[nrn_seg + layer * size] += S->EXT_RHS[nrn_seg + layer * size]; } __device__ void nrn_rhs(States* S, const Parameters* P, Neurons* N, int nrn, int i1, int i3) { /** * void nrn_rhs(NrnThread *_nt) combined with the first part of nrn_lhs * calculate right hand side of * cm*dvm/dt = -i(vm) + is(vi) + ai_j*(vi_j - vi) * cx*dvx/dt - cm*dvm/dt = -gx*(vx - ex) + i(vm) + ax_j*(vx_j - vx) * This is a common operation for fixed step, cvode, and daspk methods */ // init _rhs and _lhs (NODE_D) as zero for (int nrn_seg = i1; nrn_seg < i3; ++nrn_seg) { S->NODE_RHS[nrn_seg] = 0; // replace the process: init by 0, add Cm*frac, add A and B } // update MOD rhs, CAPS has no current [CAP MOD CAP]! // int center_segment = i1 + ((P->models[nrn] == MUSCLE)? 
2 : 1); // update segments except CAPs double V, _rhs; for (int nrn_seg = i1 + 1; nrn_seg < i3 - 1; ++nrn_seg) { V = S->Vm[nrn_seg]; // SYNAPTIC update // static void nrn_cur(_NrnThread* _nt, _Memb_list* _ml, int _type) _rhs = syn_current(N, P, nrn, V); _rhs *= 1.e2 / S->NODE_AREA[nrn_seg]; S->NODE_RHS[nrn_seg] -= _rhs; // NEURON update // static void nrn_cur(_NrnThread* _nt, _Memb_list* _ml, int _type) if (P->models[nrn] == INTER || P->models[nrn] == AFFERENTS) { // muscle and inter has the same fast_channel function _rhs = nrn_fastchannel_current(S, P, N, nrn, nrn_seg, V); } else if (P->models[nrn] == MOTO) { _rhs = nrn_moto_current(S, P, N, nrn, nrn_seg, V); } else if (P->models[nrn] == MUSCLE) { // muscle and inter has the same fast_channel function _rhs = nrn_fastchannel_current(S, P, N, nrn, nrn_seg, V); } else { printf("\nERROR\n"); } // save data like in NEURON (after .mod nrn_cur) S->NODE_RHS[nrn_seg] -= _rhs; // note that CAP has no jacob } // end FOR segments if (EXTRACELLULAR) { // Cannot have any axial terms yet so that i(vm) can be calculated from // i(vm)+is(vi) and is(vi) which are stored in rhs vector. nrn_rhs_ext(S, P, N, i1, i3); // nrn_rhs_ext has also computed the the internal axial current for those // nodes containing the extracellular mechanism } double dv; for (int nrn_seg = i1 + 1; nrn_seg < i3; ++nrn_seg) { dv = S->Vm[nrn_seg - 1] - S->Vm[nrn_seg]; // our connection coefficients are negative so S->NODE_RHS[nrn_seg] -= S->NODE_B[nrn_seg] * dv; S->NODE_RHS[nrn_seg - 1] += S->NODE_A[nrn_seg] * dv; } #ifdef LOG printf("RHS EXRTA 0 : "); for (int nrn_seg = i1; nrn_seg < i3; ++nrn_seg) printf("%g\t", S->EXT_RHS[nrn_seg]); printf("\n"); printf("RHS EXRTA 1 : "); for (int nrn_seg = i1; nrn_seg < i3; ++nrn_seg) printf("%g\t", S->EXT_RHS[nrn_seg + S->ext_size]); printf("\n"); #endif } __device__ void nrn_lhs(States* S, const Parameters* P, Neurons* N, int nrn, int i1, int i3) { /** calculate left hand side of * cm*dvm/dt = -i(vm) + is(vi) + ai_j*(vi_j - vi) * cx*dvx/dt - cm*dvm/dt = -gx*(vx - ex) + i(vm) + ax_j*(vx_j - vx) * with a matrix so that the solution is of the form [dvm+dvx,dvx] on the right hand side after solving. */ for (int nrn_seg = i1; nrn_seg < i3; ++nrn_seg) { S->NODE_D[nrn_seg] = 0; } // update MOD rhs, CAPS has no current [CAP MOD CAP]! // int center_segment = i1 + ((P->models[nrn] == MUSCLE)? 
2 : 1); // update segments except CAPs double V, _g, _rhs; for (int nrn_seg = i1 + 1; nrn_seg < i3 - 1; ++nrn_seg) { V = S->Vm[nrn_seg]; // SYNAPTIC update // static void nrn_cur(_NrnThread* _nt, _Memb_list* _ml, int _type) _g = syn_current(N, P, nrn, V + 0.001); _rhs = syn_current(N, P, nrn, V); _g = (_g - _rhs) / 0.001; _g *= 1.e2 / S->NODE_AREA[nrn_seg]; _rhs *= 1.e2 / S->NODE_AREA[nrn_seg]; // static void nrn_jacob(_NrnThread* _nt, _Memb_list* _ml, int _type) S->NODE_D[nrn_seg] += _g; // void nrn_cap_jacob(NrnThread* _nt, Memb_list* ml) { S->NODE_D[nrn_seg] += S->const_NODE_D[nrn_seg]; // activsynapse_lhs() // activclamp_lhs() // NEURON update // static void nrn_cur(_NrnThread* _nt, _Memb_list* _ml, int _type) if (P->models[nrn] == INTER || P->models[nrn] == AFFERENTS) { // muscle and inter has the same fast_channel function _g = nrn_fastchannel_current(S, P, N, nrn, nrn_seg, V + 0.001); _rhs = nrn_fastchannel_current(S, P, N, nrn, nrn_seg, V); } else if (P->models[nrn] == MOTO) { _g = nrn_moto_current(S, P, N, nrn, nrn_seg, V + 0.001); _rhs = nrn_moto_current(S, P, N, nrn, nrn_seg, V); } else if (P->models[nrn] == MUSCLE) { // muscle and inter has the same fast_channel function _g = nrn_fastchannel_current(S, P, N, nrn, nrn_seg, V + 0.001); _rhs = nrn_fastchannel_current(S, P, N, nrn, nrn_seg, V); } else { assert(false); } // save data like in NEURON (after .mod nrn_cur) _g = (_g - _rhs) / 0.001; // note that CAP has no jacob S->NODE_D[nrn_seg] += _g; } // end FOR segments if (EXTRACELLULAR) { nrn_setup_ext(S, P, N, i1, i3); } // activstim_rhs() // activclamp_rhs() // at this point d contains all the membrane conductances // now add the axial currents for (int nrn_seg = i1 + 1; nrn_seg < i3; ++nrn_seg) { S->NODE_D[nrn_seg] -= S->NODE_B[nrn_seg]; S->NODE_D[nrn_seg - 1] -= S->NODE_A[nrn_seg]; } #ifdef LOG printf("NODED axial:\t"); for (int i = i1; i < i3; ++i) printf("%g\t", S->NODE_D[i]); printf("\n"); #endif } __device__ void bksub(States* S, const Parameters* P, Neurons* N, int nrn, int i1, int i3) { /** * void bksub(NrnThread* _nt) */ // intracellular S->NODE_RHS[i1] /= S->NODE_D[i1]; // for (int nrn_seg = i1 + 1; nrn_seg < i3; ++nrn_seg) { S->NODE_RHS[nrn_seg] -= S->NODE_B[nrn_seg] * S->NODE_RHS[nrn_seg - 1]; S->NODE_RHS[nrn_seg] /= S->NODE_D[nrn_seg]; } // extracellular // if (EXTRACELLULAR) { // int size = S->ext_size; // for (int layer = 0; layer < nlayer; ++layer) { // S->EXT_RHS[i1 + layer * size] /= S->EXT_D[i1 + layer * size]; // } // for (int nrn_seg = i1 + 1; nrn_seg < i3; ++nrn_seg) { // for (int layer = 0; layer < nlayer; ++layer) { // S->EXT_RHS[nrn_seg + layer * size] -= S->EXT_B[nrn_seg + layer * size] * S->EXT_RHS[nrn_seg - 1 + layer * size]; // S->EXT_RHS[nrn_seg + layer * size] /= S->EXT_D[nrn_seg + layer * size]; // } // } // } } __device__ void triang(States* S, const Parameters* P, Neurons* N, int nrn, int i1, int i3) { /** * void triang(NrnThread* _nt) */ // intracellular double ppp; for (int nrn_seg = i3 - 1; nrn_seg >= i1 + 1; --nrn_seg) { ppp = S->NODE_A[nrn_seg] / S->NODE_D[nrn_seg]; S->NODE_D[nrn_seg - 1] -= ppp * S->NODE_B[nrn_seg]; S->NODE_RHS[nrn_seg - 1] -= ppp * S->NODE_RHS[nrn_seg]; } // extracellular // if (EXTRACELLULAR) { // int size = S->ext_size; // for (int nrn_seg = i3 - 1; nrn_seg >= i1 + 1; --nrn_seg) { // for (int layer = 0; layer < nlayer; ++layer) { // ppp = S->EXT_A[nrn_seg + layer * size] / S->EXT_D[nrn_seg + layer * size]; // S->EXT_D[nrn_seg - 1 + layer * size] -= ppp * S->EXT_B[nrn_seg + layer * size]; // S->EXT_RHS[nrn_seg - 1 + 
layer * size] -= ppp * S->EXT_RHS[nrn_seg + layer * size]; // nrn_seg--; // } // } // } } __device__ void nrn_solve(States* S, const Parameters* P, Neurons* N, int nrn, int i1, int i3) { /** * void nrn_solve(NrnThread* _nt) */ #ifdef LOG printf("SOLVE EXT begin NODED 0 "); for (int i=i1; i < i3; ++i) printf("%g\t", S->EXT_D[i]); printf("\n"); printf("SOLVE EXT begin NODED 0 "); for (int i=i1; i < i3; ++i) printf("%g\t", S->EXT_D[i + S->ext_size]); printf("\n"); #endif // TODO PROOVED // nrn_solve EXT D 0 5e+07 2e-09 2e-09 1e-09 5e+07 // nrn_solve EXT D 1 5e+07 5e-10 5e-10 1e-09 5e+07 triang(S, P, N, nrn, i1, i3); bksub(S, P, N, nrn, i1, i3); } __device__ void setup_tree_matrix(States* S, const Parameters* P, Neurons* N, int nrn, int i1, int i3) { /** * void setup_tree_matrix(NrnThread* _nt) */ nrn_rhs(S, P, N, nrn, i1, i3); nrn_lhs(S, P, N, nrn, i1, i3); } __device__ void update(States* S, const Parameters* P, Neurons* N, int nrn, int i1, int i3) { /** * void update(NrnThread* _nt) */ #ifdef LOG printf("UPDATE EXT begin NODED 0 "); for (int i=i1; i < i3; ++i) printf("%g\t", S->EXT_D[i]); printf("\n"); printf("UPDATE EXT begin NODED 1 "); for (int i=i1; i < i3; ++i) printf("%g\t", S->EXT_D[i + S->ext_size]); printf("\n"); // final voltage updating #endif for (int nrn_seg = i1; nrn_seg < i3; ++nrn_seg) { S->Vm[nrn_seg] += S->NODE_RHS[nrn_seg]; } // extracellular if (EXTRACELLULAR) { nrn_update_2d(S, P, N, i1, i3); } } __device__ void nrn_deliver_events(States* S, const Parameters* P, Neurons* N, int nrn) { /** * void nrn_deliver_events(NrnThread* nt) */ // get the central segment (for detecting spikes): i1 + (2 or 1) int seg_update = P->nrn_start_seg[nrn] + ((P->models[nrn] == MUSCLE)? 2 : 1); // check if neuron has spike with special flag for avoidance multi-spike detecting if (!N->spike_on[nrn] && S->Vm[seg_update] > V_th && N->ref_time_timer[nrn] == 0) { N->spike_on[nrn] = true; N->has_spike[nrn] = true; N->ref_time_timer[nrn] = N->ref_time[nrn]; } else if (S->Vm[seg_update] < V_th) { N->spike_on[nrn] = false; } } __device__ void nrn_fixed_step_lastpart(States* S, const Parameters* P, Neurons* N, int nrn, int i1, int i3) { /** * void *nrn_fixed_step_lastpart(NrnThread *nth) */ // update neurons' synapses state recalc_synaptic(S, P, N, nrn); // update neurons' segments state if (P->models[nrn] == INTER || P->models[nrn] == AFFERENTS) { for(int nrn_seg = i1; nrn_seg < i3; ++nrn_seg) { recalc_inter_channels(S, P, N, nrn_seg, S->Vm[nrn_seg]); } } else if (P->models[nrn] == MOTO) { for(int nrn_seg = i1; nrn_seg < i3; ++nrn_seg) { recalc_moto_channels(S, P, N, nrn_seg, S->Vm[nrn_seg]); } } else if (P->models[nrn] == MUSCLE) { for(int nrn_seg = i1; nrn_seg < i3; ++nrn_seg) { recalc_muslce_channels(S, P, N, nrn_seg, S->Vm[nrn_seg]); } } else { assert(false); } // spike detection for (in synapse kernel) nrn_deliver_events(S, P, N, nrn); } __device__ void nrn_area_ri(States* S, const Parameters* P, Neurons* N) { /** * void nrn_area_ri(Section *sec) [790] treeset.c * area for right circular cylinders. 
Ri as right half of parent + left half of this */ printf("GPU: nrn_area_ri\n"); double dx, rleft, rright; int i1, i3, nrn_seg, segments; // for (int nrn = 0; nrn < N->size; ++nrn) { if (P->models[nrn] == GENERATOR) continue; i1 = P->nrn_start_seg[nrn]; i3 = P->nrn_start_seg[nrn + 1]; segments = (i3 - i1 - 2); dx = P->length[nrn] / segments; // divide by the last index of node (or segments count) rright = 0; // todo sec->pnode needs +1 index for (nrn_seg = i1 + 1; nrn_seg < i1 + segments + 1; ++nrn_seg) { // area for right circular cylinders. Ri as right half of parent + left half of this S->NODE_AREA[nrn_seg] = PI * dx * P->diam[nrn]; rleft = 1.e-2 * P->Ra[nrn] * (dx / 2.0) / (PI * pow(P->diam[nrn], 2) / 4.0); // left half segment Megohms S->NODE_RINV[nrn_seg] = 1.0 / (rleft + rright); // uS rright = rleft; } //the first and last segments has zero length. Area is 1e2 in dimensionless units S->NODE_AREA[i1] = 100.0; nrn_seg = i1 + segments + 1; // the last segment S->NODE_AREA[nrn_seg] = 100.0; S->NODE_RINV[nrn_seg] = 1.0 / rright; } } __device__ void ext_con_coef(States* S, const Parameters* P, Neurons* N) { /** * void ext_con_coef(void) * setup a and b */ double dx; int layer, i1, i3, segments, size = S->ext_size; // todo: extracellular only for those neurons who need for (int nrn = 0; nrn < N->size; ++nrn) { if (P->models[nrn] == GENERATOR) continue; layer = 0; i1 = P->nrn_start_seg[nrn]; i3 = P->nrn_start_seg[nrn + 1]; segments = (i3 - i1 - 2); // temporarily store half segment resistances in rhs for (int nrn_seg = i1 + 1; nrn_seg < i3 - 1; ++nrn_seg) { dx = P->length[nrn] / segments; S->EXT_RHS[nrn_seg + layer * size] = 1e-4 * xraxial * dx / 2; // Megohms } // last segment has 0 length S->EXT_RHS[i3 - 1 + layer * size] = 0; // todo i3 -1 or just i3 // NEURON RHS = [5e+07 5e+07 5e+07 0 ] // GRAS RHS = [0 5e+07 5e+07 5e+07 0 ] #ifdef LOG printf("EXT RHS: "); for(int ii = i1; ii < i3; ++ii) printf("%g\t", S->EXT_RHS[ii]); printf("\n"); #endif // node half resistances in general get added to the node and to the node's "child node in the same section". // child nodes in different sections don't involve parent node's resistance S->EXT_B[i1 + 1 + layer * size] = S->EXT_RHS[i1 + 1 + layer * size]; for (int nrn_seg = i1 + 1 + 1; nrn_seg < i3; ++nrn_seg) { S->EXT_B[nrn_seg + layer * size] = S->EXT_RHS[nrn_seg + layer * size] + S->EXT_RHS[nrn_seg - 1 + layer * size]; // Megohms } // NEURON B = [5e+07 1e+08 1e+08 5e+07 ] // GRAS B = [0 5e+07 1e+08 1e+08 5e+07 ] #ifdef LOG printf("EXT B: "); for(int ii = i1; ii < i3; ++ii) printf("%g\t", S->EXT_B[ii]); printf("\n"); #endif // first the effect of node on parent equation. 
Note That last nodes have area = 1.e2 in // dimensionless units so that last nodes have units of microsiemens's double area = S->NODE_AREA[i1]; // parentnode index of sec is 0 double rall_branch = 1.0; // sec->prop->dparam[4].val S->EXT_A[i1 + 1 + layer * size] = -1.e2 * rall_branch / (S->EXT_B[i1 + 1 + layer * size] * area); for (int nrn_seg = i1 + 1 + 1; nrn_seg < i3; ++nrn_seg) { area = S->NODE_AREA[nrn_seg - 1]; S->EXT_A[nrn_seg + layer * size] = -1.e2 / (S->EXT_B[nrn_seg + layer * size] * area); } // NEURON A = [-2e-08 -7.95775e-12 -7.95775e-12 -1.59155e-11 ] // GRAS A = [0 -2e-08 -7.95775e-12 -7.95775e-12 -1.59155e-11 ] #ifdef LOG printf("EXT A: "); for(int ii = i1; ii < i3; ++ii) printf("%g\t", S->EXT_A[ii]); printf("\n"); #endif // now the effect of parent on node equation // todo sec->pnode needs +1 index for (int nrn_seg = i1 + 1; nrn_seg < i3; ++nrn_seg) { S->EXT_B[nrn_seg + layer * size] = -1.e2 / (S->EXT_B[nrn_seg + layer * size] * S->NODE_AREA[nrn_seg]); } // NEURON B = [-1.59155e-11 -7.95775e-12 -7.95775e-12 -2e-08 ] // GRAS B = [0 -1.59155e-11 -7.95775e-12 -7.95775e-12 -2e-08 ] #ifdef LOG printf("EXT B END: "); for(int ii = i1; ii < i3; ++ii) printf("%g\t", S->EXT_B[ii]); printf("\n"); #endif // the same for other layers for (int nrn_seg = i1; nrn_seg < i3; ++nrn_seg) { S->EXT_A[nrn_seg + 1 * size] = S->EXT_A[nrn_seg + 0 * size]; S->EXT_B[nrn_seg + 1 * size] = S->EXT_B[nrn_seg + 0 * size]; S->EXT_RHS[nrn_seg + 1 * size] = S->EXT_RHS[nrn_seg + 0 * size]; } } } __device__ void connection_coef(States* S, const Parameters* P, Neurons* N) { /** * void connection_coef(void) treeset.c */ printf("GPU: connection_coef\n"); nrn_area_ri(S, P, N); int i1, i3, nrn_seg, segments; // for (int nrn = 0; nrn < N->size; ++nrn) { if (P->models[nrn] == GENERATOR) continue; i1 = P->nrn_start_seg[nrn]; i3 = P->nrn_start_seg[nrn + 1]; segments = (i3 - i1 - 2); // first the effect of node on parent equation. Note that last nodes have area = 1.e2 in dimensionless // units so that last nodes have units of microsiemens // todo sec->pnode needs +1 index nrn_seg = i1 + 1; // sec->prop->dparam[4].val = 1, what is dparam[4].val S->NODE_A[nrn_seg] = -1.e2 * 1.0 * S->NODE_RINV[nrn_seg] / S->NODE_AREA[nrn_seg - 1]; // todo sec->pnode needs +1 index for (nrn_seg = i1 + 1 + 1; nrn_seg < i1 + segments + 1 + 1; ++nrn_seg) { S->NODE_A[nrn_seg] = -1.e2 * S->NODE_RINV[nrn_seg] / S->NODE_AREA[nrn_seg - 1]; } // now the effect of parent on node equation // todo sec->pnode needs +1 index for (nrn_seg = i1 + 1; nrn_seg < i1 + segments + 1 + 1; ++nrn_seg) { S->NODE_B[nrn_seg] = -1.e2 * S->NODE_RINV[nrn_seg] / S->NODE_AREA[nrn_seg]; } } // for extracellular if (EXTRACELLULAR) { ext_con_coef(S, P, N); } /** * note: from LHS, this functions just recalc each time the constant NODED (!) 
* void nrn_lhs(NrnThread *_nt) * NODE_D[nrn, nd] updating is located at nrn_rhs, because _g is not the global variable */ // nt->cj = 2/dt if (secondorder) else 1/dt // note, the first is CAP // function nrn_cap_jacob(_nt, _nt->tml->ml); double cj = 1.0 / dt; double cfac = 0.001 * cj; for (int nrn = 0; nrn < N->size; ++nrn) { if (P->models[nrn] == GENERATOR) continue; i1 = P->nrn_start_seg[nrn]; i3 = P->nrn_start_seg[nrn + 1]; // nrn_cap_jacob for (nrn_seg = i1 + 1; nrn_seg < i3 - 1; ++nrn_seg) { S->const_NODE_D[nrn_seg] += cfac * P->Cm[nrn]; } } } __global__ void initialization_kernel(hiprandState_t *state, States* S, const Parameters* P, Neurons* N, double v_init) { /** * */ if (blockIdx.x * blockDim.x + threadIdx.x == 0) { int i1, i3; printf("GPU: initialization_kernel\n"); // connection_coef(S, P, N); // for different models -- different init function for (int nrn = 0; nrn < N->size; ++nrn) { // init random hiprand_init(7 + nrn, nrn, 0, &state[nrn]); // do not init neuron state for generator if (P->models[nrn] == GENERATOR) continue; i1 = P->nrn_start_seg[nrn]; i3 = P->nrn_start_seg[nrn + 1]; // for each segment init the neuron model for (int nrn_seg = i1; nrn_seg < i3; ++nrn_seg) { S->Vm[nrn_seg] = v_init; if (P->models[nrn] == INTER || P->models[nrn] == AFFERENTS) { nrn_inter_initial(S, P, N, nrn_seg, v_init); } else if (P->models[nrn] == MOTO) { nrn_moto_initial(S, P, N, nrn_seg, v_init); } else if (P->models[nrn] == MUSCLE) { nrn_muslce_initial(S, P, N, nrn_seg, v_init); } else { assert(false); } } // init RHS/LHS setup_tree_matrix(S, P, N, nrn, i1, i3); // init tau synapses syn_initial(S, P, N, nrn); } } } void conn_generator(Group &generator, Group &post_neurons, double delay, double weight, int indegree=50) { /** * todo */ uniform_int_distribution<int> nsyn_distr(indegree, indegree + 5); normal_distribution<double> delay_distr(delay, delay / 5); normal_distribution<double> weight_distr(weight, weight / 6); // int nsyn = nsyn_distr(rand_gen); printf("Connect generator %s [%d..%d] to %s [%d..%d] (1:%d). 
Synapses %d, D=%.1f, W=%.2f\n", generator.group_name.c_str(), generator.id_start, generator.id_end, post_neurons.group_name.c_str(), post_neurons.id_start, post_neurons.id_end, post_neurons.group_size, generator.group_size * post_neurons.group_size, delay, weight); // int nsyn = nsyn_distr(rand_gen); int gen_id = generator.id_start; if (generator.group_size > 1) { printf("Generator cannot include more than 1 neuron!\n"); exit(0); } for (int post = post_neurons.id_start; post <= post_neurons.id_end; ++post) { for (int i = 0; i < nsyn; ++i) { vector_syn_pre_nrn.push_back(gen_id); vector_syn_post_nrn.push_back(post); vector_syn_weight.push_back(weight_distr(rand_gen)); vector_syn_delay.push_back(ms_to_step(delay_distr(rand_gen))); vector_syn_delay_timer.push_back(-1); } } } void connect_fixed_indegree(Group &pre_neurons, Group &post_neurons, double delay, double weight, int indegree=50, short high_distr=0) { /** * */ // STR if (str_flag && weight < 0) weight = 0; if (post_neurons.model == INTER) { printf("POST INTER "); weight /= 11.0; } uniform_int_distribution<int> nsyn_distr(indegree - 15, indegree); uniform_int_distribution<int> pre_nrns_ids(pre_neurons.id_start, pre_neurons.id_end); double d_spread, w_spread; double d_left = 0, d_right = 0, w_left = 0, w_right = 0; if (high_distr == 0) { d_spread = 0; //delay / 6; w_spread = 0; //weight / 6; } else if (high_distr == 1) { d_spread = delay / 5; w_spread = weight / 5.5; } else if (high_distr == 2) { d_spread = delay / 3.5; w_spread = weight / 2.5; } else if (high_distr == 3) { d_spread = delay / 1.2; w_spread = weight / 1.1; d_left = delay - d_spread; d_right = delay + d_spread; w_left = weight - w_spread; w_right = weight + w_spread + w_spread / 2; } else if (high_distr == 4) { d_spread = delay / 3; w_spread = weight / 3; d_left = delay - d_spread; d_right = delay + d_spread; w_left = weight - w_spread; w_right = weight + w_spread + w_spread / 2; } else if (high_distr == 5) { d_spread = delay / 1.1; w_spread = weight / 1.1; d_left = delay - d_spread; d_right = delay + d_spread + delay * 1.5; w_left = weight - w_spread; w_right = weight + w_spread + w_spread; } else { throw logic_error("high_distr must be in 0..5"); } normal_distribution<double> delay_distr(delay, d_spread); normal_distribution<double> weight_distr(weight, w_spread); uniform_real_distribution<double> delay_distr_U(d_left, d_right); uniform_real_distribution<double> weight_distr_U(w_left, w_right); auto nsyn = nsyn_distr(rand_gen); printf("Connect indegree %s [%d..%d] to %s [%d..%d] (1:%d).
Synapses %d, D=%.1f, W=%.2f\n", pre_neurons.group_name.c_str(), pre_neurons.id_start, pre_neurons.id_end, post_neurons.group_name.c_str(), post_neurons.id_start, post_neurons.id_end, indegree, post_neurons.group_size * indegree, delay, weight); // int prerand = 0; double tmp_w = 0; double tmp_d = 0; for (int post = post_neurons.id_start; post <= post_neurons.id_end; ++post) { for (int i = 0; i < nsyn; ++i) { prerand = pre_nrns_ids(rand_gen); vector_syn_pre_nrn.push_back(prerand); vector_syn_post_nrn.push_back(post); if (post_neurons.model == AFFERENTS) { vector_syn_weight.push_back(weight); vector_syn_delay.push_back(ms_to_step(delay)); } else { if (high_distr == 3 || high_distr == 4 || high_distr == 5) { tmp_w = weight_distr_U(rand_gen); tmp_d = delay_distr_U(rand_gen); } else { tmp_w = weight_distr(rand_gen); if (tmp_w <= 0) { tmp_w = weight; } tmp_d = delay_distr(rand_gen); if (tmp_d <= 0.01) { tmp_d = delay; } } vector_syn_weight.push_back(tmp_w); vector_syn_delay.push_back(ms_to_step(tmp_d)); } vector_syn_delay_timer.push_back(-1); } } } void connect_fixed_outdegree_MUSCLE(Group &pre_neurons, Group &post_neurons, double delay, double weight, int indegree=50, short high_distr=0) { /** * */ uniform_int_distribution<int> nsyn_distr(indegree - 15, indegree); // double d_spread, w_spread; // if (high_distr == 0) { // d_spread = 0;//delay / 6; // w_spread = 0;//weight / 6; // } else if (high_distr == 1) { // d_spread = delay / 5; // w_spread = weight / 5.5; // } else if (high_distr == 2) { // d_spread = delay / 4; // w_spread = weight / 4; // }else { // logic_error("distr only 0 1 2"); // } // normal_distribution<double> delay_distr(delay, d_spread); // normal_distribution<double> weight_distr(weight, w_spread); auto nsyn = nsyn_distr(rand_gen); printf("Connect indegree %s [%d..%d] to %s [%d..%d] (1:%d). 
Synapses %d, D=%.1f, W=%.2f\n", pre_neurons.group_name.c_str(), pre_neurons.id_start, pre_neurons.id_end, post_neurons.group_name.c_str(), post_neurons.id_start, post_neurons.id_end, indegree, post_neurons.group_size * indegree, delay, weight); // int shift, post_rand = 0; double tmp_w, tmp_d = 0; double d_left = 0, d_right = 0, w_left = 0, w_right = 0, d_spread = 0, w_spread = 0; if (high_distr == 5) { d_spread = delay / 1.1; w_spread = weight / 1.1; d_left = delay - d_spread; d_right = delay + d_spread + delay * 1.5; w_left = weight - w_spread; w_right = weight + w_spread + w_spread; } else { d_left = delay; d_right = delay + 3; w_left = weight - weight / 1.5; w_right = weight; } uniform_real_distribution<double> delay_distr_U(d_left, d_right); uniform_real_distribution<double> weight_distr_U(w_left, w_right); int m_start = post_neurons.id_start; for (int pre = pre_neurons.id_start; pre <= pre_neurons.id_end; ++pre) { uniform_int_distribution<int> post_nrns_ids(m_start + 50 * shift, m_start + 50 * (shift + 1)); for (int i = 0; i < nsyn; ++i) { post_rand = post_nrns_ids(rand_gen); vector_syn_pre_nrn.push_back(pre); vector_syn_post_nrn.push_back(post_rand); // tmp_w = weight_distr(rand_gen); tmp_w = weight_distr_U(rand_gen); if (tmp_w <= 0) tmp_w = weight; // tmp_d = delay_distr(rand_gen); tmp_d = delay_distr_U(rand_gen); if (tmp_d <= 0.01) tmp_d = delay; vector_syn_weight.push_back(tmp_w); vector_syn_delay.push_back(ms_to_step(tmp_d)); vector_syn_delay_timer.push_back(-1); } } } void connect_fixed_outdegree(Group &pre_neurons, Group &post_neurons, double delay, double weight, int outdegree=50, short high_distr=0) { /** * */ // STR if (weight < 0) weight /= 1000; if (post_neurons.model == INTER) { printf("POST INTER "); weight /= 11.0; } uniform_int_distribution<int> nsyn_distr(outdegree - 15, outdegree); uniform_int_distribution<int> post_nrns_ids(post_neurons.id_start, post_neurons.id_end); double d_spread, w_spread; if (high_distr == 0) { d_spread = delay / 6; w_spread = weight / 6; } else if (high_distr == 1) { d_spread = delay / 5; w_spread = weight / 5.5; } else if (high_distr == 2) { d_spread = delay / 3.5; w_spread = weight / 3.5; } else if (high_distr == 3) { d_spread = delay / 4; w_spread = weight / 4; } else { throw logic_error("high_distr must be in 0..3"); } normal_distribution<double> delay_distr(delay, d_spread); normal_distribution<double> weight_distr(weight, w_spread); auto nsyn = nsyn_distr(rand_gen); printf("Connect OUTdegree %s [%d..%d] to %s [%d..%d] (1:%d).
Synapses %d, D=%.1f, W=%.2f\n", pre_neurons.group_name.c_str(), pre_neurons.id_start, pre_neurons.id_end, post_neurons.group_name.c_str(), post_neurons.id_start, post_neurons.id_end, outdegree, post_neurons.group_size * outdegree, delay, weight); // int postrand = 0; for (int pre = pre_neurons.id_start; pre <= pre_neurons.id_end; ++pre) { for (int i = 0; i < nsyn; ++i) { postrand = post_nrns_ids(rand_gen); vector_syn_pre_nrn.push_back(pre); vector_syn_post_nrn.push_back(postrand); if (post_neurons.model == AFFERENTS) { vector_syn_weight.push_back(weight); vector_syn_delay.push_back(ms_to_step(delay)); } else { vector_syn_weight.push_back(weight_distr(rand_gen)); vector_syn_delay.push_back(ms_to_step(delay_distr(rand_gen))); } vector_syn_delay_timer.push_back(-1); } } } void connectinsidenucleus(Group &nucleus) { connect_fixed_indegree(nucleus, nucleus, 0.5, 0.25, 50, 3); } void file_writing(int test_index, GroupMetadata &metadata, const string &folder) { /** * */ ofstream file; string file_name = "/dat/" + to_string(test_index) + "_" + metadata.group.group_name + ".dat"; file.open(folder + file_name); // save voltage for (unsigned int sim_iter = 0; sim_iter < SIM_TIME_IN_STEPS; sim_iter++) file << metadata.voltage_array[sim_iter] << " "; file << endl; // save g_exc for (unsigned int sim_iter = 0; sim_iter < SIM_TIME_IN_STEPS; sim_iter++) file << metadata.g_exc[sim_iter] << " "; file << endl; // save g_inh for (unsigned int sim_iter = 0; sim_iter < SIM_TIME_IN_STEPS; sim_iter++) file << metadata.g_inh[sim_iter] << " "; file << endl; // save spikes for (double const &value: metadata.spike_vector) { file << value << " "; } file.close(); cout << "Saved to: " << folder + file_name << endl; } void save(vector<Group> groups) { for (Group &group : groups) saving_groups.emplace_back(GroupMetadata(group, SIM_TIME_IN_STEPS)); } void copy_data_to(GroupMetadata& metadata, const double* Vm, double* tmp, const double* g_exc, const double* g_inh_A, const double* g_inh_B, const bool* has_spike, const unsigned int sim_iter) { double nrn_mean_volt = 0; double nrn_mean_g_exc = 0; double nrn_mean_g_inh = 0; int center; unsigned int id_start = metadata.group.id_start; unsigned int id_end = metadata.group.id_end; short shift = (vector_models[id_start] == MUSCLE) ? 
2 : 1; if (metadata.group.group_name == "muscle_E" || metadata.group.group_name == "muscle_F") { // #pragma omp parallel default(none) shared(vector_nrn_start_seg, Vm, tmp, nrn_mean_volt, id_start, id_end, shift) // #pragma omp for reduction(+:nrn_mean_volt) private(center) schedule(auto) for (auto nrn = id_start; nrn <= id_end; ++nrn) { center = vector_nrn_start_seg[nrn] + shift; nrn_mean_volt += (Vm[center] - tmp[nrn]); tmp[nrn] = Vm[center]; nrn_mean_g_exc += g_exc[nrn]; nrn_mean_g_inh += (g_inh_B[nrn] - g_inh_A[nrn]); if (has_spike[nrn]) { metadata.spike_vector.push_back(step_to_ms(sim_iter)); } } metadata.voltage_array[sim_iter] = nrn_mean_volt / metadata.group.group_size / dt * (4 * PI * 10000); } else { for (unsigned int nrn = id_start; nrn <= id_end; ++nrn) { center = vector_nrn_start_seg[nrn] + shift; nrn_mean_volt += Vm[center]; nrn_mean_g_exc += g_exc[nrn]; nrn_mean_g_inh += (g_inh_B[nrn] - g_inh_A[nrn]); if (has_spike[nrn]) { metadata.spike_vector.push_back(step_to_ms(sim_iter)); } } metadata.voltage_array[sim_iter] = nrn_mean_volt / metadata.group.group_size; } metadata.g_exc[sim_iter] = nrn_mean_g_exc / metadata.group.group_size; metadata.g_inh[sim_iter] = nrn_mean_g_inh / metadata.group.group_size; } void save_result(int test_index) { string current_path = getcwd(nullptr, 0); for (GroupMetadata &metadata : saving_groups) file_writing(test_index, metadata, current_path); printf("[Test #%d] Saved results to: %s \n", test_index, current_path.c_str()); } template<typename type> type* arr_init(int size = NRNS_AND_SEGS) { // important: NRNS_AND_SEGS initialized at network building return new type[size](); } void createmotif(Group &OM0, Group &OM1, Group &OM2, Group &OM3) { /** * Connects motif module * see https://github.com/research-team/memristive-spinal-cord/blob/master/doc/diagram/cpg_generator_FE_paper.png */ connect_fixed_indegree(OM0, OM1, 3, 0.9, 50, 5); connect_fixed_indegree(OM1, OM2, 3, 0.55, 50, 5); // 0.85 connect_fixed_indegree(OM2, OM1, 3, 0.55, 50, 5); connect_fixed_indegree(OM1, OM3, 2.4, 0.0003); // 2.5 connect_fixed_indegree(OM2, OM3, 2.4, 0.0005); // 2.5 connect_fixed_indegree(OM3, OM2, 2.4, -3); connect_fixed_indegree(OM3, OM1, 2.4, -3); } void createmotif_flexor(Group &OM0, Group &OM1, Group &OM2, Group &OM3) { connect_fixed_indegree(OM0, OM1, 3, 0.9, 50, 5); connect_fixed_indegree(OM1, OM2, 3, 0.61, 50, 5); // 0.85 connect_fixed_indegree(OM2, OM1, 3, 0.55, 50, 5); connect_fixed_indegree(OM1, OM3, 2.4, 0.0002); // 2.5 connect_fixed_indegree(OM2, OM3, 2.4, 0.0004); // 4 connect_fixed_indegree(OM3, OM2, 2.4, -2); // -1 - noise, -5 - void connect_fixed_indegree(OM3, OM1, 2.4, -3); } __global__ void neuron_kernel(hiprandState_t *state, States *S, const Parameters *P, Neurons *N, Generators *G, int t) { /** * */ int i1, i3; int tid = blockIdx.x * blockDim.x + threadIdx.x; // for (int nrn = tid; nrn < N->size; nrn += blockDim.x * gridDim.x) { // reset the spike state N->has_spike[nrn] = false; // if (P->models[nrn] != GENERATOR) { // calc the borders of the neuron by theirs segments i1 = P->nrn_start_seg[nrn]; i3 = P->nrn_start_seg[nrn + 1]; // generate pseudo-random noise // re-calc currents and states based on synaptic activity setup_tree_matrix(S, P, N, nrn, i1, i3); // solve equations nrn_solve(S, P, N, nrn, i1, i3); // change voltage of the neurons based on solved equations update(S, P, N, nrn, i1, i3); // recalc conductance, update channels and deliver network events nrn_fixed_step_lastpart(S, P, N, nrn, i1, i3); if (N->ref_time_timer[nrn] > 0) 
N->ref_time_timer[nrn]--; } } } // update generators if (tid == 0) { for (int generator = 0; generator < G->size; ++generator) { if (t == G->spike_each_step[generator] && t < G->time_end[generator]) { G->spike_each_step[generator] += G->freq_in_steps[generator]; N->has_spike[G->nrn_id[generator]] = true; } } // afferent // float part; // if ( ((25 / dt <= t) && (t < 50 / dt)) || ((150 / dt <= t) && (t < 175 / dt)) ) { // part = (2678 - 2559) * 0.4; // for (int n = 2559; n <= 2678 - part; n += 2) // N->has_spike[n] = false; // } else if ( ((50 / dt <= t) && (t < 75 / dt)) || ((125 / dt <= t) && (t < 150 / dt)) ) { // part = (2678 - 2559) * 0.6; // for (int n = 2559; n <= 2678 - part; n += 3) // N->has_spike[n] = false; // } } } __global__ void synapse_kernel(Neurons *N, Synapses* synapses) { /** * void deliver_net_events(NrnThread* nt) */ int pre_nrn, post_id; double weight; for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < synapses->size; index += blockDim.x * gridDim.x) { pre_nrn = synapses->syn_pre_nrn[index]; // synapse update if (synapses->syn_delay_timer[index] > 0) { synapses->syn_delay_timer[index]--; } else { // if the timer is over -> the synapse changes the conductance of the post neuron if (synapses->syn_delay_timer[index] == 0) { post_id = synapses->syn_post_nrn[index]; weight = synapses->syn_weight[index]; if (weight >= 0) { if (N->ref_time_timer[post_id] == 0) atomicAdd(&N->g_exc[post_id], weight); } else { atomicAdd(&N->g_inh_A[post_id], -weight * N->factor[post_id]); atomicAdd(&N->g_inh_B[post_id], -weight * N->factor[post_id]); } synapses->syn_delay_timer[index] = -1; } else { // if the pre nrn has a spike and the synapse is ready to send a signal if (N->has_spike[pre_nrn] && synapses->syn_delay_timer[index] == -1) { synapses->syn_delay_timer[index] = synapses->syn_delay[index]; } } } } }
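// The three-way branch in synapse_kernel above is a per-synapse state machine:
// timer == -1 means idle, timer > 0 means a spike is in flight, and timer == 0
// means the delayed spike arrives this step and deposits its weight into the
// postsynaptic conductance. A minimal single-threaded sketch of the same logic
// follows; the Syn struct and step_synapse are illustrative names (not part of
// this file), the refractory gate and the inhibitory g_inh_A/g_inh_B path are
// dropped for brevity, and the GPU kernel needs atomicAdd only because many
// synapses may target the same neuron in parallel.
struct Syn { int delay_steps; int timer = -1; double weight; };

void step_synapse(Syn &s, bool pre_spiked, double &g_exc_post) {
    if (s.timer > 0) {
        s.timer--;                 // spike is still travelling along the delay line
    } else if (s.timer == 0) {
        g_exc_post += s.weight;    // delayed spike arrives: bump the conductance
        s.timer = -1;              // return to idle
    } else if (pre_spiked) {
        s.timer = s.delay_steps;   // presynaptic spike: arm the timer
    }
}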
ccad248386a1616c3b8dc5b312072e5ea53ec603.cu
/** See the topology https://github.com/research-team/memristive-spinal-cord/blob/master/doc/diagram/cpg_generator_FE_paper.png Based on the NEURON repository. */ //#define LOG #include <omp.h> #include <assert.h> #include <random> #include <vector> #include <string> #include "structs.h" #include <stdexcept> #include <curand_kernel.h> // for file writing #include <cstdlib> #include <iostream> #include <fstream> #include <unistd.h> #include <stdio.h> #define HANDLE_ERROR( err ) ( HandleError( err, __FILE__, __LINE__ ) ) #define PI 3.141592654f using namespace std; random_device r; default_random_engine rand_gen(r()); static void HandleError(cudaError_t err, const char *file, int line) { if (err != cudaSuccess) { printf("!!! %s in %s at line %d\n", cudaGetErrorString(err), file, line); exit(EXIT_FAILURE); } } const double dt = 0.025; // [ms] simulation step const int cv_fr = 200; // frequency of CV int step_number; // [step] number of full cycle steps const bool EXTRACELLULAR = false; unsigned int one_step_time; int skin_time; // duration of layer 25 = 21 cm/s; 50 = 15 cm/s; 125 = 6 cm/s int slices_extensor; unsigned int slice_time = 25; double E_coef; int slices_flexor; // flexor duration (125 or 175 ms for 4pedal) double cv_coef; bool str_flag = false; unsigned int sim_time; unsigned int SIM_TIME_IN_STEPS; unsigned int NRNS_NUMBER = 0; // [id] global neuron id = number of neurons unsigned int NRNS_AND_SEGS = 0; // [id] global neuron+segs id = number of neurons with segments const int neurons_in_group = 50; // number of neurons in a group const int neurons_in_ip = 196; // number of neurons in a group // common neuron constants // normal const double V_th = -40; // [mV] voltage threshold const double V_adj = -63; // [mV] adjust voltage for -55 threshold // moto neuron constants const double amA = 0.4; // const ??? todo const double amB = 66; // const ??? todo const double amC = 5; // const ??? todo const double bmA = 0.4; // const ??? todo const double bmB = 32; // const ??? todo const double bmC = 5; // const ??? todo const double ca0 = 2; // initial calcium concentration const double R_const = 8.314472; // [k-mole] or [joule/degC] const const double F_const = 96485.34; // [faraday] or [kilocoulombs] const // muscle fiber constants // const double g_kno = 0.01; // [S/cm2] conductance of the todo // const double g_kir = 0.03; // [S/cm2] conductance of the Inwardly Rectifying Potassium K+ (Kir) channel // Boltzman steady state curve const double vhalfl = -98.92; // [mV] inactivation half-potential const double kl = 10.89; // [mV] Stegen et al. 2012 // tau_infty const double vhalft = 67.0828; // [mV] fitted //100 uM sens curr 350a, Stegen et al. 2012 const double at = 0.00610779; // [/ ms] Stegen et al. 2012 const double bt = 0.0817741; // [/ ms] Note: typo in Stegen et al. 
2012 // temperature dependence const double q10 = 1; // temperature scaling (sensitivity) const double celsius = 36; // [degC] temperature of the cell // i_membrane [mA/cm2] const int nlayer = 2; const double e_extracellular = 0; // [mV] const double xraxial = 1e9; // [MOhm/cm] // neuron parameters vector<unsigned int> vector_nrn_start_seg; vector<char> vector_models; vector<double> vector_Cm, vector_gnabar, vector_gkbar, vector_gl, vector_Ra, vector_diam, vector_length, vector_ena, vector_ek, vector_el, vector_gkrect, vector_gcaN, vector_gcaL, vector_gcak; // synaptic parameters vector<double> vector_E_ex, vector_E_inh, vector_tau_exc, vector_tau_inh1, vector_tau_inh2; // synapses varaibels vector<int> vector_syn_pre_nrn, vector_syn_post_nrn, vector_syn_delay, vector_syn_delay_timer; vector<double> vector_syn_weight; // results vector vector <GroupMetadata> saving_groups; // for debugging vector <Group> all_groups; // generators vector<unsigned int> vec_time_end, vec_nrn_id, vec_freq_in_steps, vec_spike_each_step; double *bimodal_distr_for_moto_neurons(const unsigned int nrnnumber) { int standby_percent = 70; double diameter_active = 27.0; double diameter_standby = 57.0; double* nrn_diameter = new double [nrnnumber]; random_device r1; default_random_engine generator1(r1()); normal_distribution<double> d_active(diameter_active, 3); normal_distribution<double> d_standby(diameter_standby, 6); int standby_size = (int) (nrnnumber * standby_percent / 100); int active_size = nrnnumber - standby_size; for (int i = 0; i < active_size; i++) nrn_diameter[i] = d_active(generator1); for (int i = active_size; i < nrnnumber; i++) nrn_diameter[i] = d_standby(generator1); return nrn_diameter; } // form structs of neurons global ID and groups name Group form_group(const string &group_name, int nrns_in_group = neurons_in_group, const char model = INTER, const int segs = 1) { /** * */ Group group = Group(); group.group_name = group_name; // name of a neurons group group.id_start = NRNS_NUMBER; // first ID in the group group.id_end = NRNS_NUMBER + nrns_in_group - 1; // the latest ID in the group group.group_size = nrns_in_group; // size of the neurons group group.model = model; double Cm, gnabar, gkbar, gl, Ra, ena, ek, el, diam, dx, gkrect, gcaN, gcaL, gcak, e_ex, e_inh, tau_exc, tau_inh1, tau_inh2; uniform_real_distribution<double> Cm_distr(0.3, 2.5); uniform_real_distribution<double> Cm_distr_muscle(2.5, 4.0); uniform_real_distribution<double> length_distr_muscle(2500, 3500); normal_distribution<double> moto_Cm_distr(2, 0.5); uniform_int_distribution<int> inter_diam_distr(5, 15); uniform_real_distribution<double> afferent_diam_distr(15, 35); uniform_real_distribution<double> gl_distr_MUSCLE(0.0005, 0.001); // 8, 12 uniform_real_distribution<double> tau_exc_distr_MUSCLE(0.33, 0.35); double* diameters; // if (model == MOTO) diameters = bimodal_distr_for_moto_neurons(nrns_in_group); for (int nrn = 0; nrn < nrns_in_group; nrn++) { if (model == INTER) { Cm = Cm_distr(rand_gen); gnabar = 0.1; gkbar = 0.08; gl = 0.002; Ra = 100.0; ena = 50.0; ek = -77.0; el = -70.0; diam = inter_diam_distr(rand_gen); dx = diam; e_ex = 50; e_inh = -80; tau_exc = 0.35; tau_inh1 = 0.5; tau_inh2 = 3.5; } else if (model == AFFERENTS) { Cm = 2; gnabar = 0.5; gkbar = 0.04; gl = 0.002; Ra = 200.0; ena = 50.0; ek = -90.0; el = -70.0; diam = afferent_diam_distr(rand_gen); // 10 dx = diam; e_ex = 50; e_inh = -80; tau_exc = 0.35; tau_inh1 = 0.5; tau_inh2 = 3.5; } else if (model == MOTO) { Cm = moto_Cm_distr(rand_gen); gnabar = 0.05; gl = 
0.002; Ra = 200.0; ena = 50.0; ek = -80.0; el = -70.0; diam = diameters[nrn]; dx = diam; gkrect = 0.3; gcaN = 0.05; gcaL = 0.0001; gcak = 0.3; e_ex = 50.0; e_inh = -80.0; tau_exc = 0.3; tau_inh1 = 1.0; tau_inh2 = 1.5; if (diam > 50) { gnabar = 0.1; gcaL = 0.001; gl = 0.003; gkrect = 0.2; gcak = 0.2; } } else if (model == MUSCLE) { Cm = Cm_distr_muscle(rand_gen); gnabar = 0.03; gkbar = 0.06; // gl = 0.001; gl = gl_distr_MUSCLE(rand_gen); Ra = 1.1; ena = 55.0; ek = -90.0; el = -70.0; diam = 40.0; dx = length_distr_muscle(rand_gen); e_ex = 0.0; e_inh = -80.0; tau_exc = 0.35; // tau_exc = tau_exc_distr_MUSCLE(rand_gen); tau_inh1 = 1.0; tau_inh2 = 1.0; } else if (model == GENERATOR) { // nothing } else { throw logic_error("Choose the model"); } // common properties vector_Cm.push_back(Cm); vector_gnabar.push_back(gnabar); vector_gkbar.push_back(gkbar); vector_gl.push_back(gl); vector_el.push_back(el); vector_ena.push_back(ena); vector_ek.push_back(ek); vector_Ra.push_back(Ra); vector_diam.push_back(diam); vector_length.push_back(dx); vector_gkrect.push_back(gkrect); vector_gcaN.push_back(gcaN); vector_gcaL.push_back(gcaL); vector_gcak.push_back(gcak); vector_E_ex.push_back(e_ex); vector_E_inh.push_back(e_inh); vector_tau_exc.push_back(tau_exc); vector_tau_inh1.push_back(tau_inh1); vector_tau_inh2.push_back(tau_inh2); // vector_nrn_start_seg.push_back(NRNS_AND_SEGS); NRNS_AND_SEGS += (segs + 2); vector_models.push_back(model); // void (*foo)(int); // foo = &my_int_func; } NRNS_NUMBER += nrns_in_group; printf("Formed %s IDs [%d ... %d] = %d\n", group_name.c_str(), NRNS_NUMBER - nrns_in_group, NRNS_NUMBER - 1, nrns_in_group); // for debugging all_groups.push_back(group); return group; } __host__ unsigned int ms_to_step(double ms) { return (unsigned int) (ms / dt); } __host__ double step_to_ms(int step) { return step * dt; } // copy data from host to device template<typename type> void memcpyHtD(type *host, type *gpu, unsigned int size) { HANDLE_ERROR(cudaMemcpy(gpu, host, sizeof(type) * size, cudaMemcpyHostToDevice)); } // copy data from device to host template<typename type> void memcpyDtH(type *gpu, type *host, unsigned int size) { HANDLE_ERROR(cudaMemcpy(host, gpu, size * sizeof(type), cudaMemcpyDeviceToHost)); } // init GPU array and copy data from the CPU array template<typename type> type* init_gpu_arr(type *cpu_var, unsigned int size = NRNS_AND_SEGS) { type *gpu_var; HANDLE_ERROR(cudaMalloc(&gpu_var, size * sizeof(type))); memcpyHtD<type>(cpu_var, gpu_var, size); return gpu_var; } // init GPU array and copy data from the CPU vector template<typename type> type *init_gpu_arr(vector<type> &vec) { type *gpu_var; HANDLE_ERROR(cudaMalloc(&gpu_var, sizeof(type) * vec.size())); memcpyHtD<type>(vec.data(), gpu_var, vec.size()); return gpu_var; } void add_generator(Group &group, double start, double end, double freq) { vec_nrn_id.push_back(group.id_start); vec_time_end.push_back(ms_to_step(end)); vec_freq_in_steps.push_back(ms_to_step(1000 / freq)); vec_spike_each_step.push_back(ms_to_step(start)); printf("start %d end %d freq %d\n", ms_to_step(start), ms_to_step(end), ms_to_step(1000 / freq)); } // convert vector to the array template<typename type> type* vec2arr(vector<type> &vec) { return vec.cpu_vector.data(); } __device__ double Exp(double volt) { return (volt < -100)? 
0 : exp(volt); } __device__ double alpham(double volt) { if (abs((volt + amB) / amC) < 1e-6) return amA * amC; return amA * (volt + amB) / (1.0 - Exp(-(volt + amB) / amC)); } __device__ double betam(double volt) { if (abs((volt + bmB) / bmC) < 1e-6) return -bmA * bmC; return -bmA * (volt + bmB) / (1.0 - Exp((volt + bmB) / bmC)); } __device__ double syn_current(Neurons* N, const Parameters* P, int nrn, double voltage) { /** * calculate synaptic current */ return N->g_exc[nrn] * (voltage - P->E_ex[nrn]) + (N->g_inh_B[nrn] - N->g_inh_A[nrn]) * (voltage - P->E_inh[nrn]); } __device__ double nrn_moto_current(States* S, const Parameters* P, Neurons* N, int nrn, int nrn_seg_index, double voltage) { /** * calculate channels current */ double iNa = P->gnabar[nrn] * pow(S->m[nrn_seg_index], 3) * S->h[nrn_seg_index] * (voltage - P->ena[nrn]); double iK = P->gkrect[nrn] * pow(S->n[nrn_seg_index], 4) * (voltage - P->ek[nrn]) + P->gcak[nrn] * pow(S->cai[nrn_seg_index], 2) / (pow(S->cai[nrn_seg_index], 2) + 0.014 * 0.014) * (voltage - P->ek[nrn]); double iL = P->gl[nrn] * (voltage - P->el[nrn]); double E_Ca = (1000.0 * R_const * 309.15 / (2.0 * F_const)) * log(ca0 / S->cai[nrn_seg_index]); S->I_Ca[nrn_seg_index] = P->gcaN[nrn] * S->mc[nrn_seg_index] * S->mc[nrn_seg_index] * S->hc[nrn_seg_index] * (voltage - E_Ca) + P->gcaL[nrn] * S->p[nrn_seg_index] * (voltage - E_Ca); return iNa + iK + iL + S->I_Ca[nrn_seg_index]; } __device__ double nrn_fastchannel_current(States* S, const Parameters* P, Neurons* N, int nrn, int nrn_seg_index, double voltage) { /** * calculate channels current */ double iNa = P->gnabar[nrn] * pow(S->m[nrn_seg_index], 3) * S->h[nrn_seg_index] * (voltage - P->ena[nrn]); double iK = P->gkbar[nrn] * pow(S->n[nrn_seg_index], 4) * (voltage - P->ek[nrn]); double iL = P->gl[nrn] * (voltage - P->el[nrn]); return iNa + iK + iL; } __device__ void recalc_synaptic(States* S, const Parameters* P, Neurons* N, int nrn) { /** * updating conductance(summed) of neurons' post-synaptic conenctions */ // exc synaptic conductance if (N->g_exc[nrn] != 0.0) { N->g_exc[nrn] -= (1.0 - exp(-dt / P->tau_exc[nrn])) * N->g_exc[nrn]; if (N->g_exc[nrn] < 1e-5) { N->g_exc[nrn] = 0.0; } } // inh1 synaptic conductance if (N->g_inh_A[nrn] != 0.0) { N->g_inh_A[nrn] -= (1.0 - exp(-dt / P->tau_inh1[nrn])) * N->g_inh_A[nrn]; if (N->g_inh_A[nrn] < 1e-5) { N->g_inh_A[nrn] = 0.0; } } // inh2 synaptic conductance if (N->g_inh_B[nrn] != 0.0) { N->g_inh_B[nrn] -= (1.0 - exp(-dt / P->tau_inh2[nrn])) * N->g_inh_B[nrn]; if (N->g_inh_B[nrn] < 1e-5) N->g_inh_B[nrn] = 0.0; } } __device__ void syn_initial(States* S, const Parameters* P, Neurons* N, int nrn) { /** * initialize tau(rise / decay time, ms) and factor(const) variables */ if (P->tau_inh1[nrn] / P->tau_inh2[nrn] > 0.9999) P->tau_inh1[nrn] = 0.9999 * P->tau_inh2[nrn]; if (P->tau_inh1[nrn] / P->tau_inh2[nrn] < 1e-9) P->tau_inh1[nrn] = P->tau_inh2[nrn] * 1e-9; // double tp = (P->tau_inh1[nrn] * P->tau_inh2[nrn]) / (P->tau_inh2[nrn] - P->tau_inh1[nrn]) * log(P->tau_inh2[nrn] / P->tau_inh1[nrn]); N->factor[nrn] = -exp(-tp / P->tau_inh1[nrn]) + exp(-tp / P->tau_inh2[nrn]); N->factor[nrn] = 1.0 / N->factor[nrn]; } __device__ void nrn_inter_initial(States* S, const Parameters* P, Neurons* N, int nrn_seg_index, double V) { /** * initialize channels, based on cropped evaluate_fct function */ double V_mem = V - V_adj; // double a = 0.32 * (13.0 - V_mem) / (exp((13.0 - V_mem) / 4.0) - 1.0); double b = 0.28 * (V_mem - 40.0) / (exp((V_mem - 40.0) / 5.0) - 1.0); S->m[nrn_seg_index] = a / (a + 
b); // m_inf // a = 0.128 * exp((17.0 - V_mem) / 18.0); b = 4.0 / (1.0 + exp((40.0 - V_mem) / 5.0)); S->h[nrn_seg_index] = a / (a + b); // h_inf // a = 0.032 * (15.0 - V_mem) / (exp((15.0 - V_mem) / 5.0) - 1.0); b = 0.5 * exp((10.0 - V_mem) / 40.0); S->n[nrn_seg_index] = a / (a + b); // n_inf } __device__ void nrn_moto_initial(States* S, const Parameters* P, Neurons* N, int nrn_seg_index, double V) { /** * initialize channels, based on cropped evaluate_fct function */ double a = alpham(V); S->m[nrn_seg_index] = a / (a + betam(V)); // m_inf S->h[nrn_seg_index] = 1.0 / (1.0 + Exp((V + 65.0) / 7.0)); // h_inf S->p[nrn_seg_index] = 1.0 / (1.0 + Exp(-(V + 55.8) / 3.7)); // p_inf S->n[nrn_seg_index] = 1.0 / (1.0 + Exp(-(V + 38.0) / 15.0)); // n_inf S->mc[nrn_seg_index] = 1.0 / (1.0 + Exp(-(V + 32.0) / 5.0)); // mc_inf S->hc[nrn_seg_index] = 1.0 / (1.0 + Exp((V + 50.0) / 5.0)); // hc_inf S->cai[nrn_seg_index] = 0.0001; } __device__ void nrn_muslce_initial(States* S, const Parameters* P, Neurons* N, int nrn_seg_index, double V) { /** * initialize channels, based on cropped evaluate_fct function */ double V_mem = V - V_adj; // m_inf double a = 0.32 * (13.0 - V_mem) / (exp((13.0 - V_mem) / 4.0) - 1.0); double b = 0.28 * (V_mem - 40.0) / (exp((V_mem - 40.0) / 5.0) - 1.0); S->m[nrn_seg_index] = a / (a + b); // h_inf a = 0.128 * exp((17.0 - V_mem) / 18.0); b = 4.0 / (1.0 + exp((40.0 - V_mem) / 5.0)); S->h[nrn_seg_index] = a / (a + b); // n_inf a = 0.032 * (15.0 - V_mem) / (exp((15.0 - V_mem) / 5.0) - 1.0); b = 0.5 * exp((10.0 - V_mem) / 40.0); S->n[nrn_seg_index] = a / (a + b); } __device__ void recalc_inter_channels(States* S, const Parameters* P, Neurons* N, int nrn_seg_index, double V) { /** * calculate new states of channels (evaluate_fct) */ // BREAKPOINT -> states -> evaluate_fct double V_mem = V - V_adj; // double a = 0.32 * (13.0 - V_mem) / (exp((13.0 - V_mem) / 4.0) - 1.0); double b = 0.28 * (V_mem - 40.0) / (exp((V_mem - 40.0) / 5.0) - 1.0); double tau = 1.0 / (a + b); double inf = a / (a + b); S->m[nrn_seg_index] += (1.0 - exp(-dt / tau)) * (inf - S->m[nrn_seg_index]); // a = 0.128 * exp((17.0 - V_mem) / 18.0); b = 4.0 / (1.0 + exp((40.0 - V_mem) / 5.0)); tau = 1.0 / (a + b); inf = a / (a + b); S->h[nrn_seg_index] += (1.0 - exp(-dt / tau)) * (inf - S->h[nrn_seg_index]); // a = 0.032 * (15.0 - V_mem) / (exp((15.0 - V_mem) / 5.0) - 1.0); b = 0.5 * exp((10.0 - V_mem) / 40.0); tau = 1.0 / (a + b); inf = a / (a + b); // states S->n[nrn_seg_index] += (1.0 - exp(-dt / tau)) * (inf - S->n[nrn_seg_index]); } __device__ void recalc_moto_channels(States* S, const Parameters* P, Neurons* N, int nrn_seg_index, double V) { /** * calculate new states of channels (evaluate_fct) */ // BREAKPOINT -> states -> evaluate_fct double a = alpham(V); double b = betam(V); // m double tau = 1.0 / (a + b); double inf = a / (a + b); S->m[nrn_seg_index] += (1.0 - exp(-dt / tau)) * (inf - S->m[nrn_seg_index]); // h tau = 30.0 / (Exp((V + 60.0) / 15.0) + Exp(-(V + 60.0) / 16.0)); inf = 1.0 / (1 + Exp((V + 65.0) / 7.0)); S->h[nrn_seg_index] += (1.0 - exp(-dt / tau)) * (inf - S->h[nrn_seg_index]); // DELAYED RECTIFIER POTASSIUM tau = 5.0 / (Exp((V + 50.0) / 40.0) + Exp(-(V + 50.0) / 50.0)); inf = 1.0 / (1.0 + Exp(-(V + 38.0) / 15.0)); S->n[nrn_seg_index] += (1.0 - exp(-dt / tau)) * (inf - S->n[nrn_seg_index]); // CALCIUM DYNAMICS N-type double mc_inf = 1.0 / (1.0 + Exp(-(V + 32.0) / 5.0)); double hc_inf = 1.0 / (1.0 + Exp((V + 50.0) / 5.0)); // CALCIUM DYNAMICS L-type tau = 400.0; inf = 1.0 / (1.0 + Exp(-(V + 55.8) / 3.7)); 
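// Note: every gating-state update below (and in the other recalc_* functions)
// uses the same exact exponential step: for dx/dt = (inf - x) / tau the solution
// over one step dt is x(t + dt) = inf + (x(t) - inf) * exp(-dt / tau), i.e.
// x += (1.0 - exp(-dt / tau)) * (inf - x),
// which is NEURON's "cnexp" scheme and stays stable for any dt as long as tau > 0.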
S->p[nrn_seg_index] += (1.0 - exp(-dt / tau)) * (inf - S->p[nrn_seg_index]); // states S->mc[nrn_seg_index] += (1.0 - exp(-dt / 15.0)) * (mc_inf - S->mc[nrn_seg_index]); // tau_mc = 15 S->hc[nrn_seg_index] += (1.0 - exp(-dt / 50.0)) * (hc_inf - S->hc[nrn_seg_index]); // tau_hc = 50 S->cai[nrn_seg_index] += (1.0 - exp(-dt * 0.04)) * (-0.01 * S->I_Ca[nrn_seg_index] / 0.04 - S->cai[nrn_seg_index]); } __device__ void recalc_muslce_channels(States* S, const Parameters* P, Neurons* N, int nrn_seg_index, double V) { /** * calculate new states of channels (evaluate_fct) */ // BREAKPOINT -> states -> evaluate_fct double V_mem = V - V_adj; // double a = 0.32 * (13.0 - V_mem) / (exp((13.0 - V_mem) / 4.0) - 1.0); double b = 0.28 * (V_mem - 40.0) / (exp((V_mem - 40.0) / 5.0) - 1.0); double tau = 1.0 / (a + b); double inf = a / (a + b); S->m[nrn_seg_index] += (1.0 - exp(-dt / tau)) * (inf - S->m[nrn_seg_index]); // a = 0.128 * exp((17.0 - V_mem) / 18.0); b = 4.0 / (1.0 + exp((40.0 - V_mem) / 5.0)); tau = 1.0 / (a + b); inf = a / (a + b); S->h[nrn_seg_index] += (1.0 - exp(-dt / tau)) * (inf - S->h[nrn_seg_index]); // a = 0.032 * (15.0 - V_mem) / (exp((15.0 - V_mem) / 5.0) - 1.0); b = 0.5 * exp((10.0 - V_mem) / 40.0); tau = 1.0 / (a + b); inf = a / (a + b); S->n[nrn_seg_index] += (1.0 - exp(-dt / tau)) * (inf - S->n[nrn_seg_index]); // // // double qt = pow(q10, (celsius - 33.0) / 10.0); // double linf = 1.0 / (1.0 + exp((V - vhalfl) / kl)); // l_steadystate // double taul = 1.0 / (qt * (at * exp(-V / vhalft) + bt * exp(V / vhalft))); // double alpha = 0.3 / (1.0 + exp((V + 43.0) / -5.0)); // double beta = 0.03 / (1.0 + exp((V + 80.0) / -1.0)); // double stau = 1.0 / (alpha + beta); // double sinf = alpha / (alpha + beta); // // states // S->l[nrn_seg_index] += (1.0 - exp(-dt / taul)) * (linf - S->l[nrn_seg_index]); // S->s[nrn_seg_index] += (1.0 - exp(-dt / stau)) * (sinf - S->s[nrn_seg_index]); } __device__ void nrn_rhs_ext(States* S, const Parameters* P, Neurons* N, int i1, int i3) { /** * void nrn_rhs_ext(NrnThread* _nt) */ int size = S->ext_size; const double xg[5] = {0, 1e9, 1e9, 1e9, 0}; // for (int nrn_seg = i1; nrn_seg < i3; ++nrn_seg) { for (int layer = 0; layer < nlayer; ++layer) { // zeroed at nrn_rhs before nrn_rhs_ext S->EXT_RHS[nrn_seg + layer * size] = 0; } S->EXT_RHS[nrn_seg + 0 * size] -= S->NODE_RHS[nrn_seg]; } #ifdef LOG printf("nrn_rhs_ext::EXT RHS 0 : "); for(int ii = i1; ii < i3; ++ii) printf("%g\t", S->EXT_RHS[ii]); printf("\n"); printf("nrn_rhs_ext::EXT RHS 1 : "); for(int ii = i1; ii < i3; ++ii) printf("%g\t", S->EXT_RHS[ii + size]); printf("\n"); #endif double x, dv; for (int nrn_seg = i1 + 1; nrn_seg < i3; ++nrn_seg) { #ifdef LOG printf("V: %g, %g\n", S->EXT_V[nrn_seg], S->EXT_V[nrn_seg + size]); #endif for (int layer = 0; layer < nlayer; ++layer) { dv = S->EXT_V[nrn_seg - 1 + layer * size] - S->EXT_V[nrn_seg + layer * size]; S->EXT_RHS[nrn_seg + layer * size] -= S->EXT_B[nrn_seg + layer * size] * dv; S->EXT_RHS[nrn_seg - 1 + layer * size] += S->EXT_A[nrn_seg + layer * size] * dv; } int layer = nlayer - 1; S->EXT_RHS[nrn_seg + layer * size] -= xg[nrn_seg - i1] * (S->EXT_RHS[nrn_seg + layer * size] - e_extracellular); for (--layer; layer >= 0; --layer) { /* between j and j+1 layer */ x = xg[nrn_seg - i1] * (S->EXT_V[nrn_seg + layer * size] - S->EXT_V[nrn_seg + (layer + 1) * size]); S->EXT_RHS[nrn_seg + layer * size] -= x; S->EXT_RHS[nrn_seg + (layer + 1) * size] += x; } } #ifdef LOG printf("nrn_rhs_ext::EXT RHS END 0 : "); for(int ii = i1; ii < i3; ++ii) printf("%g\t", 
S->EXT_RHS[ii]); printf("\n"); printf("nrn_rhs_ext::EXT RHS END 1 : "); for(int ii = i1; ii < i3; ++ii) printf("%g\t", S->EXT_RHS[ii + size]); printf("\n"); #endif } __device__ void nrn_setup_ext(States* S, const Parameters* P, Neurons* N, int i1, int i3) { /** * void nrn_setup_ext(NrnThread* _nt) */ double cj = 1 / dt; double cfac = 0.001 * cj; int size = S->ext_size; const double xg[5] = {0, 1e9, 1e9, 1e9, 0}; const double xc[5] = {0, 0, 0, 0, 0}; // todo find the place where it is zeroed for (int nrn_seg = i1; nrn_seg < i3; ++nrn_seg) for (int layer = 0; layer < nlayer; ++layer) S->EXT_D[nrn_seg + layer * size] = 0; // d contains all the membrane conductances (and capacitance) // i.e. (cm/dt + di/dvm - dis/dvi)*[dvi] and (dis/dvi)*[dvx] for (int nrn_seg = i1; nrn_seg < i3; ++nrn_seg) { // nde->_d only has -ELECTRODE_CURRENT contribution S->EXT_D[nrn_seg + 0 * size] += S->NODE_D[nrn_seg]; } // NEURON D 0 = [0 0.1442 0.1442 0.1442 0 ] [0 0 0 0 0] // GRAS [0 0.1442 0.1442 0.1442 0 ] [0 0 0 0 0] // series resistance, capacitance, and axial terms for (int nrn_seg = i1 + 1; nrn_seg < i3; ++nrn_seg) { // series resistance and capacitance to ground int layer = 0; while (1) { double mfac = xg[nrn_seg - i1] + xc[nrn_seg - i1] * cfac; S->EXT_D[nrn_seg + layer * size] += mfac; layer += 1; if (layer == nlayer) break; S->EXT_D[nrn_seg + layer * size] += mfac; } // axial connections for (layer = 0; layer < nlayer; ++layer) { S->EXT_D[nrn_seg + layer * size] -= S->EXT_B[nrn_seg + layer * size]; S->EXT_D[nrn_seg - 1 + layer * size] -= S->EXT_A[nrn_seg + layer * size]; } } // NEURON D[0] = [2e-08 1e+09 1e+09 1e+09 2e-08 ] GRAS [2e-08 1e+09 1e+09 1e+09 2e-08] // NEURON D[1] = [2e-08 2e+09 2e+09 2e+09 2e-08 ] GRAS [2e-08 2e+09 2e+09 2e+09 2e-08] } __device__ void nrn_update_2d(States* S, const Parameters* P, Neurons* N, int i1, int i3) { /** * void nrn_update_2d(NrnThread* nt) * update has already been called so modify nd->v based on dvi we only need to * update extracellular nodes and base the corresponding nd->v on dvm (dvm = dvi - dvx) */ // final voltage updating int size = S->ext_size; for (int nrn_seg = i1; nrn_seg < i3; ++nrn_seg) for (int layer = 0; layer < nlayer; ++layer) S->EXT_V[nrn_seg + layer * size] += S->EXT_RHS[nrn_seg + layer * size]; } __device__ void nrn_rhs(States* S, const Parameters* P, Neurons* N, int nrn, int i1, int i3) { /** * void nrn_rhs(NrnThread *_nt) combined with the first part of nrn_lhs * calculate right hand side of * cm*dvm/dt = -i(vm) + is(vi) + ai_j*(vi_j - vi) * cx*dvx/dt - cm*dvm/dt = -gx*(vx - ex) + i(vm) + ax_j*(vx_j - vx) * This is a common operation for fixed step, cvode, and daspk methods */ // init _rhs and _lhs (NODE_D) as zero for (int nrn_seg = i1; nrn_seg < i3; ++nrn_seg) { S->NODE_RHS[nrn_seg] = 0; // replace the process: init by 0, add Cm*frac, add A and B } // update MOD rhs, CAPS has no current [CAP MOD CAP]! // int center_segment = i1 + ((P->models[nrn] == MUSCLE)? 
2 : 1); // update segments except CAPs double V, _rhs; for (int nrn_seg = i1 + 1; nrn_seg < i3 - 1; ++nrn_seg) { V = S->Vm[nrn_seg]; // SYNAPTIC update // static void nrn_cur(_NrnThread* _nt, _Memb_list* _ml, int _type) _rhs = syn_current(N, P, nrn, V); _rhs *= 1.e2 / S->NODE_AREA[nrn_seg]; S->NODE_RHS[nrn_seg] -= _rhs; // NEURON update // static void nrn_cur(_NrnThread* _nt, _Memb_list* _ml, int _type) if (P->models[nrn] == INTER || P->models[nrn] == AFFERENTS) { // muscle and inter has the same fast_channel function _rhs = nrn_fastchannel_current(S, P, N, nrn, nrn_seg, V); } else if (P->models[nrn] == MOTO) { _rhs = nrn_moto_current(S, P, N, nrn, nrn_seg, V); } else if (P->models[nrn] == MUSCLE) { // muscle and inter has the same fast_channel function _rhs = nrn_fastchannel_current(S, P, N, nrn, nrn_seg, V); } else { printf("\nERROR\n"); } // save data like in NEURON (after .mod nrn_cur) S->NODE_RHS[nrn_seg] -= _rhs; // note that CAP has no jacob } // end FOR segments if (EXTRACELLULAR) { // Cannot have any axial terms yet so that i(vm) can be calculated from // i(vm)+is(vi) and is(vi) which are stored in rhs vector. nrn_rhs_ext(S, P, N, i1, i3); // nrn_rhs_ext has also computed the the internal axial current for those // nodes containing the extracellular mechanism } double dv; for (int nrn_seg = i1 + 1; nrn_seg < i3; ++nrn_seg) { dv = S->Vm[nrn_seg - 1] - S->Vm[nrn_seg]; // our connection coefficients are negative so S->NODE_RHS[nrn_seg] -= S->NODE_B[nrn_seg] * dv; S->NODE_RHS[nrn_seg - 1] += S->NODE_A[nrn_seg] * dv; } #ifdef LOG printf("RHS EXRTA 0 : "); for (int nrn_seg = i1; nrn_seg < i3; ++nrn_seg) printf("%g\t", S->EXT_RHS[nrn_seg]); printf("\n"); printf("RHS EXRTA 1 : "); for (int nrn_seg = i1; nrn_seg < i3; ++nrn_seg) printf("%g\t", S->EXT_RHS[nrn_seg + S->ext_size]); printf("\n"); #endif } __device__ void nrn_lhs(States* S, const Parameters* P, Neurons* N, int nrn, int i1, int i3) { /** calculate left hand side of * cm*dvm/dt = -i(vm) + is(vi) + ai_j*(vi_j - vi) * cx*dvx/dt - cm*dvm/dt = -gx*(vx - ex) + i(vm) + ax_j*(vx_j - vx) * with a matrix so that the solution is of the form [dvm+dvx,dvx] on the right hand side after solving. */ for (int nrn_seg = i1; nrn_seg < i3; ++nrn_seg) { S->NODE_D[nrn_seg] = 0; } // update MOD rhs, CAPS has no current [CAP MOD CAP]! // int center_segment = i1 + ((P->models[nrn] == MUSCLE)? 
2 : 1); // update segments except CAPs double V, _g, _rhs; for (int nrn_seg = i1 + 1; nrn_seg < i3 - 1; ++nrn_seg) { V = S->Vm[nrn_seg]; // SYNAPTIC update // static void nrn_cur(_NrnThread* _nt, _Memb_list* _ml, int _type) _g = syn_current(N, P, nrn, V + 0.001); _rhs = syn_current(N, P, nrn, V); _g = (_g - _rhs) / 0.001; _g *= 1.e2 / S->NODE_AREA[nrn_seg]; _rhs *= 1.e2 / S->NODE_AREA[nrn_seg]; // static void nrn_jacob(_NrnThread* _nt, _Memb_list* _ml, int _type) S->NODE_D[nrn_seg] += _g; // void nrn_cap_jacob(NrnThread* _nt, Memb_list* ml) { S->NODE_D[nrn_seg] += S->const_NODE_D[nrn_seg]; // activsynapse_lhs() // activclamp_lhs() // NEURON update // static void nrn_cur(_NrnThread* _nt, _Memb_list* _ml, int _type) if (P->models[nrn] == INTER || P->models[nrn] == AFFERENTS) { // muscle and inter has the same fast_channel function _g = nrn_fastchannel_current(S, P, N, nrn, nrn_seg, V + 0.001); _rhs = nrn_fastchannel_current(S, P, N, nrn, nrn_seg, V); } else if (P->models[nrn] == MOTO) { _g = nrn_moto_current(S, P, N, nrn, nrn_seg, V + 0.001); _rhs = nrn_moto_current(S, P, N, nrn, nrn_seg, V); } else if (P->models[nrn] == MUSCLE) { // muscle and inter has the same fast_channel function _g = nrn_fastchannel_current(S, P, N, nrn, nrn_seg, V + 0.001); _rhs = nrn_fastchannel_current(S, P, N, nrn, nrn_seg, V); } else { assert(false); } // save data like in NEURON (after .mod nrn_cur) _g = (_g - _rhs) / 0.001; // note that CAP has no jacob S->NODE_D[nrn_seg] += _g; } // end FOR segments if (EXTRACELLULAR) { nrn_setup_ext(S, P, N, i1, i3); } // activstim_rhs() // activclamp_rhs() // at this point d contains all the membrane conductances // now add the axial currents for (int nrn_seg = i1 + 1; nrn_seg < i3; ++nrn_seg) { S->NODE_D[nrn_seg] -= S->NODE_B[nrn_seg]; S->NODE_D[nrn_seg - 1] -= S->NODE_A[nrn_seg]; } #ifdef LOG printf("NODED axial:\t"); for (int i = i1; i < i3; ++i) printf("%g\t", S->NODE_D[i]); printf("\n"); #endif } __device__ void bksub(States* S, const Parameters* P, Neurons* N, int nrn, int i1, int i3) { /** * void bksub(NrnThread* _nt) */ // intracellular S->NODE_RHS[i1] /= S->NODE_D[i1]; // for (int nrn_seg = i1 + 1; nrn_seg < i3; ++nrn_seg) { S->NODE_RHS[nrn_seg] -= S->NODE_B[nrn_seg] * S->NODE_RHS[nrn_seg - 1]; S->NODE_RHS[nrn_seg] /= S->NODE_D[nrn_seg]; } // extracellular // if (EXTRACELLULAR) { // int size = S->ext_size; // for (int layer = 0; layer < nlayer; ++layer) { // S->EXT_RHS[i1 + layer * size] /= S->EXT_D[i1 + layer * size]; // } // for (int nrn_seg = i1 + 1; nrn_seg < i3; ++nrn_seg) { // for (int layer = 0; layer < nlayer; ++layer) { // S->EXT_RHS[nrn_seg + layer * size] -= S->EXT_B[nrn_seg + layer * size] * S->EXT_RHS[nrn_seg - 1 + layer * size]; // S->EXT_RHS[nrn_seg + layer * size] /= S->EXT_D[nrn_seg + layer * size]; // } // } // } } __device__ void triang(States* S, const Parameters* P, Neurons* N, int nrn, int i1, int i3) { /** * void triang(NrnThread* _nt) */ // intracellular double ppp; for (int nrn_seg = i3 - 1; nrn_seg >= i1 + 1; --nrn_seg) { ppp = S->NODE_A[nrn_seg] / S->NODE_D[nrn_seg]; S->NODE_D[nrn_seg - 1] -= ppp * S->NODE_B[nrn_seg]; S->NODE_RHS[nrn_seg - 1] -= ppp * S->NODE_RHS[nrn_seg]; } // extracellular // if (EXTRACELLULAR) { // int size = S->ext_size; // for (int nrn_seg = i3 - 1; nrn_seg >= i1 + 1; --nrn_seg) { // for (int layer = 0; layer < nlayer; ++layer) { // ppp = S->EXT_A[nrn_seg + layer * size] / S->EXT_D[nrn_seg + layer * size]; // S->EXT_D[nrn_seg - 1 + layer * size] -= ppp * S->EXT_B[nrn_seg + layer * size]; // S->EXT_RHS[nrn_seg - 1 + 
layer * size] -= ppp * S->EXT_RHS[nrn_seg + layer * size]; // nrn_seg--; // } // } // } } __device__ void nrn_solve(States* S, const Parameters* P, Neurons* N, int nrn, int i1, int i3) { /** * void nrn_solve(NrnThread* _nt) */ #ifdef LOG printf("SOLVE EXT begin NODED 0 "); for (int i=i1; i < i3; ++i) printf("%g\t", S->EXT_D[i]); printf("\n"); printf("SOLVE EXT begin NODED 0 "); for (int i=i1; i < i3; ++i) printf("%g\t", S->EXT_D[i + S->ext_size]); printf("\n"); #endif // TODO PROOVED // nrn_solve EXT D 0 5e+07 2e-09 2e-09 1e-09 5e+07 // nrn_solve EXT D 1 5e+07 5e-10 5e-10 1e-09 5e+07 triang(S, P, N, nrn, i1, i3); bksub(S, P, N, nrn, i1, i3); } __device__ void setup_tree_matrix(States* S, const Parameters* P, Neurons* N, int nrn, int i1, int i3) { /** * void setup_tree_matrix(NrnThread* _nt) */ nrn_rhs(S, P, N, nrn, i1, i3); nrn_lhs(S, P, N, nrn, i1, i3); } __device__ void update(States* S, const Parameters* P, Neurons* N, int nrn, int i1, int i3) { /** * void update(NrnThread* _nt) */ #ifdef LOG printf("UPDATE EXT begin NODED 0 "); for (int i=i1; i < i3; ++i) printf("%g\t", S->EXT_D[i]); printf("\n"); printf("UPDATE EXT begin NODED 1 "); for (int i=i1; i < i3; ++i) printf("%g\t", S->EXT_D[i + S->ext_size]); printf("\n"); // final voltage updating #endif for (int nrn_seg = i1; nrn_seg < i3; ++nrn_seg) { S->Vm[nrn_seg] += S->NODE_RHS[nrn_seg]; } // extracellular if (EXTRACELLULAR) { nrn_update_2d(S, P, N, i1, i3); } } __device__ void nrn_deliver_events(States* S, const Parameters* P, Neurons* N, int nrn) { /** * void nrn_deliver_events(NrnThread* nt) */ // get the central segment (for detecting spikes): i1 + (2 or 1) int seg_update = P->nrn_start_seg[nrn] + ((P->models[nrn] == MUSCLE)? 2 : 1); // check if neuron has spike with special flag for avoidance multi-spike detecting if (!N->spike_on[nrn] && S->Vm[seg_update] > V_th && N->ref_time_timer[nrn] == 0) { N->spike_on[nrn] = true; N->has_spike[nrn] = true; N->ref_time_timer[nrn] = N->ref_time[nrn]; } else if (S->Vm[seg_update] < V_th) { N->spike_on[nrn] = false; } } __device__ void nrn_fixed_step_lastpart(States* S, const Parameters* P, Neurons* N, int nrn, int i1, int i3) { /** * void *nrn_fixed_step_lastpart(NrnThread *nth) */ // update neurons' synapses state recalc_synaptic(S, P, N, nrn); // update neurons' segments state if (P->models[nrn] == INTER || P->models[nrn] == AFFERENTS) { for(int nrn_seg = i1; nrn_seg < i3; ++nrn_seg) { recalc_inter_channels(S, P, N, nrn_seg, S->Vm[nrn_seg]); } } else if (P->models[nrn] == MOTO) { for(int nrn_seg = i1; nrn_seg < i3; ++nrn_seg) { recalc_moto_channels(S, P, N, nrn_seg, S->Vm[nrn_seg]); } } else if (P->models[nrn] == MUSCLE) { for(int nrn_seg = i1; nrn_seg < i3; ++nrn_seg) { recalc_muslce_channels(S, P, N, nrn_seg, S->Vm[nrn_seg]); } } else { assert(false); } // spike detection for (in synapse kernel) nrn_deliver_events(S, P, N, nrn); } __device__ void nrn_area_ri(States* S, const Parameters* P, Neurons* N) { /** * void nrn_area_ri(Section *sec) [790] treeset.c * area for right circular cylinders. 
Ri as right half of parent + left half of this */ printf("GPU: nrn_area_ri\n"); double dx, rleft, rright; int i1, i3, nrn_seg, segments; // for (int nrn = 0; nrn < N->size; ++nrn) { if (P->models[nrn] == GENERATOR) continue; i1 = P->nrn_start_seg[nrn]; i3 = P->nrn_start_seg[nrn + 1]; segments = (i3 - i1 - 2); dx = P->length[nrn] / segments; // divide by the last index of node (or segments count) rright = 0; // todo sec->pnode needs +1 index for (nrn_seg = i1 + 1; nrn_seg < i1 + segments + 1; ++nrn_seg) { // area for right circular cylinders. Ri as right half of parent + left half of this S->NODE_AREA[nrn_seg] = PI * dx * P->diam[nrn]; rleft = 1.e-2 * P->Ra[nrn] * (dx / 2.0) / (PI * pow(P->diam[nrn], 2) / 4.0); // left half segment Megohms S->NODE_RINV[nrn_seg] = 1.0 / (rleft + rright); // uS rright = rleft; } //the first and last segments has zero length. Area is 1e2 in dimensionless units S->NODE_AREA[i1] = 100.0; nrn_seg = i1 + segments + 1; // the last segment S->NODE_AREA[nrn_seg] = 100.0; S->NODE_RINV[nrn_seg] = 1.0 / rright; } } __device__ void ext_con_coef(States* S, const Parameters* P, Neurons* N) { /** * void ext_con_coef(void) * setup a and b */ double dx; int layer, i1, i3, segments, size = S->ext_size; // todo: extracellular only for those neurons who need for (int nrn = 0; nrn < N->size; ++nrn) { if (P->models[nrn] == GENERATOR) continue; layer = 0; i1 = P->nrn_start_seg[nrn]; i3 = P->nrn_start_seg[nrn + 1]; segments = (i3 - i1 - 2); // temporarily store half segment resistances in rhs for (int nrn_seg = i1 + 1; nrn_seg < i3 - 1; ++nrn_seg) { dx = P->length[nrn] / segments; S->EXT_RHS[nrn_seg + layer * size] = 1e-4 * xraxial * dx / 2; // Megohms } // last segment has 0 length S->EXT_RHS[i3 - 1 + layer * size] = 0; // todo i3 -1 or just i3 // NEURON RHS = [5e+07 5e+07 5e+07 0 ] // GRAS RHS = [0 5e+07 5e+07 5e+07 0 ] #ifdef LOG printf("EXT RHS: "); for(int ii = i1; ii < i3; ++ii) printf("%g\t", S->EXT_RHS[ii]); printf("\n"); #endif // node half resistances in general get added to the node and to the node's "child node in the same section". // child nodes in different sections don't involve parent node's resistance S->EXT_B[i1 + 1 + layer * size] = S->EXT_RHS[i1 + 1 + layer * size]; for (int nrn_seg = i1 + 1 + 1; nrn_seg < i3; ++nrn_seg) { S->EXT_B[nrn_seg + layer * size] = S->EXT_RHS[nrn_seg + layer * size] + S->EXT_RHS[nrn_seg - 1 + layer * size]; // Megohms } // NEURON B = [5e+07 1e+08 1e+08 5e+07 ] // GRAS B = [0 5e+07 1e+08 1e+08 5e+07 ] #ifdef LOG printf("EXT B: "); for(int ii = i1; ii < i3; ++ii) printf("%g\t", S->EXT_B[ii]); printf("\n"); #endif // first the effect of node on parent equation. 
Note That last nodes have area = 1.e2 in // dimensionless units so that last nodes have units of microsiemens's double area = S->NODE_AREA[i1]; // parentnode index of sec is 0 double rall_branch = 1.0; // sec->prop->dparam[4].val S->EXT_A[i1 + 1 + layer * size] = -1.e2 * rall_branch / (S->EXT_B[i1 + 1 + layer * size] * area); for (int nrn_seg = i1 + 1 + 1; nrn_seg < i3; ++nrn_seg) { area = S->NODE_AREA[nrn_seg - 1]; S->EXT_A[nrn_seg + layer * size] = -1.e2 / (S->EXT_B[nrn_seg + layer * size] * area); } // NEURON A = [-2e-08 -7.95775e-12 -7.95775e-12 -1.59155e-11 ] // GRAS A = [0 -2e-08 -7.95775e-12 -7.95775e-12 -1.59155e-11 ] #ifdef LOG printf("EXT A: "); for(int ii = i1; ii < i3; ++ii) printf("%g\t", S->EXT_A[ii]); printf("\n"); #endif // now the effect of parent on node equation // todo sec->pnode needs +1 index for (int nrn_seg = i1 + 1; nrn_seg < i3; ++nrn_seg) { S->EXT_B[nrn_seg + layer * size] = -1.e2 / (S->EXT_B[nrn_seg + layer * size] * S->NODE_AREA[nrn_seg]); } // NEURON B = [-1.59155e-11 -7.95775e-12 -7.95775e-12 -2e-08 ] // GRAS B = [0 -1.59155e-11 -7.95775e-12 -7.95775e-12 -2e-08 ] #ifdef LOG printf("EXT B END: "); for(int ii = i1; ii < i3; ++ii) printf("%g\t", S->EXT_B[ii]); printf("\n"); #endif // the same for other layers for (int nrn_seg = i1; nrn_seg < i3; ++nrn_seg) { S->EXT_A[nrn_seg + 1 * size] = S->EXT_A[nrn_seg + 0 * size]; S->EXT_B[nrn_seg + 1 * size] = S->EXT_B[nrn_seg + 0 * size]; S->EXT_RHS[nrn_seg + 1 * size] = S->EXT_RHS[nrn_seg + 0 * size]; } } } __device__ void connection_coef(States* S, const Parameters* P, Neurons* N) { /** * void connection_coef(void) treeset.c */ printf("GPU: connection_coef\n"); nrn_area_ri(S, P, N); int i1, i3, nrn_seg, segments; // for (int nrn = 0; nrn < N->size; ++nrn) { if (P->models[nrn] == GENERATOR) continue; i1 = P->nrn_start_seg[nrn]; i3 = P->nrn_start_seg[nrn + 1]; segments = (i3 - i1 - 2); // first the effect of node on parent equation. Note that last nodes have area = 1.e2 in dimensionless // units so that last nodes have units of microsiemens // todo sec->pnode needs +1 index nrn_seg = i1 + 1; // sec->prop->dparam[4].val = 1, what is dparam[4].val S->NODE_A[nrn_seg] = -1.e2 * 1.0 * S->NODE_RINV[nrn_seg] / S->NODE_AREA[nrn_seg - 1]; // todo sec->pnode needs +1 index for (nrn_seg = i1 + 1 + 1; nrn_seg < i1 + segments + 1 + 1; ++nrn_seg) { S->NODE_A[nrn_seg] = -1.e2 * S->NODE_RINV[nrn_seg] / S->NODE_AREA[nrn_seg - 1]; } // now the effect of parent on node equation // todo sec->pnode needs +1 index for (nrn_seg = i1 + 1; nrn_seg < i1 + segments + 1 + 1; ++nrn_seg) { S->NODE_B[nrn_seg] = -1.e2 * S->NODE_RINV[nrn_seg] / S->NODE_AREA[nrn_seg]; } } // for extracellular if (EXTRACELLULAR) { ext_con_coef(S, P, N); } /** * note: from LHS, this functions just recalc each time the constant NODED (!) 
* void nrn_lhs(NrnThread *_nt) * NODE_D[nrn, nd] updating is located at nrn_rhs, because _g is not the global variable */ // nt->cj = 2/dt if (secondorder) else 1/dt // note, the first is CAP // function nrn_cap_jacob(_nt, _nt->tml->ml); double cj = 1.0 / dt; double cfac = 0.001 * cj; for (int nrn = 0; nrn < N->size; ++nrn) { if (P->models[nrn] == GENERATOR) continue; i1 = P->nrn_start_seg[nrn]; i3 = P->nrn_start_seg[nrn + 1]; // nrn_cap_jacob for (nrn_seg = i1 + 1; nrn_seg < i3 - 1; ++nrn_seg) { S->const_NODE_D[nrn_seg] += cfac * P->Cm[nrn]; } } } __global__ void initialization_kernel(curandState *state, States* S, const Parameters* P, Neurons* N, double v_init) { /** * */ if (blockIdx.x * blockDim.x + threadIdx.x == 0) { int i1, i3; printf("GPU: initialization_kernel\n"); // connection_coef(S, P, N); // for different models -- different init function for (int nrn = 0; nrn < N->size; ++nrn) { // init random curand_init(7 + nrn, nrn, 0, &state[nrn]); // do not init neuron state for generator if (P->models[nrn] == GENERATOR) continue; i1 = P->nrn_start_seg[nrn]; i3 = P->nrn_start_seg[nrn + 1]; // for each segment init the neuron model for (int nrn_seg = i1; nrn_seg < i3; ++nrn_seg) { S->Vm[nrn_seg] = v_init; if (P->models[nrn] == INTER || P->models[nrn] == AFFERENTS) { nrn_inter_initial(S, P, N, nrn_seg, v_init); } else if (P->models[nrn] == MOTO) { nrn_moto_initial(S, P, N, nrn_seg, v_init); } else if (P->models[nrn] == MUSCLE) { nrn_muslce_initial(S, P, N, nrn_seg, v_init); } else { assert(false); } } // init RHS/LHS setup_tree_matrix(S, P, N, nrn, i1, i3); // init tau synapses syn_initial(S, P, N, nrn); } } } void conn_generator(Group &generator, Group &post_neurons, double delay, double weight, int indegree=50) { /** * todo */ uniform_int_distribution<int> nsyn_distr(indegree, indegree + 5); normal_distribution<double> delay_distr(delay, delay / 5); normal_distribution<double> weight_distr(weight, weight / 6); // int nsyn = nsyn_distr(rand_gen); printf("Connect generator %s [%d..%d] to %s [%d..%d] (1:%d). 
Synapses %d, D=%.1f, W=%.2f\n", generator.group_name.c_str(), generator.id_start, generator.id_end, post_neurons.group_name.c_str(), post_neurons.id_start, post_neurons.id_end, post_neurons.group_size, generator.group_size * post_neurons.group_size, delay, weight); // int nsyn = nsyn_distr(rand_gen); int gen_id = generator.id_start; if (generator.group_size > 1) { printf("Generator cannot include more than 1 neuron!\n"); exit(0); } for (int post = post_neurons.id_start; post <= post_neurons.id_end; ++post) { for (int i = 0; i < nsyn; ++i) { vector_syn_pre_nrn.push_back(gen_id); vector_syn_post_nrn.push_back(post); vector_syn_weight.push_back(weight_distr(rand_gen)); vector_syn_delay.push_back(ms_to_step(delay_distr(rand_gen))); vector_syn_delay_timer.push_back(-1); } } } void connect_fixed_indegree(Group &pre_neurons, Group &post_neurons, double delay, double weight, int indegree=50, short high_distr=0) { /** * */ // STR if (str_flag && weight < 0) weight = 0; if (post_neurons.model == INTER) { printf("POST INTER "); weight /= 11.0; } uniform_int_distribution<int> nsyn_distr(indegree - 15, indegree); uniform_int_distribution<int> pre_nrns_ids(pre_neurons.id_start, pre_neurons.id_end); double d_spread, w_spread; double d_left = 0, d_right = 0, w_left = 0, w_right = 0; if (high_distr == 0) { d_spread = 0; //delay / 6; w_spread = 0; //weight / 6; } else if (high_distr == 1) { d_spread = delay / 5; w_spread = weight / 5.5; } else if (high_distr == 2) { d_spread = delay / 3.5; w_spread = weight / 2.5; } else if (high_distr == 3) { d_spread = delay / 1.2; w_spread = weight / 1.1; d_left = delay - d_spread; d_right = delay + d_spread; w_left = weight - w_spread; w_right = weight + w_spread + w_spread / 2; } else if (high_distr == 4) { d_spread = delay / 3; w_spread = weight / 3; d_left = delay - d_spread; d_right = delay + d_spread; w_left = weight - w_spread; w_right = weight + w_spread + w_spread / 2; } else if (high_distr == 5) { d_spread = delay / 1.1; w_spread = weight / 1.1; d_left = delay - d_spread; d_right = delay + d_spread + delay * 1.5; w_left = weight - w_spread; w_right = weight + w_spread + w_spread; } else { throw logic_error("distr only 0 1 2 3 4 5"); } normal_distribution<double> delay_distr(delay, d_spread); normal_distribution<double> weight_distr(weight, w_spread); uniform_real_distribution<double> delay_distr_U(d_left, d_right); uniform_real_distribution<double> weight_distr_U(w_left, w_right); auto nsyn = nsyn_distr(rand_gen); printf("Connect indegree %s [%d..%d] to %s [%d..%d] (1:%d). 
Synapses %d, D=%.1f, W=%.2f\n", pre_neurons.group_name.c_str(), pre_neurons.id_start, pre_neurons.id_end, post_neurons.group_name.c_str(), post_neurons.id_start, post_neurons.id_end, indegree, post_neurons.group_size * indegree, delay, weight); // int prerand = 0; double tmp_w = 0; double tmp_d = 0; for (int post = post_neurons.id_start; post <= post_neurons.id_end; ++post) { for (int i = 0; i < nsyn; ++i) { prerand = pre_nrns_ids(rand_gen); vector_syn_pre_nrn.push_back(prerand); vector_syn_post_nrn.push_back(post); if (post_neurons.model == AFFERENTS) { vector_syn_weight.push_back(weight); vector_syn_delay.push_back(ms_to_step(delay)); } else { if (high_distr == 3 || high_distr == 4 || high_distr == 5) { tmp_w = weight_distr_U(rand_gen); tmp_d = delay_distr_U(rand_gen); } else { tmp_w = weight_distr(rand_gen); if (tmp_w <= 0) { tmp_w = weight; } tmp_d = delay_distr(rand_gen); if (tmp_d <= 0.01) { tmp_d = delay; } } vector_syn_weight.push_back(tmp_w); vector_syn_delay.push_back(ms_to_step(tmp_d)); } vector_syn_delay_timer.push_back(-1); } } } void connect_fixed_outdegree_MUSCLE(Group &pre_neurons, Group &post_neurons, double delay, double weight, int indegree=50, short high_distr=0) { /** * */ uniform_int_distribution<int> nsyn_distr(indegree - 15, indegree); // double d_spread, w_spread; // if (high_distr == 0) { // d_spread = 0;//delay / 6; // w_spread = 0;//weight / 6; // } else if (high_distr == 1) { // d_spread = delay / 5; // w_spread = weight / 5.5; // } else if (high_distr == 2) { // d_spread = delay / 4; // w_spread = weight / 4; // }else { // logic_error("distr only 0 1 2"); // } // normal_distribution<double> delay_distr(delay, d_spread); // normal_distribution<double> weight_distr(weight, w_spread); auto nsyn = nsyn_distr(rand_gen); printf("Connect indegree %s [%d..%d] to %s [%d..%d] (1:%d). 
Synapses %d, D=%.1f, W=%.2f\n", pre_neurons.group_name.c_str(), pre_neurons.id_start, pre_neurons.id_end, post_neurons.group_name.c_str(), post_neurons.id_start, post_neurons.id_end, indegree, post_neurons.group_size * indegree, delay, weight); // int shift, post_rand = 0; double tmp_w, tmp_d = 0; double d_left, d_right, w_left, w_right, d_spread, w_spread = 0; if (high_distr == 5) { d_spread = delay / 1.1; w_spread = weight / 1.1; d_left = delay - d_spread; d_right = delay + d_spread + delay * 1.5; w_left = weight - w_spread; w_right = weight + w_spread + w_spread; } else { d_left = delay; d_right = delay + 3; w_left = weight - weight / 1.5; w_right = weight; } uniform_real_distribution<double> delay_distr_U(d_left, d_right); uniform_real_distribution<double> weight_distr_U(w_left, w_right); int m_start = post_neurons.id_start; for (int pre = pre_neurons.id_start; pre <= pre_neurons.id_end; ++pre) { uniform_int_distribution<int> post_nrns_ids(m_start + 50 * shift, m_start + 50 * (shift + 1)); for (int i = 0; i < nsyn; ++i) { post_rand = post_nrns_ids(rand_gen); vector_syn_pre_nrn.push_back(pre); vector_syn_post_nrn.push_back(post_rand); // tmp_w = weight_distr(rand_gen); tmp_w = weight_distr_U(rand_gen); if (tmp_w <= 0) tmp_w = weight; // tmp_d = delay_distr(rand_gen); tmp_d = delay_distr_U(rand_gen); if (tmp_d <= 0.01) tmp_d = delay; vector_syn_weight.push_back(tmp_w); vector_syn_delay.push_back(ms_to_step(tmp_d)); vector_syn_delay_timer.push_back(-1); } } } void connect_fixed_outdegree(Group &pre_neurons, Group &post_neurons, double delay, double weight, int outdegree=50, short high_distr=0) { /** * */ // STR if (weight < 0) weight /= 1000; if (post_neurons.model == INTER) { printf("POST INTER "); weight /= 11.0; } uniform_int_distribution<int> nsyn_distr(outdegree - 15, outdegree); uniform_int_distribution<int> post_nrns_ids(post_neurons.id_start, post_neurons.id_end); double d_spread, w_spread; if (high_distr == 0) { d_spread = delay / 6; w_spread = weight / 6; } else if (high_distr == 1) { d_spread = delay / 5; w_spread = weight / 5.5; } else if (high_distr == 2) { d_spread = delay / 3.5; w_spread = weight / 3.5; } else if (high_distr == 3) { d_spread = delay / 4; w_spread = weight / 4; } else { throw logic_error("distr only 0 1 2 3"); } normal_distribution<double> delay_distr(delay, d_spread); normal_distribution<double> weight_distr(weight, w_spread); auto nsyn = nsyn_distr(rand_gen); printf("Connect OUTdegree %s [%d..%d] to %s [%d..%d] (1:%d). 
Synapses %d, D=%.1f, W=%.2f\n", pre_neurons.group_name.c_str(), pre_neurons.id_start, pre_neurons.id_end, post_neurons.group_name.c_str(), post_neurons.id_start, post_neurons.id_end, outdegree, post_neurons.group_size * outdegree, delay, weight); // int postrand = 0; for (int pre = pre_neurons.id_start; pre <= pre_neurons.id_end; ++pre) { for (int i = 0; i < nsyn; ++i) { postrand = post_nrns_ids(rand_gen); vector_syn_pre_nrn.push_back(pre); vector_syn_post_nrn.push_back(postrand); if (post_neurons.model == AFFERENTS) { vector_syn_weight.push_back(weight); vector_syn_delay.push_back(ms_to_step(delay)); } else { vector_syn_weight.push_back(weight_distr(rand_gen)); vector_syn_delay.push_back(ms_to_step(delay_distr(rand_gen))); } vector_syn_delay_timer.push_back(-1); } } } void connectinsidenucleus(Group &nucleus) { connect_fixed_indegree(nucleus, nucleus, 0.5, 0.25, 50, 3); } void file_writing(int test_index, GroupMetadata &metadata, const string &folder) { /** * */ ofstream file; string file_name = "/dat/" + to_string(test_index) + "_" + metadata.group.group_name + ".dat"; file.open(folder + file_name); // save voltage for (unsigned int sim_iter = 0; sim_iter < SIM_TIME_IN_STEPS; sim_iter++) file << metadata.voltage_array[sim_iter] << " "; file << endl; // save g_exc for (unsigned int sim_iter = 0; sim_iter < SIM_TIME_IN_STEPS; sim_iter++) file << metadata.g_exc[sim_iter] << " "; file << endl; // save g_inh for (unsigned int sim_iter = 0; sim_iter < SIM_TIME_IN_STEPS; sim_iter++) file << metadata.g_inh[sim_iter] << " "; file << endl; // save spikes for (double const &value: metadata.spike_vector) { file << value << " "; } file.close(); cout << "Saved to: " << folder + file_name << endl; } void save(vector<Group> groups) { for (Group &group : groups) saving_groups.emplace_back(GroupMetadata(group, SIM_TIME_IN_STEPS)); } void copy_data_to(GroupMetadata& metadata, const double* Vm, double* tmp, const double* g_exc, const double* g_inh_A, const double* g_inh_B, const bool* has_spike, const unsigned int sim_iter) { double nrn_mean_volt = 0; double nrn_mean_g_exc = 0; double nrn_mean_g_inh = 0; int center; unsigned int id_start = metadata.group.id_start; unsigned int id_end = metadata.group.id_end; short shift = (vector_models[id_start] == MUSCLE) ? 
2 : 1; if (metadata.group.group_name == "muscle_E" || metadata.group.group_name == "muscle_F") { // #pragma omp parallel default(none) shared(vector_nrn_start_seg, Vm, tmp, nrn_mean_volt, id_start, id_end, shift) // #pragma omp for reduction(+:nrn_mean_volt) private(center) schedule(auto) for (auto nrn = id_start; nrn <= id_end; ++nrn) { center = vector_nrn_start_seg[nrn] + shift; nrn_mean_volt += (Vm[center] - tmp[nrn]); tmp[nrn] = Vm[center]; nrn_mean_g_exc += g_exc[nrn]; nrn_mean_g_inh += (g_inh_B[nrn] - g_inh_A[nrn]); if (has_spike[nrn]) { metadata.spike_vector.push_back(step_to_ms(sim_iter)); } } metadata.voltage_array[sim_iter] = nrn_mean_volt / metadata.group.group_size / dt * (4 * PI * 10000); } else { for (unsigned int nrn = id_start; nrn <= id_end; ++nrn) { center = vector_nrn_start_seg[nrn] + shift; nrn_mean_volt += Vm[center]; nrn_mean_g_exc += g_exc[nrn]; nrn_mean_g_inh += (g_inh_B[nrn] - g_inh_A[nrn]); if (has_spike[nrn]) { metadata.spike_vector.push_back(step_to_ms(sim_iter)); } } metadata.voltage_array[sim_iter] = nrn_mean_volt / metadata.group.group_size; } metadata.g_exc[sim_iter] = nrn_mean_g_exc / metadata.group.group_size; metadata.g_inh[sim_iter] = nrn_mean_g_inh / metadata.group.group_size; } void save_result(int test_index) { string current_path = getcwd(nullptr, 0); for (GroupMetadata &metadata : saving_groups) file_writing(test_index, metadata, current_path); printf("[Test #%d] Saved results to: %s \n", test_index, current_path.c_str()); } template<typename type> type* arr_init(int size = NRNS_AND_SEGS) { // important: NRNS_AND_SEGS initialized at network building return new type[size](); } void createmotif(Group &OM0, Group &OM1, Group &OM2, Group &OM3) { /** * Connects motif module * see https://github.com/research-team/memristive-spinal-cord/blob/master/doc/diagram/cpg_generator_FE_paper.png */ connect_fixed_indegree(OM0, OM1, 3, 0.9, 50, 5); connect_fixed_indegree(OM1, OM2, 3, 0.55, 50, 5); // 0.85 connect_fixed_indegree(OM2, OM1, 3, 0.55, 50, 5); connect_fixed_indegree(OM1, OM3, 2.4, 0.0003); // 2.5 connect_fixed_indegree(OM2, OM3, 2.4, 0.0005); // 2.5 connect_fixed_indegree(OM3, OM2, 2.4, -3); connect_fixed_indegree(OM3, OM1, 2.4, -3); } void createmotif_flexor(Group &OM0, Group &OM1, Group &OM2, Group &OM3) { connect_fixed_indegree(OM0, OM1, 3, 0.9, 50, 5); connect_fixed_indegree(OM1, OM2, 3, 0.61, 50, 5); // 0.85 connect_fixed_indegree(OM2, OM1, 3, 0.55, 50, 5); connect_fixed_indegree(OM1, OM3, 2.4, 0.0002); // 2.5 connect_fixed_indegree(OM2, OM3, 2.4, 0.0004); // 4 connect_fixed_indegree(OM3, OM2, 2.4, -2); // -1 - noise, -5 - void connect_fixed_indegree(OM3, OM1, 2.4, -3); } __global__ void neuron_kernel(curandState *state, States *S, const Parameters *P, Neurons *N, Generators *G, int t) { /** * */ int i1, i3; int tid = blockIdx.x * blockDim.x + threadIdx.x; // for (int nrn = tid; nrn < N->size; nrn += blockDim.x * gridDim.x) { // reset the spike state N->has_spike[nrn] = false; // if (P->models[nrn] != GENERATOR) { // calc the borders of the neuron by theirs segments i1 = P->nrn_start_seg[nrn]; i3 = P->nrn_start_seg[nrn + 1]; // generate pseudo-random noise // re-calc currents and states based on synaptic activity setup_tree_matrix(S, P, N, nrn, i1, i3); // solve equations nrn_solve(S, P, N, nrn, i1, i3); // change voltage of the neurons based on solved equations update(S, P, N, nrn, i1, i3); // recalc conductance, update channels and deliver network events nrn_fixed_step_lastpart(S, P, N, nrn, i1, i3); if (N->ref_time_timer[nrn] > 0) 
N->ref_time_timer[nrn]--; } } // update generators if (tid == 0) { for (int generator = 0; generator < G->size; ++generator) { if (t == G->spike_each_step[generator] && t < G->time_end[generator]) { G->spike_each_step[generator] += G->freq_in_steps[generator]; N->has_spike[G->nrn_id[generator]] = true; } } // afferent // float part; // if ( ((25 / dt <= t) && (t < 50 / dt)) || ((150 / dt <= t) && (t < 175 / dt)) ) { // part = (2678 - 2559) * 0.4; // for (int n = 2559; n <= 2678 - part; n += 2) // N->has_spike[n] = false; // } else if ( ((50 / dt <= t) && (t < 75 / dt)) || ((125 / dt <= t) && (t < 150 / dt)) ) { // part = (2678 - 2559) * 0.6; // for (int n = 2559; n <= 2678 - part; n += 3) // N->has_spike[n] = false; // } } } __global__ void synapse_kernel(Neurons *N, Synapses* synapses) { /** * void deliver_net_events(NrnThread* nt) */ int pre_nrn, post_id; double weight; for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < synapses->size; index += blockDim.x * gridDim.x) { pre_nrn = synapses->syn_pre_nrn[index]; // synapse update if (synapses->syn_delay_timer[index] > 0) { synapses->syn_delay_timer[index]--; } else { // if the timer is over -> the synapse changes the conductance of the post neuron if (synapses->syn_delay_timer[index] == 0) { post_id = synapses->syn_post_nrn[index]; weight = synapses->syn_weight[index]; if (weight >= 0) { if (N->ref_time_timer[post_id] == 0) atomicAdd(&N->g_exc[post_id], weight); } else { atomicAdd(&N->g_inh_A[post_id], -weight * N->factor[post_id]); atomicAdd(&N->g_inh_B[post_id], -weight * N->factor[post_id]); } synapses->syn_delay_timer[index] = -1; } else { // if the pre neuron has a spike and the synapse is ready to send a signal if (N->has_spike[pre_nrn] && synapses->syn_delay_timer[index] == -1) { synapses->syn_delay_timer[index] = synapses->syn_delay[index]; } } } } }
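Note: the connection builder and the kernels above rely on ms_to_step and step_to_ms helpers defined elsewhere in this file. A minimal sketch of what they presumably look like, assuming the global step size dt (in ms) that expressions such as 25 / dt already imply; the bodies below are an assumption, not the project's verified code:

// Hypothetical reconstruction: convert between milliseconds and integer simulation steps,
// assuming a global `dt` holding the step size in milliseconds.
inline int ms_to_step(double ms) { return static_cast<int>(ms / dt); }
inline double step_to_ms(int step) { return step * dt; }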
a90edfb119d15b0eb754fb82539f2178446d86ff.hip
// !!! This is a file automatically generated by hipify!!! // /usr/local/cuda-8.0/bin/nvcc -O3 -gencode arch=compute_50,code=sm_50 -m64 FFTShift.cu -lcufft_static -lculibos --relocatable-device-code=true #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include <assert.h> #include <hipfft.h> #include <hipfftXt.h> #include "Utilities.cuh" #include "TimingGPU.cuh" //#define DEBUG #define BLOCKSIZE 256 /*****************************************/ /* FFTSHIFT 1D IN-PLACE MEMORY MOVEMENTS */ /*****************************************/ __global__ void fftshift_1D_inplace_memory_movements(float2 * __restrict__ d_inout, const unsigned int N) { unsigned int tid = threadIdx.x + blockIdx.x * blockDim.x; if (tid < N/2) { float2 temp = d_inout[tid]; d_inout[tid] = d_inout[tid + (N / 2)]; d_inout[tid + (N / 2)] = temp; } } /*********************************************/ /* FFTSHIFT 1D OUT-OF-PLACE MEMORY MOVEMENTS */ /*********************************************/ __global__ void fftshift_1D_outofplace_memory_movements(const float2 * __restrict__ d_in, float2 * __restrict__ d_out, const unsigned int N) { unsigned int tid = threadIdx.x + blockIdx.x * blockDim.x; if (tid < N/2) { d_out[tid] = d_in[tid + (N / 2)]; d_out[tid + (N / 2)] = d_in[tid]; } } /**********************************************/ /* FFTSHIFT 1D INPLACE CHESSBOARD - VERSION 1 */ /**********************************************/ __device__ float2 fftshift_1D_chessboard_callback_v1(void *d_in, size_t offset, void *callerInfo, void *sharedPtr) { float a = (float)(1-2*((int)offset%2)); float2 out = ((float2*)d_in)[offset]; out.x = out.x * a; out.y = out.y * a; return out; } __device__ cufftCallbackLoadC fftshift_1D_chessboard_callback_v1_Ptr = fftshift_1D_chessboard_callback_v1; /**********************************************/ /* FFTSHIFT 1D INPLACE CHESSBOARD - VERSION 2 */ /**********************************************/ __device__ float2 fftshift_1D_chessboard_callback_v2(void *d_in, size_t offset, void *callerInfo, void *sharedPtr) { float a = pow(-1.,(double)(offset&1)); float2 out = ((float2*)d_in)[offset]; out.x = out.x * a; out.y = out.y * a; return out; } __device__ cufftCallbackLoadC fftshift_1D_chessboard_callback_v2_Ptr = fftshift_1D_chessboard_callback_v2; /**********************************************/ /* FFTSHIFT 1D INPLACE CHESSBOARD - VERSION 3 */ /**********************************************/ __device__ float2 fftshift_1D_chessboard_callback_v3(void *d_in, size_t offset, void *callerInfo, void *sharedPtr) { float2 out = ((float2*)d_in)[offset]; if ((int)offset&1) { out.x = -out.x; out.y = -out.y; } return out; } __device__ cufftCallbackLoadC fftshift_1D_chessboard_callback_v3_Ptr = fftshift_1D_chessboard_callback_v3; /********/ /* MAIN */ /********/ int main() { const int N = 131072; // const int N = 16; TimingGPU timerGPU; // --- Host side input array float2 *h_vect = (float2 *)malloc(N * sizeof(float2)); for (int i = 0; i < N; i++) { h_vect[i].x = (float)rand() / (float)RAND_MAX; h_vect[i].y = (float)rand() / (float)RAND_MAX; } // --- Host side output arrays float2 *h_out1 = (float2 *)malloc(N * sizeof(float2)); float2 *h_out2 = (float2 *)malloc(N * sizeof(float2)); float2 *h_out3 = (float2 *)malloc(N * sizeof(float2)); float2 *h_out4 = (float2 *)malloc(N * sizeof(float2)); float2 *h_out5 = (float2 *)malloc(N * sizeof(float2)); // --- Device side input arrays float2 *d_vect1; gpuErrchk(hipMalloc(&d_vect1, N * sizeof(float2))); float2 *d_vect2; gpuErrchk(hipMalloc(&d_vect2, N * 
sizeof(float2))); float2 *d_vect3; gpuErrchk(hipMalloc(&d_vect3, N * sizeof(float2))); float2 *d_vect4; gpuErrchk(hipMalloc(&d_vect4, N * sizeof(float2))); float2 *d_vect5; gpuErrchk(hipMalloc(&d_vect5, N * sizeof(float2))); gpuErrchk(hipMemcpy(d_vect1, h_vect, N * sizeof(float2), hipMemcpyHostToDevice)); gpuErrchk(hipMemcpy(d_vect2, h_vect, N * sizeof(float2), hipMemcpyHostToDevice)); gpuErrchk(hipMemcpy(d_vect3, h_vect, N * sizeof(float2), hipMemcpyHostToDevice)); gpuErrchk(hipMemcpy(d_vect4, h_vect, N * sizeof(float2), hipMemcpyHostToDevice)); gpuErrchk(hipMemcpy(d_vect5, h_vect, N * sizeof(float2), hipMemcpyHostToDevice)); // --- Device side output arrays float2 *d_out1; gpuErrchk(hipMalloc(&d_out1, N * sizeof(float2))); float2 *d_out2; gpuErrchk(hipMalloc(&d_out2, N * sizeof(float2))); float2 *d_out3; gpuErrchk(hipMalloc(&d_out3, N * sizeof(float2))); float2 *d_out4; gpuErrchk(hipMalloc(&d_out4, N * sizeof(float2))); float2 *d_out5; gpuErrchk(hipMalloc(&d_out5, N * sizeof(float2))); /***************************************************************/ /* VERSION 1: cuFFT + IN-PLACE MEMORY MOVEMENTS BASED FFTSHIFT */ /***************************************************************/ hipfftHandle planinverse; cufftSafeCall(hipfftPlan1d(&planinverse, N, HIPFFT_C2C, 1)); timerGPU.StartCounter(); cufftSafeCall(hipfftExecC2C(planinverse, d_vect1, d_vect1, HIPFFT_BACKWARD)); hipLaunchKernelGGL(( fftshift_1D_inplace_memory_movements), dim3(iDivUp(N/2, BLOCKSIZE)), dim3(BLOCKSIZE), 0, 0, d_vect1, N); #ifdef DEBUG gpuErrchk(hipPeekAtLastError()); gpuErrchk(hipDeviceSynchronize()); #endif printf("In-place memory movements elapsed time: %3.3f ms \n", timerGPU.GetCounter()); gpuErrchk(hipMemcpy(h_out1, d_vect1, N * sizeof(float2), hipMemcpyDeviceToHost)); /*******************************************************************/ /* VERSION 2: cuFFT + OUT-OF-PLACE MEMORY MOVEMENTS BASED FFTSHIFT */ /*******************************************************************/ hipfftHandle planinverse_v1; cufftSafeCall(hipfftPlan1d(&planinverse_v1, N, HIPFFT_C2C, 1)); timerGPU.StartCounter(); cufftSafeCall(hipfftExecC2C(planinverse_v1, d_vect2, d_vect2, HIPFFT_BACKWARD)); hipLaunchKernelGGL(( fftshift_1D_outofplace_memory_movements), dim3(iDivUp(N/2, BLOCKSIZE)), dim3(BLOCKSIZE), 0, 0, d_vect2, d_out2, N); #ifdef DEBUG gpuErrchk(hipPeekAtLastError()); gpuErrchk(hipDeviceSynchronize()); #endif printf("Out-of-place memory movements elapsed time: %3.3f ms \n", timerGPU.GetCounter()); gpuErrchk(hipMemcpy(h_out2, d_out2, N * sizeof(float2), hipMemcpyDeviceToHost)); // --- Checking the results for (int i=0; i<N; i++) if ((h_out1[i].x != h_out2[i].x)||(h_out1[i].y != h_out2[i].y)) { printf("Out-of-place memory movements test failed!\n"); return 0; } printf("Out-of-place memory movements test passed!\n"); /***************************************************/ /* VERSION 3: CHESSBOARD MULTIPLICATION V1 + cuFFT */ /***************************************************/ cufftCallbackLoadC hfftshift_1D_chessboard_callback_v1_Ptr; gpuErrchk(hipMemcpyFromSymbol(&hfftshift_1D_chessboard_callback_v1_Ptr, fftshift_1D_chessboard_callback_v1_Ptr, sizeof(hfftshift_1D_chessboard_callback_v1_Ptr))); hipfftHandle planinverse_v2; cufftSafeCall(hipfftCreate(&planinverse_v2)); size_t work_size_v2; cufftSafeCall(hipfftMakePlan1d(planinverse_v2, N, HIPFFT_C2C, 1, &work_size_v2)); cufftSafeCall(cufftXtSetCallback(planinverse_v2, (void **)&hfftshift_1D_chessboard_callback_v1_Ptr, CUFFT_CB_LD_COMPLEX, NULL)); timerGPU.StartCounter(); 
cufftSafeCall(hipfftExecC2C(planinverse_v2, d_vect3, d_out3, HIPFFT_BACKWARD)); printf("Chessboard v1 elapsed time: %3.3f ms \n", timerGPU.GetCounter()); gpuErrchk(hipMemcpy(h_out3, d_out3, N*sizeof(float2), hipMemcpyDeviceToHost)); // --- Checking the results for (int i=0; i<N; i++) if ((h_out1[i].x != h_out3[i].x)||(h_out1[i].y != h_out3[i].y)) { printf("Chessboard v1 test failed!\n"); return 0; } printf("Chessboard v1 test passed!\n"); /****************************************/ /* CHESSBOARD MULTIPLICATION V2 + cuFFT */ /****************************************/ cufftCallbackLoadC hfftshift_1D_chessboard_callback_v2_Ptr; gpuErrchk(hipMemcpyFromSymbol(&hfftshift_1D_chessboard_callback_v2_Ptr, fftshift_1D_chessboard_callback_v2_Ptr, sizeof(hfftshift_1D_chessboard_callback_v2_Ptr))); hipfftHandle planinverse_v3; cufftSafeCall(hipfftCreate(&planinverse_v3)); size_t work_size_v3; cufftSafeCall(hipfftMakePlan1d(planinverse_v3, N, HIPFFT_C2C, 1, &work_size_v3)); cufftSafeCall(cufftXtSetCallback(planinverse_v3, (void **)&hfftshift_1D_chessboard_callback_v2_Ptr, CUFFT_CB_LD_COMPLEX, 0)); timerGPU.StartCounter(); cufftSafeCall(hipfftExecC2C(planinverse_v3, d_vect4, d_out4, HIPFFT_BACKWARD)); printf("Chessboard v2 elapsed time: %3.3f ms \n", timerGPU.GetCounter()); gpuErrchk(hipMemcpy(h_out4, d_out4, N*sizeof(float2), hipMemcpyDeviceToHost)); // --- Checking the results for (int i=0; i<N; i++) if ((h_out1[i].x != h_out4[i].x)||(h_out1[i].y != h_out4[i].y)) { printf("Chessboard v2 test failed!\n"); return 0; } printf("Chessboard v2 test passed!\n"); /****************************************/ /* CHESSBOARD MULTIPLICATION V3 + cuFFT */ /****************************************/ cufftCallbackLoadC hfftshift_1D_chessboard_callback_v3_Ptr; gpuErrchk(hipMemcpyFromSymbol(&hfftshift_1D_chessboard_callback_v3_Ptr, fftshift_1D_chessboard_callback_v3_Ptr, sizeof(hfftshift_1D_chessboard_callback_v3_Ptr))); hipfftHandle planinverse_v4; cufftSafeCall(hipfftCreate(&planinverse_v4)); size_t work_size_v4; cufftSafeCall(hipfftMakePlan1d(planinverse_v4, N, HIPFFT_C2C, 1, &work_size_v4)); cufftSafeCall(cufftXtSetCallback(planinverse_v4, (void **)&hfftshift_1D_chessboard_callback_v3_Ptr, CUFFT_CB_LD_COMPLEX, 0)); timerGPU.StartCounter(); cufftSafeCall(hipfftExecC2C(planinverse_v4, d_vect5, d_out5, HIPFFT_BACKWARD)); printf("Chessboard v3 elapsed time: %3.3f ms \n", timerGPU.GetCounter()); gpuErrchk(hipMemcpy(h_out5, d_out5, N*sizeof(float2), hipMemcpyDeviceToHost)); // --- Checking the results for (int i=0; i<N; i++) if ((h_out1[i].x != h_out5[i].x)||(h_out1[i].y != h_out5[i].y)) { printf("Chessboard v3 test failed!\n"); return 0; } printf("Chessboard v3 test passed!\n"); return 0; }
a90edfb119d15b0eb754fb82539f2178446d86ff.cu
// /usr/local/cuda-8.0/bin/nvcc -O3 -gencode arch=compute_50,code=sm_50 -m64 FFTShift.cu -lcufft_static -lculibos --relocatable-device-code=true #include "cuda_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include <assert.h> #include <cufft.h> #include <cufftXt.h> #include "Utilities.cuh" #include "TimingGPU.cuh" //#define DEBUG #define BLOCKSIZE 256 /*****************************************/ /* FFTSHIFT 1D IN-PLACE MEMORY MOVEMENTS */ /*****************************************/ __global__ void fftshift_1D_inplace_memory_movements(float2 * __restrict__ d_inout, const unsigned int N) { unsigned int tid = threadIdx.x + blockIdx.x * blockDim.x; if (tid < N/2) { float2 temp = d_inout[tid]; d_inout[tid] = d_inout[tid + (N / 2)]; d_inout[tid + (N / 2)] = temp; } } /*********************************************/ /* FFTSHIFT 1D OUT-OF-PLACE MEMORY MOVEMENTS */ /*********************************************/ __global__ void fftshift_1D_outofplace_memory_movements(const float2 * __restrict__ d_in, float2 * __restrict__ d_out, const unsigned int N) { unsigned int tid = threadIdx.x + blockIdx.x * blockDim.x; if (tid < N/2) { d_out[tid] = d_in[tid + (N / 2)]; d_out[tid + (N / 2)] = d_in[tid]; } } /**********************************************/ /* FFTSHIFT 1D INPLACE CHESSBOARD - VERSION 1 */ /**********************************************/ __device__ float2 fftshift_1D_chessboard_callback_v1(void *d_in, size_t offset, void *callerInfo, void *sharedPtr) { float a = (float)(1-2*((int)offset%2)); float2 out = ((float2*)d_in)[offset]; out.x = out.x * a; out.y = out.y * a; return out; } __device__ cufftCallbackLoadC fftshift_1D_chessboard_callback_v1_Ptr = fftshift_1D_chessboard_callback_v1; /**********************************************/ /* FFTSHIFT 1D INPLACE CHESSBOARD - VERSION 2 */ /**********************************************/ __device__ float2 fftshift_1D_chessboard_callback_v2(void *d_in, size_t offset, void *callerInfo, void *sharedPtr) { float a = pow(-1.,(double)(offset&1)); float2 out = ((float2*)d_in)[offset]; out.x = out.x * a; out.y = out.y * a; return out; } __device__ cufftCallbackLoadC fftshift_1D_chessboard_callback_v2_Ptr = fftshift_1D_chessboard_callback_v2; /**********************************************/ /* FFTSHIFT 1D INPLACE CHESSBOARD - VERSION 3 */ /**********************************************/ __device__ float2 fftshift_1D_chessboard_callback_v3(void *d_in, size_t offset, void *callerInfo, void *sharedPtr) { float2 out = ((float2*)d_in)[offset]; if ((int)offset&1) { out.x = -out.x; out.y = -out.y; } return out; } __device__ cufftCallbackLoadC fftshift_1D_chessboard_callback_v3_Ptr = fftshift_1D_chessboard_callback_v3; /********/ /* MAIN */ /********/ int main() { const int N = 131072; // const int N = 16; TimingGPU timerGPU; // --- Host side input array float2 *h_vect = (float2 *)malloc(N * sizeof(float2)); for (int i = 0; i < N; i++) { h_vect[i].x = (float)rand() / (float)RAND_MAX; h_vect[i].y = (float)rand() / (float)RAND_MAX; } // --- Host side output arrays float2 *h_out1 = (float2 *)malloc(N * sizeof(float2)); float2 *h_out2 = (float2 *)malloc(N * sizeof(float2)); float2 *h_out3 = (float2 *)malloc(N * sizeof(float2)); float2 *h_out4 = (float2 *)malloc(N * sizeof(float2)); float2 *h_out5 = (float2 *)malloc(N * sizeof(float2)); // --- Device side input arrays float2 *d_vect1; gpuErrchk(cudaMalloc(&d_vect1, N * sizeof(float2))); float2 *d_vect2; gpuErrchk(cudaMalloc(&d_vect2, N * sizeof(float2))); float2 *d_vect3; gpuErrchk(cudaMalloc(&d_vect3, N * 
sizeof(float2))); float2 *d_vect4; gpuErrchk(cudaMalloc(&d_vect4, N * sizeof(float2))); float2 *d_vect5; gpuErrchk(cudaMalloc(&d_vect5, N * sizeof(float2))); gpuErrchk(cudaMemcpy(d_vect1, h_vect, N * sizeof(float2), cudaMemcpyHostToDevice)); gpuErrchk(cudaMemcpy(d_vect2, h_vect, N * sizeof(float2), cudaMemcpyHostToDevice)); gpuErrchk(cudaMemcpy(d_vect3, h_vect, N * sizeof(float2), cudaMemcpyHostToDevice)); gpuErrchk(cudaMemcpy(d_vect4, h_vect, N * sizeof(float2), cudaMemcpyHostToDevice)); gpuErrchk(cudaMemcpy(d_vect5, h_vect, N * sizeof(float2), cudaMemcpyHostToDevice)); // --- Device side output arrays float2 *d_out1; gpuErrchk(cudaMalloc(&d_out1, N * sizeof(float2))); float2 *d_out2; gpuErrchk(cudaMalloc(&d_out2, N * sizeof(float2))); float2 *d_out3; gpuErrchk(cudaMalloc(&d_out3, N * sizeof(float2))); float2 *d_out4; gpuErrchk(cudaMalloc(&d_out4, N * sizeof(float2))); float2 *d_out5; gpuErrchk(cudaMalloc(&d_out5, N * sizeof(float2))); /***************************************************************/ /* VERSION 1: cuFFT + IN-PLACE MEMORY MOVEMENTS BASED FFTSHIFT */ /***************************************************************/ cufftHandle planinverse; cufftSafeCall(cufftPlan1d(&planinverse, N, CUFFT_C2C, 1)); timerGPU.StartCounter(); cufftSafeCall(cufftExecC2C(planinverse, d_vect1, d_vect1, CUFFT_INVERSE)); fftshift_1D_inplace_memory_movements<<<iDivUp(N/2, BLOCKSIZE), BLOCKSIZE>>>(d_vect1, N); #ifdef DEBUG gpuErrchk(cudaPeekAtLastError()); gpuErrchk(cudaDeviceSynchronize()); #endif printf("In-place memory movements elapsed time: %3.3f ms \n", timerGPU.GetCounter()); gpuErrchk(cudaMemcpy(h_out1, d_vect1, N * sizeof(float2), cudaMemcpyDeviceToHost)); /*******************************************************************/ /* VERSION 2: cuFFT + OUT-OF-PLACE MEMORY MOVEMENTS BASED FFTSHIFT */ /*******************************************************************/ cufftHandle planinverse_v1; cufftSafeCall(cufftPlan1d(&planinverse_v1, N, CUFFT_C2C, 1)); timerGPU.StartCounter(); cufftSafeCall(cufftExecC2C(planinverse_v1, d_vect2, d_vect2, CUFFT_INVERSE)); fftshift_1D_outofplace_memory_movements<<<iDivUp(N/2, BLOCKSIZE), BLOCKSIZE>>>(d_vect2, d_out2, N); #ifdef DEBUG gpuErrchk(cudaPeekAtLastError()); gpuErrchk(cudaDeviceSynchronize()); #endif printf("Out-of-place memory movements elapsed time: %3.3f ms \n", timerGPU.GetCounter()); gpuErrchk(cudaMemcpy(h_out2, d_out2, N * sizeof(float2), cudaMemcpyDeviceToHost)); // --- Checking the results for (int i=0; i<N; i++) if ((h_out1[i].x != h_out2[i].x)||(h_out1[i].y != h_out2[i].y)) { printf("Out-of-place memory movements test failed!\n"); return 0; } printf("Out-of-place memory movements test passed!\n"); /***************************************************/ /* VERSION 3: CHESSBOARD MULTIPLICATION V1 + cuFFT */ /***************************************************/ cufftCallbackLoadC hfftshift_1D_chessboard_callback_v1_Ptr; gpuErrchk(cudaMemcpyFromSymbol(&hfftshift_1D_chessboard_callback_v1_Ptr, fftshift_1D_chessboard_callback_v1_Ptr, sizeof(hfftshift_1D_chessboard_callback_v1_Ptr))); cufftHandle planinverse_v2; cufftSafeCall(cufftCreate(&planinverse_v2)); size_t work_size_v2; cufftSafeCall(cufftMakePlan1d(planinverse_v2, N, CUFFT_C2C, 1, &work_size_v2)); cufftSafeCall(cufftXtSetCallback(planinverse_v2, (void **)&hfftshift_1D_chessboard_callback_v1_Ptr, CUFFT_CB_LD_COMPLEX, NULL)); timerGPU.StartCounter(); cufftSafeCall(cufftExecC2C(planinverse_v2, d_vect3, d_out3, CUFFT_INVERSE)); printf("Chessboard v1 elapsed time: %3.3f ms \n", timerGPU.GetCounter()); 
gpuErrchk(cudaMemcpy(h_out3, d_out3, N*sizeof(float2), cudaMemcpyDeviceToHost)); // --- Checking the results for (int i=0; i<N; i++) if ((h_out1[i].x != h_out3[i].x)||(h_out1[i].y != h_out3[i].y)) { printf("Chessboard v1 test failed!\n"); return 0; } printf("Chessboard v1 test passed!\n"); /****************************************/ /* CHESSBOARD MULTIPLICATION V2 + cuFFT */ /****************************************/ cufftCallbackLoadC hfftshift_1D_chessboard_callback_v2_Ptr; gpuErrchk(cudaMemcpyFromSymbol(&hfftshift_1D_chessboard_callback_v2_Ptr, fftshift_1D_chessboard_callback_v2_Ptr, sizeof(hfftshift_1D_chessboard_callback_v2_Ptr))); cufftHandle planinverse_v3; cufftSafeCall(cufftCreate(&planinverse_v3)); size_t work_size_v3; cufftSafeCall(cufftMakePlan1d(planinverse_v3, N, CUFFT_C2C, 1, &work_size_v3)); cufftSafeCall(cufftXtSetCallback(planinverse_v3, (void **)&hfftshift_1D_chessboard_callback_v2_Ptr, CUFFT_CB_LD_COMPLEX, 0)); timerGPU.StartCounter(); cufftSafeCall(cufftExecC2C(planinverse_v3, d_vect4, d_out4, CUFFT_INVERSE)); printf("Chessboard v2 elapsed time: %3.3f ms \n", timerGPU.GetCounter()); gpuErrchk(cudaMemcpy(h_out4, d_out4, N*sizeof(float2), cudaMemcpyDeviceToHost)); // --- Checking the results for (int i=0; i<N; i++) if ((h_out1[i].x != h_out4[i].x)||(h_out1[i].y != h_out4[i].y)) { printf("Chessboard v2 test failed!\n"); return 0; } printf("Chessboard v2 test passed!\n"); /****************************************/ /* CHESSBOARD MULTIPLICATION V3 + cuFFT */ /****************************************/ cufftCallbackLoadC hfftshift_1D_chessboard_callback_v3_Ptr; gpuErrchk(cudaMemcpyFromSymbol(&hfftshift_1D_chessboard_callback_v3_Ptr, fftshift_1D_chessboard_callback_v3_Ptr, sizeof(hfftshift_1D_chessboard_callback_v3_Ptr))); cufftHandle planinverse_v4; cufftSafeCall(cufftCreate(&planinverse_v4)); size_t work_size_v4; cufftSafeCall(cufftMakePlan1d(planinverse_v4, N, CUFFT_C2C, 1, &work_size_v4)); cufftSafeCall(cufftXtSetCallback(planinverse_v4, (void **)&hfftshift_1D_chessboard_callback_v3_Ptr, CUFFT_CB_LD_COMPLEX, 0)); timerGPU.StartCounter(); cufftSafeCall(cufftExecC2C(planinverse_v4, d_vect5, d_out5, CUFFT_INVERSE)); printf("Chessboard v3 elapsed time: %3.3f ms \n", timerGPU.GetCounter()); gpuErrchk(cudaMemcpy(h_out5, d_out5, N*sizeof(float2), cudaMemcpyDeviceToHost)); // --- Checking the results for (int i=0; i<N; i++) if ((h_out1[i].x != h_out5[i].x)||(h_out1[i].y != h_out5[i].y)) { printf("Chessboard v3 test failed!\n"); return 0; } printf("Chessboard v3 test passed!\n"); return 0; }
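Both halves of this FFTShift pair call iDivUp from Utilities.cuh, a header that is not part of the dump. A minimal sketch under the usual ceiling-division reading (the body is an assumption; only the name and the call sites come from the file):

// Presumed helper from Utilities.cuh: number of blocks of size b needed to cover a items.
int iDivUp(int a, int b) { return (a % b != 0) ? (a / b + 1) : (a / b); }

With N = 131072 and BLOCKSIZE = 256, iDivUp(N/2, BLOCKSIZE) launches 256 blocks of 256 threads, one thread per swapped element pair.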
1936329465d0fe3da6b584a8ab9836f5f460630f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "ndt_gpu/Registration.h" #include "ndt_gpu/debug.h" #include <iostream> #include "glog/logging.h" using std::endl; namespace gpu { GRegistration::GRegistration() { max_iterations_ = 0; x_ = y_ = z_ = NULL; points_number_ = 0; trans_x_ = trans_y_ = trans_z_ = NULL; converged_ = false; nr_iterations_ = 0; transformation_epsilon_ = 0; target_cloud_updated_ = true; target_points_number_ = 0; target_x_ = target_y_ = target_z_ = NULL; is_copied_ = false; } GRegistration::GRegistration(const GRegistration &other) { transformation_epsilon_ = other.transformation_epsilon_; max_iterations_ = other.max_iterations_; //Original scanned point clouds x_ = other.x_; y_ = other.y_; z_ = other.z_; points_number_ = other.points_number_; trans_x_ = other.trans_x_; trans_y_ = other.trans_y_; trans_z_ = other.trans_z_; converged_ = other.converged_; nr_iterations_ = other.nr_iterations_; final_transformation_ = other.final_transformation_; transformation_ = other.transformation_; previous_transformation_ = other.previous_transformation_; target_cloud_updated_ = other.target_cloud_updated_; target_x_ = other.target_x_; target_y_ = other.target_y_; target_z_ = other.target_z_; target_points_number_ = other.target_points_number_; is_copied_ = true; } GRegistration::~GRegistration() { if (!is_copied_) { if (x_ != NULL) { checkCudaErrors(hipFree(x_)); x_ = NULL; } if (y_ != NULL) { checkCudaErrors(hipFree(y_)); y_ = NULL; } if (z_ != NULL) { checkCudaErrors(hipFree(z_)); z_ = NULL; } if (trans_x_ != NULL) { checkCudaErrors(hipFree(trans_x_)); trans_x_ = NULL; } if (trans_y_ != NULL) { checkCudaErrors(hipFree(trans_y_)); trans_y_ = NULL; } if (trans_z_ != NULL) { checkCudaErrors(hipFree(trans_z_)); trans_z_ = NULL; } if (target_x_ != NULL) { checkCudaErrors(hipFree(target_x_)); target_x_ = NULL; } if (target_y_ != NULL) { checkCudaErrors(hipFree(target_y_)); target_y_ = NULL; } if (target_z_ != NULL) { checkCudaErrors(hipFree(target_z_)); target_z_ = NULL; } } } void GRegistration::setTransformationEpsilon(double trans_eps) { transformation_epsilon_ = trans_eps; } double GRegistration::getTransformationEpsilon() const { return transformation_epsilon_; } void GRegistration::setMaximumIterations(int max_itr) { max_iterations_ = max_itr; } int GRegistration::getMaximumIterations() const { return max_iterations_; } Eigen::Matrix<float, 4, 4> GRegistration::getFinalTransformation() const { return final_transformation_; } int GRegistration::getFinalNumIteration() const { return nr_iterations_; } bool GRegistration::hasConverged() const { return converged_; } template <typename T> __global__ void convertInput(T *input, float *out_x, float *out_y, float *out_z, int point_num) { int idx = threadIdx.x + blockIdx.x * blockDim.x; int stride = blockDim.x * gridDim.x; for (int i = idx; i < point_num; i += stride) { T tmp = input[i]; out_x[i] = tmp.x; out_y[i] = tmp.y; out_z[i] = tmp.z; } } void GRegistration::setInputSource(pcl::PointCloud<pcl::PointXYZI>::Ptr input) { //Convert point cloud to float x, y, z if (input->size() > 0) { points_number_ = input->size(); pcl::PointXYZI *tmp; checkCudaErrors(hipMalloc(&tmp, sizeof(pcl::PointXYZI) * points_number_)); pcl::PointXYZI *host_tmp = input->points.data(); // Pin the host buffer for accelerating the memory copy #ifndef __aarch64__ checkCudaErrors(hipHostRegister(host_tmp, sizeof(pcl::PointXYZI) * points_number_, hipHostRegisterDefault)); #endif checkCudaErrors(hipMemcpy(tmp, host_tmp, 
sizeof(pcl::PointXYZI) * points_number_, hipMemcpyHostToDevice)); if (x_ != NULL) { checkCudaErrors(hipFree(x_)); x_ = NULL; } if (y_ != NULL) { checkCudaErrors(hipFree(y_)); y_ = NULL; } if (z_ != NULL) { checkCudaErrors(hipFree(z_)); z_ = NULL; } checkCudaErrors(hipMalloc(&x_, sizeof(float) * points_number_)); checkCudaErrors(hipMalloc(&y_, sizeof(float) * points_number_)); checkCudaErrors(hipMalloc(&z_, sizeof(float) * points_number_)); int block_x = (points_number_ > BLOCK_SIZE_X) ? BLOCK_SIZE_X : points_number_; int grid_x = (points_number_ - 1) / block_x + 1; hipLaunchKernelGGL(( convertInput<pcl::PointXYZI>), dim3(grid_x), dim3(block_x), 0, 0, tmp, x_, y_, z_, points_number_); checkCudaErrors(hipGetLastError()); checkCudaErrors(hipDeviceSynchronize()); if (trans_x_ != NULL) { checkCudaErrors(hipFree(trans_x_)); trans_x_ = NULL; } if (trans_y_ != NULL) { checkCudaErrors(hipFree(trans_y_)); trans_y_ = NULL; } if (trans_z_ != NULL) { checkCudaErrors(hipFree(trans_z_)); trans_z_ = NULL; } checkCudaErrors(hipMalloc(&trans_x_, sizeof(float) * points_number_)); checkCudaErrors(hipMalloc(&trans_y_, sizeof(float) * points_number_)); checkCudaErrors(hipMalloc(&trans_z_, sizeof(float) * points_number_)); // Initially, also copy scanned points to transformed buffers checkCudaErrors(hipMemcpy(trans_x_, x_, sizeof(float) * points_number_, hipMemcpyDeviceToDevice)); checkCudaErrors(hipMemcpy(trans_y_, y_, sizeof(float) * points_number_, hipMemcpyDeviceToDevice)); checkCudaErrors(hipMemcpy(trans_z_, z_, sizeof(float) * points_number_, hipMemcpyDeviceToDevice)); checkCudaErrors(hipFree(tmp)); // Unpin host buffer #ifndef __aarch64__ checkCudaErrors(hipHostUnregister(host_tmp)); #endif } } void GRegistration::setInputSource(pcl::PointCloud<pcl::PointXYZ>::Ptr input) { //Convert point cloud to float x, y, z if (input->size() > 0) { points_number_ = input->size(); pcl::PointXYZ *tmp; checkCudaErrors(hipMalloc(&tmp, sizeof(pcl::PointXYZ) * points_number_)); pcl::PointXYZ *host_tmp = input->points.data(); // Pin the host buffer for accelerating the memory copy #ifndef __aarch64__ checkCudaErrors(hipHostRegister(host_tmp, sizeof(pcl::PointXYZ) * points_number_, hipHostRegisterDefault)); #endif checkCudaErrors(hipMemcpy(tmp, host_tmp, sizeof(pcl::PointXYZ) * points_number_, hipMemcpyHostToDevice)); if (x_ != NULL) { checkCudaErrors(hipFree(x_)); x_ = NULL; } if (y_ != NULL) { checkCudaErrors(hipFree(y_)); y_ = NULL; } if (z_ != NULL) { checkCudaErrors(hipFree(z_)); z_ = NULL; } checkCudaErrors(hipMalloc(&x_, sizeof(float) * points_number_)); checkCudaErrors(hipMalloc(&y_, sizeof(float) * points_number_)); checkCudaErrors(hipMalloc(&z_, sizeof(float) * points_number_)); int block_x = (points_number_ > BLOCK_SIZE_X) ? 
BLOCK_SIZE_X : points_number_; int grid_x = (points_number_ - 1) / block_x + 1; hipLaunchKernelGGL(( convertInput<pcl::PointXYZ>), dim3(grid_x), dim3(block_x), 0, 0, tmp, x_, y_, z_, points_number_); checkCudaErrors(hipGetLastError()); checkCudaErrors(hipDeviceSynchronize()); if (trans_x_ != NULL) { checkCudaErrors(hipFree(trans_x_)); trans_x_ = NULL; } if (trans_y_ != NULL) { checkCudaErrors(hipFree(trans_y_)); trans_y_ = NULL; } if (trans_z_ != NULL) { checkCudaErrors(hipFree(trans_z_)); trans_z_ = NULL; } checkCudaErrors(hipMalloc(&trans_x_, sizeof(float) * points_number_)); checkCudaErrors(hipMalloc(&trans_y_, sizeof(float) * points_number_)); checkCudaErrors(hipMalloc(&trans_z_, sizeof(float) * points_number_)); checkCudaErrors(hipMemcpy(trans_x_, x_, sizeof(float) * points_number_, hipMemcpyDeviceToDevice)); checkCudaErrors(hipMemcpy(trans_y_, y_, sizeof(float) * points_number_, hipMemcpyDeviceToDevice)); checkCudaErrors(hipMemcpy(trans_z_, z_, sizeof(float) * points_number_, hipMemcpyDeviceToDevice)); checkCudaErrors(hipFree(tmp)); #ifndef __aarch64__ checkCudaErrors(hipHostUnregister(host_tmp)); #endif } } //Set input MAP data void GRegistration::setInputTarget(pcl::PointCloud<pcl::PointXYZI>::Ptr input) { if (input->size() > 0) { target_points_number_ = input->size(); LOG(INFO)<<"target_points number:"<<target_points_number_<<endl; pcl::PointXYZI *tmp; checkCudaErrors(hipMalloc(&tmp, sizeof(pcl::PointXYZI) * target_points_number_)); LOG(INFO)<<"device buffer allocated."<<endl; pcl::PointXYZI *host_tmp = input->points.data(); #ifndef __aarch64__ checkCudaErrors(hipHostRegister(host_tmp, sizeof(pcl::PointXYZI) * target_points_number_, hipHostRegisterDefault)); LOG(INFO)<<"__aarch64__ not defined: host buffer pinned"<<endl; #endif checkCudaErrors(hipMemcpy(tmp, host_tmp, sizeof(pcl::PointXYZI) * target_points_number_, hipMemcpyHostToDevice)); LOG(INFO)<<"after memcpy()"<<endl; if (target_x_ != NULL) { checkCudaErrors(hipFree(target_x_)); target_x_ = NULL; } if (target_y_ != NULL) { checkCudaErrors(hipFree(target_y_)); target_y_ = NULL; } if (target_z_ != NULL) { checkCudaErrors(hipFree(target_z_)); target_z_ = NULL; } LOG(INFO)<<"check old pointer released"<<endl; checkCudaErrors(hipMalloc(&target_x_, sizeof(float) * target_points_number_)); checkCudaErrors(hipMalloc(&target_y_, sizeof(float) * target_points_number_)); checkCudaErrors(hipMalloc(&target_z_, sizeof(float) * target_points_number_)); LOG(INFO)<<"malloc points 2."<<endl; int block_x = (target_points_number_ > BLOCK_SIZE_X) ?
BLOCK_SIZE_X : target_points_number_; int grid_x = (target_points_number_ - 1) / block_x + 1; hipLaunchKernelGGL(( convertInput<pcl::PointXYZI>), dim3(grid_x), dim3(block_x), 0, 0, tmp, target_x_, target_y_, target_z_, target_points_number_); LOG(INFO)<<"convert mapping."<<endl; checkCudaErrors(hipGetLastError()); checkCudaErrors(hipDeviceSynchronize()); #ifndef __aarch64__ checkCudaErrors(hipHostUnregister(host_tmp)); #endif checkCudaErrors(hipFree(tmp)); LOG(INFO)<<"free register."<<endl; } } void GRegistration::setInputTarget(pcl::PointCloud<pcl::PointXYZ>::Ptr input) { if (input->size() > 0) { target_points_number_ = input->size(); pcl::PointXYZ *tmp; checkCudaErrors(hipMalloc(&tmp, sizeof(pcl::PointXYZ) * target_points_number_)); pcl::PointXYZ *host_tmp = input->points.data(); #ifndef __aarch64__ checkCudaErrors(hipHostRegister(host_tmp, sizeof(pcl::PointXYZ) * target_points_number_, hipHostRegisterDefault)); #endif checkCudaErrors(hipMemcpy(tmp, host_tmp, sizeof(pcl::PointXYZ) * target_points_number_, hipMemcpyHostToDevice)); if (target_x_ != NULL) { checkCudaErrors(hipFree(target_x_)); target_x_ = NULL; } if (target_y_ != NULL) { checkCudaErrors(hipFree(target_y_)); target_y_ = NULL; } if (target_z_ != NULL) { checkCudaErrors(hipFree(target_z_)); target_z_ = NULL; } checkCudaErrors(hipMalloc(&target_x_, sizeof(float) * target_points_number_)); checkCudaErrors(hipMalloc(&target_y_, sizeof(float) * target_points_number_)); checkCudaErrors(hipMalloc(&target_z_, sizeof(float) * target_points_number_)); int block_x = (target_points_number_ > BLOCK_SIZE_X) ? BLOCK_SIZE_X : target_points_number_; int grid_x = (target_points_number_ - 1) / block_x + 1; hipLaunchKernelGGL(( convertInput<pcl::PointXYZ>), dim3(grid_x), dim3(block_x), 0, 0, tmp, target_x_, target_y_, target_z_, target_points_number_); checkCudaErrors(hipGetLastError()); checkCudaErrors(hipDeviceSynchronize()); checkCudaErrors(hipFree(tmp)); #ifndef __aarch64__ checkCudaErrors(hipHostUnregister(host_tmp)); #endif } } void GRegistration::align(const Eigen::Matrix<float, 4, 4> &guess) { converged_ = false; final_transformation_ = transformation_ = previous_transformation_ = Eigen::Matrix<float, 4, 4>::Identity(); computeTransformation(guess); } void GRegistration::computeTransformation(const Eigen::Matrix<float, 4, 4> &guess) { printf("Unsupported by Registration\n"); } }
1936329465d0fe3da6b584a8ab9836f5f460630f.cu
#include "ndt_gpu/Registration.h" #include "ndt_gpu/debug.h" #include <iostream> #include "glog/logging.h" using std::endl; namespace gpu { GRegistration::GRegistration() { max_iterations_ = 0; x_ = y_ = z_ = NULL; points_number_ = 0; trans_x_ = trans_y_ = trans_z_ = NULL; converged_ = false; nr_iterations_ = 0; transformation_epsilon_ = 0; target_cloud_updated_ = true; target_points_number_ = 0; target_x_ = target_y_ = target_z_ = NULL; is_copied_ = false; } GRegistration::GRegistration(const GRegistration &other) { transformation_epsilon_ = other.transformation_epsilon_; max_iterations_ = other.max_iterations_; //Original scanned point clouds x_ = other.x_; y_ = other.y_; z_ = other.z_; points_number_ = other.points_number_; trans_x_ = other.trans_x_; trans_y_ = other.trans_y_; trans_z_ = other.trans_z_; converged_ = other.converged_; nr_iterations_ = other.nr_iterations_; final_transformation_ = other.final_transformation_; transformation_ = other.transformation_; previous_transformation_ = other.previous_transformation_; target_cloud_updated_ = other.target_cloud_updated_; target_x_ = other.target_x_; target_y_ = other.target_y_; target_z_ = other.target_z_; target_points_number_ = other.target_points_number_; is_copied_ = true; } GRegistration::~GRegistration() { if (!is_copied_) { if (x_ != NULL) { checkCudaErrors(cudaFree(x_)); x_ = NULL; } if (y_ != NULL) { checkCudaErrors(cudaFree(y_)); y_ = NULL; } if (z_ != NULL) { checkCudaErrors(cudaFree(z_)); z_ = NULL; } if (trans_x_ != NULL) { checkCudaErrors(cudaFree(trans_x_)); trans_x_ = NULL; } if (trans_y_ != NULL) { checkCudaErrors(cudaFree(trans_y_)); trans_y_ = NULL; } if (trans_z_ != NULL) { checkCudaErrors(cudaFree(trans_z_)); trans_z_ = NULL; } if (target_x_ != NULL) { checkCudaErrors(cudaFree(target_x_)); target_x_ = NULL; } if (target_y_ != NULL) { checkCudaErrors(cudaFree(target_y_)); target_y_ = NULL; } if (target_z_ != NULL) { checkCudaErrors(cudaFree(target_z_)); target_z_ = NULL; } } } void GRegistration::setTransformationEpsilon(double trans_eps) { transformation_epsilon_ = trans_eps; } double GRegistration::getTransformationEpsilon() const { return transformation_epsilon_; } void GRegistration::setMaximumIterations(int max_itr) { max_iterations_ = max_itr; } int GRegistration::getMaximumIterations() const { return max_iterations_; } Eigen::Matrix<float, 4, 4> GRegistration::getFinalTransformation() const { return final_transformation_; } int GRegistration::getFinalNumIteration() const { return nr_iterations_; } bool GRegistration::hasConverged() const { return converged_; } template <typename T> __global__ void convertInput(T *input, float *out_x, float *out_y, float *out_z, int point_num) { int idx = threadIdx.x + blockIdx.x * blockDim.x; int stride = blockDim.x * gridDim.x; for (int i = idx; i < point_num; i += stride) { T tmp = input[i]; out_x[i] = tmp.x; out_y[i] = tmp.y; out_z[i] = tmp.z; } } void GRegistration::setInputSource(pcl::PointCloud<pcl::PointXYZI>::Ptr input) { //Convert point cloud to float x, y, z if (input->size() > 0) { points_number_ = input->size(); pcl::PointXYZI *tmp; checkCudaErrors(cudaMalloc(&tmp, sizeof(pcl::PointXYZI) * points_number_)); pcl::PointXYZI *host_tmp = input->points.data(); // Pin the host buffer for accelerating the memory copy #ifndef __aarch64__ checkCudaErrors(cudaHostRegister(host_tmp, sizeof(pcl::PointXYZI) * points_number_, cudaHostRegisterDefault)); #endif checkCudaErrors(cudaMemcpy(tmp, host_tmp, sizeof(pcl::PointXYZI) * points_number_, cudaMemcpyHostToDevice)); if (x_ != 
NULL) { checkCudaErrors(cudaFree(x_)); x_ = NULL; } if (y_ != NULL) { checkCudaErrors(cudaFree(y_)); y_ = NULL; } if (z_ != NULL) { checkCudaErrors(cudaFree(z_)); z_ = NULL; } checkCudaErrors(cudaMalloc(&x_, sizeof(float) * points_number_)); checkCudaErrors(cudaMalloc(&y_, sizeof(float) * points_number_)); checkCudaErrors(cudaMalloc(&z_, sizeof(float) * points_number_)); int block_x = (points_number_ > BLOCK_SIZE_X) ? BLOCK_SIZE_X : points_number_; int grid_x = (points_number_ - 1) / block_x + 1; convertInput<pcl::PointXYZI><<<grid_x, block_x>>>(tmp, x_, y_, z_, points_number_); checkCudaErrors(cudaGetLastError()); checkCudaErrors(cudaDeviceSynchronize()); if (trans_x_ != NULL) { checkCudaErrors(cudaFree(trans_x_)); trans_x_ = NULL; } if (trans_y_ != NULL) { checkCudaErrors(cudaFree(trans_y_)); trans_y_ = NULL; } if (trans_z_ != NULL) { checkCudaErrors(cudaFree(trans_z_)); trans_z_ = NULL; } checkCudaErrors(cudaMalloc(&trans_x_, sizeof(float) * points_number_)); checkCudaErrors(cudaMalloc(&trans_y_, sizeof(float) * points_number_)); checkCudaErrors(cudaMalloc(&trans_z_, sizeof(float) * points_number_)); // Initially, also copy scanned points to transformed buffers checkCudaErrors(cudaMemcpy(trans_x_, x_, sizeof(float) * points_number_, cudaMemcpyDeviceToDevice)); checkCudaErrors(cudaMemcpy(trans_y_, y_, sizeof(float) * points_number_, cudaMemcpyDeviceToDevice)); checkCudaErrors(cudaMemcpy(trans_z_, z_, sizeof(float) * points_number_, cudaMemcpyDeviceToDevice)); checkCudaErrors(cudaFree(tmp)); // Unpin host buffer #ifndef __aarch64__ checkCudaErrors(cudaHostUnregister(host_tmp)); #endif } } void GRegistration::setInputSource(pcl::PointCloud<pcl::PointXYZ>::Ptr input) { //Convert point cloud to float x, y, z if (input->size() > 0) { points_number_ = input->size(); pcl::PointXYZ *tmp; checkCudaErrors(cudaMalloc(&tmp, sizeof(pcl::PointXYZ) * points_number_)); pcl::PointXYZ *host_tmp = input->points.data(); // Pin the host buffer for accelerating the memory copy #ifndef __aarch64__ checkCudaErrors(cudaHostRegister(host_tmp, sizeof(pcl::PointXYZ) * points_number_, cudaHostRegisterDefault)); #endif checkCudaErrors(cudaMemcpy(tmp, host_tmp, sizeof(pcl::PointXYZ) * points_number_, cudaMemcpyHostToDevice)); if (x_ != NULL) { checkCudaErrors(cudaFree(x_)); x_ = NULL; } if (y_ != NULL) { checkCudaErrors(cudaFree(y_)); y_ = NULL; } if (z_ != NULL) { checkCudaErrors(cudaFree(z_)); z_ = NULL; } checkCudaErrors(cudaMalloc(&x_, sizeof(float) * points_number_)); checkCudaErrors(cudaMalloc(&y_, sizeof(float) * points_number_)); checkCudaErrors(cudaMalloc(&z_, sizeof(float) * points_number_)); int block_x = (points_number_ > BLOCK_SIZE_X) ? 
BLOCK_SIZE_X : points_number_; int grid_x = (points_number_ - 1) / block_x + 1; convertInput<pcl::PointXYZ><<<grid_x, block_x>>>(tmp, x_, y_, z_, points_number_); checkCudaErrors(cudaGetLastError()); checkCudaErrors(cudaDeviceSynchronize()); if (trans_x_ != NULL) { checkCudaErrors(cudaFree(trans_x_)); trans_x_ = NULL; } if (trans_y_ != NULL) { checkCudaErrors(cudaFree(trans_y_)); trans_y_ = NULL; } if (trans_z_ != NULL) { checkCudaErrors(cudaFree(trans_z_)); trans_z_ = NULL; } checkCudaErrors(cudaMalloc(&trans_x_, sizeof(float) * points_number_)); checkCudaErrors(cudaMalloc(&trans_y_, sizeof(float) * points_number_)); checkCudaErrors(cudaMalloc(&trans_z_, sizeof(float) * points_number_)); checkCudaErrors(cudaMemcpy(trans_x_, x_, sizeof(float) * points_number_, cudaMemcpyDeviceToDevice)); checkCudaErrors(cudaMemcpy(trans_y_, y_, sizeof(float) * points_number_, cudaMemcpyDeviceToDevice)); checkCudaErrors(cudaMemcpy(trans_z_, z_, sizeof(float) * points_number_, cudaMemcpyDeviceToDevice)); checkCudaErrors(cudaFree(tmp)); #ifndef __aarch64__ checkCudaErrors(cudaHostUnregister(host_tmp)); #endif } } //Set input MAP data void GRegistration::setInputTarget(pcl::PointCloud<pcl::PointXYZI>::Ptr input) { if (input->size() > 0) { target_points_number_ = input->size(); LOG(INFO)<<"target_points number:"<<target_points_number_<<endl; pcl::PointXYZI *tmp; checkCudaErrors(cudaMalloc(&tmp, sizeof(pcl::PointXYZI) * target_points_number_)); LOG(INFO)<<"device buffer allocated."<<endl; pcl::PointXYZI *host_tmp = input->points.data(); #ifndef __aarch64__ checkCudaErrors(cudaHostRegister(host_tmp, sizeof(pcl::PointXYZI) * target_points_number_, cudaHostRegisterDefault)); LOG(INFO)<<"__aarch64__ not defined: host buffer pinned"<<endl; #endif checkCudaErrors(cudaMemcpy(tmp, host_tmp, sizeof(pcl::PointXYZI) * target_points_number_, cudaMemcpyHostToDevice)); LOG(INFO)<<"after memcpy()"<<endl; if (target_x_ != NULL) { checkCudaErrors(cudaFree(target_x_)); target_x_ = NULL; } if (target_y_ != NULL) { checkCudaErrors(cudaFree(target_y_)); target_y_ = NULL; } if (target_z_ != NULL) { checkCudaErrors(cudaFree(target_z_)); target_z_ = NULL; } LOG(INFO)<<"check old pointer released"<<endl; checkCudaErrors(cudaMalloc(&target_x_, sizeof(float) * target_points_number_)); checkCudaErrors(cudaMalloc(&target_y_, sizeof(float) * target_points_number_)); checkCudaErrors(cudaMalloc(&target_z_, sizeof(float) * target_points_number_)); LOG(INFO)<<"malloc points 2."<<endl; int block_x = (target_points_number_ > BLOCK_SIZE_X) ?
BLOCK_SIZE_X : target_points_number_; int grid_x = (target_points_number_ - 1) / block_x + 1; convertInput<pcl::PointXYZI><<<grid_x, block_x>>>(tmp, target_x_, target_y_, target_z_, target_points_number_); LOG(INFO)<<"convert mapping."<<endl; checkCudaErrors(cudaGetLastError()); checkCudaErrors(cudaDeviceSynchronize()); #ifndef __aarch64__ checkCudaErrors(cudaHostUnregister(host_tmp)); #endif checkCudaErrors(cudaFree(tmp)); LOG(INFO)<<"free register."<<endl; } } void GRegistration::setInputTarget(pcl::PointCloud<pcl::PointXYZ>::Ptr input) { if (input->size() > 0) { target_points_number_ = input->size(); pcl::PointXYZ *tmp; checkCudaErrors(cudaMalloc(&tmp, sizeof(pcl::PointXYZ) * target_points_number_)); pcl::PointXYZ *host_tmp = input->points.data(); #ifndef __aarch64__ checkCudaErrors(cudaHostRegister(host_tmp, sizeof(pcl::PointXYZ) * target_points_number_, cudaHostRegisterDefault)); #endif checkCudaErrors(cudaMemcpy(tmp, host_tmp, sizeof(pcl::PointXYZ) * target_points_number_, cudaMemcpyHostToDevice)); if (target_x_ != NULL) { checkCudaErrors(cudaFree(target_x_)); target_x_ = NULL; } if (target_y_ != NULL) { checkCudaErrors(cudaFree(target_y_)); target_y_ = NULL; } if (target_z_ != NULL) { checkCudaErrors(cudaFree(target_z_)); target_z_ = NULL; } checkCudaErrors(cudaMalloc(&target_x_, sizeof(float) * target_points_number_)); checkCudaErrors(cudaMalloc(&target_y_, sizeof(float) * target_points_number_)); checkCudaErrors(cudaMalloc(&target_z_, sizeof(float) * target_points_number_)); int block_x = (target_points_number_ > BLOCK_SIZE_X) ? BLOCK_SIZE_X : target_points_number_; int grid_x = (target_points_number_ - 1) / block_x + 1; convertInput<pcl::PointXYZ><<<grid_x, block_x>>>(tmp, target_x_, target_y_, target_z_, target_points_number_); checkCudaErrors(cudaGetLastError()); checkCudaErrors(cudaDeviceSynchronize()); checkCudaErrors(cudaFree(tmp)); #ifndef __aarch64__ checkCudaErrors(cudaHostUnregister(host_tmp)); #endif } } void GRegistration::align(const Eigen::Matrix<float, 4, 4> &guess) { converged_ = false; final_transformation_ = transformation_ = previous_transformation_ = Eigen::Matrix<float, 4, 4>::Identity(); computeTransformation(guess); } void GRegistration::computeTransformation(const Eigen::Matrix<float, 4, 4> &guess) { printf("Unsupported by Registration\n"); } }
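GRegistration itself is only a base class here: computeTransformation just prints "Unsupported by Registration" and is meant to be overridden by a concrete matcher. A hedged usage sketch, assuming a subclass such as ndt_gpu's GNormalDistributionsTransform (not shown in this pair) and using only the accessors defined above:

// Sketch: driving the registration interface from host code.
gpu::GNormalDistributionsTransform ndt;  // assumed concrete subclass
ndt.setTransformationEpsilon(0.01);      // convergence threshold
ndt.setMaximumIterations(30);
ndt.setInputTarget(map_cloud);           // pcl::PointCloud<pcl::PointXYZ>::Ptr
ndt.setInputSource(scan_cloud);
ndt.align(Eigen::Matrix4f::Identity()); // initial guess; runs computeTransformation()
if (ndt.hasConverged()) {
  Eigen::Matrix4f pose = ndt.getFinalTransformation();
  // use pose, e.g. publish the localized transform
}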
6caa22c371dd4128fbc5746350d2590d1ce709e4.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "block_reduce.h" extern "C" __global__ void deviceReduceKernel(long* in, long* out, int N) { long sum = 0; //reduce multiple elements per thread for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < N; i += blockDim.x * gridDim.x) { sum += in[i]; } sum = blockReduceSum(sum); if (threadIdx.x == 0) out[blockIdx.x] = sum; }
6caa22c371dd4128fbc5746350d2590d1ce709e4.cu
#include "block_reduce.h" extern "C" __global__ void deviceReduceKernel(long* in, long* out, int N) { long sum = 0; //reduce multiple elements per thread for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < N; i += blockDim.x * gridDim.x) { sum += in[i]; } sum = blockReduceSum(sum); if (threadIdx.x == 0) out[blockIdx.x] = sum; }
fccadb016e38e651478ff5afb191ed28fefa8a22.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2020, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "HugeCTR/include/data_simulator.hpp" #include "HugeCTR/include/embeddings/distributed_slot_sparse_embedding_hash.hpp" #include "HugeCTR/include/utils.cuh" #include <numeric> #include <experimental/filesystem> #include <thrust/sort.h> #include <thrust/execution_policy.h> namespace fs = std::experimental::filesystem; namespace HugeCTR { template <typename TypeHashKey, typename TypeEmbeddingComp> DistributedSlotSparseEmbeddingHash<TypeHashKey, TypeEmbeddingComp>:: DistributedSlotSparseEmbeddingHash( const Tensors2<TypeHashKey> &train_row_offsets_tensors, const Tensors2<TypeHashKey> &train_value_tensors, const std::vector<std::shared_ptr<size_t>> &train_nnz_array, const Tensors2<TypeHashKey> &evaluate_row_offsets_tensors, const Tensors2<TypeHashKey> &evaluate_value_tensors, const std::vector<std::shared_ptr<size_t>> &evaluate_nnz_array, const SparseEmbeddingHashParams &embedding_params, const std::shared_ptr<ResourceManager> &resource_manager) : Base(train_row_offsets_tensors, train_value_tensors, train_nnz_array, evaluate_row_offsets_tensors, evaluate_value_tensors, evaluate_nnz_array, Embedding_t::DistributedSlotSparseEmbeddingHash, embedding_params, resource_manager) { try { // CAUTION: we can not decide how many <key,value> pairs land on each GPU, because the GPU // distribution is computed by (key%gpu_count). To avoid allocating the total size of the // hash table on each GPU while still getting good performance from a not-full hash table, // users need to set the param "load_factor" (load_factor<1).
max_vocabulary_size_per_gpu_ = Base::get_max_vocabulary_size_per_gpu(); max_vocabulary_size_ = max_vocabulary_size_per_gpu_ * Base::get_resource_manager().get_global_gpu_count(); MESSAGE_("max_vocabulary_size_per_gpu_=" + std::to_string(max_vocabulary_size_per_gpu_)); CudaDeviceContext context; for (size_t id = 0; id < Base::get_resource_manager().get_local_gpu_count(); id++) { context.set_device(Base::get_local_gpu(id).get_device_id()); // new GeneralBuffer objects const std::shared_ptr<GeneralBuffer2<CudaAllocator>> &buf = Base::get_buffer(id); embedding_optimizers_.emplace_back(max_vocabulary_size_per_gpu_, Base::embedding_params_, buf); // new hash table value vectors { Tensor2<float> tensor; buf->reserve({max_vocabulary_size_per_gpu_, Base::get_embedding_vec_size()}, &tensor); hash_table_value_tensors_.push_back(tensor); } // new hash table value_index obtained by get() from HashTable { Tensor2<size_t> tensor; buf->reserve({1, Base::get_universal_batch_size() * Base::get_max_feature_num()}, &tensor); hash_value_index_tensors_.push_back(tensor); } // new embedding features reduced by hash table values (results of forward) { Tensor2<TypeEmbeddingComp> tensor; buf->reserve({Base::get_universal_batch_size() * Base::get_slot_num(), Base::get_embedding_vec_size()}, &tensor); embedding_feature_tensors_.push_back(tensor); } // new wgrad used by backward { Tensor2<TypeEmbeddingComp> tensor; buf->reserve( {Base::get_batch_size(true) * Base::get_slot_num(), Base::get_embedding_vec_size()}, &tensor); wgrad_tensors_.push_back(tensor); } // new temp tensors used by update_params { Tensor2<TypeHashKey> tensor; buf->reserve({1, Base::get_universal_batch_size() * Base::get_slot_num() + 1}, &tensor); row_offset_allreduce_tensors_.push_back(tensor); } { Tensor2<TypeEmbeddingComp> tensor; buf->reserve({Base::get_universal_batch_size() * Base::get_slot_num(), Base::get_embedding_vec_size()}, &tensor); utest_forward_temp_tensors_.push_back(tensor); } // init GeneralBuffers to do real allocation #ifndef NDEBUG std::cout << " max_feature_num_:" << Base::get_max_feature_num() << std::endl; #endif } hash_tables_.resize(Base::get_resource_manager().get_local_gpu_count()); #pragma omp parallel num_threads(Base::get_resource_manager().get_local_gpu_count()) { size_t id = omp_get_thread_num(); CudaDeviceContext context(Base::get_local_gpu(id).get_device_id()); // construct HashTable object: used to store hash table <key, value_index> hash_tables_[id].reset(new NvHashTable(max_vocabulary_size_per_gpu_)); Base::get_buffer(id)->allocate(); } for (size_t id = 0; id < Base::get_resource_manager().get_local_gpu_count(); id++) { context.set_device(Base::get_local_gpu(id).get_device_id()); embedding_optimizers_[id].initialize(Base::get_local_gpu(id)); } // end of for(int id = 0; id < Base::get_local_gpu_count(); id++) functors_.sync_all_gpus(Base::get_resource_manager()); } catch (const std::runtime_error &rt_err) { std::cerr << rt_err.what() << std::endl; throw; } return; } template <typename TypeHashKey, typename TypeEmbeddingComp> void DistributedSlotSparseEmbeddingHash<TypeHashKey, TypeEmbeddingComp>::load_parameters( std::string sparse_model) { if (!fs::exists(sparse_model)) { CK_THROW_(Error_t::WrongInput, std::string("Folder ") + sparse_model + " doesn't exist"); } const std::string key_file(sparse_model + "/" + sparse_model + ".key"); const std::string vec_file(sparse_model + "/" + sparse_model + ".vec"); std::ifstream key_stream(key_file, std::ifstream::binary); std::ifstream vec_stream(vec_file, std::ifstream::binary);
// check if file is opened successfully if (!vec_stream.is_open() || !key_stream.is_open()) { CK_THROW_(Error_t::WrongInput, "Error: file not open for reading"); } size_t key_file_size_in_byte = fs::file_size(key_file); size_t vec_file_size_in_byte = fs::file_size(vec_file); size_t key_size = sizeof(TypeHashKey); size_t vec_size = sizeof(float) * Base::get_embedding_vec_size(); size_t key_num = key_file_size_in_byte / key_size; size_t vec_num = vec_file_size_in_byte / vec_size; if (key_num != vec_num || key_file_size_in_byte % key_size != 0 || vec_file_size_in_byte % vec_size != 0) { CK_THROW_(Error_t::WrongInput, "Error: file size is not correct"); } auto blobs_buff = GeneralBuffer2<CudaHostAllocator>::create(); Tensor2<TypeHashKey> keys; blobs_buff->reserve({key_num}, &keys); Tensor2<float> embeddings; blobs_buff->reserve({vec_num, Base::get_embedding_vec_size()}, &embeddings); blobs_buff->allocate(); TypeHashKey *key_ptr = keys.get_ptr(); float *embedding_ptr = embeddings.get_ptr(); key_stream.read(reinterpret_cast<char *>(key_ptr), key_file_size_in_byte); vec_stream.read(reinterpret_cast<char *>(embedding_ptr), vec_file_size_in_byte); load_parameters(keys, embeddings, key_num, max_vocabulary_size_, Base::get_embedding_vec_size(), max_vocabulary_size_per_gpu_, hash_table_value_tensors_, hash_tables_); return; } template <typename TypeHashKey, typename TypeEmbeddingComp> void DistributedSlotSparseEmbeddingHash<TypeHashKey, TypeEmbeddingComp>::load_parameters( BufferBag &buf_bag, size_t num) { const TensorBag2 &keys_bag = buf_bag.keys; const Tensor2<float> &embeddings = buf_bag.embedding; const Tensor2<TypeHashKey> keys = Tensor2<TypeHashKey>::stretch_from(keys_bag); load_parameters(keys, embeddings, num, max_vocabulary_size_, Base::get_embedding_vec_size(), max_vocabulary_size_per_gpu_, hash_table_value_tensors_, hash_tables_); } template <typename TypeHashKey, typename TypeEmbeddingComp> void DistributedSlotSparseEmbeddingHash<TypeHashKey, TypeEmbeddingComp>::load_parameters( const Tensor2<TypeHashKey> &keys, const Tensor2<float> &embeddings, size_t num, size_t vocabulary_size, size_t embedding_vec_size, size_t max_vocabulary_size_per_gpu, Tensors2<float> &embedding_tensors, std::vector<std::shared_ptr<HashTable<TypeHashKey, size_t>>> &hash_tables) { if (keys.get_dimensions()[0] < num || embeddings.get_dimensions()[0] < num) { CK_THROW_(Error_t::WrongInput, "The rows of keys and embeddings are not consistent."); } if (num > vocabulary_size) { CK_THROW_(Error_t::WrongInput, "Error: hash table file size is larger than hash table vocabulary_size"); } const TypeHashKey *key_ptr = keys.get_ptr(); const float *embedding_ptr = embeddings.get_ptr(); int my_rank = Base::get_resource_manager().get_process_id(); int n_ranks = Base::get_resource_manager().get_num_process(); // define size size_t local_gpu_count = Base::get_resource_manager().get_local_gpu_count(); const size_t chunk_size = 1000; size_t hash_table_key_tile_size = 1; size_t hash_table_key_tile_size_in_B = hash_table_key_tile_size * sizeof(TypeHashKey); size_t hash_table_key_chunk_size = hash_table_key_tile_size * chunk_size; size_t hash_table_key_chunk_size_in_B = hash_table_key_chunk_size * sizeof(TypeHashKey); size_t hash_table_value_index_chunk_size_in_B = hash_table_key_chunk_size * sizeof(size_t); size_t hash_table_value_tile_size = embedding_vec_size; size_t hash_table_value_tile_size_in_B = hash_table_value_tile_size * sizeof(float); size_t hash_table_value_chunk_size = hash_table_value_tile_size * chunk_size; size_t 
hash_table_value_chunk_size_in_B = hash_table_value_chunk_size * sizeof(float); // CAUTION: can not decide how many values for each GPU, so need to allocate enough memory // for each GPU allocate GPU memory for hash_table_value_index std::unique_ptr<size_t[]> tile_counter_per_gpu( new size_t[local_gpu_count]); // <= hash_table_value_index_per_gpu_size memset(tile_counter_per_gpu.get(), 0, sizeof(size_t) * local_gpu_count); std::unique_ptr<size_t[]> tile_counter_in_chunk_per_gpu(new size_t[local_gpu_count]); memset(tile_counter_in_chunk_per_gpu.get(), 0, sizeof(size_t) * local_gpu_count); std::unique_ptr<size_t *[]> d_hash_table_value_index_chunk_per_gpu(new size_t *[local_gpu_count]); CudaDeviceContext context; for (size_t id = 0; id < local_gpu_count; id++) { context.set_device(Base::get_local_gpu(id).get_device_id()); CK_CUDA_THROW_(hipMalloc(&d_hash_table_value_index_chunk_per_gpu[id], hash_table_value_index_chunk_size_in_B)); // initialize to zeros CK_CUDA_THROW_(hipMemsetAsync(d_hash_table_value_index_chunk_per_gpu[id], 0, hash_table_value_index_chunk_size_in_B, Base::get_local_gpu(id).get_stream())); } // sync wait functors_.sync_all_gpus(Base::get_resource_manager()); // CAUTION: can not decide how many values for each GPU, so need to allocate enough memory // for each GPU allocate CPU/GPU memory for hash_table/key/value chunk std::unique_ptr<TypeHashKey *[]> h_hash_table_key_chunk_per_gpu( new TypeHashKey *[local_gpu_count]); for (size_t id = 0; id < local_gpu_count; id++) { CK_CUDA_THROW_( hipHostMalloc(&h_hash_table_key_chunk_per_gpu[id], hash_table_key_chunk_size_in_B)); } std::unique_ptr<TypeHashKey *[]> d_hash_table_key_chunk_per_gpu( new TypeHashKey *[local_gpu_count]); for (size_t id = 0; id < local_gpu_count; id++) { context.set_device(Base::get_local_gpu(id).get_device_id()); CK_CUDA_THROW_(hipMalloc(&d_hash_table_key_chunk_per_gpu[id], hash_table_key_chunk_size_in_B)); } std::unique_ptr<float *[]> h_hash_table_value_chunk_per_gpu(new float *[local_gpu_count]); for (size_t id = 0; id < local_gpu_count; id++) { CK_CUDA_THROW_(hipHostMalloc(&h_hash_table_value_chunk_per_gpu[id], hash_table_value_chunk_size_in_B)); } // do upload size_t loop_num = num / chunk_size; for (size_t i = 0; i < loop_num; i++) { TypeHashKey *key_dst_buf; float *value_dst_buf; for (size_t k = 0; k < chunk_size; k++) { // process a tile in each loop TypeHashKey key = key_ptr[i * chunk_size + k]; size_t gid = key % Base::get_resource_manager().get_global_gpu_count(); // global GPU ID size_t id = Base::get_resource_manager().get_gpu_local_id_from_global_id( gid); // local GPU ID (not gpudevice id) int dst_rank = Base::get_resource_manager().get_process_id_from_gpu_global_id(gid); // node id if (my_rank == dst_rank) { // memcpy hash_table_key to corresponding GPU key_dst_buf = h_hash_table_key_chunk_per_gpu[id] + tile_counter_in_chunk_per_gpu[id] * hash_table_key_tile_size; *key_dst_buf = key; // memcpy hash_table_value to corresponding GPU value_dst_buf = h_hash_table_value_chunk_per_gpu[id] + tile_counter_in_chunk_per_gpu[id] * hash_table_value_tile_size; memcpy(value_dst_buf, embedding_ptr + (i * chunk_size + k) * embedding_vec_size, hash_table_value_tile_size_in_B); tile_counter_in_chunk_per_gpu[id] += 1; } else { continue; } } // end of for(int k = 0; k < (chunk_loop * local_gpu_count); k++) // do HashTable insert <key,value_index> for (size_t id = 0; id < local_gpu_count; id++) { context.set_device(Base::get_local_gpu(id).get_device_id()); size_t tile_count = tile_counter_in_chunk_per_gpu[id]; //
memcpy hash_table_key from CPU to GPU CK_CUDA_THROW_(hipMemcpyAsync(d_hash_table_key_chunk_per_gpu[id], h_hash_table_key_chunk_per_gpu[id], tile_count * sizeof(TypeHashKey), hipMemcpyHostToDevice, Base::get_local_gpu(id).get_stream())); size_t value_index_offset = tile_counter_per_gpu[id]; size_t *value_index_buf = d_hash_table_value_index_chunk_per_gpu[id]; if (tile_count > 0) { // set hash_table_value_index on GPU functors_.memset_liner(value_index_buf, value_index_offset, 1ul, tile_count, Base::get_local_gpu(id).get_stream()); } // do hash table insert <key, value_index> on GPU hash_tables[id]->insert(d_hash_table_key_chunk_per_gpu[id], value_index_buf, tile_count, Base::get_local_gpu(id).get_stream()); size_t value_head = hash_tables[id]->get_and_add_value_head(tile_count, Base::get_local_gpu(id).get_stream()); } // memcpy hash_table_value from CPU to GPU for (size_t id = 0; id < local_gpu_count; id++) { context.set_device(Base::get_local_gpu(id).get_device_id()); size_t value_chunk_size = tile_counter_in_chunk_per_gpu[id] * embedding_vec_size; size_t value_chunk_offset = tile_counter_per_gpu[id] * embedding_vec_size; float *src_buf = h_hash_table_value_chunk_per_gpu[id]; float *dst_buf = embedding_tensors[id].get_ptr() + value_chunk_offset; CK_CUDA_THROW_(hipMemcpyAsync(dst_buf, src_buf, value_chunk_size * sizeof(float), hipMemcpyHostToDevice, Base::get_local_gpu(id).get_stream())); } functors_.sync_all_gpus(Base::get_resource_manager()); // set counter value for (size_t id = 0; id < local_gpu_count; id++) { tile_counter_per_gpu[id] += tile_counter_in_chunk_per_gpu[id]; tile_counter_in_chunk_per_gpu[id] = 0; // reset chunk counter to zero if (tile_counter_per_gpu[id] > max_vocabulary_size_per_gpu) { char msg[100]{0}; sprintf(msg, "The size of hash table on GPU %zu is out of range %zu\n", id, max_vocabulary_size_per_gpu); CK_THROW_(Error_t::OutOfBound, msg); } } } // end of for(int i = 0; i < loop_num; i++) // process the remaining data(less than a chunk) size_t remain_loop_num = num - loop_num * chunk_size; TypeHashKey *key_dst_buf; size_t *value_index_buf; float *value_dst_buf; for (size_t i = 0; i < remain_loop_num; i++) { TypeHashKey key = key_ptr[loop_num * chunk_size + i]; size_t gid = key % Base::get_resource_manager().get_global_gpu_count(); // global GPU ID size_t id = Base::get_resource_manager().get_gpu_local_id_from_global_id( gid); // local GPU ID (not gpudevice id) int dst_rank = Base::get_resource_manager().get_process_id_from_gpu_global_id(gid); if (my_rank == dst_rank) { context.set_device(Base::get_local_gpu(id).get_device_id()); // memcpy hash_table_key from CPU to GPU key_dst_buf = d_hash_table_key_chunk_per_gpu[id]; CK_CUDA_THROW_(hipMemcpyAsync(key_dst_buf, &key, hash_table_key_tile_size_in_B, hipMemcpyHostToDevice, Base::get_local_gpu(id).get_stream())); // set value_index size_t value_index_offset = tile_counter_per_gpu[id]; value_index_buf = d_hash_table_value_index_chunk_per_gpu[id]; functors_.memset_liner(value_index_buf, value_index_offset, 1ul, 1ul, Base::get_local_gpu(id).get_stream()); // do hash table insert <key, value_index> on GPU hash_tables[id]->insert(d_hash_table_key_chunk_per_gpu[id], value_index_buf, hash_table_key_tile_size, Base::get_local_gpu(id).get_stream()); size_t value_head = hash_tables[id]->get_and_add_value_head( hash_table_key_tile_size, Base::get_local_gpu(id).get_stream()); // memcpy hash_table_value from CPU to GPU size_t value_offset = tile_counter_per_gpu[id] * embedding_vec_size; value_dst_buf = embedding_tensors[id].get_ptr() 
+ value_offset; CK_CUDA_THROW_(hipMemcpyAsync( value_dst_buf, embedding_ptr + (loop_num * chunk_size + i) * embedding_vec_size, hash_table_value_tile_size_in_B, hipMemcpyHostToDevice, Base::get_local_gpu(id).get_stream())); // set counter tile_counter_per_gpu[id] += hash_table_key_tile_size; } else { continue; } // sync wait functors_.sync_all_gpus(Base::get_resource_manager()); } // end of if(remain_loop_num) // release resources for (size_t id = 0; id < local_gpu_count; id++) { context.set_device(Base::get_local_gpu(id).get_device_id()); CK_CUDA_THROW_(hipFree(d_hash_table_value_index_chunk_per_gpu[id])); CK_CUDA_THROW_(hipFree(d_hash_table_key_chunk_per_gpu[id])); } for (size_t id = 0; id < local_gpu_count; id++) { CK_CUDA_THROW_(hipHostFree(h_hash_table_key_chunk_per_gpu[id])); CK_CUDA_THROW_(hipHostFree(h_hash_table_value_chunk_per_gpu[id])); } } template <typename TypeHashKey, typename TypeEmbeddingComp> void DistributedSlotSparseEmbeddingHash<TypeHashKey, TypeEmbeddingComp>::dump_parameters( std::string sparse_model) const { dump_parameters(sparse_model, max_vocabulary_size_, Base::get_embedding_vec_size(), hash_table_value_tensors_, hash_tables_); } template <typename TypeHashKey, typename TypeEmbeddingComp> void DistributedSlotSparseEmbeddingHash<TypeHashKey, TypeEmbeddingComp>::dump_parameters( BufferBag &buf_bag, size_t *num) const { TensorBag2 keys_bag = buf_bag.keys; Tensor2<float> &embeddings = buf_bag.embedding; Tensor2<TypeHashKey> keys = Tensor2<TypeHashKey>::stretch_from(keys_bag); dump_parameters(keys, embeddings, num, max_vocabulary_size_, Base::get_embedding_vec_size(), hash_table_value_tensors_, hash_tables_); } template <typename TypeHashKey, typename TypeEmbeddingComp> void DistributedSlotSparseEmbeddingHash<TypeHashKey, TypeEmbeddingComp>::dump_parameters( const std::string &sparse_model, size_t vocabulary_size, size_t embedding_vec_size, const Tensors2<float> &hash_table_value_tensors, const std::vector<std::shared_ptr<HashTable<TypeHashKey, size_t>>> &hash_tables) const { CudaDeviceContext context; size_t local_gpu_count = Base::get_resource_manager().get_local_gpu_count(); if (!fs::exists(sparse_model)) { fs::create_directory(sparse_model); } const std::string key_file(sparse_model + "/" + sparse_model + ".key"); const std::string vec_file(sparse_model + "/" + sparse_model + ".vec"); #ifdef ENABLE_MPI MPI_File key_fh, vec_fh; CK_MPI_THROW_( MPI_File_open(MPI_COMM_WORLD, key_file.c_str(), MPI_MODE_CREATE | MPI_MODE_WRONLY, MPI_INFO_NULL, &key_fh)); CK_MPI_THROW_( MPI_File_open(MPI_COMM_WORLD, vec_file.c_str(), MPI_MODE_CREATE | MPI_MODE_WRONLY, MPI_INFO_NULL, &vec_fh)); #else std::ofstream key_stream(key_file, std::ofstream::binary | std::ofstream::trunc); std::ofstream vec_stream(vec_file, std::ofstream::binary | std::ofstream::trunc); // check if the file is opened successfully if (!vec_stream.is_open() || !key_stream.is_open()) { CK_THROW_(Error_t::WrongInput, "Error: file not open for writing"); return; } #endif // memory allocation std::unique_ptr<size_t[]> count(new size_t[local_gpu_count]); size_t total_count = 0; for (size_t id = 0; id < local_gpu_count; id++) { context.set_device(Base::get_local_gpu(id).get_device_id()); auto count_tmp = hash_tables[id]->get_size(Base::get_local_gpu(id).get_stream()); if (count_tmp != hash_tables[id]->get_value_head(Base::get_local_gpu(id).get_stream())) { CK_THROW_(Error_t::WrongInput, "Error: hash_table get_value_head() size not equal to get_size()"); } count[id] = count_tmp; total_count += count[id]; } if (total_count 
> (size_t)vocabulary_size) { CK_THROW_(Error_t::WrongInput, "Error: required download size is larger than hash table vocabulary_size"); } std::vector<size_t> offset_host(local_gpu_count, 0); std::exclusive_scan(count.get(), count.get() + local_gpu_count, offset_host.begin(), 0); TypeHashKey *h_hash_table_key; float *h_hash_table_value; CK_CUDA_THROW_(hipHostMalloc(&h_hash_table_key, total_count * sizeof(TypeHashKey))); CK_CUDA_THROW_(hipHostMalloc(&h_hash_table_value, total_count * embedding_vec_size * sizeof(float))); std::unique_ptr<TypeHashKey *[]> d_hash_table_key(new TypeHashKey *[local_gpu_count]); std::unique_ptr<size_t *[]> d_hash_table_value_index(new size_t *[local_gpu_count]); std::unique_ptr<size_t *[]> d_dump_counter(new size_t *[local_gpu_count]); for (size_t id = 0; id < local_gpu_count; id++) { if (count[id] == 0) continue; context.set_device(Base::get_local_gpu(id).get_device_id()); CK_CUDA_THROW_(hipMallocManaged(&d_hash_table_key[id], count[id] * sizeof(TypeHashKey))); CK_CUDA_THROW_(hipMallocManaged(&d_hash_table_value_index[id], count[id] * sizeof(size_t))); CK_CUDA_THROW_(hipMalloc(&d_dump_counter[id], sizeof(size_t))); } // dump hash table from GPUs for (size_t id = 0; id < local_gpu_count; id++) { if (count[id] == 0) continue; context.set_device(Base::get_local_gpu(id).get_device_id()); hash_tables[id]->dump(d_hash_table_key[id], d_hash_table_value_index[id], d_dump_counter[id], Base::get_local_gpu(id).get_stream()); CK_CUDA_THROW_(hipMemcpyAsync(h_hash_table_value + offset_host[id] * embedding_vec_size, hash_table_value_tensors[id].get_ptr(), count[id] * embedding_vec_size * sizeof(float), hipMemcpyDeviceToHost, Base::get_local_gpu(id).get_stream())); } functors_.sync_all_gpus(Base::get_resource_manager()); // sort key according to memory index for (size_t id = 0; id < local_gpu_count; id++) { if (count[id] == 0) continue; context.set_device(Base::get_local_gpu(id).get_device_id()); thrust::sort_by_key(thrust::device, d_hash_table_value_index[id], d_hash_table_value_index[id] + count[id], d_hash_table_key[id]); CK_CUDA_THROW_(hipMemcpyAsync(h_hash_table_key + offset_host[id], d_hash_table_key[id], count[id] * sizeof(TypeHashKey), hipMemcpyDeviceToHost, Base::get_local_gpu(id).get_stream())); } functors_.sync_all_gpus(Base::get_resource_manager()); const size_t key_size = sizeof(TypeHashKey); const size_t vec_size = sizeof(float) * embedding_vec_size; // write sparse model to file MESSAGE_("Rank" + std::to_string(Base::get_resource_manager().get_process_id()) + ": Write hash table to file", true); #ifdef ENABLE_MPI int my_rank = Base::get_resource_manager().get_process_id(); int n_ranks = Base::get_resource_manager().get_num_process(); std::vector<size_t> offset_per_rank(n_ranks, 0); CK_MPI_THROW_(MPI_Allgather(&total_count, sizeof(size_t), MPI_CHAR, offset_per_rank.data(), sizeof(size_t), MPI_CHAR, MPI_COMM_WORLD)); std::exclusive_scan(offset_per_rank.begin(), offset_per_rank.end(), offset_per_rank.begin(), 0); size_t key_offset = offset_per_rank[my_rank] * key_size; size_t vec_offset = offset_per_rank[my_rank] * vec_size; CK_MPI_THROW_(MPI_Barrier(MPI_COMM_WORLD)); MPI_Status status; CK_MPI_THROW_(MPI_File_write_at(key_fh, key_offset, h_hash_table_key, total_count * key_size, MPI_CHAR, &status)); CK_MPI_THROW_(MPI_File_write_at(vec_fh, vec_offset, h_hash_table_value, total_count * vec_size, MPI_CHAR, &status)); CK_MPI_THROW_(MPI_File_close(&key_fh)); CK_MPI_THROW_(MPI_File_close(&vec_fh)); #else key_stream.write(reinterpret_cast<char*>(h_hash_table_key), 
total_count * key_size); vec_stream.write(reinterpret_cast<char*>(h_hash_table_value), total_count * vec_size); #endif for (size_t id = 0; id < local_gpu_count; id++) { if (count[id] == 0) continue; context.set_device(Base::get_local_gpu(id).get_device_id()); CK_CUDA_THROW_(hipFree(d_hash_table_key[id])); CK_CUDA_THROW_(hipFree(d_hash_table_value_index[id])); CK_CUDA_THROW_(hipFree(d_dump_counter[id])); } CK_CUDA_THROW_(hipHostFree(h_hash_table_key)); CK_CUDA_THROW_(hipHostFree(h_hash_table_value)); } template <typename TypeHashKey, typename TypeEmbeddingComp> void DistributedSlotSparseEmbeddingHash<TypeHashKey, TypeEmbeddingComp>::dump_parameters( Tensor2<TypeHashKey> &keys, Tensor2<float> &embeddings, size_t *num, size_t vocabulary_size, size_t embedding_vec_size, const Tensors2<float> &embedding_tensors, const std::vector<std::shared_ptr<HashTable<TypeHashKey, size_t>>> &hash_tables) const { TypeHashKey *key_ptr = keys.get_ptr(); float *embedding_ptr = embeddings.get_ptr(); size_t local_gpu_count = Base::get_resource_manager().get_local_gpu_count(); // memory allocation std::unique_ptr<size_t[]> count(new size_t[local_gpu_count]); size_t max_count = 0; size_t total_count = 0; CudaDeviceContext context; for (size_t id = 0; id < local_gpu_count; id++) { context.set_device(Base::get_local_gpu(id).get_device_id()); auto count_tmp_1 = hash_tables[id]->get_size(Base::get_local_gpu(id).get_stream()); auto count_tmp_2 = hash_tables[id]->get_value_head(Base::get_local_gpu(id).get_stream()); if (count_tmp_1 != count_tmp_2) { CK_THROW_(Error_t::WrongInput, "Error: hash_table get_value_head() size not equal to get_size()"); } count[id] = count_tmp_1; max_count = max(max_count, count[id]); total_count += count[id]; } if (total_count > (size_t)vocabulary_size) { CK_THROW_(Error_t::WrongInput, "Error: required download size is larger than hash table vocabulary_size"); } std::unique_ptr<TypeHashKey *[]> h_hash_table_key(new TypeHashKey *[local_gpu_count]); std::unique_ptr<TypeHashKey *[]> d_hash_table_key(new TypeHashKey *[local_gpu_count]); std::unique_ptr<size_t *[]> d_hash_table_value_index(new size_t *[local_gpu_count]); std::unique_ptr<float *[]> h_hash_table_value(new float *[local_gpu_count]); std::unique_ptr<float *[]> d_hash_table_value(new float *[local_gpu_count]); std::unique_ptr<size_t *[]> d_dump_counter(new size_t *[local_gpu_count]); for (size_t id = 0; id < local_gpu_count; id++) { if (count[id] == 0) { continue; } context.set_device(Base::get_local_gpu(id).get_device_id()); CK_CUDA_THROW_(hipHostMalloc(&h_hash_table_key[id], count[id] * sizeof(TypeHashKey))); CK_CUDA_THROW_(hipMallocManaged(&d_hash_table_key[id], count[id] * sizeof(TypeHashKey))); CK_CUDA_THROW_(hipMallocManaged(&d_hash_table_value_index[id], count[id] * sizeof(size_t))); CK_CUDA_THROW_(hipHostMalloc(&h_hash_table_value[id], count[id] * embedding_vec_size * sizeof(float))); CK_CUDA_THROW_(hipMallocManaged(&d_hash_table_value[id], count[id] * embedding_vec_size * sizeof(float))); CK_CUDA_THROW_(hipMallocManaged(&d_dump_counter[id], count[id] * sizeof(size_t))); } // dump hash table from GPUs for (size_t id = 0; id < local_gpu_count; id++) { if (count[id] == 0) { continue; } context.set_device(Base::get_local_gpu(id).get_device_id()); hash_tables[id]->dump(d_hash_table_key[id], d_hash_table_value_index[id], d_dump_counter[id], Base::get_local_gpu(id).get_stream()); CK_CUDA_THROW_(hipMemcpyAsync(h_hash_table_key[id], d_hash_table_key[id], count[id] * sizeof(TypeHashKey), hipMemcpyDeviceToHost, 
Base::get_local_gpu(id).get_stream())); functors_.get_hash_value(count[id], embedding_vec_size, d_hash_table_value_index[id], embedding_tensors[id].get_ptr(), d_hash_table_value[id], Base::get_local_gpu(id).get_stream()); CK_CUDA_THROW_(hipMemcpyAsync(h_hash_table_value[id], d_hash_table_value[id], count[id] * embedding_vec_size * sizeof(float), hipMemcpyDeviceToHost, Base::get_local_gpu(id).get_stream())); } // sync wait functors_.sync_all_gpus(Base::get_resource_manager()); const size_t key_size = sizeof(TypeHashKey); const size_t value_size = sizeof(float) * embedding_vec_size; size_t offset = 0; for (size_t id = 0; id < local_gpu_count; id++) { size_t size_in_B = count[id] * (sizeof(TypeHashKey) + sizeof(float) * embedding_vec_size); for (unsigned int k = 0; k < count[id]; k++) { memcpy(key_ptr + offset, h_hash_table_key[id] + k, key_size); memcpy(embedding_ptr + offset * embedding_vec_size, h_hash_table_value[id] + k * embedding_vec_size, value_size); offset += 1; } } *num = offset; for (size_t id = 0; id < local_gpu_count; id++) { if (count[id] == 0) { continue; } context.set_device(Base::get_local_gpu(id).get_device_id()); CK_CUDA_THROW_(hipHostFree(h_hash_table_key[id])); CK_CUDA_THROW_(hipFree(d_hash_table_key[id])); CK_CUDA_THROW_(hipFree(d_hash_table_value_index[id])); CK_CUDA_THROW_(hipHostFree(h_hash_table_value[id])); CK_CUDA_THROW_(hipFree(d_hash_table_value[id])); CK_CUDA_THROW_(hipFree(d_dump_counter[id])); } return; } template <typename TypeHashKey, typename TypeEmbeddingComp> void DistributedSlotSparseEmbeddingHash<TypeHashKey, TypeEmbeddingComp>::dump_opt_states( std::ofstream& stream) { std::vector<OptimizerTensor<TypeEmbeddingComp>> opt_tensors_; for(auto &opt: embedding_optimizers_){ opt_tensors_.push_back(opt.opt_tensors_); } auto opt_states = functors_.get_opt_states(opt_tensors_, Base::get_optimizer(), Base::get_resource_manager().get_local_gpu_count()); functors_.dump_opt_states(stream, Base::get_resource_manager(), opt_states); } template <typename TypeHashKey, typename TypeEmbeddingComp> void DistributedSlotSparseEmbeddingHash<TypeHashKey, TypeEmbeddingComp>::load_opt_states( std::ifstream& stream) { std::vector<OptimizerTensor<TypeEmbeddingComp>> opt_tensors_; for(auto &opt: embedding_optimizers_){ opt_tensors_.push_back(opt.opt_tensors_); } auto opt_states = functors_.get_opt_states(opt_tensors_, Base::get_optimizer(), Base::get_resource_manager().get_local_gpu_count()); functors_.load_opt_states(stream, Base::get_resource_manager(), opt_states); } template <typename TypeHashKey, typename TypeEmbeddingComp> void DistributedSlotSparseEmbeddingHash<TypeHashKey, TypeEmbeddingComp>::init_embedding( size_t max_vocabulary_size_per_gpu, size_t embedding_vec_size, Tensors2<float> &hash_table_value_tensors) { #pragma omp parallel num_threads(Base::get_resource_manager().get_local_gpu_count()) { size_t id = omp_get_thread_num(); CudaDeviceContext context(Base::get_local_gpu(id).get_device_id()); MESSAGE_("gpu" + std::to_string(id) + " start to init embedding"); HugeCTR::UniformGenerator::fill(hash_table_value_tensors[id], -0.05f, 0.05f, Base::get_local_gpu(id).get_sm_count(), Base::get_local_gpu(id).get_replica_variant_curand_generator(), Base::get_local_gpu(id).get_stream()); CK_CUDA_THROW_(hipStreamSynchronize(Base::get_local_gpu(id).get_stream())); MESSAGE_("gpu" + std::to_string(id) + " init embedding done"); } } template <typename TypeHashKey, typename TypeEmbeddingComp> void DistributedSlotSparseEmbeddingHash<TypeHashKey, TypeEmbeddingComp>::reset() { 
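// Editor's note: reset() below clears every per-GPU hash table and refills the
// embedding table uniformly in [-0.05, 0.05] (the same range init_embedding
// uses above), then synchronizes each local GPU's stream.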
CudaDeviceContext context; for (size_t i = 0; i < Base::get_resource_manager().get_local_gpu_count(); i++) { context.set_device(Base::get_local_gpu(i).get_device_id()); hash_tables_[i]->clear(Base::get_local_gpu(i).get_stream()); HugeCTR::UniformGenerator::fill(hash_table_value_tensors_[i], -0.05f, 0.05f, Base::get_local_gpu(i).get_sm_count(), Base::get_local_gpu(i).get_replica_variant_curand_generator(), Base::get_local_gpu(i).get_stream()); } for (size_t id = 0; id < Base::get_resource_manager().get_local_gpu_count(); id++) { CK_CUDA_THROW_(hipStreamSynchronize(Base::get_local_gpu(id).get_stream())); } } template class DistributedSlotSparseEmbeddingHash<unsigned int, float>; template class DistributedSlotSparseEmbeddingHash<long long, float>; template class DistributedSlotSparseEmbeddingHash<unsigned int, __half>; template class DistributedSlotSparseEmbeddingHash<long long, __half>; } // namespace HugeCTR
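// ---------------------------------------------------------------------------
// Editor's note: in load_parameters() above, every key is routed to a GPU by
// key % global_gpu_count, and only the rank owning that GPU stages the tile in
// its pinned host chunk before the async H2D copy. Below is a minimal,
// self-contained C++ sketch of that routing rule only. The helpers
// owner_gpu()/owner_rank() and the rank layout (consecutive local GPUs per
// rank) are illustrative assumptions, not HugeCTR API.
// ---------------------------------------------------------------------------
#include <cstddef>
#include <cstdio>

// Global GPU id for a key, exactly as computed in the chunked upload loop.
static size_t owner_gpu(size_t key, size_t global_gpu_count) {
  return key % global_gpu_count;
}

// Owning rank, assuming each rank holds a consecutive block of local GPUs.
static int owner_rank(size_t gid, size_t gpus_per_rank) {
  return static_cast<int>(gid / gpus_per_rank);
}

int main() {
  const size_t global_gpus = 4, gpus_per_rank = 2;
  for (size_t key = 0; key < 8; ++key) {
    const size_t gid = owner_gpu(key, global_gpus);
    std::printf("key %zu -> global gpu %zu (local %zu) on rank %d\n", key, gid,
                gid % gpus_per_rank, owner_rank(gid, gpus_per_rank));
  }
  return 0;
}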
fccadb016e38e651478ff5afb191ed28fefa8a22.cu
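// Editor's note: CUDA source counterpart of the hipified embedding file above;
// the two bodies are identical up to runtime API names.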
/* * Copyright (c) 2020, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "HugeCTR/include/data_simulator.hpp" #include "HugeCTR/include/embeddings/distributed_slot_sparse_embedding_hash.hpp" #include "HugeCTR/include/utils.cuh" #include <numeric> #include <experimental/filesystem> #include <thrust/sort.h> #include <thrust/execution_policy.h> namespace fs = std::experimental::filesystem; namespace HugeCTR { template <typename TypeHashKey, typename TypeEmbeddingComp> DistributedSlotSparseEmbeddingHash<TypeHashKey, TypeEmbeddingComp>:: DistributedSlotSparseEmbeddingHash( const Tensors2<TypeHashKey> &train_row_offsets_tensors, const Tensors2<TypeHashKey> &train_value_tensors, const std::vector<std::shared_ptr<size_t>> &train_nnz_array, const Tensors2<TypeHashKey> &evaluate_row_offsets_tensors, const Tensors2<TypeHashKey> &evaluate_value_tensors, const std::vector<std::shared_ptr<size_t>> &evaluate_nnz_array, const SparseEmbeddingHashParams &embedding_params, const std::shared_ptr<ResourceManager> &resource_manager) : Base(train_row_offsets_tensors, train_value_tensors, train_nnz_array, evaluate_row_offsets_tensors, evaluate_value_tensors, evaluate_nnz_array, Embedding_t::DistributedSlotSparseEmbeddingHash, embedding_params, resource_manager) { try { // CAUSION: can not decide how many <key,value> pairs in each GPU, because the GPU // distribution is computed by (key%gpu_count). In order to not allocate the total size of // hash table on each GPU, meanwhile get a better performance by a unfull hash table, the // users need to set the param "load_factor"(load_factor<1). 
max_vocabulary_size_per_gpu_ = Base::get_max_vocabulary_size_per_gpu(); max_vocabulary_size_ = max_vocabulary_size_per_gpu_ * Base::get_resource_manager().get_global_gpu_count(); MESSAGE_("max_vocabulary_size_per_gpu_=" + std::to_string(max_vocabulary_size_per_gpu_)); CudaDeviceContext context; for (size_t id = 0; id < Base::get_resource_manager().get_local_gpu_count(); id++) { context.set_device(Base::get_local_gpu(id).get_device_id()); // new GeneralBuffer objects const std::shared_ptr<GeneralBuffer2<CudaAllocator>> &buf = Base::get_buffer(id); embedding_optimizers_.emplace_back(max_vocabulary_size_per_gpu_, Base::embedding_params_, buf); // new hash table value vectors { Tensor2<float> tensor; buf->reserve({max_vocabulary_size_per_gpu_, Base::get_embedding_vec_size()}, &tensor); hash_table_value_tensors_.push_back(tensor); } // new hash table value_index that get() from HashTable { Tensor2<size_t> tensor; buf->reserve({1, Base::get_universal_batch_size() * Base::get_max_feature_num()}, &tensor); hash_value_index_tensors_.push_back(tensor); } // new embedding features reduced by hash table values(results of forward) { Tensor2<TypeEmbeddingComp> tensor; buf->reserve({Base::get_universal_batch_size() * Base::get_slot_num(), Base::get_embedding_vec_size()}, &tensor); embedding_feature_tensors_.push_back(tensor); } // new wgrad used by backward { Tensor2<TypeEmbeddingComp> tensor; buf->reserve( {Base::get_batch_size(true) * Base::get_slot_num(), Base::get_embedding_vec_size()}, &tensor); wgrad_tensors_.push_back(tensor); } // new temp tensors used by update_params { Tensor2<TypeHashKey> tensor; buf->reserve({1, Base::get_universal_batch_size() * Base::get_slot_num() + 1}, &tensor); row_offset_allreduce_tensors_.push_back(tensor); } { Tensor2<TypeEmbeddingComp> tensor; buf->reserve({Base::get_universal_batch_size() * Base::get_slot_num(), Base::get_embedding_vec_size()}, &tensor); utest_forward_temp_tensors_.push_back(tensor); } // init GenenralBuffers to do real allocation #ifndef NDEBUG std::cout << " max_feature_num_:" << Base::get_max_feature_num() << std::endl; #endif } hash_tables_.resize(Base::get_resource_manager().get_local_gpu_count()); #pragma omp parallel num_threads(Base::get_resource_manager().get_local_gpu_count()) { size_t id = omp_get_thread_num(); CudaDeviceContext context(Base::get_local_gpu(id).get_device_id()); // construct HashTable object: used to store hash table <key, value_index> hash_tables_[id].reset(new NvHashTable(max_vocabulary_size_per_gpu_)); Base::get_buffer(id)->allocate(); } for (size_t id = 0; id < Base::get_resource_manager().get_local_gpu_count(); id++) { context.set_device(Base::get_local_gpu(id).get_device_id()); embedding_optimizers_[id].initialize(Base::get_local_gpu(id)); } // end of for(int id = 0; id < Base::get_local_gpu_count(); id++) functors_.sync_all_gpus(Base::get_resource_manager()); } catch (const std::runtime_error &rt_err) { std::cerr << rt_err.what() << std::endl; throw; } return; } template <typename TypeHashKey, typename TypeEmbeddingComp> void DistributedSlotSparseEmbeddingHash<TypeHashKey, TypeEmbeddingComp>::load_parameters( std::string sparse_model) { if (!fs::exists(sparse_model)) { CK_THROW_(Error_t::WrongInput, std::string("Folder ") + sparse_model + " doesn't exist"); } const std::string key_file(sparse_model + "/" + sparse_model + ".key"); const std::string vec_file(sparse_model + "/" + sparse_model + ".vec"); std::ifstream key_stream(key_file, std::ifstream::binary); std::ifstream vec_stream(vec_file, std::ifstream::binary); 
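// Editor's note: the sparse model directory stores two flat binary files,
// <sparse_model>.key (key_num * sizeof(TypeHashKey) bytes) and
// <sparse_model>.vec (key_num * embedding_vec_size * sizeof(float) bytes);
// the checks below require both sizes to divide evenly and to describe the
// same number of records.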
// check if file is opened successfully if (!vec_stream.is_open() || !key_stream.is_open()) { CK_THROW_(Error_t::WrongInput, "Error: file not open for reading"); } size_t key_file_size_in_byte = fs::file_size(key_file); size_t vec_file_size_in_byte = fs::file_size(vec_file); size_t key_size = sizeof(TypeHashKey); size_t vec_size = sizeof(float) * Base::get_embedding_vec_size(); size_t key_num = key_file_size_in_byte / key_size; size_t vec_num = vec_file_size_in_byte / vec_size; if (key_num != vec_num || key_file_size_in_byte % key_size != 0 || vec_file_size_in_byte % vec_size != 0) { CK_THROW_(Error_t::WrongInput, "Error: file size is not correct"); } auto blobs_buff = GeneralBuffer2<CudaHostAllocator>::create(); Tensor2<TypeHashKey> keys; blobs_buff->reserve({key_num}, &keys); Tensor2<float> embeddings; blobs_buff->reserve({vec_num, Base::get_embedding_vec_size()}, &embeddings); blobs_buff->allocate(); TypeHashKey *key_ptr = keys.get_ptr(); float *embedding_ptr = embeddings.get_ptr(); key_stream.read(reinterpret_cast<char *>(key_ptr), key_file_size_in_byte); vec_stream.read(reinterpret_cast<char *>(embedding_ptr), vec_file_size_in_byte); load_parameters(keys, embeddings, key_num, max_vocabulary_size_, Base::get_embedding_vec_size(), max_vocabulary_size_per_gpu_, hash_table_value_tensors_, hash_tables_); return; } template <typename TypeHashKey, typename TypeEmbeddingComp> void DistributedSlotSparseEmbeddingHash<TypeHashKey, TypeEmbeddingComp>::load_parameters( BufferBag &buf_bag, size_t num) { const TensorBag2 &keys_bag = buf_bag.keys; const Tensor2<float> &embeddings = buf_bag.embedding; const Tensor2<TypeHashKey> keys = Tensor2<TypeHashKey>::stretch_from(keys_bag); load_parameters(keys, embeddings, num, max_vocabulary_size_, Base::get_embedding_vec_size(), max_vocabulary_size_per_gpu_, hash_table_value_tensors_, hash_tables_); } template <typename TypeHashKey, typename TypeEmbeddingComp> void DistributedSlotSparseEmbeddingHash<TypeHashKey, TypeEmbeddingComp>::load_parameters( const Tensor2<TypeHashKey> &keys, const Tensor2<float> &embeddings, size_t num, size_t vocabulary_size, size_t embedding_vec_size, size_t max_vocabulary_size_per_gpu, Tensors2<float> &embedding_tensors, std::vector<std::shared_ptr<HashTable<TypeHashKey, size_t>>> &hash_tables) { if (keys.get_dimensions()[0] < num || embeddings.get_dimensions()[0] < num) { CK_THROW_(Error_t::WrongInput, "The rows of keys and embeddings are not consistent."); } if (num > vocabulary_size) { CK_THROW_(Error_t::WrongInput, "Error: hash table file size is larger than hash table vocabulary_size"); } const TypeHashKey *key_ptr = keys.get_ptr(); const float *embedding_ptr = embeddings.get_ptr(); int my_rank = Base::get_resource_manager().get_process_id(); int n_ranks = Base::get_resource_manager().get_num_process(); // define size size_t local_gpu_count = Base::get_resource_manager().get_local_gpu_count(); const size_t chunk_size = 1000; size_t hash_table_key_tile_size = 1; size_t hash_table_key_tile_size_in_B = hash_table_key_tile_size * sizeof(TypeHashKey); size_t hash_table_key_chunk_size = hash_table_key_tile_size * chunk_size; size_t hash_table_key_chunk_size_in_B = hash_table_key_chunk_size * sizeof(TypeHashKey); size_t hash_table_value_index_chunk_size_in_B = hash_table_key_chunk_size * sizeof(size_t); size_t hash_table_value_tile_size = embedding_vec_size; size_t hash_table_value_tile_size_in_B = hash_table_value_tile_size * sizeof(float); size_t hash_table_value_chunk_size = hash_table_value_tile_size * chunk_size; size_t 
hash_table_value_chunk_size_in_B = hash_table_value_chunk_size * sizeof(float); // CAUTION: can not decide how many values for each GPU, so need to allocate enough memory // for each GPU allocate GPU memory for hash_table_value_index std::unique_ptr<size_t[]> tile_counter_per_gpu( new size_t[local_gpu_count]); // <= hash_table_value_index_per_gpu_size memset(tile_counter_per_gpu.get(), 0, sizeof(size_t) * local_gpu_count); std::unique_ptr<size_t[]> tile_counter_in_chunk_per_gpu(new size_t[local_gpu_count]); memset(tile_counter_in_chunk_per_gpu.get(), 0, sizeof(size_t) * local_gpu_count); std::unique_ptr<size_t *[]> d_hash_table_value_index_chunk_per_gpu(new size_t *[local_gpu_count]); CudaDeviceContext context; for (size_t id = 0; id < local_gpu_count; id++) { context.set_device(Base::get_local_gpu(id).get_device_id()); CK_CUDA_THROW_(cudaMalloc(&d_hash_table_value_index_chunk_per_gpu[id], hash_table_value_index_chunk_size_in_B)); // initialize to zeros CK_CUDA_THROW_(cudaMemsetAsync(d_hash_table_value_index_chunk_per_gpu[id], 0, hash_table_value_index_chunk_size_in_B, Base::get_local_gpu(id).get_stream())); } // sync wait functors_.sync_all_gpus(Base::get_resource_manager()); // CAUTION: can not decide how many values for each GPU, so need to allocate enough memory // for each GPU allocate CPU/GPU memory for hash_table/key/value chunk std::unique_ptr<TypeHashKey *[]> h_hash_table_key_chunk_per_gpu( new TypeHashKey *[local_gpu_count]); for (size_t id = 0; id < local_gpu_count; id++) { CK_CUDA_THROW_( cudaMallocHost(&h_hash_table_key_chunk_per_gpu[id], hash_table_key_chunk_size_in_B)); } std::unique_ptr<TypeHashKey *[]> d_hash_table_key_chunk_per_gpu( new TypeHashKey *[local_gpu_count]); for (size_t id = 0; id < local_gpu_count; id++) { context.set_device(Base::get_local_gpu(id).get_device_id()); CK_CUDA_THROW_(cudaMalloc(&d_hash_table_key_chunk_per_gpu[id], hash_table_key_chunk_size_in_B)); } std::unique_ptr<float *[]> h_hash_table_value_chunk_per_gpu(new float *[local_gpu_count]); for (size_t id = 0; id < local_gpu_count; id++) { CK_CUDA_THROW_(cudaMallocHost(&h_hash_table_value_chunk_per_gpu[id], hash_table_value_chunk_size_in_B)); } // do upload size_t loop_num = num / chunk_size; for (size_t i = 0; i < loop_num; i++) { TypeHashKey *key_dst_buf; float *value_dst_buf; for (size_t k = 0; k < chunk_size; k++) { // process a tile in each loop TypeHashKey key = key_ptr[i * chunk_size + k]; size_t gid = key % Base::get_resource_manager().get_global_gpu_count(); // global GPU ID size_t id = Base::get_resource_manager().get_gpu_local_id_from_global_id( gid); // local GPU ID (not gpudevice id) int dst_rank = Base::get_resource_manager().get_process_id_from_gpu_global_id(gid); // node id if (my_rank == dst_rank) { // memcpy hash_table_key to corresponding GPU key_dst_buf = h_hash_table_key_chunk_per_gpu[id] + tile_counter_in_chunk_per_gpu[id] * hash_table_key_tile_size; *key_dst_buf = key; // memcpy hash_table_value to corresponding GPU value_dst_buf = h_hash_table_value_chunk_per_gpu[id] + tile_counter_in_chunk_per_gpu[id] * hash_table_value_tile_size; memcpy(value_dst_buf, embedding_ptr + (i * chunk_size + k) * embedding_vec_size, hash_table_value_tile_size_in_B); tile_counter_in_chunk_per_gpu[id] += 1; } else { continue; } } // end of for(int k = 0; k < (chunk_loop * local_gpu_count); k++) // do HashTable insert <key,value_index> for (size_t id = 0; id < local_gpu_count; id++) { context.set_device(Base::get_local_gpu(id).get_device_id()); size_t tile_count = tile_counter_in_chunk_per_gpu[id]; 
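// Editor's note: for each staged chunk, memset_liner below fills
// value_index_buf with consecutive indices starting at value_index_offset
// (stride 1ul), so a key's row in embedding_tensors[id] is simply its arrival
// order on that GPU; tile_counter_per_gpu[id] tracks the next free row.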
// memcpy hash_table_key from CPU to GPU CK_CUDA_THROW_(cudaMemcpyAsync(d_hash_table_key_chunk_per_gpu[id], h_hash_table_key_chunk_per_gpu[id], tile_count * sizeof(TypeHashKey), cudaMemcpyHostToDevice, Base::get_local_gpu(id).get_stream())); size_t value_index_offset = tile_counter_per_gpu[id]; size_t *value_index_buf = d_hash_table_value_index_chunk_per_gpu[id]; if (tile_count > 0) { // set hash_table_value_index on GPU functors_.memset_liner(value_index_buf, value_index_offset, 1ul, tile_count, Base::get_local_gpu(id).get_stream()); } // do hash table insert <key, value_index> on GPU hash_tables[id]->insert(d_hash_table_key_chunk_per_gpu[id], value_index_buf, tile_count, Base::get_local_gpu(id).get_stream()); size_t value_head = hash_tables[id]->get_and_add_value_head(tile_count, Base::get_local_gpu(id).get_stream()); } // memcpy hash_table_value from CPU to GPU for (size_t id = 0; id < local_gpu_count; id++) { context.set_device(Base::get_local_gpu(id).get_device_id()); size_t value_chunk_size = tile_counter_in_chunk_per_gpu[id] * embedding_vec_size; size_t value_chunk_offset = tile_counter_per_gpu[id] * embedding_vec_size; float *src_buf = h_hash_table_value_chunk_per_gpu[id]; float *dst_buf = embedding_tensors[id].get_ptr() + value_chunk_offset; CK_CUDA_THROW_(cudaMemcpyAsync(dst_buf, src_buf, value_chunk_size * sizeof(float), cudaMemcpyHostToDevice, Base::get_local_gpu(id).get_stream())); } functors_.sync_all_gpus(Base::get_resource_manager()); // set counter value for (size_t id = 0; id < local_gpu_count; id++) { tile_counter_per_gpu[id] += tile_counter_in_chunk_per_gpu[id]; tile_counter_in_chunk_per_gpu[id] = 0; // reset chunk counter to zero if (tile_counter_per_gpu[id] > max_vocabulary_size_per_gpu) { char msg[100]{0}; sprintf(msg, "The size of hash table on GPU %zu is out of range %zu\n", id, max_vocabulary_size_per_gpu); CK_THROW_(Error_t::OutOfBound, msg); } } } // end of for(int i = 0; i < loop_num; i++) // process the remaining data(less than a chunk) size_t remain_loop_num = num - loop_num * chunk_size; TypeHashKey *key_dst_buf; size_t *value_index_buf; float *value_dst_buf; for (size_t i = 0; i < remain_loop_num; i++) { TypeHashKey key = key_ptr[loop_num * chunk_size + i]; size_t gid = key % Base::get_resource_manager().get_global_gpu_count(); // global GPU ID size_t id = Base::get_resource_manager().get_gpu_local_id_from_global_id( gid); // local GPU ID (not gpudevice id) int dst_rank = Base::get_resource_manager().get_process_id_from_gpu_global_id(gid); if (my_rank == dst_rank) { context.set_device(Base::get_local_gpu(id).get_device_id()); // memcpy hash_table_key from CPU to GPU key_dst_buf = d_hash_table_key_chunk_per_gpu[id]; CK_CUDA_THROW_(cudaMemcpyAsync(key_dst_buf, &key, hash_table_key_tile_size_in_B, cudaMemcpyHostToDevice, Base::get_local_gpu(id).get_stream())); // set value_index size_t value_index_offset = tile_counter_per_gpu[id]; value_index_buf = d_hash_table_value_index_chunk_per_gpu[id]; functors_.memset_liner(value_index_buf, value_index_offset, 1ul, 1ul, Base::get_local_gpu(id).get_stream()); // do hash table insert <key, value_index> on GPU hash_tables[id]->insert(d_hash_table_key_chunk_per_gpu[id], value_index_buf, hash_table_key_tile_size, Base::get_local_gpu(id).get_stream()); size_t value_head = hash_tables[id]->get_and_add_value_head( hash_table_key_tile_size, Base::get_local_gpu(id).get_stream()); // memcpy hash_table_value from CPU to GPU size_t value_offset = tile_counter_per_gpu[id] * embedding_vec_size; value_dst_buf = 
embedding_tensors[id].get_ptr() + value_offset; CK_CUDA_THROW_(cudaMemcpyAsync( value_dst_buf, embedding_ptr + (loop_num * chunk_size + i) * embedding_vec_size, hash_table_value_tile_size_in_B, cudaMemcpyHostToDevice, Base::get_local_gpu(id).get_stream())); // set counter tile_counter_per_gpu[id] += hash_table_key_tile_size; } else { continue; } // sync wait functors_.sync_all_gpus(Base::get_resource_manager()); } // end of if(remain_loop_num) // release resources for (size_t id = 0; id < local_gpu_count; id++) { context.set_device(Base::get_local_gpu(id).get_device_id()); CK_CUDA_THROW_(cudaFree(d_hash_table_value_index_chunk_per_gpu[id])); CK_CUDA_THROW_(cudaFree(d_hash_table_key_chunk_per_gpu[id])); } for (size_t id = 0; id < local_gpu_count; id++) { CK_CUDA_THROW_(cudaFreeHost(h_hash_table_key_chunk_per_gpu[id])); CK_CUDA_THROW_(cudaFreeHost(h_hash_table_value_chunk_per_gpu[id])); } } template <typename TypeHashKey, typename TypeEmbeddingComp> void DistributedSlotSparseEmbeddingHash<TypeHashKey, TypeEmbeddingComp>::dump_parameters( std::string sparse_model) const { dump_parameters(sparse_model, max_vocabulary_size_, Base::get_embedding_vec_size(), hash_table_value_tensors_, hash_tables_); } template <typename TypeHashKey, typename TypeEmbeddingComp> void DistributedSlotSparseEmbeddingHash<TypeHashKey, TypeEmbeddingComp>::dump_parameters( BufferBag &buf_bag, size_t *num) const { TensorBag2 keys_bag = buf_bag.keys; Tensor2<float> &embeddings = buf_bag.embedding; Tensor2<TypeHashKey> keys = Tensor2<TypeHashKey>::stretch_from(keys_bag); dump_parameters(keys, embeddings, num, max_vocabulary_size_, Base::get_embedding_vec_size(), hash_table_value_tensors_, hash_tables_); } template <typename TypeHashKey, typename TypeEmbeddingComp> void DistributedSlotSparseEmbeddingHash<TypeHashKey, TypeEmbeddingComp>::dump_parameters( const std::string &sparse_model, size_t vocabulary_size, size_t embedding_vec_size, const Tensors2<float> &hash_table_value_tensors, const std::vector<std::shared_ptr<HashTable<TypeHashKey, size_t>>> &hash_tables) const { CudaDeviceContext context; size_t local_gpu_count = Base::get_resource_manager().get_local_gpu_count(); if (!fs::exists(sparse_model)) { fs::create_directory(sparse_model); } const std::string key_file(sparse_model + "/" + sparse_model + ".key"); const std::string vec_file(sparse_model + "/" + sparse_model + ".vec"); #ifdef ENABLE_MPI MPI_File key_fh, vec_fh; CK_MPI_THROW_( MPI_File_open(MPI_COMM_WORLD, key_file.c_str(), MPI_MODE_CREATE | MPI_MODE_WRONLY, MPI_INFO_NULL, &key_fh)); CK_MPI_THROW_( MPI_File_open(MPI_COMM_WORLD, vec_file.c_str(), MPI_MODE_CREATE | MPI_MODE_WRONLY, MPI_INFO_NULL, &vec_fh)); #else std::ofstream key_stream(key_file, std::ofstream::binary | std::ofstream::trunc); std::ofstream vec_stream(vec_file, std::ofstream::binary | std::ofstream::trunc); // check if the file is opened successfully if (!vec_stream.is_open() || !key_stream.is_open()) { CK_THROW_(Error_t::WrongInput, "Error: file not open for writing"); return; } #endif // memory allocation std::unique_ptr<size_t[]> count(new size_t[local_gpu_count]); size_t total_count = 0; for (size_t id = 0; id < local_gpu_count; id++) { context.set_device(Base::get_local_gpu(id).get_device_id()); auto count_tmp = hash_tables[id]->get_size(Base::get_local_gpu(id).get_stream()); if (count_tmp != hash_tables[id]->get_value_head(Base::get_local_gpu(id).get_stream())) { CK_THROW_(Error_t::WrongInput, "Error: hash_table get_value_head() size not equal to get_size()"); } count[id] = count_tmp; 
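// Editor's note: count[id] is only trusted after the get_size() /
// get_value_head() consistency check above; the total accumulated below sizes
// the pinned host buffers used to stage the dump.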
total_count += count[id]; } if (total_count > (size_t)vocabulary_size) { CK_THROW_(Error_t::WrongInput, "Error: required download size is larger than hash table vocabulary_size"); } std::vector<size_t> offset_host(local_gpu_count, 0); std::exclusive_scan(count.get(), count.get() + local_gpu_count, offset_host.begin(), 0); TypeHashKey *h_hash_table_key; float *h_hash_table_value; CK_CUDA_THROW_(cudaMallocHost(&h_hash_table_key, total_count * sizeof(TypeHashKey))); CK_CUDA_THROW_(cudaMallocHost(&h_hash_table_value, total_count * embedding_vec_size * sizeof(float))); std::unique_ptr<TypeHashKey *[]> d_hash_table_key(new TypeHashKey *[local_gpu_count]); std::unique_ptr<size_t *[]> d_hash_table_value_index(new size_t *[local_gpu_count]); std::unique_ptr<size_t *[]> d_dump_counter(new size_t *[local_gpu_count]); for (size_t id = 0; id < local_gpu_count; id++) { if (count[id] == 0) continue; context.set_device(Base::get_local_gpu(id).get_device_id()); CK_CUDA_THROW_(cudaMallocManaged(&d_hash_table_key[id], count[id] * sizeof(TypeHashKey))); CK_CUDA_THROW_(cudaMallocManaged(&d_hash_table_value_index[id], count[id] * sizeof(size_t))); CK_CUDA_THROW_(cudaMalloc(&d_dump_counter[id], sizeof(size_t))); } // dump hash table from GPUs for (size_t id = 0; id < local_gpu_count; id++) { if (count[id] == 0) continue; context.set_device(Base::get_local_gpu(id).get_device_id()); hash_tables[id]->dump(d_hash_table_key[id], d_hash_table_value_index[id], d_dump_counter[id], Base::get_local_gpu(id).get_stream()); CK_CUDA_THROW_(cudaMemcpyAsync(h_hash_table_value + offset_host[id] * embedding_vec_size, hash_table_value_tensors[id].get_ptr(), count[id] * embedding_vec_size * sizeof(float), cudaMemcpyDeviceToHost, Base::get_local_gpu(id).get_stream())); } functors_.sync_all_gpus(Base::get_resource_manager()); // sort key according to memory index for (size_t id = 0; id < local_gpu_count; id++) { if (count[id] == 0) continue; context.set_device(Base::get_local_gpu(id).get_device_id()); thrust::sort_by_key(thrust::device, d_hash_table_value_index[id], d_hash_table_value_index[id] + count[id], d_hash_table_key[id]); CK_CUDA_THROW_(cudaMemcpyAsync(h_hash_table_key + offset_host[id], d_hash_table_key[id], count[id] * sizeof(TypeHashKey), cudaMemcpyDeviceToHost, Base::get_local_gpu(id).get_stream())); } functors_.sync_all_gpus(Base::get_resource_manager()); const size_t key_size = sizeof(TypeHashKey); const size_t vec_size = sizeof(float) * embedding_vec_size; // write sparse model to file MESSAGE_("Rank" + std::to_string(Base::get_resource_manager().get_process_id()) + ": Write hash table to file", true); #ifdef ENABLE_MPI int my_rank = Base::get_resource_manager().get_process_id(); int n_ranks = Base::get_resource_manager().get_num_process(); std::vector<size_t> offset_per_rank(n_ranks, 0); CK_MPI_THROW_(MPI_Allgather(&total_count, sizeof(size_t), MPI_CHAR, offset_per_rank.data(), sizeof(size_t), MPI_CHAR, MPI_COMM_WORLD)); std::exclusive_scan(offset_per_rank.begin(), offset_per_rank.end(), offset_per_rank.begin(), 0); size_t key_offset = offset_per_rank[my_rank] * key_size; size_t vec_offset = offset_per_rank[my_rank] * vec_size; CK_MPI_THROW_(MPI_Barrier(MPI_COMM_WORLD)); MPI_Status status; CK_MPI_THROW_(MPI_File_write_at(key_fh, key_offset, h_hash_table_key, total_count * key_size, MPI_CHAR, &status)); CK_MPI_THROW_(MPI_File_write_at(vec_fh, vec_offset, h_hash_table_value, total_count * vec_size, MPI_CHAR, &status)); CK_MPI_THROW_(MPI_File_close(&key_fh)); CK_MPI_THROW_(MPI_File_close(&vec_fh)); #else 
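// Editor's note: without MPI, each process writes the complete key/vector
// files through std::ofstream; the MPI branch above instead derives a per-rank
// byte offset via MPI_Allgather + std::exclusive_scan and writes its slice
// with MPI_File_write_at.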
key_stream.write(reinterpret_cast<char*>(h_hash_table_key), total_count * key_size); vec_stream.write(reinterpret_cast<char*>(h_hash_table_value), total_count * vec_size); #endif for (size_t id = 0; id < local_gpu_count; id++) { if (count[id] == 0) continue; context.set_device(Base::get_local_gpu(id).get_device_id()); CK_CUDA_THROW_(cudaFree(d_hash_table_key[id])); CK_CUDA_THROW_(cudaFree(d_hash_table_value_index[id])); CK_CUDA_THROW_(cudaFree(d_dump_counter[id])); } CK_CUDA_THROW_(cudaFreeHost(h_hash_table_key)); CK_CUDA_THROW_(cudaFreeHost(h_hash_table_value)); } template <typename TypeHashKey, typename TypeEmbeddingComp> void DistributedSlotSparseEmbeddingHash<TypeHashKey, TypeEmbeddingComp>::dump_parameters( Tensor2<TypeHashKey> &keys, Tensor2<float> &embeddings, size_t *num, size_t vocabulary_size, size_t embedding_vec_size, const Tensors2<float> &embedding_tensors, const std::vector<std::shared_ptr<HashTable<TypeHashKey, size_t>>> &hash_tables) const { TypeHashKey *key_ptr = keys.get_ptr(); float *embedding_ptr = embeddings.get_ptr(); size_t local_gpu_count = Base::get_resource_manager().get_local_gpu_count(); // memory allocation std::unique_ptr<size_t[]> count(new size_t[local_gpu_count]); size_t max_count = 0; size_t total_count = 0; CudaDeviceContext context; for (size_t id = 0; id < local_gpu_count; id++) { context.set_device(Base::get_local_gpu(id).get_device_id()); auto count_tmp_1 = hash_tables[id]->get_size(Base::get_local_gpu(id).get_stream()); auto count_tmp_2 = hash_tables[id]->get_value_head(Base::get_local_gpu(id).get_stream()); if (count_tmp_1 != count_tmp_2) { CK_THROW_(Error_t::WrongInput, "Error: hash_table get_value_head() size not equal to get_size()"); } count[id] = count_tmp_1; max_count = max(max_count, count[id]); total_count += count[id]; } if (total_count > (size_t)vocabulary_size) { CK_THROW_(Error_t::WrongInput, "Error: required download size is larger than hash table vocabulary_size"); } std::unique_ptr<TypeHashKey *[]> h_hash_table_key(new TypeHashKey *[local_gpu_count]); std::unique_ptr<TypeHashKey *[]> d_hash_table_key(new TypeHashKey *[local_gpu_count]); std::unique_ptr<size_t *[]> d_hash_table_value_index(new size_t *[local_gpu_count]); std::unique_ptr<float *[]> h_hash_table_value(new float *[local_gpu_count]); std::unique_ptr<float *[]> d_hash_table_value(new float *[local_gpu_count]); std::unique_ptr<size_t *[]> d_dump_counter(new size_t *[local_gpu_count]); for (size_t id = 0; id < local_gpu_count; id++) { if (count[id] == 0) { continue; } context.set_device(Base::get_local_gpu(id).get_device_id()); CK_CUDA_THROW_(cudaMallocHost(&h_hash_table_key[id], count[id] * sizeof(TypeHashKey))); CK_CUDA_THROW_(cudaMallocManaged(&d_hash_table_key[id], count[id] * sizeof(TypeHashKey))); CK_CUDA_THROW_(cudaMallocManaged(&d_hash_table_value_index[id], count[id] * sizeof(size_t))); CK_CUDA_THROW_(cudaMallocHost(&h_hash_table_value[id], count[id] * embedding_vec_size * sizeof(float))); CK_CUDA_THROW_(cudaMallocManaged(&d_hash_table_value[id], count[id] * embedding_vec_size * sizeof(float))); CK_CUDA_THROW_(cudaMallocManaged(&d_dump_counter[id], count[id] * sizeof(size_t))); } // dump hash table from GPUs for (size_t id = 0; id < local_gpu_count; id++) { if (count[id] == 0) { continue; } context.set_device(Base::get_local_gpu(id).get_device_id()); hash_tables[id]->dump(d_hash_table_key[id], d_hash_table_value_index[id], d_dump_counter[id], Base::get_local_gpu(id).get_stream()); CK_CUDA_THROW_(cudaMemcpyAsync(h_hash_table_key[id], d_hash_table_key[id], count[id] * 
sizeof(TypeHashKey), cudaMemcpyDeviceToHost, Base::get_local_gpu(id).get_stream())); functors_.get_hash_value(count[id], embedding_vec_size, d_hash_table_value_index[id], embedding_tensors[id].get_ptr(), d_hash_table_value[id], Base::get_local_gpu(id).get_stream()); CK_CUDA_THROW_(cudaMemcpyAsync(h_hash_table_value[id], d_hash_table_value[id], count[id] * embedding_vec_size * sizeof(float), cudaMemcpyDeviceToHost, Base::get_local_gpu(id).get_stream())); } // sync wait functors_.sync_all_gpus(Base::get_resource_manager()); const size_t key_size = sizeof(TypeHashKey); const size_t value_size = sizeof(float) * embedding_vec_size; size_t offset = 0; for (size_t id = 0; id < local_gpu_count; id++) { size_t size_in_B = count[id] * (sizeof(TypeHashKey) + sizeof(float) * embedding_vec_size); for (unsigned int k = 0; k < count[id]; k++) { memcpy(key_ptr + offset, h_hash_table_key[id] + k, key_size); memcpy(embedding_ptr + offset * embedding_vec_size, h_hash_table_value[id] + k * embedding_vec_size, value_size); offset += 1; } } *num = offset; for (size_t id = 0; id < local_gpu_count; id++) { if (count[id] == 0) { continue; } context.set_device(Base::get_local_gpu(id).get_device_id()); CK_CUDA_THROW_(cudaFreeHost(h_hash_table_key[id])); CK_CUDA_THROW_(cudaFree(d_hash_table_key[id])); CK_CUDA_THROW_(cudaFree(d_hash_table_value_index[id])); CK_CUDA_THROW_(cudaFreeHost(h_hash_table_value[id])); CK_CUDA_THROW_(cudaFree(d_hash_table_value[id])); CK_CUDA_THROW_(cudaFree(d_dump_counter[id])); } return; } template <typename TypeHashKey, typename TypeEmbeddingComp> void DistributedSlotSparseEmbeddingHash<TypeHashKey, TypeEmbeddingComp>::dump_opt_states( std::ofstream& stream) { std::vector<OptimizerTensor<TypeEmbeddingComp>> opt_tensors_; for(auto &opt: embedding_optimizers_){ opt_tensors_.push_back(opt.opt_tensors_); } auto opt_states = functors_.get_opt_states(opt_tensors_, Base::get_optimizer(), Base::get_resource_manager().get_local_gpu_count()); functors_.dump_opt_states(stream, Base::get_resource_manager(), opt_states); } template <typename TypeHashKey, typename TypeEmbeddingComp> void DistributedSlotSparseEmbeddingHash<TypeHashKey, TypeEmbeddingComp>::load_opt_states( std::ifstream& stream) { std::vector<OptimizerTensor<TypeEmbeddingComp>> opt_tensors_; for(auto &opt: embedding_optimizers_){ opt_tensors_.push_back(opt.opt_tensors_); } auto opt_states = functors_.get_opt_states(opt_tensors_, Base::get_optimizer(), Base::get_resource_manager().get_local_gpu_count()); functors_.load_opt_states(stream, Base::get_resource_manager(), opt_states); } template <typename TypeHashKey, typename TypeEmbeddingComp> void DistributedSlotSparseEmbeddingHash<TypeHashKey, TypeEmbeddingComp>::init_embedding( size_t max_vocabulary_size_per_gpu, size_t embedding_vec_size, Tensors2<float> &hash_table_value_tensors) { #pragma omp parallel num_threads(Base::get_resource_manager().get_local_gpu_count()) { size_t id = omp_get_thread_num(); CudaDeviceContext context(Base::get_local_gpu(id).get_device_id()); MESSAGE_("gpu" + std::to_string(id) + " start to init embedding"); HugeCTR::UniformGenerator::fill(hash_table_value_tensors[id], -0.05f, 0.05f, Base::get_local_gpu(id).get_sm_count(), Base::get_local_gpu(id).get_replica_variant_curand_generator(), Base::get_local_gpu(id).get_stream()); CK_CUDA_THROW_(cudaStreamSynchronize(Base::get_local_gpu(id).get_stream())); MESSAGE_("gpu" + std::to_string(id) + " init embedding done"); } } template <typename TypeHashKey, typename TypeEmbeddingComp> void 
DistributedSlotSparseEmbeddingHash<TypeHashKey, TypeEmbeddingComp>::reset() { CudaDeviceContext context; for (size_t i = 0; i < Base::get_resource_manager().get_local_gpu_count(); i++) { context.set_device(Base::get_local_gpu(i).get_device_id()); hash_tables_[i]->clear(Base::get_local_gpu(i).get_stream()); HugeCTR::UniformGenerator::fill(hash_table_value_tensors_[i], -0.05f, 0.05f, Base::get_local_gpu(i).get_sm_count(), Base::get_local_gpu(i).get_replica_variant_curand_generator(), Base::get_local_gpu(i).get_stream()); } for (size_t id = 0; id < Base::get_resource_manager().get_local_gpu_count(); id++) { CK_CUDA_THROW_(cudaStreamSynchronize(Base::get_local_gpu(id).get_stream())); } } template class DistributedSlotSparseEmbeddingHash<unsigned int, float>; template class DistributedSlotSparseEmbeddingHash<long long, float>; template class DistributedSlotSparseEmbeddingHash<unsigned int, __half>; template class DistributedSlotSparseEmbeddingHash<long long, __half>; } // namespace HugeCTR
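// ---------------------------------------------------------------------------
// Editor's note: the .hip and .cu versions of this embedding file differ only
// in runtime API names. Below is a minimal portability-shim sketch covering
// the calls both versions use; the GPU_* macro names are illustrative, not
// taken from either file.
// ---------------------------------------------------------------------------
#if defined(__HIP_PLATFORM_AMD__)
#include <hip/hip_runtime.h>
#define GPU_MALLOC hipMalloc                  /* cudaMalloc            */
#define GPU_MALLOC_MANAGED hipMallocManaged   /* cudaMallocManaged     */
#define GPU_HOST_MALLOC hipHostMalloc         /* cudaMallocHost        */
#define GPU_FREE hipFree                      /* cudaFree              */
#define GPU_HOST_FREE hipHostFree             /* cudaFreeHost          */
#define GPU_MEMCPY_ASYNC hipMemcpyAsync       /* cudaMemcpyAsync       */
#define GPU_MEMSET_ASYNC hipMemsetAsync       /* cudaMemsetAsync       */
#define GPU_STREAM_SYNC hipStreamSynchronize  /* cudaStreamSynchronize */
#else
#include <cuda_runtime.h>
#define GPU_MALLOC cudaMalloc
#define GPU_MALLOC_MANAGED cudaMallocManaged
#define GPU_HOST_MALLOC cudaMallocHost
#define GPU_FREE cudaFree
#define GPU_HOST_FREE cudaFreeHost
#define GPU_MEMCPY_ASYNC cudaMemcpyAsync
#define GPU_MEMSET_ASYNC cudaMemsetAsync
#define GPU_STREAM_SYNC cudaStreamSynchronize
#endif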
ff85c61dd0e6fc93d5f39f749efa8c054d699e9a.hip
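// Editor's note: the next file is a hipify translation of an HMPP-generated
// CUDA codelet (a 512x512 float GEMM with alpha = 32412, beta = 2123); the
// original pragma-annotated C source is preserved in its header comment.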
// !!! This is a file automatically generated by hipify!!! // ** Original codelet code ** // // #pragma hmppcg cpiparam __arg0 IN a%hmpp_codelet__runGemm: (1, 0) // #pragma hmppcg cpiparam __arg1 IN b%hmpp_codelet__runGemm: (1, 1) // #pragma hmppcg cpiparam __arg2 INOUT c%hmpp_codelet__runGemm: (1, 2) // // #pragma hmppcg cpicall hmpp_codelet__runGemm(__arg0, __arg1, __arg2): 1 // // // /* begin of extracted source code for directive set "gemm" */ // // // # 30 "gemm.c" // typedef float DATA_TYPE; // // // # 35 "gemm.c" // void hmpp_codelet__runGemm(DATA_TYPE a[512][512], DATA_TYPE b[512][512], DATA_TYPE c[512][512]) // { // int i, j, k; // DATA_TYPE p_alpha = 32412; // DATA_TYPE p_beta = 2123; // // // #pragma hmppcg grid blocksize 32 X 8 // # 12 "<preprocessor>" // # 43 "gemm.c" // #pragma hmppcg permute i, j, k // # 15 "<preprocessor>" // # 44 "gemm.c" // for (i = 0 ; i < 512 ; i++) // { // for (j = 0 ; j < 512 ; j++) // { // c[i][j] *= p_beta; // // for (k = 0 ; k < 512 ; ++k) // { // c[i][j] += p_alpha * a[i][k] * b[k][j]; // } // } // } // } // // // /* end of extracted source code for directive set "gemm" */ // // // // ** End of original codelet codelet ** #include <stdio.h> #include <stdlib.h> #include <string.h> #include <math.h> #ifdef _MSC_VER # define HMPPCG_RESTRICT typedef __int8 int8_t; typedef unsigned __int8 uint8_t; typedef __int16 int16_t; typedef unsigned __int16 uint16_t; typedef __int32 int32_t; typedef unsigned __int32 uint32_t; typedef __int64 int64_t; typedef unsigned __int64 uint64_t; # ifdef _WIN64 typedef int64_t intptr_t; # else typedef int32_t intptr_t; # endif #else # if defined(__GNUC__) || defined(__RESTRICT) # define HMPPCG_RESTRICT __restrict # else # define HMPPCG_RESTRICT # endif # include <stdint.h> #endif // Dynamic array typedef struct __hmppcg_array_struct { void *array; size_t *size; size_t elsize; } __hmppcg_array_t; // Data section typedef struct __hmppcg_DataSection { size_t from; size_t to; size_t step; } __hmppcg_DataSection; #include <hip/hip_runtime.h> #if CUDART_VERSION < 2000 #error Bad CUDA Runtime version. CUDA Toolkit 2.0+ required. #endif #define HMPP_CONSTMEM_OFFSET 0 #include <map> #include <string> // ---------------------------------------------------------------------------- // HMPP CUDA support classes // ---------------------------------------------------------------------------- #ifndef __HMPP_CUDADATA_H__ #define __HMPP_CUDADATA_H__ #ifndef HMPPCG_WARP_SIZE #define HMPPCG_WARP_SIZE 32 #endif enum CopyKind { HostToHost = 0, HostToDevice = 1, DeviceToHost = 2, DeviceToDevice = 3, }; inline int hmppcg_check_status(const char *file,int line,hipError_t status) { if(status != hipSuccess) { fprintf(stderr, "%s:%d CUDA Error: %s\n", file, line, hipGetErrorString(status)); return -1; } return 0; } #define CHECK_STATUS(X) hmppcg_check_status(__FILE__,__LINE__,(X)) #define HMPP_CHECK_GRID_BOUNDARY(x) \ if(x>65535){\ fprintf(stderr, "%s:%d Grid Dimension Error: '%s' exceeds the 65535 dimension limit. Please modify the grid size configuration (see the hmppcg grid blocksize pragma) or switch to 2D gridification\n", __FILE__,__LINE__, #x);\ exit(-1) ;\ } #define HMPP_CHECK_BLOCK_BOUNDARY(x) \ if(x > devProp.maxThreadsPerBlock){ \ fprintf(stderr, "%s:%d Number of threads per block exceeds for the HWA: it is '%d' and HWA supports up to '%d'. 
Please modify the block size configuration (see the hmppcg grid blocksize pragma)\n", __FILE__,__LINE__, x, devProp.maxThreadsPerBlock); \ exit(-1) ; \ } // ---------------------------------------------------------------------------- // class DefaultPolicy // ---------------------------------------------------------------------------- struct DefaultPolicy { public: DefaultPolicy() { } virtual ~DefaultPolicy() { } int deviceAlloc(void **ptr,size_t size) { if( CHECK_STATUS(hipStreamCreate(&stream_)) != 0 ) return -1; if( CHECK_STATUS(hipMalloc(ptr,size)) != 0 ) return -1; #if TORCH_HIP_VERSION >= 3020 if( CHECK_STATUS(hipEventCreateWithFlags(&event, hipEventDisableTiming | hipEventBlockingSync)) != 0) return -1; #else if( CHECK_STATUS(hipEventCreateWithFlags(&event, hipEventBlockingSync)) != 0) return -1; #endif return 0; } int deviceFree(void *ptr) { if( CHECK_STATUS(hipStreamDestroy(stream_)) != 0) return -1; if( CHECK_STATUS(hipFree(ptr)) != 0) return -1; if( CHECK_STATUS(hipEventDestroy(event)) != 0) return -1; return 0; } int deviceMemcpy(void *dst,const void *src,size_t size,CopyKind kind,bool async) { static hipMemcpyKind cudaKind[] = {hipMemcpyHostToHost, hipMemcpyHostToDevice, hipMemcpyDeviceToHost, hipMemcpyDeviceToDevice }; if(async) { return CHECK_STATUS(hipMemcpyAsync(dst,src,size,cudaKind[kind],stream_)); } else { return CHECK_STATUS(hipMemcpy(dst,src,size,cudaKind[kind])); } } int makeStreamWait(hipStream_t wstream) { int status; status = CHECK_STATUS(hipEventRecord(event, stream_)); if (status != 0) return status; #if TORCH_HIP_VERSION >= 3020 return CHECK_STATUS(hipStreamWaitEvent(wstream, event, 0)); #else return CHECK_STATUS(hipEventSynchronize(event)); #endif } int waitOnEvent(hipEvent_t wevent) { #if TORCH_HIP_VERSION >= 3020 return CHECK_STATUS(hipStreamWaitEvent(stream_, wevent, 0)); #else return CHECK_STATUS(hipEventSynchronize(wevent)); #endif } int deviceWait() { return CHECK_STATUS(hipStreamSynchronize(stream_)); } private: hipStream_t stream_; hipEvent_t event; }; // ---------------------------------------------------------------------------- // class ConstantPolicy // ---------------------------------------------------------------------------- #ifndef HMPP_CONSTMEM_SIZE #define HMPP_CONSTMEM_SIZE 2048 #endif __constant__ int64_t hmpp_constmem[HMPP_CONSTMEM_SIZE / 8]; /// Shared memory array is aligned on 64 bit thanks to that (to avoid an nvcc compilation error) extern __shared__ int64_t hmpp_sharedmem[]; struct ConstantPolicy { public: ConstantPolicy() { static bool initialized = false; if(!initialized) { next_offset_ = HMPP_CONSTMEM_OFFSET; initialized = true; } offset_ = -1; } virtual ~ConstantPolicy() { } void setStaticOffset(int offset) { offset_ = offset; while(offset_ % 8) offset_ ++; } int deviceAlloc(void **ptr, size_t size) { #if TORCH_HIP_VERSION >= 3020 if( CHECK_STATUS(hipEventCreateWithFlags(&event, hipEventDisableTiming | hipEventBlockingSync)) != 0) return -1; #else if( CHECK_STATUS(hipEventCreateWithFlags(&event, hipEventBlockingSync)) != 0) return -1; #endif if(offset_ != -1) { if((offset_ + size) >= HMPP_CONSTMEM_SIZE) return -1; (*ptr) = (void *)offset_; return 0; } if((next_offset_ + size) >= HMPP_CONSTMEM_SIZE) return -1; (*ptr) = (void *)next_offset_; next_offset_ += size; return 0; } int deviceFree(void *ptr) { return 0; } int deviceMemcpy(void *dst,const void *src,size_t size,CopyKind kind,bool async) { size_t offset; switch(kind) { case HostToDevice: offset = (size_t)dst; return 
CHECK_STATUS(hipMemcpyToSymbol(hmpp_constmem,src,size,offset,hipMemcpyHostToDevice)); case DeviceToHost: offset = (size_t)src; return CHECK_STATUS(hipMemcpyFromSymbol(dst,hmpp_constmem,size,offset,hipMemcpyDeviceToHost)); default: return -1; } } int makeStreamWait(hipStream_t wstream) { int status; /* stream 0 at the moment */ status = CHECK_STATUS(hipEventRecord(event, 0)); if (status != 0) return status; #if TORCH_HIP_VERSION >= 3020 return CHECK_STATUS(hipStreamWaitEvent(wstream, event, 0)); #else return CHECK_STATUS(hipEventSynchronize(event)); #endif } int waitOnEvent(hipEvent_t wevent) { /* stream 0 at the moment */ #if TORCH_HIP_VERSION >= 3020 return CHECK_STATUS(hipStreamWaitEvent(0, wevent, 0)); #else return CHECK_STATUS(hipEventSynchronize(wevent)); #endif } int deviceWait() { return 0; } private: static size_t next_offset_; int offset_; hipEvent_t event; }; size_t ConstantPolicy::next_offset_; // ---------------------------------------------------------------------------- // class Lazy // ---------------------------------------------------------------------------- template <typename Policy> struct Lazy { char * value; bool valid; bool allocated; void ** devaddr; Policy * policy; size_t size; Lazy(size_t elem_size) { value = new char[elem_size]; } ~Lazy() { delete[] value; } int requireDeviceAlloc() { if(!allocated) { allocated = true; return policy->deviceAlloc(devaddr,size); } else { return 0; } } }; // ---------------------------------------------------------------------------- // class Element // ---------------------------------------------------------------------------- template <typename T,typename Policy> struct Element { Element(void * const * device_addr, size_t offset, Policy *policy, Lazy<Policy> * lazy) : device_addr_(device_addr) , offset_(offset), policy_(policy), lazy_(lazy) { } Element &operator=(const T & value) { if(lazy_) { *((T *)(lazy_->value)) = value; lazy_->valid = true; return *this; } if(lazy_) lazy_->requireDeviceAlloc(); policy_->deviceMemcpy(((char*)(*device_addr_)) + offset_,(const char*)&value,ElemSize,HostToDevice,false); return *this; } Element &operator=(const Element & src) { if(src.lazy_ && src.lazy_->valid) { lazy_->valid = true; *((T *)(lazy_->value)) = *((T *)(src.lazy_->value)); return *this; } if(lazy_) lazy_->requireDeviceAlloc(); if(src.lazy_) src.lazy_->requireDeviceAlloc(); policy_->deviceMemcpy(((char*)(*device_addr_)) + offset_,((const char*)(*src.device_addr_)) + src.offset_, ElemSize,DeviceToDevice,false); if(lazy_) { lazy_->valid = false; } return *this; } operator T() { if(lazy_ && lazy_->valid) return *((T *)(lazy_->value)); T res; if(lazy_) lazy_->requireDeviceAlloc(); policy_->deviceMemcpy(&res,((const char*)(*device_addr_)) + offset_,ElemSize,DeviceToHost,false); if(lazy_) { *((T *)(lazy_->value)) = res; lazy_->valid = true; } return res; } typedef T Type; enum { ElemSize = sizeof(T) }; private: size_t offset_; void *const* device_addr_; Policy *policy_; public: Lazy<Policy> * lazy_; }; enum DataFlags { DEFAULT = 0x0, LAZY = 0x1 }; // ---------------------------------------------------------------------------- // class Data // ---------------------------------------------------------------------------- template <typename T,typename Policy> class Data { public: typedef T Type; typedef Element<T,Policy> ElementType; enum { ElemSize = sizeof(T) }; Data(const char * name, unsigned int flags = DEFAULT) : name_(name), flags_(flags), dim_(0), sizes_(0), size_(0), host_addr_(0), device_addr_(0) { policy_ = new Policy; if(flags_ & 
LAZY) { lazy_ = new Lazy<Policy>(ElemSize); lazy_->valid = false; lazy_->devaddr = 0; lazy_->policy = policy_; } else lazy_ = 0; } ~Data() { free(); delete policy_; if(lazy_) delete lazy_; } int allocate(unsigned int dim, size_t idx0 = 0, size_t idx1 = 0, size_t idx2 = 0, size_t idx3 = 0, size_t idx4 = 0, size_t idx5 = 0, size_t idx6 = 0, size_t idx7 = 0, size_t idx8 = 0, size_t idx9 = 0, size_t idxA = 0, size_t idxB = 0) { const size_t sizes[] = { idx0, idx1, idx2, idx3, idx4, idx5, idx6, idx7, idx8, idx9, idxA, idxB }; return allocate2(dim,sizes); } int allocate3(unsigned int dim_p, const size_t * sizes_p) { size_t sizes[2]; sizes[0] = 1; sizes[1] = 0; for(int d = 0 ; d < dim_p ; d++) { sizes[0] *= sizes_p[d]; } return allocate2(1, sizes); } int allocate2(unsigned int dim, const size_t * sizes) { dim_ = dim; sizes_ = new size_t[dim]; dimSizes_ = new size_t[dim]; size_ = ElemSize; for(int d=0;d<dim;d++) { sizes_[d] = sizes[d]; size_ *= sizes_[d]; size_t size = 1; for(int d2=d+1;d2<dim;d2++) size*=sizes[d2]; dimSizes_[d] = size; } if(lazy_) { lazy_->allocated = false; lazy_->devaddr = &device_addr_; lazy_->size = size_; return 0; } else return policy_->deviceAlloc(&device_addr_,size_); } int free() { if(sizes_) { delete [] sizes_; delete [] dimSizes_; sizes_ = 0; dim_ = 0; size_ = 0; } if(device_addr_) { if(policy_->deviceFree(device_addr_) != 0) return -1; device_addr_ = 0; } return 0; } int download(void * host_addr,bool async) { if(lazy_ && lazy_->valid) { *((T *)host_addr) = *((T *)(lazy_->value)); return 0; } if(lazy_) { lazy_->requireDeviceAlloc(); } int sts = policy_->deviceMemcpy(host_addr,device_addr_,size_,DeviceToHost,async); if(lazy_) { lazy_->valid = true; *((T *)(lazy_->value)) = *((T *)host_addr); } return sts; } int upload(const void * host_addr,bool async) { if(lazy_) { lazy_->valid = true; *((T *)(lazy_->value)) = * ((T *)host_addr); lazy_->requireDeviceAlloc(); } return policy_->deviceMemcpy(device_addr_,host_addr,size_,HostToDevice,async); } int downloadSection(void *host_addr,const __hmppcg_DataSection *sections,bool async) { return sectionCopy(host_addr,device_addr_,sections,DeviceToHost,async); } int uploadSection(const void *host_addr,const __hmppcg_DataSection *sections,bool async) { return sectionCopy(device_addr_,host_addr,sections,HostToDevice,async); } int makeStreamWait(hipStream_t wstream) { if(lazy_) lazy_->requireDeviceAlloc(); return policy_->makeStreamWait(wstream); } int waitOnEvent(hipEvent_t wevent) { return policy_->waitOnEvent(wevent); } int waitTransfer() { return policy_->deviceWait(); } ElementType operator()(size_t idx0 = 0, size_t idx1 = 0, size_t idx2 = 0, size_t idx3 = 0, size_t idx4 = 0, size_t idx5 = 0, size_t idx6 = 0, size_t idx7 = 0, size_t idx8 = 0, size_t idx9 = 0, size_t idxA = 0, size_t idxB = 0) { size_t sizes[] = { idx0, idx1, idx2, idx3, idx4, idx5, idx6, idx7, idx8, idx9, idxA, idxB }; return at(sizes); } ElementType at(size_t *idx) { size_t offset = idx[0]; return ElementType(&device_addr_,offset*ElemSize,policy_,lazy_); } template <typename Y> Element<Y,Policy> at(size_t offset) { return Element<Y,Policy>(&device_addr_,offset,policy_,lazy_); } ElementType operator=(const T & value) { ElementType res(&device_addr_,0,policy_,lazy_); res = value; return res; } ElementType operator=(const Data &data) { return operator=(data.value()); } T value() const { ElementType res(&device_addr_,0,policy_,lazy_); return (T)res; } operator T() { return value(); } T *getDeviceAddr() { if(lazy_) lazy_->requireDeviceAlloc(); if(lazy_ && 
lazy_->valid) { policy_->deviceMemcpy(device_addr_,lazy_->value,size_,HostToDevice,false); } return (T*)device_addr_; } void invalidateLazy() { if(lazy_) { lazy_->valid = false; } } private: Data(const Data &data) {} int sectionCopy(char *dst,const char *src,int offset,int cur, const __hmppcg_DataSection *sections,int lastdense,CopyKind kind,bool async) { int d; int size = 1; for(d=cur+1;d<dim_;d++) size *= sizes_[d]; if(cur<(lastdense-1)) { int x; for(x=sections[cur].from;x<=sections[cur].to;x+=sections[cur].step) if(sectionCopy(dst,src,offset+x*size,cur+1,sections,lastdense,kind,async) != 0) return -1; } else { int step = sections[cur].step; if(step == 1) { int start = (offset + sections[cur].from * size) * ElemSize; int total = (sections[cur].to - sections[cur].from + 1) * size * ElemSize; return policy_->deviceMemcpy(dst+start,src+start,total,kind,async); } else { int x; for(x=sections[cur].from;x<=sections[cur].to;x+=step) { int off = (offset + x * size) * ElemSize; if(policy_->deviceMemcpy(dst+off,src+off,size * ElemSize,kind,async) != 0) return -1; } } } return 0; } int sectionCopy(void *dst,const void *src, const __hmppcg_DataSection *sections,CopyKind kind,bool async) { int i; int lastdense = dim_; for (i = dim_ - 1 ; i >= 0 ; i --) { if ((sections[i].from == 0) && (sections[i].to == sizes_[i] - 1) && (sections[i].step == 1)) lastdense = i; else break; } return sectionCopy((char*)dst,(const char*)src,0,0,sections,lastdense,kind,async); } const char * name_; size_t flags_; void *device_addr_; void *host_addr_; size_t dim_; size_t *sizes_; size_t *dimSizes_; size_t size_; Lazy<Policy> * lazy_; public: Policy *policy_; }; // --------------------------------------------------------------------------- // User data // --------------------------------------------------------------------------- class UserData{ public: virtual ~UserData(){} UserData(){} }; #define __HMPPCG_COMPLEX_FLOAT_DEFINED typedef float2 __hmppcg_complex_float; #define __HMPPCG_COMPLEX_DOUBLE_DEFINED typedef double2 __hmppcg_complex_double; // --------------------------------------------------------------------------- // Allocatable Arrays // --------------------------------------------------------------------------- template <const size_t nb_dims> struct AArrayDesc { int lbounds_[nb_dims]; size_t sizes_[nb_dims]; size_t wholesize_; }; #ifndef __HMPPCG_ALLOCATABLE_ARRAY_ALLOCATE #define __HMPPCG_ALLOCATABLE_ARRAY_ALLOCATE( var, type, nb_dims, ... 
) \ { int alloc_ranges[] = { __VA_ARGS__ }; \ int hmppcg_alloc_i; \ var ## _aarray_desc.wholesize_ = 1; \ for(hmppcg_alloc_i=0; hmppcg_alloc_i<nb_dims; hmppcg_alloc_i++){ \ int hmppcg_alloc_first = alloc_ranges[2*hmppcg_alloc_i]; \ int hmppcg_alloc_last = alloc_ranges[2*hmppcg_alloc_i + 1]; \ int hmppcg_alloc_size = hmppcg_alloc_last - hmppcg_alloc_first + 1; \ var ## _aarray_desc.lbounds_[hmppcg_alloc_i] = hmppcg_alloc_first; \ var ## _aarray_desc.sizes_[hmppcg_alloc_i] = hmppcg_alloc_size; \ var ## _aarray_desc.wholesize_ *= hmppcg_alloc_size; \ } \ if((hmppcg_status_ = var.allocate2(nb_dims, var ## _aarray_desc.sizes_))) \ return; \ } #endif #ifndef __HMPPCG_ALLOCATABLE_ARRAY_DEALLOCATE #define __HMPPCG_ALLOCATABLE_ARRAY_DEALLOCATE( var ) \ { \ var.free(); \ } #endif #ifndef __HMPPCG_ALLOCATABLE_ARRAY_ALLOCATED #define __HMPPCG_ALLOCATABLE_ARRAY_ALLOCATED( var ) \ (var.getDeviceAddr() != NULL) #endif //__HMPPCG_ALLOCATABLE_ARRAY_ALLOCATED #ifndef __HMPPCG_ALLOCATABLE_ARRAY_WHOLESIZE #define __HMPPCG_ALLOCATABLE_ARRAY_WHOLESIZE( var ) \ var ## _aarray_desc.wholesize_ #endif //__HMPPCG_ALLOCATABLE_ARRAY_WHOLESIZE #ifndef __HMPPCG_ALLOCATABLE_ARRAY_SIZE #define __HMPPCG_ALLOCATABLE_ARRAY_SIZE( var, d ) \ var ## _aarray_desc.sizes_[d] #endif //__HMPPCG_ALLOCATABLE_ARRAY_SIZE #ifndef __HMPPCG_ALLOCATABLE_ARRAY_LBOUND #define __HMPPCG_ALLOCATABLE_ARRAY_LBOUND( var, d ) \ var ## _aarray_desc.lbounds_[d] #endif //__HMPPCG_ALLOCATABLE_ARRAY_LBOUND #ifndef __HMPPCG_ALLOCATABLE_ARRAY_UBOUND #define __HMPPCG_ALLOCATABLE_ARRAY_UBOUND( var, d ) \ (var ## _aarray_desc.sizes_[d] + var ## _aarray_desc.lbounds_[d] - 1) #endif //__HMPPCG_ALLOCATABLE_ARRAY_UBOUND #ifndef __HMPP_INT_POW_FUNC #define __HMPP_INT_POW_FUNC(func_ext_name, func_type) \ __device__ func_type hmpp_pow ##func_ext_name ( func_type base, func_type exp ) \ { \ if(exp < 0) \ return 0; \ func_type result = 1; \ while (exp) \ { \ if (exp & 1) \ result *= base; \ exp >>= 1; \ base *= base; \ } \ return result; \ } #endif __HMPP_INT_POW_FUNC( i64, int64_t ); __HMPP_INT_POW_FUNC( i32, int32_t ); __HMPP_INT_POW_FUNC( i16, int16_t ); __HMPP_INT_POW_FUNC( i8, int8_t ); #ifndef __HMPP_UINT_POW_FUNC #define __HMPP_UINT_POW_FUNC(func_ext_name, func_type) \ __device__ func_type hmpp_pow ##func_ext_name ( func_type base, func_type exp ) \ { \ func_type result = 1; \ while (exp) \ { \ if (exp & 1) \ result *= base; \ exp >>= 1; \ base *= base; \ } \ return result; \ } #endif __HMPP_UINT_POW_FUNC( ui64, uint64_t ); __HMPP_UINT_POW_FUNC( ui32, uint32_t ); __HMPP_UINT_POW_FUNC( ui16, uint16_t ); __HMPP_UINT_POW_FUNC( ui8, uint8_t ); #endif // __HMPP_CUDADATA_H__ #ifndef __HMPPCG_COMPLEX_DOUBLE_DEFINED #define __HMPPCG_COMPLEX_DOUBLE_DEFINED typedef struct { double x; double y; }__hmppcg_complex_double; #endif /* __HMPPCG_COMPLEX_DOUBLE_DEFINED */ #ifndef __HMPPCG_COMPLEX_FLOAT_DEFINED #define __HMPPCG_COMPLEX_FLOAT_DEFINED typedef struct { float x; float y; }__hmppcg_complex_float; #endif /* __HMPPCG_COMPLEX_FLOAT_DEFINED */ template <const unsigned int blockDimX__, const unsigned int blockDimY__> __global__ void hmpp_codelet__runGemm_loop0_( float * HMPPCG_RESTRICT a, float * HMPPCG_RESTRICT b, float * HMPPCG_RESTRICT c) { int32_t j_1; int32_t i_1; j_1 = (blockDimX__ * blockIdx.x + threadIdx.x); i_1 = (blockDimY__ * blockIdx.y + threadIdx.y); bool __hmppcg_guard = (!((j_1 <= 511) & (i_1 <= 511))); if(__hmppcg_guard) { goto __hmppcg_label1; }; { int32_t __hmppcg_end, k_1; for (k_1 = 0, __hmppcg_end = 511; k_1 <= __hmppcg_end; k_1 += 1) { if (k_1 == 0) { 
c[(i_1 * 512) + j_1] = (c[(i_1 * 512) + j_1]) * ((float) (2123)); } c[(i_1 * 512) + j_1] = (c[(i_1 * 512) + j_1]) + ((((float) (32412)) * (a[(i_1 * 512) + k_1])) * (b[(k_1 * 512) + j_1])); } } __hmppcg_label1:; } void hmpp_codelet__runGemm( int &hmppcg_status_, void * __h, const hipDeviceProp_t &devProp, hipStream_t kernel_stream, hipEvent_t kernel_event, Data<float,DefaultPolicy> & a, Data<float,DefaultPolicy> & b, Data<float,DefaultPolicy> & c) { if(1LL) { unsigned int gridDimX__ = 16LL; HMPP_CHECK_GRID_BOUNDARY(gridDimX__); unsigned int gridDimY__ = 64LL; HMPP_CHECK_GRID_BOUNDARY(gridDimY__); dim3 dim_grid(gridDimX__, gridDimY__); const unsigned int blockDimX__ = 32LL; const unsigned int blockDimY__ = 8LL; HMPP_CHECK_BLOCK_BOUNDARY(blockDimX__*blockDimY__); #if TORCH_HIP_VERSION >= 3020 a.makeStreamWait(kernel_stream); b.makeStreamWait(kernel_stream); c.makeStreamWait(kernel_stream); #else if ((hmppcg_status_ = CHECK_STATUS(hipDeviceSynchronize()))) return; #endif dim3 dim_block(blockDimX__, blockDimY__); hipLaunchKernelGGL(( hmpp_codelet__runGemm_loop0_<blockDimX__, blockDimY__>), dim3(dim_grid), dim3(dim_block), 0LL, kernel_stream, a.getDeviceAddr(), b.getDeviceAddr(), c.getDeviceAddr()); if ((hmppcg_status_ = CHECK_STATUS(hipGetLastError()))) return; #if TORCH_HIP_VERSION >= 3020 if((hmppcg_status_ = CHECK_STATUS(hipEventRecord(kernel_event, kernel_stream)))) return; a.waitOnEvent(kernel_event); b.waitOnEvent(kernel_event); c.waitOnEvent(kernel_event); #else if ((hmppcg_status_ = CHECK_STATUS(hipDeviceSynchronize()))) return; #endif }; } // HMPP_API #ifdef __cplusplus #define HMPP_EXTERN extern "C" #else #define HMPP_EXTERN #endif #ifdef _WIN32 #define HMPP_EXPORT __declspec(dllexport) #define HMPP_INLINE __inline #else #define HMPP_EXPORT #define HMPP_INLINE inline #endif #define HMPP_API HMPP_EXTERN HMPP_EXPORT // HMPPCG_POP_HASH #define HMPPCG_POP_HASH(major,minor) (((major)<<16)|(minor)) // --------------------------------------------------------------------------- // HMPP handle // --------------------------------------------------------------------------- typedef struct hmpp_handle_struct { Data<float,DefaultPolicy> * __arg0; Data<float,DefaultPolicy> * __arg1; Data<float,DefaultPolicy> * __arg2; hipDeviceProp_t devProp; hipStream_t kernel_stream; hipEvent_t kernel_event; std::map<std::string,UserData*> map_user_data; } hmpp_handle_t; // --------------------------------------------------------------------------- // hmpp_createInstance() // --------------------------------------------------------------------------- HMPP_API hmpp_handle_t * hmpp_createInstance() { hmpp_handle_t * __h = new hmpp_handle_t; if(!__h) return 0; if(CHECK_STATUS(hipStreamCreate(&__h->kernel_stream)) != 0) return NULL; #if TORCH_HIP_VERSION >= 3020 if(CHECK_STATUS(hipEventCreateWithFlags(&__h->kernel_event, hipEventDisableTiming | hipEventBlockingSync)) != 0) return NULL; #else if(CHECK_STATUS(hipEventCreateWithFlags(&__h->kernel_event, hipEventBlockingSync)) != 0) return NULL; #endif __h->__arg0 = NULL; __h->__arg1 = NULL; __h->__arg2 = NULL; int device; hipGetDevice(&device); hipGetDeviceProperties(&(__h->devProp), device); return __h; } // --------------------------------------------------------------------------- // hmpp_freeInstance() // --------------------------------------------------------------------------- HMPP_API int hmpp_freeInstance(hmpp_handle_t * __h) { delete __h->__arg0; delete __h->__arg1; delete __h->__arg2; hipStreamDestroy(__h->kernel_stream); hipEventDestroy(__h->kernel_event); 
__h->kernel_stream = 0; for(std::map<std::string,UserData*>::const_iterator it = __h->map_user_data.begin(); it != __h->map_user_data.end(); it++) { delete it->second; } delete(__h); return 0; } // --------------------------------------------------------------------------- // hmpp_allocateOnHWA() // --------------------------------------------------------------------------- HMPP_API int hmpp_allocateOnHWA(hmpp_handle_t * __h, int major, int minor, const size_t * size, size_t elsize, int dim) { switch(HMPPCG_POP_HASH(major,minor)) { case HMPPCG_POP_HASH(1,0): // a@hmpp_codelet__runGemm { __h->__arg0 = new Data<float,DefaultPolicy>("__arg0", DEFAULT); return __h->__arg0->allocate2(dim, size); } case HMPPCG_POP_HASH(1,1): // b@hmpp_codelet__runGemm { __h->__arg1 = new Data<float,DefaultPolicy>("__arg1", DEFAULT); return __h->__arg1->allocate2(dim, size); } case HMPPCG_POP_HASH(1,2): // c@hmpp_codelet__runGemm { __h->__arg2 = new Data<float,DefaultPolicy>("__arg2", DEFAULT); return __h->__arg2->allocate2(dim, size); } default: return -1; } } HMPP_API int hmpp_allocateOutputOnHWA(hmpp_handle_t * __h, int major, int minor, const size_t * size, size_t elsize, int dim) { return hmpp_allocateOnHWA(__h, major, minor, size, elsize, dim); } HMPP_API int hmpp_allocateInputOnHWA(hmpp_handle_t * __h, int major, int minor, const size_t * size, size_t elsize, int dim) { return hmpp_allocateOnHWA(__h, major, minor, size, elsize, dim); } HMPP_API int hmpp_allocateInOutOnHWA(hmpp_handle_t * __h, int major, int minor, const size_t * size, size_t elsize, int dim) { return hmpp_allocateOnHWA(__h, major, minor, size, elsize, dim); } // --------------------------------------------------------------------------- // hmpp_readDataFromHWA() // --------------------------------------------------------------------------- HMPP_API int hmpp_readDataFromHWA(hmpp_handle_t * __h, int major, int minor, void * data, const size_t * size, size_t elsize, int dim, int async) { switch(HMPPCG_POP_HASH(major,minor)) { case HMPPCG_POP_HASH(1,0): // a@hmpp_codelet__runGemm { return __h->__arg0->download(data,async!=0); } case HMPPCG_POP_HASH(1,1): // b@hmpp_codelet__runGemm { return __h->__arg1->download(data,async!=0); } case HMPPCG_POP_HASH(1,2): // c@hmpp_codelet__runGemm { return __h->__arg2->download(data,async!=0); } default: return -1; } } // --------------------------------------------------------------------------- // hmpp_writeDataToHWA() // --------------------------------------------------------------------------- HMPP_API int hmpp_writeDataToHWA(hmpp_handle_t * __h, int major, int minor, const void * data, const size_t * size, size_t elsize, int dim, int async) { switch(HMPPCG_POP_HASH(major,minor)) { case HMPPCG_POP_HASH(1,0): // a@hmpp_codelet__runGemm { return __h->__arg0->upload(data,async!=0); } case HMPPCG_POP_HASH(1,1): // b@hmpp_codelet__runGemm { return __h->__arg1->upload(data,async!=0); } case HMPPCG_POP_HASH(1,2): // c@hmpp_codelet__runGemm { return __h->__arg2->upload(data,async!=0); } default: return -1; } } // --------------------------------------------------------------------------- // hmpp_readDataSectionFromHWA() // --------------------------------------------------------------------------- HMPP_API int hmpp_readDataSectionFromHWA(hmpp_handle_t * __h, int major, int minor, void * data, const __hmppcg_DataSection *section, const size_t * size, size_t elsize, int dim, int async) { switch(HMPPCG_POP_HASH(major,minor)) { case HMPPCG_POP_HASH(1,0): // a@hmpp_codelet__runGemm { return 
__h->__arg0->downloadSection(data,section,async!=0); } case HMPPCG_POP_HASH(1,1): // b@hmpp_codelet__runGemm { return __h->__arg1->downloadSection(data,section,async!=0); } case HMPPCG_POP_HASH(1,2): // c@hmpp_codelet__runGemm { return __h->__arg2->downloadSection(data,section,async!=0); } default: return -1; } } // --------------------------------------------------------------------------- // hmpp_writeDataSectionToHWA() // --------------------------------------------------------------------------- HMPP_API int hmpp_writeDataSectionToHWA(hmpp_handle_t * __h, int major, int minor, const void * data, const __hmppcg_DataSection *section, const size_t * size, size_t elsize, int dim, int async) { switch(HMPPCG_POP_HASH(major,minor)) { case HMPPCG_POP_HASH(1,0): // a@hmpp_codelet__runGemm { return __h->__arg0->uploadSection(data,section,async!=0); } case HMPPCG_POP_HASH(1,1): // b@hmpp_codelet__runGemm { return __h->__arg1->uploadSection(data,section,async!=0); } case HMPPCG_POP_HASH(1,2): // c@hmpp_codelet__runGemm { return __h->__arg2->uploadSection(data,section,async!=0); } default: return -1; } } // --------------------------------------------------------------------------- // hmpp_waitForWriteTransfer() // --------------------------------------------------------------------------- HMPP_API int hmpp_waitForWriteTransfer(hmpp_handle_t * __h, int major, int minor) { switch(HMPPCG_POP_HASH(major,minor)) { case HMPPCG_POP_HASH(1,0): // a@hmpp_codelet__runGemm { return __h->__arg0->waitTransfer(); } case HMPPCG_POP_HASH(1,1): // b@hmpp_codelet__runGemm { return __h->__arg1->waitTransfer(); } case HMPPCG_POP_HASH(1,2): // c@hmpp_codelet__runGemm { return __h->__arg2->waitTransfer(); } default: return -1; } } // --------------------------------------------------------------------------- // hmpp_waitForReadTransfer() // --------------------------------------------------------------------------- HMPP_API int hmpp_waitForReadTransfer(hmpp_handle_t * __h, int major, int minor) { switch(HMPPCG_POP_HASH(major,minor)) { case HMPPCG_POP_HASH(1,0): // a@hmpp_codelet__runGemm { return __h->__arg0->waitTransfer(); } case HMPPCG_POP_HASH(1,1): // b@hmpp_codelet__runGemm { return __h->__arg1->waitTransfer(); } case HMPPCG_POP_HASH(1,2): // c@hmpp_codelet__runGemm { return __h->__arg2->waitTransfer(); } default: return -1; } } // --------------------------------------------------------------------------- // hmpp_codeletsAreReentrant() // --------------------------------------------------------------------------- HMPP_API int hmpp_codeletsAreReentrant() { return 0; } // --------------------------------------------------------------------------- // hmpp_start() // --------------------------------------------------------------------------- HMPP_API int hmpp_start(hmpp_handle_t * __h, int __id, int __async) { int status = 0; switch(__id) { case 1: // hmpp_codelet__runGemm(__arg0,__arg1,__arg2) hmpp_codelet__runGemm(status, __h, __h->devProp, __h->kernel_stream, __h->kernel_event, (*__h->__arg0), (*__h->__arg1), (*__h->__arg2)); return status; } return -1; } // --------------------------------------------------------------------------- // hmpp_wait() // --------------------------------------------------------------------------- HMPP_API int hmpp_wait(hmpp_handle_t * __h,int codelet_id) { return CHECK_STATUS(hipStreamSynchronize(__h->kernel_stream)); } // --------------------------------------------------------------------------- // hmpp_version() // 
--------------------------------------------------------------------------- HMPP_API int hmpp_version() { #ifndef HMPP_RUNTIME_TARGET_VERSION #define HMPP_RUNTIME_TARGET_VERSION(major,minor)((major << 16) | (minor << 8)) #endif return HMPP_RUNTIME_TARGET_VERSION(2,5); } //
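The generated module above computes C = alpha*A*B + beta*C on 512x512 floats (alpha = 32412, beta = 2123) and exposes the accelerator through the `hmpp_*` entry points it defines. The HMPP runtime normally drives those entry points itself; the snippet below is only a minimal hand-written sketch of the intended call sequence (create, allocate, upload, start, wait, download, free). The buffers `A`, `B`, `C` and the function `run_gemm_once` are hypothetical caller-side names, not part of the generated code.

// Hypothetical host-side driver for the entry points above (sketch only).
static float A[512 * 512], B[512 * 512], C[512 * 512];

int run_gemm_once()
{
  hmpp_handle_t *h = hmpp_createInstance();
  if (!h) return -1;

  // Each codelet argument is a 512x512 matrix of floats.
  size_t dims[2] = { 512, 512 };

  // (major, minor) ids follow HMPPCG_POP_HASH: (1,0)=a, (1,1)=b, (1,2)=c.
  if (hmpp_allocateInputOnHWA(h, 1, 0, dims, sizeof(float), 2) != 0) return -1;
  if (hmpp_allocateInputOnHWA(h, 1, 1, dims, sizeof(float), 2) != 0) return -1;
  if (hmpp_allocateInOutOnHWA(h, 1, 2, dims, sizeof(float), 2) != 0) return -1;

  // Synchronous uploads (async = 0).
  hmpp_writeDataToHWA(h, 1, 0, A, dims, sizeof(float), 2, 0);
  hmpp_writeDataToHWA(h, 1, 1, B, dims, sizeof(float), 2, 0);
  hmpp_writeDataToHWA(h, 1, 2, C, dims, sizeof(float), 2, 0);

  // Codelet id 1 is hmpp_codelet__runGemm; hmpp_wait() drains the kernel stream.
  int status = hmpp_start(h, 1, 0);
  if (status == 0) status = hmpp_wait(h, 1);

  // Read back the INOUT argument c, then release the handle.
  hmpp_readDataFromHWA(h, 1, 2, C, dims, sizeof(float), 2, 0);
  hmpp_freeInstance(h);
  return status;
}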
ff85c61dd0e6fc93d5f39f749efa8c054d699e9a.cu
// ** Original codelet code ** // // #pragma hmppcg cpiparam __arg0 IN a%hmpp_codelet__runGemm: (1, 0) // #pragma hmppcg cpiparam __arg1 IN b%hmpp_codelet__runGemm: (1, 1) // #pragma hmppcg cpiparam __arg2 INOUT c%hmpp_codelet__runGemm: (1, 2) // // #pragma hmppcg cpicall hmpp_codelet__runGemm(__arg0, __arg1, __arg2): 1 // // // /* begin of extracted source code for directive set "gemm" */ // // // # 30 "gemm.c" // typedef float DATA_TYPE; // // // # 35 "gemm.c" // void hmpp_codelet__runGemm(DATA_TYPE a[512][512], DATA_TYPE b[512][512], DATA_TYPE c[512][512]) // { // int i, j, k; // DATA_TYPE p_alpha = 32412; // DATA_TYPE p_beta = 2123; // // // #pragma hmppcg grid blocksize 32 X 8 // # 12 "<preprocessor>" // # 43 "gemm.c" // #pragma hmppcg permute i, j, k // # 15 "<preprocessor>" // # 44 "gemm.c" // for (i = 0 ; i < 512 ; i++) // { // for (j = 0 ; j < 512 ; j++) // { // c[i][j] *= p_beta; // // for (k = 0 ; k < 512 ; ++k) // { // c[i][j] += p_alpha * a[i][k] * b[k][j]; // } // } // } // } // // // /* end of extracted source code for directive set "gemm" */ // // // // ** End of original codelet codelet ** #include <stdio.h> #include <stdlib.h> #include <string.h> #include <math.h> #ifdef _MSC_VER # define HMPPCG_RESTRICT typedef __int8 int8_t; typedef unsigned __int8 uint8_t; typedef __int16 int16_t; typedef unsigned __int16 uint16_t; typedef __int32 int32_t; typedef unsigned __int32 uint32_t; typedef __int64 int64_t; typedef unsigned __int64 uint64_t; # ifdef _WIN64 typedef int64_t intptr_t; # else typedef int32_t intptr_t; # endif #else # if defined(__GNUC__) || defined(__RESTRICT) # define HMPPCG_RESTRICT __restrict # else # define HMPPCG_RESTRICT # endif # include <stdint.h> #endif // Dynamic array typedef struct __hmppcg_array_struct { void *array; size_t *size; size_t elsize; } __hmppcg_array_t; // Data section typedef struct __hmppcg_DataSection { size_t from; size_t to; size_t step; } __hmppcg_DataSection; #include <cuda.h> #if CUDART_VERSION < 2000 #error Bad CUDA Runtime version. CUDA Toolkit 2.0+ required. #endif #define HMPP_CONSTMEM_OFFSET 0 #include <map> #include <string> // ---------------------------------------------------------------------------- // HMPP CUDA support classes // ---------------------------------------------------------------------------- #ifndef __HMPP_CUDADATA_H__ #define __HMPP_CUDADATA_H__ #ifndef HMPPCG_WARP_SIZE #define HMPPCG_WARP_SIZE 32 #endif enum CopyKind { HostToHost = 0, HostToDevice = 1, DeviceToHost = 2, DeviceToDevice = 3, }; inline int hmppcg_check_status(const char *file,int line,cudaError_t status) { if(status != cudaSuccess) { fprintf(stderr, "%s:%d CUDA Error: %s\n", file, line, cudaGetErrorString(status)); return -1; } return 0; } #define CHECK_STATUS(X) hmppcg_check_status(__FILE__,__LINE__,(X)) #define HMPP_CHECK_GRID_BOUNDARY(x) \ if(x>65535){\ fprintf(stderr, "%s:%d Grid Dimension Error: '%s' exceeds the 65535 dimension limit. Please modify the grid size configuration (see the hmppcg grid blocksize pragma) or switch to 2D gridification\n", __FILE__,__LINE__, #x);\ exit(-1) ;\ } #define HMPP_CHECK_BLOCK_BOUNDARY(x) \ if(x > devProp.maxThreadsPerBlock){ \ fprintf(stderr, "%s:%d Number of threads per block exceeds for the HWA: it is '%d' and HWA supports up to '%d'. 
Please modify the block size configuration (see the hmppcg grid blocksize pragma)\n", __FILE__,__LINE__, x, devProp.maxThreadsPerBlock); \ exit(-1) ; \ } // ---------------------------------------------------------------------------- // class DefaultPolicy // ---------------------------------------------------------------------------- struct DefaultPolicy { public: DefaultPolicy() { } virtual ~DefaultPolicy() { } int deviceAlloc(void **ptr,size_t size) { if( CHECK_STATUS(cudaStreamCreate(&stream_)) != 0 ) return -1; if( CHECK_STATUS(cudaMalloc(ptr,size)) != 0 ) return -1; #if CUDA_VERSION >= 3020 if( CHECK_STATUS(cudaEventCreateWithFlags(&event, cudaEventDisableTiming | cudaEventBlockingSync)) != 0) return -1; #else if( CHECK_STATUS(cudaEventCreateWithFlags(&event, cudaEventBlockingSync)) != 0) return -1; #endif return 0; } int deviceFree(void *ptr) { if( CHECK_STATUS(cudaStreamDestroy(stream_)) != 0) return -1; if( CHECK_STATUS(cudaFree(ptr)) != 0) return -1; if( CHECK_STATUS(cudaEventDestroy(event)) != 0) return -1; return 0; } int deviceMemcpy(void *dst,const void *src,size_t size,CopyKind kind,bool async) { static cudaMemcpyKind cudaKind[] = {cudaMemcpyHostToHost, cudaMemcpyHostToDevice, cudaMemcpyDeviceToHost, cudaMemcpyDeviceToDevice }; if(async) { return CHECK_STATUS(cudaMemcpyAsync(dst,src,size,cudaKind[kind],stream_)); } else { return CHECK_STATUS(cudaMemcpy(dst,src,size,cudaKind[kind])); } } int makeStreamWait(cudaStream_t wstream) { int status; status = CHECK_STATUS(cudaEventRecord(event, stream_)); if (status != 0) return status; #if CUDA_VERSION >= 3020 return CHECK_STATUS(cudaStreamWaitEvent(wstream, event, 0)); #else return CHECK_STATUS(cudaEventSynchronize(event)); #endif } int waitOnEvent(cudaEvent_t wevent) { #if CUDA_VERSION >= 3020 return CHECK_STATUS(cudaStreamWaitEvent(stream_, wevent, 0)); #else return CHECK_STATUS(cudaEventSynchronize(wevent)); #endif } int deviceWait() { return CHECK_STATUS(cudaStreamSynchronize(stream_)); } private: cudaStream_t stream_; cudaEvent_t event; }; // ---------------------------------------------------------------------------- // class ConstantPolicy // ---------------------------------------------------------------------------- #ifndef HMPP_CONSTMEM_SIZE #define HMPP_CONSTMEM_SIZE 2048 #endif __constant__ int64_t hmpp_constmem[HMPP_CONSTMEM_SIZE / 8]; /// Shared memory array is aligned on 64 bit thanks to that (to avoid an nvcc compilation error) extern __shared__ int64_t hmpp_sharedmem[]; struct ConstantPolicy { public: ConstantPolicy() { static bool initialized = false; if(!initialized) { next_offset_ = HMPP_CONSTMEM_OFFSET; initialized = true; } offset_ = -1; } virtual ~ConstantPolicy() { } void setStaticOffset(int offset) { offset_ = offset; while(offset_ % 8) offset_ ++; } int deviceAlloc(void **ptr, size_t size) { #if CUDA_VERSION >= 3020 if( CHECK_STATUS(cudaEventCreateWithFlags(&event, cudaEventDisableTiming | cudaEventBlockingSync)) != 0) return -1; #else if( CHECK_STATUS(cudaEventCreateWithFlags(&event, cudaEventBlockingSync)) != 0) return -1; #endif if(offset_ != -1) { if((offset_ + size) >= HMPP_CONSTMEM_SIZE) return -1; (*ptr) = (void *)offset_; return 0; } if((next_offset_ + size) >= HMPP_CONSTMEM_SIZE) return -1; (*ptr) = (void *)next_offset_; next_offset_ += size; return 0; } int deviceFree(void *ptr) { return 0; } int deviceMemcpy(void *dst,const void *src,size_t size,CopyKind kind,bool async) { size_t offset; switch(kind) { case HostToDevice: offset = (size_t)dst; return 
CHECK_STATUS(cudaMemcpyToSymbol(hmpp_constmem,src,size,offset,cudaMemcpyHostToDevice)); case DeviceToHost: offset = (size_t)src; return CHECK_STATUS(cudaMemcpyFromSymbol(dst,hmpp_constmem,size,offset,cudaMemcpyDeviceToHost)); default: return -1; } } int makeStreamWait(cudaStream_t wstream) { int status; /* stream 0 at the moment */ status = CHECK_STATUS(cudaEventRecord(event, 0)); if (status != 0) return status; #if CUDA_VERSION >= 3020 return CHECK_STATUS(cudaStreamWaitEvent(wstream, event, 0)); #else return CHECK_STATUS(cudaEventSynchronize(event)); #endif } int waitOnEvent(cudaEvent_t wevent) { /* stream 0 at the moment */ #if CUDA_VERSION >= 3020 return CHECK_STATUS(cudaStreamWaitEvent(0, wevent, 0)); #else return CHECK_STATUS(cudaEventSynchronize(wevent)); #endif } int deviceWait() { return 0; } private: static size_t next_offset_; int offset_; cudaEvent_t event; }; size_t ConstantPolicy::next_offset_; // ---------------------------------------------------------------------------- // class Lazy // ---------------------------------------------------------------------------- template <typename Policy> struct Lazy { char * value; bool valid; bool allocated; void ** devaddr; Policy * policy; size_t size; Lazy(size_t elem_size) { value = new char[elem_size]; } ~Lazy() { delete[] value; } int requireDeviceAlloc() { if(!allocated) { allocated = true; return policy->deviceAlloc(devaddr,size); } else { return 0; } } }; // ---------------------------------------------------------------------------- // class Element // ---------------------------------------------------------------------------- template <typename T,typename Policy> struct Element { Element(void * const * device_addr, size_t offset, Policy *policy, Lazy<Policy> * lazy) : device_addr_(device_addr) , offset_(offset), policy_(policy), lazy_(lazy) { } Element &operator=(const T & value) { if(lazy_) { *((T *)(lazy_->value)) = value; lazy_->valid = true; return *this; } if(lazy_) lazy_->requireDeviceAlloc(); policy_->deviceMemcpy(((char*)(*device_addr_)) + offset_,(const char*)&value,ElemSize,HostToDevice,false); return *this; } Element &operator=(const Element & src) { if(src.lazy_ && src.lazy_->valid) { lazy_->valid = true; *((T *)(lazy_->value)) = *((T *)(src.lazy_->value)); return *this; } if(lazy_) lazy_->requireDeviceAlloc(); if(src.lazy_) src.lazy_->requireDeviceAlloc(); policy_->deviceMemcpy(((char*)(*device_addr_)) + offset_,((const char*)(*src.device_addr_)) + src.offset_, ElemSize,DeviceToDevice,false); if(lazy_) { lazy_->valid = false; } return *this; } operator T() { if(lazy_ && lazy_->valid) return *((T *)(lazy_->value)); T res; if(lazy_) lazy_->requireDeviceAlloc(); policy_->deviceMemcpy(&res,((const char*)(*device_addr_)) + offset_,ElemSize,DeviceToHost,false); if(lazy_) { *((T *)(lazy_->value)) = res; lazy_->valid = true; } return res; } typedef T Type; enum { ElemSize = sizeof(T) }; private: size_t offset_; void *const* device_addr_; Policy *policy_; public: Lazy<Policy> * lazy_; }; enum DataFlags { DEFAULT = 0x0, LAZY = 0x1 }; // ---------------------------------------------------------------------------- // class Data // ---------------------------------------------------------------------------- template <typename T,typename Policy> class Data { public: typedef T Type; typedef Element<T,Policy> ElementType; enum { ElemSize = sizeof(T) }; Data(const char * name, unsigned int flags = DEFAULT) : name_(name), flags_(flags), dim_(0), sizes_(0), size_(0), host_addr_(0), device_addr_(0) { policy_ = new Policy; if(flags_ & 
LAZY) { lazy_ = new Lazy<Policy>(ElemSize); lazy_->valid = false; lazy_->devaddr = 0; lazy_->policy = policy_; } else lazy_ = 0; } ~Data() { free(); delete policy_; if(lazy_) delete lazy_; } int allocate(unsigned int dim, size_t idx0 = 0, size_t idx1 = 0, size_t idx2 = 0, size_t idx3 = 0, size_t idx4 = 0, size_t idx5 = 0, size_t idx6 = 0, size_t idx7 = 0, size_t idx8 = 0, size_t idx9 = 0, size_t idxA = 0, size_t idxB = 0) { const size_t sizes[] = { idx0, idx1, idx2, idx3, idx4, idx5, idx6, idx7, idx8, idx9, idxA, idxB }; return allocate2(dim,sizes); } int allocate3(unsigned int dim_p, const size_t * sizes_p) { size_t sizes[2]; sizes[0] = 1; sizes[1] = 0; for(int d = 0 ; d < dim_p ; d++) { sizes[0] *= sizes_p[d]; } return allocate2(1, sizes); } int allocate2(unsigned int dim, const size_t * sizes) { dim_ = dim; sizes_ = new size_t[dim]; dimSizes_ = new size_t[dim]; size_ = ElemSize; for(int d=0;d<dim;d++) { sizes_[d] = sizes[d]; size_ *= sizes_[d]; size_t size = 1; for(int d2=d+1;d2<dim;d2++) size*=sizes[d2]; dimSizes_[d] = size; } if(lazy_) { lazy_->allocated = false; lazy_->devaddr = &device_addr_; lazy_->size = size_; return 0; } else return policy_->deviceAlloc(&device_addr_,size_); } int free() { if(sizes_) { delete [] sizes_; delete [] dimSizes_; sizes_ = 0; dim_ = 0; size_ = 0; } if(device_addr_) { if(policy_->deviceFree(device_addr_) != 0) return -1; device_addr_ = 0; } return 0; } int download(void * host_addr,bool async) { if(lazy_ && lazy_->valid) { *((T *)host_addr) = *((T *)(lazy_->value)); return 0; } if(lazy_) { lazy_->requireDeviceAlloc(); } int sts = policy_->deviceMemcpy(host_addr,device_addr_,size_,DeviceToHost,async); if(lazy_) { lazy_->valid = true; *((T *)(lazy_->value)) = *((T *)host_addr); } return sts; } int upload(const void * host_addr,bool async) { if(lazy_) { lazy_->valid = true; *((T *)(lazy_->value)) = * ((T *)host_addr); lazy_->requireDeviceAlloc(); } return policy_->deviceMemcpy(device_addr_,host_addr,size_,HostToDevice,async); } int downloadSection(void *host_addr,const __hmppcg_DataSection *sections,bool async) { return sectionCopy(host_addr,device_addr_,sections,DeviceToHost,async); } int uploadSection(const void *host_addr,const __hmppcg_DataSection *sections,bool async) { return sectionCopy(device_addr_,host_addr,sections,HostToDevice,async); } int makeStreamWait(cudaStream_t wstream) { if(lazy_) lazy_->requireDeviceAlloc(); return policy_->makeStreamWait(wstream); } int waitOnEvent(cudaEvent_t wevent) { return policy_->waitOnEvent(wevent); } int waitTransfer() { return policy_->deviceWait(); } ElementType operator()(size_t idx0 = 0, size_t idx1 = 0, size_t idx2 = 0, size_t idx3 = 0, size_t idx4 = 0, size_t idx5 = 0, size_t idx6 = 0, size_t idx7 = 0, size_t idx8 = 0, size_t idx9 = 0, size_t idxA = 0, size_t idxB = 0) { size_t sizes[] = { idx0, idx1, idx2, idx3, idx4, idx5, idx6, idx7, idx8, idx9, idxA, idxB }; return at(sizes); } ElementType at(size_t *idx) { size_t offset = idx[0]; return ElementType(&device_addr_,offset*ElemSize,policy_,lazy_); } template <typename Y> Element<Y,Policy> at(size_t offset) { return Element<Y,Policy>(&device_addr_,offset,policy_,lazy_); } ElementType operator=(const T & value) { ElementType res(&device_addr_,0,policy_,lazy_); res = value; return res; } ElementType operator=(const Data &data) { return operator=(data.value()); } T value() const { ElementType res(&device_addr_,0,policy_,lazy_); return (T)res; } operator T() { return value(); } T *getDeviceAddr() { if(lazy_) lazy_->requireDeviceAlloc(); if(lazy_ && 
lazy_->valid) { policy_->deviceMemcpy(device_addr_,lazy_->value,size_,HostToDevice,false); } return (T*)device_addr_; } void invalidateLazy() { if(lazy_) { lazy_->valid = false; } } private: Data(const Data &data) {} int sectionCopy(char *dst,const char *src,int offset,int cur, const __hmppcg_DataSection *sections,int lastdense,CopyKind kind,bool async) { int d; int size = 1; for(d=cur+1;d<dim_;d++) size *= sizes_[d]; if(cur<(lastdense-1)) { int x; for(x=sections[cur].from;x<=sections[cur].to;x+=sections[cur].step) if(sectionCopy(dst,src,offset+x*size,cur+1,sections,lastdense,kind,async) != 0) return -1; } else { int step = sections[cur].step; if(step == 1) { int start = (offset + sections[cur].from * size) * ElemSize; int total = (sections[cur].to - sections[cur].from + 1) * size * ElemSize; return policy_->deviceMemcpy(dst+start,src+start,total,kind,async); } else { int x; for(x=sections[cur].from;x<=sections[cur].to;x+=step) { int off = (offset + x * size) * ElemSize; if(policy_->deviceMemcpy(dst+off,src+off,size * ElemSize,kind,async) != 0) return -1; } } } return 0; } int sectionCopy(void *dst,const void *src, const __hmppcg_DataSection *sections,CopyKind kind,bool async) { int i; int lastdense = dim_; for (i = dim_ - 1 ; i >= 0 ; i --) { if ((sections[i].from == 0) && (sections[i].to == sizes_[i] - 1) && (sections[i].step == 1)) lastdense = i; else break; } return sectionCopy((char*)dst,(const char*)src,0,0,sections,lastdense,kind,async); } const char * name_; size_t flags_; void *device_addr_; void *host_addr_; size_t dim_; size_t *sizes_; size_t *dimSizes_; size_t size_; Lazy<Policy> * lazy_; public: Policy *policy_; }; // --------------------------------------------------------------------------- // User data // --------------------------------------------------------------------------- class UserData{ public: virtual ~UserData(){} UserData(){} }; #define __HMPPCG_COMPLEX_FLOAT_DEFINED typedef float2 __hmppcg_complex_float; #define __HMPPCG_COMPLEX_DOUBLE_DEFINED typedef double2 __hmppcg_complex_double; // --------------------------------------------------------------------------- // Allocatable Arrays // --------------------------------------------------------------------------- template <const size_t nb_dims> struct AArrayDesc { int lbounds_[nb_dims]; size_t sizes_[nb_dims]; size_t wholesize_; }; #ifndef __HMPPCG_ALLOCATABLE_ARRAY_ALLOCATE #define __HMPPCG_ALLOCATABLE_ARRAY_ALLOCATE( var, type, nb_dims, ... 
) \ { int alloc_ranges[] = { __VA_ARGS__ }; \ int hmppcg_alloc_i; \ var ## _aarray_desc.wholesize_ = 1; \ for(hmppcg_alloc_i=0; hmppcg_alloc_i<nb_dims; hmppcg_alloc_i++){ \ int hmppcg_alloc_first = alloc_ranges[2*hmppcg_alloc_i]; \ int hmppcg_alloc_last = alloc_ranges[2*hmppcg_alloc_i + 1]; \ int hmppcg_alloc_size = hmppcg_alloc_last - hmppcg_alloc_first + 1; \ var ## _aarray_desc.lbounds_[hmppcg_alloc_i] = hmppcg_alloc_first; \ var ## _aarray_desc.sizes_[hmppcg_alloc_i] = hmppcg_alloc_size; \ var ## _aarray_desc.wholesize_ *= hmppcg_alloc_size; \ } \ if((hmppcg_status_ = var.allocate2(nb_dims, var ## _aarray_desc.sizes_))) \ return; \ } #endif #ifndef __HMPPCG_ALLOCATABLE_ARRAY_DEALLOCATE #define __HMPPCG_ALLOCATABLE_ARRAY_DEALLOCATE( var ) \ { \ var.free(); \ } #endif #ifndef __HMPPCG_ALLOCATABLE_ARRAY_ALLOCATED #define __HMPPCG_ALLOCATABLE_ARRAY_ALLOCATED( var ) \ (var.getDeviceAddr() != NULL) #endif //__HMPPCG_ALLOCATABLE_ARRAY_ALLOCATED #ifndef __HMPPCG_ALLOCATABLE_ARRAY_WHOLESIZE #define __HMPPCG_ALLOCATABLE_ARRAY_WHOLESIZE( var ) \ var ## _aarray_desc.wholesize_ #endif //__HMPPCG_ALLOCATABLE_ARRAY_WHOLESIZE #ifndef __HMPPCG_ALLOCATABLE_ARRAY_SIZE #define __HMPPCG_ALLOCATABLE_ARRAY_SIZE( var, d ) \ var ## _aarray_desc.sizes_[d] #endif //__HMPPCG_ALLOCATABLE_ARRAY_SIZE #ifndef __HMPPCG_ALLOCATABLE_ARRAY_LBOUND #define __HMPPCG_ALLOCATABLE_ARRAY_LBOUND( var, d ) \ var ## _aarray_desc.lbounds_[d] #endif //__HMPPCG_ALLOCATABLE_ARRAY_LBOUND #ifndef __HMPPCG_ALLOCATABLE_ARRAY_UBOUND #define __HMPPCG_ALLOCATABLE_ARRAY_UBOUND( var, d ) \ (var ## _aarray_desc.sizes_[d] + var ## _aarray_desc.lbounds_[d] - 1) #endif //__HMPPCG_ALLOCATABLE_ARRAY_UBOUND #ifndef __HMPP_INT_POW_FUNC #define __HMPP_INT_POW_FUNC(func_ext_name, func_type) \ __device__ func_type hmpp_pow ##func_ext_name ( func_type base, func_type exp ) \ { \ if(exp < 0) \ return 0; \ func_type result = 1; \ while (exp) \ { \ if (exp & 1) \ result *= base; \ exp >>= 1; \ base *= base; \ } \ return result; \ } #endif __HMPP_INT_POW_FUNC( i64, int64_t ); __HMPP_INT_POW_FUNC( i32, int32_t ); __HMPP_INT_POW_FUNC( i16, int16_t ); __HMPP_INT_POW_FUNC( i8, int8_t ); #ifndef __HMPP_UINT_POW_FUNC #define __HMPP_UINT_POW_FUNC(func_ext_name, func_type) \ __device__ func_type hmpp_pow ##func_ext_name ( func_type base, func_type exp ) \ { \ func_type result = 1; \ while (exp) \ { \ if (exp & 1) \ result *= base; \ exp >>= 1; \ base *= base; \ } \ return result; \ } #endif __HMPP_UINT_POW_FUNC( ui64, uint64_t ); __HMPP_UINT_POW_FUNC( ui32, uint32_t ); __HMPP_UINT_POW_FUNC( ui16, uint16_t ); __HMPP_UINT_POW_FUNC( ui8, uint8_t ); #endif // __HMPP_CUDADATA_H__ #ifndef __HMPPCG_COMPLEX_DOUBLE_DEFINED #define __HMPPCG_COMPLEX_DOUBLE_DEFINED typedef struct { double x; double y; }__hmppcg_complex_double; #endif /* __HMPPCG_COMPLEX_DOUBLE_DEFINED */ #ifndef __HMPPCG_COMPLEX_FLOAT_DEFINED #define __HMPPCG_COMPLEX_FLOAT_DEFINED typedef struct { float x; float y; }__hmppcg_complex_float; #endif /* __HMPPCG_COMPLEX_FLOAT_DEFINED */ template <const unsigned int blockDimX__, const unsigned int blockDimY__> __global__ void hmpp_codelet__runGemm_loop0_( float * HMPPCG_RESTRICT a, float * HMPPCG_RESTRICT b, float * HMPPCG_RESTRICT c) { int32_t j_1; int32_t i_1; j_1 = (blockDimX__ * blockIdx.x + threadIdx.x); i_1 = (blockDimY__ * blockIdx.y + threadIdx.y); bool __hmppcg_guard = (!((j_1 <= 511) & (i_1 <= 511))); if(__hmppcg_guard) { goto __hmppcg_label1; }; { int32_t __hmppcg_end, k_1; for (k_1 = 0, __hmppcg_end = 511; k_1 <= __hmppcg_end; k_1 += 1) { if (k_1 == 0) { 
c[(i_1 * 512) + j_1] = (c[(i_1 * 512) + j_1]) * ((float) (2123)); } c[(i_1 * 512) + j_1] = (c[(i_1 * 512) + j_1]) + ((((float) (32412)) * (a[(i_1 * 512) + k_1])) * (b[(k_1 * 512) + j_1])); } } __hmppcg_label1:; } void hmpp_codelet__runGemm( int &hmppcg_status_, void * __h, const cudaDeviceProp &devProp, cudaStream_t kernel_stream, cudaEvent_t kernel_event, Data<float,DefaultPolicy> & a, Data<float,DefaultPolicy> & b, Data<float,DefaultPolicy> & c) { if(1LL) { unsigned int gridDimX__ = 16LL; HMPP_CHECK_GRID_BOUNDARY(gridDimX__); unsigned int gridDimY__ = 64LL; HMPP_CHECK_GRID_BOUNDARY(gridDimY__); dim3 dim_grid(gridDimX__, gridDimY__); const unsigned int blockDimX__ = 32LL; const unsigned int blockDimY__ = 8LL; HMPP_CHECK_BLOCK_BOUNDARY(blockDimX__*blockDimY__); #if CUDA_VERSION >= 3020 a.makeStreamWait(kernel_stream); b.makeStreamWait(kernel_stream); c.makeStreamWait(kernel_stream); #else if ((hmppcg_status_ = CHECK_STATUS(cudaThreadSynchronize()))) return; #endif dim3 dim_block(blockDimX__, blockDimY__); hmpp_codelet__runGemm_loop0_<blockDimX__, blockDimY__><<<dim_grid, dim_block, 0LL, kernel_stream>>>(a.getDeviceAddr(), b.getDeviceAddr(), c.getDeviceAddr()); if ((hmppcg_status_ = CHECK_STATUS(cudaGetLastError()))) return; #if CUDA_VERSION >= 3020 if((hmppcg_status_ = CHECK_STATUS(cudaEventRecord(kernel_event, kernel_stream)))) return; a.waitOnEvent(kernel_event); b.waitOnEvent(kernel_event); c.waitOnEvent(kernel_event); #else if ((hmppcg_status_ = CHECK_STATUS(cudaThreadSynchronize()))) return; #endif }; } // HMPP_API #ifdef __cplusplus #define HMPP_EXTERN extern "C" #else #define HMPP_EXTERN #endif #ifdef _WIN32 #define HMPP_EXPORT __declspec(dllexport) #define HMPP_INLINE __inline #else #define HMPP_EXPORT #define HMPP_INLINE inline #endif #define HMPP_API HMPP_EXTERN HMPP_EXPORT // HMPPCG_POP_HASH #define HMPPCG_POP_HASH(major,minor) (((major)<<16)|(minor)) // --------------------------------------------------------------------------- // HMPP handle // --------------------------------------------------------------------------- typedef struct hmpp_handle_struct { Data<float,DefaultPolicy> * __arg0; Data<float,DefaultPolicy> * __arg1; Data<float,DefaultPolicy> * __arg2; cudaDeviceProp devProp; cudaStream_t kernel_stream; cudaEvent_t kernel_event; std::map<std::string,UserData*> map_user_data; } hmpp_handle_t; // --------------------------------------------------------------------------- // hmpp_createInstance() // --------------------------------------------------------------------------- HMPP_API hmpp_handle_t * hmpp_createInstance() { hmpp_handle_t * __h = new hmpp_handle_t; if(!__h) return 0; if(CHECK_STATUS(cudaStreamCreate(&__h->kernel_stream)) != 0) return NULL; #if CUDA_VERSION >= 3020 if(CHECK_STATUS(cudaEventCreateWithFlags(&__h->kernel_event, cudaEventDisableTiming | cudaEventBlockingSync)) != 0) return NULL; #else if(CHECK_STATUS(cudaEventCreateWithFlags(&__h->kernel_event, cudaEventBlockingSync)) != 0) return NULL; #endif __h->__arg0 = NULL; __h->__arg1 = NULL; __h->__arg2 = NULL; int device; cudaGetDevice(&device); cudaGetDeviceProperties(&(__h->devProp), device); return __h; } // --------------------------------------------------------------------------- // hmpp_freeInstance() // --------------------------------------------------------------------------- HMPP_API int hmpp_freeInstance(hmpp_handle_t * __h) { delete __h->__arg0; delete __h->__arg1; delete __h->__arg2; cudaStreamDestroy(__h->kernel_stream); cudaEventDestroy(__h->kernel_event); __h->kernel_stream = 0; 
for(std::map<std::string,UserData*>::const_iterator it = __h->map_user_data.begin(); it != __h->map_user_data.end(); it++) { delete it->second; } delete(__h); return 0; } // --------------------------------------------------------------------------- // hmpp_allocateOnHWA() // --------------------------------------------------------------------------- HMPP_API int hmpp_allocateOnHWA(hmpp_handle_t * __h, int major, int minor, const size_t * size, size_t elsize, int dim) { switch(HMPPCG_POP_HASH(major,minor)) { case HMPPCG_POP_HASH(1,0): // a@hmpp_codelet__runGemm { __h->__arg0 = new Data<float,DefaultPolicy>("__arg0", DEFAULT); return __h->__arg0->allocate2(dim, size); } case HMPPCG_POP_HASH(1,1): // b@hmpp_codelet__runGemm { __h->__arg1 = new Data<float,DefaultPolicy>("__arg1", DEFAULT); return __h->__arg1->allocate2(dim, size); } case HMPPCG_POP_HASH(1,2): // c@hmpp_codelet__runGemm { __h->__arg2 = new Data<float,DefaultPolicy>("__arg2", DEFAULT); return __h->__arg2->allocate2(dim, size); } default: return -1; } } HMPP_API int hmpp_allocateOutputOnHWA(hmpp_handle_t * __h, int major, int minor, const size_t * size, size_t elsize, int dim) { return hmpp_allocateOnHWA(__h, major, minor, size, elsize, dim); } HMPP_API int hmpp_allocateInputOnHWA(hmpp_handle_t * __h, int major, int minor, const size_t * size, size_t elsize, int dim) { return hmpp_allocateOnHWA(__h, major, minor, size, elsize, dim); } HMPP_API int hmpp_allocateInOutOnHWA(hmpp_handle_t * __h, int major, int minor, const size_t * size, size_t elsize, int dim) { return hmpp_allocateOnHWA(__h, major, minor, size, elsize, dim); } // --------------------------------------------------------------------------- // hmpp_readDataFromHWA() // --------------------------------------------------------------------------- HMPP_API int hmpp_readDataFromHWA(hmpp_handle_t * __h, int major, int minor, void * data, const size_t * size, size_t elsize, int dim, int async) { switch(HMPPCG_POP_HASH(major,minor)) { case HMPPCG_POP_HASH(1,0): // a@hmpp_codelet__runGemm { return __h->__arg0->download(data,async!=0); } case HMPPCG_POP_HASH(1,1): // b@hmpp_codelet__runGemm { return __h->__arg1->download(data,async!=0); } case HMPPCG_POP_HASH(1,2): // c@hmpp_codelet__runGemm { return __h->__arg2->download(data,async!=0); } default: return -1; } } // --------------------------------------------------------------------------- // hmpp_writeDataToHWA() // --------------------------------------------------------------------------- HMPP_API int hmpp_writeDataToHWA(hmpp_handle_t * __h, int major, int minor, const void * data, const size_t * size, size_t elsize, int dim, int async) { switch(HMPPCG_POP_HASH(major,minor)) { case HMPPCG_POP_HASH(1,0): // a@hmpp_codelet__runGemm { return __h->__arg0->upload(data,async!=0); } case HMPPCG_POP_HASH(1,1): // b@hmpp_codelet__runGemm { return __h->__arg1->upload(data,async!=0); } case HMPPCG_POP_HASH(1,2): // c@hmpp_codelet__runGemm { return __h->__arg2->upload(data,async!=0); } default: return -1; } } // --------------------------------------------------------------------------- // hmpp_readDataSectionFromHWA() // --------------------------------------------------------------------------- HMPP_API int hmpp_readDataSectionFromHWA(hmpp_handle_t * __h, int major, int minor, void * data, const __hmppcg_DataSection *section, const size_t * size, size_t elsize, int dim, int async) { switch(HMPPCG_POP_HASH(major,minor)) { case HMPPCG_POP_HASH(1,0): // a@hmpp_codelet__runGemm { return 
__h->__arg0->downloadSection(data,section,async!=0); } case HMPPCG_POP_HASH(1,1): // b@hmpp_codelet__runGemm { return __h->__arg1->downloadSection(data,section,async!=0); } case HMPPCG_POP_HASH(1,2): // c@hmpp_codelet__runGemm { return __h->__arg2->downloadSection(data,section,async!=0); } default: return -1; } } // --------------------------------------------------------------------------- // hmpp_writeDataSectionToHWA() // --------------------------------------------------------------------------- HMPP_API int hmpp_writeDataSectionToHWA(hmpp_handle_t * __h, int major, int minor, const void * data, const __hmppcg_DataSection *section, const size_t * size, size_t elsize, int dim, int async) { switch(HMPPCG_POP_HASH(major,minor)) { case HMPPCG_POP_HASH(1,0): // a@hmpp_codelet__runGemm { return __h->__arg0->uploadSection(data,section,async!=0); } case HMPPCG_POP_HASH(1,1): // b@hmpp_codelet__runGemm { return __h->__arg1->uploadSection(data,section,async!=0); } case HMPPCG_POP_HASH(1,2): // c@hmpp_codelet__runGemm { return __h->__arg2->uploadSection(data,section,async!=0); } default: return -1; } } // --------------------------------------------------------------------------- // hmpp_waitForWriteTransfer() // --------------------------------------------------------------------------- HMPP_API int hmpp_waitForWriteTransfer(hmpp_handle_t * __h, int major, int minor) { switch(HMPPCG_POP_HASH(major,minor)) { case HMPPCG_POP_HASH(1,0): // a@hmpp_codelet__runGemm { return __h->__arg0->waitTransfer(); } case HMPPCG_POP_HASH(1,1): // b@hmpp_codelet__runGemm { return __h->__arg1->waitTransfer(); } case HMPPCG_POP_HASH(1,2): // c@hmpp_codelet__runGemm { return __h->__arg2->waitTransfer(); } default: return -1; } } // --------------------------------------------------------------------------- // hmpp_waitForReadTransfer() // --------------------------------------------------------------------------- HMPP_API int hmpp_waitForReadTransfer(hmpp_handle_t * __h, int major, int minor) { switch(HMPPCG_POP_HASH(major,minor)) { case HMPPCG_POP_HASH(1,0): // a@hmpp_codelet__runGemm { return __h->__arg0->waitTransfer(); } case HMPPCG_POP_HASH(1,1): // b@hmpp_codelet__runGemm { return __h->__arg1->waitTransfer(); } case HMPPCG_POP_HASH(1,2): // c@hmpp_codelet__runGemm { return __h->__arg2->waitTransfer(); } default: return -1; } } // --------------------------------------------------------------------------- // hmpp_codeletsAreReentrant() // --------------------------------------------------------------------------- HMPP_API int hmpp_codeletsAreReentrant() { return 0; } // --------------------------------------------------------------------------- // hmpp_start() // --------------------------------------------------------------------------- HMPP_API int hmpp_start(hmpp_handle_t * __h, int __id, int __async) { int status = 0; switch(__id) { case 1: // hmpp_codelet__runGemm(__arg0,__arg1,__arg2) hmpp_codelet__runGemm(status, __h, __h->devProp, __h->kernel_stream, __h->kernel_event, (*__h->__arg0), (*__h->__arg1), (*__h->__arg2)); return status; } return -1; } // --------------------------------------------------------------------------- // hmpp_wait() // --------------------------------------------------------------------------- HMPP_API int hmpp_wait(hmpp_handle_t * __h,int codelet_id) { return CHECK_STATUS(cudaStreamSynchronize(__h->kernel_stream)); } // --------------------------------------------------------------------------- // hmpp_version() // 
--------------------------------------------------------------------------- HMPP_API int hmpp_version() { #ifndef HMPP_RUNTIME_TARGET_VERSION #define HMPP_RUNTIME_TARGET_VERSION(major,minor)((major << 16) | (minor << 8)) #endif return HMPP_RUNTIME_TARGET_VERSION(2,5); } //
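Both versions of the module rely on the same event-based ordering idiom in their `#if CUDA_VERSION >= 3020` branches: `makeStreamWait()` records an event on a data stream and makes the kernel stream wait on it, and `waitOnEvent()` does the reverse once the kernel event is recorded, so transfers and kernels are ordered without a full device synchronization. The pattern in isolation, as a minimal self-contained sketch (the stream names are illustrative):

#include <cuda_runtime.h>

// Work queued on 'consumer' after this call will not begin until everything
// already queued on 'producer' has completed; the host is never blocked.
void order_streams(cudaStream_t producer, cudaStream_t consumer)
{
  cudaEvent_t ev;
  cudaEventCreateWithFlags(&ev, cudaEventDisableTiming);
  cudaEventRecord(ev, producer);        // mark the current tail of 'producer'
  cudaStreamWaitEvent(consumer, ev, 0); // 'consumer' waits for that mark
  cudaEventDestroy(ev);                 // legal once the wait is enqueued
}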
0903418ffca66fafa5742da88fea6a353946df96.hip
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>  /* rand(), srand(), abs() -- missing in the original */
#include <math.h>
#include <stdbool.h>
#include <time.h>    /* time(), clock_gettime() -- missing in the original */
#include <hip/hip_runtime.h>

#define H 10000
#define W 10000
#define K 3
#define N (H * W)
#define THREADSPERBLOCK 1
#define BLOCKSPERGRIDX ((H + THREADSPERBLOCK - 1) / THREADSPERBLOCK)
#define BLOCKSPERGRIDY ((W + THREADSPERBLOCK - 1) / THREADSPERBLOCK)

double kernel[K * K];
unsigned char input[N];
unsigned char output[N];
unsigned char outputGPU[N];

bool convolve2DSlow(unsigned char *in, unsigned char *out, int dataSizeX, int dataSizeY,
                    double *kernel, int kernelSizeX, int kernelSizeY);
__global__ void convolve2D_GPU(unsigned char *input, unsigned char *output, double *kernel);

int main()
{
    bool cpu = true;
    double elapsedTimeCPU;
    struct timespec t_start, t_end;
    float elapsedTimeGPU;
    hipEvent_t start, stop;
    hipEventCreate(&start);
    hipEventCreate(&stop);

    int i, j;
    srand(time(NULL));
    for (i = 0; i < N; i++) {
        input[i] = (unsigned char)(rand() % 255 + 1);
    }
    srand(time(NULL));
    for (i = 0; i < K * K; i++) {
        /* integer weights in [1,255], stored as doubles */
        kernel[i] = (unsigned char)(rand() % 255 + 1);
    }

    double *d_kernel;
    unsigned char *d_input;
    unsigned char *d_output;

    if (cpu) {
        clock_gettime(CLOCK_REALTIME, &t_start);
        convolve2DSlow(input, output, W, H, kernel, K, K);
        clock_gettime(CLOCK_REALTIME, &t_end);
        elapsedTimeCPU = (t_end.tv_sec - t_start.tv_sec) * 1000.0;
        elapsedTimeCPU += (t_end.tv_nsec - t_start.tv_nsec) / 1000000.0;
        printf("CPU elapsedTime: %lf ms\n", elapsedTimeCPU);
    }

    hipMalloc((void**)&d_input, N * sizeof(unsigned char));
    hipMalloc((void**)&d_kernel, K * K * sizeof(double));
    hipMalloc((void**)&d_output, N * sizeof(unsigned char));
    hipMemcpy(d_input, input, N * sizeof(unsigned char), hipMemcpyHostToDevice);
    hipMemcpy(d_kernel, kernel, K * K * sizeof(double), hipMemcpyHostToDevice);

    dim3 dimGrid(BLOCKSPERGRIDX, BLOCKSPERGRIDY, 1);
    dim3 dimBlock(THREADSPERBLOCK, THREADSPERBLOCK, 1);

    hipEventRecord(start, 0);
    hipLaunchKernelGGL(convolve2D_GPU, dimGrid, dimBlock, 0, 0, d_input, d_output, d_kernel);
    hipEventRecord(stop, 0);
    hipEventSynchronize(stop);
    hipEventElapsedTime(&elapsedTimeGPU, start, stop);
    printf("GPU time: %13f msec\n", elapsedTimeGPU);

    hipMemcpy(outputGPU, d_output, N * sizeof(unsigned char), hipMemcpyDeviceToHost);

    /* check results: compare via a signed absolute difference so a GPU value
       larger than the CPU value is also caught (the original unsigned-char
       subtraction compared against 0.00001 missed that direction) */
    if (cpu) {
        int pass = 1;
        for (i = 0; i < H; i++) {
            for (j = 0; j < W; j++) {
                if (abs((int)output[i * W + j] - (int)outputGPU[i * W + j]) > 0) {
                    pass = 0;
                    break;
                }
            }
        }
        if (pass == 0)
            printf("Test Fail!\n");
        else {
            printf("Test pass!\n");
            printf("GPU / CPU = %f\n", elapsedTimeCPU / elapsedTimeGPU);
        }
    }

    hipFree(d_input);
    hipFree(d_kernel);
    hipFree(d_output);
    return 0;
}

__global__ void convolve2D_GPU(unsigned char *input, unsigned char *output, double *kernel)
{
    int m, n, mm, nn;
    int kCenterX, kCenterY;
    double sum;
    int rowIndex, colIndex;
    int dataSizeX, dataSizeY, kernelSizeX, kernelSizeY;

    dataSizeX = W;
    dataSizeY = H;
    kernelSizeX = kernelSizeY = K;
    kCenterX = kernelSizeX / 2;
    kCenterY = kernelSizeY / 2;

    int x, y;
    int tx = blockIdx.x;
    int ty = gridDim.x * blockIdx.y;
    int tid = tx + ty;

    /* block-stride loop: with one thread per block, each block handles the
       pixels tid, tid + gridDim.x*gridDim.y, ... */
    while (tid < H * W) {
        sum = 0;
        x = tid % W;
        y = tid / W;
        for (m = 0; m < kernelSizeY; ++m) {
            mm = kernelSizeY - 1 - m;      /* row index of the flipped kernel */
            for (n = 0; n < kernelSizeX; ++n) {
                nn = kernelSizeX - 1 - n;  /* column index of the flipped kernel */
                rowIndex = y + (kCenterY - mm);
                colIndex = x + (kCenterX - nn);
                if (rowIndex >= 0 && rowIndex < dataSizeY && colIndex >= 0 && colIndex < dataSizeX)
                    sum += input[dataSizeX * rowIndex + colIndex] * kernel[kernelSizeX * m + n];
            }
        }
        output[tid] = (unsigned char)(fabs(sum) + 0.5f);
        tid = tid + gridDim.x * gridDim.y;
    }
}

bool convolve2DSlow(unsigned char *in, unsigned char *out, int dataSizeX, int dataSizeY,
                    double *kernel, int kernelSizeX, int kernelSizeY)
{
    int i, j, m, n, mm, nn;
    int kCenterX, kCenterY;
    double sum;
    int rowIndex, colIndex;

    if (!in || !out || !kernel) return false;
    if (dataSizeX <= 0 || kernelSizeX <= 0) return false;

    kCenterX = kernelSizeX / 2;
    kCenterY = kernelSizeY / 2;

    for (i = 0; i < dataSizeY; ++i) {
        for (j = 0; j < dataSizeX; ++j) {
            sum = 0;
            for (m = 0; m < kernelSizeY; ++m) {
                mm = kernelSizeY - 1 - m;
                for (n = 0; n < kernelSizeX; ++n) {
                    nn = kernelSizeX - 1 - n;
                    rowIndex = i + (kCenterY - mm);
                    colIndex = j + (kCenterX - nn);
                    if (rowIndex >= 0 && rowIndex < dataSizeY && colIndex >= 0 && colIndex < dataSizeX)
                        sum += in[dataSizeX * rowIndex + colIndex] * kernel[kernelSizeX * m + n];
                }
            }
            out[dataSizeX * i + j] = (unsigned char)(fabs(sum) + 0.5f);
        }
    }
    return true;
}
0903418ffca66fafa5742da88fea6a353946df96.cu
#include <stdio.h>
#include <stdlib.h>             // rand, srand
#include <time.h>               // time, clock_gettime, struct timespec
#include <math.h>
#include <stdbool.h>
#include <cuda_runtime.h>

#define H 10000
#define W 10000
#define K 3
#define N (H * W)
#define THREADSPERBLOCK 1
#define BLOCKSPERGRIDX ((H + THREADSPERBLOCK - 1) / THREADSPERBLOCK)
#define BLOCKSPERGRIDY ((W + THREADSPERBLOCK - 1) / THREADSPERBLOCK)

double kernel[K * K];
unsigned char input[N];
unsigned char output[N];
unsigned char outputGPU[N];

bool convolve2DSlow(unsigned char *in, unsigned char *out, int dataSizeX, int dataSizeY,
                    double *kernel, int kernelSizeX, int kernelSizeY);
__global__ void convolve2D_GPU(unsigned char *input, unsigned char *output, double *kernel);

int main()
{
    bool cpu = true;
    double elapsedTimeCPU;
    struct timespec t_start, t_end;
    float elapsedTimeGPU;
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    int i, j;
    srand(time(NULL));
    for (i = 0; i < N; i++) {
        input[i] = (unsigned char) (rand() % 255 + 1);
    }
    srand(time(NULL));
    for (i = 0; i < K * K; i++) {
        kernel[i] = (unsigned char) (rand() % 255 + 1);
    }

    double *d_kernel;
    unsigned char *d_input;
    unsigned char *d_output;

    if (cpu) {
        clock_gettime(CLOCK_REALTIME, &t_start);
        convolve2DSlow(input, output, W, H, kernel, K, K);
        clock_gettime(CLOCK_REALTIME, &t_end);
        elapsedTimeCPU = (t_end.tv_sec - t_start.tv_sec) * 1000.0;
        elapsedTimeCPU += (t_end.tv_nsec - t_start.tv_nsec) / 1000000.0;
        printf("CPU elapsedTime: %lf ms\n", elapsedTimeCPU);
    }

    cudaMalloc((void**)&d_input, N * sizeof(unsigned char));
    cudaMalloc((void**)&d_kernel, K * K * sizeof(double));
    cudaMalloc((void**)&d_output, N * sizeof(unsigned char));
    cudaMemcpy(d_input, input, N * sizeof(unsigned char), cudaMemcpyHostToDevice);
    cudaMemcpy(d_kernel, kernel, K * K * sizeof(double), cudaMemcpyHostToDevice);

    dim3 dimGrid(BLOCKSPERGRIDX, BLOCKSPERGRIDY, 1);
    dim3 dimBlock(THREADSPERBLOCK, THREADSPERBLOCK, 1);

    cudaEventRecord(start, 0);
    convolve2D_GPU<<<dimGrid, dimBlock>>>(d_input, d_output, d_kernel);
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&elapsedTimeGPU, start, stop);
    printf("GPU time: %13f msec\n", elapsedTimeGPU);

    cudaMemcpy(outputGPU, d_output, N * sizeof(unsigned char), cudaMemcpyDeviceToHost);

    // check results: CPU and GPU perform identical arithmetic, so outputs must match exactly
    if (cpu) {
        int pass = 1;
        for (i = 0; i < H && pass; i++) {
            for (j = 0; j < W; j++) {
                if (output[i * W + j] != outputGPU[i * W + j]) {
                    pass = 0;
                    break;
                }
            }
        }
        if (pass == 0)
            printf("Test Fail!\n");
        else {
            printf("Test pass!\n");
            printf("GPU / CPU = %f\n", elapsedTimeCPU / elapsedTimeGPU);
        }
    }
    return 0;
}

__global__ void convolve2D_GPU(unsigned char *input, unsigned char *output, double *kernel)
{
    int m, n, mm, nn;
    int kCenterX, kCenterY;
    double sum;
    int rowIndex, colIndex;
    int dataSizeX, dataSizeY, kernelSizeX, kernelSizeY;

    dataSizeX = W;
    dataSizeY = H;
    kernelSizeX = kernelSizeY = K;
    kCenterX = kernelSizeX / 2;
    kCenterY = kernelSizeY / 2;

    int x, y;
    int tx = blockIdx.x;
    int ty = gridDim.x * blockIdx.y;
    int tid = tx + ty;                       // one single-thread block per output pixel

    while (tid < H * W) {
        sum = 0;
        x = tid % W;
        y = tid / W;
        for (m = 0; m < kernelSizeY; ++m) {
            mm = kernelSizeY - 1 - m;        // row index of the flipped kernel
            for (n = 0; n < kernelSizeX; ++n) {
                nn = kernelSizeX - 1 - n;    // column index of the flipped kernel
                rowIndex = y + (kCenterY - mm);
                colIndex = x + (kCenterX - nn);
                if (rowIndex >= 0 && rowIndex < dataSizeY && colIndex >= 0 && colIndex < dataSizeX)
                    sum += input[dataSizeX * rowIndex + colIndex] * kernel[kernelSizeX * m + n];
            }
        }
        output[tid] = (unsigned char)(fabs(sum) + 0.5f);
        tid = tid + gridDim.x * gridDim.y;
    }
}

bool convolve2DSlow(unsigned char *in, unsigned char *out, int dataSizeX, int dataSizeY,
                    double *kernel, int kernelSizeX, int kernelSizeY)
{
    int i, j, m, n, mm, nn;
    int kCenterX, kCenterY;
    double sum;
    int rowIndex, colIndex;

    if (!in || !out || !kernel) return false;
    if (dataSizeX <= 0 || kernelSizeX <= 0) return false;

    kCenterX = kernelSizeX / 2;
    kCenterY = kernelSizeY / 2;

    for (i = 0; i < dataSizeY; ++i) {
        for (j = 0; j < dataSizeX; ++j) {
            sum = 0;
            for (m = 0; m < kernelSizeY; ++m) {
                mm = kernelSizeY - 1 - m;
                for (n = 0; n < kernelSizeX; ++n) {
                    nn = kernelSizeX - 1 - n;
                    rowIndex = i + (kCenterY - mm);
                    colIndex = j + (kCenterX - nn);
                    if (rowIndex >= 0 && rowIndex < dataSizeY && colIndex >= 0 && colIndex < dataSizeX)
                        sum += in[dataSizeX * rowIndex + colIndex] * kernel[kernelSizeX * m + n];
                }
            }
            out[dataSizeX * i + j] = (unsigned char)(fabs(sum) + 0.5f);
        }
    }
    return true;
}
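With THREADSPERBLOCK set to 1, the launch above runs one thread per block over a 10000x10000 grid, leaving 31 of every 32 warp lanes idle. A common alternative is one thread per output pixel with a 2D block; the sketch below reuses the file's own H/W/K macros and flipped-kernel indexing. The kernel name and the 16x16 block size are illustrative choices, not part of the original pair.

__global__ void convolve2D_GPU_dense(unsigned char *input, unsigned char *output, double *kernel)
{
    // One thread per output pixel; guard against ragged grid edges.
    int x = blockIdx.x * blockDim.x + threadIdx.x;
    int y = blockIdx.y * blockDim.y + threadIdx.y;
    if (x >= W || y >= H) return;

    int kCenter = K / 2;
    double sum = 0.0;
    for (int m = 0; m < K; ++m) {
        int mm = K - 1 - m;                  // flipped kernel row
        for (int n = 0; n < K; ++n) {
            int nn = K - 1 - n;              // flipped kernel column
            int row = y + (kCenter - mm);
            int col = x + (kCenter - nn);
            if (row >= 0 && row < H && col >= 0 && col < W)
                sum += input[W * row + col] * kernel[K * m + n];
        }
    }
    output[W * y + x] = (unsigned char)(fabs(sum) + 0.5);
}

// Launch: dim3 block(16, 16, 1);
//         dim3 grid((W + 15) / 16, (H + 15) / 16, 1);
//         convolve2D_GPU_dense<<<grid, block>>>(d_input, d_output, d_kernel);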
5046d665bb150240105fbda7dc300600effc131c.hip
// !!! This is a file automatically generated by hipify!!!
#include "gpuHelpers.h"
#include <iostream>
#include <helper_cuda.h>

void checkCublas(hipblasStatus_t s)
{
  if (s != HIPBLAS_STATUS_SUCCESS) {
    switch (s) {
      case HIPBLAS_STATUS_ALLOC_FAILED:     std::cerr << "HIPBLAS_STATUS_ALLOC_FAILED";     break;
      case HIPBLAS_STATUS_ARCH_MISMATCH:    std::cerr << "HIPBLAS_STATUS_ARCH_MISMATCH";    break;
      case HIPBLAS_STATUS_EXECUTION_FAILED: std::cerr << "HIPBLAS_STATUS_EXECUTION_FAILED"; break;
      case HIPBLAS_STATUS_INTERNAL_ERROR:   std::cerr << "HIPBLAS_STATUS_INTERNAL_ERROR";   break;
      case HIPBLAS_STATUS_INVALID_VALUE:    std::cerr << "HIPBLAS_STATUS_INVALID_VALUE";    break;
      case HIPBLAS_STATUS_MAPPING_ERROR:    std::cerr << "HIPBLAS_STATUS_MAPPING_ERROR";    break;
      case HIPBLAS_STATUS_NOT_INITIALIZED:  std::cerr << "HIPBLAS_STATUS_NOT_INITIALIZED";  break;
      default:                              std::cerr << "CUBLAS_UNKNOWN_ERROR";
    }
  }
}

void checkCusparse(hipsparseStatus_t s)
{
  if (s != HIPSPARSE_STATUS_SUCCESS) {
    switch (s) {
      case HIPSPARSE_STATUS_NOT_INITIALIZED:           std::cerr << "HIPSPARSE_STATUS_NOT_INITIALIZED";           break;
      case HIPSPARSE_STATUS_ALLOC_FAILED:              std::cerr << "HIPSPARSE_STATUS_ALLOC_FAILED";              break;
      case HIPSPARSE_STATUS_INVALID_VALUE:             std::cerr << "HIPSPARSE_STATUS_INVALID_VALUE";             break;
      case HIPSPARSE_STATUS_ARCH_MISMATCH:             std::cerr << "HIPSPARSE_STATUS_ARCH_MISMATCH";             break;
      case HIPSPARSE_STATUS_MAPPING_ERROR:             std::cerr << "HIPSPARSE_STATUS_MAPPING_ERROR";             break;
      case HIPSPARSE_STATUS_EXECUTION_FAILED:          std::cerr << "HIPSPARSE_STATUS_EXECUTION_FAILED";          break;
      case HIPSPARSE_STATUS_INTERNAL_ERROR:            std::cerr << "HIPSPARSE_STATUS_INTERNAL_ERROR";            break;
      case HIPSPARSE_STATUS_MATRIX_TYPE_NOT_SUPPORTED: std::cerr << "HIPSPARSE_STATUS_MATRIX_TYPE_NOT_SUPPORTED"; break;
      default:                                         std::cerr << "unknown CUSPARSE error";
    }
  }
}

unsigned long long getGFlopsOfDeviceId(unsigned int deviceID)
{
  int sm_per_multiproc = 0;
  unsigned long long gflops = 0;
  hipDeviceProp_t deviceProp;
  hipGetDeviceProperties(&deviceProp, deviceID);

  // Only rank this GPU if its compute mode is not "prohibited"
  if (deviceProp.computeMode != hipComputeModeProhibited) {
    if (deviceProp.major == 9999 && deviceProp.minor == 9999) {
      sm_per_multiproc = 1;
    } else {
      sm_per_multiproc = _ConvertSMVer2Cores(deviceProp.major, deviceProp.minor);
    }
    gflops = (unsigned long long) deviceProp.multiProcessorCount * sm_per_multiproc * deviceProp.clockRate;
  }
  return gflops;
}

// This function returns the best GPU (with maximum GFLOPS)
int getMaxGflopsDeviceId()
{
  int current_device = 0;
  int max_perf_device = 0;
  int device_count = 0;
  unsigned long long max_compute_perf = 0;

  checkCudaErrors(hipGetDeviceCount(&device_count));
  if (device_count == 0) {
    fprintf(stderr, "gpuGetMaxGflopsDeviceId() CUDA error: no devices supporting CUDA.\n");
    exit(EXIT_FAILURE);
  }

  // Find the best CUDA capable GPU device
  while (current_device < device_count) {
    unsigned long long compute_perf = getGFlopsOfDeviceId(current_device);
    if (compute_perf > max_compute_perf) {
      max_compute_perf = compute_perf;
      max_perf_device = current_device;
    }
    ++current_device;
  }
  return max_perf_device;
}
5046d665bb150240105fbda7dc300600effc131c.cu
#include "gpuHelpers.h"
#include <iostream>
#include <helper_cuda.h>

void checkCublas(cublasStatus_t s)
{
  if (s != CUBLAS_STATUS_SUCCESS) {
    switch (s) {
      case CUBLAS_STATUS_ALLOC_FAILED:     std::cerr << "CUBLAS_STATUS_ALLOC_FAILED";     break;
      case CUBLAS_STATUS_ARCH_MISMATCH:    std::cerr << "CUBLAS_STATUS_ARCH_MISMATCH";    break;
      case CUBLAS_STATUS_EXECUTION_FAILED: std::cerr << "CUBLAS_STATUS_EXECUTION_FAILED"; break;
      case CUBLAS_STATUS_INTERNAL_ERROR:   std::cerr << "CUBLAS_STATUS_INTERNAL_ERROR";   break;
      case CUBLAS_STATUS_INVALID_VALUE:    std::cerr << "CUBLAS_STATUS_INVALID_VALUE";    break;
      case CUBLAS_STATUS_MAPPING_ERROR:    std::cerr << "CUBLAS_STATUS_MAPPING_ERROR";    break;
      case CUBLAS_STATUS_NOT_INITIALIZED:  std::cerr << "CUBLAS_STATUS_NOT_INITIALIZED";  break;
      default:                             std::cerr << "CUBLAS_UNKNOWN_ERROR";
    }
  }
}

void checkCusparse(cusparseStatus_t s)
{
  if (s != CUSPARSE_STATUS_SUCCESS) {
    switch (s) {
      case CUSPARSE_STATUS_NOT_INITIALIZED:           std::cerr << "CUSPARSE_STATUS_NOT_INITIALIZED";           break;
      case CUSPARSE_STATUS_ALLOC_FAILED:              std::cerr << "CUSPARSE_STATUS_ALLOC_FAILED";              break;
      case CUSPARSE_STATUS_INVALID_VALUE:             std::cerr << "CUSPARSE_STATUS_INVALID_VALUE";             break;
      case CUSPARSE_STATUS_ARCH_MISMATCH:             std::cerr << "CUSPARSE_STATUS_ARCH_MISMATCH";             break;
      case CUSPARSE_STATUS_MAPPING_ERROR:             std::cerr << "CUSPARSE_STATUS_MAPPING_ERROR";             break;
      case CUSPARSE_STATUS_EXECUTION_FAILED:          std::cerr << "CUSPARSE_STATUS_EXECUTION_FAILED";          break;
      case CUSPARSE_STATUS_INTERNAL_ERROR:            std::cerr << "CUSPARSE_STATUS_INTERNAL_ERROR";            break;
      case CUSPARSE_STATUS_MATRIX_TYPE_NOT_SUPPORTED: std::cerr << "CUSPARSE_STATUS_MATRIX_TYPE_NOT_SUPPORTED"; break;
      default:                                        std::cerr << "unknown CUSPARSE error";
    }
  }
}

unsigned long long getGFlopsOfDeviceId(unsigned int deviceID)
{
  int sm_per_multiproc = 0;
  unsigned long long gflops = 0;
  cudaDeviceProp deviceProp;
  cudaGetDeviceProperties(&deviceProp, deviceID);

  // Only rank this GPU if its compute mode is not "prohibited"
  if (deviceProp.computeMode != cudaComputeModeProhibited) {
    if (deviceProp.major == 9999 && deviceProp.minor == 9999) {
      sm_per_multiproc = 1;
    } else {
      sm_per_multiproc = _ConvertSMVer2Cores(deviceProp.major, deviceProp.minor);
    }
    gflops = (unsigned long long) deviceProp.multiProcessorCount * sm_per_multiproc * deviceProp.clockRate;
  }
  return gflops;
}

// This function returns the best GPU (with maximum GFLOPS)
int getMaxGflopsDeviceId()
{
  int current_device = 0;
  int max_perf_device = 0;
  int device_count = 0;
  unsigned long long max_compute_perf = 0;

  checkCudaErrors(cudaGetDeviceCount(&device_count));
  if (device_count == 0) {
    fprintf(stderr, "gpuGetMaxGflopsDeviceId() CUDA error: no devices supporting CUDA.\n");
    exit(EXIT_FAILURE);
  }

  // Find the best CUDA capable GPU device
  while (current_device < device_count) {
    unsigned long long compute_perf = getGFlopsOfDeviceId(current_device);
    if (compute_perf > max_compute_perf) {
      max_compute_perf = compute_perf;
      max_perf_device = current_device;
    }
    ++current_device;
  }
  return max_perf_device;
}
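A call site for these helpers is not shown in the pair above; the fragment below is a hypothetical usage sketch, with pickBestDevice being an invented name. Note that the value returned by getGFlopsOfDeviceId is a relative ranking score (SM count x cores per SM x clock rate in kHz), not literal GFLOPS.

int pickBestDevice()
{
    int dev = getMaxGflopsDeviceId();            // defined above
    checkCudaErrors(cudaSetDevice(dev));

    cudaDeviceProp prop;
    checkCudaErrors(cudaGetDeviceProperties(&prop, dev));
    printf("Using device %d: %s (score %llu)\n",
           dev, prop.name, getGFlopsOfDeviceId(dev));
    return dev;
}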
eac7d12997f2752df1a3c068b8c86fb2e1ec1025.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "cudaTools.h"
#include "Device.h"

#include <iostream>

using std::cout;
using std::endl;

/*----------------------------------------------------------------------*\
|*                            Declaration                               *|
\*---------------------------------------------------------------------*/

/*--------------------------------------*\
|*              Imported                *|
\*-------------------------------------*/

extern __global__ void addVecteurGPU(float* ptrDevV1, float* ptrDevV2, float* ptrDevW, int n);

/*--------------------------------------*\
|*              Public                  *|
\*-------------------------------------*/

__host__ void addVecteur(float* ptrV1, float* ptrV2, float* ptrW, int n);

/*--------------------------------------*\
|*              Private                 *|
\*-------------------------------------*/

/*----------------------------------------------------------------------*\
|*                          Implementation                              *|
\*---------------------------------------------------------------------*/

/*--------------------------------------*\
|*              Public                  *|
\*-------------------------------------*/

__host__ void addVecteur(float* ptrV1, float* ptrV2, float* ptrW, int n)
{
    float* ptrDevV1 = NULL;
    float* ptrDevV2 = NULL;
    float* ptrDevW = NULL;
    size_t size = n * sizeof(float); // in bytes

    HANDLE_ERROR(hipMalloc(&ptrDevV1, size));
    HANDLE_ERROR(hipMalloc(&ptrDevV2, size));
    HANDLE_ERROR(hipMalloc(&ptrDevW, size));

    HANDLE_ERROR(hipMemset(ptrDevW, 0, size));

    HANDLE_ERROR(hipMemcpy(ptrDevV1, ptrV1, size, hipMemcpyHostToDevice));
    HANDLE_ERROR(hipMemcpy(ptrDevV2, ptrV2, size, hipMemcpyHostToDevice));

    dim3 dg(16, 2, 1); // arbitrary choice
    dim3 db(32, 4, 1); // arbitrary choice

    // Debug
    //Device::print(dg, db);
    Device::checkDimError(dg, db);

    hipLaunchKernelGGL((addVecteurGPU), dim3(dg), dim3(db), 0, 0, ptrDevV1, ptrDevV2, ptrDevW, n); // asynchronous
    Device::checkLastCudaError("addVecteur"); // optional

    Device::synchronize(); // Temp, only for printf in GPU

    HANDLE_ERROR(hipMemcpy(ptrW, ptrDevW, size, hipMemcpyDeviceToHost)); // implicit synchronization barrier

    HANDLE_ERROR(hipFree(ptrDevV1));
    HANDLE_ERROR(hipFree(ptrDevV2));
    HANDLE_ERROR(hipFree(ptrDevW));
}

/*--------------------------------------*\
|*              Private                 *|
\*-------------------------------------*/

/*----------------------------------------------------------------------*\
|*                               End                                    *|
\*---------------------------------------------------------------------*/
eac7d12997f2752df1a3c068b8c86fb2e1ec1025.cu
#include "cudaTools.h"
#include "Device.h"

#include <iostream>

using std::cout;
using std::endl;

/*----------------------------------------------------------------------*\
|*                            Declaration                               *|
\*---------------------------------------------------------------------*/

/*--------------------------------------*\
|*              Imported                *|
\*-------------------------------------*/

extern __global__ void addVecteurGPU(float* ptrDevV1, float* ptrDevV2, float* ptrDevW, int n);

/*--------------------------------------*\
|*              Public                  *|
\*-------------------------------------*/

__host__ void addVecteur(float* ptrV1, float* ptrV2, float* ptrW, int n);

/*--------------------------------------*\
|*              Private                 *|
\*-------------------------------------*/

/*----------------------------------------------------------------------*\
|*                          Implementation                              *|
\*---------------------------------------------------------------------*/

/*--------------------------------------*\
|*              Public                  *|
\*-------------------------------------*/

__host__ void addVecteur(float* ptrV1, float* ptrV2, float* ptrW, int n)
{
    float* ptrDevV1 = NULL;
    float* ptrDevV2 = NULL;
    float* ptrDevW = NULL;
    size_t size = n * sizeof(float); // in bytes

    HANDLE_ERROR(cudaMalloc(&ptrDevV1, size));
    HANDLE_ERROR(cudaMalloc(&ptrDevV2, size));
    HANDLE_ERROR(cudaMalloc(&ptrDevW, size));

    HANDLE_ERROR(cudaMemset(ptrDevW, 0, size));

    HANDLE_ERROR(cudaMemcpy(ptrDevV1, ptrV1, size, cudaMemcpyHostToDevice));
    HANDLE_ERROR(cudaMemcpy(ptrDevV2, ptrV2, size, cudaMemcpyHostToDevice));

    dim3 dg(16, 2, 1); // arbitrary choice
    dim3 db(32, 4, 1); // arbitrary choice

    // Debug
    //Device::print(dg, db);
    Device::checkDimError(dg, db);

    addVecteurGPU<<<dg,db>>>(ptrDevV1, ptrDevV2, ptrDevW, n); // asynchronous
    Device::checkLastCudaError("addVecteur"); // optional

    Device::synchronize(); // Temp, only for printf in GPU

    HANDLE_ERROR(cudaMemcpy(ptrW, ptrDevW, size, cudaMemcpyDeviceToHost)); // implicit synchronization barrier

    HANDLE_ERROR(cudaFree(ptrDevV1));
    HANDLE_ERROR(cudaFree(ptrDevV2));
    HANDLE_ERROR(cudaFree(ptrDevW));
}

/*--------------------------------------*\
|*              Private                 *|
\*-------------------------------------*/

/*----------------------------------------------------------------------*\
|*                               End                                    *|
\*---------------------------------------------------------------------*/
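addVecteurGPU itself is declared extern and defined in another translation unit, so its body is not part of this pair. As an assumption rather than the project's actual kernel, a body consistent with the fixed dg(16,2,1)/db(32,4,1) launch would flatten the 2D grid and block and stride over n:

// Hypothetical sketch of the missing kernel, NOT the original addVecteurGPU.
__global__ void addVecteurGPU_sketch(float* v1, float* v2, float* w, int n)
{
    // Flatten the 2D block and 2D grid into one linear thread id.
    int tidLocal  = threadIdx.x + threadIdx.y * blockDim.x;
    int bid       = blockIdx.x + blockIdx.y * gridDim.x;
    int tid       = tidLocal + bid * (blockDim.x * blockDim.y);
    int nbThreads = gridDim.x * gridDim.y * blockDim.x * blockDim.y;

    // Grid-stride loop: n may exceed the 16*2*32*4 = 4096 launched threads.
    while (tid < n) {
        w[tid] = v1[tid] + v2[tid];
        tid += nbThreads;
    }
}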
c47f8e70e389f64ed831acf4d66f21f4621de6e0.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 2.1.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date August 2016 @precisions normal z -> c d s */ #include "magmasparse_internal.h" #define BLOCK_SIZE 512 // ELL SpMV kernel //Michael Garland template<bool betazero> __global__ void zgeelltmv_kernel( int num_rows, int num_cols, int num_cols_per_row, magmaDoubleComplex alpha, magmaDoubleComplex * dval, magma_index_t * dcolind, magmaDoubleComplex * dx, magmaDoubleComplex beta, magmaDoubleComplex * dy) { int row = blockDim.x * blockIdx.x + threadIdx.x; if (row < num_rows ) { magmaDoubleComplex dot = MAGMA_Z_MAKE(0.0, 0.0); for ( int n = 0; n < num_cols_per_row; n++ ) { int col = dcolind [ num_rows * n + row ]; magmaDoubleComplex val = dval [ num_rows * n + row ]; //if ( val != MAGMA_Z_ZERO ) dot += val * dx[col ]; } if (betazero) { dy[ row ] = dot * alpha; } else { dy[ row ] = dot * alpha + beta * dy [ row ]; } } } // shifted ELL SpMV kernel //Michael Garland __global__ void zgeelltmv_kernel_shift( int num_rows, int num_cols, int num_cols_per_row, magmaDoubleComplex alpha, magmaDoubleComplex lambda, magmaDoubleComplex * dval, magma_index_t * dcolind, magmaDoubleComplex * dx, magmaDoubleComplex beta, int offset, int blocksize, magma_index_t * addrows, magmaDoubleComplex * dy) { int row = blockDim.x * blockIdx.x + threadIdx.x; if (row < num_rows ) { magmaDoubleComplex dot = MAGMA_Z_MAKE(0.0, 0.0); for ( int n = 0; n < num_cols_per_row; n++ ) { int col = dcolind [ num_rows * n + row ]; magmaDoubleComplex val = dval [ num_rows * n + row ]; if ( val != 0) dot += val * dx[col ]; } if ( row < blocksize ) dy[ row ] = dot * alpha - lambda * dx[ offset+row ] + beta * dy [ row ]; else dy[ row ] = dot * alpha - lambda * dx[ addrows[row-blocksize] ] + beta * dy [ row ]; } } /** Purpose ------- This routine computes y = alpha * A^t * x + beta * y on the GPU. Input format is ELL. Arguments --------- @param[in] transA magma_trans_t transposition parameter for A @param[in] m magma_int_t number of rows in A @param[in] n magma_int_t number of columns in A @param[in] nnz_per_row magma_int_t number of elements in the longest row @param[in] alpha magmaDoubleComplex scalar multiplier @param[in] dval magmaDoubleComplex_ptr array containing values of A in ELL @param[in] dcolind magmaIndex_ptr columnindices of A in ELL @param[in] dx magmaDoubleComplex_ptr input vector x @param[in] beta magmaDoubleComplex scalar multiplier @param[out] dy magmaDoubleComplex_ptr input/output vector y @param[in] queue magma_queue_t Queue to execute in. 
@ingroup magmasparse_z ********************************************************************/ extern "C" magma_int_t magma_zgeelltmv( magma_trans_t transA, magma_int_t m, magma_int_t n, magma_int_t nnz_per_row, magmaDoubleComplex alpha, magmaDoubleComplex_ptr dval, magmaIndex_ptr dcolind, magmaDoubleComplex_ptr dx, magmaDoubleComplex beta, magmaDoubleComplex_ptr dy, magma_queue_t queue ) { dim3 grid( magma_ceildiv( m, BLOCK_SIZE ) ); magma_int_t threads = BLOCK_SIZE; if (beta == MAGMA_Z_ZERO) { hipLaunchKernelGGL(( zgeelltmv_kernel<true>), dim3(grid), dim3(threads), 0, queue->cuda_stream() , m, n, nnz_per_row, alpha, dval, dcolind, dx, beta, dy ); } else { hipLaunchKernelGGL(( zgeelltmv_kernel<false>), dim3(grid), dim3(threads), 0, queue->cuda_stream() , m, n, nnz_per_row, alpha, dval, dcolind, dx, beta, dy ); } return MAGMA_SUCCESS; } /** Purpose ------- This routine computes y = alpha *( A - lambda I ) * x + beta * y on the GPU. Input format is ELL. Arguments --------- @param[in] transA magma_trans_t transposition parameter for A @param[in] m magma_int_t number of rows in A @param[in] n magma_int_t number of columns in A @param[in] nnz_per_row magma_int_t number of elements in the longest row @param[in] alpha magmaDoubleComplex scalar multiplier @param[in] lambda magmaDoubleComplex scalar multiplier @param[in] dval magmaDoubleComplex_ptr array containing values of A in ELL @param[in] dcolind magmaIndex_ptr columnindices of A in ELL @param[in] dx magmaDoubleComplex_ptr input vector x @param[in] beta magmaDoubleComplex scalar multiplier @param[in] offset magma_int_t in case not the main diagonal is scaled @param[in] blocksize magma_int_t in case of processing multiple vectors @param[in] addrows magmaIndex_ptr in case the matrixpowerskernel is used @param[out] dy magmaDoubleComplex_ptr input/output vector y @param[in] queue magma_queue_t Queue to execute in. @ingroup magmasparse_zblas ********************************************************************/ extern "C" magma_int_t magma_zgeelltmv_shift( magma_trans_t transA, magma_int_t m, magma_int_t n, magma_int_t nnz_per_row, magmaDoubleComplex alpha, magmaDoubleComplex lambda, magmaDoubleComplex_ptr dval, magmaIndex_ptr dcolind, magmaDoubleComplex_ptr dx, magmaDoubleComplex beta, magma_int_t offset, magma_int_t blocksize, magmaIndex_ptr addrows, magmaDoubleComplex_ptr dy, magma_queue_t queue ) { dim3 grid( magma_ceildiv( m, BLOCK_SIZE ) ); magma_int_t threads = BLOCK_SIZE; magmaDoubleComplex tmp_shift; //magma_zsetvector(1,&lambda,1,&tmp_shift,1); tmp_shift = lambda; hipLaunchKernelGGL(( zgeelltmv_kernel_shift), dim3(grid), dim3(threads), 0, queue->cuda_stream() , m, n, nnz_per_row, alpha, tmp_shift, dval, dcolind, dx, beta, offset, blocksize, addrows, dy ); return MAGMA_SUCCESS; }
c47f8e70e389f64ed831acf4d66f21f4621de6e0.cu
/* -- MAGMA (version 2.1.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date August 2016 @precisions normal z -> c d s */ #include "magmasparse_internal.h" #define BLOCK_SIZE 512 // ELL SpMV kernel //Michael Garland template<bool betazero> __global__ void zgeelltmv_kernel( int num_rows, int num_cols, int num_cols_per_row, magmaDoubleComplex alpha, magmaDoubleComplex * dval, magma_index_t * dcolind, magmaDoubleComplex * dx, magmaDoubleComplex beta, magmaDoubleComplex * dy) { int row = blockDim.x * blockIdx.x + threadIdx.x; if (row < num_rows ) { magmaDoubleComplex dot = MAGMA_Z_MAKE(0.0, 0.0); for ( int n = 0; n < num_cols_per_row; n++ ) { int col = dcolind [ num_rows * n + row ]; magmaDoubleComplex val = dval [ num_rows * n + row ]; //if ( val != MAGMA_Z_ZERO ) dot += val * dx[col ]; } if (betazero) { dy[ row ] = dot * alpha; } else { dy[ row ] = dot * alpha + beta * dy [ row ]; } } } // shifted ELL SpMV kernel //Michael Garland __global__ void zgeelltmv_kernel_shift( int num_rows, int num_cols, int num_cols_per_row, magmaDoubleComplex alpha, magmaDoubleComplex lambda, magmaDoubleComplex * dval, magma_index_t * dcolind, magmaDoubleComplex * dx, magmaDoubleComplex beta, int offset, int blocksize, magma_index_t * addrows, magmaDoubleComplex * dy) { int row = blockDim.x * blockIdx.x + threadIdx.x; if (row < num_rows ) { magmaDoubleComplex dot = MAGMA_Z_MAKE(0.0, 0.0); for ( int n = 0; n < num_cols_per_row; n++ ) { int col = dcolind [ num_rows * n + row ]; magmaDoubleComplex val = dval [ num_rows * n + row ]; if ( val != 0) dot += val * dx[col ]; } if ( row < blocksize ) dy[ row ] = dot * alpha - lambda * dx[ offset+row ] + beta * dy [ row ]; else dy[ row ] = dot * alpha - lambda * dx[ addrows[row-blocksize] ] + beta * dy [ row ]; } } /** Purpose ------- This routine computes y = alpha * A^t * x + beta * y on the GPU. Input format is ELL. Arguments --------- @param[in] transA magma_trans_t transposition parameter for A @param[in] m magma_int_t number of rows in A @param[in] n magma_int_t number of columns in A @param[in] nnz_per_row magma_int_t number of elements in the longest row @param[in] alpha magmaDoubleComplex scalar multiplier @param[in] dval magmaDoubleComplex_ptr array containing values of A in ELL @param[in] dcolind magmaIndex_ptr columnindices of A in ELL @param[in] dx magmaDoubleComplex_ptr input vector x @param[in] beta magmaDoubleComplex scalar multiplier @param[out] dy magmaDoubleComplex_ptr input/output vector y @param[in] queue magma_queue_t Queue to execute in. @ingroup magmasparse_z ********************************************************************/ extern "C" magma_int_t magma_zgeelltmv( magma_trans_t transA, magma_int_t m, magma_int_t n, magma_int_t nnz_per_row, magmaDoubleComplex alpha, magmaDoubleComplex_ptr dval, magmaIndex_ptr dcolind, magmaDoubleComplex_ptr dx, magmaDoubleComplex beta, magmaDoubleComplex_ptr dy, magma_queue_t queue ) { dim3 grid( magma_ceildiv( m, BLOCK_SIZE ) ); magma_int_t threads = BLOCK_SIZE; if (beta == MAGMA_Z_ZERO) { zgeelltmv_kernel<true><<< grid, threads, 0, queue->cuda_stream() >>> ( m, n, nnz_per_row, alpha, dval, dcolind, dx, beta, dy ); } else { zgeelltmv_kernel<false><<< grid, threads, 0, queue->cuda_stream() >>> ( m, n, nnz_per_row, alpha, dval, dcolind, dx, beta, dy ); } return MAGMA_SUCCESS; } /** Purpose ------- This routine computes y = alpha *( A - lambda I ) * x + beta * y on the GPU. Input format is ELL. 
Arguments --------- @param[in] transA magma_trans_t transposition parameter for A @param[in] m magma_int_t number of rows in A @param[in] n magma_int_t number of columns in A @param[in] nnz_per_row magma_int_t number of elements in the longest row @param[in] alpha magmaDoubleComplex scalar multiplier @param[in] lambda magmaDoubleComplex scalar multiplier @param[in] dval magmaDoubleComplex_ptr array containing values of A in ELL @param[in] dcolind magmaIndex_ptr columnindices of A in ELL @param[in] dx magmaDoubleComplex_ptr input vector x @param[in] beta magmaDoubleComplex scalar multiplier @param[in] offset magma_int_t in case not the main diagonal is scaled @param[in] blocksize magma_int_t in case of processing multiple vectors @param[in] addrows magmaIndex_ptr in case the matrixpowerskernel is used @param[out] dy magmaDoubleComplex_ptr input/output vector y @param[in] queue magma_queue_t Queue to execute in. @ingroup magmasparse_zblas ********************************************************************/ extern "C" magma_int_t magma_zgeelltmv_shift( magma_trans_t transA, magma_int_t m, magma_int_t n, magma_int_t nnz_per_row, magmaDoubleComplex alpha, magmaDoubleComplex lambda, magmaDoubleComplex_ptr dval, magmaIndex_ptr dcolind, magmaDoubleComplex_ptr dx, magmaDoubleComplex beta, magma_int_t offset, magma_int_t blocksize, magmaIndex_ptr addrows, magmaDoubleComplex_ptr dy, magma_queue_t queue ) { dim3 grid( magma_ceildiv( m, BLOCK_SIZE ) ); magma_int_t threads = BLOCK_SIZE; magmaDoubleComplex tmp_shift; //magma_zsetvector(1,&lambda,1,&tmp_shift,1); tmp_shift = lambda; zgeelltmv_kernel_shift<<< grid, threads, 0, queue->cuda_stream() >>> ( m, n, nnz_per_row, alpha, tmp_shift, dval, dcolind, dx, beta, offset, blocksize, addrows, dy ); return MAGMA_SUCCESS; }
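Both ELL kernels above read entry n of row `row` at dval[num_rows * n + row], i.e. the matrix is stored column-major by slot, padded to the longest row. A small worked example of that layout (values shown as reals for brevity; MAGMA actually stores magmaDoubleComplex):

/* A = [ 10   0  20 ]        num_rows = 3, nnz_per_row = 2 (longest row)
       [  0  30   0 ]
       [ 40  50   0 ]

   dval    = { 10, 30, 40,   20, 0, 50 }   // slot 0 for rows 0..2, then slot 1
   dcolind = {  0,  1,  0,    2, 0,  1 }   // padding: value 0, any in-range column

   e.g. dval[3*1 + 2] = 50 is row 2's second entry, in column dcolind[3*1 + 2] = 1,
   exactly the indexing used in zgeelltmv_kernel. */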
7012a5676d46d48893af81f95141bf31d93f464f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "sixth.h" #include "nbody.h" #include "cutil.h" Sixth::Sixth() { #ifndef EXT_DATA genInitialConditions(); #ifdef MASS massFunction(); #else cout << "Sixth equal masses \n"; equalMasses(); #endif // MASS normalization(); /******* Print initial positions ****************/ print( "./Data/sixth0.txt", pos, vel ); /************************************************/ #else readExtData( "./Data/ExtData.txt" ); #endif // EXT_DATA pos1 = new double4 [N]; #ifdef DS_SIXTH pos1_temp = new DS4 [N]; acc1 = new float4 [N]; #else acc1 = new double4 [N]; #endif /**************** Sixth order coefficients *******/ C = new double [8]; C[0] = 0.392256805238780; C[1] = 0.510043411918458; C[2] =-0.471053385409758; C[3] = 0.0687531682525198; C[4] = 0.0687531682525198; C[5] =-0.471053385409758; C[6] = 0.510043411918458; C[7] = 0.392256805238780; D = new double [8]; D[0] = 0.784513610477560; D[1] = 0.235573213359357; D[2] =-1.17767998417887; D[3] = 1.31518632068391; D[4] =-1.17767998417887; D[5] = 0.235573213359357; D[6] = 0.784513610477560; D[7] = 0.; /*************************************************/ for (int i = 0; i < N; i++){ pos1[i].w = pos[i].w; } E0 = kinEnergy() + potEnergy( pos, M ); L0 = angMomentum( pos, vel ); } Sixth::~Sixth() { delete [] pos1; #ifdef DS_SIXTH delete [] pos1_temp; #endif delete [] C; delete [] D; delete [] acc1; } /**************************************************************/ /* SIXTH ORDER INTEGRATION METHOD */ /**************************************************************/ void Sixth::integration() { /******** Variables for the evalutation of the loop time ****/ clock_t start, end; struct timeval tv; int secStart, secEnd; int microSecStart, microSecEnd; double time; /************************************************************/ int t_i = ((int) n/FRAME); int q = 1; int i; int j; #ifdef _OMP omp_set_dynamic( NUM_THREADS ); #endif #ifndef POTENTIAL double R2, r2, r; double a, b, c; double axG_t, ayG_t, azG_t; mBulge = 1.40592e10 / M; mDisk = 8.56080e10 / M; mHalo = 10.70680e10 / M; bB = 387.3 / rCl; aD = 5317.8 / rCl; bD = 250.0 / rCl; aH = 12000.0 / rCl; #endif /****************** START CLOCK *****************************/ start = clock(); gettimeofday(&tv, NULL); secStart = tv.tv_sec; microSecStart = tv.tv_usec; /************************ START LOOP ************************/ for (int k = 1; k <= n; k++){ for(int l = 0; l < 8; l++){ #pragma omp parallel default(shared) private(i) { #pragma omp for for (i = 0; i < N; i++){ pos1[i].x = pos[i].x + C[l] * vel[i].u * h; pos1[i].y = pos[i].y + C[l] * vel[i].v * h; pos1[i].z = pos[i].z + C[l] * vel[i].w * h; #ifdef DS_SIXTH pos1_temp[i] = (DS4){ to_DS( pos1[i].x ), to_DS( pos1[i].y ), to_DS( pos1[i].z ), to_DS( pos1[i].w ) }; #endif } } if(l != 7){ #ifdef _OMP omp_set_num_threads( NUM_GPUS ); #endif #pragma omp parallel default( shared ) { /*********************************************************************/ unsigned int cpu_thread_id = omp_get_thread_num(); unsigned int num_cpu_threads = omp_get_num_threads(); int gpu_id = -1; /* * The device of the GPUs are set from hipSetDevice( 1 ) to * hipSetDevice( ngpus ) where ngpus in the number of the n-th * GPU device allowed. 
*/ CUDA_SAFE_CALL( hipSetDevice(cpu_thread_id % num_cpu_threads + 1 ) ); CUDA_SAFE_CALL( hipGetDevice(&gpu_id) ); /*********************************************************************/ #ifdef SIXTH_METHOD double4 *accD; CUDA_SAFE_CALL( hipMalloc( ( void** )&accD, N * sizeof( double4 ) ) ); double4 *posD; CUDA_SAFE_CALL( hipMalloc( ( void** )&posD, N * sizeof( double4 ) ) ); #else float4 *accD; CUDA_SAFE_CALL( hipMalloc( ( void** )&accD, N * sizeof( float4 ) ) ); DS4 *posD; CUDA_SAFE_CALL( hipMalloc( ( void** )&posD, N * sizeof( DS4 ) ) ); #endif /*********************************************************************/ int istart = cpu_thread_id * N/num_cpu_threads; #ifdef SIXTH_METHOD double4 *sub_a = acc1 + cpu_thread_id * N/ num_cpu_threads; #else float4 *sub_a = acc1 + cpu_thread_id * N/ num_cpu_threads; #endif /********************************************************************/ dim3 gpu_threads( THREADS_PER_BLOCK ); dim3 gpu_blocks( N / ( gpu_threads.x * num_cpu_threads ) ); #ifdef SIXTH_METHOD int sharedMem = sizeof( double4 ) * gpu_threads.x; CUDA_SAFE_CALL( hipMemcpy( posD, pos1, N * sizeof( double4 ), hipMemcpyHostToDevice ) ); #else int sharedMem = sizeof( DS4 ) * gpu_threads.x; CUDA_SAFE_CALL( hipMemcpy( posD, pos1_temp, N * sizeof( DS4 ), hipMemcpyHostToDevice ) ); #endif /*******************************************************************/ hipLaunchKernelGGL(( calculate_forces) , dim3(gpu_blocks), dim3(gpu_threads), sharedMem , 0, posD, accD, rCl, M, istart ); #ifdef SIXTH_METHOD CUDA_SAFE_CALL( hipMemcpy( sub_a, accD, ( N / num_cpu_threads ) * sizeof( double4 ), hipMemcpyDeviceToHost ) ); #else CUDA_SAFE_CALL( hipMemcpy( sub_a, accD, ( N / num_cpu_threads ) * sizeof( float4 ), hipMemcpyDeviceToHost) ); #endif CUDA_SAFE_CALL( hipFree( accD )); CUDA_SAFE_CALL( hipFree( posD )); } #ifdef _OMP omp_set_num_threads( NUM_THREADS ); #endif } #pragma omp parallel default( shared ) private( i ) { #ifndef POTENTIAL #pragma omp for private( R2, r2, r, a, b, c, axG_t, ayG_t, azG_t ) for( i = 0; i < N; i++ ){ R2 = pos1[i].x*pos1[i].x + pos1[i].y*pos1[i].y; r2 = pos1[i].x*pos1[i].x + pos1[i].y*pos1[i].y + pos1[i].z*pos1[i].z; r = sqrt(r2); a = pow((r2 + bB*bB),1.5); axG_t = -mBulge * pos1[i].x / a; ayG_t = -mBulge * pos1[i].y / a; azG_t = -mBulge * pos1[i].z / a; b = pow(R2 + (aD + sqrt(bD*bD + pos1[i].z*pos1[i].z))*(aD+ sqrt(bD*bD + pos1[i].z*pos1[i].z)),1.5); axG_t -= mDisk * pos1[i].x / b; ayG_t -= mDisk * pos1[i].y / b; azG_t -= mDisk * pos1[i].z *(aD+ sqrt(bD*bD + pos1[i].z*pos1[i].z))/(sqrt(bD*bD + pos1[i].z*pos1[i].z) * pow(R2+ (aD+ sqrt(bD*bD + pos1[i].z*pos1[i].z))*(aD+ sqrt(bD*bD + pos1[i].z*pos1[i].z)),1.5)); /*************** see Espresate,[2002] *******************/ c = pow(r/aH,0.02)/((aH*aH)*r* (1. 
+ pow(r/aH,1.02))); axG_t -= mHalo * pos1[i].x * c; ayG_t -= mHalo * pos1[i].y * c; azG_t -= mHalo * pos1[i].z * c; acc1[i].x += axG_t; acc1[i].y += ayG_t; acc1[i].z += azG_t; } #endif #pragma omp for for(i = 0; i < N; i++){ vel[i].u = vel[i].u + D[l] * h * acc1[i].x; pos[i].x = pos1[i].x; vel[i].v = vel[i].v + D[l] * h * acc1[i].y; pos[i].y = pos1[i].y; vel[i].w = vel[i].w + D[l] * h * acc1[i].z; pos[i].z = pos1[i].z; } } } /***********************************************************/ if( k == q*t_i && q < FRAME+1 ) { print2("./Data/sixth", pos1, vel, q); q++; } /***********************************************************/ } /************************* END LOOP *************************/ end = clock(); gettimeofday(&tv, NULL); secEnd = tv.tv_sec; microSecEnd = tv.tv_usec; /************ Evaluation of the distances *******************/ time = (((double)(end-start))/((double)CLOCKS_PER_SEC)); cout << setprecision(10); cout << "Time clock: " << time << " Get time of day: "; cout << secEnd + microSecEnd * 0.000001 - secStart - microSecStart*0.000001 << endl; E = kinEnergy() + potEnergy( pos, M ); L = angMomentum( pos, vel ); cout << "E0 = " << E0 << "; E = " << E << "; DE =(E-E0)/E0 = " << (E-E0)/fabs(E0) << endl; cout << "L0 = " << L0 << "; L = " << L << "; DL =(L-L0)/L0 = " << (L-L0)/fabs(L0) << endl; /************** Print on the file *************************/ openFile(outES, "./Data/energiaS.txt"); outES << "####################################\n"; outES << "# SIXTH ORDER METHOD: #\n"; outES << "####################################\n"; outES << "Total Number of steps: " << n << ".\n"; outES << "Integration step: " << h << ".\n"; outES << "Cluster radius :" << rCl << ".\n"; outES << "Beta: " << beta << ".\n\n"; outES << "E0 = " << E0 << "; E = " << E << "; DE =(E-E0)/E0 = " << (E-E0)/fabs(E0) << endl; outES << "L0 = " << L0 << "; L = " << L << "; DL =(L-L0)/L0 = " << (L-L0)/fabs(L0) << endl; outES << endl; outES << "Time clock: " << time << " Get time of day: "; outES << secEnd + microSecEnd * 0.000001 - secStart - microSecStart*0.000001 << endl; closeFile(outES); /*********************************************************/ }
7012a5676d46d48893af81f95141bf31d93f464f.cu
#include "sixth.h" #include "nbody.h" #include "cutil.h" Sixth::Sixth() { #ifndef EXT_DATA genInitialConditions(); #ifdef MASS massFunction(); #else cout << "Sixth equal masses \n"; equalMasses(); #endif // MASS normalization(); /******* Print initial positions ****************/ print( "./Data/sixth0.txt", pos, vel ); /************************************************/ #else readExtData( "./Data/ExtData.txt" ); #endif // EXT_DATA pos1 = new double4 [N]; #ifdef DS_SIXTH pos1_temp = new DS4 [N]; acc1 = new float4 [N]; #else acc1 = new double4 [N]; #endif /**************** Sixth order coefficients *******/ C = new double [8]; C[0] = 0.392256805238780; C[1] = 0.510043411918458; C[2] =-0.471053385409758; C[3] = 0.0687531682525198; C[4] = 0.0687531682525198; C[5] =-0.471053385409758; C[6] = 0.510043411918458; C[7] = 0.392256805238780; D = new double [8]; D[0] = 0.784513610477560; D[1] = 0.235573213359357; D[2] =-1.17767998417887; D[3] = 1.31518632068391; D[4] =-1.17767998417887; D[5] = 0.235573213359357; D[6] = 0.784513610477560; D[7] = 0.; /*************************************************/ for (int i = 0; i < N; i++){ pos1[i].w = pos[i].w; } E0 = kinEnergy() + potEnergy( pos, M ); L0 = angMomentum( pos, vel ); } Sixth::~Sixth() { delete [] pos1; #ifdef DS_SIXTH delete [] pos1_temp; #endif delete [] C; delete [] D; delete [] acc1; } /**************************************************************/ /* SIXTH ORDER INTEGRATION METHOD */ /**************************************************************/ void Sixth::integration() { /******** Variables for the evalutation of the loop time ****/ clock_t start, end; struct timeval tv; int secStart, secEnd; int microSecStart, microSecEnd; double time; /************************************************************/ int t_i = ((int) n/FRAME); int q = 1; int i; int j; #ifdef _OMP omp_set_dynamic( NUM_THREADS ); #endif #ifndef POTENTIAL double R2, r2, r; double a, b, c; double axG_t, ayG_t, azG_t; mBulge = 1.40592e10 / M; mDisk = 8.56080e10 / M; mHalo = 10.70680e10 / M; bB = 387.3 / rCl; aD = 5317.8 / rCl; bD = 250.0 / rCl; aH = 12000.0 / rCl; #endif /****************** START CLOCK *****************************/ start = clock(); gettimeofday(&tv, NULL); secStart = tv.tv_sec; microSecStart = tv.tv_usec; /************************ START LOOP ************************/ for (int k = 1; k <= n; k++){ for(int l = 0; l < 8; l++){ #pragma omp parallel default(shared) private(i) { #pragma omp for for (i = 0; i < N; i++){ pos1[i].x = pos[i].x + C[l] * vel[i].u * h; pos1[i].y = pos[i].y + C[l] * vel[i].v * h; pos1[i].z = pos[i].z + C[l] * vel[i].w * h; #ifdef DS_SIXTH pos1_temp[i] = (DS4){ to_DS( pos1[i].x ), to_DS( pos1[i].y ), to_DS( pos1[i].z ), to_DS( pos1[i].w ) }; #endif } } if(l != 7){ #ifdef _OMP omp_set_num_threads( NUM_GPUS ); #endif #pragma omp parallel default( shared ) { /*********************************************************************/ unsigned int cpu_thread_id = omp_get_thread_num(); unsigned int num_cpu_threads = omp_get_num_threads(); int gpu_id = -1; /* * The device of the GPUs are set from cudaSetDevice( 1 ) to * cudaSetDevice( ngpus ) where ngpus in the number of the n-th * GPU device allowed. 
*/ CUDA_SAFE_CALL( cudaSetDevice(cpu_thread_id % num_cpu_threads + 1 ) ); CUDA_SAFE_CALL( cudaGetDevice(&gpu_id) ); /*********************************************************************/ #ifdef SIXTH_METHOD double4 *accD; CUDA_SAFE_CALL( cudaMalloc( ( void** )&accD, N * sizeof( double4 ) ) ); double4 *posD; CUDA_SAFE_CALL( cudaMalloc( ( void** )&posD, N * sizeof( double4 ) ) ); #else float4 *accD; CUDA_SAFE_CALL( cudaMalloc( ( void** )&accD, N * sizeof( float4 ) ) ); DS4 *posD; CUDA_SAFE_CALL( cudaMalloc( ( void** )&posD, N * sizeof( DS4 ) ) ); #endif /*********************************************************************/ int istart = cpu_thread_id * N/num_cpu_threads; #ifdef SIXTH_METHOD double4 *sub_a = acc1 + cpu_thread_id * N/ num_cpu_threads; #else float4 *sub_a = acc1 + cpu_thread_id * N/ num_cpu_threads; #endif /********************************************************************/ dim3 gpu_threads( THREADS_PER_BLOCK ); dim3 gpu_blocks( N / ( gpu_threads.x * num_cpu_threads ) ); #ifdef SIXTH_METHOD int sharedMem = sizeof( double4 ) * gpu_threads.x; CUDA_SAFE_CALL( cudaMemcpy( posD, pos1, N * sizeof( double4 ), cudaMemcpyHostToDevice ) ); #else int sharedMem = sizeof( DS4 ) * gpu_threads.x; CUDA_SAFE_CALL( cudaMemcpy( posD, pos1_temp, N * sizeof( DS4 ), cudaMemcpyHostToDevice ) ); #endif /*******************************************************************/ calculate_forces <<< gpu_blocks, gpu_threads, sharedMem >>> ( posD, accD, rCl, M, istart ); #ifdef SIXTH_METHOD CUDA_SAFE_CALL( cudaMemcpy( sub_a, accD, ( N / num_cpu_threads ) * sizeof( double4 ), cudaMemcpyDeviceToHost ) ); #else CUDA_SAFE_CALL( cudaMemcpy( sub_a, accD, ( N / num_cpu_threads ) * sizeof( float4 ), cudaMemcpyDeviceToHost) ); #endif CUDA_SAFE_CALL( cudaFree( accD )); CUDA_SAFE_CALL( cudaFree( posD )); } #ifdef _OMP omp_set_num_threads( NUM_THREADS ); #endif } #pragma omp parallel default( shared ) private( i ) { #ifndef POTENTIAL #pragma omp for private( R2, r2, r, a, b, c, axG_t, ayG_t, azG_t ) for( i = 0; i < N; i++ ){ R2 = pos1[i].x*pos1[i].x + pos1[i].y*pos1[i].y; r2 = pos1[i].x*pos1[i].x + pos1[i].y*pos1[i].y + pos1[i].z*pos1[i].z; r = sqrt(r2); a = pow((r2 + bB*bB),1.5); axG_t = -mBulge * pos1[i].x / a; ayG_t = -mBulge * pos1[i].y / a; azG_t = -mBulge * pos1[i].z / a; b = pow(R2 + (aD + sqrt(bD*bD + pos1[i].z*pos1[i].z))*(aD+ sqrt(bD*bD + pos1[i].z*pos1[i].z)),1.5); axG_t -= mDisk * pos1[i].x / b; ayG_t -= mDisk * pos1[i].y / b; azG_t -= mDisk * pos1[i].z *(aD+ sqrt(bD*bD + pos1[i].z*pos1[i].z))/(sqrt(bD*bD + pos1[i].z*pos1[i].z) * pow(R2+ (aD+ sqrt(bD*bD + pos1[i].z*pos1[i].z))*(aD+ sqrt(bD*bD + pos1[i].z*pos1[i].z)),1.5)); /*************** see Espresate,[2002] *******************/ c = pow(r/aH,0.02)/((aH*aH)*r* (1. 
+ pow(r/aH,1.02))); axG_t -= mHalo * pos1[i].x * c; ayG_t -= mHalo * pos1[i].y * c; azG_t -= mHalo * pos1[i].z * c; acc1[i].x += axG_t; acc1[i].y += ayG_t; acc1[i].z += azG_t; } #endif #pragma omp for for(i = 0; i < N; i++){ vel[i].u = vel[i].u + D[l] * h * acc1[i].x; pos[i].x = pos1[i].x; vel[i].v = vel[i].v + D[l] * h * acc1[i].y; pos[i].y = pos1[i].y; vel[i].w = vel[i].w + D[l] * h * acc1[i].z; pos[i].z = pos1[i].z; } } } /***********************************************************/ if( k == q*t_i && q < FRAME+1 ) { print2("./Data/sixth", pos1, vel, q); q++; } /***********************************************************/ } /************************* END LOOP *************************/ end = clock(); gettimeofday(&tv, NULL); secEnd = tv.tv_sec; microSecEnd = tv.tv_usec; /************ Evaluation of the distances *******************/ time = (((double)(end-start))/((double)CLOCKS_PER_SEC)); cout << setprecision(10); cout << "Time clock: " << time << " Get time of day: "; cout << secEnd + microSecEnd * 0.000001 - secStart - microSecStart*0.000001 << endl; E = kinEnergy() + potEnergy( pos, M ); L = angMomentum( pos, vel ); cout << "E0 = " << E0 << "; E = " << E << "; DE =(E-E0)/E0 = " << (E-E0)/fabs(E0) << endl; cout << "L0 = " << L0 << "; L = " << L << "; DL =(L-L0)/L0 = " << (L-L0)/fabs(L0) << endl; /************** Print on the file *************************/ openFile(outES, "./Data/energiaS.txt"); outES << "####################################\n"; outES << "# SIXTH ORDER METHOD: #\n"; outES << "####################################\n"; outES << "Total Number of steps: " << n << ".\n"; outES << "Integration step: " << h << ".\n"; outES << "Cluster radius :" << rCl << ".\n"; outES << "Beta: " << beta << ".\n\n"; outES << "E0 = " << E0 << "; E = " << E << "; DE =(E-E0)/E0 = " << (E-E0)/fabs(E0) << endl; outES << "L0 = " << L0 << "; L = " << L << "; DL =(L-L0)/L0 = " << (L-L0)/fabs(L0) << endl; outES << endl; outES << "Time clock: " << time << " Get time of day: "; outES << secEnd + microSecEnd * 0.000001 - secStart - microSecStart*0.000001 << endl; closeFile(outES); /*********************************************************/ }
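The integration loop above drives one GPU per OpenMP thread, offsetting the device id by one so that device 0 stays free (per the comment in the source). Stripped of the application specifics, the pattern is roughly the sketch below; num_gpus and the 0-based device mapping are illustrative, not this file's exact convention.

#include <omp.h>

void run_on_all_gpus(int num_gpus)
{
    omp_set_num_threads(num_gpus);
    #pragma omp parallel
    {
        int t = omp_get_thread_num();
        cudaSetDevice(t);   // one CUDA context per OpenMP thread
        // Each thread then allocates its own device buffers, uploads its
        // slice of the positions, launches calculate_forces on that
        // sub-range, and copies its partial accelerations back, as above.
    }
}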
5262a066e9af14d8af8df82c83d03afe5358a26a.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <stdio.h>
#include <math.h>

// Kernel function to add the elements of two arrays
__global__ void vecAdd(int n, float *a, float *b, float *c)
{
  int index = blockIdx.x * blockDim.x + threadIdx.x;
  int stride = blockDim.x * gridDim.x;
  for (int i = index; i < n; i += stride)
    c[i] = a[i] + b[i];
}

int main(void)
{
  int N = 1<<20;
  float *x, *y, *z;
  float msec;
  hipEvent_t start, stop;

  // Allocate Unified Memory -- accessible from CPU or GPU
  hipMallocManaged(&x, N*sizeof(float));
  hipMallocManaged(&y, N*sizeof(float));
  hipMallocManaged(&z, N*sizeof(float));

  // initialize x and y arrays on the host
  for (int i = 0; i < N; i++) {
    x[i] = 1.0f;
    y[i] = 2.0f;
  }

  hipEventCreate(&start);
  hipEventCreate(&stop);

  int blockSize = 256;
  int numBlocks = 12; // good enough for P620

  hipEventRecord(start);
  hipLaunchKernelGGL((vecAdd), dim3(numBlocks), dim3(blockSize), 0, 0, N, x, y, z);
  hipEventRecord(stop);

  // Wait for GPU to finish before accessing on host
  hipEventSynchronize(stop);
  hipEventElapsedTime(&msec, start, stop);
  printf("Kernel time: %f ms\n", msec);

  // Check for errors (all values should be 3.0f)
  float maxError = 0.0f;
  for (int i = 0; i < N; i++)
    maxError = fmax(maxError, fabs(z[i]-3.0f));
  std::cout << "Max error: " << maxError << std::endl;

  // Free memory
  hipFree(x);
  hipFree(y);
  hipFree(z);

  return 0;
}
5262a066e9af14d8af8df82c83d03afe5358a26a.cu
#include <iostream>
#include <stdio.h>
#include <math.h>

// Kernel function to add the elements of two arrays
__global__ void vecAdd(int n, float *a, float *b, float *c)
{
  int index = blockIdx.x * blockDim.x + threadIdx.x;
  int stride = blockDim.x * gridDim.x;
  for (int i = index; i < n; i += stride)
    c[i] = a[i] + b[i];
}

int main(void)
{
  int N = 1<<20;
  float *x, *y, *z;
  float msec;
  cudaEvent_t start, stop;

  // Allocate Unified Memory -- accessible from CPU or GPU
  cudaMallocManaged(&x, N*sizeof(float));
  cudaMallocManaged(&y, N*sizeof(float));
  cudaMallocManaged(&z, N*sizeof(float));

  // initialize x and y arrays on the host
  for (int i = 0; i < N; i++) {
    x[i] = 1.0f;
    y[i] = 2.0f;
  }

  cudaEventCreate(&start);
  cudaEventCreate(&stop);

  int blockSize = 256;
  int numBlocks = 12; // good enough for P620

  cudaEventRecord(start);
  vecAdd<<<numBlocks, blockSize>>>(N, x, y, z);
  cudaEventRecord(stop);

  // Wait for GPU to finish before accessing on host
  cudaEventSynchronize(stop);
  cudaEventElapsedTime(&msec, start, stop);
  printf("Kernel time: %f ms\n", msec);

  // Check for errors (all values should be 3.0f)
  float maxError = 0.0f;
  for (int i = 0; i < N; i++)
    maxError = fmax(maxError, fabs(z[i]-3.0f));
  std::cout << "Max error: " << maxError << std::endl;

  // Free memory
  cudaFree(x);
  cudaFree(y);
  cudaFree(z);

  return 0;
}
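The hard-coded numBlocks = 12 in the pair above is tuned for one specific GPU (a P620, per the comment). A more portable sizing queries the occupancy API; the sketch below assumes CUDA 6.5 or newer, and gridSizeForVecAdd is an illustrative helper name.

int gridSizeForVecAdd(int blockSize)
{
    int dev = 0, blocksPerSM = 0;
    cudaDeviceProp prop;
    cudaGetDevice(&dev);
    cudaGetDeviceProperties(&prop, dev);
    // Max resident blocks of vecAdd per SM at this block size, no dynamic smem.
    cudaOccupancyMaxActiveBlocksPerMultiprocessor(&blocksPerSM, vecAdd, blockSize, 0);
    return blocksPerSM * prop.multiProcessorCount;  // enough blocks to fill the GPU
}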
e6d39759ba2d861940f02d48f7f81d1b5591595c.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <nervCUDA.h> #include <nerv_kernels.h> /* Method used to evaluate the cost function when starting from the hx and yy matrices. */ template <class T, unsigned int blockSize, bool nIsPow2> __global__ void ComputeLength2(T *g_idata, T *g_odata, unsigned int n) { T *sdata = SharedMemory<T>(); // perform first level of reduction, // reading from global memory, writing to shared memory unsigned int tid = threadIdx.x; unsigned int i = blockIdx.x*blockSize*2 + threadIdx.x; unsigned int gridSize = blockSize*2*gridDim.x; T mySum = 0; // we reduce multiple elements per thread. The number is determined by the // number of active thread blocks (via gridDim). More blocks will result // in a larger gridSize and therefore fewer elements per thread while (i < n) { T val = g_idata[i]; mySum += val*val; // ensure we don't read out of bounds -- this is optimized away for powerOf2 sized arrays if (nIsPow2 || i + blockSize < n) { val = g_idata[i+blockSize]; mySum += val*val; } i += gridSize; } // each thread puts its local sum into shared memory sdata[tid] = mySum; __syncthreads(); // do reduction in shared mem if ((blockSize >= 512) && (tid < 256)) { sdata[tid] = mySum = mySum + sdata[tid + 256]; } __syncthreads(); if ((blockSize >= 256) &&(tid < 128)) { sdata[tid] = mySum = mySum + sdata[tid + 128]; } __syncthreads(); if ((blockSize >= 128) && (tid < 64)) { sdata[tid] = mySum = mySum + sdata[tid + 64]; } __syncthreads(); #if (__CUDA_ARCH__ >= 300 ) if ( tid < 32 ) { // Fetch final intermediate sum from 2nd warp if (blockSize >= 64) mySum += sdata[tid + 32]; // Reduce final warp using shuffle for (int offset = warpSize/2; offset > 0; offset /= 2) { mySum += __shfl_down(mySum, offset); } } #else // fully unroll reduction within a single warp if ((blockSize >= 64) && (tid < 32)) { sdata[tid] = mySum = mySum + sdata[tid + 32]; } __syncthreads(); if ((blockSize >= 32) && (tid < 16)) { sdata[tid] = mySum = mySum + sdata[tid + 16]; } __syncthreads(); if ((blockSize >= 16) && (tid < 8)) { sdata[tid] = mySum = mySum + sdata[tid + 8]; } __syncthreads(); if ((blockSize >= 8) && (tid < 4)) { sdata[tid] = mySum = mySum + sdata[tid + 4]; } __syncthreads(); if ((blockSize >= 4) && (tid < 2)) { sdata[tid] = mySum = mySum + sdata[tid + 2]; } __syncthreads(); if ((blockSize >= 2) && ( tid < 1)) { sdata[tid] = mySum = mySum + sdata[tid + 1]; } __syncthreads(); #endif // write result for this block to global mem if (tid == 0) g_odata[blockIdx.x] = mySum; } //////////////////////////////////////////////////////////////////////////////// // Wrapper function for kernel launch //////////////////////////////////////////////////////////////////////////////// template<typename T> void compute_length2(int size, int threads, int blocks, int whichKernel, T *d_idata, T *d_odata) { dim3 dimBlock(threads, 1, 1); dim3 dimGrid(blocks, 1, 1); // when there is only one warp per block, we need to allocate two warps // worth of shared memory so that we don't index shared memory out of bounds int smemSize = (threads <= 32) ? 
2 * threads * sizeof(T) : threads * sizeof(T); // choose which of the optimized versions of reduction to launch if (isPow2(size)) { switch (threads) { case 512: hipLaunchKernelGGL(( ComputeLength2<T, 512, true>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size); break; case 256: hipLaunchKernelGGL(( ComputeLength2<T, 256, true>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size); break; case 128: hipLaunchKernelGGL(( ComputeLength2<T, 128, true>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size); break; case 64: hipLaunchKernelGGL(( ComputeLength2<T, 64, true>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size); break; case 32: hipLaunchKernelGGL(( ComputeLength2<T, 32, true>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size); break; case 16: hipLaunchKernelGGL(( ComputeLength2<T, 16, true>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size); break; case 8: hipLaunchKernelGGL(( ComputeLength2<T, 8, true>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size); break; case 4: hipLaunchKernelGGL(( ComputeLength2<T, 4, true>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size); break; case 2: hipLaunchKernelGGL(( ComputeLength2<T, 2, true>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size); break; case 1: hipLaunchKernelGGL(( ComputeLength2<T, 1, true>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size); break; } } else { switch (threads) { case 512: hipLaunchKernelGGL(( ComputeLength2<T, 512, false>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size); break; case 256: hipLaunchKernelGGL(( ComputeLength2<T, 256, false>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size); break; case 128: hipLaunchKernelGGL(( ComputeLength2<T, 128, false>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size); break; case 64: hipLaunchKernelGGL(( ComputeLength2<T, 64, false>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size); break; case 32: hipLaunchKernelGGL(( ComputeLength2<T, 32, false>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size); break; case 16: hipLaunchKernelGGL(( ComputeLength2<T, 16, false>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size); break; case 8: hipLaunchKernelGGL(( ComputeLength2<T, 8, false>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size); break; case 4: hipLaunchKernelGGL(( ComputeLength2<T, 4, false>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size); break; case 2: hipLaunchKernelGGL(( ComputeLength2<T, 2, false>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size); break; case 1: hipLaunchKernelGGL(( ComputeLength2<T, 1, false>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size); break; } } } template<typename T> T compute_length2_device(T* d_vec, T* d_odata, unsigned int n) { int maxThreads = 256; int maxBlocks = 64; int whichKernel = 6; // bool cpuFinalReduction = false; int cpuFinalThreshold = 1; int numBlocks = 0; int numThreads = 0; getNumBlocksAndThreads(whichKernel, n, maxBlocks, maxThreads, numBlocks, numThreads); // Allocate output array: // size_t size = numBlocks*sizeof(T); // T* d_odata = NULL; // checkCudaErrors(hipMalloc(&d_odata, size)); // Allocate mem for the result on host side T *h_odata = (T *) malloc(numBlocks*sizeof(T)); T gpu_result = 0.0; bool needReadBack = true; // execute the 
kernel compute_length2(n, numThreads, numBlocks, whichKernel, d_vec, d_odata); // sum partial block sums on GPU int s=numBlocks; int kernel = whichKernel; while (s > cpuFinalThreshold) { int threads = 0, blocks = 0; getNumBlocksAndThreads(kernel, s, maxBlocks, maxThreads, blocks, threads); reduce_sum_launcher(s, threads, blocks, kernel, d_odata, d_odata); if (kernel < 3) { s = (s + threads - 1) / threads; } else { s = (s + (threads*2-1)) / (threads*2); } } if (s > 1) { // copy result from device to host checkCudaErrors(hipMemcpy(h_odata, d_odata, s * sizeof(T), hipMemcpyDeviceToHost)); for (int i=0; i < s; i++) { gpu_result += h_odata[i]; } needReadBack = false; } if (needReadBack) { // copy final sum from device to host checkCudaErrors(hipMemcpy(&gpu_result, d_odata, sizeof(T), hipMemcpyDeviceToHost)); } // Free host memory: free(h_odata); // Free device memory // checkCudaErrors(hipFree(d_odata)); return gpu_result; } template <typename T> T _compute_length2(T* vec, unsigned int n) { size_t size; size = n * sizeof(T); T* d_tmp = NULL; checkCudaErrors(hipMalloc(&d_tmp, size)); T* d_vec = NULL; checkCudaErrors(hipMalloc(&d_vec, size)); checkCudaErrors(hipMemcpy(d_vec, vec, size, hipMemcpyHostToDevice)); T res = compute_length2_device(d_vec, d_tmp, n); checkCudaErrors(hipFree(d_tmp)); checkCudaErrors(hipFree(d_vec)); return res; } extern "C" { double compute_length2(double* vec, unsigned int n) { return _compute_length2(vec,n); } float compute_length2_f(float* vec, unsigned int n) { return _compute_length2(vec,n); } }
e6d39759ba2d861940f02d48f7f81d1b5591595c.cu
#include <nervCUDA.h> #include <nerv_kernels.h> /* Method used to evaluate the cost function when starting from the hx and yy matrices. */ template <class T, unsigned int blockSize, bool nIsPow2> __global__ void ComputeLength2(T *g_idata, T *g_odata, unsigned int n) { T *sdata = SharedMemory<T>(); // perform first level of reduction, // reading from global memory, writing to shared memory unsigned int tid = threadIdx.x; unsigned int i = blockIdx.x*blockSize*2 + threadIdx.x; unsigned int gridSize = blockSize*2*gridDim.x; T mySum = 0; // we reduce multiple elements per thread. The number is determined by the // number of active thread blocks (via gridDim). More blocks will result // in a larger gridSize and therefore fewer elements per thread while (i < n) { T val = g_idata[i]; mySum += val*val; // ensure we don't read out of bounds -- this is optimized away for powerOf2 sized arrays if (nIsPow2 || i + blockSize < n) { val = g_idata[i+blockSize]; mySum += val*val; } i += gridSize; } // each thread puts its local sum into shared memory sdata[tid] = mySum; __syncthreads(); // do reduction in shared mem if ((blockSize >= 512) && (tid < 256)) { sdata[tid] = mySum = mySum + sdata[tid + 256]; } __syncthreads(); if ((blockSize >= 256) &&(tid < 128)) { sdata[tid] = mySum = mySum + sdata[tid + 128]; } __syncthreads(); if ((blockSize >= 128) && (tid < 64)) { sdata[tid] = mySum = mySum + sdata[tid + 64]; } __syncthreads(); #if (__CUDA_ARCH__ >= 300 ) if ( tid < 32 ) { // Fetch final intermediate sum from 2nd warp if (blockSize >= 64) mySum += sdata[tid + 32]; // Reduce final warp using shuffle for (int offset = warpSize/2; offset > 0; offset /= 2) { mySum += __shfl_down(mySum, offset); } } #else // fully unroll reduction within a single warp if ((blockSize >= 64) && (tid < 32)) { sdata[tid] = mySum = mySum + sdata[tid + 32]; } __syncthreads(); if ((blockSize >= 32) && (tid < 16)) { sdata[tid] = mySum = mySum + sdata[tid + 16]; } __syncthreads(); if ((blockSize >= 16) && (tid < 8)) { sdata[tid] = mySum = mySum + sdata[tid + 8]; } __syncthreads(); if ((blockSize >= 8) && (tid < 4)) { sdata[tid] = mySum = mySum + sdata[tid + 4]; } __syncthreads(); if ((blockSize >= 4) && (tid < 2)) { sdata[tid] = mySum = mySum + sdata[tid + 2]; } __syncthreads(); if ((blockSize >= 2) && ( tid < 1)) { sdata[tid] = mySum = mySum + sdata[tid + 1]; } __syncthreads(); #endif // write result for this block to global mem if (tid == 0) g_odata[blockIdx.x] = mySum; } //////////////////////////////////////////////////////////////////////////////// // Wrapper function for kernel launch //////////////////////////////////////////////////////////////////////////////// template<typename T> void compute_length2(int size, int threads, int blocks, int whichKernel, T *d_idata, T *d_odata) { dim3 dimBlock(threads, 1, 1); dim3 dimGrid(blocks, 1, 1); // when there is only one warp per block, we need to allocate two warps // worth of shared memory so that we don't index shared memory out of bounds int smemSize = (threads <= 32) ? 
2 * threads * sizeof(T) : threads * sizeof(T); // choose which of the optimized versions of reduction to launch if (isPow2(size)) { switch (threads) { case 512: ComputeLength2<T, 512, true><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break; case 256: ComputeLength2<T, 256, true><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break; case 128: ComputeLength2<T, 128, true><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break; case 64: ComputeLength2<T, 64, true><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break; case 32: ComputeLength2<T, 32, true><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break; case 16: ComputeLength2<T, 16, true><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break; case 8: ComputeLength2<T, 8, true><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break; case 4: ComputeLength2<T, 4, true><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break; case 2: ComputeLength2<T, 2, true><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break; case 1: ComputeLength2<T, 1, true><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break; } } else { switch (threads) { case 512: ComputeLength2<T, 512, false><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break; case 256: ComputeLength2<T, 256, false><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break; case 128: ComputeLength2<T, 128, false><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break; case 64: ComputeLength2<T, 64, false><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break; case 32: ComputeLength2<T, 32, false><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break; case 16: ComputeLength2<T, 16, false><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break; case 8: ComputeLength2<T, 8, false><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break; case 4: ComputeLength2<T, 4, false><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break; case 2: ComputeLength2<T, 2, false><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break; case 1: ComputeLength2<T, 1, false><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break; } } } template<typename T> T compute_length2_device(T* d_vec, T* d_odata, unsigned int n) { int maxThreads = 256; int maxBlocks = 64; int whichKernel = 6; // bool cpuFinalReduction = false; int cpuFinalThreshold = 1; int numBlocks = 0; int numThreads = 0; getNumBlocksAndThreads(whichKernel, n, maxBlocks, maxThreads, numBlocks, numThreads); // Allocate output array: // size_t size = numBlocks*sizeof(T); // T* d_odata = NULL; // checkCudaErrors(cudaMalloc(&d_odata, size)); // Allocate mem for the result on host side T *h_odata = (T *) malloc(numBlocks*sizeof(T)); T gpu_result = 0.0; bool needReadBack = true; // execute the kernel compute_length2(n, numThreads, numBlocks, whichKernel, d_vec, d_odata); // sum partial block sums on GPU int s=numBlocks; int kernel = whichKernel; while (s > cpuFinalThreshold) { int threads = 0, blocks = 0; getNumBlocksAndThreads(kernel, s, maxBlocks, maxThreads, blocks, threads); reduce_sum_launcher(s, threads, blocks, kernel, d_odata, d_odata); if (kernel < 3) { s = (s + threads - 1) / threads; } else { s = (s + (threads*2-1)) / (threads*2); } } if (s > 1) { // copy result from device to host checkCudaErrors(cudaMemcpy(h_odata, d_odata, s * sizeof(T), cudaMemcpyDeviceToHost)); for (int i=0; i < s; i++) { gpu_result += h_odata[i]; } 
needReadBack = false; } if (needReadBack) { // copy final sum from device to host checkCudaErrors(cudaMemcpy(&gpu_result, d_odata, sizeof(T), cudaMemcpyDeviceToHost)); } // Free host memory: free(h_odata); // Free device memory // checkCudaErrors(cudaFree(d_odata)); return gpu_result; } template <typename T> T _compute_length2(T* vec, unsigned int n) { size_t size; size = n * sizeof(T); T* d_tmp = NULL; checkCudaErrors(cudaMalloc(&d_tmp, size)); T* d_vec = NULL; checkCudaErrors(cudaMalloc(&d_vec, size)); checkCudaErrors(cudaMemcpy(d_vec, vec, size, cudaMemcpyHostToDevice)); T res = compute_length2_device(d_vec, d_tmp, n); checkCudaErrors(cudaFree(d_tmp)); checkCudaErrors(cudaFree(d_vec)); return res; } extern "C" { double compute_length2(double* vec, unsigned int n) { return _compute_length2(vec,n); } float compute_length2_f(float* vec, unsigned int n) { return _compute_length2(vec,n); } }
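Both copies of the file export the same two extern "C" entry points, compute_length2 for double and compute_length2_f for float. A minimal sketch of a host caller, assuming the translation unit above is compiled and linked in (the main function and the all-ones test vector are illustrative only):

#include <cstdio>

extern "C" double compute_length2(double* vec, unsigned int n);

int main() {
    const unsigned int n = 1u << 20;
    double* v = new double[n];
    for (unsigned int i = 0; i < n; ++i) v[i] = 1.0;   // every element 1, so |v|^2 == n

    // copies v to the device, runs ComputeLength2 plus the block-sum passes,
    // and returns the final squared length
    double len2 = compute_length2(v, n);
    printf("length^2 = %.1f (expected %u)\n", len2, n);

    delete[] v;
    return 0;
}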
50a16eab66190c40799c00654cccdddb841aa779.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "Macro.h" #ifdef GPU // checks // one must define RED_NTHREAD for the reduction kernel in advance since we use the static shared memory #ifndef RED_NTHREAD # error : ERROR : RED_NTHREAD is not defined in BlockReduction_Shuffle !! #endif // WARP_SIZE must be defined to be the same as the CUDA predefined constant "warpSize" #ifndef WARP_SIZE # error : ERROR : WARP_SIZE is not defined in BlockReduction_Shuffle !! #endif // RED_NTHREAD must be a multiple of WARP_SIZE #if ( RED_NTHREAD % WARP_SIZE != 0 ) # error : ERROR : RED_NTHREAD must be a multiple of WARP_SIZE !! #endif // define the reduction operation here #if defined RED_SUM # define RED( a, b ) ( (a) + (b) ) #elif defined RED_MAX # define RED( a, b ) MAX( (a), (b) ) #elif defined RED_MIN # define RED( a, b ) MIN( (a), (b) ) #else # error : undefined reduction operation !! #endif //------------------------------------------------------------------------------------------------------- // Function : WarpReduction_Shuffle // Description : GPU reduction within each warp using the register shuffling // // Note : 1. Invoked by BlockReduction_Shuffle // 2. Only thread 0 will hold the correct result // // Parameter : val : Per-thread value for the reduction // // Return value: Reduction of "val" within each warp //--------------------------------------------------------------------------------------------------- __inline__ __device__ real WarpReduction_Shuffle( real val ) { for (int offset=WARP_SIZE/2; offset>0; offset/=2) { // this line somehow fails on K20X for RED_MAX (and RED_MIN likely) // --> perhaps it's because when using "val = (val > __shfl_down(val,offset) ) ? val : __shfl_down(val,offset);" // the second " __shfl_down(val,offset)" becomes ill-defined since "val" in other threads might be modified in advance // if these threads have "(val > __shfl_down(val,offset) )" // val = RED( val, __shfl_down(val,offset,WARP_SIZE) ); // use this approach instead to invoke "__shfl_down(val,offset, WARP_SIZE)" only once const real tmp = __shfl_down( val, offset, WARP_SIZE ); val = RED( val, tmp ); } return val; } // FUNCTION : WarpReduction_Shuffle //------------------------------------------------------------------------------------------------------- // Function : BlockReduction_Shuffle // Description : GPU reduction within each thread block using the register shuffling // // Note : 1. Improve reduction performance by register shuffling // 2. Reference: https://devblogs.nvidia.com/parallelforall/faster-parallel-reductions-kepler/ // --> By Justin Luitjens // 3. Assuming warp size == 32 // 4. Must define RED_NTHREAD in advance since we use the static shared memory // --> RED_NTHREAD must be a multiple of the warp size // 5. Must define either RED_SUM, RED_MAX, or RED_MIN in advance to determine the reduction operation // 6. 
Only thread 0 will hold the correct result after calling this function // // Parameter : val : Per-thread value for the reduction // // Return value: Reduction of "val" within each thread block //--------------------------------------------------------------------------------------------------- __inline__ __device__ real BlockReduction_Shuffle( real val ) { const uint tid_x = threadIdx.x; const uint tid_y = threadIdx.y; const uint tid_z = threadIdx.z; const uint bdim_x = blockDim.x; const uint bdim_y = blockDim.y; const uint ID = __umul24( tid_z, __umul24(bdim_x,bdim_y) ) + __umul24( tid_y, bdim_x ) + tid_x; const int lane = ID % WARP_SIZE; // local lane ID within a warp [0 ... WARP_SIZE-1] const int wid = ID / WARP_SIZE; // warp ID const int MaxNWarp = 32; // maximum number of warps allowed == MaxBlockSize/WARP_SIZE == 1024/32 == 32 // --> all current compute capabilities have MaxBlockSize==1024 and WARP_SIZE==32 const int NWarp = RED_NTHREAD/WARP_SIZE; // actual number of warps (which must be <= WARP_SIZE since we apply the // final reduction only to the first warp) static __shared__ real shared[MaxNWarp]; // maximum shared memory required for 32 partial sums (must be <= WARP_SIZE) // perform reduction within each warp val = WarpReduction_Shuffle( val ); // write reduced value to the shared memory if ( lane == 0 ) shared[wid] = val; // wait for all partial reductions __syncthreads(); // here we have assumed that NWarp < WARP_SIZE if ( wid == 0 ) { // read from the shared memory only if that warp exists val = ( ID < NWarp ) ? shared[lane] : # if defined RED_SUM (real)0.0; # elif defined RED_MAX (real)-HUGE_NUMBER; # elif defined RED_MIN (real)+HUGE_NUMBER; # else # error : undefined reduction operation !! # endif // final reduction within first warp val = WarpReduction_Shuffle( val ); } return val; } // FUNCTION : BlockReduction_Shuffle #endif // #ifdef GPU
50a16eab66190c40799c00654cccdddb841aa779.cu
#include "Macro.h" #ifdef GPU // checks // one must define RED_NTHREAD for the reduction kernel in advance since we use the static shared memory #ifndef RED_NTHREAD # error : ERROR : RED_NTHREAD is not defined in BlockReduction_Shuffle !! #endif // WARP_SIZE must be defined to be the same as the CUDA predefined constant "warpSize" #ifndef WARP_SIZE # error : ERROR : WARP_SIZE is not defined in BlockReduction_Shuffle !! #endif // RED_NTHREAD must be a multiple of WARP_SIZE #if ( RED_NTHREAD % WARP_SIZE != 0 ) # error : ERROR : RED_NTHREAD must be a multiple of WARP_SIZE !! #endif // define the reduction operation here #if defined RED_SUM # define RED( a, b ) ( (a) + (b) ) #elif defined RED_MAX # define RED( a, b ) MAX( (a), (b) ) #elif defined RED_MIN # define RED( a, b ) MIN( (a), (b) ) #else # error : undefined reduction operation !! #endif //------------------------------------------------------------------------------------------------------- // Function : WarpReduction_Shuffle // Description : GPU reduction within each warp using the register shuffling // // Note : 1. Invoked by BlockReduction_Shuffle // 2. Only thread 0 will hold the correct result // // Parameter : val : Per-thread value for the reduction // // Return value: Reduction of "val" within each warp //--------------------------------------------------------------------------------------------------- __inline__ __device__ real WarpReduction_Shuffle( real val ) { for (int offset=WARP_SIZE/2; offset>0; offset/=2) { // this line somehow fails on K20X for RED_MAX (and RED_MIN likely) // --> perhaps it's because when using "val = (val > __shfl_down(val,offset) ) ? val : __shfl_down(val,offset);" // the second " __shfl_down(val,offset)" becomes ill-defined since "val" in other threads might be modified in advance // if these threads have "(val > __shfl_down(val,offset) )" // val = RED( val, __shfl_down(val,offset,WARP_SIZE) ); // use this approach instead to invoke "__shfl_down(val,offset, WARP_SIZE)" only once const real tmp = __shfl_down( val, offset, WARP_SIZE ); val = RED( val, tmp ); } return val; } // FUNCTION : WarpReduction_Shuffle //------------------------------------------------------------------------------------------------------- // Function : BlockReduction_Shuffle // Description : GPU reduction within each thread block using the register shuffling // // Note : 1. Improve reduction performance by register shuffling // 2. Reference: https://devblogs.nvidia.com/parallelforall/faster-parallel-reductions-kepler/ // --> By Justin Luitjens // 3. Assuming warp size == 32 // 4. Must define RED_NTHREAD in advance since we use the static shared memory // --> RED_NTHREAD must be a multiple of the warp size // 5. Must define either RED_SUM, RED_MAX, or RED_MIN in advance to determine the reduction operation // 6. Only thread 0 will hold the correct result after calling this function // // Parameter : val : Per-thread value for the reduction // // Return value: Reduction of "val" within each thread block //--------------------------------------------------------------------------------------------------- __inline__ __device__ real BlockReduction_Shuffle( real val ) { const uint tid_x = threadIdx.x; const uint tid_y = threadIdx.y; const uint tid_z = threadIdx.z; const uint bdim_x = blockDim.x; const uint bdim_y = blockDim.y; const uint ID = __umul24( tid_z, __umul24(bdim_x,bdim_y) ) + __umul24( tid_y, bdim_x ) + tid_x; const int lane = ID % WARP_SIZE; // local lane ID within a warp [0 ... 
WARP_SIZE-1] const int wid = ID / WARP_SIZE; // warp ID const int MaxNWarp = 32; // maximum number of warps allowed == MaxBlockSize/WARP_SIZE == 1024/32 == 32 // --> all current compute capabilities have MaxBlockSize==1024 and WARP_SIZE==32 const int NWarp = RED_NTHREAD/WARP_SIZE; // actual number of warps (which must be <= WARP_SIZE since we apply the // final reduction only to the first warp) static __shared__ real shared[MaxNWarp]; // maximum shared memory required for 32 partial sums (must be <= WARP_SIZE) // perform reduction within each warp val = WarpReduction_Shuffle( val ); // write reduced value to the shared memory if ( lane == 0 ) shared[wid] = val; // wait for all partial reductions __syncthreads(); // here we have assumed that NWarp < WARP_SIZE if ( wid == 0 ) { // read from the shared memory only if that warp exists val = ( ID < NWarp ) ? shared[lane] : # if defined RED_SUM (real)0.0; # elif defined RED_MAX (real)-HUGE_NUMBER; # elif defined RED_MIN (real)+HUGE_NUMBER; # else # error : undefined reduction operation !! # endif // final reduction within first warp val = WarpReduction_Shuffle( val ); } return val; } // FUNCTION : BlockReduction_Shuffle #endif // #ifdef GPU
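BlockReduction_Shuffle is header-style device code configured entirely by preprocessor macros, so a kernel that uses it must be built with RED_NTHREAD, WARP_SIZE, and one of RED_SUM / RED_MAX / RED_MIN already defined. A minimal sketch of such a caller, assuming the flags -DRED_SUM -DRED_NTHREAD=256 -DWARP_SIZE=32 and that Macro.h typedefs real (e.g., to float); SumKernel itself is hypothetical, not part of the source:

__global__ void SumKernel( const real *In, real *BlockSum, const int N )
{
   // blockDim.x must equal RED_NTHREAD for the shared-memory sizing to hold
   const int i   = blockIdx.x*RED_NTHREAD + threadIdx.x;
   real      val = ( i < N ) ? In[i] : (real)0.0;   // identity element for RED_SUM

   val = BlockReduction_Shuffle( val );

   // only thread 0 of each block holds the correct reduction
   if ( threadIdx.x == 0 )   BlockSum[ blockIdx.x ] = val;
}

// launched as: SumKernel<<< (N + RED_NTHREAD - 1)/RED_NTHREAD, RED_NTHREAD >>>( In, BlockSum, N );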
54ad012ab74f93324f05f0224b98a9bf2801e5b3.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <iostream> #include <vector> #include <stdio.h> #include <stdlib.h> #include <math.h> #include <sys/stat.h> #include <sys/time.h> //#include "gpu_spatial.h" #include "parser.cuh" using namespace std; #define THREAD1 128 #define BLOCK1 128 #define THREAD2 64 #define BLOCK2 128 extern hipStream_t *stream; __global__ void parseMBR(char *MBRRaw, int *MBR, int nr_polys){ int baseIdx = blockDim.x * blockIdx.x + threadIdx.x ; if (baseIdx < nr_polys) { baseIdx = baseIdx * 4 ; for (int i=0; i< 4; i++) { int dstIndx = baseIdx + i ; int srcIndx = dstIndx * 5; MBR[dstIndx] = (MBRRaw[srcIndx] - '0') * 1000 + (MBRRaw[srcIndx + 1] - '0') * 100 + (MBRRaw[srcIndx+2] - '0') * 10 + (MBRRaw[srcIndx + 3] - '0'); } } } __global__ void parseVertices(char *verticesRaw, int *offsetRaw, int* numOfVerticesInApoly, int *X, int*Y, int *offset, int nr_polys){ int polyIdx = blockDim.x * blockIdx.x + threadIdx.x ; // 1 polygon / thread if (polyIdx < nr_polys){ int numOfVertices = numOfVerticesInApoly[polyIdx]; int raw_offset = offsetRaw[polyIdx]; // raw offset in GPU memory int ver_offset = offset[polyIdx]; // vertex offset in GPU memory for (int j=0; j<numOfVertices; j++) { int srcIndx = raw_offset + j*10; X[ver_offset + j ] = (verticesRaw[srcIndx] - '0') * 1000 + (verticesRaw[srcIndx + 1] - '0') * 100 + (verticesRaw[srcIndx+2] - '0') * 10 + (verticesRaw[srcIndx + 3] - '0'); Y[ver_offset + j ] = (verticesRaw[srcIndx + 5] - '0') * 1000 + (verticesRaw[srcIndx + 6 ] - '0') * 100 + (verticesRaw[srcIndx + 7 ] - '0') * 10 + (verticesRaw[srcIndx + 8 ] - '0'); } } } struct timespec diff(struct timespec start, struct timespec end){ struct timespec temp; if ((end.tv_nsec-start.tv_nsec)<0) { temp.tv_sec = end.tv_sec-start.tv_sec-1; temp.tv_nsec = 1000000000+end.tv_nsec-start.tv_nsec; } else { temp.tv_sec = end.tv_sec-start.tv_sec; temp.tv_nsec = end.tv_nsec-start.tv_nsec; } return temp; } int getSizeBasedGPUConf1(int size){ return (size/(THREAD1*BLOCK1) + 1)*THREAD1*BLOCK1; } int getSizeBasedGPUConf2(int size){ return (size/THREAD2 + 1)*THREAD2; } int alloc_poly_array(poly_array_t *polys, const int nr_polys, const int nr_vertices) { int size_mbrs = nr_polys * sizeof(mbr_t); int size_offsets = (nr_polys + 1) * sizeof(int); int size_x = nr_vertices * sizeof(int); int size_y = nr_vertices * sizeof(int); polys->mbrs = (mbr_t *)malloc(size_mbrs + size_offsets + size_x + size_y); if(!polys->mbrs) { fprintf(stderr, "failed to allocate memory for poly array\n"); exit(1); } polys->nr_polys = nr_polys; polys->nr_vertices = nr_vertices; polys->offsets = (int *)((char *)(polys->mbrs) + size_mbrs); polys->x = (int *)((char *)(polys->offsets) + size_offsets); polys->y = (int *)((char *)(polys->x) + size_x); return 0; } poly_array_t *gpu_parse(int dno, const int nv, std::vector<string> * poly_vec) { char *MBRRaw, *verticesRaw, numOfVerticesBuf[4]; char *dev_MBRRaw, *dev_verticesRaw; int *offset, *offsetRaw, *numOfVerticesInApoly; int *dev_offset, *dev_offsetRaw, *dev_numOfVerticesInApoly; int *MBR, *X, *Y ; int *dev_MBR, *dev_X, *dev_Y; static const int BUF_SIZE = 8192; char buf[BUF_SIZE]; int nr_polys = poly_vec->size(); // number of polygons int nr_vertices = nv; // number of vertices //std::cerr << "inGPU poly parsing ..." 
<< std::endl; // cout <<"num of polygon: " <<nr_polys <<" , number of vertices:" <<nr_vertices<<endl; MBRRaw = (char *)malloc(20*nr_polys*sizeof(char)); MBR = (int *)malloc(4*nr_polys*sizeof(int)); hipSetDevice(dno); hipMalloc((void**)&dev_MBRRaw, 20*nr_polys*sizeof(char)); hipMalloc((void**)&dev_MBR, 4*nr_polys*sizeof(int)); verticesRaw = (char *)malloc(nr_vertices * 10); offset = (int *)malloc(nr_polys*sizeof(int)); offsetRaw = (int *)malloc(nr_polys*sizeof(int)); numOfVerticesInApoly = (int *)malloc(nr_polys*sizeof(int)); hipMalloc((void**)&dev_verticesRaw, nr_vertices * 10); hipMalloc((void**)&dev_offsetRaw, nr_polys*sizeof(int)); hipMalloc((void**)&dev_numOfVerticesInApoly, nr_polys*sizeof(int)); hipMalloc((void**)&dev_offset, nr_polys*sizeof(int)); int rawBufferIndx = 0; int vertexIndx = 0; int numVertices = 0; /* each iteration process a line */ std::size_t len =0 ; for (int i= 0 ; i<nr_polys; i++) { len = (*poly_vec)[i].copy(buf, BUF_SIZE); buf[len] = '\0'; memcpy(numOfVerticesBuf, buf, 4); // memcpy(MBRRaw + 20*i, buf + 5, 20); offset[i] = vertexIndx; offsetRaw[i] = rawBufferIndx; numVertices = atoi(numOfVerticesBuf); numOfVerticesInApoly[i] = numVertices; memcpy(verticesRaw + rawBufferIndx, buf + 25, numVertices * 10); vertexIndx += numVertices; rawBufferIndx += len - 25; //break; //cout<<atoi(numOfVerticesBuf)<<endl; } //cout<<getSizeBasedGPUConf2(nr_vertices*sizeof(int))<<" "<<(nr_vertices*sizeof(int)/THREAD2 + 1 )*THREAD2 // <<" "<<numVerticesInGPUMem<<" "<<nr_polys/BLOCK2 + 1<<" "<<rawBufferIndx<<endl; //cout<<verticesRaw + offsetRaw[nr_polys-1]<<endl; //cout<<verticesRaw<<endl; X = (int *)malloc(nr_vertices * sizeof(int)); Y = (int *)malloc(nr_vertices * sizeof(int)); hipMalloc((void**)&dev_X, nr_vertices * sizeof(int)); hipMalloc((void**)&dev_Y, nr_vertices * sizeof(int)); //cout<<MBRRaw<<endl<<endl; /* for debugging cout<<offset[0]<<" "<<offset[1]<<endl; cout<<offsetRaw[0]<<" "<<offsetRaw[1]<<endl; cout<<MBRRaw<<endl<<endl; cout<<verticesRaw<<endl; */ //cout<<"number per THREAD1 "<<getSizeBasedGPUConf1(4*nr_polys*sizeof(int))/(BLOCK1*THREAD1)<<endl; hipMemcpy(dev_MBRRaw, MBRRaw, 20*nr_polys*sizeof(char), hipMemcpyHostToDevice); /* timing utilities hipEvent_t start, stop; float elapsedTime; hipEventCreate( &start ); hipEventCreate( &stop ); hipEventRecord( start, 0 ); */ hipLaunchKernelGGL(( parseMBR), dim3(BLOCK1), dim3(THREAD1), 0, stream[dno], dev_MBRRaw, dev_MBR, nr_polys); hipDeviceSynchronize(); /* hipEventRecord( stop, 0 ); hipEventSynchronize( stop ); hipEventElapsedTime( &elapsedTime, start, stop ); cerr <<"Time to parse MBR: " <<elapsedTime<<"ms"<<endl; hipEventDestroy( start ); hipEventDestroy( stop ); */ hipMemcpy(MBR, dev_MBR, 4*nr_polys*sizeof(int), hipMemcpyDeviceToHost); /* debug purpose *for (int i=0; i<nr_polys; i++) { * cout<<MBR[4*i]<<" "<<MBR[4*i+1]<<" "<<MBR[4*i+2]<<" "<<MBR[4*i+3] << endl; *} */ hipMemcpy(dev_verticesRaw, verticesRaw, nr_vertices * 10, hipMemcpyHostToDevice); hipMemcpy(dev_offsetRaw, offsetRaw, nr_polys*sizeof(int), hipMemcpyHostToDevice); hipMemcpy(dev_numOfVerticesInApoly, numOfVerticesInApoly, nr_polys*sizeof(int), hipMemcpyHostToDevice); hipMemcpy(dev_offset, offset, nr_polys*sizeof(int), hipMemcpyHostToDevice); /* hipEventCreate( &start ); hipEventCreate( &stop ); hipEventRecord( start, 0 ); */ hipLaunchKernelGGL(( parseVertices), dim3(BLOCK1), dim3(THREAD1), 0, stream[dno], dev_verticesRaw, dev_offsetRaw, dev_numOfVerticesInApoly, dev_X, dev_Y, dev_offset, nr_polys); hipDeviceSynchronize(); /* hipEventRecord( stop, 0 ); 
hipEventSynchronize( stop ); hipEventElapsedTime( &elapsedTime, start, stop ); cerr <<"Time to parse POLY: " <<elapsedTime<<"ms"<<endl; */ hipMemcpy(X, dev_X, nr_vertices * sizeof(int), hipMemcpyDeviceToHost); hipMemcpy(Y, dev_Y, nr_vertices * sizeof(int), hipMemcpyDeviceToHost); poly_array_t *polys = (poly_array_t *)malloc(sizeof(poly_array_t)); alloc_poly_array(polys, nr_polys, nr_vertices); memcpy(polys->mbrs, MBR, sizeof(mbr_t) * nr_polys); memcpy(polys->offsets, offset, sizeof(int) * nr_polys); polys->offsets[nr_polys] = nr_vertices; memcpy(polys->x, X, sizeof(int) * nr_vertices); memcpy(polys->y, Y, sizeof(int) * nr_vertices); //int ret_X[nr_vertices], ret_Y[nr_vertices]; //for (int i=0; i<nr_polys; i++) { // memcpy(&ret_X[offset[i]], &X[offsetInGPUMem[i]], numOfVerticesInApoly[i]*sizeof(int)); // memcpy(&ret_Y[offset[i]], &Y[offsetInGPUMem[i]], numOfVerticesInApoly[i]*sizeof(int)); //} //cout<<" "<<X[nr_vertices - 1]<<" "<<Y[nr_vertices - 1]; /* for (int i=0; i<nr_polys; i++) { cout<<numOfVerticesInApoly[i] ; // <<MBR[4*i]<<" "<<MBR[4*i+1]<<" "<<MBR[4*i+2]<<" "<<MBR[4*i+3]; for (int j=0; j<numOfVerticesInApoly[i]; j++){ cout<<", "<<X[offset[i]+j]<<" "<<Y[offset[i]+j]; } cout<<endl; }*/ free(MBRRaw); free(verticesRaw); free(offset); free(offsetRaw); free(numOfVerticesInApoly); free(MBR); free(X); free(Y); hipFree(dev_MBRRaw); hipFree(dev_MBR); hipFree(dev_verticesRaw); hipFree(dev_offsetRaw); hipFree(dev_numOfVerticesInApoly); hipFree(dev_offset); hipFree(dev_X); hipFree(dev_Y); return polys; }
54ad012ab74f93324f05f0224b98a9bf2801e5b3.cu
#include <iostream> #include <vector> #include <stdio.h> #include <stdlib.h> #include <math.h> #include <sys/stat.h> #include <sys/time.h> //#include "gpu_spatial.h" #include "parser.cuh" using namespace std; #define THREAD1 128 #define BLOCK1 128 #define THREAD2 64 #define BLOCK2 128 extern cudaStream_t *stream; __global__ void parseMBR(char *MBRRaw, int *MBR, int nr_polys){ int baseIdx = blockDim.x * blockIdx.x + threadIdx.x ; if (baseIdx < nr_polys) { baseIdx = baseIdx * 4 ; for (int i=0; i< 4; i++) { int dstIndx = baseIdx + i ; int srcIndx = dstIndx * 5; MBR[dstIndx] = (MBRRaw[srcIndx] - '0') * 1000 + (MBRRaw[srcIndx + 1] - '0') * 100 + (MBRRaw[srcIndx+2] - '0') * 10 + (MBRRaw[srcIndx + 3] - '0'); } } } __global__ void parseVertices(char *verticesRaw, int *offsetRaw, int* numOfVerticesInApoly, int *X, int*Y, int *offset, int nr_polys){ int polyIdx = blockDim.x * blockIdx.x + threadIdx.x ; // 1 polygon / thread if (polyIdx < nr_polys){ int numOfVertices = numOfVerticesInApoly[polyIdx]; int raw_offset = offsetRaw[polyIdx]; // raw offset in GPU memory int ver_offset = offset[polyIdx]; // vertex offset in GPU memory for (int j=0; j<numOfVertices; j++) { int srcIndx = raw_offset + j*10; X[ver_offset + j ] = (verticesRaw[srcIndx] - '0') * 1000 + (verticesRaw[srcIndx + 1] - '0') * 100 + (verticesRaw[srcIndx+2] - '0') * 10 + (verticesRaw[srcIndx + 3] - '0'); Y[ver_offset + j ] = (verticesRaw[srcIndx + 5] - '0') * 1000 + (verticesRaw[srcIndx + 6 ] - '0') * 100 + (verticesRaw[srcIndx + 7 ] - '0') * 10 + (verticesRaw[srcIndx + 8 ] - '0'); } } } struct timespec diff(struct timespec start, struct timespec end){ struct timespec temp; if ((end.tv_nsec-start.tv_nsec)<0) { temp.tv_sec = end.tv_sec-start.tv_sec-1; temp.tv_nsec = 1000000000+end.tv_nsec-start.tv_nsec; } else { temp.tv_sec = end.tv_sec-start.tv_sec; temp.tv_nsec = end.tv_nsec-start.tv_nsec; } return temp; } int getSizeBasedGPUConf1(int size){ return (size/(THREAD1*BLOCK1) + 1)*THREAD1*BLOCK1; } int getSizeBasedGPUConf2(int size){ return (size/THREAD2 + 1)*THREAD2; } int alloc_poly_array(poly_array_t *polys, const int nr_polys, const int nr_vertices) { int size_mbrs = nr_polys * sizeof(mbr_t); int size_offsets = (nr_polys + 1) * sizeof(int); int size_x = nr_vertices * sizeof(int); int size_y = nr_vertices * sizeof(int); polys->mbrs = (mbr_t *)malloc(size_mbrs + size_offsets + size_x + size_y); if(!polys->mbrs) { fprintf(stderr, "failed to allocate memory for poly array\n"); exit(1); } polys->nr_polys = nr_polys; polys->nr_vertices = nr_vertices; polys->offsets = (int *)((char *)(polys->mbrs) + size_mbrs); polys->x = (int *)((char *)(polys->offsets) + size_offsets); polys->y = (int *)((char *)(polys->x) + size_x); return 0; } poly_array_t *gpu_parse(int dno, const int nv, std::vector<string> * poly_vec) { char *MBRRaw, *verticesRaw, numOfVerticesBuf[4]; char *dev_MBRRaw, *dev_verticesRaw; int *offset, *offsetRaw, *numOfVerticesInApoly; int *dev_offset, *dev_offsetRaw, *dev_numOfVerticesInApoly; int *MBR, *X, *Y ; int *dev_MBR, *dev_X, *dev_Y; static const int BUF_SIZE = 8192; char buf[BUF_SIZE]; int nr_polys = poly_vec->size(); // number of polygons int nr_vertices = nv; // number of vertices //std::cerr << "inGPU poly parsing ..." 
<< std::endl; // cout <<"num of polygon: " <<nr_polys <<" , number of vertices:" <<nr_vertices<<endl; MBRRaw = (char *)malloc(20*nr_polys*sizeof(char)); MBR = (int *)malloc(4*nr_polys*sizeof(int)); cudaSetDevice(dno); cudaMalloc((void**)&dev_MBRRaw, 20*nr_polys*sizeof(char)); cudaMalloc((void**)&dev_MBR, 4*nr_polys*sizeof(int)); verticesRaw = (char *)malloc(nr_vertices * 10); offset = (int *)malloc(nr_polys*sizeof(int)); offsetRaw = (int *)malloc(nr_polys*sizeof(int)); numOfVerticesInApoly = (int *)malloc(nr_polys*sizeof(int)); cudaMalloc((void**)&dev_verticesRaw, nr_vertices * 10); cudaMalloc((void**)&dev_offsetRaw, nr_polys*sizeof(int)); cudaMalloc((void**)&dev_numOfVerticesInApoly, nr_polys*sizeof(int)); cudaMalloc((void**)&dev_offset, nr_polys*sizeof(int)); int rawBufferIndx = 0; int vertexIndx = 0; int numVertices = 0; /* each iteration process a line */ std::size_t len =0 ; for (int i= 0 ; i<nr_polys; i++) { len = (*poly_vec)[i].copy(buf, BUF_SIZE); buf[len] = '\0'; memcpy(numOfVerticesBuf, buf, 4); // memcpy(MBRRaw + 20*i, buf + 5, 20); offset[i] = vertexIndx; offsetRaw[i] = rawBufferIndx; numVertices = atoi(numOfVerticesBuf); numOfVerticesInApoly[i] = numVertices; memcpy(verticesRaw + rawBufferIndx, buf + 25, numVertices * 10); vertexIndx += numVertices; rawBufferIndx += len - 25; //break; //cout<<atoi(numOfVerticesBuf)<<endl; } //cout<<getSizeBasedGPUConf2(nr_vertices*sizeof(int))<<" "<<(nr_vertices*sizeof(int)/THREAD2 + 1 )*THREAD2 // <<" "<<numVerticesInGPUMem<<" "<<nr_polys/BLOCK2 + 1<<" "<<rawBufferIndx<<endl; //cout<<verticesRaw + offsetRaw[nr_polys-1]<<endl; //cout<<verticesRaw<<endl; X = (int *)malloc(nr_vertices * sizeof(int)); Y = (int *)malloc(nr_vertices * sizeof(int)); cudaMalloc((void**)&dev_X, nr_vertices * sizeof(int)); cudaMalloc((void**)&dev_Y, nr_vertices * sizeof(int)); //cout<<MBRRaw<<endl<<endl; /* for debugging cout<<offset[0]<<" "<<offset[1]<<endl; cout<<offsetRaw[0]<<" "<<offsetRaw[1]<<endl; cout<<MBRRaw<<endl<<endl; cout<<verticesRaw<<endl; */ //cout<<"number per THREAD1 "<<getSizeBasedGPUConf1(4*nr_polys*sizeof(int))/(BLOCK1*THREAD1)<<endl; cudaMemcpy(dev_MBRRaw, MBRRaw, 20*nr_polys*sizeof(char), cudaMemcpyHostToDevice); /* timing utilities cudaEvent_t start, stop; float elapsedTime; cudaEventCreate( &start ); cudaEventCreate( &stop ); cudaEventRecord( start, 0 ); */ parseMBR<<<BLOCK1, THREAD1, 0, stream[dno]>>>(dev_MBRRaw, dev_MBR, nr_polys); cudaThreadSynchronize(); /* cudaEventRecord( stop, 0 ); cudaEventSynchronize( stop ); cudaEventElapsedTime( &elapsedTime, start, stop ); cerr <<"Time to parse MBR: " <<elapsedTime<<"ms"<<endl; cudaEventDestroy( start ); cudaEventDestroy( stop ); */ cudaMemcpy(MBR, dev_MBR, 4*nr_polys*sizeof(int), cudaMemcpyDeviceToHost); /* debug purpose *for (int i=0; i<nr_polys; i++) { * cout<<MBR[4*i]<<" "<<MBR[4*i+1]<<" "<<MBR[4*i+2]<<" "<<MBR[4*i+3] << endl; *} */ cudaMemcpy(dev_verticesRaw, verticesRaw, nr_vertices * 10, cudaMemcpyHostToDevice); cudaMemcpy(dev_offsetRaw, offsetRaw, nr_polys*sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(dev_numOfVerticesInApoly, numOfVerticesInApoly, nr_polys*sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(dev_offset, offset, nr_polys*sizeof(int), cudaMemcpyHostToDevice); /* cudaEventCreate( &start ); cudaEventCreate( &stop ); cudaEventRecord( start, 0 ); */ parseVertices<<<BLOCK1, THREAD1, 0, stream[dno]>>>(dev_verticesRaw, dev_offsetRaw, dev_numOfVerticesInApoly, dev_X, dev_Y, dev_offset, nr_polys); cudaThreadSynchronize(); /* cudaEventRecord( stop, 0 ); cudaEventSynchronize( stop ); 
cudaEventElapsedTime( &elapsedTime, start, stop ); cerr <<"Time to parse POLY: " <<elapsedTime<<"ms"<<endl; */ cudaMemcpy(X, dev_X, nr_vertices * sizeof(int), cudaMemcpyDeviceToHost); cudaMemcpy(Y, dev_Y, nr_vertices * sizeof(int), cudaMemcpyDeviceToHost); poly_array_t *polys = (poly_array_t *)malloc(sizeof(poly_array_t)); alloc_poly_array(polys, nr_polys, nr_vertices); memcpy(polys->mbrs, MBR, sizeof(mbr_t) * nr_polys); memcpy(polys->offsets, offset, sizeof(int) * nr_polys); polys->offsets[nr_polys] = nr_vertices; memcpy(polys->x, X, sizeof(int) * nr_vertices); memcpy(polys->y, Y, sizeof(int) * nr_vertices); //int ret_X[nr_vertices], ret_Y[nr_vertices]; //for (int i=0; i<nr_polys; i++) { // memcpy(&ret_X[offset[i]], &X[offsetInGPUMem[i]], numOfVerticesInApoly[i]*sizeof(int)); // memcpy(&ret_Y[offset[i]], &Y[offsetInGPUMem[i]], numOfVerticesInApoly[i]*sizeof(int)); //} //cout<<" "<<X[nr_vertices - 1]<<" "<<Y[nr_vertices - 1]; /* for (int i=0; i<nr_polys; i++) { cout<<numOfVerticesInApoly[i] ; // <<MBR[4*i]<<" "<<MBR[4*i+1]<<" "<<MBR[4*i+2]<<" "<<MBR[4*i+3]; for (int j=0; j<numOfVerticesInApoly[i]; j++){ cout<<", "<<X[offset[i]+j]<<" "<<Y[offset[i]+j]; } cout<<endl; }*/ free(MBRRaw); free(verticesRaw); free(offset); free(offsetRaw); free(numOfVerticesInApoly); free(MBR); free(X); free(Y); cudaFree(dev_MBRRaw); cudaFree(dev_MBR); cudaFree(dev_verticesRaw); cudaFree(dev_offsetRaw); cudaFree(dev_numOfVerticesInApoly); cudaFree(dev_offset); cudaFree(dev_X); cudaFree(dev_Y); return polys; }
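Both parseMBR and parseVertices decode fixed-width 4-digit decimal fields with plain (c - '0') arithmetic instead of a device-side string-to-int call. The record layout they assume can be inferred from the copies in gpu_parse: a 4-digit vertex count at byte 0, the MBR at byte 5 as four 5-byte fields, and vertices from byte 25 at 10 bytes each with y starting at offset 5 inside every chunk. A host-side sketch of the same decoding (the sample record below is fabricated for illustration):

#include <cstdio>

// same digit arithmetic as parseMBR / parseVertices
static int parse4(const char *p) {
    return (p[0]-'0')*1000 + (p[1]-'0')*100 + (p[2]-'0')*10 + (p[3]-'0');
}

int main() {
    //                  count MBR (4 x 5 bytes)     vertices (10 bytes each)
    const char rec[] = "0002 0010 0020 0110 0120 0010 0020,0110 0120,";
    int nv = parse4(rec);                    // bytes [0..3]
    for (int j = 0; j < nv; ++j) {
        const char *v = rec + 25 + j*10;     // vertex chunks start at byte 25
        printf("vertex %d: x=%d y=%d\n", j, parse4(v), parse4(v + 5));
    }
    return 0;
}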
408812e5f9dc48022015b9e327c1136f88d99747.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Groute: An Asynchronous Multi-GPU Programming Framework // http://www.github.com/groute/groute // Copyright (c) 2017, A. Barak // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are met: // // * Redistributions of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // * Neither the names of the copyright holders nor the names of its // contributors may be used to endorse or promote products derived from this // software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" // AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE // ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE // LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR // CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF // SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS // INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN // CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) // ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE // POSSIBILITY OF SUCH DAMAGE. #include <vector> #include <algorithm> #include <thread> #include <memory> #include <random> #include <gflags/gflags.h> #include <groute/device/cta_scheduler.cuh> #include <groute/graphs/csr_graph.h> #include <groute/dwl/distributed_worklist.cuh> #include <groute/dwl/workers.cuh> #include <utils/graphs/traversal.h> #include "sssp_common.h" const distance_t INF = UINT_MAX; DEFINE_int32(source_node, 0, "The source node for the SSSP traversal (clamped to [0, nnodes-1])"); namespace sssp { struct DistanceData { index_t node; distance_t distance; __device__ __host__ __forceinline__ DistanceData(index_t node, distance_t distance) : node(node), distance(distance) {} __device__ __host__ __forceinline__ DistanceData() : node(INF), distance(INF) {} }; typedef index_t local_work_t; typedef DistanceData remote_work_t; __global__ void SSSPMemsetKernel(distance_t *distances, int nnodes) { int tid = TID_1D; if (tid < nnodes) { distances[tid] = INF; } } template<bool CTAScheduling = true> /// SSSP work with Collective Thread Array scheduling for exploiting nested parallelism struct SSSPWork { template< typename WorkSource, typename WorkTarget, typename TGraph, typename TWeightDatum, typename TDistanceDatum> __device__ static void work( const WorkSource &work_source, WorkTarget &work_target, const TGraph &graph, TWeightDatum &edge_weights, TDistanceDatum &node_distances ) { uint32_t tid = TID_1D; uint32_t nthreads = TOTAL_THREADS_1D; uint32_t work_size = work_source.get_size(); uint32_t work_size_rup = round_up(work_size, blockDim.x) * blockDim.x; // we want all threads in active blocks to enter the loop for (uint32_t i = 0 + tid; i < work_size_rup; i += nthreads) { groute::dev::np_local<distance_t> np_local = {0, 0, 0}; if (i < work_size) { index_t node = work_source.get_work(i); np_local.start = 
graph.begin_edge(node); np_local.size = graph.end_edge(node) - np_local.start; np_local.meta_data = node_distances.get_item(node); } groute::dev::CTAWorkScheduler<distance_t>::template schedule( np_local, [&work_target, &graph, &edge_weights, &node_distances](index_t edge, index_t size, distance_t distance) { index_t dest = graph.edge_dest(edge); distance_t weight = edge_weights.get_item(edge); if (distance + weight < atomicMin(node_distances.get_item_ptr(dest), distance + weight)) { work_target.append_work(DistanceData(dest, distance + weight)); } } ); } } }; template<> /// SSSP work without CTA support struct SSSPWork<false> { template< typename WorkSource, typename WorkTarget, typename TGraph, typename TWeightDatum, typename TDistanceDatum> __device__ static void work( const WorkSource &work_source, WorkTarget &work_target, const TGraph &graph, TWeightDatum &edge_weights, TDistanceDatum &node_distances ) { uint32_t tid = TID_1D; uint32_t nthreads = TOTAL_THREADS_1D; uint32_t work_size = work_source.get_size(); for (uint32_t i = 0 + tid; i < work_size; i += nthreads) { index_t node = work_source.get_work(i); distance_t distance = node_distances.get_item(node); for (index_t edge = graph.begin_edge(node), end_edge = graph.end_edge(node); edge < end_edge; ++edge) { index_t dest = graph.edge_dest(edge); distance_t weight = edge_weights.get_item(edge); if (distance + weight < atomicMin(node_distances.get_item_ptr(dest), distance + weight)) { work_target.append_work(DistanceData(dest, distance + weight)); } } } } }; struct DWCallbacks { private: groute::graphs::dev::CSRGraphSeg m_graph_seg; groute::graphs::dev::GraphDatum<distance_t> m_distances_datum; public: template<typename...UnusedData> DWCallbacks( const groute::graphs::dev::CSRGraphSeg &graph_seg, const groute::graphs::dev::GraphDatumSeg<distance_t> &weights_datum, const groute::graphs::dev::GraphDatum<distance_t> &distances_datum, UnusedData &... data) : m_graph_seg(graph_seg), m_distances_datum(distances_datum) { } DWCallbacks() {} __device__ __forceinline__ groute::SplitFlags on_receive(const remote_work_t &work) { if (m_graph_seg.owns(work.node)) { return (work.distance < atomicMin(m_distances_datum.get_item_ptr(work.node), work.distance)) ? groute::SF_Take : groute::SF_None; // Filter } return groute::SF_Pass; } __device__ __forceinline__ bool should_defer(const local_work_t &work, const distance_t &global_threshold) { return m_distances_datum[work] > global_threshold; } __device__ __forceinline__ groute::SplitFlags on_send(local_work_t work) { return (m_graph_seg.owns(work)) ? 
groute::SF_Take : groute::SF_Pass; } __device__ __forceinline__ remote_work_t pack(local_work_t work) { return DistanceData(work, m_distances_datum.get_item(work)); } __device__ __forceinline__ local_work_t unpack(const remote_work_t &work) { return work.node; } }; struct Algo { static const char *NameLower() { return "sssp"; } static const char *Name() { return "SSSP"; } static void HostInit( utils::traversal::Context<sssp::Algo> &context, groute::graphs::multi::CSRGraphAllocator &graph_manager, groute::IDistributedWorklist<local_work_t, remote_work_t> &distributed_worklist) { // Get a valid source_node from flag index_t source_node = min(max((index_t) 0, (index_t) FLAGS_source_node), context.host_graph.nnodes - 1); // Map to the (possibly new) partitioned vertex space source_node = graph_manager.GetGraphPartitioner()->ReverseLookup(source_node); // Host endpoint for sending initial work groute::Endpoint host = groute::Endpoint::HostEndpoint(0); // Report the initial work distributed_worklist.ReportInitialWork(1, host); std::vector<remote_work_t> initial_work; initial_work.push_back(remote_work_t(source_node, 0)); distributed_worklist .GetLink(host) .Send(groute::Segment<remote_work_t>(&initial_work[0], 1), groute::Event()); } template<typename TGraph, typename TWeightDatum, typename TDistanceDatum, typename...UnusedData> static void DeviceMemset(groute::Stream &stream, TGraph &graph, TWeightDatum &weights_datum, TDistanceDatum &distances_datum, const UnusedData &... data) { dim3 grid_dims, block_dims; KernelSizing(grid_dims, block_dims, distances_datum.size); SSSPMemsetKernel << < grid_dims, block_dims, 0, stream.cuda_stream >> > ( distances_datum.data_ptr, distances_datum.size); } template<typename TGraph, typename TWeightDatum, typename TDistanceDatum, typename...UnusedData> static void DeviceInit( groute::Endpoint endpoint, groute::Stream &stream, groute::IDistributedWorklist<local_work_t, remote_work_t> &distributed_worklist, groute::IDistributedWorklistPeer<local_work_t, remote_work_t, DWCallbacks> *peer, TGraph &graph, TWeightDatum &weights_datum, TDistanceDatum &distances_datum, const UnusedData &... data) { } template< typename TGraphAllocator, typename TWeightDatum, typename TDistanceDatum, typename...UnusedData> static const std::vector<distance_t> &Gather(TGraphAllocator &graph_allocator, TWeightDatum &weights_datum, TDistanceDatum &distances_datum, UnusedData &... data) { graph_allocator.GatherDatum(distances_datum); return distances_datum.GetHostData(); } template< typename TWeightDatum, typename TDistanceDatum, typename...UnusedData> static std::vector<distance_t> Host(groute::graphs::host::CSRGraph &graph, TWeightDatum &weights_datum, TDistanceDatum &distances_datum, UnusedData &... 
data) { return SSSPHostNaive(graph, weights_datum.GetHostDataPtr(), min(max((index_t) 0, (index_t) FLAGS_source_node), graph.nnodes - 1)); } static int Output(const char *file, const std::vector<distance_t> &distances) { return SSSPOutput(file, distances); } static int CheckErrors(const std::vector<distance_t> &distances, const std::vector<distance_t> &regression) { return SSSPCheckErrors(distances, regression); } }; using EdgeWeightDatumType = groute::graphs::multi::EdgeInputDatum<distance_t>; using NodeDistanceDatumType = groute::graphs::multi::NodeOutputGlobalDatum<distance_t>; template<bool IterationFusion = true, bool CTAScheduling = true> using FusedWorkerType = groute::FusedWorker< IterationFusion, local_work_t, remote_work_t, int, DWCallbacks, SSSPWork<CTAScheduling>, groute::graphs::dev::CSRGraphSeg, EdgeWeightDatumType::DeviceObjectType, NodeDistanceDatumType::DeviceObjectType>; template<bool CTAScheduling = true> using WorkerType = groute::Worker< local_work_t, remote_work_t, DWCallbacks, SSSPWork<CTAScheduling>, groute::graphs::dev::CSRGraphSeg, EdgeWeightDatumType::DeviceObjectType, NodeDistanceDatumType::DeviceObjectType>; template<typename TWorker> using RunnerType = utils::traversal::Runner< Algo, TWorker, DWCallbacks, local_work_t, remote_work_t, EdgeWeightDatumType, NodeDistanceDatumType>; } template<typename TWorker> bool TestSSSPAsyncMultiTemplate(int ngpus) { sssp::RunnerType<TWorker> runner; sssp::EdgeWeightDatumType edge_weights; sssp::NodeDistanceDatumType node_distances; return runner(ngpus, FLAGS_prio_delta, edge_weights, node_distances); } bool TestSSSPAsyncMultiOptimized(int ngpus) { return FLAGS_cta_np ? FLAGS_iteration_fusion ? TestSSSPAsyncMultiTemplate<sssp::FusedWorkerType<true, true >>(ngpus) : TestSSSPAsyncMultiTemplate<sssp::FusedWorkerType<false, true >>(ngpus) : FLAGS_iteration_fusion ? TestSSSPAsyncMultiTemplate<sssp::FusedWorkerType<true, false >>(ngpus) : TestSSSPAsyncMultiTemplate<sssp::FusedWorkerType<false, false >>(ngpus); } bool TestSSSPAsyncMulti(int ngpus) { return FLAGS_cta_np ? TestSSSPAsyncMultiTemplate<sssp::WorkerType<true >>(ngpus) : TestSSSPAsyncMultiTemplate<sssp::WorkerType<false >>(ngpus); } bool TestSSSPSingle() { return TestSSSPAsyncMultiOptimized(1); }
408812e5f9dc48022015b9e327c1136f88d99747.cu
// Groute: An Asynchronous Multi-GPU Programming Framework // http://www.github.com/groute/groute // Copyright (c) 2017, A. Barak // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are met: // // * Redistributions of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // * Neither the names of the copyright holders nor the names of its // contributors may be used to endorse or promote products derived from this // software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" // AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE // ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE // LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR // CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF // SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS // INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN // CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) // ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE // POSSIBILITY OF SUCH DAMAGE. #include <vector> #include <algorithm> #include <thread> #include <memory> #include <random> #include <gflags/gflags.h> #include <groute/device/cta_scheduler.cuh> #include <groute/graphs/csr_graph.h> #include <groute/dwl/distributed_worklist.cuh> #include <groute/dwl/workers.cuh> #include <utils/graphs/traversal.h> #include "sssp_common.h" const distance_t INF = UINT_MAX; DEFINE_int32(source_node, 0, "The source node for the SSSP traversal (clamped to [0, nnodes-1])"); namespace sssp { struct DistanceData { index_t node; distance_t distance; __device__ __host__ __forceinline__ DistanceData(index_t node, distance_t distance) : node(node), distance(distance) {} __device__ __host__ __forceinline__ DistanceData() : node(INF), distance(INF) {} }; typedef index_t local_work_t; typedef DistanceData remote_work_t; __global__ void SSSPMemsetKernel(distance_t *distances, int nnodes) { int tid = TID_1D; if (tid < nnodes) { distances[tid] = INF; } } template<bool CTAScheduling = true> /// SSSP work with Collective Thread Array scheduling for exploiting nested parallelism struct SSSPWork { template< typename WorkSource, typename WorkTarget, typename TGraph, typename TWeightDatum, typename TDistanceDatum> __device__ static void work( const WorkSource &work_source, WorkTarget &work_target, const TGraph &graph, TWeightDatum &edge_weights, TDistanceDatum &node_distances ) { uint32_t tid = TID_1D; uint32_t nthreads = TOTAL_THREADS_1D; uint32_t work_size = work_source.get_size(); uint32_t work_size_rup = round_up(work_size, blockDim.x) * blockDim.x; // we want all threads in active blocks to enter the loop for (uint32_t i = 0 + tid; i < work_size_rup; i += nthreads) { groute::dev::np_local<distance_t> np_local = {0, 0, 0}; if (i < work_size) { index_t node = work_source.get_work(i); np_local.start = graph.begin_edge(node); np_local.size = graph.end_edge(node) - np_local.start; 
np_local.meta_data = node_distances.get_item(node); } groute::dev::CTAWorkScheduler<distance_t>::template schedule( np_local, [&work_target, &graph, &edge_weights, &node_distances](index_t edge, index_t size, distance_t distance) { index_t dest = graph.edge_dest(edge); distance_t weight = edge_weights.get_item(edge); if (distance + weight < atomicMin(node_distances.get_item_ptr(dest), distance + weight)) { work_target.append_work(DistanceData(dest, distance + weight)); } } ); } } }; template<> /// SSSP work without CTA support struct SSSPWork<false> { template< typename WorkSource, typename WorkTarget, typename TGraph, typename TWeightDatum, typename TDistanceDatum> __device__ static void work( const WorkSource &work_source, WorkTarget &work_target, const TGraph &graph, TWeightDatum &edge_weights, TDistanceDatum &node_distances ) { uint32_t tid = TID_1D; uint32_t nthreads = TOTAL_THREADS_1D; uint32_t work_size = work_source.get_size(); for (uint32_t i = 0 + tid; i < work_size; i += nthreads) { index_t node = work_source.get_work(i); distance_t distance = node_distances.get_item(node); for (index_t edge = graph.begin_edge(node), end_edge = graph.end_edge(node); edge < end_edge; ++edge) { index_t dest = graph.edge_dest(edge); distance_t weight = edge_weights.get_item(edge); if (distance + weight < atomicMin(node_distances.get_item_ptr(dest), distance + weight)) { work_target.append_work(DistanceData(dest, distance + weight)); } } } } }; struct DWCallbacks { private: groute::graphs::dev::CSRGraphSeg m_graph_seg; groute::graphs::dev::GraphDatum<distance_t> m_distances_datum; public: template<typename...UnusedData> DWCallbacks( const groute::graphs::dev::CSRGraphSeg &graph_seg, const groute::graphs::dev::GraphDatumSeg<distance_t> &weights_datum, const groute::graphs::dev::GraphDatum<distance_t> &distances_datum, UnusedData &... data) : m_graph_seg(graph_seg), m_distances_datum(distances_datum) { } DWCallbacks() {} __device__ __forceinline__ groute::SplitFlags on_receive(const remote_work_t &work) { if (m_graph_seg.owns(work.node)) { return (work.distance < atomicMin(m_distances_datum.get_item_ptr(work.node), work.distance)) ? groute::SF_Take : groute::SF_None; // Filter } return groute::SF_Pass; } __device__ __forceinline__ bool should_defer(const local_work_t &work, const distance_t &global_threshold) { return m_distances_datum[work] > global_threshold; } __device__ __forceinline__ groute::SplitFlags on_send(local_work_t work) { return (m_graph_seg.owns(work)) ? 
groute::SF_Take : groute::SF_Pass; } __device__ __forceinline__ remote_work_t pack(local_work_t work) { return DistanceData(work, m_distances_datum.get_item(work)); } __device__ __forceinline__ local_work_t unpack(const remote_work_t &work) { return work.node; } }; struct Algo { static const char *NameLower() { return "sssp"; } static const char *Name() { return "SSSP"; } static void HostInit( utils::traversal::Context<sssp::Algo> &context, groute::graphs::multi::CSRGraphAllocator &graph_manager, groute::IDistributedWorklist<local_work_t, remote_work_t> &distributed_worklist) { // Get a valid source_node from flag index_t source_node = min(max((index_t) 0, (index_t) FLAGS_source_node), context.host_graph.nnodes - 1); // Map to the (possibly new) partitioned vertex space source_node = graph_manager.GetGraphPartitioner()->ReverseLookup(source_node); // Host endpoint for sending initial work groute::Endpoint host = groute::Endpoint::HostEndpoint(0); // Report the initial work distributed_worklist.ReportInitialWork(1, host); std::vector<remote_work_t> initial_work; initial_work.push_back(remote_work_t(source_node, 0)); distributed_worklist .GetLink(host) .Send(groute::Segment<remote_work_t>(&initial_work[0], 1), groute::Event()); } template<typename TGraph, typename TWeightDatum, typename TDistanceDatum, typename...UnusedData> static void DeviceMemset(groute::Stream &stream, TGraph &graph, TWeightDatum &weights_datum, TDistanceDatum &distances_datum, const UnusedData &... data) { dim3 grid_dims, block_dims; KernelSizing(grid_dims, block_dims, distances_datum.size); SSSPMemsetKernel << < grid_dims, block_dims, 0, stream.cuda_stream >> > ( distances_datum.data_ptr, distances_datum.size); } template<typename TGraph, typename TWeightDatum, typename TDistanceDatum, typename...UnusedData> static void DeviceInit( groute::Endpoint endpoint, groute::Stream &stream, groute::IDistributedWorklist<local_work_t, remote_work_t> &distributed_worklist, groute::IDistributedWorklistPeer<local_work_t, remote_work_t, DWCallbacks> *peer, TGraph &graph, TWeightDatum &weights_datum, TDistanceDatum &distances_datum, const UnusedData &... data) { } template< typename TGraphAllocator, typename TWeightDatum, typename TDistanceDatum, typename...UnusedData> static const std::vector<distance_t> &Gather(TGraphAllocator &graph_allocator, TWeightDatum &weights_datum, TDistanceDatum &distances_datum, UnusedData &... data) { graph_allocator.GatherDatum(distances_datum); return distances_datum.GetHostData(); } template< typename TWeightDatum, typename TDistanceDatum, typename...UnusedData> static std::vector<distance_t> Host(groute::graphs::host::CSRGraph &graph, TWeightDatum &weights_datum, TDistanceDatum &distances_datum, UnusedData &... 
data) { return SSSPHostNaive(graph, weights_datum.GetHostDataPtr(), min(max((index_t) 0, (index_t) FLAGS_source_node), graph.nnodes - 1)); } static int Output(const char *file, const std::vector<distance_t> &distances) { return SSSPOutput(file, distances); } static int CheckErrors(const std::vector<distance_t> &distances, const std::vector<distance_t> &regression) { return SSSPCheckErrors(distances, regression); } }; using EdgeWeightDatumType = groute::graphs::multi::EdgeInputDatum<distance_t>; using NodeDistanceDatumType = groute::graphs::multi::NodeOutputGlobalDatum<distance_t>; template<bool IterationFusion = true, bool CTAScheduling = true> using FusedWorkerType = groute::FusedWorker< IterationFusion, local_work_t, remote_work_t, int, DWCallbacks, SSSPWork<CTAScheduling>, groute::graphs::dev::CSRGraphSeg, EdgeWeightDatumType::DeviceObjectType, NodeDistanceDatumType::DeviceObjectType>; template<bool CTAScheduling = true> using WorkerType = groute::Worker< local_work_t, remote_work_t, DWCallbacks, SSSPWork<CTAScheduling>, groute::graphs::dev::CSRGraphSeg, EdgeWeightDatumType::DeviceObjectType, NodeDistanceDatumType::DeviceObjectType>; template<typename TWorker> using RunnerType = utils::traversal::Runner< Algo, TWorker, DWCallbacks, local_work_t, remote_work_t, EdgeWeightDatumType, NodeDistanceDatumType>; } template<typename TWorker> bool TestSSSPAsyncMultiTemplate(int ngpus) { sssp::RunnerType<TWorker> runner; sssp::EdgeWeightDatumType edge_weights; sssp::NodeDistanceDatumType node_distances; return runner(ngpus, FLAGS_prio_delta, edge_weights, node_distances); } bool TestSSSPAsyncMultiOptimized(int ngpus) { return FLAGS_cta_np ? FLAGS_iteration_fusion ? TestSSSPAsyncMultiTemplate<sssp::FusedWorkerType<true, true >>(ngpus) : TestSSSPAsyncMultiTemplate<sssp::FusedWorkerType<false, true >>(ngpus) : FLAGS_iteration_fusion ? TestSSSPAsyncMultiTemplate<sssp::FusedWorkerType<true, false >>(ngpus) : TestSSSPAsyncMultiTemplate<sssp::FusedWorkerType<false, false >>(ngpus); } bool TestSSSPAsyncMulti(int ngpus) { return FLAGS_cta_np ? TestSSSPAsyncMultiTemplate<sssp::WorkerType<true >>(ngpus) : TestSSSPAsyncMultiTemplate<sssp::WorkerType<false >>(ngpus); } bool TestSSSPSingle() { return TestSSSPAsyncMultiOptimized(1); }
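The inner relaxation in both SSSPWork variants leans on atomicMin returning the previous value: a single atomic both updates the tentative distance and tells the thread whether its proposal won, and only winners append the destination for re-expansion. A standalone sketch of the same idiom outside the groute worklist machinery (every name below is hypothetical, and distance_t is taken to be unsigned int):

__global__ void RelaxEdges( const int *edge_dest, const unsigned int *edge_weight,
                            unsigned int *dist, unsigned int src_dist,
                            int num_edges, int *frontier, int *frontier_len )
{
    int e = blockIdx.x * blockDim.x + threadIdx.x;
    if (e >= num_edges) return;

    unsigned int nd = src_dist + edge_weight[e];

    // atomicMin returns the old value; nd < old means this thread improved dist[dest]
    if (nd < atomicMin( &dist[ edge_dest[e] ], nd ))
        frontier[ atomicAdd( frontier_len, 1 ) ] = edge_dest[e];   // queue for re-expansion
}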
a55b2c1b1498ecb103839071e8a61965a1345abf.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 2.5.4) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date October 2020 @generated from sparse/blas/zcompact.cu, normal z -> s, Thu Oct 8 23:05:49 2020 @author Stan Tomov */ #include "magmasparse_internal.h" #define NB 64 /* ===================================================================== Matrix is m x n, and is divided into block rows, each NB x n. Each CUDA block has NB threads to handle one block row. Each thread handles one row, iterating across all columns. */ __global__ void scompact_kernel( int m, int n, float *dA, int ldda, float *dnorms, float tol, magma_int_t *active, magma_int_t *cBlock) { // dA is processed across row i (by the current thread) int i = blockIdx.x*blockDim.x + threadIdx.x; int cBlockSize = 0; if ( i < m ) { dA += i; for(int j = 0; j<n; j++){ if (dnorms[j] > tol && active[j]){ dA[ldda*cBlockSize] = dA[ldda*j]; cBlockSize++; } else if (i==0) active[j] = 0; } } if (i==0) *cBlock = cBlockSize; } __global__ void scompactactive_kernel( int m, int n, float *dA, int ldda, magma_int_t *active) { // dA is processed across row i (by the current thread) int i = blockIdx.x*blockDim.x + threadIdx.x; int cBlockSize = 0; if ( i < m ) { dA += i; for(int j = 0; j<n; j++){ if (active[j]){ dA[ldda*cBlockSize] = dA[ldda*j]; cBlockSize++; } } } } /* ===================================================================== */ /** Purpose ------- ZCOMPACT takes a set of n vectors of size m (in dA) and their norms and compacts them into the cBlock size<=n vectors that have norms > tol. The active mask array has 1 or 0, showing if a vector remained or not in the compacted resulting set of vectors. Arguments --------- @param[in] m INTEGER The number of rows of the matrix dA. M >= 0. @param[in] n INTEGER The number of columns of the matrix dA. N >= 0. @param[in,out] dA COMPLEX REAL array, dimension (LDDA,N) The m by n matrix dA. @param[in] ldda INTEGER The leading dimension of the array dA. LDDA >= max(1,M). @param[in] dnorms REAL array, dimension N The norms of the N vectors in dA @param[in] tol DOUBLE PRECISION The tolerance value used in the criteria to compact or not. @param[in,out] active INTEGER array, dimension N A mask of 1s and 0s showing if a vector remains or has been removed @param[in,out] cBlock magmaInt_ptr The number of vectors that remain in dA (i.e., with norms > tol). @param[in] queue magma_queue_t Queue to execute in.
@ingroup magmasparse_sgegpuk ********************************************************************/ extern "C" magma_int_t magma_scompact( magma_int_t m, magma_int_t n, magmaFloat_ptr dA, magma_int_t ldda, magmaFloat_ptr dnorms, float tol, magmaInt_ptr active, magmaInt_ptr cBlock, magma_queue_t queue ) { magma_int_t info = 0; if ( m < 0 ) info = -1; else if ( n < 0 ) info = -2; else if ( ldda < max(1,m)) info = -4; if ( info != 0 ) { magma_xerbla( __func__, -(info) ); return info; } if ( m == 0 || n == 0 ) return info; dim3 threads( NB ); dim3 grid( magma_ceildiv( m, NB ) ); hipLaunchKernelGGL(( scompact_kernel), dim3(grid), dim3(threads), 0, queue->cuda_stream() , m, n, dA, ldda, dnorms, tol, active, active+n ); magma_igetvector( 1, active+n, 1, cBlock, 1, queue ); return info; } /* ===================================================================== */ /** Purpose ------- ZCOMPACTACTIVE takes a set of n vectors of size m (in dA) and an array of 1s and 0s indicating which vectors to compact (for 1s) and which to disregard (for 0s). Arguments --------- @param[in] m INTEGER The number of rows of the matrix dA. M >= 0. @param[in] n INTEGER The number of columns of the matrix dA. N >= 0. @param[in,out] dA COMPLEX REAL array, dimension (LDDA,N) The m by n matrix dA. @param[in] ldda INTEGER The leading dimension of the array dA. LDDA >= max(1,M). @param[in] active INTEGER array, dimension N A mask of 1s and 0s showing if a vector remains or has been removed @param[in] queue magma_queue_t Queue to execute in. @ingroup magmasparse_s ********************************************************************/ extern "C" magma_int_t magma_scompactActive( magma_int_t m, magma_int_t n, magmaFloat_ptr dA, magma_int_t ldda, magmaInt_ptr active, magma_queue_t queue ) { magma_int_t info = 0; if ( m < 0 ) info = -1; else if ( n < 0 ) info = -2; else if ( ldda < max(1,m)) info = -4; if ( info != 0 ) { magma_xerbla( __func__, -(info) ); return info; } if ( m == 0 || n == 0 ) return info; dim3 threads( NB ); dim3 grid( magma_ceildiv( m, NB ) ); hipLaunchKernelGGL(( scompactactive_kernel), dim3(grid), dim3(threads), 0, queue->cuda_stream() , m, n, dA, ldda, active); return info; } /* ===================================================================== */
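One easy-to-miss contract in magma_scompact above: the kernel writes the surviving-vector count into active+n and the wrapper copies it back from there, so the device active array must be allocated with n+1 entries even though only n of them hold mask bits. A hedged calling sketch (assumes MAGMA is already initialized and that the standard magma_smalloc / magma_imalloc / magma_queue_create helpers are available; the sizes and tolerance are illustrative):

magma_int_t m = 1000, n = 32, cBlock = 0;
magmaFloat_ptr dA, dnorms;
magmaInt_ptr d_active;
magma_queue_t queue;

magma_queue_create( 0, &queue );
magma_smalloc( &dA, m*n );          // n candidate vectors, one per column
magma_smalloc( &dnorms, n );        // per-column norms, computed elsewhere
magma_imalloc( &d_active, n+1 );    // +1: slot n receives the compacted count

/* ... fill dA and dnorms on the device, set d_active[0..n-1] = 1 ... */

magma_scompact( m, n, dA, m, dnorms, 1e-6f, d_active, &cBlock, queue );
// on return, the first cBlock columns of dA hold the vectors with norm > tol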
a55b2c1b1498ecb103839071e8a61965a1345abf.cu
/*
    -- MAGMA (version 2.5.4) --
       Univ. of Tennessee, Knoxville
       Univ. of California, Berkeley
       Univ. of Colorado, Denver
       @date October 2020

       @generated from sparse/blas/zcompact.cu, normal z -> s, Thu Oct 8 23:05:49 2020
       @author Stan Tomov
*/
#include "magmasparse_internal.h"

#define NB 64

/* =====================================================================
    Matrix is m x n, and is divided into block rows, each NB x n.
    Each CUDA block has NB threads to handle one block row.
    Each thread handles one row, iterating across all columns.
*/
__global__ void
scompact_kernel(
    int m, int n,
    float *dA, int ldda,
    float *dnorms, float tol,
    magma_int_t *active, magma_int_t *cBlock)
{
    // dA is processed across row i (by the current thread)
    int i = blockIdx.x*blockDim.x + threadIdx.x;
    int cBlockSize = 0;

    if ( i < m ) {
        dA += i;

        for(int j = 0; j<n; j++){
            if (dnorms[j] > tol && active[j]){
                dA[ldda*cBlockSize] = dA[ldda*j];
                cBlockSize++;
            }
            else if (i==0)
                active[j] = 0;
        }
    }

    if (i==0)
        *cBlock = cBlockSize;
}

__global__ void
scompactactive_kernel(
    int m, int n,
    float *dA, int ldda,
    magma_int_t *active)
{
    // dA is processed across row i (by the current thread)
    int i = blockIdx.x*blockDim.x + threadIdx.x;
    int cBlockSize = 0;

    if ( i < m ) {
        dA += i;

        for(int j = 0; j<n; j++){
            if (active[j]){
                dA[ldda*cBlockSize] = dA[ldda*j];
                cBlockSize++;
            }
        }
    }
}

/* ===================================================================== */
/**
    Purpose
    -------
    ZCOMPACT takes a set of n vectors of size m (in dA) and their norms and
    compacts them into the cBlock size<=n vectors that have norms > tol.
    The active mask array has 1 or 0, showing if a vector remained or not
    in the compacted resulting set of vectors.

    Arguments
    ---------
    @param[in]
    m           INTEGER
                The number of rows of the matrix dA.  M >= 0.

    @param[in]
    n           INTEGER
                The number of columns of the matrix dA.  N >= 0.

    @param[in,out]
    dA          REAL array, dimension (LDDA,N)
                The m by n matrix dA.

    @param[in]
    ldda        INTEGER
                The leading dimension of the array dA.  LDDA >= max(1,M).

    @param[in]
    dnorms      REAL array, dimension N
                The norms of the N vectors in dA

    @param[in]
    tol         DOUBLE PRECISION
                The tolerance value used in the criteria to compact or not.

    @param[in,out]
    active      INTEGER array, dimension N
                A mask of 1s and 0s showing if a vector remains or has been removed

    @param[in,out]
    cBlock      magmaInt_ptr
                The number of vectors that remain in dA (i.e., with norms > tol).

    @param[in]
    queue       magma_queue_t
                Queue to execute in.

    @ingroup magmasparse_sgegpuk
    ********************************************************************/

extern "C" magma_int_t
magma_scompact(
    magma_int_t m,
    magma_int_t n,
    magmaFloat_ptr dA,
    magma_int_t ldda,
    magmaFloat_ptr dnorms,
    float tol,
    magmaInt_ptr active,
    magmaInt_ptr cBlock,
    magma_queue_t queue )
{
    magma_int_t info = 0;
    if ( m < 0 )
        info = -1;
    else if ( n < 0 )
        info = -2;
    else if ( ldda < max(1,m))
        info = -4;

    if ( info != 0 ) {
        magma_xerbla( __func__, -(info) );
        return info;
    }

    if ( m == 0 || n == 0 )
        return info;

    dim3 threads( NB );
    dim3 grid( magma_ceildiv( m, NB ) );

    scompact_kernel<<< grid, threads, 0, queue->cuda_stream() >>>(
        m, n, dA, ldda, dnorms, tol, active, active+n );

    magma_igetvector( 1, active+n, 1, cBlock, 1, queue );

    return info;
}

/* ===================================================================== */
/**
    Purpose
    -------
    ZCOMPACTACTIVE takes a set of n vectors of size m (in dA) and an array of
    1s and 0s indicating which vectors to compact (for 1s) and which to
    disregard (for 0s).

    Arguments
    ---------
    @param[in]
    m           INTEGER
                The number of rows of the matrix dA.  M >= 0.

    @param[in]
    n           INTEGER
                The number of columns of the matrix dA.  N >= 0.

    @param[in,out]
    dA          REAL array, dimension (LDDA,N)
                The m by n matrix dA.

    @param[in]
    ldda        INTEGER
                The leading dimension of the array dA.  LDDA >= max(1,M).

    @param[in]
    active      INTEGER array, dimension N
                A mask of 1s and 0s showing if a vector remains or has been removed

    @param[in]
    queue       magma_queue_t
                Queue to execute in.

    @ingroup magmasparse_s
    ********************************************************************/

extern "C" magma_int_t
magma_scompactActive(
    magma_int_t m,
    magma_int_t n,
    magmaFloat_ptr dA,
    magma_int_t ldda,
    magmaInt_ptr active,
    magma_queue_t queue )
{
    magma_int_t info = 0;
    if ( m < 0 )
        info = -1;
    else if ( n < 0 )
        info = -2;
    else if ( ldda < max(1,m))
        info = -4;

    if ( info != 0 ) {
        magma_xerbla( __func__, -(info) );
        return info;
    }

    if ( m == 0 || n == 0 )
        return info;

    dim3 threads( NB );
    dim3 grid( magma_ceildiv( m, NB ) );

    scompactactive_kernel<<< grid, threads, 0, queue->cuda_stream() >>>(
        m, n, dA, ldda, active);

    return info;
}

/* ===================================================================== */
f74140703d70568903bfe3b1305c9b803508b217.hip
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <hip/hip_runtime.h>

#define THREADS_PER_BLOCK 1024

void matrixAdd(int *a, int *b, int *c, int N) {
    for (int col = 0; col < N; col++) {
        for (int row = 0; row < N; row++) {
            int index = row * N + col;  // linear index of this element
            c[index] = a[index] + b[index];
        }
    }
}

__global__ void matrixAddKernel(int *a, int *b, int *c, int N) {
    int col = threadIdx.x + blockIdx.x * blockDim.x;
    int row = threadIdx.y + blockIdx.y * blockDim.y;
    int index = row * N + col;
    c[index] = a[index] + b[index];
}

int main() {
    int N = 4096; // Define size of 1 side of square matrix

    // Initialise grid and block variables: the kernel indexes in both x and y,
    // so the launch must be 2D as well (N = 4096 divides both dimensions evenly)
    dim3 block(32, 32, 1);                  // 32*32 = THREADS_PER_BLOCK threads
    dim3 grid(N / block.x, N / block.y, 1);

    // Initialise host pointers (dynamically allocated memory) and device pointers
    int *a_h;
    int *b_h;
    int *c_h; // GPU results
    int *d_h; // CPU results
    int *a_d;
    int *b_d;
    int *c_d;
    int size; // Number of bytes required by arrays

    // Create timer
    hipEvent_t start;
    hipEvent_t stop;
    float elapsedTime;

    // Print out information about blocks and threads
    printf("Number of threads: %i (%ix%i)\n", block.x*block.y, block.x, block.y);
    printf("Number of blocks: %i (%ix%i)\n", grid.x*grid.y, grid.x, grid.y);

    // Dynamically allocate host memory
    size = N * N * sizeof(int);
    a_h = (int*) malloc(size);
    b_h = (int*) malloc(size);
    c_h = (int*) malloc(size);
    d_h = (int*) malloc(size);

    // Load host arrays with data
    for (int i = 0; i < N; i++) {
        for (int j = 0; j < N; j++) {
            a_h[i * N + j] = i;
            b_h[i * N + j] = i;
        }
    }

    // Allocate device memory
    hipMalloc((void**)&a_d, size);
    hipMalloc((void**)&b_d, size);
    hipMalloc((void**)&c_d, size);

    // Copy host memory to device memory
    hipMemcpy(a_d, a_h, size, hipMemcpyHostToDevice);
    hipMemcpy(b_d, b_h, size, hipMemcpyHostToDevice);
    hipMemcpy(c_d, c_h, size, hipMemcpyHostToDevice);

    // Start timer for GPU
    hipEventCreate(&start);
    hipEventCreate(&stop);
    hipEventRecord(start, 0);

    // Launch kernel
    hipLaunchKernelGGL(( matrixAddKernel), dim3(grid), dim3(block), 0, 0, a_d, b_d, c_d, N);

    // Stop timer
    hipEventRecord(stop, 0);
    hipEventSynchronize(stop);
    hipEventElapsedTime(&elapsedTime, start, stop);

    // Print execution time
    printf("Time to calculate results on GPU: %f ms\n", elapsedTime);

    // Copy results back to the host
    hipMemcpy(c_h, c_d, size, hipMemcpyDeviceToHost);

    // Start timer for CPU
    hipEventRecord(start, 0);

    // Launch CPU code
    matrixAdd(a_h, b_h, d_h, N);

    // Stop timer
    hipEventRecord(stop, 0);
    hipEventSynchronize(stop);
    hipEventElapsedTime(&elapsedTime, start, stop);

    // Print execution time
    printf("Time to calculate results on CPU: %f ms\n", elapsedTime);

    // Compare results
    for (int i = 0; i < N*N; i++) {
        if (c_h[i] != d_h[i]) {
            printf("Error: CPU and GPU results do not match\n");
            break;
        }
    }

    // Free memory
    free(a_h);
    free(b_h);
    free(c_h);
    free(d_h);
    hipFree(a_d);
    hipFree(b_d);
    hipFree(c_d);
    hipEventDestroy(start);
    hipEventDestroy(stop);

    return 0;
}
f74140703d70568903bfe3b1305c9b803508b217.cu
#include <stdio.h>
#include <cuda.h>

#define THREADS_PER_BLOCK 1024

void matrixAdd(int *a, int *b, int *c, int N) {
    for (int col = 0; col < N; col++) {
        for (int row = 0; row < N; row++) {
            int index = row * N + col;  // linear index of this element
            c[index] = a[index] + b[index];
        }
    }
}

__global__ void matrixAddKernel(int *a, int *b, int *c, int N) {
    int col = threadIdx.x + blockIdx.x * blockDim.x;
    int row = threadIdx.y + blockIdx.y * blockDim.y;
    int index = row * N + col;
    c[index] = a[index] + b[index];
}

int main() {
    int N = 4096; // Define size of 1 side of square matrix

    // Initialise grid and block variables: the kernel indexes in both x and y,
    // so the launch must be 2D as well (N = 4096 divides both dimensions evenly)
    dim3 block(32, 32, 1);                  // 32*32 = THREADS_PER_BLOCK threads
    dim3 grid(N / block.x, N / block.y, 1);

    // Initialise host pointers (dynamically allocated memory) and device pointers
    int *a_h;
    int *b_h;
    int *c_h; // GPU results
    int *d_h; // CPU results
    int *a_d;
    int *b_d;
    int *c_d;
    int size; // Number of bytes required by arrays

    // Create timer
    cudaEvent_t start;
    cudaEvent_t stop;
    float elapsedTime;

    // Print out information about blocks and threads
    printf("Number of threads: %i (%ix%i)\n", block.x*block.y, block.x, block.y);
    printf("Number of blocks: %i (%ix%i)\n", grid.x*grid.y, grid.x, grid.y);

    // Dynamically allocate host memory
    size = N * N * sizeof(int);
    a_h = (int*) malloc(size);
    b_h = (int*) malloc(size);
    c_h = (int*) malloc(size);
    d_h = (int*) malloc(size);

    // Load host arrays with data
    for (int i = 0; i < N; i++) {
        for (int j = 0; j < N; j++) {
            a_h[i * N + j] = i;
            b_h[i * N + j] = i;
        }
    }

    // Allocate device memory
    cudaMalloc((void**)&a_d, size);
    cudaMalloc((void**)&b_d, size);
    cudaMalloc((void**)&c_d, size);

    // Copy host memory to device memory
    cudaMemcpy(a_d, a_h, size, cudaMemcpyHostToDevice);
    cudaMemcpy(b_d, b_h, size, cudaMemcpyHostToDevice);
    cudaMemcpy(c_d, c_h, size, cudaMemcpyHostToDevice);

    // Start timer for GPU
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start, 0);

    // Launch kernel
    matrixAddKernel<<<grid, block>>>(a_d, b_d, c_d, N);

    // Stop timer
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&elapsedTime, start, stop);

    // Print execution time
    printf("Time to calculate results on GPU: %f ms\n", elapsedTime);

    // Copy results back to the host
    cudaMemcpy(c_h, c_d, size, cudaMemcpyDeviceToHost);

    // Start timer for CPU
    cudaEventRecord(start, 0);

    // Launch CPU code
    matrixAdd(a_h, b_h, d_h, N);

    // Stop timer
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&elapsedTime, start, stop);

    // Print execution time
    printf("Time to calculate results on CPU: %f ms\n", elapsedTime);

    // Compare results
    for (int i = 0; i < N*N; i++) {
        if (c_h[i] != d_h[i]) {
            printf("Error: CPU and GPU results do not match\n");
            break;
        }
    }

    // Free memory
    free(a_h);
    free(b_h);
    free(c_h);
    free(d_h);
    cudaFree(a_d);
    cudaFree(b_d);
    cudaFree(c_d);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);

    return 0;
}
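// Editor's note -- a hedged robustness sketch, not part of the original
// program: the 2D launch above relies on N being an exact multiple of the
// block dimensions (true for N = 4096). For arbitrary N, round the grid up
// and guard the kernel with a bounds check:
__global__ void matrixAddKernelGuarded(int *a, int *b, int *c, int N) {
    int col = threadIdx.x + blockIdx.x * blockDim.x;
    int row = threadIdx.y + blockIdx.y * blockDim.y;
    if (col < N && row < N) {
        int index = row * N + col;
        c[index] = a[index] + b[index];
    }
}
// Usage:
//   dim3 block(32, 32);
//   dim3 grid((N + block.x - 1) / block.x, (N + block.y - 1) / block.y);
//   matrixAddKernelGuarded<<<grid, block>>>(a_d, b_d, c_d, N);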
08b759cd759f4c85fa1a79ba4f5f38944783ff00.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "scalar.h"

__device__ float op(float d1,float d2,float *params) {
    return d2 / d1;
}

extern "C"
__global__ void div_scalar_float(int n, int idx,float dx,float *dy,int incy,float *params,float *result) {
    transform(n,idx,dx,dy,incy,params,result);
}
08b759cd759f4c85fa1a79ba4f5f38944783ff00.cu
#include "scalar.h" __device__ float op(float d1,float d2,float *params) { return d2 / d1; } extern "C" __global__ void div_scalar_float(int n, int idx,float dx,float *dy,int incy,float *params,float *result) { transform(n,idx,dx,dy,incy,params,result); }
db8be29f49b2a6a61b4199c3d35820ca146c4779.hip
// !!! This is a file automatically generated by hipify!!!
#include "AllDriverPotentialPairExtGPU.cuh"
#include "EvaluatorPairSoft.h"
#include "EvaluatorPairLJ2.h"
#include "EvaluatorPairCoulomb.h"
#include "EvaluatorPairSGauss.h"
#include "EvaluatorPairSYukawa.h"
#include "PotentialPairLoweThermoGPU.cuh"
#include "EvaluatorPairLoweThermo.h"

// Every evaluator needs a function in this file. The functions are very simple, containing a one line call to
// a template that does all of the work. To add an additional function, copy and paste this one, change the
// template argument to the correct evaluator <EvaluatorPairMine>, and update the type of the 2nd argument to the
// param_type of the evaluator

hipError_t gpu_compute_lj2_forces(const pair_args_t& pair_args, const float4 *d_params)
    {
    return gpu_compute_pair_forces<EvaluatorPairLJ2>(pair_args, d_params);
    }

hipError_t gpu_compute_soft_forces(const pair_args_t& pair_args, const float3 *d_params)
    {
    return gpu_compute_pair_forces<EvaluatorPairSoft>(pair_args, d_params);
    }

hipError_t gpu_compute_coulomb_forces(const pair_args_t& pair_args, const float4 *d_params)
    {
    return gpu_compute_pair_forces<EvaluatorPairCoulomb>(pair_args, d_params);
    }

hipError_t gpu_compute_sgauss_forces(const pair_args_t& pair_args, const float2 *d_params)
    {
    return gpu_compute_pair_forces<EvaluatorPairSGauss>(pair_args, d_params);
    }

hipError_t gpu_compute_syukawa_forces(const pair_args_t& pair_args, const float2 *d_params)
    {
    return gpu_compute_pair_forces<EvaluatorPairSYukawa>(pair_args, d_params);
    }

hipError_t gpu_compute_lowethermolowe_forces(const lowe_pair_args_t& args, const float2 *d_params)
    {
    return gpu_compute_lowe_forces<EvaluatorPairLoweThermo>(args, d_params);
    }

hipError_t gpu_compute_lowethermo_forces(const pair_args_t& pair_args, const float2 *d_params)
    {
    return gpu_compute_pair_forces<EvaluatorPairLoweThermo>(pair_args, d_params);
    }
db8be29f49b2a6a61b4199c3d35820ca146c4779.cu
#include "AllDriverPotentialPairExtGPU.cuh" #include "EvaluatorPairSoft.h" #include "EvaluatorPairLJ2.h" #include "EvaluatorPairCoulomb.h" #include "EvaluatorPairSGauss.h" #include "EvaluatorPairSYukawa.h" #include "PotentialPairLoweThermoGPU.cuh" #include "EvaluatorPairLoweThermo.h" // Every evaluator needs a function in this file. The functions are very simple, containing a one line call to // a template that does all of the work. To add a additional function, copy and paste this one, change the // template argument to the correct evaluator <EvaluatorPairMine>, and update the type of the 2nd argument to the // param_type of the evaluator cudaError_t gpu_compute_lj2_forces(const pair_args_t& pair_args, const float4 *d_params) { return gpu_compute_pair_forces<EvaluatorPairLJ2>(pair_args, d_params); } cudaError_t gpu_compute_soft_forces(const pair_args_t& pair_args, const float3 *d_params) { return gpu_compute_pair_forces<EvaluatorPairSoft>(pair_args, d_params); } cudaError_t gpu_compute_coulomb_forces(const pair_args_t& pair_args, const float4 *d_params) { return gpu_compute_pair_forces<EvaluatorPairCoulomb>(pair_args, d_params); } cudaError_t gpu_compute_sgauss_forces(const pair_args_t& pair_args, const float2 *d_params) { return gpu_compute_pair_forces<EvaluatorPairSGauss>(pair_args, d_params); } cudaError_t gpu_compute_syukawa_forces(const pair_args_t& pair_args, const float2 *d_params) { return gpu_compute_pair_forces<EvaluatorPairSYukawa>(pair_args, d_params); } cudaError_t gpu_compute_lowethermolowe_forces(const lowe_pair_args_t& args, const float2 *d_params) { return gpu_compute_lowe_forces<EvaluatorPairLoweThermo>(args, d_params); } cudaError_t gpu_compute_lowethermo_forces(const pair_args_t& pair_args, const float2 *d_params) { return gpu_compute_pair_forces<EvaluatorPairLoweThermo>(pair_args, d_params); }
68b64a09737f8d63caea878d98c8b31f9c2daff9.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> __global__ void vectorAdd(const float *A, const float *B, float *C, int numElements) { int i = blockDim.x * blockIdx.x + threadIdx.x; if (i < numElements) { C[i] = A[i] + B[i]; } } int main(void) { // Error code to check return values for CUDA calls hipError_t err = hipSuccess; // Print the vector length to be used, and compute its size int numElements = 0; int t; scanf("%d",&t); while(t--) { scanf("%d",&numElements); size_t size = numElements * sizeof(float); //printf("[Vector addition of %d elements]\n", numElements); // Allocate the host input vector A float *h_A = (float *)malloc(size); // Allocate the host input vector B float *h_B = (float *)malloc(size); // Allocate the host output vector C float *h_C = (float *)malloc(size); // Verify that allocations succeeded if (h_A == NULL || h_B == NULL || h_C == NULL) { fprintf(stderr, "Failed to allocate host vectors!\n"); exit(EXIT_FAILURE); } // Initialize the host input vectors for (int i = 0; i < numElements; ++i) { scanf("%f",&h_A[i]); } for (int i = 0; i < numElements; ++i) { scanf("%f",&h_B[i]); } // Allocate the device input vector A float *d_A = NULL; err = hipMalloc((void **)&d_A, size); if (err != hipSuccess) { fprintf(stderr, "Failed to allocate device vector A (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } // Allocate the device input vector B float *d_B = NULL; err = hipMalloc((void **)&d_B, size); if (err != hipSuccess) { fprintf(stderr, "Failed to allocate device vector B (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } // Allocate the device output vector C float *d_C = NULL; err = hipMalloc((void **)&d_C, size); if (err != hipSuccess) { fprintf(stderr, "Failed to allocate device vector C (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } // Copy the host input vectors A and B in host memory to the device input vectors in // device memory // printf("Copy input data from the host memory to the CUDA device\n"); err = hipMemcpy(d_A, h_A, size, hipMemcpyHostToDevice); if (err != hipSuccess) { fprintf(stderr, "Failed to copy vector A from host to device (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } err = hipMemcpy(d_B, h_B, size, hipMemcpyHostToDevice); if (err != hipSuccess) { fprintf(stderr, "Failed to copy vector B from host to device (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } // Launch the Vector Add CUDA Kernel int threadsPerBlock = 256; int blocksPerGrid =(numElements + threadsPerBlock - 1) / threadsPerBlock; //printf("CUDA kernel launch with %d blocks of %d threads\n", blocksPerGrid, threadsPerBlock); hipLaunchKernelGGL(( vectorAdd), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_A, d_B, d_C, numElements); err = hipGetLastError(); if (err != hipSuccess) { fprintf(stderr, "Failed to launch vectorAdd kernel (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } // Copy the device result vector in device memory to the host result vector // in host memory. 
// printf("Copy output data from the CUDA device to the host memory\n"); err = hipMemcpy(h_C, d_C, size, hipMemcpyDeviceToHost); if (err != hipSuccess) { fprintf(stderr, "Failed to copy vector C from device to host (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } // Verify that the result vector is correct for (int i = 0; i < numElements; ++i) { if (fabs(h_A[i] + h_B[i] - h_C[i]) > 1e-5) { fprintf(stderr, "Result verification failed at element %d!\n", i); exit(EXIT_FAILURE); } } // printf("Test PASSED\n"); // Free device global memory err = hipFree(d_A); if (err != hipSuccess) { fprintf(stderr, "Failed to free device vector A (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } err = hipFree(d_B); if (err != hipSuccess) { fprintf(stderr, "Failed to free device vector B (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } err = hipFree(d_C); if (err != hipSuccess) { fprintf(stderr, "Failed to free device vector C (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } // Reset the device and exit // hipDeviceReset causes the driver to clean up all state. While // not mandatory in normal operation, it is good practice. It is also // needed to ensure correct operation when the application is being // profiled. Calling hipDeviceReset causes all profile data to be // flushed before the application exits err = hipDeviceReset(); if (err != hipSuccess) { fprintf(stderr, "Failed to deinitialize the device! error=%s\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } for (int i = 0; i < numElements; ++i) { printf("%.2f ",h_C[i]); } printf("\n"); // Free host memory free(h_A); free(h_B); free(h_C); //printf("Done\n"); } return 0; }
68b64a09737f8d63caea878d98c8b31f9c2daff9.cu
#include <stdio.h> #include <stdlib.h> #include <cuda.h> #include <cuda_runtime.h> __global__ void vectorAdd(const float *A, const float *B, float *C, int numElements) { int i = blockDim.x * blockIdx.x + threadIdx.x; if (i < numElements) { C[i] = A[i] + B[i]; } } int main(void) { // Error code to check return values for CUDA calls cudaError_t err = cudaSuccess; // Print the vector length to be used, and compute its size int numElements = 0; int t; scanf("%d",&t); while(t--) { scanf("%d",&numElements); size_t size = numElements * sizeof(float); //printf("[Vector addition of %d elements]\n", numElements); // Allocate the host input vector A float *h_A = (float *)malloc(size); // Allocate the host input vector B float *h_B = (float *)malloc(size); // Allocate the host output vector C float *h_C = (float *)malloc(size); // Verify that allocations succeeded if (h_A == NULL || h_B == NULL || h_C == NULL) { fprintf(stderr, "Failed to allocate host vectors!\n"); exit(EXIT_FAILURE); } // Initialize the host input vectors for (int i = 0; i < numElements; ++i) { scanf("%f",&h_A[i]); } for (int i = 0; i < numElements; ++i) { scanf("%f",&h_B[i]); } // Allocate the device input vector A float *d_A = NULL; err = cudaMalloc((void **)&d_A, size); if (err != cudaSuccess) { fprintf(stderr, "Failed to allocate device vector A (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } // Allocate the device input vector B float *d_B = NULL; err = cudaMalloc((void **)&d_B, size); if (err != cudaSuccess) { fprintf(stderr, "Failed to allocate device vector B (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } // Allocate the device output vector C float *d_C = NULL; err = cudaMalloc((void **)&d_C, size); if (err != cudaSuccess) { fprintf(stderr, "Failed to allocate device vector C (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } // Copy the host input vectors A and B in host memory to the device input vectors in // device memory // printf("Copy input data from the host memory to the CUDA device\n"); err = cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice); if (err != cudaSuccess) { fprintf(stderr, "Failed to copy vector A from host to device (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } err = cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice); if (err != cudaSuccess) { fprintf(stderr, "Failed to copy vector B from host to device (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } // Launch the Vector Add CUDA Kernel int threadsPerBlock = 256; int blocksPerGrid =(numElements + threadsPerBlock - 1) / threadsPerBlock; //printf("CUDA kernel launch with %d blocks of %d threads\n", blocksPerGrid, threadsPerBlock); vectorAdd<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_B, d_C, numElements); err = cudaGetLastError(); if (err != cudaSuccess) { fprintf(stderr, "Failed to launch vectorAdd kernel (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } // Copy the device result vector in device memory to the host result vector // in host memory. 
// printf("Copy output data from the CUDA device to the host memory\n"); err = cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost); if (err != cudaSuccess) { fprintf(stderr, "Failed to copy vector C from device to host (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } // Verify that the result vector is correct for (int i = 0; i < numElements; ++i) { if (fabs(h_A[i] + h_B[i] - h_C[i]) > 1e-5) { fprintf(stderr, "Result verification failed at element %d!\n", i); exit(EXIT_FAILURE); } } // printf("Test PASSED\n"); // Free device global memory err = cudaFree(d_A); if (err != cudaSuccess) { fprintf(stderr, "Failed to free device vector A (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } err = cudaFree(d_B); if (err != cudaSuccess) { fprintf(stderr, "Failed to free device vector B (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } err = cudaFree(d_C); if (err != cudaSuccess) { fprintf(stderr, "Failed to free device vector C (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } // Reset the device and exit // cudaDeviceReset causes the driver to clean up all state. While // not mandatory in normal operation, it is good practice. It is also // needed to ensure correct operation when the application is being // profiled. Calling cudaDeviceReset causes all profile data to be // flushed before the application exits err = cudaDeviceReset(); if (err != cudaSuccess) { fprintf(stderr, "Failed to deinitialize the device! error=%s\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } for (int i = 0; i < numElements; ++i) { printf("%.2f ",h_C[i]); } printf("\n"); // Free host memory free(h_A); free(h_B); free(h_C); //printf("Done\n"); } return 0; }
dbcfd70847fdeb4ff242aea8f4640eea81c62bd9.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 2.5.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date January 2019 @precisions mixed zc -> ds @author Mark Gates */ #include "magma_internal.h" // mixed precision generation has issues with COMPLEX, so use PRECISION_z #define PRECISION_z #define BLK_X 64 #define BLK_Y 32 // TODO get rid of global variable! static __device__ int flag = 0; /* Divides matrix into ceil( n/BLK_X ) x ceil( n/BLK_Y ) blocks. Each block has BLK_X threads. Each thread loops across one row, updating BLK_Y entries. Updates only the diagonal and below. Blocks that are fully above the diagonal exit immediately. Code similar to zlag2c and zlaset. */ __global__ void zlat2c_lower( int n, const magmaDoubleComplex *A, int lda, magmaFloatComplex *SA, int ldsa, double rmax ) { magmaDoubleComplex tmp; double neg_rmax = - rmax; int ind = blockIdx.x*BLK_X + threadIdx.x; int iby = blockIdx.y*BLK_Y; /* check if full block-column && (below diag) */ bool full = (iby + BLK_Y <= n && (ind >= iby + BLK_Y)); /* do only rows inside matrix, and blocks not above diag */ if ( ind < n && ind + BLK_X > iby ) { A += ind + iby*lda; SA += ind + iby*ldsa; if ( full ) { // full block-column, off-diagonal block #pragma unroll for( int j=0; j < BLK_Y; ++j ) { tmp = A[j*lda]; if ( (MAGMA_Z_REAL(tmp) < neg_rmax) || (MAGMA_Z_REAL(tmp) > rmax) #if defined(PRECISION_z) || defined(PRECISION_c) || (MAGMA_Z_IMAG(tmp) < neg_rmax) || (MAGMA_Z_IMAG(tmp) > rmax) #endif ) { flag = 1; } SA[j*ldsa] = MAGMA_C_MAKE( MAGMA_Z_REAL(tmp), MAGMA_Z_IMAG(tmp) ); } } else { // either partial block-column or diagonal block for( int j=0; j < BLK_Y && iby+j < n && ind >= iby+j; ++j ) { tmp = A[j*lda]; if ( (MAGMA_Z_REAL(tmp) < neg_rmax) || (MAGMA_Z_REAL(tmp) > rmax) #if defined(PRECISION_z) || defined(PRECISION_c) || (MAGMA_Z_IMAG(tmp) < neg_rmax) || (MAGMA_Z_IMAG(tmp) > rmax) #endif ) { flag = 1; } SA[j*ldsa] = MAGMA_C_MAKE( MAGMA_Z_REAL(tmp), MAGMA_Z_IMAG(tmp) ); } } } } /* Similar to zlat2c_full, but updates only the diagonal and above. Blocks that are fully below the diagonal exit immediately. Code similar to zlag2c and zlaset. 
*/ __global__ void zlat2c_upper( int n, const magmaDoubleComplex *A, int lda, magmaFloatComplex *SA, int ldsa, double rmax ) { magmaDoubleComplex tmp; double neg_rmax = - rmax; int ind = blockIdx.x*BLK_X + threadIdx.x; int iby = blockIdx.y*BLK_Y; /* check if full block-column && (above diag) */ bool full = (iby + BLK_Y <= n && (ind + BLK_X <= iby)); /* do only rows inside matrix, and blocks not below diag */ if ( ind < n && ind < iby + BLK_Y ) { A += ind + iby*lda; SA += ind + iby*ldsa; if ( full ) { // full block-column, off-diagonal block #pragma unroll for( int j=0; j < BLK_Y; ++j ) { tmp = A[j*lda]; if ( (MAGMA_Z_REAL(tmp) < neg_rmax) || (MAGMA_Z_REAL(tmp) > rmax) #if defined(PRECISION_z) || defined(PRECISION_c) || (MAGMA_Z_IMAG(tmp) < neg_rmax) || (MAGMA_Z_IMAG(tmp) > rmax) #endif ) { flag = 1; } SA[j*ldsa] = MAGMA_C_MAKE( MAGMA_Z_REAL(tmp), MAGMA_Z_IMAG(tmp) ); } } else { // either partial block-column or diagonal block for( int j=0; j < BLK_Y && iby+j < n; ++j ) { if ( ind <= iby+j ) { tmp = A[j*lda]; if ( (MAGMA_Z_REAL(tmp) < neg_rmax) || (MAGMA_Z_REAL(tmp) > rmax) #if defined(PRECISION_z) || defined(PRECISION_c) || (MAGMA_Z_IMAG(tmp) < neg_rmax) || (MAGMA_Z_IMAG(tmp) > rmax) #endif ) { flag = 1; } SA[j*ldsa] = MAGMA_C_MAKE( MAGMA_Z_REAL(tmp), MAGMA_Z_IMAG(tmp) ); } } } } } /***************************************************************************//** Purpose ------- ZLAT2C converts a double-complex matrix, A, to a single-complex matrix, SA. RMAX is the overflow for the single-complex arithmetic. ZLAT2C checks that all the entries of A are between -RMAX and RMAX. If not, the conversion is aborted and a flag is raised. Arguments --------- @param[in] uplo magma_uplo_t Specifies the part of the matrix A to be converted. - = MagmaUpper: Upper triangular part - = MagmaLower: Lower triangular part @param[in] n INTEGER The number of columns of the matrix A. n >= 0. @param[in] A COMPLEX_16 array, dimension (LDA,n) On entry, the n-by-n coefficient matrix A. @param[in] lda INTEGER The leading dimension of the array A. LDA >= max(1,n). @param[out] SA COMPLEX array, dimension (LDSA,n) On exit, if INFO=0, the n-by-n coefficient matrix SA; if INFO > 0, the content of SA is unspecified. @param[in] ldsa INTEGER The leading dimension of the array SA. LDSA >= max(1,n). @param[out] info INTEGER - = 0: successful exit. - < 0: if INFO = -i, the i-th argument had an illegal value - = 1: an entry of the matrix A is greater than the COMPLEX overflow threshold, in this case, the content of SA on exit is unspecified. @param[in] queue magma_queue_t Queue to execute in. 
@ingroup magma_lat2 *******************************************************************************/ extern "C" void magmablas_zlat2c( magma_uplo_t uplo, magma_int_t n, magmaDoubleComplex_const_ptr A, magma_int_t lda, magmaFloatComplex_ptr SA, magma_int_t ldsa, magma_queue_t queue, magma_int_t *info ) { *info = 0; if ( uplo != MagmaLower && uplo != MagmaUpper ) *info = -1; else if ( n < 0 ) *info = -2; else if ( lda < max(1,n) ) *info = -4; else if ( ldsa < max(1,n) ) *info = -6; if (*info != 0) { magma_xerbla( __func__, -(*info) ); return; //*info; } /* quick return */ if ( n == 0 ) { return; } double rmax = (double)lapackf77_slamch("O"); dim3 threads( BLK_X, 1 ); dim3 grid( magma_ceildiv( n, BLK_X ), magma_ceildiv( n, BLK_Y ) ); hipMemcpyToSymbol( flag, info, sizeof(flag) ); // flag = 0 if (uplo == MagmaLower) { hipLaunchKernelGGL(( zlat2c_lower), dim3(grid), dim3(threads), 0, queue->cuda_stream() , n, A, lda, SA, ldsa, rmax); } else if (uplo == MagmaUpper) { hipLaunchKernelGGL(( zlat2c_upper), dim3(grid), dim3(threads), 0, queue->cuda_stream() , n, A, lda, SA, ldsa, rmax); } hipMemcpyFromSymbol( info, flag, sizeof(flag) ); // info = flag }
dbcfd70847fdeb4ff242aea8f4640eea81c62bd9.cu
/* -- MAGMA (version 2.5.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date January 2019 @precisions mixed zc -> ds @author Mark Gates */ #include "magma_internal.h" // mixed precision generation has issues with COMPLEX, so use PRECISION_z #define PRECISION_z #define BLK_X 64 #define BLK_Y 32 // TODO get rid of global variable! static __device__ int flag = 0; /* Divides matrix into ceil( n/BLK_X ) x ceil( n/BLK_Y ) blocks. Each block has BLK_X threads. Each thread loops across one row, updating BLK_Y entries. Updates only the diagonal and below. Blocks that are fully above the diagonal exit immediately. Code similar to zlag2c and zlaset. */ __global__ void zlat2c_lower( int n, const magmaDoubleComplex *A, int lda, magmaFloatComplex *SA, int ldsa, double rmax ) { magmaDoubleComplex tmp; double neg_rmax = - rmax; int ind = blockIdx.x*BLK_X + threadIdx.x; int iby = blockIdx.y*BLK_Y; /* check if full block-column && (below diag) */ bool full = (iby + BLK_Y <= n && (ind >= iby + BLK_Y)); /* do only rows inside matrix, and blocks not above diag */ if ( ind < n && ind + BLK_X > iby ) { A += ind + iby*lda; SA += ind + iby*ldsa; if ( full ) { // full block-column, off-diagonal block #pragma unroll for( int j=0; j < BLK_Y; ++j ) { tmp = A[j*lda]; if ( (MAGMA_Z_REAL(tmp) < neg_rmax) || (MAGMA_Z_REAL(tmp) > rmax) #if defined(PRECISION_z) || defined(PRECISION_c) || (MAGMA_Z_IMAG(tmp) < neg_rmax) || (MAGMA_Z_IMAG(tmp) > rmax) #endif ) { flag = 1; } SA[j*ldsa] = MAGMA_C_MAKE( MAGMA_Z_REAL(tmp), MAGMA_Z_IMAG(tmp) ); } } else { // either partial block-column or diagonal block for( int j=0; j < BLK_Y && iby+j < n && ind >= iby+j; ++j ) { tmp = A[j*lda]; if ( (MAGMA_Z_REAL(tmp) < neg_rmax) || (MAGMA_Z_REAL(tmp) > rmax) #if defined(PRECISION_z) || defined(PRECISION_c) || (MAGMA_Z_IMAG(tmp) < neg_rmax) || (MAGMA_Z_IMAG(tmp) > rmax) #endif ) { flag = 1; } SA[j*ldsa] = MAGMA_C_MAKE( MAGMA_Z_REAL(tmp), MAGMA_Z_IMAG(tmp) ); } } } } /* Similar to zlat2c_full, but updates only the diagonal and above. Blocks that are fully below the diagonal exit immediately. Code similar to zlag2c and zlaset. 
*/ __global__ void zlat2c_upper( int n, const magmaDoubleComplex *A, int lda, magmaFloatComplex *SA, int ldsa, double rmax ) { magmaDoubleComplex tmp; double neg_rmax = - rmax; int ind = blockIdx.x*BLK_X + threadIdx.x; int iby = blockIdx.y*BLK_Y; /* check if full block-column && (above diag) */ bool full = (iby + BLK_Y <= n && (ind + BLK_X <= iby)); /* do only rows inside matrix, and blocks not below diag */ if ( ind < n && ind < iby + BLK_Y ) { A += ind + iby*lda; SA += ind + iby*ldsa; if ( full ) { // full block-column, off-diagonal block #pragma unroll for( int j=0; j < BLK_Y; ++j ) { tmp = A[j*lda]; if ( (MAGMA_Z_REAL(tmp) < neg_rmax) || (MAGMA_Z_REAL(tmp) > rmax) #if defined(PRECISION_z) || defined(PRECISION_c) || (MAGMA_Z_IMAG(tmp) < neg_rmax) || (MAGMA_Z_IMAG(tmp) > rmax) #endif ) { flag = 1; } SA[j*ldsa] = MAGMA_C_MAKE( MAGMA_Z_REAL(tmp), MAGMA_Z_IMAG(tmp) ); } } else { // either partial block-column or diagonal block for( int j=0; j < BLK_Y && iby+j < n; ++j ) { if ( ind <= iby+j ) { tmp = A[j*lda]; if ( (MAGMA_Z_REAL(tmp) < neg_rmax) || (MAGMA_Z_REAL(tmp) > rmax) #if defined(PRECISION_z) || defined(PRECISION_c) || (MAGMA_Z_IMAG(tmp) < neg_rmax) || (MAGMA_Z_IMAG(tmp) > rmax) #endif ) { flag = 1; } SA[j*ldsa] = MAGMA_C_MAKE( MAGMA_Z_REAL(tmp), MAGMA_Z_IMAG(tmp) ); } } } } } /***************************************************************************//** Purpose ------- ZLAT2C converts a double-complex matrix, A, to a single-complex matrix, SA. RMAX is the overflow for the single-complex arithmetic. ZLAT2C checks that all the entries of A are between -RMAX and RMAX. If not, the conversion is aborted and a flag is raised. Arguments --------- @param[in] uplo magma_uplo_t Specifies the part of the matrix A to be converted. - = MagmaUpper: Upper triangular part - = MagmaLower: Lower triangular part @param[in] n INTEGER The number of columns of the matrix A. n >= 0. @param[in] A COMPLEX_16 array, dimension (LDA,n) On entry, the n-by-n coefficient matrix A. @param[in] lda INTEGER The leading dimension of the array A. LDA >= max(1,n). @param[out] SA COMPLEX array, dimension (LDSA,n) On exit, if INFO=0, the n-by-n coefficient matrix SA; if INFO > 0, the content of SA is unspecified. @param[in] ldsa INTEGER The leading dimension of the array SA. LDSA >= max(1,n). @param[out] info INTEGER - = 0: successful exit. - < 0: if INFO = -i, the i-th argument had an illegal value - = 1: an entry of the matrix A is greater than the COMPLEX overflow threshold, in this case, the content of SA on exit is unspecified. @param[in] queue magma_queue_t Queue to execute in. 
@ingroup magma_lat2 *******************************************************************************/ extern "C" void magmablas_zlat2c( magma_uplo_t uplo, magma_int_t n, magmaDoubleComplex_const_ptr A, magma_int_t lda, magmaFloatComplex_ptr SA, magma_int_t ldsa, magma_queue_t queue, magma_int_t *info ) { *info = 0; if ( uplo != MagmaLower && uplo != MagmaUpper ) *info = -1; else if ( n < 0 ) *info = -2; else if ( lda < max(1,n) ) *info = -4; else if ( ldsa < max(1,n) ) *info = -6; if (*info != 0) { magma_xerbla( __func__, -(*info) ); return; //*info; } /* quick return */ if ( n == 0 ) { return; } double rmax = (double)lapackf77_slamch("O"); dim3 threads( BLK_X, 1 ); dim3 grid( magma_ceildiv( n, BLK_X ), magma_ceildiv( n, BLK_Y ) ); cudaMemcpyToSymbol( flag, info, sizeof(flag) ); // flag = 0 if (uplo == MagmaLower) { zlat2c_lower<<< grid, threads, 0, queue->cuda_stream() >>> (n, A, lda, SA, ldsa, rmax); } else if (uplo == MagmaUpper) { zlat2c_upper<<< grid, threads, 0, queue->cuda_stream() >>> (n, A, lda, SA, ldsa, rmax); } cudaMemcpyFromSymbol( info, flag, sizeof(flag) ); // info = flag }
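// Editor's note -- a minimal usage sketch for magmablas_zlat2c (not part of
// the MAGMA sources); dA and dSA are assumed to be already-allocated device
// arrays with the stated leading dimensions. The interesting part is the
// overflow flag returned through info:
magma_int_t zlat2c_usage_sketch(
    magma_int_t n,
    magmaDoubleComplex_const_ptr dA, magma_int_t ldda,
    magmaFloatComplex_ptr dSA, magma_int_t ldsa,
    magma_queue_t queue )
{
    magma_int_t info = 0;
    magmablas_zlat2c( MagmaLower, n, dA, ldda, dSA, ldsa, queue, &info );
    if (info == 1) {
        // Some entry of A exceeded the single-precision overflow threshold:
        // dSA is unspecified, so a caller would stay in double precision here.
    }
    return info;
}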
3f95e5e4e1337a170b91caffcd99e57312966d7d.hip
// !!! This is a file automatically generated by hipify!!! // ************************************************************************* // // PARALUTION www.paralution.com // // Copyright (C) 2012-2014 Dimitar Lukarski // // This program is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. // // This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // // You should have received a copy of the GNU General Public License // along with this program. If not, see <http://www.gnu.org/licenses/>. // // ************************************************************************* // PARALUTION version 0.7.0 #include "gpu_matrix_csr.hpp" #include "gpu_matrix_coo.hpp" #include "gpu_matrix_dia.hpp" #include "gpu_matrix_ell.hpp" #include "gpu_matrix_hyb.hpp" #include "gpu_matrix_mcsr.hpp" #include "gpu_matrix_bcsr.hpp" #include "gpu_matrix_dense.hpp" #include "gpu_vector.hpp" #include "../host/host_matrix_hyb.hpp" #include "../base_matrix.hpp" #include "../base_vector.hpp" #include "../backend_manager.hpp" #include "../../utils/log.hpp" #include "../../utils/allocate_free.hpp" #include "gpu_utils.hpp" #include "cuda_kernels_general.hpp" #include "cuda_kernels_hyb.hpp" #include "cuda_kernels_vector.hpp" #include "gpu_allocate_free.hpp" #include "../matrix_formats_ind.hpp" #include <assert.h> #include <hip/hip_runtime.h> #include <cusparse_v2.h> namespace paralution { template <typename ValueType> GPUAcceleratorMatrixHYB<ValueType>::GPUAcceleratorMatrixHYB() { // no default constructors LOG_INFO("no default constructor"); FATAL_ERROR(__FILE__, __LINE__); } template <typename ValueType> GPUAcceleratorMatrixHYB<ValueType>::GPUAcceleratorMatrixHYB(const Paralution_Backend_Descriptor local_backend) { LOG_DEBUG(this, "GPUAcceleratorMatrixHYB::GPUAcceleratorMatrixHYB()", "constructor with local_backend"); this->mat_.ELL.val = NULL; this->mat_.ELL.col = NULL; this->mat_.ELL.max_row = 0; this->mat_.COO.row = NULL; this->mat_.COO.col = NULL; this->mat_.COO.val = NULL; this->ell_nnz_ = 0; this->coo_nnz_ = 0; this->set_backend(local_backend); CHECK_CUDA_ERROR(__FILE__, __LINE__); } template <typename ValueType> GPUAcceleratorMatrixHYB<ValueType>::~GPUAcceleratorMatrixHYB() { LOG_DEBUG(this, "GPUAcceleratorMatrixHYB::~GPUAcceleratorMatrixHYB()", "destructor"); this->Clear(); } template <typename ValueType> void GPUAcceleratorMatrixHYB<ValueType>::info(void) const { LOG_INFO("GPUAcceleratorMatrixHYB<ValueType>"); } template <typename ValueType> void GPUAcceleratorMatrixHYB<ValueType>::AllocateHYB(const int ell_nnz, const int coo_nnz, const int ell_max_row, const int nrow, const int ncol) { assert( ell_nnz >= 0); assert( coo_nnz >= 0); assert( ell_max_row >= 0); assert( ncol >= 0); assert( nrow >= 0); if (this->get_nnz() > 0) this->Clear(); if (ell_nnz + coo_nnz > 0) { // ELL assert(ell_nnz == ell_max_row*nrow); allocate_gpu(ell_nnz, &this->mat_.ELL.val); allocate_gpu(ell_nnz, &this->mat_.ELL.col); set_to_zero_gpu(this->local_backend_.GPU_block_size, this->local_backend_.GPU_max_threads, ell_nnz, this->mat_.ELL.val); set_to_zero_gpu(this->local_backend_.GPU_block_size, this->local_backend_.GPU_max_threads, ell_nnz, this->mat_.ELL.col); 
this->mat_.ELL.max_row = ell_max_row; this->ell_nnz_ = ell_nnz; // COO allocate_gpu(coo_nnz, &this->mat_.COO.row); allocate_gpu(coo_nnz, &this->mat_.COO.col); allocate_gpu(coo_nnz, &this->mat_.COO.val); set_to_zero_gpu(this->local_backend_.GPU_block_size, this->local_backend_.GPU_max_threads, coo_nnz, this->mat_.COO.row); set_to_zero_gpu(this->local_backend_.GPU_block_size, this->local_backend_.GPU_max_threads, coo_nnz, this->mat_.COO.col); set_to_zero_gpu(this->local_backend_.GPU_block_size, this->local_backend_.GPU_max_threads, coo_nnz, this->mat_.COO.val); this->coo_nnz_ = coo_nnz; this->nrow_ = nrow; this->ncol_ = ncol; this->nnz_ = ell_nnz + coo_nnz; } } template <typename ValueType> void GPUAcceleratorMatrixHYB<ValueType>::Clear() { if (this->get_nnz() > 0) { free_gpu(&this->mat_.COO.row); free_gpu(&this->mat_.COO.col); free_gpu(&this->mat_.COO.val); free_gpu(&this->mat_.ELL.val); free_gpu(&this->mat_.ELL.col); this->ell_nnz_ = 0; this->coo_nnz_ = 0; this->mat_.ELL.max_row = 0; this->nrow_ = 0; this->ncol_ = 0; this->nnz_ = 0; } } template <typename ValueType> void GPUAcceleratorMatrixHYB<ValueType>::CopyFromHost(const HostMatrix<ValueType> &src) { const HostMatrixHYB<ValueType> *cast_mat; // copy only in the same format assert(this->get_mat_format() == src.get_mat_format()); // CPU to GPU copy if (cast_mat = dynamic_cast<const HostMatrixHYB<ValueType>*> (&src)) { if (this->get_nnz() == 0) this->AllocateHYB(cast_mat->get_ell_nnz(), cast_mat->get_coo_nnz(), cast_mat->get_ell_max_row(), cast_mat->get_nrow(), cast_mat->get_ncol()); assert(this->get_nnz() == src.get_nnz()); assert(this->get_nrow() == src.get_nrow()); assert(this->get_ncol() == src.get_ncol()); if (this->get_ell_nnz() > 0) { // ELL hipMemcpy(this->mat_.ELL.col, // dst cast_mat->mat_.ELL.col, // src this->get_ell_nnz()*sizeof(int), // size hipMemcpyHostToDevice); CHECK_CUDA_ERROR(__FILE__, __LINE__); hipMemcpy(this->mat_.ELL.val, // dst cast_mat->mat_.ELL.val, // src this->get_ell_nnz()*sizeof(ValueType), // size hipMemcpyHostToDevice); CHECK_CUDA_ERROR(__FILE__, __LINE__); } if (this->get_coo_nnz() > 0) { // COO hipMemcpy(this->mat_.COO.row, // dst cast_mat->mat_.COO.row, // src (this->get_coo_nnz())*sizeof(int), // size hipMemcpyHostToDevice); CHECK_CUDA_ERROR(__FILE__, __LINE__); hipMemcpy(this->mat_.COO.col, // dst cast_mat->mat_.COO.col, // src this->get_coo_nnz()*sizeof(int), // size hipMemcpyHostToDevice); CHECK_CUDA_ERROR(__FILE__, __LINE__); hipMemcpy(this->mat_.COO.val, // dst cast_mat->mat_.COO.val, // src this->get_coo_nnz()*sizeof(ValueType), // size hipMemcpyHostToDevice); CHECK_CUDA_ERROR(__FILE__, __LINE__); } } else { LOG_INFO("Error unsupported GPU matrix type"); this->info(); src.info(); FATAL_ERROR(__FILE__, __LINE__); } } template <typename ValueType> void GPUAcceleratorMatrixHYB<ValueType>::CopyToHost(HostMatrix<ValueType> *dst) const { HostMatrixHYB<ValueType> *cast_mat; // copy only in the same format assert(this->get_mat_format() == dst->get_mat_format()); // GPU to CPU copy if (cast_mat = dynamic_cast<HostMatrixHYB<ValueType>*> (dst)) { cast_mat->set_backend(this->local_backend_); if (dst->get_nnz() == 0) cast_mat->AllocateHYB(this->get_ell_nnz(), this->get_coo_nnz(), this->get_ell_max_row(), this->get_nrow(), this->get_ncol()); assert(this->get_nnz() == dst->get_nnz()); assert(this->get_nrow() == dst->get_nrow()); assert(this->get_ncol() == dst->get_ncol()); if (this->get_ell_nnz() > 0) { // ELL hipMemcpy(cast_mat->mat_.ELL.col, // dst this->mat_.ELL.col, // src this->get_ell_nnz()*sizeof(int), 
// size hipMemcpyDeviceToHost); CHECK_CUDA_ERROR(__FILE__, __LINE__); hipMemcpy(cast_mat->mat_.ELL.val, // dst this->mat_.ELL.val, // src this->get_ell_nnz()*sizeof(ValueType), // size hipMemcpyDeviceToHost); CHECK_CUDA_ERROR(__FILE__, __LINE__); } if (this->get_coo_nnz() > 0) { // COO hipMemcpy(cast_mat->mat_.COO.row, // dst this->mat_.COO.row, // src this->get_coo_nnz()*sizeof(int), // size hipMemcpyDeviceToHost); CHECK_CUDA_ERROR(__FILE__, __LINE__); hipMemcpy(cast_mat->mat_.COO.col, // dst this->mat_.COO.col, // src this->get_coo_nnz()*sizeof(int), // size hipMemcpyDeviceToHost); CHECK_CUDA_ERROR(__FILE__, __LINE__); hipMemcpy(cast_mat->mat_.COO.val, // dst this->mat_.COO.val, // src this->get_coo_nnz()*sizeof(ValueType), // size hipMemcpyDeviceToHost); CHECK_CUDA_ERROR(__FILE__, __LINE__); } } else { LOG_INFO("Error unsupported GPU matrix type"); this->info(); dst->info(); FATAL_ERROR(__FILE__, __LINE__); } } template <typename ValueType> void GPUAcceleratorMatrixHYB<ValueType>::CopyFrom(const BaseMatrix<ValueType> &src) { const GPUAcceleratorMatrixHYB<ValueType> *gpu_cast_mat; const HostMatrix<ValueType> *host_cast_mat; // copy only in the same format assert(this->get_mat_format() == src.get_mat_format()); // GPU to GPU copy if (gpu_cast_mat = dynamic_cast<const GPUAcceleratorMatrixHYB<ValueType>*> (&src)) { if (this->get_nnz() == 0) this->AllocateHYB(gpu_cast_mat->get_ell_nnz(), gpu_cast_mat->get_coo_nnz(), gpu_cast_mat->get_ell_max_row(), gpu_cast_mat->get_nrow(), gpu_cast_mat->get_ncol()); assert(this->get_nnz() == src.get_nnz()); assert(this->get_nrow() == src.get_nrow()); assert(this->get_ncol() == src.get_ncol()); if (this->get_ell_nnz() > 0) { // ELL hipMemcpy(this->mat_.ELL.col, // dst gpu_cast_mat->mat_.ELL.col, // src this->get_ell_nnz()*sizeof(int), // size hipMemcpyDeviceToDevice); CHECK_CUDA_ERROR(__FILE__, __LINE__); hipMemcpy(this->mat_.ELL.val, // dst gpu_cast_mat->mat_.ELL.val, // src this->get_ell_nnz()*sizeof(ValueType), // size hipMemcpyDeviceToDevice); CHECK_CUDA_ERROR(__FILE__, __LINE__); } if (this->get_coo_nnz() > 0) { // COO hipMemcpy(this->mat_.COO.row, // dst gpu_cast_mat->mat_.COO.row, // src (this->get_coo_nnz())*sizeof(int), // size hipMemcpyDeviceToDevice); CHECK_CUDA_ERROR(__FILE__, __LINE__); hipMemcpy(this->mat_.COO.col, // dst gpu_cast_mat->mat_.COO.col, // src this->get_coo_nnz()*sizeof(int), // size hipMemcpyDeviceToDevice); CHECK_CUDA_ERROR(__FILE__, __LINE__); hipMemcpy(this->mat_.COO.val, // dst gpu_cast_mat->mat_.COO.val, // src this->get_coo_nnz()*sizeof(ValueType), // size hipMemcpyDeviceToDevice); CHECK_CUDA_ERROR(__FILE__, __LINE__); } } else { //CPU to GPU if (host_cast_mat = dynamic_cast<const HostMatrix<ValueType>*> (&src)) { this->CopyFromHost(*host_cast_mat); } else { LOG_INFO("Error unsupported GPU matrix type"); this->info(); src.info(); FATAL_ERROR(__FILE__, __LINE__); } } } template <typename ValueType> void GPUAcceleratorMatrixHYB<ValueType>::CopyTo(BaseMatrix<ValueType> *dst) const { GPUAcceleratorMatrixHYB<ValueType> *gpu_cast_mat; HostMatrix<ValueType> *host_cast_mat; // copy only in the same format assert(this->get_mat_format() == dst->get_mat_format()); // GPU to GPU copy if (gpu_cast_mat = dynamic_cast<GPUAcceleratorMatrixHYB<ValueType>*> (dst)) { gpu_cast_mat->set_backend(this->local_backend_); if (this->get_nnz() == 0) gpu_cast_mat->AllocateHYB(this->get_ell_nnz(), this->get_coo_nnz(), this->get_ell_max_row(), this->get_nrow(), this->get_ncol()); assert(this->get_nnz() == dst->get_nnz()); assert(this->get_nrow() == 
dst->get_nrow()); assert(this->get_ncol() == dst->get_ncol()); if (this->get_ell_nnz() > 0) { // ELL hipMemcpy(gpu_cast_mat->mat_.ELL.col, // dst this->mat_.ELL.col, // src this->get_ell_nnz()*sizeof(int), // size hipMemcpyDeviceToDevice); CHECK_CUDA_ERROR(__FILE__, __LINE__); hipMemcpy(gpu_cast_mat->mat_.ELL.val, // dst this->mat_.ELL.val, // src this->get_ell_nnz()*sizeof(ValueType), // size hipMemcpyDeviceToDevice); CHECK_CUDA_ERROR(__FILE__, __LINE__); } if (this->get_coo_nnz() > 0) { // COO hipMemcpy(gpu_cast_mat->mat_.COO.row, // dst this->mat_.COO.row, // src this->get_coo_nnz()*sizeof(int), // size hipMemcpyDeviceToDevice); CHECK_CUDA_ERROR(__FILE__, __LINE__); hipMemcpy(gpu_cast_mat->mat_.COO.col, // dst this->mat_.COO.col, // src this->get_coo_nnz()*sizeof(int), // size hipMemcpyDeviceToDevice); CHECK_CUDA_ERROR(__FILE__, __LINE__); hipMemcpy(gpu_cast_mat->mat_.COO.val, // dst this->mat_.COO.val, // src this->get_coo_nnz()*sizeof(ValueType), // size hipMemcpyDeviceToDevice); CHECK_CUDA_ERROR(__FILE__, __LINE__); } } else { //GPU to CPU if (host_cast_mat = dynamic_cast<HostMatrix<ValueType>*> (dst)) { this->CopyToHost(host_cast_mat); } else { LOG_INFO("Error unsupported GPU matrix type"); this->info(); dst->info(); FATAL_ERROR(__FILE__, __LINE__); } } } template <typename ValueType> void GPUAcceleratorMatrixHYB<ValueType>::CopyFromHostAsync(const HostMatrix<ValueType> &src) { const HostMatrixHYB<ValueType> *cast_mat; // copy only in the same format assert(this->get_mat_format() == src.get_mat_format()); // CPU to GPU copy if (cast_mat = dynamic_cast<const HostMatrixHYB<ValueType>*> (&src)) { if (this->get_nnz() == 0) this->AllocateHYB(cast_mat->get_ell_nnz(), cast_mat->get_coo_nnz(), cast_mat->get_ell_max_row(), cast_mat->get_nrow(), cast_mat->get_ncol()); assert(this->get_nnz() == src.get_nnz()); assert(this->get_nrow() == src.get_nrow()); assert(this->get_ncol() == src.get_ncol()); if (this->get_ell_nnz() > 0) { // ELL hipMemcpyAsync(this->mat_.ELL.col, // dst cast_mat->mat_.ELL.col, // src this->get_ell_nnz()*sizeof(int), // size hipMemcpyHostToDevice); CHECK_CUDA_ERROR(__FILE__, __LINE__); hipMemcpyAsync(this->mat_.ELL.val, // dst cast_mat->mat_.ELL.val, // src this->get_ell_nnz()*sizeof(ValueType), // size hipMemcpyHostToDevice); CHECK_CUDA_ERROR(__FILE__, __LINE__); } if (this->get_coo_nnz() > 0) { // COO hipMemcpyAsync(this->mat_.COO.row, // dst cast_mat->mat_.COO.row, // src (this->get_coo_nnz())*sizeof(int), // size hipMemcpyHostToDevice); CHECK_CUDA_ERROR(__FILE__, __LINE__); hipMemcpyAsync(this->mat_.COO.col, // dst cast_mat->mat_.COO.col, // src this->get_coo_nnz()*sizeof(int), // size hipMemcpyHostToDevice); CHECK_CUDA_ERROR(__FILE__, __LINE__); hipMemcpyAsync(this->mat_.COO.val, // dst cast_mat->mat_.COO.val, // src this->get_coo_nnz()*sizeof(ValueType), // size hipMemcpyHostToDevice); CHECK_CUDA_ERROR(__FILE__, __LINE__); } } else { LOG_INFO("Error unsupported GPU matrix type"); this->info(); src.info(); FATAL_ERROR(__FILE__, __LINE__); } } template <typename ValueType> void GPUAcceleratorMatrixHYB<ValueType>::CopyToHostAsync(HostMatrix<ValueType> *dst) const { HostMatrixHYB<ValueType> *cast_mat; // copy only in the same format assert(this->get_mat_format() == dst->get_mat_format()); // GPU to CPU copy if (cast_mat = dynamic_cast<HostMatrixHYB<ValueType>*> (dst)) { cast_mat->set_backend(this->local_backend_); if (dst->get_nnz() == 0) cast_mat->AllocateHYB(this->get_ell_nnz(), this->get_coo_nnz(), this->get_ell_max_row(), this->get_nrow(), this->get_ncol()); 
assert(this->get_nnz() == dst->get_nnz()); assert(this->get_nrow() == dst->get_nrow()); assert(this->get_ncol() == dst->get_ncol()); if (this->get_ell_nnz() > 0) { // ELL hipMemcpyAsync(cast_mat->mat_.ELL.col, // dst this->mat_.ELL.col, // src this->get_ell_nnz()*sizeof(int), // size hipMemcpyDeviceToHost); CHECK_CUDA_ERROR(__FILE__, __LINE__); hipMemcpyAsync(cast_mat->mat_.ELL.val, // dst this->mat_.ELL.val, // src this->get_ell_nnz()*sizeof(ValueType), // size hipMemcpyDeviceToHost); CHECK_CUDA_ERROR(__FILE__, __LINE__); } if (this->get_coo_nnz() > 0) { // COO hipMemcpyAsync(cast_mat->mat_.COO.row, // dst this->mat_.COO.row, // src this->get_coo_nnz()*sizeof(int), // size hipMemcpyDeviceToHost); CHECK_CUDA_ERROR(__FILE__, __LINE__); hipMemcpyAsync(cast_mat->mat_.COO.col, // dst this->mat_.COO.col, // src this->get_coo_nnz()*sizeof(int), // size hipMemcpyDeviceToHost); CHECK_CUDA_ERROR(__FILE__, __LINE__); hipMemcpyAsync(cast_mat->mat_.COO.val, // dst this->mat_.COO.val, // src this->get_coo_nnz()*sizeof(ValueType), // size hipMemcpyDeviceToHost); CHECK_CUDA_ERROR(__FILE__, __LINE__); } } else { LOG_INFO("Error unsupported GPU matrix type"); this->info(); dst->info(); FATAL_ERROR(__FILE__, __LINE__); } } template <typename ValueType> void GPUAcceleratorMatrixHYB<ValueType>::CopyFromAsync(const BaseMatrix<ValueType> &src) { const GPUAcceleratorMatrixHYB<ValueType> *gpu_cast_mat; const HostMatrix<ValueType> *host_cast_mat; // copy only in the same format assert(this->get_mat_format() == src.get_mat_format()); // GPU to GPU copy if (gpu_cast_mat = dynamic_cast<const GPUAcceleratorMatrixHYB<ValueType>*> (&src)) { if (this->get_nnz() == 0) this->AllocateHYB(gpu_cast_mat->get_ell_nnz(), gpu_cast_mat->get_coo_nnz(), gpu_cast_mat->get_ell_max_row(), gpu_cast_mat->get_nrow(), gpu_cast_mat->get_ncol()); assert(this->get_nnz() == src.get_nnz()); assert(this->get_nrow() == src.get_nrow()); assert(this->get_ncol() == src.get_ncol()); if (this->get_ell_nnz() > 0) { // ELL hipMemcpy(this->mat_.ELL.col, // dst gpu_cast_mat->mat_.ELL.col, // src this->get_ell_nnz()*sizeof(int), // size hipMemcpyDeviceToDevice); CHECK_CUDA_ERROR(__FILE__, __LINE__); hipMemcpy(this->mat_.ELL.val, // dst gpu_cast_mat->mat_.ELL.val, // src this->get_ell_nnz()*sizeof(ValueType), // size hipMemcpyDeviceToDevice); CHECK_CUDA_ERROR(__FILE__, __LINE__); } if (this->get_coo_nnz() > 0) { // COO hipMemcpy(this->mat_.COO.row, // dst gpu_cast_mat->mat_.COO.row, // src (this->get_coo_nnz())*sizeof(int), // size hipMemcpyDeviceToDevice); CHECK_CUDA_ERROR(__FILE__, __LINE__); hipMemcpy(this->mat_.COO.col, // dst gpu_cast_mat->mat_.COO.col, // src this->get_coo_nnz()*sizeof(int), // size hipMemcpyDeviceToDevice); CHECK_CUDA_ERROR(__FILE__, __LINE__); hipMemcpy(this->mat_.COO.val, // dst gpu_cast_mat->mat_.COO.val, // src this->get_coo_nnz()*sizeof(ValueType), // size hipMemcpyDeviceToDevice); CHECK_CUDA_ERROR(__FILE__, __LINE__); } } else { //CPU to GPU if (host_cast_mat = dynamic_cast<const HostMatrix<ValueType>*> (&src)) { this->CopyFromHostAsync(*host_cast_mat); } else { LOG_INFO("Error unsupported GPU matrix type"); this->info(); src.info(); FATAL_ERROR(__FILE__, __LINE__); } } } template <typename ValueType> void GPUAcceleratorMatrixHYB<ValueType>::CopyToAsync(BaseMatrix<ValueType> *dst) const { GPUAcceleratorMatrixHYB<ValueType> *gpu_cast_mat; HostMatrix<ValueType> *host_cast_mat; // copy only in the same format assert(this->get_mat_format() == dst->get_mat_format()); // GPU to GPU copy if (gpu_cast_mat = 
dynamic_cast<GPUAcceleratorMatrixHYB<ValueType>*> (dst)) { gpu_cast_mat->set_backend(this->local_backend_); if (this->get_nnz() == 0) gpu_cast_mat->AllocateHYB(this->get_ell_nnz(), this->get_coo_nnz(), this->get_ell_max_row(), this->get_nrow(), this->get_ncol()); assert(this->get_nnz() == dst->get_nnz()); assert(this->get_nrow() == dst->get_nrow()); assert(this->get_ncol() == dst->get_ncol()); if (this->get_ell_nnz() > 0) { // ELL hipMemcpy(gpu_cast_mat->mat_.ELL.col, // dst this->mat_.ELL.col, // src this->get_ell_nnz()*sizeof(int), // size hipMemcpyDeviceToDevice); CHECK_CUDA_ERROR(__FILE__, __LINE__); hipMemcpy(gpu_cast_mat->mat_.ELL.val, // dst this->mat_.ELL.val, // src this->get_ell_nnz()*sizeof(ValueType), // size hipMemcpyDeviceToDevice); CHECK_CUDA_ERROR(__FILE__, __LINE__); } if (this->get_coo_nnz() > 0) { // COO hipMemcpy(gpu_cast_mat->mat_.COO.row, // dst this->mat_.COO.row, // src this->get_coo_nnz()*sizeof(int), // size hipMemcpyDeviceToDevice); CHECK_CUDA_ERROR(__FILE__, __LINE__); hipMemcpy(gpu_cast_mat->mat_.COO.col, // dst this->mat_.COO.col, // src this->get_coo_nnz()*sizeof(int), // size hipMemcpyDeviceToDevice); CHECK_CUDA_ERROR(__FILE__, __LINE__); hipMemcpy(gpu_cast_mat->mat_.COO.val, // dst this->mat_.COO.val, // src this->get_coo_nnz()*sizeof(ValueType), // size hipMemcpyDeviceToDevice); CHECK_CUDA_ERROR(__FILE__, __LINE__); } } else { //GPU to CPU if (host_cast_mat = dynamic_cast<HostMatrix<ValueType>*> (dst)) { this->CopyToHostAsync(host_cast_mat); } else { LOG_INFO("Error unsupported GPU matrix type"); this->info(); dst->info(); FATAL_ERROR(__FILE__, __LINE__); } } } template <typename ValueType> bool GPUAcceleratorMatrixHYB<ValueType>::ConvertFrom(const BaseMatrix<ValueType> &mat) { this->Clear(); // empty matrix is empty matrix if (mat.get_nnz() == 0) return true; const GPUAcceleratorMatrixHYB<ValueType> *cast_mat_hyb; if (cast_mat_hyb = dynamic_cast<const GPUAcceleratorMatrixHYB<ValueType>*> (&mat)) { this->CopyFrom(*cast_mat_hyb); return true; } const GPUAcceleratorMatrixCSR<ValueType> *cast_mat_csr; if (cast_mat_csr = dynamic_cast<const GPUAcceleratorMatrixCSR<ValueType>*> (&mat)) { this->Clear(); int nrow = cast_mat_csr->get_nrow(); int ncol = cast_mat_csr->get_ncol(); int max_row = cast_mat_csr->get_nnz() / nrow; // get nnz per row for COO part int *nnz_coo = NULL; dim3 BlockSize(this->local_backend_.GPU_block_size); dim3 GridSize(nrow / this->local_backend_.GPU_block_size + 1); allocate_gpu<int>(nrow, &nnz_coo); hipLaunchKernelGGL(( kernel_ell_nnz_coo<int>) , dim3(GridSize), dim3(BlockSize), 0, 0, nrow, max_row, cast_mat_csr->mat_.row_offset, nnz_coo); CHECK_CUDA_ERROR(__FILE__, __LINE__); // get nnz for COO part by summing up nnz per row array int *d_buffer = NULL; int *h_buffer = NULL; int GROUP_SIZE; int LOCAL_SIZE; int FinalReduceSize; allocate_gpu<int>(this->local_backend_.GPU_wrap * 4, &d_buffer); dim3 GridSize2(this->local_backend_.GPU_wrap * 4); GROUP_SIZE = ( size_t( ( size_t( nrow / ( this->local_backend_.GPU_wrap * 4 ) ) + 1 ) / this->local_backend_.GPU_block_size ) + 1 ) * this->local_backend_.GPU_block_size; LOCAL_SIZE = GROUP_SIZE / this->local_backend_.GPU_block_size; hipLaunchKernelGGL(( kernel_reduce<int, int, 256>) , dim3(GridSize2), dim3(BlockSize), 0, 0, nrow, nnz_coo, d_buffer, GROUP_SIZE, LOCAL_SIZE); CHECK_CUDA_ERROR(__FILE__, __LINE__); FinalReduceSize = this->local_backend_.GPU_wrap * 4; allocate_host(FinalReduceSize, &h_buffer); hipMemcpy(h_buffer, // dst d_buffer, // src FinalReduceSize*sizeof(int), // size 
hipMemcpyDeviceToHost); CHECK_CUDA_ERROR(__FILE__, __LINE__); free_gpu<int>(&d_buffer); int num_nnz_coo = 0; for ( int i=0; i<FinalReduceSize; ++i ) num_nnz_coo += h_buffer[i]; free_host(&h_buffer); // allocate ELL and COO matrices int num_nnz_ell = max_row * nrow; assert(num_nnz_ell > 0); assert(num_nnz_coo > 0); this->AllocateHYB(num_nnz_ell, num_nnz_coo, max_row, nrow, ncol); hipMemset(this->mat_.ELL.col, -1, num_nnz_ell*sizeof(int)); CHECK_CUDA_ERROR(__FILE__, __LINE__); // copy up to num_cols_per_row values of row i into the ELL int *nnz_ell = NULL; allocate_gpu<int>(nrow, &nnz_ell); hipLaunchKernelGGL(( kernel_ell_fill_ell<ValueType, int>) , dim3(GridSize), dim3(BlockSize), 0, 0, nrow, max_row, cast_mat_csr->mat_.row_offset, cast_mat_csr->mat_.col, cast_mat_csr->mat_.val, this->mat_.ELL.col, this->mat_.ELL.val, nnz_ell); CHECK_CUDA_ERROR(__FILE__, __LINE__); // TODO currently performing partial sum on host allocate_host(nrow, &h_buffer); hipMemcpy(h_buffer, // dst nnz_ell, // src nrow*sizeof(int), // size hipMemcpyDeviceToHost); for (int i=1; i<nrow; ++i) h_buffer[i] += h_buffer[i-1]; hipMemcpy(nnz_ell, // dst h_buffer, // src nrow*sizeof(int), // size hipMemcpyHostToDevice); free_host(&h_buffer); // end TODO // copy any remaining values in row i into the COO hipLaunchKernelGGL(( kernel_ell_fill_coo<ValueType, int>) , dim3(GridSize), dim3(BlockSize), 0, 0, nrow, cast_mat_csr->mat_.row_offset, cast_mat_csr->mat_.col, cast_mat_csr->mat_.val, nnz_coo, nnz_ell, this->mat_.COO.row, this->mat_.COO.col, this->mat_.COO.val); CHECK_CUDA_ERROR(__FILE__, __LINE__); free_gpu<int>(&nnz_ell); free_gpu<int>(&nnz_coo); this->nrow_ = cast_mat_csr->get_nrow(); this->ncol_ = cast_mat_csr->get_ncol(); this->nnz_ = num_nnz_ell + num_nnz_coo; this->mat_.ELL.max_row = max_row; this->ell_nnz_ = num_nnz_ell; this->coo_nnz_ = num_nnz_coo; return true; } return false; } template <typename ValueType> void GPUAcceleratorMatrixHYB<ValueType>::Apply(const BaseVector<ValueType> &in, BaseVector<ValueType> *out) const { if (this->get_nnz() > 0) { assert(in. get_size() >= 0); assert(out->get_size() >= 0); assert(in. get_size() == this->get_ncol()); assert(out->get_size() == this->get_nrow()); const GPUAcceleratorVector<ValueType> *cast_in = dynamic_cast<const GPUAcceleratorVector<ValueType>*> (&in) ; GPUAcceleratorVector<ValueType> *cast_out = dynamic_cast< GPUAcceleratorVector<ValueType>*> (out) ; assert(cast_in != NULL); assert(cast_out!= NULL); // ELL if (this->get_ell_nnz() > 0) { int nrow = this->get_nrow(); int ncol = this->get_ncol(); int max_row = this->get_ell_max_row(); dim3 BlockSize(this->local_backend_.GPU_block_size); dim3 GridSize(nrow / this->local_backend_.GPU_block_size + 1); hipLaunchKernelGGL(( kernel_ell_spmv<ValueType, int>) , dim3(GridSize), dim3(BlockSize), 0, 0, nrow, ncol, max_row, this->mat_.ELL.col, this->mat_.ELL.val, cast_in->vec_, cast_out->vec_ ); CHECK_CUDA_ERROR(__FILE__, __LINE__); } if (this->get_coo_nnz() > 0) { // do not support super small matrices assert(this->get_coo_nnz() > this->local_backend_.GPU_wrap); // ---------------------------------------------------------- // Modified and adopted from CUSP 0.3.1, // http://code.google.com/p/cusp-library/ // NVIDIA, APACHE LICENSE 2.0 // ---------------------------------------------------------- // see __spmv_coo_flat(...) 
// ---------------------------------------------------------- // CHANGELOG // - adopted interface // ---------------------------------------------------------- const unsigned int BLOCK_SIZE = this->local_backend_.GPU_block_size; // const unsigned int MAX_BLOCKS = this->local_backend_.GPU_max_blocks; const unsigned int MAX_BLOCKS = 32; // cusp::detail::device::arch::max_active_blocks(spmv_coo_flat_kernel<IndexType, ValueType, BLOCK_SIZE, UseCache>, BLOCK_SIZE, (size_t) 0); const unsigned int WARPS_PER_BLOCK = BLOCK_SIZE / this->local_backend_.GPU_wrap; const unsigned int num_units = this->get_coo_nnz() / this->local_backend_.GPU_wrap; const unsigned int num_warps = ::min(num_units, WARPS_PER_BLOCK * MAX_BLOCKS); const unsigned int num_blocks = (num_warps + (WARPS_PER_BLOCK-1)) / WARPS_PER_BLOCK; // (N + (granularity - 1)) / granularity const unsigned int num_iters = (num_units + (num_warps-1)) / num_warps; const unsigned int interval_size = this->local_backend_.GPU_wrap * num_iters; const int tail = num_units * this->local_backend_.GPU_wrap; // do the last few nonzeros separately (fewer than this->local_backend_.GPU_wrap elements) const unsigned int active_warps = (interval_size == 0) ? 0 : ((tail + (interval_size-1))/interval_size); int *temp_rows = NULL; ValueType *temp_vals = NULL; allocate_gpu(active_warps, &temp_rows); allocate_gpu(active_warps, &temp_vals); CHECK_CUDA_ERROR(__FILE__, __LINE__); // TODO // BLOCK_SIZE == 256 // WARP_SIZE == 32 hipLaunchKernelGGL(( kernel_spmv_coo_flat<int, ValueType, 256, 32>) , dim3(num_blocks), dim3(BLOCK_SIZE), 0, 0, tail, interval_size, this->mat_.COO.row, this->mat_.COO.col, this->mat_.COO.val, ValueType(1.0), cast_in->vec_, cast_out->vec_, temp_rows, temp_vals); CHECK_CUDA_ERROR(__FILE__, __LINE__); // TODO // BLOCK_SIZE == 256 hipLaunchKernelGGL(( kernel_spmv_coo_reduce_update<int, ValueType, 256>) , dim3(1), dim3(BLOCK_SIZE), 0, 0, active_warps, temp_rows, temp_vals, cast_out->vec_); CHECK_CUDA_ERROR(__FILE__, __LINE__); hipLaunchKernelGGL(( kernel_spmv_coo_serial<int, ValueType>) , dim3(1),dim3(1), 0, 0, this->get_coo_nnz() - tail, this->mat_.COO.row + tail, this->mat_.COO.col + tail, this->mat_.COO.val + tail, ValueType(1.0), cast_in->vec_, cast_out->vec_); CHECK_CUDA_ERROR(__FILE__, __LINE__); free_gpu(&temp_rows); free_gpu(&temp_vals); } } } template <typename ValueType> void GPUAcceleratorMatrixHYB<ValueType>::ApplyAdd(const BaseVector<ValueType> &in, const ValueType scalar, BaseVector<ValueType> *out) const { if (this->get_nnz() > 0) { assert(in. get_size() >= 0); assert(out->get_size() >= 0); assert(in. 
get_size() == this->get_ncol()); assert(out->get_size() == this->get_nrow()); const GPUAcceleratorVector<ValueType> *cast_in = dynamic_cast<const GPUAcceleratorVector<ValueType>*> (&in) ; GPUAcceleratorVector<ValueType> *cast_out = dynamic_cast< GPUAcceleratorVector<ValueType>*> (out) ; assert(cast_in != NULL); assert(cast_out!= NULL); // ELL if (this->get_ell_nnz() > 0) { int nrow = this->get_nrow(); int ncol = this->get_ncol(); int max_row = this->get_ell_max_row(); dim3 BlockSize(this->local_backend_.GPU_block_size); dim3 GridSize(nrow / this->local_backend_.GPU_block_size + 1); hipLaunchKernelGGL(( kernel_ell_add_spmv<ValueType, int>) , dim3(GridSize), dim3(BlockSize), 0, 0, nrow, ncol, max_row, this->mat_.ELL.col, this->mat_.ELL.val, scalar, cast_in->vec_, cast_out->vec_ ); CHECK_CUDA_ERROR(__FILE__, __LINE__); } if (this->get_coo_nnz() > 0) { // do not support super small matrices assert(this->get_coo_nnz() > this->local_backend_.GPU_wrap); // ---------------------------------------------------------- // Modified and adopted from CUSP 0.3.1, // http://code.google.com/p/cusp-library/ // NVIDIA, APACHE LICENSE 2.0 // ---------------------------------------------------------- // see __spmv_coo_flat(...) // ---------------------------------------------------------- // CHANGELOG // - adopted interface // ---------------------------------------------------------- const unsigned int BLOCK_SIZE = this->local_backend_.GPU_block_size; // const unsigned int MAX_BLOCKS = this->local_backend_.GPU_max_blocks; const unsigned int MAX_BLOCKS = 32; // cusp::detail::device::arch::max_active_blocks(spmv_coo_flat_kernel<IndexType, ValueType, BLOCK_SIZE, UseCache>, BLOCK_SIZE, (size_t) 0); const unsigned int WARPS_PER_BLOCK = BLOCK_SIZE / this->local_backend_.GPU_wrap; const unsigned int num_units = this->get_coo_nnz() / this->local_backend_.GPU_wrap; const unsigned int num_warps = ::min(num_units, WARPS_PER_BLOCK * MAX_BLOCKS); const unsigned int num_blocks = (num_warps + (WARPS_PER_BLOCK-1)) / WARPS_PER_BLOCK; // (N + (granularity - 1)) / granularity const unsigned int num_iters = (num_units + (num_warps-1)) / num_warps; const unsigned int interval_size = this->local_backend_.GPU_wrap * num_iters; const int tail = num_units * this->local_backend_.GPU_wrap; // do the last few nonzeros separately (fewer than this->local_backend_.GPU_wrap elements) const unsigned int active_warps = (interval_size == 0) ? 
0 : ((tail + (interval_size-1))/interval_size); int *temp_rows = NULL; ValueType *temp_vals = NULL; allocate_gpu(active_warps, &temp_rows); allocate_gpu(active_warps, &temp_vals); CHECK_CUDA_ERROR(__FILE__, __LINE__); // TODO // BLOCK_SIZE == 256 // WARP_SIZE == 32 hipLaunchKernelGGL(( kernel_spmv_coo_flat<int, ValueType, 256, 32>) , dim3(num_blocks), dim3(BLOCK_SIZE), 0, 0, tail, interval_size, this->mat_.COO.row, this->mat_.COO.col, this->mat_.COO.val, scalar, cast_in->vec_, cast_out->vec_, temp_rows, temp_vals); CHECK_CUDA_ERROR(__FILE__, __LINE__); // TODO // BLOCK_SIZE == 256 hipLaunchKernelGGL(( kernel_spmv_coo_reduce_update<int, ValueType, 256>) , dim3(1), dim3(BLOCK_SIZE), 0, 0, active_warps, temp_rows, temp_vals, cast_out->vec_); CHECK_CUDA_ERROR(__FILE__, __LINE__); hipLaunchKernelGGL(( kernel_spmv_coo_serial<int, ValueType>) , dim3(1),dim3(1), 0, 0, this->get_coo_nnz() - tail, this->mat_.COO.row + tail, this->mat_.COO.col + tail, this->mat_.COO.val + tail, scalar, cast_in->vec_, cast_out->vec_); CHECK_CUDA_ERROR(__FILE__, __LINE__); free_gpu(&temp_rows); free_gpu(&temp_vals); } } } template class GPUAcceleratorMatrixHYB<double>; template class GPUAcceleratorMatrixHYB<float>; }
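A note on the CSR-to-HYB split performed by ConvertFrom above: the ELL width is chosen as max_row = nnz / nrow, each row keeps up to max_row entries in the ELL part, and the remainder spills into COO. The host-side sketch below reproduces that split on hypothetical data (illustration only; in the real code the per-row spill counts come from kernel_ell_nnz_coo on the GPU, and the exact per-row formula is an assumption here).

#include <algorithm>
#include <cstdio>

// Sketch of the ELL/COO split in ConvertFrom (assumed per-row spill rule:
// anything beyond max_row entries in a row goes to the COO part).
int main() {
  // Hypothetical CSR row offsets: 4 rows, 10 nonzeros.
  const int row_offset[] = {0, 1, 5, 7, 10};
  const int nrow = 4;
  const int nnz  = row_offset[nrow];
  const int max_row = nnz / nrow;                 // ELL width: 10 / 4 = 2

  int ell_nnz = max_row * nrow;                   // 8 slots, padded with col = -1
  int coo_nnz = 0;
  for (int i = 0; i < nrow; ++i) {
    int row_nnz = row_offset[i + 1] - row_offset[i];
    coo_nnz += std::max(0, row_nnz - max_row);    // overflow spills to COO
  }
  // Row 1 holds 4 entries so 2 spill; row 3 holds 3 so 1 spills: coo_nnz = 3.
  std::printf("max_row=%d ell_nnz=%d coo_nnz=%d\n", max_row, ell_nnz, coo_nnz);
  return 0;
}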
3f95e5e4e1337a170b91caffcd99e57312966d7d.cu
// ************************************************************************* // // PARALUTION www.paralution.com // // Copyright (C) 2012-2014 Dimitar Lukarski // // This program is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. // // This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // // You should have received a copy of the GNU General Public License // along with this program. If not, see <http://www.gnu.org/licenses/>. // // ************************************************************************* // PARALUTION version 0.7.0 #include "gpu_matrix_csr.hpp" #include "gpu_matrix_coo.hpp" #include "gpu_matrix_dia.hpp" #include "gpu_matrix_ell.hpp" #include "gpu_matrix_hyb.hpp" #include "gpu_matrix_mcsr.hpp" #include "gpu_matrix_bcsr.hpp" #include "gpu_matrix_dense.hpp" #include "gpu_vector.hpp" #include "../host/host_matrix_hyb.hpp" #include "../base_matrix.hpp" #include "../base_vector.hpp" #include "../backend_manager.hpp" #include "../../utils/log.hpp" #include "../../utils/allocate_free.hpp" #include "gpu_utils.hpp" #include "cuda_kernels_general.hpp" #include "cuda_kernels_hyb.hpp" #include "cuda_kernels_vector.hpp" #include "gpu_allocate_free.hpp" #include "../matrix_formats_ind.hpp" #include <assert.h> #include <cuda.h> #include <cusparse_v2.h> namespace paralution { template <typename ValueType> GPUAcceleratorMatrixHYB<ValueType>::GPUAcceleratorMatrixHYB() { // no default constructors LOG_INFO("no default constructor"); FATAL_ERROR(__FILE__, __LINE__); } template <typename ValueType> GPUAcceleratorMatrixHYB<ValueType>::GPUAcceleratorMatrixHYB(const Paralution_Backend_Descriptor local_backend) { LOG_DEBUG(this, "GPUAcceleratorMatrixHYB::GPUAcceleratorMatrixHYB()", "constructor with local_backend"); this->mat_.ELL.val = NULL; this->mat_.ELL.col = NULL; this->mat_.ELL.max_row = 0; this->mat_.COO.row = NULL; this->mat_.COO.col = NULL; this->mat_.COO.val = NULL; this->ell_nnz_ = 0; this->coo_nnz_ = 0; this->set_backend(local_backend); CHECK_CUDA_ERROR(__FILE__, __LINE__); } template <typename ValueType> GPUAcceleratorMatrixHYB<ValueType>::~GPUAcceleratorMatrixHYB() { LOG_DEBUG(this, "GPUAcceleratorMatrixHYB::~GPUAcceleratorMatrixHYB()", "destructor"); this->Clear(); } template <typename ValueType> void GPUAcceleratorMatrixHYB<ValueType>::info(void) const { LOG_INFO("GPUAcceleratorMatrixHYB<ValueType>"); } template <typename ValueType> void GPUAcceleratorMatrixHYB<ValueType>::AllocateHYB(const int ell_nnz, const int coo_nnz, const int ell_max_row, const int nrow, const int ncol) { assert( ell_nnz >= 0); assert( coo_nnz >= 0); assert( ell_max_row >= 0); assert( ncol >= 0); assert( nrow >= 0); if (this->get_nnz() > 0) this->Clear(); if (ell_nnz + coo_nnz > 0) { // ELL assert(ell_nnz == ell_max_row*nrow); allocate_gpu(ell_nnz, &this->mat_.ELL.val); allocate_gpu(ell_nnz, &this->mat_.ELL.col); set_to_zero_gpu(this->local_backend_.GPU_block_size, this->local_backend_.GPU_max_threads, ell_nnz, this->mat_.ELL.val); set_to_zero_gpu(this->local_backend_.GPU_block_size, this->local_backend_.GPU_max_threads, ell_nnz, this->mat_.ELL.col); this->mat_.ELL.max_row = ell_max_row; this->ell_nnz_ = ell_nnz; // COO 
allocate_gpu(coo_nnz, &this->mat_.COO.row); allocate_gpu(coo_nnz, &this->mat_.COO.col); allocate_gpu(coo_nnz, &this->mat_.COO.val); set_to_zero_gpu(this->local_backend_.GPU_block_size, this->local_backend_.GPU_max_threads, coo_nnz, this->mat_.COO.row); set_to_zero_gpu(this->local_backend_.GPU_block_size, this->local_backend_.GPU_max_threads, coo_nnz, this->mat_.COO.col); set_to_zero_gpu(this->local_backend_.GPU_block_size, this->local_backend_.GPU_max_threads, coo_nnz, this->mat_.COO.val); this->coo_nnz_ = coo_nnz; this->nrow_ = nrow; this->ncol_ = ncol; this->nnz_ = ell_nnz + coo_nnz; } } template <typename ValueType> void GPUAcceleratorMatrixHYB<ValueType>::Clear() { if (this->get_nnz() > 0) { free_gpu(&this->mat_.COO.row); free_gpu(&this->mat_.COO.col); free_gpu(&this->mat_.COO.val); free_gpu(&this->mat_.ELL.val); free_gpu(&this->mat_.ELL.col); this->ell_nnz_ = 0; this->coo_nnz_ = 0; this->mat_.ELL.max_row = 0; this->nrow_ = 0; this->ncol_ = 0; this->nnz_ = 0; } } template <typename ValueType> void GPUAcceleratorMatrixHYB<ValueType>::CopyFromHost(const HostMatrix<ValueType> &src) { const HostMatrixHYB<ValueType> *cast_mat; // copy only in the same format assert(this->get_mat_format() == src.get_mat_format()); // CPU to GPU copy if (cast_mat = dynamic_cast<const HostMatrixHYB<ValueType>*> (&src)) { if (this->get_nnz() == 0) this->AllocateHYB(cast_mat->get_ell_nnz(), cast_mat->get_coo_nnz(), cast_mat->get_ell_max_row(), cast_mat->get_nrow(), cast_mat->get_ncol()); assert(this->get_nnz() == src.get_nnz()); assert(this->get_nrow() == src.get_nrow()); assert(this->get_ncol() == src.get_ncol()); if (this->get_ell_nnz() > 0) { // ELL cudaMemcpy(this->mat_.ELL.col, // dst cast_mat->mat_.ELL.col, // src this->get_ell_nnz()*sizeof(int), // size cudaMemcpyHostToDevice); CHECK_CUDA_ERROR(__FILE__, __LINE__); cudaMemcpy(this->mat_.ELL.val, // dst cast_mat->mat_.ELL.val, // src this->get_ell_nnz()*sizeof(ValueType), // size cudaMemcpyHostToDevice); CHECK_CUDA_ERROR(__FILE__, __LINE__); } if (this->get_coo_nnz() > 0) { // COO cudaMemcpy(this->mat_.COO.row, // dst cast_mat->mat_.COO.row, // src (this->get_coo_nnz())*sizeof(int), // size cudaMemcpyHostToDevice); CHECK_CUDA_ERROR(__FILE__, __LINE__); cudaMemcpy(this->mat_.COO.col, // dst cast_mat->mat_.COO.col, // src this->get_coo_nnz()*sizeof(int), // size cudaMemcpyHostToDevice); CHECK_CUDA_ERROR(__FILE__, __LINE__); cudaMemcpy(this->mat_.COO.val, // dst cast_mat->mat_.COO.val, // src this->get_coo_nnz()*sizeof(ValueType), // size cudaMemcpyHostToDevice); CHECK_CUDA_ERROR(__FILE__, __LINE__); } } else { LOG_INFO("Error unsupported GPU matrix type"); this->info(); src.info(); FATAL_ERROR(__FILE__, __LINE__); } } template <typename ValueType> void GPUAcceleratorMatrixHYB<ValueType>::CopyToHost(HostMatrix<ValueType> *dst) const { HostMatrixHYB<ValueType> *cast_mat; // copy only in the same format assert(this->get_mat_format() == dst->get_mat_format()); // GPU to CPU copy if (cast_mat = dynamic_cast<HostMatrixHYB<ValueType>*> (dst)) { cast_mat->set_backend(this->local_backend_); if (dst->get_nnz() == 0) cast_mat->AllocateHYB(this->get_ell_nnz(), this->get_coo_nnz(), this->get_ell_max_row(), this->get_nrow(), this->get_ncol()); assert(this->get_nnz() == dst->get_nnz()); assert(this->get_nrow() == dst->get_nrow()); assert(this->get_ncol() == dst->get_ncol()); if (this->get_ell_nnz() > 0) { // ELL cudaMemcpy(cast_mat->mat_.ELL.col, // dst this->mat_.ELL.col, // src this->get_ell_nnz()*sizeof(int), // size cudaMemcpyDeviceToHost); CHECK_CUDA_ERROR(__FILE__, 
__LINE__); cudaMemcpy(cast_mat->mat_.ELL.val, // dst this->mat_.ELL.val, // src this->get_ell_nnz()*sizeof(ValueType), // size cudaMemcpyDeviceToHost); CHECK_CUDA_ERROR(__FILE__, __LINE__); } if (this->get_coo_nnz() > 0) { // COO cudaMemcpy(cast_mat->mat_.COO.row, // dst this->mat_.COO.row, // src this->get_coo_nnz()*sizeof(int), // size cudaMemcpyDeviceToHost); CHECK_CUDA_ERROR(__FILE__, __LINE__); cudaMemcpy(cast_mat->mat_.COO.col, // dst this->mat_.COO.col, // src this->get_coo_nnz()*sizeof(int), // size cudaMemcpyDeviceToHost); CHECK_CUDA_ERROR(__FILE__, __LINE__); cudaMemcpy(cast_mat->mat_.COO.val, // dst this->mat_.COO.val, // src this->get_coo_nnz()*sizeof(ValueType), // size cudaMemcpyDeviceToHost); CHECK_CUDA_ERROR(__FILE__, __LINE__); } } else { LOG_INFO("Error unsupported GPU matrix type"); this->info(); dst->info(); FATAL_ERROR(__FILE__, __LINE__); } } template <typename ValueType> void GPUAcceleratorMatrixHYB<ValueType>::CopyFrom(const BaseMatrix<ValueType> &src) { const GPUAcceleratorMatrixHYB<ValueType> *gpu_cast_mat; const HostMatrix<ValueType> *host_cast_mat; // copy only in the same format assert(this->get_mat_format() == src.get_mat_format()); // GPU to GPU copy if (gpu_cast_mat = dynamic_cast<const GPUAcceleratorMatrixHYB<ValueType>*> (&src)) { if (this->get_nnz() == 0) this->AllocateHYB(gpu_cast_mat->get_ell_nnz(), gpu_cast_mat->get_coo_nnz(), gpu_cast_mat->get_ell_max_row(), gpu_cast_mat->get_nrow(), gpu_cast_mat->get_ncol()); assert(this->get_nnz() == src.get_nnz()); assert(this->get_nrow() == src.get_nrow()); assert(this->get_ncol() == src.get_ncol()); if (this->get_ell_nnz() > 0) { // ELL cudaMemcpy(this->mat_.ELL.col, // dst gpu_cast_mat->mat_.ELL.col, // src this->get_ell_nnz()*sizeof(int), // size cudaMemcpyDeviceToDevice); CHECK_CUDA_ERROR(__FILE__, __LINE__); cudaMemcpy(this->mat_.ELL.val, // dst gpu_cast_mat->mat_.ELL.val, // src this->get_ell_nnz()*sizeof(ValueType), // size cudaMemcpyDeviceToDevice); CHECK_CUDA_ERROR(__FILE__, __LINE__); } if (this->get_coo_nnz() > 0) { // COO cudaMemcpy(this->mat_.COO.row, // dst gpu_cast_mat->mat_.COO.row, // src (this->get_coo_nnz())*sizeof(int), // size cudaMemcpyDeviceToDevice); CHECK_CUDA_ERROR(__FILE__, __LINE__); cudaMemcpy(this->mat_.COO.col, // dst gpu_cast_mat->mat_.COO.col, // src this->get_coo_nnz()*sizeof(int), // size cudaMemcpyDeviceToDevice); CHECK_CUDA_ERROR(__FILE__, __LINE__); cudaMemcpy(this->mat_.COO.val, // dst gpu_cast_mat->mat_.COO.val, // src this->get_coo_nnz()*sizeof(ValueType), // size cudaMemcpyDeviceToDevice); CHECK_CUDA_ERROR(__FILE__, __LINE__); } } else { //CPU to GPU if (host_cast_mat = dynamic_cast<const HostMatrix<ValueType>*> (&src)) { this->CopyFromHost(*host_cast_mat); } else { LOG_INFO("Error unsupported GPU matrix type"); this->info(); src.info(); FATAL_ERROR(__FILE__, __LINE__); } } } template <typename ValueType> void GPUAcceleratorMatrixHYB<ValueType>::CopyTo(BaseMatrix<ValueType> *dst) const { GPUAcceleratorMatrixHYB<ValueType> *gpu_cast_mat; HostMatrix<ValueType> *host_cast_mat; // copy only in the same format assert(this->get_mat_format() == dst->get_mat_format()); // GPU to GPU copy if (gpu_cast_mat = dynamic_cast<GPUAcceleratorMatrixHYB<ValueType>*> (dst)) { gpu_cast_mat->set_backend(this->local_backend_); if (this->get_nnz() == 0) gpu_cast_mat->AllocateHYB(this->get_ell_nnz(), this->get_coo_nnz(), this->get_ell_max_row(), this->get_nrow(), this->get_ncol()); assert(this->get_nnz() == dst->get_nnz()); assert(this->get_nrow() == dst->get_nrow()); assert(this->get_ncol() == 
dst->get_ncol()); if (this->get_ell_nnz() > 0) { // ELL cudaMemcpy(gpu_cast_mat->mat_.ELL.col, // dst this->mat_.ELL.col, // src this->get_ell_nnz()*sizeof(int), // size cudaMemcpyDeviceToDevice); CHECK_CUDA_ERROR(__FILE__, __LINE__); cudaMemcpy(gpu_cast_mat->mat_.ELL.val, // dst this->mat_.ELL.val, // src this->get_ell_nnz()*sizeof(ValueType), // size cudaMemcpyDeviceToDevice); CHECK_CUDA_ERROR(__FILE__, __LINE__); } if (this->get_coo_nnz() > 0) { // COO cudaMemcpy(gpu_cast_mat->mat_.COO.row, // dst this->mat_.COO.row, // src this->get_coo_nnz()*sizeof(int), // size cudaMemcpyDeviceToDevice); CHECK_CUDA_ERROR(__FILE__, __LINE__); cudaMemcpy(gpu_cast_mat->mat_.COO.col, // dst this->mat_.COO.col, // src this->get_coo_nnz()*sizeof(int), // size cudaMemcpyDeviceToDevice); CHECK_CUDA_ERROR(__FILE__, __LINE__); cudaMemcpy(gpu_cast_mat->mat_.COO.val, // dst this->mat_.COO.val, // src this->get_coo_nnz()*sizeof(ValueType), // size cudaMemcpyDeviceToDevice); CHECK_CUDA_ERROR(__FILE__, __LINE__); } } else { //GPU to CPU if (host_cast_mat = dynamic_cast<HostMatrix<ValueType>*> (dst)) { this->CopyToHost(host_cast_mat); } else { LOG_INFO("Error unsupported GPU matrix type"); this->info(); dst->info(); FATAL_ERROR(__FILE__, __LINE__); } } } template <typename ValueType> void GPUAcceleratorMatrixHYB<ValueType>::CopyFromHostAsync(const HostMatrix<ValueType> &src) { const HostMatrixHYB<ValueType> *cast_mat; // copy only in the same format assert(this->get_mat_format() == src.get_mat_format()); // CPU to GPU copy if (cast_mat = dynamic_cast<const HostMatrixHYB<ValueType>*> (&src)) { if (this->get_nnz() == 0) this->AllocateHYB(cast_mat->get_ell_nnz(), cast_mat->get_coo_nnz(), cast_mat->get_ell_max_row(), cast_mat->get_nrow(), cast_mat->get_ncol()); assert(this->get_nnz() == src.get_nnz()); assert(this->get_nrow() == src.get_nrow()); assert(this->get_ncol() == src.get_ncol()); if (this->get_ell_nnz() > 0) { // ELL cudaMemcpyAsync(this->mat_.ELL.col, // dst cast_mat->mat_.ELL.col, // src this->get_ell_nnz()*sizeof(int), // size cudaMemcpyHostToDevice); CHECK_CUDA_ERROR(__FILE__, __LINE__); cudaMemcpyAsync(this->mat_.ELL.val, // dst cast_mat->mat_.ELL.val, // src this->get_ell_nnz()*sizeof(ValueType), // size cudaMemcpyHostToDevice); CHECK_CUDA_ERROR(__FILE__, __LINE__); } if (this->get_coo_nnz() > 0) { // COO cudaMemcpyAsync(this->mat_.COO.row, // dst cast_mat->mat_.COO.row, // src (this->get_coo_nnz())*sizeof(int), // size cudaMemcpyHostToDevice); CHECK_CUDA_ERROR(__FILE__, __LINE__); cudaMemcpyAsync(this->mat_.COO.col, // dst cast_mat->mat_.COO.col, // src this->get_coo_nnz()*sizeof(int), // size cudaMemcpyHostToDevice); CHECK_CUDA_ERROR(__FILE__, __LINE__); cudaMemcpyAsync(this->mat_.COO.val, // dst cast_mat->mat_.COO.val, // src this->get_coo_nnz()*sizeof(ValueType), // size cudaMemcpyHostToDevice); CHECK_CUDA_ERROR(__FILE__, __LINE__); } } else { LOG_INFO("Error unsupported GPU matrix type"); this->info(); src.info(); FATAL_ERROR(__FILE__, __LINE__); } } template <typename ValueType> void GPUAcceleratorMatrixHYB<ValueType>::CopyToHostAsync(HostMatrix<ValueType> *dst) const { HostMatrixHYB<ValueType> *cast_mat; // copy only in the same format assert(this->get_mat_format() == dst->get_mat_format()); // GPU to CPU copy if (cast_mat = dynamic_cast<HostMatrixHYB<ValueType>*> (dst)) { cast_mat->set_backend(this->local_backend_); if (dst->get_nnz() == 0) cast_mat->AllocateHYB(this->get_ell_nnz(), this->get_coo_nnz(), this->get_ell_max_row(), this->get_nrow(), this->get_ncol()); assert(this->get_nnz() == 
dst->get_nnz()); assert(this->get_nrow() == dst->get_nrow()); assert(this->get_ncol() == dst->get_ncol()); if (this->get_ell_nnz() > 0) { // ELL cudaMemcpyAsync(cast_mat->mat_.ELL.col, // dst this->mat_.ELL.col, // src this->get_ell_nnz()*sizeof(int), // size cudaMemcpyDeviceToHost); CHECK_CUDA_ERROR(__FILE__, __LINE__); cudaMemcpyAsync(cast_mat->mat_.ELL.val, // dst this->mat_.ELL.val, // src this->get_ell_nnz()*sizeof(ValueType), // size cudaMemcpyDeviceToHost); CHECK_CUDA_ERROR(__FILE__, __LINE__); } if (this->get_coo_nnz() > 0) { // COO cudaMemcpyAsync(cast_mat->mat_.COO.row, // dst this->mat_.COO.row, // src this->get_coo_nnz()*sizeof(int), // size cudaMemcpyDeviceToHost); CHECK_CUDA_ERROR(__FILE__, __LINE__); cudaMemcpyAsync(cast_mat->mat_.COO.col, // dst this->mat_.COO.col, // src this->get_coo_nnz()*sizeof(int), // size cudaMemcpyDeviceToHost); CHECK_CUDA_ERROR(__FILE__, __LINE__); cudaMemcpyAsync(cast_mat->mat_.COO.val, // dst this->mat_.COO.val, // src this->get_coo_nnz()*sizeof(ValueType), // size cudaMemcpyDeviceToHost); CHECK_CUDA_ERROR(__FILE__, __LINE__); } } else { LOG_INFO("Error unsupported GPU matrix type"); this->info(); dst->info(); FATAL_ERROR(__FILE__, __LINE__); } } template <typename ValueType> void GPUAcceleratorMatrixHYB<ValueType>::CopyFromAsync(const BaseMatrix<ValueType> &src) { const GPUAcceleratorMatrixHYB<ValueType> *gpu_cast_mat; const HostMatrix<ValueType> *host_cast_mat; // copy only in the same format assert(this->get_mat_format() == src.get_mat_format()); // GPU to GPU copy if (gpu_cast_mat = dynamic_cast<const GPUAcceleratorMatrixHYB<ValueType>*> (&src)) { if (this->get_nnz() == 0) this->AllocateHYB(gpu_cast_mat->get_ell_nnz(), gpu_cast_mat->get_coo_nnz(), gpu_cast_mat->get_ell_max_row(), gpu_cast_mat->get_nrow(), gpu_cast_mat->get_ncol()); assert(this->get_nnz() == src.get_nnz()); assert(this->get_nrow() == src.get_nrow()); assert(this->get_ncol() == src.get_ncol()); if (this->get_ell_nnz() > 0) { // ELL cudaMemcpy(this->mat_.ELL.col, // dst gpu_cast_mat->mat_.ELL.col, // src this->get_ell_nnz()*sizeof(int), // size cudaMemcpyDeviceToDevice); CHECK_CUDA_ERROR(__FILE__, __LINE__); cudaMemcpy(this->mat_.ELL.val, // dst gpu_cast_mat->mat_.ELL.val, // src this->get_ell_nnz()*sizeof(ValueType), // size cudaMemcpyDeviceToDevice); CHECK_CUDA_ERROR(__FILE__, __LINE__); } if (this->get_coo_nnz() > 0) { // COO cudaMemcpy(this->mat_.COO.row, // dst gpu_cast_mat->mat_.COO.row, // src (this->get_coo_nnz())*sizeof(int), // size cudaMemcpyDeviceToDevice); CHECK_CUDA_ERROR(__FILE__, __LINE__); cudaMemcpy(this->mat_.COO.col, // dst gpu_cast_mat->mat_.COO.col, // src this->get_coo_nnz()*sizeof(int), // size cudaMemcpyDeviceToDevice); CHECK_CUDA_ERROR(__FILE__, __LINE__); cudaMemcpy(this->mat_.COO.val, // dst gpu_cast_mat->mat_.COO.val, // src this->get_coo_nnz()*sizeof(ValueType), // size cudaMemcpyDeviceToDevice); CHECK_CUDA_ERROR(__FILE__, __LINE__); } } else { //CPU to GPU if (host_cast_mat = dynamic_cast<const HostMatrix<ValueType>*> (&src)) { this->CopyFromHostAsync(*host_cast_mat); } else { LOG_INFO("Error unsupported GPU matrix type"); this->info(); src.info(); FATAL_ERROR(__FILE__, __LINE__); } } } template <typename ValueType> void GPUAcceleratorMatrixHYB<ValueType>::CopyToAsync(BaseMatrix<ValueType> *dst) const { GPUAcceleratorMatrixHYB<ValueType> *gpu_cast_mat; HostMatrix<ValueType> *host_cast_mat; // copy only in the same format assert(this->get_mat_format() == dst->get_mat_format()); // GPU to GPU copy if (gpu_cast_mat = 
dynamic_cast<GPUAcceleratorMatrixHYB<ValueType>*> (dst)) { gpu_cast_mat->set_backend(this->local_backend_); if (this->get_nnz() == 0) gpu_cast_mat->AllocateHYB(this->get_ell_nnz(), this->get_coo_nnz(), this->get_ell_max_row(), this->get_nrow(), this->get_ncol()); assert(this->get_nnz() == dst->get_nnz()); assert(this->get_nrow() == dst->get_nrow()); assert(this->get_ncol() == dst->get_ncol()); if (this->get_ell_nnz() > 0) { // ELL cudaMemcpy(gpu_cast_mat->mat_.ELL.col, // dst this->mat_.ELL.col, // src this->get_ell_nnz()*sizeof(int), // size cudaMemcpyDeviceToDevice); CHECK_CUDA_ERROR(__FILE__, __LINE__); cudaMemcpy(gpu_cast_mat->mat_.ELL.val, // dst this->mat_.ELL.val, // src this->get_ell_nnz()*sizeof(ValueType), // size cudaMemcpyDeviceToDevice); CHECK_CUDA_ERROR(__FILE__, __LINE__); } if (this->get_coo_nnz() > 0) { // COO cudaMemcpy(gpu_cast_mat->mat_.COO.row, // dst this->mat_.COO.row, // src this->get_coo_nnz()*sizeof(int), // size cudaMemcpyDeviceToDevice); CHECK_CUDA_ERROR(__FILE__, __LINE__); cudaMemcpy(gpu_cast_mat->mat_.COO.col, // dst this->mat_.COO.col, // src this->get_coo_nnz()*sizeof(int), // size cudaMemcpyDeviceToDevice); CHECK_CUDA_ERROR(__FILE__, __LINE__); cudaMemcpy(gpu_cast_mat->mat_.COO.val, // dst this->mat_.COO.val, // src this->get_coo_nnz()*sizeof(ValueType), // size cudaMemcpyDeviceToDevice); CHECK_CUDA_ERROR(__FILE__, __LINE__); } } else { //GPU to CPU if (host_cast_mat = dynamic_cast<HostMatrix<ValueType>*> (dst)) { this->CopyToHostAsync(host_cast_mat); } else { LOG_INFO("Error unsupported GPU matrix type"); this->info(); dst->info(); FATAL_ERROR(__FILE__, __LINE__); } } } template <typename ValueType> bool GPUAcceleratorMatrixHYB<ValueType>::ConvertFrom(const BaseMatrix<ValueType> &mat) { this->Clear(); // empty matrix is empty matrix if (mat.get_nnz() == 0) return true; const GPUAcceleratorMatrixHYB<ValueType> *cast_mat_hyb; if (cast_mat_hyb = dynamic_cast<const GPUAcceleratorMatrixHYB<ValueType>*> (&mat)) { this->CopyFrom(*cast_mat_hyb); return true; } const GPUAcceleratorMatrixCSR<ValueType> *cast_mat_csr; if (cast_mat_csr = dynamic_cast<const GPUAcceleratorMatrixCSR<ValueType>*> (&mat)) { this->Clear(); int nrow = cast_mat_csr->get_nrow(); int ncol = cast_mat_csr->get_ncol(); int max_row = cast_mat_csr->get_nnz() / nrow; // get nnz per row for COO part int *nnz_coo = NULL; dim3 BlockSize(this->local_backend_.GPU_block_size); dim3 GridSize(nrow / this->local_backend_.GPU_block_size + 1); allocate_gpu<int>(nrow, &nnz_coo); kernel_ell_nnz_coo<int> <<<GridSize, BlockSize>>> (nrow, max_row, cast_mat_csr->mat_.row_offset, nnz_coo); CHECK_CUDA_ERROR(__FILE__, __LINE__); // get nnz for COO part by summing up nnz per row array int *d_buffer = NULL; int *h_buffer = NULL; int GROUP_SIZE; int LOCAL_SIZE; int FinalReduceSize; allocate_gpu<int>(this->local_backend_.GPU_wrap * 4, &d_buffer); dim3 GridSize2(this->local_backend_.GPU_wrap * 4); GROUP_SIZE = ( size_t( ( size_t( nrow / ( this->local_backend_.GPU_wrap * 4 ) ) + 1 ) / this->local_backend_.GPU_block_size ) + 1 ) * this->local_backend_.GPU_block_size; LOCAL_SIZE = GROUP_SIZE / this->local_backend_.GPU_block_size; kernel_reduce<int, int, 256> <<<GridSize2, BlockSize>>> (nrow, nnz_coo, d_buffer, GROUP_SIZE, LOCAL_SIZE); CHECK_CUDA_ERROR(__FILE__, __LINE__); FinalReduceSize = this->local_backend_.GPU_wrap * 4; allocate_host(FinalReduceSize, &h_buffer); cudaMemcpy(h_buffer, // dst d_buffer, // src FinalReduceSize*sizeof(int), // size cudaMemcpyDeviceToHost); CHECK_CUDA_ERROR(__FILE__, __LINE__); 
free_gpu<int>(&d_buffer); int num_nnz_coo = 0; for ( int i=0; i<FinalReduceSize; ++i ) num_nnz_coo += h_buffer[i]; free_host(&h_buffer); // allocate ELL and COO matrices int num_nnz_ell = max_row * nrow; assert(num_nnz_ell > 0); assert(num_nnz_coo > 0); this->AllocateHYB(num_nnz_ell, num_nnz_coo, max_row, nrow, ncol); cudaMemset(this->mat_.ELL.col, -1, num_nnz_ell*sizeof(int)); CHECK_CUDA_ERROR(__FILE__, __LINE__); // copy up to num_cols_per_row values of row i into the ELL int *nnz_ell = NULL; allocate_gpu<int>(nrow, &nnz_ell); kernel_ell_fill_ell<ValueType, int> <<<GridSize, BlockSize>>> (nrow, max_row, cast_mat_csr->mat_.row_offset, cast_mat_csr->mat_.col, cast_mat_csr->mat_.val, this->mat_.ELL.col, this->mat_.ELL.val, nnz_ell); CHECK_CUDA_ERROR(__FILE__, __LINE__); // TODO currently performing partial sum on host allocate_host(nrow, &h_buffer); cudaMemcpy(h_buffer, // dst nnz_ell, // src nrow*sizeof(int), // size cudaMemcpyDeviceToHost); for (int i=1; i<nrow; ++i) h_buffer[i] += h_buffer[i-1]; cudaMemcpy(nnz_ell, // dst h_buffer, // src nrow*sizeof(int), // size cudaMemcpyHostToDevice); free_host(&h_buffer); // end TODO // copy any remaining values in row i into the COO kernel_ell_fill_coo<ValueType, int> <<<GridSize, BlockSize>>> (nrow, cast_mat_csr->mat_.row_offset, cast_mat_csr->mat_.col, cast_mat_csr->mat_.val, nnz_coo, nnz_ell, this->mat_.COO.row, this->mat_.COO.col, this->mat_.COO.val); CHECK_CUDA_ERROR(__FILE__, __LINE__); free_gpu<int>(&nnz_ell); free_gpu<int>(&nnz_coo); this->nrow_ = cast_mat_csr->get_nrow(); this->ncol_ = cast_mat_csr->get_ncol(); this->nnz_ = num_nnz_ell + num_nnz_coo; this->mat_.ELL.max_row = max_row; this->ell_nnz_ = num_nnz_ell; this->coo_nnz_ = num_nnz_coo; return true; } return false; } template <typename ValueType> void GPUAcceleratorMatrixHYB<ValueType>::Apply(const BaseVector<ValueType> &in, BaseVector<ValueType> *out) const { if (this->get_nnz() > 0) { assert(in. get_size() >= 0); assert(out->get_size() >= 0); assert(in. get_size() == this->get_ncol()); assert(out->get_size() == this->get_nrow()); const GPUAcceleratorVector<ValueType> *cast_in = dynamic_cast<const GPUAcceleratorVector<ValueType>*> (&in) ; GPUAcceleratorVector<ValueType> *cast_out = dynamic_cast< GPUAcceleratorVector<ValueType>*> (out) ; assert(cast_in != NULL); assert(cast_out!= NULL); // ELL if (this->get_ell_nnz() > 0) { int nrow = this->get_nrow(); int ncol = this->get_ncol(); int max_row = this->get_ell_max_row(); dim3 BlockSize(this->local_backend_.GPU_block_size); dim3 GridSize(nrow / this->local_backend_.GPU_block_size + 1); kernel_ell_spmv<ValueType, int> <<<GridSize, BlockSize>>> (nrow, ncol, max_row, this->mat_.ELL.col, this->mat_.ELL.val, cast_in->vec_, cast_out->vec_ ); CHECK_CUDA_ERROR(__FILE__, __LINE__); } if (this->get_coo_nnz() > 0) { // do not support super small matrices assert(this->get_coo_nnz() > this->local_backend_.GPU_wrap); // ---------------------------------------------------------- // Modified and adopted from CUSP 0.3.1, // http://code.google.com/p/cusp-library/ // NVIDIA, APACHE LICENSE 2.0 // ---------------------------------------------------------- // see __spmv_coo_flat(...) 
// ---------------------------------------------------------- // CHANGELOG // - adopted interface // ---------------------------------------------------------- const unsigned int BLOCK_SIZE = this->local_backend_.GPU_block_size; // const unsigned int MAX_BLOCKS = this->local_backend_.GPU_max_blocks; const unsigned int MAX_BLOCKS = 32; // cusp::detail::device::arch::max_active_blocks(spmv_coo_flat_kernel<IndexType, ValueType, BLOCK_SIZE, UseCache>, BLOCK_SIZE, (size_t) 0); const unsigned int WARPS_PER_BLOCK = BLOCK_SIZE / this->local_backend_.GPU_wrap; const unsigned int num_units = this->get_coo_nnz() / this->local_backend_.GPU_wrap; const unsigned int num_warps = std::min(num_units, WARPS_PER_BLOCK * MAX_BLOCKS); const unsigned int num_blocks = (num_warps + (WARPS_PER_BLOCK-1)) / WARPS_PER_BLOCK; // (N + (granularity - 1)) / granularity const unsigned int num_iters = (num_units + (num_warps-1)) / num_warps; const unsigned int interval_size = this->local_backend_.GPU_wrap * num_iters; const int tail = num_units * this->local_backend_.GPU_wrap; // do the last few nonzeros separately (fewer than this->local_backend_.GPU_wrap elements) const unsigned int active_warps = (interval_size == 0) ? 0 : ((tail + (interval_size-1))/interval_size); int *temp_rows = NULL; ValueType *temp_vals = NULL; allocate_gpu(active_warps, &temp_rows); allocate_gpu(active_warps, &temp_vals); CHECK_CUDA_ERROR(__FILE__, __LINE__); // TODO // BLOCK_SIZE == 256 // WARP_SIZE == 32 kernel_spmv_coo_flat<int, ValueType, 256, 32> <<<num_blocks, BLOCK_SIZE>>> (tail, interval_size, this->mat_.COO.row, this->mat_.COO.col, this->mat_.COO.val, ValueType(1.0), cast_in->vec_, cast_out->vec_, temp_rows, temp_vals); CHECK_CUDA_ERROR(__FILE__, __LINE__); // TODO // BLOCK_SIZE == 256 kernel_spmv_coo_reduce_update<int, ValueType, 256> <<<1, BLOCK_SIZE>>> (active_warps, temp_rows, temp_vals, cast_out->vec_); CHECK_CUDA_ERROR(__FILE__, __LINE__); kernel_spmv_coo_serial<int, ValueType> <<<1,1>>> (this->get_coo_nnz() - tail, this->mat_.COO.row + tail, this->mat_.COO.col + tail, this->mat_.COO.val + tail, ValueType(1.0), cast_in->vec_, cast_out->vec_); CHECK_CUDA_ERROR(__FILE__, __LINE__); free_gpu(&temp_rows); free_gpu(&temp_vals); } } } template <typename ValueType> void GPUAcceleratorMatrixHYB<ValueType>::ApplyAdd(const BaseVector<ValueType> &in, const ValueType scalar, BaseVector<ValueType> *out) const { if (this->get_nnz() > 0) { assert(in. get_size() >= 0); assert(out->get_size() >= 0); assert(in. 
get_size() == this->get_ncol()); assert(out->get_size() == this->get_nrow()); const GPUAcceleratorVector<ValueType> *cast_in = dynamic_cast<const GPUAcceleratorVector<ValueType>*> (&in) ; GPUAcceleratorVector<ValueType> *cast_out = dynamic_cast< GPUAcceleratorVector<ValueType>*> (out) ; assert(cast_in != NULL); assert(cast_out!= NULL); // ELL if (this->get_ell_nnz() > 0) { int nrow = this->get_nrow(); int ncol = this->get_ncol(); int max_row = this->get_ell_max_row(); dim3 BlockSize(this->local_backend_.GPU_block_size); dim3 GridSize(nrow / this->local_backend_.GPU_block_size + 1); kernel_ell_add_spmv<ValueType, int> <<<GridSize, BlockSize>>> (nrow, ncol, max_row, this->mat_.ELL.col, this->mat_.ELL.val, scalar, cast_in->vec_, cast_out->vec_ ); CHECK_CUDA_ERROR(__FILE__, __LINE__); } if (this->get_coo_nnz() > 0) { // do not support super small matrices assert(this->get_coo_nnz() > this->local_backend_.GPU_wrap); // ---------------------------------------------------------- // Modified and adopted from CUSP 0.3.1, // http://code.google.com/p/cusp-library/ // NVIDIA, APACHE LICENSE 2.0 // ---------------------------------------------------------- // see __spmv_coo_flat(...) // ---------------------------------------------------------- // CHANGELOG // - adopted interface // ---------------------------------------------------------- const unsigned int BLOCK_SIZE = this->local_backend_.GPU_block_size; // const unsigned int MAX_BLOCKS = this->local_backend_.GPU_max_blocks; const unsigned int MAX_BLOCKS = 32; // cusp::detail::device::arch::max_active_blocks(spmv_coo_flat_kernel<IndexType, ValueType, BLOCK_SIZE, UseCache>, BLOCK_SIZE, (size_t) 0); const unsigned int WARPS_PER_BLOCK = BLOCK_SIZE / this->local_backend_.GPU_wrap; const unsigned int num_units = this->get_coo_nnz() / this->local_backend_.GPU_wrap; const unsigned int num_warps = std::min(num_units, WARPS_PER_BLOCK * MAX_BLOCKS); const unsigned int num_blocks = (num_warps + (WARPS_PER_BLOCK-1)) / WARPS_PER_BLOCK; // (N + (granularity - 1)) / granularity const unsigned int num_iters = (num_units + (num_warps-1)) / num_warps; const unsigned int interval_size = this->local_backend_.GPU_wrap * num_iters; const int tail = num_units * this->local_backend_.GPU_wrap; // do the last few nonzeros separately (fewer than this->local_backend_.GPU_wrap elements) const unsigned int active_warps = (interval_size == 0) ? 0 : ((tail + (interval_size-1))/interval_size); int *temp_rows = NULL; ValueType *temp_vals = NULL; allocate_gpu(active_warps, &temp_rows); allocate_gpu(active_warps, &temp_vals); CHECK_CUDA_ERROR(__FILE__, __LINE__); // TODO // BLOCK_SIZE == 256 // WARP_SIZE == 32 kernel_spmv_coo_flat<int, ValueType, 256, 32> <<<num_blocks, BLOCK_SIZE>>> (tail, interval_size, this->mat_.COO.row, this->mat_.COO.col, this->mat_.COO.val, scalar, cast_in->vec_, cast_out->vec_, temp_rows, temp_vals); CHECK_CUDA_ERROR(__FILE__, __LINE__); // TODO // BLOCK_SIZE == 256 kernel_spmv_coo_reduce_update<int, ValueType, 256> <<<1, BLOCK_SIZE>>> (active_warps, temp_rows, temp_vals, cast_out->vec_); CHECK_CUDA_ERROR(__FILE__, __LINE__); kernel_spmv_coo_serial<int, ValueType> <<<1,1>>> (this->get_coo_nnz() - tail, this->mat_.COO.row + tail, this->mat_.COO.col + tail, this->mat_.COO.val + tail, scalar, cast_in->vec_, cast_out->vec_); CHECK_CUDA_ERROR(__FILE__, __LINE__); free_gpu(&temp_rows); free_gpu(&temp_vals); } } } template class GPUAcceleratorMatrixHYB<double>; template class GPUAcceleratorMatrixHYB<float>; }
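The CUSP-derived COO SpMV used in Apply and ApplyAdd of both files partitions the nonzeros into warp-sized units, caps the number of warps, and leaves a short serial tail for the <<<1,1>>> kernel. A worked example of that launch arithmetic, with a hypothetical nonzero count and the constants the code uses (warp 32, block 256, MAX_BLOCKS 32):

#include <algorithm>
#include <cstdio>

int main() {
  const unsigned coo_nnz    = 100000;   // hypothetical COO nonzero count
  const unsigned WARP_SIZE  = 32;       // local_backend_.GPU_wrap in the code
  const unsigned BLOCK_SIZE = 256;
  const unsigned MAX_BLOCKS = 32;

  const unsigned warps_per_block = BLOCK_SIZE / WARP_SIZE;                          // 8
  const unsigned num_units  = coo_nnz / WARP_SIZE;                                  // 3125
  const unsigned num_warps  = std::min(num_units, warps_per_block * MAX_BLOCKS);    // 256
  const unsigned num_blocks = (num_warps + warps_per_block - 1) / warps_per_block;  // 32
  const unsigned num_iters  = (num_units + num_warps - 1) / num_warps;              // 13
  const unsigned interval   = WARP_SIZE * num_iters;                                // 416 nnz per warp
  const unsigned tail       = num_units * WARP_SIZE;                                // 100000 handled flat
  const unsigned active     = interval ? (tail + interval - 1) / interval : 0;      // 241 partial sums
  // coo_nnz - tail leftover nonzeros (0 here, up to 31 in general) run in
  // kernel_spmv_coo_serial on a single thread.
  std::printf("blocks=%u iters=%u interval=%u tail=%u active_warps=%u\n",
              num_blocks, num_iters, interval, tail, active);
  return 0;
}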
ea5be871d5b06da7a012622131d99a9aba9dcf3c.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "stereo.h" __global__ void Cv8uToGrayKernel(uchar *d_iCv8u, float *d_iGray, int width, int height, int stride) { int r = blockIdx.y * blockDim.y + threadIdx.y; // current row int c = blockIdx.x * blockDim.x + threadIdx.x; // current column if ((r < height) && (c < width)) { int idx = c + stride * r; // current pixel index //d_iGray[idx] = 0.2126f * (float)pixel.x + 0.7152f * (float)pixel.y + 0.0722f * (float)pixel.z; d_iGray[idx] = (float)d_iCv8u[idx] / 256.0f; } } void Stereo::Cv8uToGray(uchar * d_iCv8u, float *d_iGray, int w, int h, int s) { dim3 threads(BlockWidth, BlockHeight); dim3 blocks(iDivUp(w, threads.x), iDivUp(h, threads.y)); Cv8uToGrayKernel << < blocks, threads >> > (d_iCv8u, d_iGray, w, h, s); } __global__ void Cv8uc3ToGrayKernel(uchar3 *d_iRgb, float *d_iGray, int width, int height, int stride) { int r = blockIdx.y * blockDim.y + threadIdx.y; // current row int c = blockIdx.x * blockDim.x + threadIdx.x; // current column if ((r < height) && (c < width)) { int idx = c + stride * r; // current pixel index uchar3 pixel = d_iRgb[idx]; //d_iGray[idx] = 0.2126f * (float)pixel.x + 0.7152f * (float)pixel.y + 0.0722f * (float)pixel.z; d_iGray[idx] = ((float)pixel.x + (float)pixel.y + (float)pixel.z) / 3; d_iGray[idx] = d_iGray[idx] / 256.0f; } } void Stereo::Cv8uc3ToGray(uchar3 * d_iRgb, float *d_iGray, int w, int h, int s) { dim3 threads(BlockWidth, BlockHeight); dim3 blocks(iDivUp(w, threads.x), iDivUp(h, threads.y)); Cv8uc3ToGrayKernel << < blocks, threads >> > (d_iRgb, d_iGray, w, h, s); }
ea5be871d5b06da7a012622131d99a9aba9dcf3c.cu
#include "stereo.h" __global__ void Cv8uToGrayKernel(uchar *d_iCv8u, float *d_iGray, int width, int height, int stride) { int r = blockIdx.y * blockDim.y + threadIdx.y; // current row int c = blockIdx.x * blockDim.x + threadIdx.x; // current column if ((r < height) && (c < width)) { int idx = c + stride * r; // current pixel index //d_iGray[idx] = 0.2126f * (float)pixel.x + 0.7152f * (float)pixel.y + 0.0722f * (float)pixel.z; d_iGray[idx] = (float)d_iCv8u[idx] / 256.0f; } } void Stereo::Cv8uToGray(uchar * d_iCv8u, float *d_iGray, int w, int h, int s) { dim3 threads(BlockWidth, BlockHeight); dim3 blocks(iDivUp(w, threads.x), iDivUp(h, threads.y)); Cv8uToGrayKernel << < blocks, threads >> > (d_iCv8u, d_iGray, w, h, s); } __global__ void Cv8uc3ToGrayKernel(uchar3 *d_iRgb, float *d_iGray, int width, int height, int stride) { int r = blockIdx.y * blockDim.y + threadIdx.y; // current row int c = blockIdx.x * blockDim.x + threadIdx.x; // current column if ((r < height) && (c < width)) { int idx = c + stride * r; // current pixel index uchar3 pixel = d_iRgb[idx]; //d_iGray[idx] = 0.2126f * (float)pixel.x + 0.7152f * (float)pixel.y + 0.0722f * (float)pixel.z; d_iGray[idx] = ((float)pixel.x + (float)pixel.y + (float)pixel.z) / 3; d_iGray[idx] = d_iGray[idx] / 256.0f; } } void Stereo::Cv8uc3ToGray(uchar3 * d_iRgb, float *d_iGray, int w, int h, int s) { dim3 threads(BlockWidth, BlockHeight); dim3 blocks(iDivUp(w, threads.x), iDivUp(h, threads.y)); Cv8uc3ToGrayKernel << < blocks, threads >> > (d_iRgb, d_iGray, w, h, s); }
48104e9877453ac368d783c0f169cc259aac3d6d.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright 1993-2015 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. * */ /* Simple example demonstrating how to use MPI with CUDA * * Generate some random numbers on one node. * Dispatch them to all nodes. * Compute their square root on each node's GPU. * Compute the average of the results using MPI. * * simpleMPI.cu: GPU part, compiled with nvcc */ #include <iostream> using std::cerr; using std::endl; #include "simpleMPI.h" // Error handling macro #define CUDA_CHECK(call) \ if((call) != hipSuccess) { \ hipError_t err = hipGetLastError(); \ cerr << "CUDA error calling \""#call"\", code is " << err << endl; \ my_abort(err); } // Device code // Very simple GPU Kernel that computes square roots of input numbers __global__ void simpleMPIKernel(float *input_a, float *input_b, float *output) { int tid = blockIdx.x * blockDim.x + threadIdx.x; output[tid] = sqrt(input_a[tid]*input_a[tid] + input_b[tid]*input_b[tid]); } // Initialize an array with random data (between 0 and 1) void initData(float *data, int dataSize) { for (int i = 0; i < dataSize; i++) { data[i] = (float)rand() / RAND_MAX; } } // CUDA computation on each node // No MPI here, only CUDA void computeGPU(float *hostData_a, float *hostData_b, int blockSize, int gridSize) { int dataSize = blockSize * gridSize; // Allocate data on GPU memory float *deviceInputData_a = NULL; CUDA_CHECK(hipMalloc((void **)&deviceInputData_a, dataSize * sizeof(float))); float *deviceInputData_b = NULL; CUDA_CHECK(hipMalloc((void **)&deviceInputData_b, dataSize * sizeof(float))); float *deviceOutputData = NULL; CUDA_CHECK(hipMalloc((void **)&deviceOutputData, dataSize * sizeof(float))); // Copy to GPU memory CUDA_CHECK(hipMemcpy(deviceInputData_a, hostData_a, dataSize * sizeof(float), hipMemcpyHostToDevice)); CUDA_CHECK(hipMemcpy(deviceInputData_b, hostData_b, dataSize * sizeof(float), hipMemcpyHostToDevice)); // Run kernel hipLaunchKernelGGL(( simpleMPIKernel), dim3(gridSize), dim3(blockSize), 0, 0, deviceInputData_a, deviceInputData_b, deviceOutputData); // Copy data back to CPU memory CUDA_CHECK(hipMemcpy(hostData_a, deviceOutputData, dataSize *sizeof(float), hipMemcpyDeviceToHost)); // Free GPU memory CUDA_CHECK(hipFree(deviceInputData_a)); CUDA_CHECK(hipFree(deviceInputData_b)); CUDA_CHECK(hipFree(deviceOutputData)); } float max_here(float *data, int size) { float max_val = data[0]; for (int i = 1; i < size; i++) { if(data[i] > max_val){ max_val = data[i]; } } return max_val; }
48104e9877453ac368d783c0f169cc259aac3d6d.cu
/* * Copyright 1993-2015 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. * */ /* Simple example demonstrating how to use MPI with CUDA * * Generate some random numbers on one node. * Dispatch them to all nodes. * Compute their square root on each node's GPU. * Compute the average of the results using MPI. * * simpleMPI.cu: GPU part, compiled with nvcc */ #include <iostream> using std::cerr; using std::endl; #include "simpleMPI.h" // Error handling macro #define CUDA_CHECK(call) \ if((call) != cudaSuccess) { \ cudaError_t err = cudaGetLastError(); \ cerr << "CUDA error calling \""#call"\", code is " << err << endl; \ my_abort(err); } // Device code // Very simple GPU Kernel that computes square roots of input numbers __global__ void simpleMPIKernel(float *input_a, float *input_b, float *output) { int tid = blockIdx.x * blockDim.x + threadIdx.x; output[tid] = sqrt(input_a[tid]*input_a[tid] + input_b[tid]*input_b[tid]); } // Initialize an array with random data (between 0 and 1) void initData(float *data, int dataSize) { for (int i = 0; i < dataSize; i++) { data[i] = (float)rand() / RAND_MAX; } } // CUDA computation on each node // No MPI here, only CUDA void computeGPU(float *hostData_a, float *hostData_b, int blockSize, int gridSize) { int dataSize = blockSize * gridSize; // Allocate data on GPU memory float *deviceInputData_a = NULL; CUDA_CHECK(cudaMalloc((void **)&deviceInputData_a, dataSize * sizeof(float))); float *deviceInputData_b = NULL; CUDA_CHECK(cudaMalloc((void **)&deviceInputData_b, dataSize * sizeof(float))); float *deviceOutputData = NULL; CUDA_CHECK(cudaMalloc((void **)&deviceOutputData, dataSize * sizeof(float))); // Copy to GPU memory CUDA_CHECK(cudaMemcpy(deviceInputData_a, hostData_a, dataSize * sizeof(float), cudaMemcpyHostToDevice)); CUDA_CHECK(cudaMemcpy(deviceInputData_b, hostData_b, dataSize * sizeof(float), cudaMemcpyHostToDevice)); // Run kernel simpleMPIKernel<<<gridSize, blockSize>>>(deviceInputData_a, deviceInputData_b, deviceOutputData); // Copy data back to CPU memory CUDA_CHECK(cudaMemcpy(hostData_a, deviceOutputData, dataSize *sizeof(float), cudaMemcpyDeviceToHost)); // Free GPU memory CUDA_CHECK(cudaFree(deviceInputData_a)); CUDA_CHECK(cudaFree(deviceInputData_b)); CUDA_CHECK(cudaFree(deviceOutputData)); } float max_here(float *data, int size) { float max_val = data[0]; for (int i = 1; i < size; i++) { if(data[i] > max_val){ max_val = data[i]; } } return max_val; }
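simpleMPIKernel above carries no tid < dataSize guard, which is safe only because computeGPU fixes dataSize to exactly blockSize * gridSize, so every launched thread owns exactly one element. The kernel computes sqrt(a*a + b*b), the 2-D Euclidean norm, and computeGPU writes the result back over hostData_a. A minimal host-side sketch of the calling pattern, assuming simpleMPI.h declares these helpers as the file suggests:

#include <cstdlib>
#include "simpleMPI.h"

int main() {
  const int blockSize = 256, gridSize = 64;
  const int dataSize  = blockSize * gridSize;   // must match the launch exactly
  float *a = (float *)malloc(dataSize * sizeof(float));
  float *b = (float *)malloc(dataSize * sizeof(float));
  initData(a, dataSize);                        // values in [0, 1]
  initData(b, dataSize);
  computeGPU(a, b, blockSize, gridSize);        // a[i] becomes sqrt(a[i]^2 + b[i]^2)
  float m = max_here(a, dataSize);              // largest norm, at most sqrt(2)
  free(a); free(b);
  return (m >= 0.0f && m <= 1.42f) ? 0 : 1;
}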
43d6745655e6318e2c56f0b6116abb4259422cdc.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright 2011-2013 Maxim Milakov * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "sigmoid_layer_tester_cuda.h" #include <hip/hip_runtime.h> #include "util_cuda.h" #include "../nn_types.h" static __forceinline__ __device__ float sigmoid(float x) { return __fdividef(1.0F, 1.0F + __expf(-x)); } __global__ void sigmoid_kernel( float4 * __restrict input, int elem_count) { int elem_id = blockDim.x * (blockIdx.y * gridDim.x + blockIdx.x) + threadIdx.x; if (elem_id < elem_count) { float4 val = input[elem_id]; val.x = sigmoid(val.x); val.y = sigmoid(val.y); val.z = sigmoid(val.z); val.w = sigmoid(val.w); input[elem_id] = val; } } namespace nnforge { namespace cuda { sigmoid_layer_tester_cuda::sigmoid_layer_tester_cuda() { } sigmoid_layer_tester_cuda::~sigmoid_layer_tester_cuda() { } void sigmoid_layer_tester_cuda::enqueue_test( hipStream_t stream_id, const std::vector<const_cuda_linear_buffer_device_smart_ptr>& schema_data, const std::vector<const_cuda_linear_buffer_device_smart_ptr>& data, cuda_linear_buffer_device_smart_ptr input_buffer, const std::vector<cuda_linear_buffer_device_smart_ptr>& additional_buffers, unsigned int entry_count) { int elem_count = (input_elem_count_per_entry * entry_count + 3) / 4; std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_sequential_access( *cuda_config, elem_count); hipLaunchKernelGGL(( sigmoid_kernel), dim3(kernel_dims.first), dim3(kernel_dims.second), 0, stream_id, *input_buffer, elem_count); } } }
43d6745655e6318e2c56f0b6116abb4259422cdc.cu
/* * Copyright 2011-2013 Maxim Milakov * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "sigmoid_layer_tester_cuda.h" #include <cuda_runtime.h> #include "util_cuda.h" #include "../nn_types.h" static __forceinline__ __device__ float sigmoid(float x) { return __fdividef(1.0F, 1.0F + __expf(-x)); } __global__ void sigmoid_kernel( float4 * __restrict input, int elem_count) { int elem_id = blockDim.x * (blockIdx.y * gridDim.x + blockIdx.x) + threadIdx.x; if (elem_id < elem_count) { float4 val = input[elem_id]; val.x = sigmoid(val.x); val.y = sigmoid(val.y); val.z = sigmoid(val.z); val.w = sigmoid(val.w); input[elem_id] = val; } } namespace nnforge { namespace cuda { sigmoid_layer_tester_cuda::sigmoid_layer_tester_cuda() { } sigmoid_layer_tester_cuda::~sigmoid_layer_tester_cuda() { } void sigmoid_layer_tester_cuda::enqueue_test( cudaStream_t stream_id, const std::vector<const_cuda_linear_buffer_device_smart_ptr>& schema_data, const std::vector<const_cuda_linear_buffer_device_smart_ptr>& data, cuda_linear_buffer_device_smart_ptr input_buffer, const std::vector<cuda_linear_buffer_device_smart_ptr>& additional_buffers, unsigned int entry_count) { int elem_count = (input_elem_count_per_entry * entry_count + 3) / 4; std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_sequential_access( *cuda_config, elem_count); sigmoid_kernel<<<kernel_dims.first, kernel_dims.second, 0, stream_id>>>( *input_buffer, elem_count); } } }
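enqueue_test in this pair walks the buffer as float4, rounding the float count up with (n + 3) / 4; when n is not a multiple of four the last vector touches up to three floats past n, which is safe only if the buffer is padded to a multiple of four floats (presumably guaranteed by the framework's allocator, an assumption here). The rounding itself:

#include <cstdio>
#include <initializer_list>

int main() {
  // float count -> float4 count, as computed in enqueue_test
  for (int n : {8, 9, 10, 11, 12}) {
    int elem_count = (n + 3) / 4;   // 2, 3, 3, 3, 3
    std::printf("n=%2d -> %d float4 ops (touch %d floats)\n",
                n, elem_count, elem_count * 4);
  }
  return 0;
}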
9db6c43c751f8d5f7f9f1fdbdc81d2e7265a6608.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2021, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <cudf/column/column_device_view.cuh> #include <cudf/detail/gather.cuh> #include <cudf/detail/nvtx/ranges.hpp> #include <cudf/lists/explode.hpp> #include <cudf/lists/lists_column_view.hpp> #include <cudf/table/table.hpp> #include <cudf/utilities/type_dispatcher.hpp> #include <rmm/cuda_stream_view.hpp> #include <rmm/device_uvector.hpp> #include <rmm/exec_policy.hpp> #include <thrust/binary_search.h> #include <thrust/iterator/counting_iterator.h> #include <thrust/iterator/transform_iterator.h> #include <thrust/optional.h> #include <memory> #include <type_traits> namespace cudf { namespace detail { // explode column gather map uses cudf::out_of_bounds_policy::NULLIFY to // fill nulls where there are invalid indices constexpr size_type InvalidIndex = -1; namespace { std::unique_ptr<table> build_table( table_view const& input_table, size_type const explode_column_idx, column_view const& sliced_child, cudf::device_span<size_type const> gather_map, thrust::optional<cudf::device_span<size_type const>> explode_col_gather_map, thrust::optional<rmm::device_uvector<size_type>> position_array, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { auto select_iter = thrust::make_transform_iterator( thrust::make_counting_iterator(0), [explode_column_idx](size_type i) { return i >= explode_column_idx ? i + 1 : i; }); auto gathered_table = detail::gather(input_table.select(select_iter, select_iter + input_table.num_columns() - 1), gather_map.begin(), gather_map.end(), cudf::out_of_bounds_policy::DONT_CHECK, stream, mr); std::vector<std::unique_ptr<column>> columns = gathered_table.release()->release(); columns.insert(columns.begin() + explode_column_idx, explode_col_gather_map ? std::move(detail::gather(table_view({sliced_child}), explode_col_gather_map->begin(), explode_col_gather_map->end(), cudf::out_of_bounds_policy::NULLIFY, stream, mr) ->release()[0]) : std::make_unique<column>(sliced_child, stream, mr)); if (position_array) { size_type position_size = position_array->size(); // build the null mask for position based on invalid entries in gather map auto nullmask = explode_col_gather_map ? 
valid_if( explode_col_gather_map->begin(), explode_col_gather_map->end(), [] __device__(auto i) { return i != InvalidIndex; }, stream, mr) : std::pair<rmm::device_buffer, size_type>{ rmm::device_buffer(0, stream), size_type{0}}; columns.insert(columns.begin() + explode_column_idx, std::make_unique<column>(data_type(type_to_id<size_type>()), position_size, position_array->release(), std::move(nullmask.first), nullmask.second)); } return std::make_unique<table>(std::move(columns)); } } // namespace std::unique_ptr<table> explode(table_view const& input_table, size_type const explode_column_idx, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { lists_column_view explode_col{input_table.column(explode_column_idx)}; auto sliced_child = explode_col.get_sliced_child(stream); rmm::device_uvector<size_type> gather_map(sliced_child.size(), stream); // Sliced columns may require rebasing of the offsets. auto offsets = explode_col.offsets_begin(); // offsets + 1 here to skip the 0th offset, which removes a - 1 operation later. auto offsets_minus_one = thrust::make_transform_iterator( thrust::next(offsets), [offsets] __device__(auto i) { return (i - offsets[0]) - 1; }); auto counting_iter = thrust::make_counting_iterator(0); // This looks like an off-by-one bug, but what is going on here is that we need to reduce each // result from `lower_bound` by 1 to build the correct gather map. This can be accomplished by // skipping the first entry and using the result of `lower_bound` directly. thrust::lower_bound(rmm::exec_policy(stream), offsets_minus_one, offsets_minus_one + explode_col.size(), counting_iter, counting_iter + gather_map.size(), gather_map.begin()); return build_table(input_table, explode_column_idx, sliced_child, gather_map, thrust::nullopt, thrust::nullopt, stream, mr); } std::unique_ptr<table> explode_position(table_view const& input_table, size_type const explode_column_idx, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { lists_column_view explode_col{input_table.column(explode_column_idx)}; auto sliced_child = explode_col.get_sliced_child(stream); rmm::device_uvector<size_type> gather_map(sliced_child.size(), stream); // Sliced columns may require rebasing of the offsets. auto offsets = explode_col.offsets_begin(); // offsets + 1 here to skip the 0th offset, which removes a - 1 operation later. auto offsets_minus_one = thrust::make_transform_iterator( offsets + 1, [offsets] __device__(auto i) { return (i - offsets[0]) - 1; }); auto counting_iter = thrust::make_counting_iterator(0); rmm::device_uvector<size_type> pos(sliced_child.size(), stream, mr); // This looks like an off-by-one bug, but what is going on here is that we need to reduce each // result from `lower_bound` by 1 to build the correct gather map. This can be accomplished by // skipping the first entry and using the result of `lower_bound` directly. 
thrust::transform( rmm::exec_policy(stream), counting_iter, counting_iter + gather_map.size(), gather_map.begin(), [position_array = pos.data(), offsets_minus_one, offsets, offset_size = explode_col.size()] __device__(auto idx) -> size_type { auto lb_idx = thrust::distance( offsets_minus_one, thrust::lower_bound(thrust::seq, offsets_minus_one, offsets_minus_one + offset_size, idx)); position_array[idx] = idx - (offsets[lb_idx] - offsets[0]); return lb_idx; }); return build_table(input_table, explode_column_idx, sliced_child, gather_map, thrust::nullopt, std::move(pos), stream, mr); } std::unique_ptr<table> explode_outer(table_view const& input_table, size_type const explode_column_idx, bool include_position, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { lists_column_view explode_col{input_table.column(explode_column_idx)}; auto sliced_child = explode_col.get_sliced_child(stream); auto counting_iter = thrust::make_counting_iterator(0); auto offsets = explode_col.offsets_begin(); // number of nulls or empty lists found so far in the explode column rmm::device_uvector<size_type> null_or_empty_offset(explode_col.size(), stream); auto null_or_empty = thrust::make_transform_iterator( thrust::make_counting_iterator(0), [offsets, offsets_size = explode_col.size() - 1] __device__(int idx) { return (idx > offsets_size || (offsets[idx + 1] != offsets[idx])) ? 0 : 1; }); thrust::inclusive_scan(rmm::exec_policy(stream), null_or_empty, null_or_empty + explode_col.size(), null_or_empty_offset.begin()); auto null_or_empty_count = null_or_empty_offset.size() > 0 ? null_or_empty_offset.back_element(stream) : 0; if (null_or_empty_count == 0) { // performance penalty to run the below loop if there are no nulls or empty lists. // run simple explode instead return include_position ? explode_position(input_table, explode_column_idx, stream, mr) : explode(input_table, explode_column_idx, stream, mr); } auto gather_map_size = sliced_child.size() + null_or_empty_count; rmm::device_uvector<size_type> gather_map(gather_map_size, stream); rmm::device_uvector<size_type> explode_col_gather_map(gather_map_size, stream); rmm::device_uvector<size_type> pos(include_position ? gather_map_size : 0, stream, mr); // offsets + 1 here to skip the 0th offset, which removes a - 1 operation later. auto offsets_minus_one = thrust::make_transform_iterator( thrust::next(offsets), [offsets] __device__(auto i) { return (i - offsets[0]) - 1; }); auto fill_gather_maps = [offsets_minus_one, gather_map_p = gather_map.begin(), explode_col_gather_map_p = explode_col_gather_map.begin(), position_array = pos.begin(), sliced_child_size = sliced_child.size(), null_or_empty_offset_p = null_or_empty_offset.begin(), include_position, offsets, null_or_empty, offset_size = explode_col.offsets().size() - 1] __device__(auto idx) { if (idx < sliced_child_size) { auto lb_idx = thrust::distance(offsets_minus_one, thrust::lower_bound( thrust::seq, offsets_minus_one, offsets_minus_one + (offset_size), idx)); auto index_to_write = null_or_empty_offset_p[lb_idx] + idx; gather_map_p[index_to_write] = lb_idx; explode_col_gather_map_p[index_to_write] = idx; if (include_position) { position_array[index_to_write] = idx - (offsets[lb_idx] - offsets[0]); } } if (null_or_empty[idx]) { auto invalid_index = null_or_empty_offset_p[idx] == 0 ? 
offsets[idx] : offsets[idx] + null_or_empty_offset_p[idx] - 1; gather_map_p[invalid_index] = idx; explode_col_gather_map_p[invalid_index] = InvalidIndex; if (include_position) { position_array[invalid_index] = 0; } } }; // we need to do this loop at least explode_col times or we may not properly fill in null and // empty entries. auto loop_count = ::max(sliced_child.size(), explode_col.size()); // Fill in gather map with all the child column's entries thrust::for_each( rmm::exec_policy(stream), counting_iter, counting_iter + loop_count, fill_gather_maps); return build_table( input_table, explode_column_idx, sliced_child, gather_map, explode_col_gather_map, include_position ? std::move(pos) : thrust::optional<rmm::device_uvector<size_type>>{}, stream, mr); } } // namespace detail /** * @copydoc cudf::explode(table_view const&, size_type, rmm::mr::device_memory_resource*) */ std::unique_ptr<table> explode(table_view const& input_table, size_type explode_column_idx, rmm::mr::device_memory_resource* mr) { CUDF_FUNC_RANGE(); CUDF_EXPECTS(input_table.column(explode_column_idx).type().id() == type_id::LIST, "Unsupported non-list column"); return detail::explode(input_table, explode_column_idx, rmm::cuda_stream_default, mr); } /** * @copydoc cudf::explode_position(table_view const&, size_type, rmm::mr::device_memory_resource*) */ std::unique_ptr<table> explode_position(table_view const& input_table, size_type explode_column_idx, rmm::mr::device_memory_resource* mr) { CUDF_FUNC_RANGE(); CUDF_EXPECTS(input_table.column(explode_column_idx).type().id() == type_id::LIST, "Unsupported non-list column"); return detail::explode_position(input_table, explode_column_idx, rmm::cuda_stream_default, mr); } /** * @copydoc cudf::explode_outer(table_view const&, size_type, rmm::mr::device_memory_resource*) */ std::unique_ptr<table> explode_outer(table_view const& input_table, size_type explode_column_idx, rmm::mr::device_memory_resource* mr) { CUDF_FUNC_RANGE(); CUDF_EXPECTS(input_table.column(explode_column_idx).type().id() == type_id::LIST, "Unsupported non-list column"); return detail::explode_outer( input_table, explode_column_idx, false, rmm::cuda_stream_default, mr); } /** * @copydoc cudf::explode_outer_position(table_view const&, size_type, * rmm::mr::device_memory_resource*) */ std::unique_ptr<table> explode_outer_position(table_view const& input_table, size_type explode_column_idx, rmm::mr::device_memory_resource* mr) { CUDF_FUNC_RANGE(); CUDF_EXPECTS(input_table.column(explode_column_idx).type().id() == type_id::LIST, "Unsupported non-list column"); return detail::explode_outer(input_table, explode_column_idx, true, rmm::cuda_stream_default, mr); } } // namespace cudf
9db6c43c751f8d5f7f9f1fdbdc81d2e7265a6608.cu
/* * Copyright (c) 2021, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <cudf/column/column_device_view.cuh> #include <cudf/detail/gather.cuh> #include <cudf/detail/nvtx/ranges.hpp> #include <cudf/lists/explode.hpp> #include <cudf/lists/lists_column_view.hpp> #include <cudf/table/table.hpp> #include <cudf/utilities/type_dispatcher.hpp> #include <rmm/cuda_stream_view.hpp> #include <rmm/device_uvector.hpp> #include <rmm/exec_policy.hpp> #include <thrust/binary_search.h> #include <thrust/iterator/counting_iterator.h> #include <thrust/iterator/transform_iterator.h> #include <thrust/optional.h> #include <memory> #include <type_traits> namespace cudf { namespace detail { // explode column gather map uses cudf::out_of_bounds_policy::NULLIFY to // fill nulls where there are invalid indices constexpr size_type InvalidIndex = -1; namespace { std::unique_ptr<table> build_table( table_view const& input_table, size_type const explode_column_idx, column_view const& sliced_child, cudf::device_span<size_type const> gather_map, thrust::optional<cudf::device_span<size_type const>> explode_col_gather_map, thrust::optional<rmm::device_uvector<size_type>> position_array, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { auto select_iter = thrust::make_transform_iterator( thrust::make_counting_iterator(0), [explode_column_idx](size_type i) { return i >= explode_column_idx ? i + 1 : i; }); auto gathered_table = detail::gather(input_table.select(select_iter, select_iter + input_table.num_columns() - 1), gather_map.begin(), gather_map.end(), cudf::out_of_bounds_policy::DONT_CHECK, stream, mr); std::vector<std::unique_ptr<column>> columns = gathered_table.release()->release(); columns.insert(columns.begin() + explode_column_idx, explode_col_gather_map ? std::move(detail::gather(table_view({sliced_child}), explode_col_gather_map->begin(), explode_col_gather_map->end(), cudf::out_of_bounds_policy::NULLIFY, stream, mr) ->release()[0]) : std::make_unique<column>(sliced_child, stream, mr)); if (position_array) { size_type position_size = position_array->size(); // build the null mask for position based on invalid entries in gather map auto nullmask = explode_col_gather_map ? 
valid_if( explode_col_gather_map->begin(), explode_col_gather_map->end(), [] __device__(auto i) { return i != InvalidIndex; }, stream, mr) : std::pair<rmm::device_buffer, size_type>{ rmm::device_buffer(0, stream), size_type{0}}; columns.insert(columns.begin() + explode_column_idx, std::make_unique<column>(data_type(type_to_id<size_type>()), position_size, position_array->release(), std::move(nullmask.first), nullmask.second)); } return std::make_unique<table>(std::move(columns)); } } // namespace std::unique_ptr<table> explode(table_view const& input_table, size_type const explode_column_idx, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { lists_column_view explode_col{input_table.column(explode_column_idx)}; auto sliced_child = explode_col.get_sliced_child(stream); rmm::device_uvector<size_type> gather_map(sliced_child.size(), stream); // Sliced columns may require rebasing of the offsets. auto offsets = explode_col.offsets_begin(); // offsets + 1 here to skip the 0th offset, which removes a - 1 operation later. auto offsets_minus_one = thrust::make_transform_iterator( thrust::next(offsets), [offsets] __device__(auto i) { return (i - offsets[0]) - 1; }); auto counting_iter = thrust::make_counting_iterator(0); // This looks like an off-by-one bug, but what is going on here is that we need to reduce each // result from `lower_bound` by 1 to build the correct gather map. This can be accomplished by // skipping the first entry and using the result of `lower_bound` directly. thrust::lower_bound(rmm::exec_policy(stream), offsets_minus_one, offsets_minus_one + explode_col.size(), counting_iter, counting_iter + gather_map.size(), gather_map.begin()); return build_table(input_table, explode_column_idx, sliced_child, gather_map, thrust::nullopt, thrust::nullopt, stream, mr); } std::unique_ptr<table> explode_position(table_view const& input_table, size_type const explode_column_idx, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { lists_column_view explode_col{input_table.column(explode_column_idx)}; auto sliced_child = explode_col.get_sliced_child(stream); rmm::device_uvector<size_type> gather_map(sliced_child.size(), stream); // Sliced columns may require rebasing of the offsets. auto offsets = explode_col.offsets_begin(); // offsets + 1 here to skip the 0th offset, which removes a - 1 operation later. auto offsets_minus_one = thrust::make_transform_iterator( offsets + 1, [offsets] __device__(auto i) { return (i - offsets[0]) - 1; }); auto counting_iter = thrust::make_counting_iterator(0); rmm::device_uvector<size_type> pos(sliced_child.size(), stream, mr); // This looks like an off-by-one bug, but what is going on here is that we need to reduce each // result from `lower_bound` by 1 to build the correct gather map. This can be accomplished by // skipping the first entry and using the result of `lower_bound` directly. 
thrust::transform( rmm::exec_policy(stream), counting_iter, counting_iter + gather_map.size(), gather_map.begin(), [position_array = pos.data(), offsets_minus_one, offsets, offset_size = explode_col.size()] __device__(auto idx) -> size_type { auto lb_idx = thrust::distance( offsets_minus_one, thrust::lower_bound(thrust::seq, offsets_minus_one, offsets_minus_one + offset_size, idx)); position_array[idx] = idx - (offsets[lb_idx] - offsets[0]); return lb_idx; }); return build_table(input_table, explode_column_idx, sliced_child, gather_map, thrust::nullopt, std::move(pos), stream, mr); } std::unique_ptr<table> explode_outer(table_view const& input_table, size_type const explode_column_idx, bool include_position, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { lists_column_view explode_col{input_table.column(explode_column_idx)}; auto sliced_child = explode_col.get_sliced_child(stream); auto counting_iter = thrust::make_counting_iterator(0); auto offsets = explode_col.offsets_begin(); // number of nulls or empty lists found so far in the explode column rmm::device_uvector<size_type> null_or_empty_offset(explode_col.size(), stream); auto null_or_empty = thrust::make_transform_iterator( thrust::make_counting_iterator(0), [offsets, offsets_size = explode_col.size() - 1] __device__(int idx) { return (idx > offsets_size || (offsets[idx + 1] != offsets[idx])) ? 0 : 1; }); thrust::inclusive_scan(rmm::exec_policy(stream), null_or_empty, null_or_empty + explode_col.size(), null_or_empty_offset.begin()); auto null_or_empty_count = null_or_empty_offset.size() > 0 ? null_or_empty_offset.back_element(stream) : 0; if (null_or_empty_count == 0) { // performance penalty to run the below loop if there are no nulls or empty lists. // run simple explode instead return include_position ? explode_position(input_table, explode_column_idx, stream, mr) : explode(input_table, explode_column_idx, stream, mr); } auto gather_map_size = sliced_child.size() + null_or_empty_count; rmm::device_uvector<size_type> gather_map(gather_map_size, stream); rmm::device_uvector<size_type> explode_col_gather_map(gather_map_size, stream); rmm::device_uvector<size_type> pos(include_position ? gather_map_size : 0, stream, mr); // offsets + 1 here to skip the 0th offset, which removes a - 1 operation later. auto offsets_minus_one = thrust::make_transform_iterator( thrust::next(offsets), [offsets] __device__(auto i) { return (i - offsets[0]) - 1; }); auto fill_gather_maps = [offsets_minus_one, gather_map_p = gather_map.begin(), explode_col_gather_map_p = explode_col_gather_map.begin(), position_array = pos.begin(), sliced_child_size = sliced_child.size(), null_or_empty_offset_p = null_or_empty_offset.begin(), include_position, offsets, null_or_empty, offset_size = explode_col.offsets().size() - 1] __device__(auto idx) { if (idx < sliced_child_size) { auto lb_idx = thrust::distance(offsets_minus_one, thrust::lower_bound( thrust::seq, offsets_minus_one, offsets_minus_one + (offset_size), idx)); auto index_to_write = null_or_empty_offset_p[lb_idx] + idx; gather_map_p[index_to_write] = lb_idx; explode_col_gather_map_p[index_to_write] = idx; if (include_position) { position_array[index_to_write] = idx - (offsets[lb_idx] - offsets[0]); } } if (null_or_empty[idx]) { auto invalid_index = null_or_empty_offset_p[idx] == 0 ? 
offsets[idx] : offsets[idx] + null_or_empty_offset_p[idx] - 1; gather_map_p[invalid_index] = idx; explode_col_gather_map_p[invalid_index] = InvalidIndex; if (include_position) { position_array[invalid_index] = 0; } } }; // we need to do this loop at least explode_col times or we may not properly fill in null and // empty entries. auto loop_count = std::max(sliced_child.size(), explode_col.size()); // Fill in gather map with all the child column's entries thrust::for_each( rmm::exec_policy(stream), counting_iter, counting_iter + loop_count, fill_gather_maps); return build_table( input_table, explode_column_idx, sliced_child, gather_map, explode_col_gather_map, include_position ? std::move(pos) : thrust::optional<rmm::device_uvector<size_type>>{}, stream, mr); } } // namespace detail /** * @copydoc cudf::explode(table_view const&, size_type, rmm::mr::device_memory_resource*) */ std::unique_ptr<table> explode(table_view const& input_table, size_type explode_column_idx, rmm::mr::device_memory_resource* mr) { CUDF_FUNC_RANGE(); CUDF_EXPECTS(input_table.column(explode_column_idx).type().id() == type_id::LIST, "Unsupported non-list column"); return detail::explode(input_table, explode_column_idx, rmm::cuda_stream_default, mr); } /** * @copydoc cudf::explode_position(table_view const&, size_type, rmm::mr::device_memory_resource*) */ std::unique_ptr<table> explode_position(table_view const& input_table, size_type explode_column_idx, rmm::mr::device_memory_resource* mr) { CUDF_FUNC_RANGE(); CUDF_EXPECTS(input_table.column(explode_column_idx).type().id() == type_id::LIST, "Unsupported non-list column"); return detail::explode_position(input_table, explode_column_idx, rmm::cuda_stream_default, mr); } /** * @copydoc cudf::explode_outer(table_view const&, size_type, rmm::mr::device_memory_resource*) */ std::unique_ptr<table> explode_outer(table_view const& input_table, size_type explode_column_idx, rmm::mr::device_memory_resource* mr) { CUDF_FUNC_RANGE(); CUDF_EXPECTS(input_table.column(explode_column_idx).type().id() == type_id::LIST, "Unsupported non-list column"); return detail::explode_outer( input_table, explode_column_idx, false, rmm::cuda_stream_default, mr); } /** * @copydoc cudf::explode_outer_position(table_view const&, size_type, * rmm::mr::device_memory_resource*) */ std::unique_ptr<table> explode_outer_position(table_view const& input_table, size_type explode_column_idx, rmm::mr::device_memory_resource* mr) { CUDF_FUNC_RANGE(); CUDF_EXPECTS(input_table.column(explode_column_idx).type().id() == type_id::LIST, "Unsupported non-list column"); return detail::explode_outer(input_table, explode_column_idx, true, rmm::cuda_stream_default, mr); } } // namespace cudf
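The `offsets_minus_one` / `lower_bound` trick above is the heart of the gather-map construction; the following standalone host-side sketch (toy offsets and my own names, not cudf code) shows the same mapping from child-element index to parent row:

// Standalone sketch of the explode gather-map trick (toy data, not cudf
// code): offsets {0, 2, 2, 5} describe the lists [a b], [], [c d e].
#include <thrust/binary_search.h>
#include <thrust/host_vector.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/iterator/transform_iterator.h>
#include <cstdio>

struct MinusOne {
  int operator()(int i) const { return i - 1; }
};

int main() {
  int offs[] = {0, 2, 2, 5};
  thrust::host_vector<int> offsets(offs, offs + 4);
  int num_rows  = offsets.size() - 1;  // 3 lists
  int num_child = offsets.back();      // 5 child elements
  // Skipping offsets[0] and subtracting one means lower_bound already yields
  // the row index directly, with no post-decrement -- the same trick as above.
  auto offsets_minus_one =
      thrust::make_transform_iterator(offsets.begin() + 1, MinusOne{});
  thrust::host_vector<int> gather_map(num_child);
  thrust::lower_bound(offsets_minus_one, offsets_minus_one + num_rows,
                      thrust::make_counting_iterator(0),
                      thrust::make_counting_iterator(num_child),
                      gather_map.begin());
  for (int i = 0; i < num_child; ++i) printf("%d ", gather_map[i]);
  printf("\n");  // prints: 0 0 2 2 2
}

Child elements 0 and 1 map to row 0, the empty row 1 is skipped, and elements 2..4 map to row 2, which is exactly the gather map `explode` feeds to `build_table`.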
ee25c64ef639c9ea52773bb3bc1a9297857e0a1e.hip
// !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <stdio.h>

int main(){
    int deviceCount;
    hipGetDeviceCount(&deviceCount);
    int device;
    for (device = 0; device < deviceCount; ++device) {
        hipDeviceProp_t deviceProp;
        hipGetDeviceProperties(&deviceProp, device);
        printf("Device %d (%s) has compute capability %d.%d.\n",
               device, deviceProp.name, deviceProp.major, deviceProp.minor);
    }
}
ee25c64ef639c9ea52773bb3bc1a9297857e0a1e.cu
#include <cuda_runtime.h>
#include <stdio.h>

int main(){
    int deviceCount;
    cudaGetDeviceCount(&deviceCount);
    int device;
    for (device = 0; device < deviceCount; ++device) {
        cudaDeviceProp deviceProp;
        cudaGetDeviceProperties(&deviceProp, device);
        printf("Device %d (%s) has compute capability %d.%d.\n",
               device, deviceProp.name, deviceProp.major, deviceProp.minor);
    }
}
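Both versions above ignore the runtime API's return codes; a minimal error-checked variant is sketched below (the CHECK_CUDA macro name is an illustrative assumption, not from the original files):

// Minimal sketch using only the standard CUDA runtime API; the CHECK_CUDA
// macro is an illustrative convention, not part of the original sample.
#include <cuda_runtime.h>
#include <stdio.h>
#include <stdlib.h>

#define CHECK_CUDA(call)                                             \
    do {                                                             \
        cudaError_t err_ = (call);                                   \
        if (err_ != cudaSuccess) {                                   \
            fprintf(stderr, "CUDA error %s at %s:%d\n",              \
                    cudaGetErrorString(err_), __FILE__, __LINE__);   \
            exit(EXIT_FAILURE);                                      \
        }                                                            \
    } while (0)

int main(void) {
    int deviceCount = 0;
    CHECK_CUDA(cudaGetDeviceCount(&deviceCount));
    for (int device = 0; device < deviceCount; ++device) {
        cudaDeviceProp deviceProp;
        CHECK_CUDA(cudaGetDeviceProperties(&deviceProp, device));
        printf("Device %d (%s) has compute capability %d.%d.\n",
               device, deviceProp.name, deviceProp.major, deviceProp.minor);
    }
    return 0;
}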
b674cac1c1180c704464e8d8fcc4ff102ddba66d.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cmath>
#include "array_ops.h"

__global__ void multiplyKernel(float* dev_a, float* dev_b, float* dev_c, int size)
{
    int index = blockIdx.x*blockDim.x+threadIdx.x;
    if(index < size)
    {
        dev_c[index] = dev_a[index]*dev_b[index];
    }
}

__host__ void floatArrayMultiplyCuda(float* dev_a, float* dev_b, float* dev_c, int size)
{
    int tileSize = 128;
    dim3 threads(tileSize);
    dim3 blocks((int)ceil(float(size)/float(tileSize)));
    // hipLaunchKernelGGL(kernel, grid dim, block dim, shared mem, stream, args...)
    hipLaunchKernelGGL(multiplyKernel, blocks, threads, 0, 0, dev_a, dev_b, dev_c, size);
}

__global__ void addKernel(float* dev_a, float* dev_b, float* dev_c, int size)
{
    int index = blockIdx.x*blockDim.x+threadIdx.x;
    if(index < size)
    {
        dev_c[index] = dev_a[index] + dev_b[index];
    }
}

__host__ void floatArrayAdditionCuda(float* dev_a, float* dev_b, float* dev_c, int size)
{
    int tileSize = 128;
    dim3 threads(tileSize);
    dim3 blocks((int)ceil(float(size)/float(tileSize)));
    hipLaunchKernelGGL(addKernel, blocks, threads, 0, 0, dev_a, dev_b, dev_c, size);
}

__global__ void subtractKernel(float* dev_a, float* dev_b, float* dev_c, int size)
{
    int index = blockIdx.x*blockDim.x+threadIdx.x;
    if(index < size)
    {
        dev_c[index] = dev_a[index] - dev_b[index];
    }
}

__host__ void floatArraySubtractionCuda(float* dev_a, float* dev_b, float* dev_c, int size)
{
    int tileSize = 128;
    dim3 threads(tileSize);
    dim3 blocks((int)ceil(float(size)/float(tileSize)));
    hipLaunchKernelGGL(subtractKernel, blocks, threads, 0, 0, dev_a, dev_b, dev_c, size);
}

__global__ void divisionKernel(float* dev_a, float* dev_b, float* dev_c, int size)
{
    int index = blockIdx.x*blockDim.x+threadIdx.x;
    if(index < size)
    {
        dev_c[index] = dev_a[index] / dev_b[index];
    }
}

__host__ void floatArrayDivisionCuda(float* dev_a, float* dev_b, float* dev_c, int size)
{
    int tileSize = 128;
    dim3 threads(tileSize);
    dim3 blocks((int)ceil(float(size)/float(tileSize)));
    hipLaunchKernelGGL(divisionKernel, blocks, threads, 0, 0, dev_a, dev_b, dev_c, size);
}
b674cac1c1180c704464e8d8fcc4ff102ddba66d.cu
#include "array_ops.h" __global__ void multiplyKernel(float* dev_a, float* dev_b, float* dev_c, int size) { int index = blockIdx.x*blockDim.x+threadIdx.x; if(index < size) { dev_c[index] = dev_a[index]*dev_b[index]; } } __host__ void floatArrayMultiplyCuda(float* dev_a, float* dev_b, float* dev_c, int size) { int tileSize = 128; dim3 threads(tileSize); dim3 blocks((int)ceil(float(size)/float(tileSize))); multiplyKernel<<<threads,blocks>>>(dev_a, dev_b, dev_c, size); } __global__ void addKernel(float* dev_a, float* dev_b, float* dev_c, int size) { int index = blockIdx.x*blockDim.x+threadIdx.x; if(index < size) { dev_c[index] = dev_a[index] + dev_b[index]; } } __host__ void floatArrayAdditionCuda(float* dev_a, float* dev_b, float* dev_c, int size) { int tileSize = 128; dim3 threads(tileSize); dim3 blocks((int)ceil(float(size)/float(tileSize))); addKernel<<<threads,blocks>>>(dev_a, dev_b, dev_c, size); } __global__ void subtractKernel(float* dev_a, float* dev_b, float* dev_c, int size) { int index = blockIdx.x*blockDim.x+threadIdx.x; if(index < size) { dev_c[index] = dev_a[index] - dev_b[index]; } } __host__ void floatArraySubtractionCuda(float* dev_a, float* dev_b, float* dev_c, int size) { int tileSize = 128; dim3 threads(tileSize); dim3 blocks((int)ceil(float(size)/float(tileSize))); subtractKernel<<<threads,blocks>>>(dev_a, dev_b, dev_c, size); } __global__ void divisionKernel(float* dev_a, float* dev_b, float* dev_c, int size) { int index = blockIdx.x*blockDim.x+threadIdx.x; if(index < size) { dev_c[index] = dev_a[index] / dev_b[index]; } } __host__ void floatArrayDivisionCuda(float* dev_a, float* dev_b, float* dev_c, int size) { int tileSize = 128; dim3 threads(tileSize); dim3 blocks((int)ceil(float(size)/float(tileSize))); subtractKernel<<<threads,blocks>>>(dev_a, dev_b, dev_c, size); }
1138f35e763b85cca0209e64ea51d0a98de60c59.hip
// !!! This is a file automatically generated by hipify!!!
// Print "Hello world" using the GPU
#include <stdio.h>
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"

__global__ void helloFromGPU (void)
{
    printf("Hello World from GPU!\n");
}

int main(void)
{
    // hello from cpu
    printf("Hello World from CPU!\n");
    for(int i=0;i<1000;i++)
    {
        hipLaunchKernelGGL(helloFromGPU, dim3(1), dim3(100), 0, 0);
        hipDeviceReset();
    }
    return 0;
}
1138f35e763b85cca0209e64ea51d0a98de60c59.cu
// Print "Hello world" using the GPU
#include <stdio.h>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"

__global__ void helloFromGPU (void)
{
    printf("Hello World from GPU!\n");
}

int main(void)
{
    // hello from cpu
    printf("Hello World from CPU!\n");
    for(int i=0;i<1000;i++)
    {
        helloFromGPU <<<1, 100>>>();
        cudaDeviceReset();
    }
    return 0;
}
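Calling cudaDeviceReset on every iteration tears the whole CUDA context down a thousand times just to flush the device-side printf buffer. A sketch of an alternative (an editorial suggestion, not the author's code) synchronizes instead and resets once at exit:

// Standalone sketch: cudaDeviceSynchronize flushes device printf output
// without destroying the context on every loop iteration.
#include <stdio.h>
#include "cuda_runtime.h"

__global__ void helloFromGPU(void)
{
    printf("Hello World from GPU!\n");
}

int main(void)
{
    printf("Hello World from CPU!\n");
    for (int i = 0; i < 1000; i++)
    {
        helloFromGPU<<<1, 100>>>();
        cudaDeviceSynchronize();  // wait for the kernel and flush printf
    }
    cudaDeviceReset();            // release the context once, at exit
    return 0;
}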
f34ee35118d6e549ac3ab8410f253e5c6935e588.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/fluid/operators/elementwise/elementwise_div_op.h" #include "paddle/fluid/operators/elementwise/elementwise_op_function.cu.h" #include "paddle/fluid/operators/elementwise/elementwise_op_function.h" #include "paddle/fluid/platform/complex128.h" #include "paddle/fluid/platform/complex64.h" #include "paddle/fluid/platform/float16.h" namespace ops = paddle::operators; namespace plat = paddle::platform; namespace paddle { namespace operators { template <typename T> struct SameDimsElemwiseDiv<platform::CUDADeviceContext, T> { void operator()(const framework::ExecutionContext& ctx, const framework::Tensor* x, const framework::Tensor* y, framework::Tensor* z) { DivRangeFunctor<T> functor(x->data<T>(), y->data<T>(), z->data<T>()); auto& dev_ctx = ctx.template device_context<platform::CUDADeviceContext>(); platform::ForRange<platform::CUDADeviceContext> for_range(dev_ctx, x->numel()); for_range(functor); } }; template <> struct SameDimsElemwiseDiv<platform::CUDADeviceContext, platform::float16> { void operator()(const framework::ExecutionContext& ctx, const framework::Tensor* x, const framework::Tensor* y, framework::Tensor* z) { auto size = x->numel(); dim3 grid_size = dim3(((size + 7) / 8 + PADDLE_CUDA_THREAD_SIZE - 1) / PADDLE_CUDA_THREAD_SIZE, 1); dim3 block_size = dim3(PADDLE_CUDA_THREAD_SIZE, 1); const half* x2 = reinterpret_cast<const half*>(x->data<platform::float16>()); const half* y2 = reinterpret_cast<const half*>(y->data<platform::float16>()); half* z2 = reinterpret_cast<half*>(z->data<platform::float16>()); hipLaunchKernelGGL(( SameDimsElemwiseDivCUDAKernel), dim3(grid_size), dim3(block_size), 0, ctx.template device_context<platform::CUDADeviceContext>().stream(), x2, y2, z2, size); } }; template <typename T> static __global__ void SimpleElemwiseDivGradCUDAKernel(const T* x, const T* y, const T* out, const T* dout, int64_t size, T* dx, T* dy) { int col = blockIdx.x * blockDim.x + threadIdx.x; while (col < size) { T o = dout[col]; dx[col] = o / y[col]; dy[col] = -o * out[col] / y[col]; col += blockDim.x * gridDim.x; } } template <> __global__ void SimpleElemwiseDivGradCUDAKernel<paddle::platform::complex64>( const paddle::platform::complex64* x, const paddle::platform::complex64* y, const paddle::platform::complex64* out, const paddle::platform::complex64* dout, int64_t size, paddle::platform::complex64* dx, paddle::platform::complex64* dy) { int col = blockIdx.x * blockDim.x + threadIdx.x; while (col < size) { paddle::platform::complex64 o = dout[col]; paddle::platform::complex64 y_conj(y[col].real, -y[col].imag); paddle::platform::complex64 out_div_y_conj((out[col] / y[col]).real, -(out[col] / y[col]).imag); dx[col] = o / y_conj; dy[col] = -o * out_div_y_conj; col += blockDim.x * gridDim.x; } } template <> __global__ void SimpleElemwiseDivGradCUDAKernel<paddle::platform::complex128>( const 
paddle::platform::complex128* x, const paddle::platform::complex128* y, const paddle::platform::complex128* out, const paddle::platform::complex128* dout, int64_t size, paddle::platform::complex128* dx, paddle::platform::complex128* dy) { int col = blockIdx.x * blockDim.x + threadIdx.x; while (col < size) { paddle::platform::complex128 o = dout[col]; paddle::platform::complex128 y_conj(y[col].real, -y[col].imag); paddle::platform::complex128 out_div_y_conj((out[col] / y[col]).real, -(out[col] / y[col]).imag); dx[col] = o / y_conj; dy[col] = -o * out_div_y_conj; col += blockDim.x * gridDim.x; } } template <typename DeviceContext, typename T> typename std::enable_if< std::is_same<DeviceContext, plat::CUDADeviceContext>::value>::type elementwise_div_grad(const framework::ExecutionContext& ctx, const framework::Tensor* x, const framework::Tensor* y, const framework::Tensor* out, const framework::Tensor* dout, framework::Tensor* dx, framework::Tensor* dy) { dim3 block_size = dim3(PADDLE_CUDA_THREAD_SIZE, 1); auto size = x->numel(); dim3 grid_size = dim3((size + PADDLE_CUDA_THREAD_SIZE - 1) / PADDLE_CUDA_THREAD_SIZE, 1); hipLaunchKernelGGL(( SimpleElemwiseDivGradCUDAKernel< T>), dim3(grid_size), dim3(block_size), 0, ctx.template device_context<plat::CUDADeviceContext>().stream(), x->data<T>(), y->data<T>(), out->data<T>(), dout->data<T>(), size, dx->mutable_data<T>(ctx.GetPlace()), dy->mutable_data<T>(ctx.GetPlace())); } } // namespace operators } // namespace paddle REGISTER_OP_CUDA_KERNEL( elementwise_div, ops::ElementwiseDivKernel<paddle::platform::CUDADeviceContext, float>, ops::ElementwiseDivKernel<paddle::platform::CUDADeviceContext, paddle::platform::float16>, ops::ElementwiseDivKernel<paddle::platform::CUDADeviceContext, double>, ops::ElementwiseDivKernel<paddle::platform::CUDADeviceContext, int>, ops::ElementwiseDivKernel<paddle::platform::CUDADeviceContext, int64_t>, ops::ElementwiseDivKernel<paddle::platform::CUDADeviceContext, paddle::platform::complex64>, ops::ElementwiseDivKernel<paddle::platform::CUDADeviceContext, paddle::platform::complex128>); REGISTER_OP_CUDA_KERNEL( elementwise_div_grad, ops::ElementwiseDivGradKernel<paddle::platform::CUDADeviceContext, float>, ops::ElementwiseDivGradKernel<paddle::platform::CUDADeviceContext, paddle::platform::float16>, ops::ElementwiseDivGradKernel<paddle::platform::CUDADeviceContext, double>, ops::ElementwiseDivGradKernel<paddle::platform::CUDADeviceContext, int>, ops::ElementwiseDivGradKernel<paddle::platform::CUDADeviceContext, int64_t>, ops::ElementwiseDivGradKernel<paddle::platform::CUDADeviceContext, paddle::platform::complex64>, ops::ElementwiseDivGradKernel<paddle::platform::CUDADeviceContext, paddle::platform::complex128>); REGISTER_OP_CUDA_KERNEL( elementwise_div_grad_grad, ops::ElementwiseDivDoubleGradKernel<paddle::platform::CUDADeviceContext, float>, ops::ElementwiseDivDoubleGradKernel<paddle::platform::CUDADeviceContext, paddle::platform::float16>, ops::ElementwiseDivDoubleGradKernel<paddle::platform::CUDADeviceContext, double>, ops::ElementwiseDivDoubleGradKernel<paddle::platform::CUDADeviceContext, int>, ops::ElementwiseDivDoubleGradKernel<paddle::platform::CUDADeviceContext, int64_t>, ops::ElementwiseDivDoubleGradKernel<paddle::platform::CUDADeviceContext, paddle::platform::complex64>, ops::ElementwiseDivDoubleGradKernel<paddle::platform::CUDADeviceContext, paddle::platform::complex128>);
f34ee35118d6e549ac3ab8410f253e5c6935e588.cu
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/fluid/operators/elementwise/elementwise_div_op.h" #include "paddle/fluid/operators/elementwise/elementwise_op_function.cu.h" #include "paddle/fluid/operators/elementwise/elementwise_op_function.h" #include "paddle/fluid/platform/complex128.h" #include "paddle/fluid/platform/complex64.h" #include "paddle/fluid/platform/float16.h" namespace ops = paddle::operators; namespace plat = paddle::platform; namespace paddle { namespace operators { template <typename T> struct SameDimsElemwiseDiv<platform::CUDADeviceContext, T> { void operator()(const framework::ExecutionContext& ctx, const framework::Tensor* x, const framework::Tensor* y, framework::Tensor* z) { DivRangeFunctor<T> functor(x->data<T>(), y->data<T>(), z->data<T>()); auto& dev_ctx = ctx.template device_context<platform::CUDADeviceContext>(); platform::ForRange<platform::CUDADeviceContext> for_range(dev_ctx, x->numel()); for_range(functor); } }; template <> struct SameDimsElemwiseDiv<platform::CUDADeviceContext, platform::float16> { void operator()(const framework::ExecutionContext& ctx, const framework::Tensor* x, const framework::Tensor* y, framework::Tensor* z) { auto size = x->numel(); dim3 grid_size = dim3(((size + 7) / 8 + PADDLE_CUDA_THREAD_SIZE - 1) / PADDLE_CUDA_THREAD_SIZE, 1); dim3 block_size = dim3(PADDLE_CUDA_THREAD_SIZE, 1); const half* x2 = reinterpret_cast<const half*>(x->data<platform::float16>()); const half* y2 = reinterpret_cast<const half*>(y->data<platform::float16>()); half* z2 = reinterpret_cast<half*>(z->data<platform::float16>()); SameDimsElemwiseDivCUDAKernel<<< grid_size, block_size, 0, ctx.template device_context<platform::CUDADeviceContext>().stream()>>>( x2, y2, z2, size); } }; template <typename T> static __global__ void SimpleElemwiseDivGradCUDAKernel(const T* x, const T* y, const T* out, const T* dout, int64_t size, T* dx, T* dy) { int col = blockIdx.x * blockDim.x + threadIdx.x; while (col < size) { T o = dout[col]; dx[col] = o / y[col]; dy[col] = -o * out[col] / y[col]; col += blockDim.x * gridDim.x; } } template <> __global__ void SimpleElemwiseDivGradCUDAKernel<paddle::platform::complex64>( const paddle::platform::complex64* x, const paddle::platform::complex64* y, const paddle::platform::complex64* out, const paddle::platform::complex64* dout, int64_t size, paddle::platform::complex64* dx, paddle::platform::complex64* dy) { int col = blockIdx.x * blockDim.x + threadIdx.x; while (col < size) { paddle::platform::complex64 o = dout[col]; paddle::platform::complex64 y_conj(y[col].real, -y[col].imag); paddle::platform::complex64 out_div_y_conj((out[col] / y[col]).real, -(out[col] / y[col]).imag); dx[col] = o / y_conj; dy[col] = -o * out_div_y_conj; col += blockDim.x * gridDim.x; } } template <> __global__ void SimpleElemwiseDivGradCUDAKernel<paddle::platform::complex128>( const paddle::platform::complex128* x, const paddle::platform::complex128* y, const paddle::platform::complex128* out, const 
paddle::platform::complex128* dout, int64_t size, paddle::platform::complex128* dx, paddle::platform::complex128* dy) { int col = blockIdx.x * blockDim.x + threadIdx.x; while (col < size) { paddle::platform::complex128 o = dout[col]; paddle::platform::complex128 y_conj(y[col].real, -y[col].imag); paddle::platform::complex128 out_div_y_conj((out[col] / y[col]).real, -(out[col] / y[col]).imag); dx[col] = o / y_conj; dy[col] = -o * out_div_y_conj; col += blockDim.x * gridDim.x; } } template <typename DeviceContext, typename T> typename std::enable_if< std::is_same<DeviceContext, plat::CUDADeviceContext>::value>::type elementwise_div_grad(const framework::ExecutionContext& ctx, const framework::Tensor* x, const framework::Tensor* y, const framework::Tensor* out, const framework::Tensor* dout, framework::Tensor* dx, framework::Tensor* dy) { dim3 block_size = dim3(PADDLE_CUDA_THREAD_SIZE, 1); auto size = x->numel(); dim3 grid_size = dim3((size + PADDLE_CUDA_THREAD_SIZE - 1) / PADDLE_CUDA_THREAD_SIZE, 1); SimpleElemwiseDivGradCUDAKernel< T><<<grid_size, block_size, 0, ctx.template device_context<plat::CUDADeviceContext>().stream()>>>( x->data<T>(), y->data<T>(), out->data<T>(), dout->data<T>(), size, dx->mutable_data<T>(ctx.GetPlace()), dy->mutable_data<T>(ctx.GetPlace())); } } // namespace operators } // namespace paddle REGISTER_OP_CUDA_KERNEL( elementwise_div, ops::ElementwiseDivKernel<paddle::platform::CUDADeviceContext, float>, ops::ElementwiseDivKernel<paddle::platform::CUDADeviceContext, paddle::platform::float16>, ops::ElementwiseDivKernel<paddle::platform::CUDADeviceContext, double>, ops::ElementwiseDivKernel<paddle::platform::CUDADeviceContext, int>, ops::ElementwiseDivKernel<paddle::platform::CUDADeviceContext, int64_t>, ops::ElementwiseDivKernel<paddle::platform::CUDADeviceContext, paddle::platform::complex64>, ops::ElementwiseDivKernel<paddle::platform::CUDADeviceContext, paddle::platform::complex128>); REGISTER_OP_CUDA_KERNEL( elementwise_div_grad, ops::ElementwiseDivGradKernel<paddle::platform::CUDADeviceContext, float>, ops::ElementwiseDivGradKernel<paddle::platform::CUDADeviceContext, paddle::platform::float16>, ops::ElementwiseDivGradKernel<paddle::platform::CUDADeviceContext, double>, ops::ElementwiseDivGradKernel<paddle::platform::CUDADeviceContext, int>, ops::ElementwiseDivGradKernel<paddle::platform::CUDADeviceContext, int64_t>, ops::ElementwiseDivGradKernel<paddle::platform::CUDADeviceContext, paddle::platform::complex64>, ops::ElementwiseDivGradKernel<paddle::platform::CUDADeviceContext, paddle::platform::complex128>); REGISTER_OP_CUDA_KERNEL( elementwise_div_grad_grad, ops::ElementwiseDivDoubleGradKernel<paddle::platform::CUDADeviceContext, float>, ops::ElementwiseDivDoubleGradKernel<paddle::platform::CUDADeviceContext, paddle::platform::float16>, ops::ElementwiseDivDoubleGradKernel<paddle::platform::CUDADeviceContext, double>, ops::ElementwiseDivDoubleGradKernel<paddle::platform::CUDADeviceContext, int>, ops::ElementwiseDivDoubleGradKernel<paddle::platform::CUDADeviceContext, int64_t>, ops::ElementwiseDivDoubleGradKernel<paddle::platform::CUDADeviceContext, paddle::platform::complex64>, ops::ElementwiseDivDoubleGradKernel<paddle::platform::CUDADeviceContext, paddle::platform::complex128>);
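The conjugates in the complex64/complex128 specializations follow the Wirtinger-calculus convention for a real-valued loss; a short derivation sketch in my own notation (not text from the source), for $\mathrm{out} = x / y$:

% Gradient rule implemented by the complex kernels above, for a real loss L.
\[
dx \;=\; dout \cdot \overline{\left(\frac{\partial\,\mathrm{out}}{\partial x}\right)}
   \;=\; \frac{dout}{\overline{y}},
\qquad
dy \;=\; dout \cdot \overline{\left(-\frac{x}{y^{2}}\right)}
   \;=\; -\,dout \cdot \overline{\left(\frac{\mathrm{out}}{y}\right)}.
\]

This matches `dx[col] = o / y_conj` and `dy[col] = -o * out_div_y_conj` in both specializations, since $-x/y^{2} = -\mathrm{out}/y$.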
96f32262e61cfb07db4c5698893bb3942454f0f7.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"

__global__ void Iterate_Kernel(int size, int *originIn, int *originOut, int *bestSeenIn, int *bestSeenOut, int *adjIndexes, int *adjacency)
{
  int idx = blockIdx.x * blockDim.x + threadIdx.x;
  if (idx < size)
  {
    int bestSeen = bestSeenIn[idx];
    int origin = originIn[idx];
    if (bestSeen < 1000001)
    {
      int start = adjIndexes[idx];
      int end = adjIndexes[idx + 1];

      // Look at all the neighbors and take best values:
      for (int i = start; i < end; i++)
      {
        int neighbor = adjacency[i];
        int challenger = bestSeenIn[neighbor];
        int challengerOrigin = originIn[neighbor];
        if (challenger > 0 && challenger == bestSeen && challengerOrigin > origin)
        {
          origin = challengerOrigin;
        }
        if (challenger > bestSeen)
        {
          bestSeen = challenger;
          origin = challengerOrigin;
        }
      }
    }

    // Write out the best values found
    bestSeenOut[idx] = bestSeen;
    originOut[idx] = origin;
  }
}
96f32262e61cfb07db4c5698893bb3942454f0f7.cu
#include "includes.h" __global__ void Iterate_Kernel(int size, int *originIn, int *originOut, int *bestSeenIn, int *bestSeenOut, int *adjIndexes, int *adjacency) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx < size) { int bestSeen = bestSeenIn[idx]; int origin = originIn[idx]; if (bestSeen < 1000001) { int start = adjIndexes[idx]; int end = adjIndexes[idx + 1]; // Look at all the neighbors and take best values: for (int i = start; i < end; i++) { int neighbor = adjacency[i]; int challenger = bestSeenIn[neighbor]; int challengerOrigin = originIn[neighbor]; if (challenger > 0 && challenger == bestSeen && challengerOrigin > origin) { origin = challengerOrigin; } if (challenger > bestSeen) { bestSeen = challenger; origin = challengerOrigin; } } } // Write out the best values found bestSeenOut[idx] = bestSeen; originOut[idx] = origin; } }
1178580339f8d852f1904983a9d054d4f0f9cf69.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Add a few definitions so that working in CLion is more convenient.
#ifdef __JETBRAINS_IDE__
#define __host__
#define __device__
#define __shared__
#define __constant__
#define __global__

// This is slightly mental, but gets it to properly index device function calls like __popc and whatever.
#define __HIPCC__
#include <hip/device_functions.h>

// These headers are all implicitly present when you compile CUDA with clang. Clion doesn't know that, so
// we include them explicitly to make the indexer happy. Doing this when you actually build is, obviously,
// a terrible idea :D
#include <__clang_cuda_builtin_vars.h>
#include <__clang_cuda_intrinsics.h>
#include <__clang_cuda_math_forward_declares.h>
#include <__clang_cuda_complex_builtins.h>
#include <__clang_cuda_cmath.h>
#endif // __JETBRAINS_IDE__

#include <fstream>
#include <cassert>

#include "BMP.h" // https://github.com/sol-prog/cpp-bmp-images

/**
 * @brief Simple O(n^2) sort of an array's elements.
 * @details Effective only for small arrays.
 * @details On small arrays, the overhead of more elaborate sorting algorithms does not always pay off.
 * @param arr The array.
 * @param size The array size.
 */
__device__ void sort(uint16_t *arr, size_t size) {
    for (int i = 0; i < size; i++) {
        for(int j = i + 1; j < size; j++) {
            if (arr[j] < arr[i]) {
                uint16_t temp = arr[j];
                arr[j] = arr[i];
                arr[i] = temp;
            }
        }
    }
}

/**
 * @brief Median filter.
 * @param d_input Pixel array of the source image, of size 3 * input_width * input_height.
 * @param input_width Width of the source image in pixels.
 * @param input_height Height of the source image in pixels.
 * @param d_output Pixel array of the filtered image, of size 3 * output_width * output_height.
 * @param output_width Width of the filtered image in pixels.
 * @param output_height Height of the filtered image in pixels.
 * @param filter_size Size of the median search window (a square filter_size * filter_size region).
 */
__global__ void filter(
        uint8_t *d_input, size_t input_width, size_t input_height,
        uint8_t *d_output, size_t output_width, size_t output_height,
        size_t filter_size) {
    /**
     * Accessing shared memory can be hundreds of times faster than accessing global memory.
     * When elements are read more than once, staging them in shared memory beforehand
     * often gives a good performance boost, so the elements are copied from global
     * to shared memory before any further work.
     * Dynamic shared memory allows only a single array per kernel, and ghost cells
     * are required on block borders; together these lead to rather scary indexing.
     */

    // Address the pixels of the source and output images.
    const int output_center_column_no = blockIdx.x * blockDim.x + threadIdx.x;
    const int output_center_row_no = blockIdx.y * blockDim.y + threadIdx.y;
    if ((output_center_column_no < output_width) && (output_center_row_no < output_height)) {
        size_t input_center_row_no = output_center_row_no + filter_size / 2;
        size_t input_center_column_no = output_center_column_no + filter_size / 2;

        // pixel arrays this block is responsible for, including ghost cells
        extern __shared__ uint8_t pixels[];
        size_t pixels_width = blockDim.x + (filter_size / 2) * 2;
        size_t pixels_height = pixels_width;
        uint8_t* r = pixels;
        uint8_t* g = pixels + pixels_width * pixels_height;
        uint8_t* b = pixels + 2 * pixels_width * pixels_height;

        if (threadIdx.x > blockDim.x - filter_size / 2 - 1 && threadIdx.y > blockDim.y - filter_size / 2 - 1) {
            // bottom-right ghost cells
            size_t pixel_idx = (input_center_row_no + filter_size / 2) * input_width + input_center_column_no + filter_size / 2;
            r[(threadIdx.x + (filter_size / 2) * 2) * pixels_width + threadIdx.y + (filter_size / 2) * 2] = d_input[3 * pixel_idx + 0];
            g[(threadIdx.x + (filter_size / 2) * 2) * pixels_width + threadIdx.y + (filter_size / 2) * 2] = d_input[3 * pixel_idx + 1];
            b[(threadIdx.x + (filter_size / 2) * 2) * pixels_width + threadIdx.y + (filter_size / 2) * 2] = d_input[3 * pixel_idx + 2];
        }
        if (threadIdx.x > blockDim.x - filter_size / 2 - 1 && threadIdx.y < filter_size / 2) {
            // top-right ghost cells
            size_t pixel_idx = (input_center_row_no - filter_size / 2) * input_width + (input_center_column_no + filter_size / 2);
            r[(threadIdx.x + (filter_size / 2) * 2) * pixels_width + threadIdx.y] = d_input[3 * pixel_idx + 0];
            g[(threadIdx.x + (filter_size / 2) * 2) * pixels_width + threadIdx.y] = d_input[3 * pixel_idx + 1];
            b[(threadIdx.x + (filter_size / 2) * 2) * pixels_width + threadIdx.y] = d_input[3 * pixel_idx + 2];
        }
        if (threadIdx.x < filter_size / 2 && threadIdx.y > blockDim.y - filter_size / 2 - 1) {
            // bottom-left ghost cells
            size_t pixel_idx = (input_center_row_no + filter_size / 2) * input_width + (input_center_column_no - filter_size / 2);
            r[(threadIdx.x) * pixels_width + threadIdx.y + (filter_size / 2) * 2] = d_input[3 * pixel_idx + 0];
            g[(threadIdx.x) * pixels_width + threadIdx.y + (filter_size / 2) * 2] = d_input[3 * pixel_idx + 1];
            b[(threadIdx.x) * pixels_width + threadIdx.y + (filter_size / 2) * 2] = d_input[3 * pixel_idx + 2];
        }
        if (threadIdx.x < filter_size / 2 && threadIdx.y < filter_size / 2) {
            // top-left ghost cells
            size_t pixel_idx = (input_center_row_no - filter_size / 2) * input_width + (input_center_column_no - filter_size / 2);
            r[(threadIdx.x) * pixels_width + threadIdx.y] = d_input[3 * pixel_idx + 0];
            g[(threadIdx.x) * pixels_width + threadIdx.y] = d_input[3 * pixel_idx + 1];
            b[(threadIdx.x) * pixels_width + threadIdx.y] = d_input[3 * pixel_idx + 2];
        }
        if (threadIdx.x < filter_size / 2) {
            // left ghost cells
            size_t pixel_idx = input_center_row_no * input_width + (input_center_column_no - filter_size / 2);
            r[(threadIdx.x) * pixels_width + threadIdx.y + filter_size / 2] = d_input[3 * pixel_idx + 0];
            g[(threadIdx.x) * pixels_width + threadIdx.y + filter_size / 2] = d_input[3 * pixel_idx + 1];
            b[(threadIdx.x) * pixels_width + threadIdx.y + filter_size / 2] = d_input[3 * pixel_idx + 2];
        }
        if (threadIdx.x > blockDim.x - filter_size / 2 - 1) {
            // right ghost cells
            size_t pixel_idx = input_center_row_no * input_width + (input_center_column_no + filter_size / 2);
            r[(threadIdx.x + (filter_size / 2) * 2) * pixels_width + threadIdx.y + filter_size / 2] = d_input[3 * pixel_idx + 0];
            g[(threadIdx.x + (filter_size / 2) * 2) * pixels_width + threadIdx.y + filter_size / 2] = d_input[3 * pixel_idx + 1];
            b[(threadIdx.x + (filter_size / 2) * 2) * pixels_width + threadIdx.y + filter_size / 2] = d_input[3 * pixel_idx + 2];
        }
        if (threadIdx.y < filter_size / 2) {
            // top ghost cells
            size_t pixel_idx = (input_center_row_no - filter_size / 2) * input_width + input_center_column_no;
            r[(threadIdx.x + filter_size / 2) * pixels_width + threadIdx.y] = d_input[3 * pixel_idx + 0];
            g[(threadIdx.x + filter_size / 2) * pixels_width + threadIdx.y] = d_input[3 * pixel_idx + 1];
            b[(threadIdx.x + filter_size / 2) * pixels_width + threadIdx.y] = d_input[3 * pixel_idx + 2];
        }
        if (threadIdx.y > blockDim.y - filter_size / 2 - 1) {
            // bottom ghost cells
            size_t pixel_idx = (input_center_row_no + filter_size / 2) * input_width + input_center_column_no;
            r[(threadIdx.x + filter_size / 2) * pixels_width + threadIdx.y + (filter_size / 2) * 2] = d_input[3 * pixel_idx + 0];
            g[(threadIdx.x + filter_size / 2) * pixels_width + threadIdx.y + (filter_size / 2) * 2] = d_input[3 * pixel_idx + 1];
            b[(threadIdx.x + filter_size / 2) * pixels_width + threadIdx.y + (filter_size / 2) * 2] = d_input[3 * pixel_idx + 2];
        }
        // non-ghost cells
        size_t pixel_idx = input_center_row_no * input_width + input_center_column_no;
        r[(threadIdx.x + filter_size / 2) * pixels_width + threadIdx.y + filter_size / 2] = d_input[3 * pixel_idx + 0];
        g[(threadIdx.x + filter_size / 2) * pixels_width + threadIdx.y + filter_size / 2] = d_input[3 * pixel_idx + 1];
        b[(threadIdx.x + filter_size / 2) * pixels_width + threadIdx.y + filter_size / 2] = d_input[3 * pixel_idx + 2];

        // Wait until everything has been copied into shared memory.
        __syncthreads();

        // In each thread, create the arrays that will be sorted to find the medians.
        uint16_t* window_r = new uint16_t[filter_size * filter_size];
        uint16_t* window_g = new uint16_t[filter_size * filter_size];
        uint16_t* window_b = new uint16_t[filter_size * filter_size];
        size_t window_idx = 0;

        // Fill the arrays.
        for (int i = 0; i < filter_size; i ++) {
            for (int j = 0; j < filter_size; j++) {
                window_r[window_idx] = r[(threadIdx.x + i) * pixels_width + threadIdx.y + j];
                window_g[window_idx] = g[(threadIdx.x + i) * pixels_width + threadIdx.y + j];
                window_b[window_idx] = b[(threadIdx.x + i) * pixels_width + threadIdx.y + j];
                window_idx += 1;
            }
        }

        // Sort the arrays.
        sort(window_r, filter_size * filter_size);
        sort(window_g, filter_size * filter_size);
        sort(window_b, filter_size * filter_size);

        // Take the median values.
        uint8_t r_median = static_cast<uint8_t>((window_r[filter_size * filter_size / 2] + window_r[(filter_size * filter_size + 1) / 2]) / 2);
        uint8_t g_median = static_cast<uint8_t>((window_g[filter_size * filter_size / 2] + window_g[(filter_size * filter_size + 1) / 2]) / 2);
        uint8_t b_median = static_cast<uint8_t>((window_b[filter_size * filter_size / 2] + window_b[(filter_size * filter_size + 1) / 2]) / 2);

        // Write the result.
        size_t output_center_idx = output_center_row_no * output_width + output_center_column_no;
        d_output[3 * output_center_idx + 0] = r_median;
        d_output[3 * output_center_idx + 1] = g_median;
        d_output[3 * output_center_idx + 2] = b_median;

        // Clean up the memory.
        delete[] window_r;
        delete[] window_g;
        delete[] window_b;
    }
}

int main(int argc, char *argv[]) {
    // An introduction to CUDA: https://developer.nvidia.com/blog/easy-introduction-cuda-c-and-c/
    assert(argc == 4); // SimpleFilter input.bmp output.bmp filter_size

    // Set the filter size.
    const size_t filter_size = std::stoul(argv[3]);

    // Read the source image.
    BMP input_image(argv[1]);
    assert(input_image.data.size() == 3 * input_image.bmp_info_header.width * input_image.bmp_info_header.height);

    // Create the filtered image object.
    BMP output_image(input_image.bmp_info_header.width - filter_size + 1,
                     input_image.bmp_info_header.height - filter_size + 1, false);
    assert(output_image.data.size() == 3 * output_image.bmp_info_header.width * output_image.bmp_info_header.height);

    // Allocate memory on the GPU and copy the source image to it.
    uint8_t* d_input, *d_output;
    hipMalloc(&d_input, input_image.data.size());
    hipMalloc(&d_output, output_image.data.size());
    hipMemcpy(d_input, input_image.data.data(), input_image.data.size(), hipMemcpyHostToDevice);

    // Run the filtering.
    // On thread indexing:
    // https://medium.com/@erangadulshan.14/1d-2d-and-3d-thread-allocation-for-loops-in-cuda-e0f908537a52
    // https://www.researchgate.net/post/The_optimal_number_of_threads_per_block_in_CUDA_programming
    dim3 dimBlock(16, 16);
    dim3 dimGrid((output_image.bmp_info_header.width + 15) / 16, (output_image.bmp_info_header.height + 15) / 16);
    // On dynamic shared memory: https://stackoverflow.com/questions/24942073/dynamic-shared-memory-in-cuda
    const size_t shared_memory_size = (dimBlock.x + (filter_size / 2) * 2) * (dimBlock.x + (filter_size / 2) * 2) * 3;
    hipLaunchKernelGGL(filter, dimGrid, dimBlock, shared_memory_size, 0,
            d_input, input_image.bmp_info_header.width, input_image.bmp_info_header.height,
            d_output, output_image.bmp_info_header.width, output_image.bmp_info_header.height,
            filter_size);

    // Copy the result back from the GPU.
    hipMemcpy(const_cast<uint8_t*>(output_image.data.data()), d_output, output_image.data.size(), hipMemcpyDeviceToHost);

    // Save it.
    output_image.write(argv[2]);

    // Free the memory.
    hipFree(d_input);
    hipFree(d_output);
}
1178580339f8d852f1904983a9d054d4f0f9cf69.cu
// Чтобы в Clion было удобнее работать, добавим некоторые определения. #ifdef __JETBRAINS_IDE__ #define __host__ #define __device__ #define __shared__ #define __constant__ #define __global__ // This is slightly mental, but gets it to properly index device function calls like __popc and whatever. #define __CUDACC__ #include <device_functions.h> // These headers are all implicitly present when you compile CUDA with clang. Clion doesn't know that, so // we include them explicitly to make the indexer happy. Doing this when you actually build is, obviously, // a terrible idea :D #include <__clang_cuda_builtin_vars.h> #include <__clang_cuda_intrinsics.h> #include <__clang_cuda_math_forward_declares.h> #include <__clang_cuda_complex_builtins.h> #include <__clang_cuda_cmath.h> #endif // __JETBRAINS_IDE__ #include <fstream> #include <cassert> #include "BMP.h" // https://github.com/sol-prog/cpp-bmp-images /** * @brief Простая сортировка элементов массива за n^2. * @details Эффективно только для маленьких массивов. * @details На маленьких массивах накладные расходы более сложных алгоритмов сортировки не всегда себя окупают. * @param arr Массив. * @param size Размер массива. */ __device__ void sort(uint16_t *arr, size_t size) { for (int i = 0; i < size; i++) { for(int j = i + 1; j < size; j++) { if (arr[j] < arr[i]) { uint16_t temp = arr[j]; arr[j] = arr[i]; arr[i] = temp; } } } } /** * @brief Медианный фильтр. * @param d_input Массив пикселей исходного изображения размером 3 * input_width * input_height. * @param input_width Ширина в пикселях исходного изображения. * @param input_height Высота в пикселях исходного изображения. * @param d_output Массив пикселей отфильтрованного изображения размером 3 * input_width * input_height. * @param output_width Ширина в пикселях отфильтрованного изображения. * @param output_height Высота в пикселях отфильтрованного изображения. * @param filter_size Размер области для поиска медианы (квадратная область filter_size * filter_size). */ __global__ void filter( uint8_t *d_input, size_t input_width, size_t input_height, uint8_t *d_output, size_t output_width, size_t output_height, size_t filter_size) { /** * Прочитал, что обращение к shared памяти может быть быстрее в сотни раз, чем обращение к global памяти. * В случаях, когда обращение к элементам производится более одного раза, * предварительное копирование элементов в shared память часто даёт хороший прирост производительности. * Решил попробовать копировать элементы из global в shared перед дальнейшей работой. * Dynamic shared memory допускает использование только одного массива в kernel. * Кроме того, необходимо использовать клетки-призраки на границах блоков. * Эти факторы привели к довольно страшной индексации элементов. */ // Адресуемся к пикселям исходного и итогового изображений. 
const int output_center_column_no = blockIdx.x * blockDim.x + threadIdx.x; const int output_center_row_no = blockIdx.y * blockDim.y + threadIdx.y; if ((output_center_column_no < output_width) && (output_center_row_no < output_height)) { size_t input_center_row_no = output_center_row_no + filter_size / 2; size_t input_center_column_no = output_center_column_no + filter_size / 2; // массивы пикселей, за которые отвечает данный блок, с клетками-призраками extern __shared__ uint8_t pixels[]; size_t pixels_width = blockDim.x + (filter_size / 2) * 2; size_t pixels_height = pixels_width; uint8_t* r = pixels; uint8_t* g = pixels + pixels_width * pixels_height; uint8_t* b = pixels + 2 * pixels_width * pixels_height; if (threadIdx.x > blockDim.x - filter_size / 2 - 1 && threadIdx.y > blockDim.y - filter_size / 2 - 1) { // правые нижние клетки-призраки size_t pixel_idx = (input_center_row_no + filter_size / 2) * input_width + input_center_column_no + filter_size / 2; r[(threadIdx.x + (filter_size / 2) * 2) * pixels_width + threadIdx.y + (filter_size / 2) * 2] = d_input[3 * pixel_idx + 0]; g[(threadIdx.x + (filter_size / 2) * 2) * pixels_width + threadIdx.y + (filter_size / 2) * 2] = d_input[3 * pixel_idx + 1]; b[(threadIdx.x + (filter_size / 2) * 2) * pixels_width + threadIdx.y + (filter_size / 2) * 2] = d_input[3 * pixel_idx + 2]; } if (threadIdx.x > blockDim.x - filter_size / 2 - 1 && threadIdx.y < filter_size / 2) { // правые верхние клетки-призраки size_t pixel_idx = (input_center_row_no - filter_size / 2) * input_width + (input_center_column_no + filter_size / 2); r[(threadIdx.x + (filter_size / 2) * 2) * pixels_width + threadIdx.y] = d_input[3 * pixel_idx + 0]; g[(threadIdx.x + (filter_size / 2) * 2) * pixels_width + threadIdx.y] = d_input[3 * pixel_idx + 1]; b[(threadIdx.x + (filter_size / 2) * 2) * pixels_width + threadIdx.y] = d_input[3 * pixel_idx + 2]; } if (threadIdx.x < filter_size / 2 && threadIdx.y > blockDim.y - filter_size / 2 - 1) { // левые нижние клетки-призраки size_t pixel_idx = (input_center_row_no + filter_size / 2) * input_width + (input_center_column_no - filter_size / 2); r[(threadIdx.x) * pixels_width + threadIdx.y + (filter_size / 2) * 2] = d_input[3 * pixel_idx + 0]; g[(threadIdx.x) * pixels_width + threadIdx.y + (filter_size / 2) * 2] = d_input[3 * pixel_idx + 1]; b[(threadIdx.x) * pixels_width + threadIdx.y + (filter_size / 2) * 2] = d_input[3 * pixel_idx + 2]; } if (threadIdx.x < filter_size / 2 && threadIdx.y < filter_size / 2) { // левые верхние клетки-призраки size_t pixel_idx = (input_center_row_no - filter_size / 2) * input_width + (input_center_column_no - filter_size / 2); r[(threadIdx.x) * pixels_width + threadIdx.y] = d_input[3 * pixel_idx + 0]; g[(threadIdx.x) * pixels_width + threadIdx.y] = d_input[3 * pixel_idx + 1]; b[(threadIdx.x) * pixels_width + threadIdx.y] = d_input[3 * pixel_idx + 2]; } if (threadIdx.x < filter_size / 2) { // левые клетки-призраки size_t pixel_idx = input_center_row_no * input_width + (input_center_column_no - filter_size / 2); r[(threadIdx.x) * pixels_width + threadIdx.y + filter_size / 2] = d_input[3 * pixel_idx + 0]; g[(threadIdx.x) * pixels_width + threadIdx.y + filter_size / 2] = d_input[3 * pixel_idx + 1]; b[(threadIdx.x) * pixels_width + threadIdx.y + filter_size / 2] = d_input[3 * pixel_idx + 2]; } if (threadIdx.x > blockDim.x - filter_size / 2 - 1) { // правые клетки-призраки size_t pixel_idx = input_center_row_no * input_width + (input_center_column_no + filter_size / 2); r[(threadIdx.x + (filter_size / 2) * 2) * 
pixels_width + threadIdx.y + filter_size / 2] = d_input[3 * pixel_idx + 0]; g[(threadIdx.x + (filter_size / 2) * 2) * pixels_width + threadIdx.y + filter_size / 2] = d_input[3 * pixel_idx + 1]; b[(threadIdx.x + (filter_size / 2) * 2) * pixels_width + threadIdx.y + filter_size / 2] = d_input[3 * pixel_idx + 2]; } if (threadIdx.y < filter_size / 2) { // верхние клетки-призраки size_t pixel_idx = (input_center_row_no - filter_size / 2) * input_width + input_center_column_no; r[(threadIdx.x + filter_size / 2) * pixels_width + threadIdx.y] = d_input[3 * pixel_idx + 0]; g[(threadIdx.x + filter_size / 2) * pixels_width + threadIdx.y] = d_input[3 * pixel_idx + 1]; b[(threadIdx.x + filter_size / 2) * pixels_width + threadIdx.y] = d_input[3 * pixel_idx + 2]; } if (threadIdx.y > blockDim.y - filter_size / 2 - 1) { // нижние клетки-призраки size_t pixel_idx = (input_center_row_no + filter_size / 2) * input_width + input_center_column_no; r[(threadIdx.x + filter_size / 2) * pixels_width + threadIdx.y + (filter_size / 2) * 2] = d_input[3 * pixel_idx + 0]; g[(threadIdx.x + filter_size / 2) * pixels_width + threadIdx.y + (filter_size / 2) * 2] = d_input[3 * pixel_idx + 1]; b[(threadIdx.x + filter_size / 2) * pixels_width + threadIdx.y + (filter_size / 2) * 2] = d_input[3 * pixel_idx + 2]; } // клетки не призраки size_t pixel_idx = input_center_row_no * input_width + input_center_column_no; r[(threadIdx.x + filter_size / 2) * pixels_width + threadIdx.y + filter_size / 2] = d_input[3 * pixel_idx + 0]; g[(threadIdx.x + filter_size / 2) * pixels_width + threadIdx.y + filter_size / 2] = d_input[3 * pixel_idx + 1]; b[(threadIdx.x + filter_size / 2) * pixels_width + threadIdx.y + filter_size / 2] = d_input[3 * pixel_idx + 2]; // Дожидаемся, когда всё будет скопировано в shared память. __syncthreads(); // Создаём в каждой нити массивы, которые будем сортировать для поиска медианы. uint16_t* window_r = new uint16_t[filter_size * filter_size]; uint16_t* window_g = new uint16_t[filter_size * filter_size]; uint16_t* window_b = new uint16_t[filter_size * filter_size]; size_t window_idx = 0; // Заполняем массивы. for (int i = 0; i < filter_size; i ++) { for (int j = 0; j < filter_size; j++) { window_r[window_idx] = r[(threadIdx.x + i) * pixels_width + threadIdx.y + j]; window_g[window_idx] = g[(threadIdx.x + i) * pixels_width + threadIdx.y + j]; window_b[window_idx] = b[(threadIdx.x + i) * pixels_width + threadIdx.y + j]; window_idx += 1; } } // Сортируем массивы. sort(window_r, filter_size * filter_size); sort(window_g, filter_size * filter_size); sort(window_b, filter_size * filter_size); // Получаем значения медиан. uint8_t r_median = static_cast<uint8_t>((window_r[filter_size * filter_size / 2] + window_r[(filter_size * filter_size + 1) / 2]) / 2); uint8_t g_median = static_cast<uint8_t>((window_g[filter_size * filter_size / 2] + window_g[(filter_size * filter_size + 1) / 2]) / 2); uint8_t b_median = static_cast<uint8_t>((window_b[filter_size * filter_size / 2] + window_b[(filter_size * filter_size + 1) / 2]) / 2); // Записываем результат. size_t output_center_idx = output_center_row_no * output_width + output_center_column_no; d_output[3 * output_center_idx + 0] = r_median; d_output[3 * output_center_idx + 1] = g_median; d_output[3 * output_center_idx + 2] = b_median; // Чистим память. 
delete[] window_r; delete[] window_g; delete[] window_b; } } int main(int argc, char *argv[]) { // Introduction to CUDA: https://developer.nvidia.com/blog/easy-introduction-cuda-c-and-c/ assert(argc == 4); // SimpleFilter input.bmp output.bmp filter_size // Set the filter size. const size_t filter_size = std::stoul(argv[3]); // Read the source image. BMP input_image(argv[1]); assert(input_image.data.size() == 3 * input_image.bmp_info_header.width * input_image.bmp_info_header.height); // Create the filtered image object. BMP output_image(input_image.bmp_info_header.width - filter_size + 1, input_image.bmp_info_header.height - filter_size + 1, false); assert(output_image.data.size() == 3 * output_image.bmp_info_header.width * output_image.bmp_info_header.height); // Allocate memory on the GPU and copy the source image to it. uint8_t* d_input, *d_output; cudaMalloc(&d_input, input_image.data.size()); cudaMalloc(&d_output, output_image.data.size()); cudaMemcpy(d_input, input_image.data.data(), input_image.data.size(), cudaMemcpyHostToDevice); // Run the filtering. // On indexing: // https://medium.com/@erangadulshan.14/1d-2d-and-3d-thread-allocation-for-loops-in-cuda-e0f908537a52 // https://www.researchgate.net/post/The_optimal_number_of_threads_per_block_in_CUDA_programming dim3 dimBlock(16, 16); dim3 dimGrid((output_image.bmp_info_header.width + 15) / 16, (output_image.bmp_info_header.height + 15) / 16); // On dynamic shared memory: https://stackoverflow.com/questions/24942073/dynamic-shared-memory-in-cuda const size_t shared_memory_size = (dimBlock.x + (filter_size / 2) * 2) * (dimBlock.x + (filter_size / 2) * 2) * 3; filter<<<dimGrid, dimBlock, shared_memory_size>>>( d_input, input_image.bmp_info_header.width, input_image.bmp_info_header.height, d_output, output_image.bmp_info_header.width, output_image.bmp_info_header.height, filter_size); // Copy the result back from the GPU. cudaMemcpy(const_cast<uint8_t*>(output_image.data.data()), d_output, output_image.data.size(), cudaMemcpyDeviceToHost); // Save. output_image.write(argv[2]); // Free the memory. cudaFree(d_input); cudaFree(d_output); }
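Neither launch in the file above is checked for errors, so a bad configuration (for instance a filter_size large enough that shared_memory_size exceeds the device's dynamic shared-memory limit) fails silently and the output image is simply garbage. A minimal sketch of launch checking, using only the standard CUDA runtime API; the CHECK_CUDA macro name is an assumption, not part of the original file.

#include <cstdio>
#include <cstdlib>
#include "cuda_runtime.h"

// Illustrative helper, not from the original file: aborts with a readable
// message when any runtime call or kernel launch reports an error.
#define CHECK_CUDA(call)                                                  \
    do {                                                                  \
        cudaError_t err = (call);                                         \
        if (err != cudaSuccess) {                                         \
            fprintf(stderr, "CUDA error %s at %s:%d\n",                   \
                    cudaGetErrorString(err), __FILE__, __LINE__);         \
            exit(EXIT_FAILURE);                                           \
        }                                                                 \
    } while (0)

// Usage after a launch such as filter<<<dimGrid, dimBlock, shared_memory_size>>>(...):
//   CHECK_CUDA(cudaGetLastError());       // catches launch-configuration errors
//   CHECK_CUDA(cudaDeviceSynchronize());  // catches errors raised during execution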
71bf76f8d67632df0922dd36ad8eb0cdd11af9a4.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include <stdlib.h> #define N 1024 __global__ void getMaxValueOfRow(float *d_arr, float *maxArray) { int t = threadIdx.x; int bid = blockIdx.x; for (int stride = 1; stride < blockDim.x; stride *= 2) { __syncthreads(); if (t % (2 * stride) == 0) d_arr[t + (bid % N) * N] = d_arr[t + (bid % N) * N + stride] > d_arr[t + (bid % N) * N] ? d_arr[t + (bid % N) * N + stride] : d_arr[t + (bid % N) * N]; } maxArray[bid % N] = d_arr[t + (bid % N) * N]; } int main() { float *h_arr, *d_arr, *h_maxArray, *d_maxArray; int total = N * N; int mem_size = total * sizeof(float); h_arr = (float *) malloc(mem_size); h_maxArray = (float *) malloc(N * sizeof(float)); memset(h_maxArray, 0, N); for (int i = 0; i < N * N; i++) { h_arr[i] = i + 1; } hipMalloc((void **) &d_arr, mem_size); hipMalloc((void **) &d_maxArray, N * sizeof(float)); hipMemcpy(d_arr, h_arr, mem_size, hipMemcpyHostToDevice); hipMemcpy(d_maxArray, h_maxArray, N * sizeof(float), hipMemcpyHostToDevice); dim3 threadsPerBlock(N); dim3 blocksPerGrid(N); hipLaunchKernelGGL(( getMaxValueOfRow) , dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_arr, d_maxArray); hipMemcpy(h_maxArray, d_maxArray, N * sizeof(float), hipMemcpyDeviceToHost); for(int i = 0; i < N; i++){ printf("The max number of row %d :%.f\n", i, h_maxArray[i]); } free(h_arr); free(h_maxArray); hipFree(d_arr); hipFree(d_maxArray); return 0; }
71bf76f8d67632df0922dd36ad8eb0cdd11af9a4.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include <stdlib.h> #define N 1024 __global__ void getMaxValueOfRow(float *d_arr, float *maxArray) { int t = threadIdx.x; int bid = blockIdx.x; for (int stride = 1; stride < blockDim.x; stride *= 2) { __syncthreads(); if (t % (2 * stride) == 0) d_arr[t + (bid % N) * N] = d_arr[t + (bid % N) * N + stride] > d_arr[t + (bid % N) * N] ? d_arr[t + (bid % N) * N + stride] : d_arr[t + (bid % N) * N]; } maxArray[bid % N] = d_arr[t + (bid % N) * N]; } int main() { float *h_arr, *d_arr, *h_maxArray, *d_maxArray; int total = N * N; int mem_size = total * sizeof(float); h_arr = (float *) malloc(mem_size); h_maxArray = (float *) malloc(N * sizeof(float)); memset(h_maxArray, 0, N); for (int i = 0; i < N * N; i++) { h_arr[i] = i + 1; } cudaMalloc((void **) &d_arr, mem_size); cudaMalloc((void **) &d_maxArray, N * sizeof(float)); cudaMemcpy(d_arr, h_arr, mem_size, cudaMemcpyHostToDevice); cudaMemcpy(d_maxArray, h_maxArray, N * sizeof(float), cudaMemcpyHostToDevice); dim3 threadsPerBlock(N); dim3 blocksPerGrid(N); getMaxValueOfRow <<<blocksPerGrid, threadsPerBlock>>> (d_arr, d_maxArray); cudaMemcpy(h_maxArray, d_maxArray, N * sizeof(float), cudaMemcpyDeviceToHost); for(int i = 0; i < N; i++){ printf("The max number of row %d :%.f\n", i, h_maxArray[i]); } free(h_arr); free(h_maxArray); cudaFree(d_arr); cudaFree(d_maxArray); return 0; }
84228edcf07eaf6ac96adfa964b25a46800a0230.hip
// !!! This is a file automatically generated by hipify!!! #include "THHUNN.h" #include "common.h" #include "vol2col.h" #include "THHHalf.h" #include "THHHalfAutoNumerics.cuh" #include "generic/VolumetricDilatedConvolution.cu" #include "THHGenerateFloatTypes.h"
84228edcf07eaf6ac96adfa964b25a46800a0230.cu
#include "THCUNN.h" #include "common.h" #include "vol2col.h" #include "THCHalf.h" #include "THCHalfAutoNumerics.cuh" #include "generic/VolumetricDilatedConvolution.cu" #include "THCGenerateFloatTypes.h"
a2e4aefd0df5375ed9e859e4b6d92d484d4ed671.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 1.3.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver November 2012 @precisions normal z -> s d c */ #include "common_magma.h" #define PRECISION_z #include "commonblas.h" __global__ void ztranspose3_32( hipDoubleComplex *B, int ldb, const hipDoubleComplex *A, int lda, int m, int m32, int n, int n32) { __shared__ hipDoubleComplex a[32][ZSIZE_1SHARED+1]; int inx = threadIdx.x; int iny = threadIdx.y; int ibx = blockIdx.x*32; int iby = blockIdx.y*32; A += ibx + inx + __mul24( iby + iny, lda ); B += iby + inx + __mul24( ibx + iny, ldb ); int t2 = iby+iny; if (ibx+inx<m) { if (t2 <n) { a[iny+0][inx] = A[0*lda]; if (t2+ 8<n) { a[iny+8][inx] = A[8*lda]; if (t2 + 16<n) { a[iny+16][inx] = A[16*lda]; if (t2 + 24<n) a[iny+24][inx] = A[24*lda]; } } } } __syncthreads(); #if defined(PRECISION_s) || defined(PRECISION_d) || defined(PRECISION_c) if (iby + inx < n){ if (ibx + iny <m){ B[0*ldb] = a[inx][iny+0]; if (ibx + iny + 8 <m){ B[8*ldb] = a[inx][iny+8]; if (ibx + iny +16 <m){ B[16*ldb] = a[inx][iny+16]; if (ibx + iny + 24 <m) B[24*ldb] = a[inx][iny+24]; } } } } #else /* defined(PRECISION_z) */ if (iby + inx < n){ if (ibx + iny <m){ B[0*ldb] = a[inx][iny+0]; if (ibx + iny + 8 <m){ B[8*ldb] = a[inx][iny+8]; } } if (iby + inx + 16 < n) { if (ibx + iny <m){ B[0*ldb+16] = a[inx+16][iny+0]; if (ibx + iny + 8 < m){ B[8*ldb+16] = a[inx+16][iny+8]; } } } } __syncthreads(); A += ZSIZE_1SHARED; B += __mul24( 16, ldb); a[iny+0][inx] = A[0*lda]; a[iny+8][inx] = A[8*lda]; a[iny+16][inx] = A[16*lda]; a[iny+24][inx] = A[24*lda]; __syncthreads(); if (iby + inx < n){ if (ibx + iny + 16 <m){ B[0*ldb] = a[inx][iny+0]; if (ibx + iny + 24 <m){ B[8*ldb] = a[inx][iny+8]; } } if (iby + inx + 16 < n) { if (ibx + iny + 16 <m){ B[0*ldb+16] = a[inx+16][iny+0]; if (ibx + iny + 24 <m){ B[8*ldb+16] = a[inx+16][iny+8]; } } } } #endif } __global__ void ztranspose2_32( hipDoubleComplex *B, int ldb, const hipDoubleComplex *A, int lda, int m, int m32, int n, int n32) { __shared__ hipDoubleComplex a[32][ZSIZE_1SHARED+1]; int inx = threadIdx.x; int iny = threadIdx.y; int ibx = blockIdx.x*32; int iby = blockIdx.y*32; int dx, dy; if (ibx+32<m) dx = 0; else dx = m32; if (iby+32<n) dy = 0; else dy = n32; A += ibx + inx -dx + __mul24( iby + iny - dy, lda ); B += iby + inx -dy + __mul24( ibx + iny - dx, ldb ); a[iny+0][inx] = A[0*lda]; a[iny+8][inx] = A[8*lda]; a[iny+16][inx] = A[16*lda]; a[iny+24][inx] = A[24*lda]; __syncthreads(); #if defined(PRECISION_s) || defined(PRECISION_d) || defined(PRECISION_c) B[0*ldb] = a[inx][iny+0]; B[8*ldb] = a[inx][iny+8]; B[16*ldb] = a[inx][iny+16]; B[24*ldb] = a[inx][iny+24]; #else /* defined(PRECISION_z) */ B[0*ldb] = a[inx][iny+0]; B[8*ldb] = a[inx][iny+8]; B[0*ldb+16] = a[inx+16][iny+0]; B[8*ldb+16] = a[inx+16][iny+8]; __syncthreads(); A += ZSIZE_1SHARED; B += __mul24( 16, ldb); a[iny+0][inx] = A[0*lda]; a[iny+8][inx] = A[8*lda]; a[iny+16][inx] = A[16*lda]; a[iny+24][inx] = A[24*lda]; __syncthreads(); B[0*ldb] = a[inx][iny+0]; B[8*ldb] = a[inx][iny+8]; B[0*ldb+16] = a[inx+16][iny+0]; B[8*ldb+16] = a[inx+16][iny+8]; #endif } // // m, n - dimensions in the source (input) matrix // This version transposes for general m, n . // Note that ldi >= m and ldo >= n. 
// extern "C" void magmablas_ztranspose2(hipDoubleComplex *odata, magma_int_t ldo, const hipDoubleComplex *idata, magma_int_t ldi, magma_int_t m, magma_int_t n ) { /* Quick return */ if ( (m == 0) || (n == 0) ) return; dim3 threads( ZSIZE_1SHARED, 8, 1 ); dim3 grid( (m+31)/32, (n+31)/32, 1 ); hipLaunchKernelGGL(( ztranspose3_32), dim3(grid), dim3(threads), 0, magma_stream , odata, ldo, idata, ldi, // m, m%32, n, n%32); m, (32-m%32)%32, n, (32-n%32)%32); } extern "C" void magmablas_ztranspose2s(hipDoubleComplex *odata, magma_int_t ldo, const hipDoubleComplex *idata, magma_int_t ldi, magma_int_t m, magma_int_t n, hipStream_t *stream ) { /* Quick return */ if ( (m == 0) || (n == 0) ) return; dim3 threads( ZSIZE_1SHARED, 8, 1 ); dim3 grid( (m+31)/32, (n+31)/32, 1 ); hipLaunchKernelGGL(( ztranspose3_32), dim3(grid), dim3(threads), 0, *stream , odata, ldo, idata, ldi, // m, m%32, n, n%32); m, (32-m%32)%32, n, (32-n%32)%32); }
a2e4aefd0df5375ed9e859e4b6d92d484d4ed671.cu
/* -- MAGMA (version 1.3.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver November 2012 @precisions normal z -> s d c */ #include "common_magma.h" #define PRECISION_z #include "commonblas.h" __global__ void ztranspose3_32( cuDoubleComplex *B, int ldb, const cuDoubleComplex *A, int lda, int m, int m32, int n, int n32) { __shared__ cuDoubleComplex a[32][ZSIZE_1SHARED+1]; int inx = threadIdx.x; int iny = threadIdx.y; int ibx = blockIdx.x*32; int iby = blockIdx.y*32; A += ibx + inx + __mul24( iby + iny, lda ); B += iby + inx + __mul24( ibx + iny, ldb ); int t2 = iby+iny; if (ibx+inx<m) { if (t2 <n) { a[iny+0][inx] = A[0*lda]; if (t2+ 8<n) { a[iny+8][inx] = A[8*lda]; if (t2 + 16<n) { a[iny+16][inx] = A[16*lda]; if (t2 + 24<n) a[iny+24][inx] = A[24*lda]; } } } } __syncthreads(); #if defined(PRECISION_s) || defined(PRECISION_d) || defined(PRECISION_c) if (iby + inx < n){ if (ibx + iny <m){ B[0*ldb] = a[inx][iny+0]; if (ibx + iny + 8 <m){ B[8*ldb] = a[inx][iny+8]; if (ibx + iny +16 <m){ B[16*ldb] = a[inx][iny+16]; if (ibx + iny + 24 <m) B[24*ldb] = a[inx][iny+24]; } } } } #else /* defined(PRECISION_z) */ if (iby + inx < n){ if (ibx + iny <m){ B[0*ldb] = a[inx][iny+0]; if (ibx + iny + 8 <m){ B[8*ldb] = a[inx][iny+8]; } } if (iby + inx + 16 < n) { if (ibx + iny <m){ B[0*ldb+16] = a[inx+16][iny+0]; if (ibx + iny + 8 < m){ B[8*ldb+16] = a[inx+16][iny+8]; } } } } __syncthreads(); A += ZSIZE_1SHARED; B += __mul24( 16, ldb); a[iny+0][inx] = A[0*lda]; a[iny+8][inx] = A[8*lda]; a[iny+16][inx] = A[16*lda]; a[iny+24][inx] = A[24*lda]; __syncthreads(); if (iby + inx < n){ if (ibx + iny + 16 <m){ B[0*ldb] = a[inx][iny+0]; if (ibx + iny + 24 <m){ B[8*ldb] = a[inx][iny+8]; } } if (iby + inx + 16 < n) { if (ibx + iny + 16 <m){ B[0*ldb+16] = a[inx+16][iny+0]; if (ibx + iny + 24 <m){ B[8*ldb+16] = a[inx+16][iny+8]; } } } } #endif } __global__ void ztranspose2_32( cuDoubleComplex *B, int ldb, const cuDoubleComplex *A, int lda, int m, int m32, int n, int n32) { __shared__ cuDoubleComplex a[32][ZSIZE_1SHARED+1]; int inx = threadIdx.x; int iny = threadIdx.y; int ibx = blockIdx.x*32; int iby = blockIdx.y*32; int dx, dy; if (ibx+32<m) dx = 0; else dx = m32; if (iby+32<n) dy = 0; else dy = n32; A += ibx + inx -dx + __mul24( iby + iny - dy, lda ); B += iby + inx -dy + __mul24( ibx + iny - dx, ldb ); a[iny+0][inx] = A[0*lda]; a[iny+8][inx] = A[8*lda]; a[iny+16][inx] = A[16*lda]; a[iny+24][inx] = A[24*lda]; __syncthreads(); #if defined(PRECISION_s) || defined(PRECISION_d) || defined(PRECISION_c) B[0*ldb] = a[inx][iny+0]; B[8*ldb] = a[inx][iny+8]; B[16*ldb] = a[inx][iny+16]; B[24*ldb] = a[inx][iny+24]; #else /* defined(PRECISION_z) */ B[0*ldb] = a[inx][iny+0]; B[8*ldb] = a[inx][iny+8]; B[0*ldb+16] = a[inx+16][iny+0]; B[8*ldb+16] = a[inx+16][iny+8]; __syncthreads(); A += ZSIZE_1SHARED; B += __mul24( 16, ldb); a[iny+0][inx] = A[0*lda]; a[iny+8][inx] = A[8*lda]; a[iny+16][inx] = A[16*lda]; a[iny+24][inx] = A[24*lda]; __syncthreads(); B[0*ldb] = a[inx][iny+0]; B[8*ldb] = a[inx][iny+8]; B[0*ldb+16] = a[inx+16][iny+0]; B[8*ldb+16] = a[inx+16][iny+8]; #endif } // // m, n - dimensions in the source (input) matrix // This version transposes for general m, n . // Note that ldi >= m and ldo >= n. 
// extern "C" void magmablas_ztranspose2(cuDoubleComplex *odata, magma_int_t ldo, const cuDoubleComplex *idata, magma_int_t ldi, magma_int_t m, magma_int_t n ) { /* Quick return */ if ( (m == 0) || (n == 0) ) return; dim3 threads( ZSIZE_1SHARED, 8, 1 ); dim3 grid( (m+31)/32, (n+31)/32, 1 ); ztranspose3_32<<< grid, threads, 0, magma_stream >>>( odata, ldo, idata, ldi, // m, m%32, n, n%32); m, (32-m%32)%32, n, (32-n%32)%32); } extern "C" void magmablas_ztranspose2s(cuDoubleComplex *odata, magma_int_t ldo, const cuDoubleComplex *idata, magma_int_t ldi, magma_int_t m, magma_int_t n, cudaStream_t *stream ) { /* Quick return */ if ( (m == 0) || (n == 0) ) return; dim3 threads( ZSIZE_1SHARED, 8, 1 ); dim3 grid( (m+31)/32, (n+31)/32, 1 ); ztranspose3_32<<< grid, threads, 0, *stream >>>( odata, ldo, idata, ldi, // m, m%32, n, n%32); m, (32-m%32)%32, n, (32-n%32)%32); }
5e17022f4a0a458d4da9d2f35453e6cd1dbfe722.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <stdlib.h> #include <math.h> #include "hip/hip_runtime.h" __global__ void reduction(const int N, float *a, float *result) { int thread = threadIdx.x; int block = blockIdx.x; int blockSize = blockDim.x; int gridSize = gridDim.x; //unique global thread ID int id = thread + block*blockSize; __volatile__ __shared__ float s_sum[256]; float sum = 0; for (int i=0; i<4; i++){ if(id+i*blockSize*gridSize<N){ sum += a[id+i*blockSize*gridSize]; //add the thread's id to start } } s_sum[thread] = sum; __syncthreads(); //make sure the write to shared is finished if (thread<128) {//first half s_sum[thread] += s_sum[thread+128]; } __syncthreads(); //make sure the write to shared is finished if (thread<64) {//next half s_sum[thread] += s_sum[thread+64]; } __syncthreads(); //make sure the write to shared is finished if (thread<32) {//next half s_sum[thread] += s_sum[thread+32]; } __syncthreads(); //make sure the write to shared is finished if (thread<16) {//next half s_sum[thread] += s_sum[thread+16]; } __syncthreads(); //make sure the write to shared is finished if (thread<8) {//next half s_sum[thread] += s_sum[thread+8]; } __syncthreads(); //make sure the write to shared is finished if (thread<4) {//next half s_sum[thread] += s_sum[thread+4]; } __syncthreads(); //make sure the write to shared is finished if (thread<2) {//next half s_sum[thread] += s_sum[thread+2]; } __syncthreads(); //make sure the write to shared is finished if (thread<1) {//final piece s_sum[thread] += s_sum[thread+1]; result[block] = s_sum[thread]; } } //perform a reduction on a vector of length N int main (int argc, char **argv) { int N = atoi(argv[1]); double seed=clock(); srand48(seed); //allocate memory on host float *h_a = (float*) malloc(N*sizeof(float)); //populate with random data for (int n=0;n<N;n++) { h_a[n] = drand48(); } //perform the reduction on host float sum = 0.; for (int n=0;n<N;n++) { sum += h_a[n]; } printf("The Host's sum was %f \n", sum); float *d_a, *d_sum; int Nnew = (N+256-1)/256; //size of the 'reduced' vector hipMalloc(&d_a, N*sizeof(float)); hipMalloc(&d_sum, Nnew*sizeof(float)); float *h_sum = (float*) malloc(Nnew*sizeof(float)); //populate the device array with the same data as the host hipMemcpy(d_a,h_a,N*sizeof(float),hipMemcpyHostToDevice); do { Nnew = (N+4*256-1)/(4*256); //block dimensions dim3 B(256,1,1); //grid dimensions dim3 G(Nnew,1,1); hipLaunchKernelGGL(( reduction) , dim3(G),dim3(B) , 0, 0, N, d_a, d_sum); //overwrite the a vector with the partially reduced vector hipMemcpy(d_a,d_sum,Nnew*sizeof(float),hipMemcpyDeviceToDevice); N = Nnew; } while (Nnew>1); hipMemcpy(h_sum,d_sum,1*sizeof(float),hipMemcpyDeviceToHost); printf("The Device's sum was %f \n", h_sum[0]); free(h_sum); free(h_a); hipFree(d_a); hipFree(d_sum); return 0; }
5e17022f4a0a458d4da9d2f35453e6cd1dbfe722.cu
#include <stdio.h> #include <stdlib.h> #include <math.h> #include "cuda.h" __global__ void reduction(const int N, float *a, float *result) { int thread = threadIdx.x; int block = blockIdx.x; int blockSize = blockDim.x; int gridSize = gridDim.x; //unique global thread ID int id = thread + block*blockSize; __volatile__ __shared__ float s_sum[256]; float sum = 0; for (int i=0; i<4; i++){ if(id+i*blockSize*gridSize<N){ sum += a[id+i*blockSize*gridSize]; //add the thread's id to start } } s_sum[thread] = sum; __syncthreads(); //make sure the write to shared is finished if (thread<128) {//first half s_sum[thread] += s_sum[thread+128]; } __syncthreads(); //make sure the write to shared is finished if (thread<64) {//next half s_sum[thread] += s_sum[thread+64]; } __syncthreads(); //make sure the write to shared is finished if (thread<32) {//next half s_sum[thread] += s_sum[thread+32]; } __syncthreads(); //make sure the write to shared is finished if (thread<16) {//next half s_sum[thread] += s_sum[thread+16]; } __syncthreads(); //make sure the write to shared is finished if (thread<8) {//next half s_sum[thread] += s_sum[thread+8]; } __syncthreads(); //make sure the write to shared is finished if (thread<4) {//next half s_sum[thread] += s_sum[thread+4]; } __syncthreads(); //make sure the write to shared is finished if (thread<2) {//next half s_sum[thread] += s_sum[thread+2]; } __syncthreads(); //make sure the write to shared is finished if (thread<1) {//final piece s_sum[thread] += s_sum[thread+1]; result[block] = s_sum[thread]; } } //perform a reduction on a vector of length N int main (int argc, char **argv) { int N = atoi(argv[1]); double seed=clock(); srand48(seed); //allocate memory on host float *h_a = (float*) malloc(N*sizeof(float)); //populate with random data for (int n=0;n<N;n++) { h_a[n] = drand48(); } //perform the reduction on host float sum = 0.; for (int n=0;n<N;n++) { sum += h_a[n]; } printf("The Host's sum was %f \n", sum); float *d_a, *d_sum; int Nnew = (N+256-1)/256; //size of the 'reduced' vector cudaMalloc(&d_a, N*sizeof(float)); cudaMalloc(&d_sum, Nnew*sizeof(float)); float *h_sum = (float*) malloc(Nnew*sizeof(float)); //populate the device array with the same data as the host cudaMemcpy(d_a,h_a,N*sizeof(float),cudaMemcpyHostToDevice); do { Nnew = (N+4*256-1)/(4*256); //block dimensions dim3 B(256,1,1); //grid dimensions dim3 G(Nnew,1,1); reduction <<< G,B >>> (N, d_a, d_sum); //overwrite the a vector with the partially reduced vector cudaMemcpy(d_a,d_sum,Nnew*sizeof(float),cudaMemcpyDeviceToDevice); N = Nnew; } while (Nnew>1); cudaMemcpy(h_sum,d_sum,1*sizeof(float),cudaMemcpyDeviceToHost); printf("The Device's sum was %f \n", h_sum[0]); free(h_sum); free(h_a); cudaFree(d_a); cudaFree(d_sum); return 0; }
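The host loop in the pair above relaunches the kernel until a single partial sum remains, with a device-to-device copy between passes. A hedged single-pass alternative folds each block's partial sum into the result with one atomicAdd per block; atomicAdd on float is available on all devices of compute capability 2.0 and later. Launched with 256 threads per block to match s_sum; the kernel name is illustrative, not from the file.

__global__ void reductionAtomic(const int N, const float *a, float *result) {
    __shared__ float s_sum[256];
    int id = threadIdx.x + blockIdx.x * blockDim.x;
    float sum = 0;
    for (int i = id; i < N; i += blockDim.x * gridDim.x)  // grid-stride loop over the input
        sum += a[i];
    s_sum[threadIdx.x] = sum;
    __syncthreads();
    for (int stride = blockDim.x / 2; stride > 0; stride >>= 1) {
        if (threadIdx.x < stride)
            s_sum[threadIdx.x] += s_sum[threadIdx.x + stride];
        __syncthreads();
    }
    if (threadIdx.x == 0) atomicAdd(result, s_sum[0]);    // one atomic per block
}
// *result must be zeroed (e.g. with cudaMemset) before the launch; note that
// float atomics make the summation order, and hence the rounding, nondeterministic.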
aca21f92aea80ec5d291040548693c7701dedd24.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <hipcub/hipcub.hpp> #include "caffe2/core/context_gpu.h" #include "caffe2/operators/distance_op.h" #include "caffe2/utils/conversions.h" #include <hipcub/hipcub.hpp> namespace caffe2 { namespace { template <typename T> __global__ void SquaredL2DistanceKernel( const int N, const int D, const T* X, const T* Y, T* distance) { typedef hipcub::BlockReduce<float, CAFFE_CUDA_NUM_THREADS> BlockReduce; __shared__ typename BlockReduce::TempStorage temp_storage; for (int i = blockIdx.x; i < N; i += gridDim.x) { float dist = 0.0; for (int j = threadIdx.x; j < D; j += blockDim.x) { T diff = X[i * D + j] - Y[i * D + j]; dist += diff * diff; } float total_dist = BlockReduce(temp_storage).Sum(dist); __syncthreads(); if (threadIdx.x == 0) { distance[i] = total_dist / 2.0; } } } } // namespace template <> bool SquaredL2DistanceOp<float, CUDAContext>::RunOnDevice() { auto& X = Input(0); auto& Y = Input(1); auto* distance = Output(0); CAFFE_ENFORCE_EQ(X.ndim(), Y.ndim()); for (int i = 0; i < X.ndim(); ++i) { CAFFE_ENFORCE_EQ( X.dim32(i), Y.dim32(i), "Mismatch in dimensions", X.dims(), " / ", Y.dims()); } int N = X.ndim() > 0 ? X.dim32(0) : 1; int D = X.size() / N; distance->Resize(vector<TIndex>(size_t(1), N)); hipLaunchKernelGGL(( SquaredL2DistanceKernel), dim3(::min(N, CAFFE_MAXIMUM_NUM_BLOCKS)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), N, D, X.data<float>(), Y.data<float>(), distance->template mutable_data<float>()); return true; } namespace { template <typename T> __global__ void StripedScaleKernel(const int N, const int D, const T* alpha, const T* x, T* y) { CUDA_1D_KERNEL_LOOP(i, N * D) { int k = i / D; y[i] = x[i] * alpha[k]; } } } template <> bool SquaredL2DistanceGradientOp<float, CUDAContext>::RunOnDevice() { auto& X = Input(0); auto& Y = Input(1); auto& dDistance = Input(2); auto* dX = Output(0); auto* dY = Output(1); int N = X.ndim() > 0 ? X.dim32(0) : 1; int D = N > 0 ? X.size() / N : 0; CAFFE_ENFORCE(X.ndim() == Y.ndim()); for (int i = 0; i < X.ndim(); ++i) { CAFFE_ENFORCE_EQ( X.dim32(i), Y.dim32(i), "Mismatch on dimensions: ", X.dims(), " / ", Y.dims()); } CAFFE_ENFORCE_EQ(dDistance.ndim(), 1); CAFFE_ENFORCE_EQ(dDistance.dim32(0), N); dX->ResizeLike(X); dY->ResizeLike(Y); math::Sub<float, CUDAContext>( X.size(), X.data<float>(), Y.data<float>(), dX->template mutable_data<float>(), &context_); hipLaunchKernelGGL(( StripedScaleKernel<float>) , dim3(CAFFE_GET_BLOCKS(N * D)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), N, D, dDistance.data<float>(), dX->data<float>(), dX->template mutable_data<float>()); // The gradient of the other side is basically the negative. 
math::Scale<float, CUDAContext>( X.size(), -1, dX->data<float>(), dY->template mutable_data<float>(), &context_); return true; } namespace { template <typename T> __global__ void L1DistanceKernel( const int N, const int D, const T* X, const T* Y, T* distance) { typedef hipcub::BlockReduce<float, CAFFE_CUDA_NUM_THREADS> BlockReduce; __shared__ typename BlockReduce::TempStorage temp_storage; for (int i = blockIdx.x; i < N; i += gridDim.x) { float sum = 0.0f; for (int j = threadIdx.x; j < D; j += blockDim.x) { sum += fabsf( convert::To<T, float>(X[i * D + j]) - convert::To<T, float>(Y[i * D + j])); } float aggregate = BlockReduce(temp_storage).Sum(sum); __syncthreads(); if (threadIdx.x == 0) { distance[i] = aggregate; } } } } // namespace template <> bool L1DistanceOp<float, CUDAContext>::RunOnDevice() { auto& X = Input(0); auto& Y = Input(1); auto* distance = Output(0); CAFFE_ENFORCE_EQ(X.ndim(), Y.ndim()); for (int i = 0; i < X.ndim(); ++i) { CAFFE_ENFORCE_EQ(X.dim32(i), Y.dim32(i)); } const int N = X.ndim() > 0 ? X.dim32(0) : 1; const int D = N > 0 ? X.size() / N : 0; distance->Resize(vector<TIndex>(size_t(1), N)); hipLaunchKernelGGL(( L1DistanceKernel), dim3(::min(N, CAFFE_MAXIMUM_NUM_BLOCKS)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), N, D, X.data<float>(), Y.data<float>(), distance->template mutable_data<float>()); return true; } namespace { template <typename T> __global__ void L1DistanceGradientKernel( const int N, const int D, const T* X, const T* Y, const T* dDistance, T* dX, T* dY) { CUDA_1D_KERNEL_LOOP(i, N * D) { constexpr float kEps = 1e-12; int k = i / D; if (X[i] - Y[i] < -kEps) { dX[i] = -dDistance[k]; dY[i] = dDistance[k]; } else if (X[i] - Y[i] > kEps) { dX[i] = dDistance[k]; dY[i] = -dDistance[k]; } else { dX[i] = 0; dY[i] = 0; } } } } // namespace template <> bool L1DistanceGradientOp<float, CUDAContext>::RunOnDevice() { auto& X = Input(0); auto& Y = Input(1); auto& dDistance = Input(2); auto* dX = Output(0); auto* dY = Output(1); int N = X.ndim() > 0 ? X.dim32(0) : 1; int D = N > 0 ? 
X.size() / N : 0; CAFFE_ENFORCE(X.ndim() == Y.ndim()); for (int i = 0; i < X.ndim(); ++i) { CAFFE_ENFORCE_EQ( X.dim32(i), Y.dim32(i), "Mismatch on dimensions: ", X.dims(), " / ", Y.dims()); } CAFFE_ENFORCE_EQ(dDistance.ndim(), 1); CAFFE_ENFORCE_EQ(dDistance.dim32(0), N); dX->ResizeLike(X); dY->ResizeLike(Y); hipLaunchKernelGGL(( L1DistanceGradientKernel), dim3(CAFFE_GET_BLOCKS(N * D)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), N, D, X.data<float>(), Y.data<float>(), dDistance.data<float>(), dX->template mutable_data<float>(), dY->template mutable_data<float>()); return true; } namespace { template <typename T> __global__ void DotProductKernel(const int N, const int D, const T* X, const T* Y, T* result) { for (int i = blockIdx.x; i < N; i += gridDim.x) { T partialSum = 0; int offset = i * D; for (int j = threadIdx.x; j < D; j += blockDim.x) { partialSum += X[offset + j] * Y[offset + j]; } typedef hipcub::BlockReduce<T, CAFFE_CUDA_NUM_THREADS> BlockReduce; __shared__ typename BlockReduce::TempStorage temp_storage; T sum = BlockReduce(temp_storage).Sum(partialSum); __syncthreads(); if (threadIdx.x == 0) { result[i] = sum; } } } // X.size() = N*D, Y.size() = N template <typename T> __global__ void BatchedMul(const int N, const int D, const T* X, const T* Y, T* result) { CUDA_1D_KERNEL_LOOP(i, N * D) { result[i] = X[i] * Y[i / D]; } } // X.size() = N*D, Y.size() = N template <typename T> __global__ void Scale2AxpyScale( const int N, const T* scale, const T* XY, const T* XN, T* result) { CUDA_1D_KERNEL_LOOP(i, N) { result[i] = -scale[i] * XY[i] / (XN[i] * XN[i]); } } // X.size() = X*N, alpha.size() = N, Y.size() = X*N template <typename T> __global__ void BatchedAxpy(const int N, const int D, const T* alpha, const T* X, T* Y) { CUDA_1D_KERNEL_LOOP(i, N * D) { Y[i] += X[i] * alpha[i / D]; } } } // namespace template <> bool CosineSimilarityOp<float, CUDAContext>::RunOnDevice() { auto& X = Input(X_IN); auto& Y = Input(Y_IN); auto* result = Output(COS_OUT); CAFFE_ENFORCE_EQ(X.ndim(), Y.ndim()); for (int i = 0; i < X.ndim(); ++i) { CAFFE_ENFORCE_EQ(X.dim32(i), Y.dim32(i)); } const int N = X.ndim() > 0 ? 
X.dim32(0) : 1; const int D = X.size_from_dim(1); result->Resize(N); float* result_data = result->template mutable_data<float>(); const float* X_data = X.data<float>(); const float* Y_data = Y.data<float>(); // Auxiliary arrays, one allocation of memory aux_.Resize(2 * N); float* aux_data = aux_.mutable_data<float>(); float* x2 = aux_data; float* y2 = aux_data + N; float* scale = x2; const float kEps = 1e-12f; hipLaunchKernelGGL(( DotProductKernel), dim3(::min(N, CAFFE_MAXIMUM_NUM_BLOCKS)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), N, D, X_data, X_data, x2); hipLaunchKernelGGL(( DotProductKernel), dim3(::min(N, CAFFE_MAXIMUM_NUM_BLOCKS)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), N, D, Y_data, Y_data, y2); hipLaunchKernelGGL(( DotProductKernel), dim3(::min(N, CAFFE_MAXIMUM_NUM_BLOCKS)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), N, D, X_data, Y_data, result_data); math::Maximum<float, CUDAContext>(N, kEps, x2, x2, &context_); math::Maximum<float, CUDAContext>(N, kEps, y2, y2, &context_); math::Mul(N, x2, y2, scale, &context_); math::Rsqrt(N, scale, scale, &context_); math::Mul(N, result_data, scale, result_data, &context_); return true; } template <> bool CosineSimilarityGradientOp<float, CUDAContext>::RunOnDevice() { auto& X = Input(X_IN); auto& Y = Input(Y_IN); auto& dCos = Input(DER_COS_IN); auto* dX = Output(DER_X_OUT); auto* dY = Output(DER_Y_OUT); const int N = X.ndim() > 0 ? X.dim32(0) : 1; const int D = X.size_from_dim(1); CAFFE_ENFORCE(X.ndim() == Y.ndim()); for (int i = 0; i < X.ndim(); ++i) { CAFFE_ENFORCE(X.dim32(i) == Y.dim32(i)); } CAFFE_ENFORCE(dCos.ndim() == 1); CAFFE_ENFORCE(dCos.dim32(0) == N); dX->ResizeLike(X); dY->ResizeLike(Y); const auto* X_data = X.data<float>(); const auto* Y_data = Y.data<float>(); const auto* dCos_data = dCos.data<float>(); auto* dX_data = dX->template mutable_data<float>(); auto* dY_data = dY->template mutable_data<float>(); // one memory allocation, a few arrays aux_.Resize(6 * N); float* aux_data = aux_.mutable_data<float>(); float* xn = aux_data; float* yn = aux_data + N; float* xy = aux_data + 2 * N; float* xyn = aux_data + 3 * N; float* scale = aux_data + 4 * N; float* axpy_scale = aux_data + 5 * N; float kEps = 1e-12f; // ||x|| hipLaunchKernelGGL(( DotProductKernel), dim3(::min(N, CAFFE_MAXIMUM_NUM_BLOCKS)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), N, D, X_data, X_data, xn); math::Maximum<float, CUDAContext>(N, kEps, xn, xn, &context_); math::Sqrt<float, CUDAContext>(N, xn, xn, &context_); // ||y|| hipLaunchKernelGGL(( DotProductKernel), dim3(::min(N, CAFFE_MAXIMUM_NUM_BLOCKS)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), N, D, Y_data, Y_data, yn); math::Maximum<float, CUDAContext>(N, kEps, yn, yn, &context_); math::Sqrt<float, CUDAContext>(N, yn, yn, &context_); // ||x|| * || y || math::Mul<float, CUDAContext>(N, xn, yn, xyn, &context_); hipLaunchKernelGGL(( DotProductKernel), dim3(::min(N, CAFFE_MAXIMUM_NUM_BLOCKS)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), N, D, X_data, Y_data, xy); math::Div<float, CUDAContext>(N, dCos_data, xyn, scale, &context_); // dX hipLaunchKernelGGL(( BatchedMul<float>), dim3(::min(N, CAFFE_MAXIMUM_NUM_BLOCKS)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), N, D, Y_data, scale, dX_data); hipLaunchKernelGGL(( Scale2AxpyScale<float>), dim3(::min(N, CAFFE_MAXIMUM_NUM_BLOCKS)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), N, scale, xy, xn, axpy_scale); hipLaunchKernelGGL(( BatchedAxpy<float>), 
dim3(::min(N, CAFFE_MAXIMUM_NUM_BLOCKS)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), N, D, axpy_scale, X_data, dX_data); // dY hipLaunchKernelGGL(( BatchedMul<float>), dim3(::min(N, CAFFE_MAXIMUM_NUM_BLOCKS)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), N, D, X_data, scale, dY_data); hipLaunchKernelGGL(( Scale2AxpyScale<float>), dim3(::min(N, CAFFE_MAXIMUM_NUM_BLOCKS)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), N, scale, xy, yn, axpy_scale); hipLaunchKernelGGL(( BatchedAxpy<float>), dim3(::min(N, CAFFE_MAXIMUM_NUM_BLOCKS)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), N, D, axpy_scale, Y_data, dY_data); return true; } template <> bool DotProductOp<float, CUDAContext>::RunOnDevice() { auto& X = Input(X_IN); auto& Y = Input(Y_IN); auto* result = Output(DOT_OUT); CAFFE_ENFORCE_EQ(X.ndim(), Y.ndim()); for (int i = 0; i < X.ndim(); ++i) { CAFFE_ENFORCE_EQ(X.dim32(i), Y.dim32(i)); } int N, D; if (X.size() > 0) { N = X.ndim() > 0 ? X.dim32(0) : 1; D = X.size() / N; } else { N = 0; D = 0; } result->Resize(N); hipLaunchKernelGGL(( DotProductKernel), dim3(::min(N, CAFFE_MAXIMUM_NUM_BLOCKS)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), N, D, X.data<float>(), Y.data<float>(), result->template mutable_data<float>()); return true; } namespace { template <typename T> __global__ void DotProductGradientKernel( const int N, const int D, const T* X, const T* Y, const T* dDot, T* dX, T* dY) { CUDA_1D_KERNEL_LOOP(i, N * D) { T scale = dDot[i / D]; dX[i] = Y[i] * scale; dY[i] = X[i] * scale; } } } // namespace template <> bool DotProductGradientOp<float, CUDAContext>::RunOnDevice() { auto& X = Input(X_IN); auto& Y = Input(Y_IN); auto& dDot = Input(DER_DOT_IN); auto* dX = Output(DER_X_OUT); auto* dY = Output(DER_Y_OUT); int N, D; if (X.size() > 0) { N = X.ndim() > 0 ? X.dim32(0) : 1; D = X.size() / N; } else { N = 0; D = 0; } CAFFE_ENFORCE(X.ndim() == Y.ndim()); for (int i = 0; i < X.ndim(); ++i) { CAFFE_ENFORCE(X.dim32(i) == Y.dim32(i)); } CAFFE_ENFORCE(dDot.ndim() == 1); CAFFE_ENFORCE(dDot.dim32(0) == N); dX->ResizeLike(X); dY->ResizeLike(Y); hipLaunchKernelGGL(( DotProductGradientKernel), dim3(CAFFE_GET_BLOCKS(N * D)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), N, D, X.data<float>(), Y.data<float>(), dDot.data<float>(), dX->template mutable_data<float>(), dY->template mutable_data<float>()); return true; } REGISTER_CUDA_OPERATOR(SquaredL2Distance, SquaredL2DistanceOp<float, CUDAContext>); REGISTER_CUDA_OPERATOR(SquaredL2DistanceGradient, SquaredL2DistanceGradientOp<float, CUDAContext>); REGISTER_CUDA_OPERATOR(L1Distance, L1DistanceOp<float, CUDAContext>); REGISTER_CUDA_OPERATOR( L1DistanceGradient, L1DistanceGradientOp<float, CUDAContext>); REGISTER_CUDA_OPERATOR(DotProduct, DotProductOp<float, CUDAContext>); REGISTER_CUDA_OPERATOR( DotProductGradient, DotProductGradientOp<float, CUDAContext>); REGISTER_CUDA_OPERATOR( CosineSimilarity, CosineSimilarityOp<float, CUDAContext>); REGISTER_CUDA_OPERATOR( CosineSimilarityGradient, CosineSimilarityGradientOp<float, CUDAContext>); } // namespace caffe2
aca21f92aea80ec5d291040548693c7701dedd24.cu
#include <cub/block/block_reduce.cuh> #include "caffe2/core/context_gpu.h" #include "caffe2/operators/distance_op.h" #include "caffe2/utils/conversions.h" #include <cub/block/block_reduce.cuh> namespace caffe2 { namespace { template <typename T> __global__ void SquaredL2DistanceKernel( const int N, const int D, const T* X, const T* Y, T* distance) { typedef cub::BlockReduce<float, CAFFE_CUDA_NUM_THREADS> BlockReduce; __shared__ typename BlockReduce::TempStorage temp_storage; for (int i = blockIdx.x; i < N; i += gridDim.x) { float dist = 0.0; for (int j = threadIdx.x; j < D; j += blockDim.x) { T diff = X[i * D + j] - Y[i * D + j]; dist += diff * diff; } float total_dist = BlockReduce(temp_storage).Sum(dist); __syncthreads(); if (threadIdx.x == 0) { distance[i] = total_dist / 2.0; } } } } // namespace template <> bool SquaredL2DistanceOp<float, CUDAContext>::RunOnDevice() { auto& X = Input(0); auto& Y = Input(1); auto* distance = Output(0); CAFFE_ENFORCE_EQ(X.ndim(), Y.ndim()); for (int i = 0; i < X.ndim(); ++i) { CAFFE_ENFORCE_EQ( X.dim32(i), Y.dim32(i), "Mismatch in dimensions", X.dims(), " / ", Y.dims()); } int N = X.ndim() > 0 ? X.dim32(0) : 1; int D = X.size() / N; distance->Resize(vector<TIndex>(size_t(1), N)); SquaredL2DistanceKernel<<< std::min(N, CAFFE_MAXIMUM_NUM_BLOCKS), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( N, D, X.data<float>(), Y.data<float>(), distance->template mutable_data<float>()); return true; } namespace { template <typename T> __global__ void StripedScaleKernel(const int N, const int D, const T* alpha, const T* x, T* y) { CUDA_1D_KERNEL_LOOP(i, N * D) { int k = i / D; y[i] = x[i] * alpha[k]; } } } template <> bool SquaredL2DistanceGradientOp<float, CUDAContext>::RunOnDevice() { auto& X = Input(0); auto& Y = Input(1); auto& dDistance = Input(2); auto* dX = Output(0); auto* dY = Output(1); int N = X.ndim() > 0 ? X.dim32(0) : 1; int D = N > 0 ? X.size() / N : 0; CAFFE_ENFORCE(X.ndim() == Y.ndim()); for (int i = 0; i < X.ndim(); ++i) { CAFFE_ENFORCE_EQ( X.dim32(i), Y.dim32(i), "Mismatch on dimensions: ", X.dims(), " / ", Y.dims()); } CAFFE_ENFORCE_EQ(dDistance.ndim(), 1); CAFFE_ENFORCE_EQ(dDistance.dim32(0), N); dX->ResizeLike(X); dY->ResizeLike(Y); math::Sub<float, CUDAContext>( X.size(), X.data<float>(), Y.data<float>(), dX->template mutable_data<float>(), &context_); StripedScaleKernel<float> <<<CAFFE_GET_BLOCKS(N * D), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( N, D, dDistance.data<float>(), dX->data<float>(), dX->template mutable_data<float>()); // The gradient of the other side is basically the negative. 
math::Scale<float, CUDAContext>( X.size(), -1, dX->data<float>(), dY->template mutable_data<float>(), &context_); return true; } namespace { template <typename T> __global__ void L1DistanceKernel( const int N, const int D, const T* X, const T* Y, T* distance) { typedef cub::BlockReduce<float, CAFFE_CUDA_NUM_THREADS> BlockReduce; __shared__ typename BlockReduce::TempStorage temp_storage; for (int i = blockIdx.x; i < N; i += gridDim.x) { float sum = 0.0f; for (int j = threadIdx.x; j < D; j += blockDim.x) { sum += fabsf( convert::To<T, float>(X[i * D + j]) - convert::To<T, float>(Y[i * D + j])); } float aggregate = BlockReduce(temp_storage).Sum(sum); __syncthreads(); if (threadIdx.x == 0) { distance[i] = aggregate; } } } } // namespace template <> bool L1DistanceOp<float, CUDAContext>::RunOnDevice() { auto& X = Input(0); auto& Y = Input(1); auto* distance = Output(0); CAFFE_ENFORCE_EQ(X.ndim(), Y.ndim()); for (int i = 0; i < X.ndim(); ++i) { CAFFE_ENFORCE_EQ(X.dim32(i), Y.dim32(i)); } const int N = X.ndim() > 0 ? X.dim32(0) : 1; const int D = N > 0 ? X.size() / N : 0; distance->Resize(vector<TIndex>(size_t(1), N)); L1DistanceKernel<<< std::min(N, CAFFE_MAXIMUM_NUM_BLOCKS), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( N, D, X.data<float>(), Y.data<float>(), distance->template mutable_data<float>()); return true; } namespace { template <typename T> __global__ void L1DistanceGradientKernel( const int N, const int D, const T* X, const T* Y, const T* dDistance, T* dX, T* dY) { CUDA_1D_KERNEL_LOOP(i, N * D) { constexpr float kEps = 1e-12; int k = i / D; if (X[i] - Y[i] < -kEps) { dX[i] = -dDistance[k]; dY[i] = dDistance[k]; } else if (X[i] - Y[i] > kEps) { dX[i] = dDistance[k]; dY[i] = -dDistance[k]; } else { dX[i] = 0; dY[i] = 0; } } } } // namespace template <> bool L1DistanceGradientOp<float, CUDAContext>::RunOnDevice() { auto& X = Input(0); auto& Y = Input(1); auto& dDistance = Input(2); auto* dX = Output(0); auto* dY = Output(1); int N = X.ndim() > 0 ? X.dim32(0) : 1; int D = N > 0 ? 
X.size() / N : 0; CAFFE_ENFORCE(X.ndim() == Y.ndim()); for (int i = 0; i < X.ndim(); ++i) { CAFFE_ENFORCE_EQ( X.dim32(i), Y.dim32(i), "Mismatch on dimensions: ", X.dims(), " / ", Y.dims()); } CAFFE_ENFORCE_EQ(dDistance.ndim(), 1); CAFFE_ENFORCE_EQ(dDistance.dim32(0), N); dX->ResizeLike(X); dY->ResizeLike(Y); L1DistanceGradientKernel<<< CAFFE_GET_BLOCKS(N * D), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( N, D, X.data<float>(), Y.data<float>(), dDistance.data<float>(), dX->template mutable_data<float>(), dY->template mutable_data<float>()); return true; } namespace { template <typename T> __global__ void DotProductKernel(const int N, const int D, const T* X, const T* Y, T* result) { for (int i = blockIdx.x; i < N; i += gridDim.x) { T partialSum = 0; int offset = i * D; for (int j = threadIdx.x; j < D; j += blockDim.x) { partialSum += X[offset + j] * Y[offset + j]; } typedef cub::BlockReduce<T, CAFFE_CUDA_NUM_THREADS> BlockReduce; __shared__ typename BlockReduce::TempStorage temp_storage; T sum = BlockReduce(temp_storage).Sum(partialSum); __syncthreads(); if (threadIdx.x == 0) { result[i] = sum; } } } // X.size() = N*D, Y.size() = N template <typename T> __global__ void BatchedMul(const int N, const int D, const T* X, const T* Y, T* result) { CUDA_1D_KERNEL_LOOP(i, N * D) { result[i] = X[i] * Y[i / D]; } } // X.size() = N*D, Y.size() = N template <typename T> __global__ void Scale2AxpyScale( const int N, const T* scale, const T* XY, const T* XN, T* result) { CUDA_1D_KERNEL_LOOP(i, N) { result[i] = -scale[i] * XY[i] / (XN[i] * XN[i]); } } // X.size() = X*N, alpha.size() = N, Y.size() = X*N template <typename T> __global__ void BatchedAxpy(const int N, const int D, const T* alpha, const T* X, T* Y) { CUDA_1D_KERNEL_LOOP(i, N * D) { Y[i] += X[i] * alpha[i / D]; } } } // namespace template <> bool CosineSimilarityOp<float, CUDAContext>::RunOnDevice() { auto& X = Input(X_IN); auto& Y = Input(Y_IN); auto* result = Output(COS_OUT); CAFFE_ENFORCE_EQ(X.ndim(), Y.ndim()); for (int i = 0; i < X.ndim(); ++i) { CAFFE_ENFORCE_EQ(X.dim32(i), Y.dim32(i)); } const int N = X.ndim() > 0 ? X.dim32(0) : 1; const int D = X.size_from_dim(1); result->Resize(N); float* result_data = result->template mutable_data<float>(); const float* X_data = X.data<float>(); const float* Y_data = Y.data<float>(); // Auxiliary arrays, one allocation of memory aux_.Resize(2 * N); float* aux_data = aux_.mutable_data<float>(); float* x2 = aux_data; float* y2 = aux_data + N; float* scale = x2; const float kEps = 1e-12f; DotProductKernel<<< std::min(N, CAFFE_MAXIMUM_NUM_BLOCKS), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>(N, D, X_data, X_data, x2); DotProductKernel<<< std::min(N, CAFFE_MAXIMUM_NUM_BLOCKS), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>(N, D, Y_data, Y_data, y2); DotProductKernel<<< std::min(N, CAFFE_MAXIMUM_NUM_BLOCKS), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>(N, D, X_data, Y_data, result_data); math::Maximum<float, CUDAContext>(N, kEps, x2, x2, &context_); math::Maximum<float, CUDAContext>(N, kEps, y2, y2, &context_); math::Mul(N, x2, y2, scale, &context_); math::Rsqrt(N, scale, scale, &context_); math::Mul(N, result_data, scale, result_data, &context_); return true; } template <> bool CosineSimilarityGradientOp<float, CUDAContext>::RunOnDevice() { auto& X = Input(X_IN); auto& Y = Input(Y_IN); auto& dCos = Input(DER_COS_IN); auto* dX = Output(DER_X_OUT); auto* dY = Output(DER_Y_OUT); const int N = X.ndim() > 0 ? 
X.dim32(0) : 1; const int D = X.size_from_dim(1); CAFFE_ENFORCE(X.ndim() == Y.ndim()); for (int i = 0; i < X.ndim(); ++i) { CAFFE_ENFORCE(X.dim32(i) == Y.dim32(i)); } CAFFE_ENFORCE(dCos.ndim() == 1); CAFFE_ENFORCE(dCos.dim32(0) == N); dX->ResizeLike(X); dY->ResizeLike(Y); const auto* X_data = X.data<float>(); const auto* Y_data = Y.data<float>(); const auto* dCos_data = dCos.data<float>(); auto* dX_data = dX->template mutable_data<float>(); auto* dY_data = dY->template mutable_data<float>(); // one memory allocation, a few arrays aux_.Resize(6 * N); float* aux_data = aux_.mutable_data<float>(); float* xn = aux_data; float* yn = aux_data + N; float* xy = aux_data + 2 * N; float* xyn = aux_data + 3 * N; float* scale = aux_data + 4 * N; float* axpy_scale = aux_data + 5 * N; float kEps = 1e-12f; // ||x|| DotProductKernel<<< std::min(N, CAFFE_MAXIMUM_NUM_BLOCKS), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>(N, D, X_data, X_data, xn); math::Maximum<float, CUDAContext>(N, kEps, xn, xn, &context_); math::Sqrt<float, CUDAContext>(N, xn, xn, &context_); // ||y|| DotProductKernel<<< std::min(N, CAFFE_MAXIMUM_NUM_BLOCKS), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>(N, D, Y_data, Y_data, yn); math::Maximum<float, CUDAContext>(N, kEps, yn, yn, &context_); math::Sqrt<float, CUDAContext>(N, yn, yn, &context_); // ||x|| * || y || math::Mul<float, CUDAContext>(N, xn, yn, xyn, &context_); DotProductKernel<<< std::min(N, CAFFE_MAXIMUM_NUM_BLOCKS), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>(N, D, X_data, Y_data, xy); math::Div<float, CUDAContext>(N, dCos_data, xyn, scale, &context_); // dX BatchedMul<float><<< std::min(N, CAFFE_MAXIMUM_NUM_BLOCKS), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>(N, D, Y_data, scale, dX_data); Scale2AxpyScale<float><<< std::min(N, CAFFE_MAXIMUM_NUM_BLOCKS), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>(N, scale, xy, xn, axpy_scale); BatchedAxpy<float><<< std::min(N, CAFFE_MAXIMUM_NUM_BLOCKS), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>(N, D, axpy_scale, X_data, dX_data); // dY BatchedMul<float><<< std::min(N, CAFFE_MAXIMUM_NUM_BLOCKS), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>(N, D, X_data, scale, dY_data); Scale2AxpyScale<float><<< std::min(N, CAFFE_MAXIMUM_NUM_BLOCKS), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>(N, scale, xy, yn, axpy_scale); BatchedAxpy<float><<< std::min(N, CAFFE_MAXIMUM_NUM_BLOCKS), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>(N, D, axpy_scale, Y_data, dY_data); return true; } template <> bool DotProductOp<float, CUDAContext>::RunOnDevice() { auto& X = Input(X_IN); auto& Y = Input(Y_IN); auto* result = Output(DOT_OUT); CAFFE_ENFORCE_EQ(X.ndim(), Y.ndim()); for (int i = 0; i < X.ndim(); ++i) { CAFFE_ENFORCE_EQ(X.dim32(i), Y.dim32(i)); } int N, D; if (X.size() > 0) { N = X.ndim() > 0 ? 
X.dim32(0) : 1; D = X.size() / N; } else { N = 0; D = 0; } result->Resize(N); DotProductKernel<<< std::min(N, CAFFE_MAXIMUM_NUM_BLOCKS), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( N, D, X.data<float>(), Y.data<float>(), result->template mutable_data<float>()); return true; } namespace { template <typename T> __global__ void DotProductGradientKernel( const int N, const int D, const T* X, const T* Y, const T* dDot, T* dX, T* dY) { CUDA_1D_KERNEL_LOOP(i, N * D) { T scale = dDot[i / D]; dX[i] = Y[i] * scale; dY[i] = X[i] * scale; } } } // namespace template <> bool DotProductGradientOp<float, CUDAContext>::RunOnDevice() { auto& X = Input(X_IN); auto& Y = Input(Y_IN); auto& dDot = Input(DER_DOT_IN); auto* dX = Output(DER_X_OUT); auto* dY = Output(DER_Y_OUT); int N, D; if (X.size() > 0) { N = X.ndim() > 0 ? X.dim32(0) : 1; D = X.size() / N; } else { N = 0; D = 0; } CAFFE_ENFORCE(X.ndim() == Y.ndim()); for (int i = 0; i < X.ndim(); ++i) { CAFFE_ENFORCE(X.dim32(i) == Y.dim32(i)); } CAFFE_ENFORCE(dDot.ndim() == 1); CAFFE_ENFORCE(dDot.dim32(0) == N); dX->ResizeLike(X); dY->ResizeLike(Y); DotProductGradientKernel<<< CAFFE_GET_BLOCKS(N * D), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( N, D, X.data<float>(), Y.data<float>(), dDot.data<float>(), dX->template mutable_data<float>(), dY->template mutable_data<float>()); return true; } REGISTER_CUDA_OPERATOR(SquaredL2Distance, SquaredL2DistanceOp<float, CUDAContext>); REGISTER_CUDA_OPERATOR(SquaredL2DistanceGradient, SquaredL2DistanceGradientOp<float, CUDAContext>); REGISTER_CUDA_OPERATOR(L1Distance, L1DistanceOp<float, CUDAContext>); REGISTER_CUDA_OPERATOR( L1DistanceGradient, L1DistanceGradientOp<float, CUDAContext>); REGISTER_CUDA_OPERATOR(DotProduct, DotProductOp<float, CUDAContext>); REGISTER_CUDA_OPERATOR( DotProductGradient, DotProductGradientOp<float, CUDAContext>); REGISTER_CUDA_OPERATOR( CosineSimilarity, CosineSimilarityOp<float, CUDAContext>); REGISTER_CUDA_OPERATOR( CosineSimilarityGradient, CosineSimilarityGradientOp<float, CUDAContext>); } // namespace caffe2
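Per row, the CosineSimilarityGradient kernels above assemble dX = dCos * ( y / (|x||y|) - (x.y) * x / (|x|^3 |y|) ), and symmetrically for dY: the scale pass supplies dCos / (|x||y|) and the axpy_scale pass the -(x.y)/|x|^2 correction. A hedged host-side reference of that formula, useful for cross-checking the device output; the function name is illustrative and the kEps clamp mirrors the device code.

#include <algorithm>
#include <cmath>

void cosineGradRowRef(int D, const float *x, const float *y, float dCos,
                      float *dx, float *dy) {
    const float kEps = 1e-12f;  // same clamp the device code applies to |x|^2 and |y|^2
    float xx = 0, yy = 0, xy = 0;
    for (int j = 0; j < D; ++j) {
        xx += x[j] * x[j];
        yy += y[j] * y[j];
        xy += x[j] * y[j];
    }
    float xn = std::sqrt(std::max(xx, kEps));
    float yn = std::sqrt(std::max(yy, kEps));
    float scale = dCos / (xn * yn);
    for (int j = 0; j < D; ++j) {
        dx[j] = scale * (y[j] - xy * x[j] / (xn * xn));  // dCos * ( y/(|x||y|) - (x.y) x / (|x|^3 |y|) )
        dy[j] = scale * (x[j] - xy * y[j] / (yn * yn));
    }
}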
bada505366aa0da67efc26fcf3c6b3ddad829dcf.hip
// !!! This is a file automatically generated by hipify!!! /* Created based off of Cuda intro tutorial: https://devblogs.nvidia.com/even-easier-introduction-cuda/ Compile with g++: g++ add.cpp -o add Compile with Cuda nvcc: nvcc add.cu -o add_cuda * Must rename file to *.cu in order to compile with Cuda */ #include <iostream> #include <string> #include <math.h> #include <hip/hip_runtime_api.h> using namespace std; // Single Thread GPU Add __global__ void addCuda(int n, float* x, float* y) { for (int i = 0; i < n; i++) { y[i] = x[i] + y[i]; } } // Parallel GPU Add __global__ void addCudaParallel(int n, float* x, float* y) { int index = threadIdx.x; int stride = blockDim.x; for (int i = index; i < n; i += stride) { y[i] = x[i] + y[i]; } } // Parallel Block GPU Add __global__ void addCudaParallelBlock(int n, float* x, float* y) { int index = blockIdx.x * blockDim.x + threadIdx.x; int stride = blockDim.x * gridDim.x; for (int i = index; i < n; i += stride) { y[i] = x[i] + y[i]; } } void calcError(string label, float *y, float expected, int n) { // Check for error float maxError = 0.0f; float totalError = 0.0f; int totalOff = 0; for (int i = 0; i < n; i++) { maxError = fmax(maxError, fabs(y[i] - expected)); totalError += y[i] - expected; if (y[i] - expected != 0.0) totalOff++; } std::cout << label << std::endl; std::cout << "Max Error: " << maxError << std::endl; std::cout << "Total Error: " << totalError << std::endl; std::cout << "Total Off: " << totalOff << std::endl; // delete &maxError; // delete &totalError; // delete &totalOff; } void reset(float* x, float* y, int numCalcs) { for (int i = 0; i < numCalcs; i++) { x[i] = 1.0f; y[i] = 2.0f; } } int main(void) { int numCalcs = 1 << 20; // 1 Million elements to calculate float *x, *y; hipMallocManaged(&x, numCalcs*sizeof(float)); hipMallocManaged(&y, numCalcs*sizeof(float)); // Threading Parameters int blockSize = 256; int numBlocks = (numCalcs + blockSize - 1) / blockSize; // Single Threaded GPU // reset(x, y, numCalcs); // addCuda<<<1, 1>>>(numCalcs, x, y); // Run addCuda on GPU // hipDeviceSynchronize(); // Wait for GPU to finish // calcError("addCuda()", y, 3.0f, numCalcs); // Calculate errors if any // Multithreaded GPU // reset(x, y, numCalcs); // addCudaParallel<<<1, blockSize>>>(numCalcs, x, y); // Run addCudaParallel on GPU // hipDeviceSynchronize(); // Wait for GPU to finish // calcError("addCudaParallel()", y, 3.0f, numCalcs); // Calculate errors if any // Multiblock GPU reset(x, y, numCalcs); hipLaunchKernelGGL(( addCudaParallelBlock), dim3(numBlocks), dim3(blockSize), 0, 0, numCalcs, x, y); // Run addCudaParallelBlock on GPU hipDeviceSynchronize(); // Wait for GPU to finish calcError("addCudaParallelBlock()", y, 3.0f, numCalcs); // Calculate errors if any // Free Shared CPU & GPU memory hipFree(x); hipFree(y); return 0; }
bada505366aa0da67efc26fcf3c6b3ddad829dcf.cu
/* Created based off of Cuda intro tutorial: https://devblogs.nvidia.com/even-easier-introduction-cuda/ Compile with g++: g++ add.cpp -o add Compile with Cuda nvcc: nvcc add.cu -o add_cuda * Must rename file to *.cu in order to compile with Cuda */ #include <iostream> #include <string> #include <math.h> #include <cuda_profiler_api.h> using namespace std; // Single Thread GPU Add __global__ void addCuda(int n, float* x, float* y) { for (int i = 0; i < n; i++) { y[i] = x[i] + y[i]; } } // Parallel GPU Add __global__ void addCudaParallel(int n, float* x, float* y) { int index = threadIdx.x; int stride = blockDim.x; for (int i = index; i < n; i += stride) { y[i] = x[i] + y[i]; } } // Parallel Block GPU Add __global__ void addCudaParallelBlock(int n, float* x, float* y) { int index = blockIdx.x * blockDim.x + threadIdx.x; int stride = blockDim.x * gridDim.x; for (int i = index; i < n; i += stride) { y[i] = x[i] + y[i]; } } void calcError(string label, float *y, float expected, int n) { // Check for error float maxError = 0.0f; float totalError = 0.0f; int totalOff = 0; for (int i = 0; i < n; i++) { maxError = fmax(maxError, fabs(y[i] - expected)); totalError += y[i] - expected; if (y[i] - expected != 0.0) totalOff++; } std::cout << label << std::endl; std::cout << "Max Error: " << maxError << std::endl; std::cout << "Total Error: " << totalError << std::endl; std::cout << "Total Off: " << totalOff << std::endl; // delete &maxError; // delete &totalError; // delete &totalOff; } void reset(float* x, float* y, int numCalcs) { for (int i = 0; i < numCalcs; i++) { x[i] = 1.0f; y[i] = 2.0f; } } int main(void) { int numCalcs = 1 << 20; // 1 Million elements to calculate float *x, *y; cudaMallocManaged(&x, numCalcs*sizeof(float)); cudaMallocManaged(&y, numCalcs*sizeof(float)); // Threading Parameters int blockSize = 256; int numBlocks = (numCalcs + blockSize - 1) / blockSize; // Single Threaded GPU // reset(x, y, numCalcs); // addCuda<<<1, 1>>>(numCalcs, x, y); // Run addCuda on GPU // cudaDeviceSynchronize(); // Wait for GPU to finish // calcError("addCuda()", y, 3.0f, numCalcs); // Calculate errors if any // Multithreaded GPU // reset(x, y, numCalcs); // addCudaParallel<<<1, blockSize>>>(numCalcs, x, y); // Run addCudaParallel on GPU // cudaDeviceSynchronize(); // Wait for GPU to finish // calcError("addCudaParallel()", y, 3.0f, numCalcs); // Calculate errors if any // Multiblock GPU reset(x, y, numCalcs); addCudaParallelBlock<<<numBlocks, blockSize>>>(numCalcs, x, y); // Run addCudaParallelBlock on GPU cudaDeviceSynchronize(); // Wait for GPU to finish calcError("addCudaParallelBlock()", y, 3.0f, numCalcs); // Calculate errors if any // Free Shared CPU & GPU memory cudaFree(x); cudaFree(y); return 0; }
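The tutorial pair above compares three launch configurations but never times them. A hedged sketch of event-based timing with the standard CUDA runtime API; the wrapper name is illustrative and it assumes the addCudaParallelBlock kernel defined in the file.

#include "cuda_runtime.h"

// Returns the elapsed kernel time in milliseconds for one launch.
float timeAddKernel(int numBlocks, int blockSize, int n, float *x, float *y) {
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start);
    addCudaParallelBlock<<<numBlocks, blockSize>>>(n, x, y);
    cudaEventRecord(stop);
    cudaEventSynchronize(stop);             // block until the kernel has finished
    float ms = 0.0f;
    cudaEventElapsedTime(&ms, start, stop); // milliseconds between the two events
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    return ms;
}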
83faff4107d600a018023eddea2e03280c3db92c.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* All modification made by Cambricon Corporation: 2018 Cambricon Corporation All rights reserved. All other contributions: Copyright (c) 2014--2018, the respective contributors All rights reserved. For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of Intel Corporation nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #include <vector> #include "caffe/layers/lrn_layer.hpp" #include "caffe/util/math_functions.hpp" namespace caffe { template <typename Dtype> __global__ void LRNFillScale(const int nthreads, const Dtype* const in, const int num, const int channels, const int height, const int width, const int size, const Dtype alpha_over_size, const Dtype k, Dtype* const scale) { CUDA_KERNEL_LOOP(index, nthreads) { // find out the local offset const int w = index % width; const int h = (index / width) % height; const int n = index / width / height; const int offset = (n * channels * height + h) * width + w; const int step = height * width; const Dtype* const in_off = in + offset; Dtype* const scale_off = scale + offset; int head = 0; const int pre_pad = (size - 1) / 2; const int post_pad = size - pre_pad - 1; Dtype accum_scale = 0; // fill the scale at [n, :, h, w] // accumulate values while (head < post_pad && head < channels) { accum_scale += in_off[head * step] * in_off[head * step]; ++head; } // both add and subtract while (head < channels) { accum_scale += in_off[head * step] * in_off[head * step]; if (head - size >= 0) { accum_scale -= in_off[(head - size) * step] * in_off[(head - size) * step]; } scale_off[(head - post_pad) * step] = k + accum_scale * alpha_over_size; ++head; } // subtract only while (head < channels + post_pad) { if (head - size >= 0) { accum_scale -= in_off[(head - size) * step] * in_off[(head - size) * step]; } scale_off[(head - post_pad) * step] = k + accum_scale * alpha_over_size; ++head; } } } template <typename Dtype> void LRNLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { switch (this->layer_param_.lrn_param().norm_region()) { case LRNParameter_NormRegion_ACROSS_CHANNELS: CrossChannelForward_gpu(bottom, top); break; case LRNParameter_NormRegion_WITHIN_CHANNEL: WithinChannelForward(bottom, top); break; default: LOG(FATAL) << "Unknown normalization region."; } } template <typename Dtype> __global__ void LRNComputeOutput(const int nthreads, const Dtype* const in, const Dtype* const scale, const Dtype negative_beta, Dtype* const out) { CUDA_KERNEL_LOOP(index, nthreads) { out[index] = in[index] * pow(scale[index], negative_beta); } } template <typename Dtype> void LRNLayer<Dtype>::CrossChannelForward_gpu( const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { // First, compute scale const Dtype* bottom_data = bottom[0]->gpu_data(); Dtype* top_data = top[0]->mutable_gpu_data(); Dtype* scale_data = scale_.mutable_gpu_data(); // We will launch one kernel for each pixel location, and have the kernel // go through all the channels. 
int n_threads = num_ * height_ * width_; // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( LRNFillScale), dim3(CAFFE_GET_BLOCKS(n_threads)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, n_threads, bottom_data, num_, channels_, height_, width_, size_, alpha_ / size_, k_, scale_data); CUDA_POST_KERNEL_CHECK; n_threads = bottom[0]->count(); // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( LRNComputeOutput), dim3(CAFFE_GET_BLOCKS(n_threads)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, n_threads, bottom_data, scale_data, -beta_, top_data); CUDA_POST_KERNEL_CHECK; } template void LRNLayer<float>::CrossChannelForward_gpu( const vector<Blob<float>*>& bottom, const vector<Blob<float>*>& top); template void LRNLayer<double>::CrossChannelForward_gpu( const vector<Blob<double>*>& bottom, const vector<Blob<double>*>& top); template <typename Dtype> void LRNLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { switch (this->layer_param_.lrn_param().norm_region()) { case LRNParameter_NormRegion_ACROSS_CHANNELS: CrossChannelBackward_gpu(top, propagate_down, bottom); break; case LRNParameter_NormRegion_WITHIN_CHANNEL: WithinChannelBackward(top, propagate_down, bottom); break; default: LOG(FATAL) << "Unknown normalization region."; } } template <typename Dtype> __global__ void LRNComputeDiff(const int nthreads, const Dtype* const bottom_data, const Dtype* const top_data, const Dtype* const scale, const Dtype* const top_diff, const int num, const int channels, const int height, const int width, const int size, const Dtype negative_beta, const Dtype cache_ratio, Dtype* const bottom_diff) { CUDA_KERNEL_LOOP(index, nthreads) { // find out the local offset const int w = index % width; const int h = (index / width) % height; const int n = index / width / height; const int offset = (n * channels * height + h) * width + w; const int step = height * width; const Dtype* const bottom_off = bottom_data + offset; const Dtype* const top_off = top_data + offset; const Dtype* const scale_off = scale + offset; const Dtype* const top_diff_off = top_diff + offset; Dtype* const bottom_diff_off = bottom_diff + offset; int head = 0; const int pre_pad = size - (size + 1) / 2; const int post_pad = size - pre_pad - 1; Dtype accum_ratio = 0; // accumulate values while (head < post_pad && head < channels) { accum_ratio += top_diff_off[head * step] * top_off[head * step] / scale_off[head * step]; ++head; } // both add and subtract while (head < channels) { accum_ratio += top_diff_off[head * step] * top_off[head * step] / scale_off[head * step]; if (head - size >= 0) { accum_ratio -= top_diff_off[(head - size) * step] * top_off[(head - size) * step] / scale_off[(head - size) * step]; } bottom_diff_off[(head - post_pad) * step] = top_diff_off[(head - post_pad) * step] * pow(scale_off[(head - post_pad) * step], negative_beta) - cache_ratio * bottom_off[(head - post_pad) * step] * accum_ratio; ++head; } // subtract only while (head < channels + post_pad) { if (head - size >= 0) { accum_ratio -= top_diff_off[(head - size) * step] * top_off[(head - size) * step] / scale_off[(head - size) * step]; } bottom_diff_off[(head - post_pad) * step] = top_diff_off[(head - post_pad) * step] * pow(scale_off[(head - post_pad) * step], negative_beta) - cache_ratio * bottom_off[(head - post_pad) * step] * accum_ratio; ++head; } } } template <typename Dtype> void LRNLayer<Dtype>::CrossChannelBackward_gpu( const vector<Blob<Dtype>*>& top, const vector<bool>& 
propagate_down, const vector<Blob<Dtype>*>& bottom) { int n_threads = num_ * height_ * width_; // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( LRNComputeDiff), dim3(CAFFE_GET_BLOCKS(n_threads)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, n_threads, bottom[0]->gpu_data(), top[0]->gpu_data(), scale_.gpu_data(), top[0]->gpu_diff(), num_, channels_, height_, width_, size_, -beta_, Dtype(2. * alpha_ * beta_ / size_), bottom[0]->mutable_gpu_diff()); } template void LRNLayer<float>::CrossChannelBackward_gpu( const vector<Blob<float>*>& top, const vector<bool>& propagate_down, const vector<Blob<float>*>& bottom); template void LRNLayer<double>::CrossChannelBackward_gpu( const vector<Blob<double>*>& top, const vector<bool>& propagate_down, const vector<Blob<double>*>& bottom); INSTANTIATE_LAYER_GPU_FUNCS(LRNLayer); } // namespace caffe
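// What LRNFillScale and LRNComputeOutput above compute, as a minimal
// host-side reference over one pixel column across channels. This sketch is
// illustrative only; the function name and use of std::vector are not part
// of the original file.
#include <cmath>
#include <vector>

std::vector<float> lrn_across_channels_ref(const std::vector<float>& in,
                                           int size, float alpha,
                                           float beta, float k) {
  const int channels = static_cast<int>(in.size());
  const int pre_pad = (size - 1) / 2;  // same window padding as the kernel
  std::vector<float> out(channels);
  for (int c = 0; c < channels; ++c) {
    float accum = 0.f;  // running sum of squares over the channel window
    for (int j = c - pre_pad; j < c - pre_pad + size; ++j)
      if (j >= 0 && j < channels) accum += in[j] * in[j];
    const float scale = k + accum * alpha / size;  // mirrors LRNFillScale
    out[c] = in[c] * std::pow(scale, -beta);       // mirrors LRNComputeOutput
  }
  return out;
}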
83faff4107d600a018023eddea2e03280c3db92c.cu
/* All modification made by Cambricon Corporation: © 2018 Cambricon Corporation All rights reserved. All other contributions: Copyright (c) 2014--2018, the respective contributors All rights reserved. For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of Intel Corporation nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include <vector> #include "caffe/layers/lrn_layer.hpp" #include "caffe/util/math_functions.hpp" namespace caffe { template <typename Dtype> __global__ void LRNFillScale(const int nthreads, const Dtype* const in, const int num, const int channels, const int height, const int width, const int size, const Dtype alpha_over_size, const Dtype k, Dtype* const scale) { CUDA_KERNEL_LOOP(index, nthreads) { // find out the local offset const int w = index % width; const int h = (index / width) % height; const int n = index / width / height; const int offset = (n * channels * height + h) * width + w; const int step = height * width; const Dtype* const in_off = in + offset; Dtype* const scale_off = scale + offset; int head = 0; const int pre_pad = (size - 1) / 2; const int post_pad = size - pre_pad - 1; Dtype accum_scale = 0; // fill the scale at [n, :, h, w] // accumulate values while (head < post_pad && head < channels) { accum_scale += in_off[head * step] * in_off[head * step]; ++head; } // both add and subtract while (head < channels) { accum_scale += in_off[head * step] * in_off[head * step]; if (head - size >= 0) { accum_scale -= in_off[(head - size) * step] * in_off[(head - size) * step]; } scale_off[(head - post_pad) * step] = k + accum_scale * alpha_over_size; ++head; } // subtract only while (head < channels + post_pad) { if (head - size >= 0) { accum_scale -= in_off[(head - size) * step] * in_off[(head - size) * step]; } scale_off[(head - post_pad) * step] = k + accum_scale * alpha_over_size; ++head; } } } template <typename Dtype> void LRNLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { switch (this->layer_param_.lrn_param().norm_region()) { case LRNParameter_NormRegion_ACROSS_CHANNELS: CrossChannelForward_gpu(bottom, top); break; case 
LRNParameter_NormRegion_WITHIN_CHANNEL: WithinChannelForward(bottom, top); break; default: LOG(FATAL) << "Unknown normalization region."; } } template <typename Dtype> __global__ void LRNComputeOutput(const int nthreads, const Dtype* const in, const Dtype* const scale, const Dtype negative_beta, Dtype* const out) { CUDA_KERNEL_LOOP(index, nthreads) { out[index] = in[index] * pow(scale[index], negative_beta); } } template <typename Dtype> void LRNLayer<Dtype>::CrossChannelForward_gpu( const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { // First, compute scale const Dtype* bottom_data = bottom[0]->gpu_data(); Dtype* top_data = top[0]->mutable_gpu_data(); Dtype* scale_data = scale_.mutable_gpu_data(); // We will launch one kernel for each pixel location, and have the kernel // go through all the channels. int n_threads = num_ * height_ * width_; // NOLINT_NEXT_LINE(whitespace/operators) LRNFillScale<<<CAFFE_GET_BLOCKS(n_threads), CAFFE_CUDA_NUM_THREADS>>>( n_threads, bottom_data, num_, channels_, height_, width_, size_, alpha_ / size_, k_, scale_data); CUDA_POST_KERNEL_CHECK; n_threads = bottom[0]->count(); // NOLINT_NEXT_LINE(whitespace/operators) LRNComputeOutput<<<CAFFE_GET_BLOCKS(n_threads), CAFFE_CUDA_NUM_THREADS>>>( n_threads, bottom_data, scale_data, -beta_, top_data); CUDA_POST_KERNEL_CHECK; } template void LRNLayer<float>::CrossChannelForward_gpu( const vector<Blob<float>*>& bottom, const vector<Blob<float>*>& top); template void LRNLayer<double>::CrossChannelForward_gpu( const vector<Blob<double>*>& bottom, const vector<Blob<double>*>& top); template <typename Dtype> void LRNLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { switch (this->layer_param_.lrn_param().norm_region()) { case LRNParameter_NormRegion_ACROSS_CHANNELS: CrossChannelBackward_gpu(top, propagate_down, bottom); break; case LRNParameter_NormRegion_WITHIN_CHANNEL: WithinChannelBackward(top, propagate_down, bottom); break; default: LOG(FATAL) << "Unknown normalization region."; } } template <typename Dtype> __global__ void LRNComputeDiff(const int nthreads, const Dtype* const bottom_data, const Dtype* const top_data, const Dtype* const scale, const Dtype* const top_diff, const int num, const int channels, const int height, const int width, const int size, const Dtype negative_beta, const Dtype cache_ratio, Dtype* const bottom_diff) { CUDA_KERNEL_LOOP(index, nthreads) { // find out the local offset const int w = index % width; const int h = (index / width) % height; const int n = index / width / height; const int offset = (n * channels * height + h) * width + w; const int step = height * width; const Dtype* const bottom_off = bottom_data + offset; const Dtype* const top_off = top_data + offset; const Dtype* const scale_off = scale + offset; const Dtype* const top_diff_off = top_diff + offset; Dtype* const bottom_diff_off = bottom_diff + offset; int head = 0; const int pre_pad = size - (size + 1) / 2; const int post_pad = size - pre_pad - 1; Dtype accum_ratio = 0; // accumulate values while (head < post_pad && head < channels) { accum_ratio += top_diff_off[head * step] * top_off[head * step] / scale_off[head * step]; ++head; } // both add and subtract while (head < channels) { accum_ratio += top_diff_off[head * step] * top_off[head * step] / scale_off[head * step]; if (head - size >= 0) { accum_ratio -= top_diff_off[(head - size) * step] * top_off[(head - size) * step] / scale_off[(head - size) * step]; } 
bottom_diff_off[(head - post_pad) * step] = top_diff_off[(head - post_pad) * step] * pow(scale_off[(head - post_pad) * step], negative_beta) - cache_ratio * bottom_off[(head - post_pad) * step] * accum_ratio; ++head; } // subtract only while (head < channels + post_pad) { if (head - size >= 0) { accum_ratio -= top_diff_off[(head - size) * step] * top_off[(head - size) * step] / scale_off[(head - size) * step]; } bottom_diff_off[(head - post_pad) * step] = top_diff_off[(head - post_pad) * step] * pow(scale_off[(head - post_pad) * step], negative_beta) - cache_ratio * bottom_off[(head - post_pad) * step] * accum_ratio; ++head; } } } template <typename Dtype> void LRNLayer<Dtype>::CrossChannelBackward_gpu( const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { int n_threads = num_ * height_ * width_; // NOLINT_NEXT_LINE(whitespace/operators) LRNComputeDiff<<<CAFFE_GET_BLOCKS(n_threads), CAFFE_CUDA_NUM_THREADS>>>( n_threads, bottom[0]->gpu_data(), top[0]->gpu_data(), scale_.gpu_data(), top[0]->gpu_diff(), num_, channels_, height_, width_, size_, -beta_, Dtype(2. * alpha_ * beta_ / size_), bottom[0]->mutable_gpu_diff()); } template void LRNLayer<float>::CrossChannelBackward_gpu( const vector<Blob<float>*>& top, const vector<bool>& propagate_down, const vector<Blob<float>*>& bottom); template void LRNLayer<double>::CrossChannelBackward_gpu( const vector<Blob<double>*>& top, const vector<bool>& propagate_down, const vector<Blob<double>*>& bottom); INSTANTIATE_LAYER_GPU_FUNCS(LRNLayer); } // namespace caffe
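// Where cache_ratio = 2 * alpha * beta / size in CrossChannelBackward_gpu
// comes from: with scale_c = k + (alpha/size) * sum_{j in W(c)} x_j^2 and
// y_c = x_c * scale_c^(-beta),
//
//   dy_c/dx_i = [c == i] * scale_c^(-beta)
//             - beta * x_c * scale_c^(-beta - 1) * (2 * alpha / size) * x_i
//               whenever i lies in the window W(c),
//
// so the input gradient is
//
//   dL/dx_i = g_i * scale_i^(-beta)
//           - (2 * alpha * beta / size) * x_i
//             * sum_{c : i in W(c)} g_c * y_c / scale_c,
//
// and that trailing sum is exactly the sliding accum_ratio that
// LRNComputeDiff maintains (top_diff * top_data / scale over the window).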
2f05d40d089038690b9cc636082e32b488b7f5de.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright 2011-2013 Maxim Milakov * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "max_subsampling_2d_layer_updater_cuda.h" #include <hip/hip_runtime.h> #include "util_cuda.h" #include "neural_network_cuda_exception.h" #include "../max_subsampling_layer.h" texture<float, hipTextureType1D, hipReadModeElementType> input_tex_ref; struct __align__(4) window_x_x_config { window_x_x_config(int window_x, int x) { this->window_x_x_pair = (((unsigned int)window_x) << 16) | (unsigned int)x; } unsigned int window_x_x_pair; }; struct __align__(4) y_feature_map_config { y_feature_map_config(int y, int feature_map_id) { this->y_feature_map_id_pair = (((unsigned int)y) << 16) | (unsigned int)feature_map_id; } unsigned int y_feature_map_id_pair; }; struct __align__(4) window_x_window_y_config { window_x_window_y_config(int window_x, int window_y) { this->window_x_window_y_pair = (((unsigned int)window_x) << 16) | (unsigned int)window_y; } unsigned int window_x_window_y_pair; }; struct __align__(4) x_y_config { x_y_config(int x, int y) { this->x_y_pair = (((unsigned int)x) << 16) | (unsigned int)y; } unsigned int x_y_pair; }; extern __shared__ float arr_sh[]; __global__ void max_subsampling_2d_tex_upd_kernel( float * __restrict output, x_y_config * __restrict max_positions, const window_x_x_config * __restrict window_x_x_config_list, const y_feature_map_config * __restrict y_feature_map_config_list, int subsampling_width, int subsampling_height, int input_width, int input_height, int output_width, int output_height, int feature_map_count, int entry_count, int window_x_x_config_count, int y_feature_map_config_count) { int window_x_x_config_id = blockIdx.x * blockDim.x + threadIdx.x; int feature_map_config_id = blockIdx.y * blockDim.y + threadIdx.y; int entry_id = blockIdx.z * blockDim.z + threadIdx.z; int local_thread_id = (threadIdx.z * blockDim.y + threadIdx.y) * blockDim.x + threadIdx.x; int threadblock_size = blockDim.z * blockDim.y * blockDim.x; float * vals = arr_sh; int * max_pos_y_list = (int *)(vals + threadblock_size); bool in_bounds = (entry_id < entry_count) && (window_x_x_config_id < window_x_x_config_count) && (feature_map_config_id < y_feature_map_config_count); float res = -1.0e37F; int max_pos_y; int window_x; int output_x; int output_y; int feature_map_id; if (in_bounds) { window_x_x_config wxx = window_x_x_config_list[window_x_x_config_id]; output_x = wxx.window_x_x_pair & 0xFFFF; window_x = wxx.window_x_x_pair >> 16; y_feature_map_config yfm = y_feature_map_config_list[feature_map_config_id]; feature_map_id = yfm.y_feature_map_id_pair & 0xFFFF; output_y = yfm.y_feature_map_id_pair >> 16; int input_x = output_x * subsampling_width + window_x; int input_y = output_y * subsampling_height; int current_input_elem_id = ((entry_id * feature_map_count + feature_map_id) * input_height + input_y) * input_width + input_x; res = tex1Dfetch(input_tex_ref, current_input_elem_id); max_pos_y = 0; for(int j = 1; j < subsampling_height; ++j) { 
current_input_elem_id += input_width; float new_val = tex1Dfetch(input_tex_ref, current_input_elem_id); if (new_val > res) { res = new_val; max_pos_y = j; } } vals[local_thread_id] = res; max_pos_y_list[local_thread_id] = max_pos_y; } __syncthreads(); if (in_bounds && (window_x == 0)) { int max_pos_x = 0; for(int j = 1; j < subsampling_width; ++j) { local_thread_id++; float new_val = vals[local_thread_id]; int new_max_pos_y = max_pos_y_list[local_thread_id]; if (new_val > res) { res = new_val; max_pos_x = j; max_pos_y = new_max_pos_y; } } int offset = ((entry_id * feature_map_count + feature_map_id) * output_height + output_y) * output_width + output_x; output[offset] = res; max_positions[offset].x_y_pair = (max_pos_x << 16) | max_pos_y; } } __global__ void max_subsampling_2d_deriviative_upd_kernel( float * __restrict input_errors, const x_y_config * __restrict max_positions, const float * __restrict output_errors, const x_y_config * __restrict x_y_config_list, int subsampling_width, int subsampling_height, int input_width, int input_height, int output_width, int output_height, int feature_map_count, int entry_count, int x_y_config_count) { int x_y_config_id = blockIdx.x * blockDim.x + threadIdx.x; int feature_map_id = blockIdx.y * blockDim.y + threadIdx.y; int entry_id = blockIdx.z * blockDim.z + threadIdx.z; bool in_bounds = (entry_id < entry_count) && (x_y_config_id < x_y_config_count) && (feature_map_id < feature_map_count); if (in_bounds) { x_y_config xy = x_y_config_list[x_y_config_id]; int output_x = xy.x_y_pair >> 16; int output_y = xy.x_y_pair & 0xFFFF; int offset = ((entry_id * feature_map_count + feature_map_id) * output_height + output_y) * output_width + output_x; float output_error = output_errors[offset]; x_y_config max_pos_xy = max_positions[offset]; int max_pos_x = max_pos_xy.x_y_pair >> 16; int max_pos_y = max_pos_xy.x_y_pair & 0xFFFF; int input_x = output_x * subsampling_width + max_pos_x; int input_y = output_y * subsampling_height + max_pos_y; int input_offset = ((entry_id * feature_map_count + feature_map_id) * input_height + input_y) * input_width + input_x; input_errors[input_offset] = output_error; } } namespace nnforge { namespace cuda { max_subsampling_2d_layer_updater_cuda::max_subsampling_2d_layer_updater_cuda() { input_tex_ref.addressMode[0] = hipAddressModeBorder; input_tex_ref.normalized = false; } max_subsampling_2d_layer_updater_cuda::~max_subsampling_2d_layer_updater_cuda() { } void max_subsampling_2d_layer_updater_cuda::enqueue_test( unsigned int offset_input_entry_id, hipStream_t stream_id, const std::vector<const_cuda_linear_buffer_device_smart_ptr>& schema_data, const std::vector<cuda_linear_buffer_device_smart_ptr>& data, const_cuda_linear_buffer_device_smart_ptr input_neurons_buffer, cuda_linear_buffer_device_smart_ptr output_neurons_buffer, const std::vector<cuda_linear_buffer_device_smart_ptr>& additional_buffers, std::vector<cuda_memobject_smart_ptr>& dynamic_memobjects, unsigned int entry_count) { hipChannelFormatDesc desc = hipCreateChannelDesc<float>(); cuda_safe_call(hipBindTexture(0, input_tex_ref, *input_neurons_buffer, desc, input_elem_count_per_entry * entry_count * sizeof(float))); float * output = *output_neurons_buffer; x_y_config * max_positions = (x_y_config *)((void *)(*additional_buffers[0])); int window_x_x_config_count = subsampling_sizes[0] * output_configuration_specific.dimension_sizes[0]; const window_x_x_config * window_x_x_config_list = static_cast<const window_x_x_config *>((const void *)*additional_buffers[1]); int 
y_feature_map_config_count = output_configuration_specific.dimension_sizes[1] * output_configuration_specific.feature_map_count; const y_feature_map_config * y_feature_map_config_list = static_cast<const y_feature_map_config *>((const void *)*additional_buffers[2]); std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_sequential_access( *cuda_config, window_x_x_config_count, y_feature_map_config_count, entry_count, subsampling_sizes[0]); int threadblock_size = kernel_dims.second.x * kernel_dims.second.y * kernel_dims.second.z; int smem_size = threadblock_size * (sizeof(float) + sizeof(window_x_window_y_config)); hipLaunchKernelGGL(( max_subsampling_2d_tex_upd_kernel), dim3(kernel_dims.first), dim3(kernel_dims.second), smem_size, stream_id, output, max_positions, window_x_x_config_list, y_feature_map_config_list, subsampling_sizes[0], subsampling_sizes[1], input_configuration_specific.dimension_sizes[0], input_configuration_specific.dimension_sizes[1], output_configuration_specific.dimension_sizes[0], output_configuration_specific.dimension_sizes[1], output_configuration_specific.feature_map_count, entry_count, window_x_x_config_count, y_feature_map_config_count); } void max_subsampling_2d_layer_updater_cuda::enqueue_backprop( hipStream_t stream_id, const std::vector<const_cuda_linear_buffer_device_smart_ptr>& schema_data, const std::vector<cuda_linear_buffer_device_smart_ptr>& data, const_cuda_linear_buffer_device_smart_ptr output_neurons_buffer, const_cuda_linear_buffer_device_smart_ptr input_neurons_buffer, cuda_linear_buffer_device_smart_ptr output_errors_buffer, cuda_linear_buffer_device_smart_ptr input_errors_buffer, const std::vector<cuda_linear_buffer_device_smart_ptr>& additional_buffers, std::vector<cuda_memobject_smart_ptr>& dynamic_memobjects, unsigned int entry_count) { cuda_util::set_with_value( *cuda_config, *input_errors_buffer, 0.0F, input_elem_count_per_entry * entry_count, stream_id); const float * output_errors = *output_errors_buffer; const x_y_config * max_positions = (const x_y_config *)((void *)(*additional_buffers[0])); float * input_errors = *input_errors_buffer; int x_y_config_count = output_configuration_specific.dimension_sizes[0] * output_configuration_specific.dimension_sizes[1]; const x_y_config * x_y_config_list = static_cast<const x_y_config *>((const void *)*additional_buffers[3]); std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_sequential_access( *cuda_config, x_y_config_count, output_configuration_specific.feature_map_count, entry_count); hipLaunchKernelGGL(( max_subsampling_2d_deriviative_upd_kernel), dim3(kernel_dims.first), dim3(kernel_dims.second), 0, stream_id, input_errors, max_positions, output_errors, x_y_config_list, subsampling_sizes[0], subsampling_sizes[1], input_configuration_specific.dimension_sizes[0], input_configuration_specific.dimension_sizes[1], output_configuration_specific.dimension_sizes[0], output_configuration_specific.dimension_sizes[1], output_configuration_specific.feature_map_count, entry_count, x_y_config_count); } std::vector<unsigned int> max_subsampling_2d_layer_updater_cuda::get_linear_addressing_through_texture_per_entry() const { std::vector<unsigned int> res; res.push_back(input_elem_count_per_entry); return res; } void max_subsampling_2d_layer_updater_cuda::updater_configured() { if (!different_input) throw neural_network_exception("max_subsampling_2d_layer_updater_cuda is not able to run using the same input"); std::tr1::shared_ptr<const max_subsampling_layer> 
layer_derived = std::tr1::dynamic_pointer_cast<const max_subsampling_layer>(layer_schema); subsampling_sizes = layer_derived->subsampling_sizes; } bool max_subsampling_2d_layer_updater_cuda::is_in_place_backprop() const { return false; } std::vector<size_t> max_subsampling_2d_layer_updater_cuda::get_sizes_of_additional_buffers_per_entry() const { std::vector<size_t> res; res.push_back(output_elem_count_per_entry * sizeof(float)); return res; } std::vector<size_t> max_subsampling_2d_layer_updater_cuda::get_sizes_of_additional_buffers_fixed() const { std::vector<size_t> res; res.push_back(sizeof(window_x_x_config) * subsampling_sizes[0] * output_configuration_specific.dimension_sizes[0]); res.push_back(sizeof(y_feature_map_config) * output_configuration_specific.dimension_sizes[1] * output_configuration_specific.feature_map_count); res.push_back(sizeof(x_y_config) * output_configuration_specific.dimension_sizes[1] * output_configuration_specific.dimension_sizes[0]); return res; } void max_subsampling_2d_layer_updater_cuda::fill_additional_buffers(const std::vector<cuda_linear_buffer_device_smart_ptr>& additional_buffers) const { { std::vector<window_x_x_config> task_list; for(int x = 0; x < output_configuration_specific.dimension_sizes[0]; ++x) for(int window_x = 0; window_x < subsampling_sizes[0]; ++window_x) task_list.push_back(window_x_x_config(window_x, x)); cuda_safe_call(hipMemcpy(*additional_buffers[1], &(*task_list.begin()), sizeof(window_x_x_config) * task_list.size(), hipMemcpyHostToDevice)); } { std::vector<y_feature_map_config> task_list; for(int feature_map_id = 0; feature_map_id < output_configuration_specific.feature_map_count; ++feature_map_id) for(int y = 0; y < output_configuration_specific.dimension_sizes[1]; ++y) task_list.push_back(y_feature_map_config(y, feature_map_id)); cuda_safe_call(hipMemcpy(*additional_buffers[2], &(*task_list.begin()), sizeof(y_feature_map_config) * task_list.size(), hipMemcpyHostToDevice)); } { std::vector<x_y_config> task_list; for(int y = 0; y < output_configuration_specific.dimension_sizes[1]; ++y) for(int x = 0; x < output_configuration_specific.dimension_sizes[0]; ++x) task_list.push_back(x_y_config(x, y)); cuda_safe_call(hipMemcpy(*additional_buffers[3], &(*task_list.begin()), sizeof(x_y_config) * task_list.size(), hipMemcpyHostToDevice)); } } } }
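// The window_x_x_config / y_feature_map_config / x_y_config structs above all
// follow one convention: two 16-bit indices packed into a single 32-bit word
// so each thread fetches both with one load. A minimal sketch of that
// convention (helper names are illustrative, not from the original file):
#include <cstdint>

inline uint32_t pack_pair(uint32_t hi, uint32_t lo) {
  return (hi << 16) | (lo & 0xFFFFu);  // both values must fit in 16 bits
}
inline uint32_t pair_hi(uint32_t p) { return p >> 16; }      // e.g. window_x
inline uint32_t pair_lo(uint32_t p) { return p & 0xFFFFu; }  // e.g. output x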
2f05d40d089038690b9cc636082e32b488b7f5de.cu
/* * Copyright 2011-2013 Maxim Milakov * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "max_subsampling_2d_layer_updater_cuda.h" #include <cuda_runtime.h> #include "util_cuda.h" #include "neural_network_cuda_exception.h" #include "../max_subsampling_layer.h" texture<float, cudaTextureType1D, cudaReadModeElementType> input_tex_ref; struct __align__(4) window_x_x_config { window_x_x_config(int window_x, int x) { this->window_x_x_pair = (((unsigned int)window_x) << 16) | (unsigned int)x; } unsigned int window_x_x_pair; }; struct __align__(4) y_feature_map_config { y_feature_map_config(int y, int feature_map_id) { this->y_feature_map_id_pair = (((unsigned int)y) << 16) | (unsigned int)feature_map_id; } unsigned int y_feature_map_id_pair; }; struct __align__(4) window_x_window_y_config { window_x_window_y_config(int window_x, int window_y) { this->window_x_window_y_pair = (((unsigned int)window_x) << 16) | (unsigned int)window_y; } unsigned int window_x_window_y_pair; }; struct __align__(4) x_y_config { x_y_config(int x, int y) { this->x_y_pair = (((unsigned int)x) << 16) | (unsigned int)y; } unsigned int x_y_pair; }; extern __shared__ float arr_sh[]; __global__ void max_subsampling_2d_tex_upd_kernel( float * __restrict output, x_y_config * __restrict max_positions, const window_x_x_config * __restrict window_x_x_config_list, const y_feature_map_config * __restrict y_feature_map_config_list, int subsampling_width, int subsampling_height, int input_width, int input_height, int output_width, int output_height, int feature_map_count, int entry_count, int window_x_x_config_count, int y_feature_map_config_count) { int window_x_x_config_id = blockIdx.x * blockDim.x + threadIdx.x; int feature_map_config_id = blockIdx.y * blockDim.y + threadIdx.y; int entry_id = blockIdx.z * blockDim.z + threadIdx.z; int local_thread_id = (threadIdx.z * blockDim.y + threadIdx.y) * blockDim.x + threadIdx.x; int threadblock_size = blockDim.z * blockDim.y * blockDim.x; float * vals = arr_sh; int * max_pos_y_list = (int *)(vals + threadblock_size); bool in_bounds = (entry_id < entry_count) && (window_x_x_config_id < window_x_x_config_count) && (feature_map_config_id < y_feature_map_config_count); float res = -1.0e37F; int max_pos_y; int window_x; int output_x; int output_y; int feature_map_id; if (in_bounds) { window_x_x_config wxx = window_x_x_config_list[window_x_x_config_id]; output_x = wxx.window_x_x_pair & 0xFFFF; window_x = wxx.window_x_x_pair >> 16; y_feature_map_config yfm = y_feature_map_config_list[feature_map_config_id]; feature_map_id = yfm.y_feature_map_id_pair & 0xFFFF; output_y = yfm.y_feature_map_id_pair >> 16; int input_x = output_x * subsampling_width + window_x; int input_y = output_y * subsampling_height; int current_input_elem_id = ((entry_id * feature_map_count + feature_map_id) * input_height + input_y) * input_width + input_x; res = tex1Dfetch(input_tex_ref, current_input_elem_id); max_pos_y = 0; for(int j = 1; j < subsampling_height; ++j) { current_input_elem_id += input_width; float new_val = 
tex1Dfetch(input_tex_ref, current_input_elem_id); if (new_val > res) { res = new_val; max_pos_y = j; } } vals[local_thread_id] = res; max_pos_y_list[local_thread_id] = max_pos_y; } __syncthreads(); if (in_bounds && (window_x == 0)) { int max_pos_x = 0; for(int j = 1; j < subsampling_width; ++j) { local_thread_id++; float new_val = vals[local_thread_id]; int new_max_pos_y = max_pos_y_list[local_thread_id]; if (new_val > res) { res = new_val; max_pos_x = j; max_pos_y = new_max_pos_y; } } int offset = ((entry_id * feature_map_count + feature_map_id) * output_height + output_y) * output_width + output_x; output[offset] = res; max_positions[offset].x_y_pair = (max_pos_x << 16) | max_pos_y; } } __global__ void max_subsampling_2d_deriviative_upd_kernel( float * __restrict input_errors, const x_y_config * __restrict max_positions, const float * __restrict output_errors, const x_y_config * __restrict x_y_config_list, int subsampling_width, int subsampling_height, int input_width, int input_height, int output_width, int output_height, int feature_map_count, int entry_count, int x_y_config_count) { int x_y_config_id = blockIdx.x * blockDim.x + threadIdx.x; int feature_map_id = blockIdx.y * blockDim.y + threadIdx.y; int entry_id = blockIdx.z * blockDim.z + threadIdx.z; bool in_bounds = (entry_id < entry_count) && (x_y_config_id < x_y_config_count) && (feature_map_id < feature_map_count); if (in_bounds) { x_y_config xy = x_y_config_list[x_y_config_id]; int output_x = xy.x_y_pair >> 16; int output_y = xy.x_y_pair & 0xFFFF; int offset = ((entry_id * feature_map_count + feature_map_id) * output_height + output_y) * output_width + output_x; float output_error = output_errors[offset]; x_y_config max_pos_xy = max_positions[offset]; int max_pos_x = max_pos_xy.x_y_pair >> 16; int max_pos_y = max_pos_xy.x_y_pair & 0xFFFF; int input_x = output_x * subsampling_width + max_pos_x; int input_y = output_y * subsampling_height + max_pos_y; int input_offset = ((entry_id * feature_map_count + feature_map_id) * input_height + input_y) * input_width + input_x; input_errors[input_offset] = output_error; } } namespace nnforge { namespace cuda { max_subsampling_2d_layer_updater_cuda::max_subsampling_2d_layer_updater_cuda() { input_tex_ref.addressMode[0] = cudaAddressModeBorder; input_tex_ref.normalized = false; } max_subsampling_2d_layer_updater_cuda::~max_subsampling_2d_layer_updater_cuda() { } void max_subsampling_2d_layer_updater_cuda::enqueue_test( unsigned int offset_input_entry_id, cudaStream_t stream_id, const std::vector<const_cuda_linear_buffer_device_smart_ptr>& schema_data, const std::vector<cuda_linear_buffer_device_smart_ptr>& data, const_cuda_linear_buffer_device_smart_ptr input_neurons_buffer, cuda_linear_buffer_device_smart_ptr output_neurons_buffer, const std::vector<cuda_linear_buffer_device_smart_ptr>& additional_buffers, std::vector<cuda_memobject_smart_ptr>& dynamic_memobjects, unsigned int entry_count) { cudaChannelFormatDesc desc = cudaCreateChannelDesc<float>(); cuda_safe_call(cudaBindTexture(0, input_tex_ref, *input_neurons_buffer, desc, input_elem_count_per_entry * entry_count * sizeof(float))); float * output = *output_neurons_buffer; x_y_config * max_positions = (x_y_config *)((void *)(*additional_buffers[0])); int window_x_x_config_count = subsampling_sizes[0] * output_configuration_specific.dimension_sizes[0]; const window_x_x_config * window_x_x_config_list = static_cast<const window_x_x_config *>((const void *)*additional_buffers[1]); int y_feature_map_config_count = 
output_configuration_specific.dimension_sizes[1] * output_configuration_specific.feature_map_count; const y_feature_map_config * y_feature_map_config_list = static_cast<const y_feature_map_config *>((const void *)*additional_buffers[2]); std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_sequential_access( *cuda_config, window_x_x_config_count, y_feature_map_config_count, entry_count, subsampling_sizes[0]); int threadblock_size = kernel_dims.second.x * kernel_dims.second.y * kernel_dims.second.z; int smem_size = threadblock_size * (sizeof(float) + sizeof(window_x_window_y_config)); max_subsampling_2d_tex_upd_kernel<<<kernel_dims.first, kernel_dims.second, smem_size, stream_id>>>( output, max_positions, window_x_x_config_list, y_feature_map_config_list, subsampling_sizes[0], subsampling_sizes[1], input_configuration_specific.dimension_sizes[0], input_configuration_specific.dimension_sizes[1], output_configuration_specific.dimension_sizes[0], output_configuration_specific.dimension_sizes[1], output_configuration_specific.feature_map_count, entry_count, window_x_x_config_count, y_feature_map_config_count); } void max_subsampling_2d_layer_updater_cuda::enqueue_backprop( cudaStream_t stream_id, const std::vector<const_cuda_linear_buffer_device_smart_ptr>& schema_data, const std::vector<cuda_linear_buffer_device_smart_ptr>& data, const_cuda_linear_buffer_device_smart_ptr output_neurons_buffer, const_cuda_linear_buffer_device_smart_ptr input_neurons_buffer, cuda_linear_buffer_device_smart_ptr output_errors_buffer, cuda_linear_buffer_device_smart_ptr input_errors_buffer, const std::vector<cuda_linear_buffer_device_smart_ptr>& additional_buffers, std::vector<cuda_memobject_smart_ptr>& dynamic_memobjects, unsigned int entry_count) { cuda_util::set_with_value( *cuda_config, *input_errors_buffer, 0.0F, input_elem_count_per_entry * entry_count, stream_id); const float * output_errors = *output_errors_buffer; const x_y_config * max_positions = (const x_y_config *)((void *)(*additional_buffers[0])); float * input_errors = *input_errors_buffer; int x_y_config_count = output_configuration_specific.dimension_sizes[0] * output_configuration_specific.dimension_sizes[1]; const x_y_config * x_y_config_list = static_cast<const x_y_config *>((const void *)*additional_buffers[3]); std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_sequential_access( *cuda_config, x_y_config_count, output_configuration_specific.feature_map_count, entry_count); max_subsampling_2d_deriviative_upd_kernel<<<kernel_dims.first, kernel_dims.second, 0, stream_id>>>( input_errors, max_positions, output_errors, x_y_config_list, subsampling_sizes[0], subsampling_sizes[1], input_configuration_specific.dimension_sizes[0], input_configuration_specific.dimension_sizes[1], output_configuration_specific.dimension_sizes[0], output_configuration_specific.dimension_sizes[1], output_configuration_specific.feature_map_count, entry_count, x_y_config_count); } std::vector<unsigned int> max_subsampling_2d_layer_updater_cuda::get_linear_addressing_through_texture_per_entry() const { std::vector<unsigned int> res; res.push_back(input_elem_count_per_entry); return res; } void max_subsampling_2d_layer_updater_cuda::updater_configured() { if (!different_input) throw neural_network_exception("max_subsampling_2d_layer_updater_cuda is not able to run using the same input"); std::tr1::shared_ptr<const max_subsampling_layer> layer_derived = std::tr1::dynamic_pointer_cast<const 
max_subsampling_layer>(layer_schema); subsampling_sizes = layer_derived->subsampling_sizes; } bool max_subsampling_2d_layer_updater_cuda::is_in_place_backprop() const { return false; } std::vector<size_t> max_subsampling_2d_layer_updater_cuda::get_sizes_of_additional_buffers_per_entry() const { std::vector<size_t> res; res.push_back(output_elem_count_per_entry * sizeof(float)); return res; } std::vector<size_t> max_subsampling_2d_layer_updater_cuda::get_sizes_of_additional_buffers_fixed() const { std::vector<size_t> res; res.push_back(sizeof(window_x_x_config) * subsampling_sizes[0] * output_configuration_specific.dimension_sizes[0]); res.push_back(sizeof(y_feature_map_config) * output_configuration_specific.dimension_sizes[1] * output_configuration_specific.feature_map_count); res.push_back(sizeof(x_y_config) * output_configuration_specific.dimension_sizes[1] * output_configuration_specific.dimension_sizes[0]); return res; } void max_subsampling_2d_layer_updater_cuda::fill_additional_buffers(const std::vector<cuda_linear_buffer_device_smart_ptr>& additional_buffers) const { { std::vector<window_x_x_config> task_list; for(int x = 0; x < output_configuration_specific.dimension_sizes[0]; ++x) for(int window_x = 0; window_x < subsampling_sizes[0]; ++window_x) task_list.push_back(window_x_x_config(window_x, x)); cuda_safe_call(cudaMemcpy(*additional_buffers[1], &(*task_list.begin()), sizeof(window_x_x_config) * task_list.size(), cudaMemcpyHostToDevice)); } { std::vector<y_feature_map_config> task_list; for(int feature_map_id = 0; feature_map_id < output_configuration_specific.feature_map_count; ++feature_map_id) for(int y = 0; y < output_configuration_specific.dimension_sizes[1]; ++y) task_list.push_back(y_feature_map_config(y, feature_map_id)); cuda_safe_call(cudaMemcpy(*additional_buffers[2], &(*task_list.begin()), sizeof(y_feature_map_config) * task_list.size(), cudaMemcpyHostToDevice)); } { std::vector<x_y_config> task_list; for(int y = 0; y < output_configuration_specific.dimension_sizes[1]; ++y) for(int x = 0; x < output_configuration_specific.dimension_sizes[0]; ++x) task_list.push_back(x_y_config(x, y)); cuda_safe_call(cudaMemcpy(*additional_buffers[3], &(*task_list.begin()), sizeof(x_y_config) * task_list.size(), cudaMemcpyHostToDevice)); } } } }
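// enqueue_backprop above first zero-fills the input gradient and then
// scatters each output gradient to the single input element recorded as the
// forward argmax. A minimal 1-D host sketch of that routing, assuming
// non-overlapping windows as in the kernel (names are illustrative):
#include <cstddef>
#include <vector>

void max_subsample_backward_1d(const std::vector<float>& out_grad,
                               const std::vector<int>& max_pos,  // forward argmax
                               int window, std::vector<float>& in_grad) {
  for (float& g : in_grad) g = 0.f;  // mirrors cuda_util::set_with_value(..., 0.0F, ...)
  for (std::size_t o = 0; o < out_grad.size(); ++o)
    in_grad[o * window + max_pos[o]] = out_grad[o];  // scatter to the winner
}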
ee09c3f27a224145e1f8332ffbb82bdcf207ea06.hip
// !!! This is a file automatically generated by hipify!!! #include <gtx/string_cast.hpp> #include <gtest/gtest.h> #include <thrust/device_vector.h> #include "Utils.hpp" #include "CudaUtils_hip.cuh" #include "renderer/CudaRender.cuh" #include "CudaTest.cuh" using namespace utils; using namespace utils::cuda; using namespace render; using namespace render::cuda; std::ostream& operator << (std::ostream& os, const Ray& ray) { return os << "<Ray: origin(" << ray.origin.x << ";" << ray.origin.y << ";" << ray.origin.z << "), direction(" << ray.direction.x << ";" << ray.direction.y << ";" << ray.direction.z << ")>"; } void dump_rays(const Ray& ray_1, const Ray& ray_2) { std::cout << " - " << ray_1 << std::endl << " - " << ray_2 << std::endl; } std::ostream& operator << (std::ostream& os, const Hit& hit) { static constexpr char MISSED_TEMPLATE[] = "<Hit: is_hitted - 0>"; static constexpr char HITTED_TEMPLATE[] = "<Hit: t_near - %3.5f, t_far - %3.5f, point - (%.3f;%.3f;%.3f)" ", normal - (%.3f; %.3f; %.3f)>"; static char BUFFER[256]; size_t count = 0; if (!hit.is_hitted()) { count = sprintf(BUFFER, MISSED_TEMPLATE); } else { const Point& point = hit.point(); const Vector& normal = hit.normal(); count = sprintf(BUFFER, HITTED_TEMPLATE, hit.t_near(), hit.t_far(), point.x, point.y, point.z, normal.x, normal.y, normal.z); } os.write(BUFFER, count); return os; } void dump_hits(const Hit& hit_1, const Hit& hit_2) { std::cout << " - " << hit_1 << std::endl << " - " << hit_2 << std::endl; } Point CameraTest::z_positive(0.0, 0.0, 20.0); Point CameraTest::z_negative(0.0, 0.0, -20.0); Point CameraTest::x_positive(20.0, 0.0, 0.0); Point CameraTest::x_negative(-20.0, 0.0, 0.0); Point CameraTest::y_negative(0.0, -20.0, 0.0); Point CameraTest::y_positive(0.0, 20.0, 0.0); CameraTest::CameraTest() : testing::Test() , _camera(Point(0.0, 0.0, 20.0), Point(0.0, 0.0, 0.0), to_radian(50), 20, 20) {} TEST_F(CameraTest, emit_ray) { Ray ray_1 = _camera.emit_ray(0, 0); std::cout << ray_1 << std::endl; } TEST_F(CameraTest, hit_sphere) { Sphere sphere(Point(0.0, 0.0, 0.0), 5.0, Material()); Ray ray = _camera.emit_ray(0, 0); std::cout << ray << std::endl; Hit hit = sphere.hit(ray); std::cout << hit << std::endl; EXPECT_FALSE(hit.is_hitted()); std::cout << std::endl; ray = _camera.emit_ray(_camera.width() / 2, _camera.height() / 2); std::cout << ray << std::endl; hit = sphere.hit(ray); std::cout << hit << std::endl; EXPECT_TRUE(hit.is_hitted()); } TEST_F(CameraTest, update_position) { Sphere sphere(Point(0.0, 0.0, 0.0), 5.0, Material()); int w_pos = _camera.width() / 2; int h_pos = _camera.height() / 2; Ray ray = _camera.emit_ray(w_pos, h_pos); EXPECT_TRUE(sphere.hit(ray).is_hitted()); _camera.update_position(x_positive); ray = _camera.emit_ray(w_pos, h_pos); std::cout << ray << std::endl; EXPECT_TRUE(sphere.hit(ray).is_hitted()); _camera.update_position(x_negative); ray = _camera.emit_ray(w_pos, h_pos); std::cout << ray << std::endl; EXPECT_TRUE(sphere.hit(ray).is_hitted()); _camera.update_position(y_positive); ray = _camera.emit_ray(w_pos, h_pos); std::cout << ray << std::endl; EXPECT_TRUE(sphere.hit(ray).is_hitted()); _camera.update_position(y_negative); ray = _camera.emit_ray(w_pos, h_pos); std::cout << ray << std::endl; EXPECT_TRUE(sphere.hit(ray).is_hitted()); _camera.update_position(z_positive); ray = _camera.emit_ray(w_pos, h_pos); std::cout << ray << std::endl; EXPECT_TRUE(sphere.hit(ray).is_hitted()); _camera.update_position(z_negative); ray = _camera.emit_ray(w_pos, h_pos); std::cout << ray << std::endl; 
EXPECT_TRUE(sphere.hit(ray).is_hitted()); _camera.update_position(x_positive + y_positive + z_positive); _camera.dump(); ray = _camera.emit_ray(w_pos, h_pos); std::cout << ray << std::endl; EXPECT_TRUE(sphere.hit(ray).is_hitted()); } TEST_F(CameraTest, move_camera_near_z) { Sphere sphere(Point(0.0, 0.0, 0.0), 5.0, Material()); int h_pos = _camera.height() / 2; int w_pos = _camera.width() / 2; _camera.move_left(); _camera.dump(); Ray ray = _camera.emit_ray(w_pos, h_pos); std::cout << ray << std::endl; EXPECT_TRUE(sphere.hit(ray).is_hitted()); _camera.move_right(); _camera.move_right(); _camera.dump(); ray = _camera.emit_ray(w_pos, h_pos); std::cout << ray << std::endl; EXPECT_TRUE(sphere.hit(ray).is_hitted()); _camera.update_position(z_positive); _camera.move_up(); _camera.dump(); ray = _camera.emit_ray(w_pos, h_pos); std::cout << ray << std::endl; EXPECT_TRUE(sphere.hit(ray).is_hitted()); for (size_t i = 0; i < 100; ++i) { _camera.move_down(); } EXPECT_TRUE(::fabs(::fabs(_camera.position().y) - 49.5) < 0.0000001); ray = _camera.emit_ray(w_pos, h_pos); std::cout << ray << std::endl; EXPECT_TRUE(sphere.hit(ray).is_hitted()); } static CudaRender& inst_render(int width = 0, int height = 0) { static CudaRender _render = make_render(width, height); return _render; } static void key_callback(GLFWwindow* window, int key, int scancode, int action, int mods); int main(int argc, char** argv) { atexit(post_processing); testing::InitGoogleTest(&argc, argv); int test_run_result = RUN_ALL_TESTS(); const int width = 860; const int height = 640; GLFWwindow* window = load_glfw("cuda raytracer", width, height); glfwSetKeyCallback(window, key_callback); load_opengl(); CudaRender& render = inst_render(width, height); while (!glfwWindowShouldClose(window)) { render.render(); glfwSwapBuffers(window); render.draw(); glfwPollEvents(); } exit(test_run_result); } static void key_callback(GLFWwindow* window, int key, int scancode, int action, int mods) { switch (key) { case GLFW_KEY_ESCAPE: glfwSetWindowShouldClose(window, true); break; case GLFW_KEY_W: inst_render().camera()->move_forward(); break; case GLFW_KEY_S: inst_render().camera()->move_backward(); break; case GLFW_KEY_A: inst_render().camera()->move_left(); break; case GLFW_KEY_D: inst_render().camera()->move_right(); break; case GLFW_KEY_UP: inst_render().camera()->move_up(); break; case GLFW_KEY_DOWN: inst_render().camera()->move_down(); break; case GLFW_KEY_KP_8: inst_render().camera()->update_position(Point(0.0, 100.0, 0.0)); break; case GLFW_KEY_KP_2: inst_render().camera()->update_position(Point(0.0, -100.0, 0.0)); break; case GLFW_KEY_KP_5: { if (action != GLFW_PRESS) return; static bool is_front = true; is_front ? inst_render().camera()->update_position(Point(0.0, 0.0, -100.0)) : inst_render().camera()->update_position(Point(0.0, 0.0, 100.0)); is_front = !is_front; break; } case GLFW_KEY_KP_4: inst_render().camera()->update_position(Point(-100.0, 0.0, 0.0)); break; case GLFW_KEY_KP_6: inst_render().camera()->update_position(Point(100.0, 0.0, 0.0)); break; default: std::cout << key << std::endl; } }
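// The position check above (fabs(fabs(y) - 49.5) < 0.0000001) is an
// absolute-tolerance float comparison; a small helper expressing the same
// pattern (helper name and default epsilon are illustrative):
#include <cmath>

inline bool approx_equal(double a, double b, double eps = 1e-7) {
  return std::fabs(a - b) < eps;
}
// approx_equal(std::fabs(camera_y), 49.5) is the shape of the EXPECT_TRUE above.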
ee09c3f27a224145e1f8332ffbb82bdcf207ea06.cu
#include <gtx/string_cast.hpp> #include <gtest/gtest.h> #include <thrust/device_vector.h> #include "Utils.hpp" #include "CudaUtils.cuh" #include "renderer/CudaRender.cuh" #include "CudaTest.cuh" using namespace utils; using namespace utils::cuda; using namespace render; using namespace render::cuda; std::ostream& operator << (std::ostream& os, const Ray& ray) { return os << "<Ray: origin(" << ray.origin.x << ";" << ray.origin.y << ";" << ray.origin.z << "), direction(" << ray.direction.x << ";" << ray.direction.y << ";" << ray.direction.z << ")>"; } void dump_rays(const Ray& ray_1, const Ray& ray_2) { std::cout << " - " << ray_1 << std::endl << " - " << ray_2 << std::endl; } std::ostream& operator << (std::ostream& os, const Hit& hit) { static constexpr char MISSED_TEMPLATE[] = "<Hit: is_hitted - 0>"; static constexpr char HITTED_TEMPLATE[] = "<Hit: t_near - %3.5f, t_far - %3.5f, point - (%.3f;%.3f;%.3f)" ", normal - (%.3f; %.3f; %.3f)>"; static char BUFFER[256]; size_t count = 0; if (!hit.is_hitted()) { count = sprintf(BUFFER, MISSED_TEMPLATE); } else { const Point& point = hit.point(); const Vector& normal = hit.normal(); count = sprintf(BUFFER, HITTED_TEMPLATE, hit.t_near(), hit.t_far(), point.x, point.y, point.z, normal.x, normal.y, normal.z); } os.write(BUFFER, count); return os; } void dump_hits(const Hit& hit_1, const Hit& hit_2) { std::cout << " - " << hit_1 << std::endl << " - " << hit_2 << std::endl; } Point CameraTest::z_positive(0.0, 0.0, 20.0); Point CameraTest::z_negative(0.0, 0.0, -20.0); Point CameraTest::x_positive(20.0, 0.0, 0.0); Point CameraTest::x_negative(-20.0, 0.0, 0.0); Point CameraTest::y_negative(0.0, -20.0, 0.0); Point CameraTest::y_positive(0.0, 20.0, 0.0); CameraTest::CameraTest() : testing::Test() , _camera(Point(0.0, 0.0, 20.0), Point(0.0, 0.0, 0.0), to_radian(50), 20, 20) {} TEST_F(CameraTest, emit_ray) { Ray ray_1 = _camera.emit_ray(0, 0); std::cout << ray_1 << std::endl; } TEST_F(CameraTest, hit_sphere) { Sphere sphere(Point(0.0, 0.0, 0.0), 5.0, Material()); Ray ray = _camera.emit_ray(0, 0); std::cout << ray << std::endl; Hit hit = sphere.hit(ray); std::cout << hit << std::endl; EXPECT_FALSE(hit.is_hitted()); std::cout << std::endl; ray = _camera.emit_ray(_camera.width() / 2, _camera.height() / 2); std::cout << ray << std::endl; hit = sphere.hit(ray); std::cout << hit << std::endl; EXPECT_TRUE(hit.is_hitted()); } TEST_F(CameraTest, update_position) { Sphere sphere(Point(0.0, 0.0, 0.0), 5.0, Material()); int w_pos = _camera.width() / 2; int h_pos = _camera.height() / 2; Ray ray = _camera.emit_ray(w_pos, h_pos); EXPECT_TRUE(sphere.hit(ray).is_hitted()); _camera.update_position(x_positive); ray = _camera.emit_ray(w_pos, h_pos); std::cout << ray << std::endl; EXPECT_TRUE(sphere.hit(ray).is_hitted()); _camera.update_position(x_negative); ray = _camera.emit_ray(w_pos, h_pos); std::cout << ray << std::endl; EXPECT_TRUE(sphere.hit(ray).is_hitted()); _camera.update_position(y_positive); ray = _camera.emit_ray(w_pos, h_pos); std::cout << ray << std::endl; EXPECT_TRUE(sphere.hit(ray).is_hitted()); _camera.update_position(y_negative); ray = _camera.emit_ray(w_pos, h_pos); std::cout << ray << std::endl; EXPECT_TRUE(sphere.hit(ray).is_hitted()); _camera.update_position(z_positive); ray = _camera.emit_ray(w_pos, h_pos); std::cout << ray << std::endl; EXPECT_TRUE(sphere.hit(ray).is_hitted()); _camera.update_position(z_negative); ray = _camera.emit_ray(w_pos, h_pos); std::cout << ray << std::endl; EXPECT_TRUE(sphere.hit(ray).is_hitted()); 
_camera.update_position(x_positive + y_positive + z_positive); _camera.dump(); ray = _camera.emit_ray(w_pos, h_pos); std::cout << ray << std::endl; EXPECT_TRUE(sphere.hit(ray).is_hitted()); } TEST_F(CameraTest, move_camera_near_z) { Sphere sphere(Point(0.0, 0.0, 0.0), 5.0, Material()); int h_pos = _camera.height() / 2; int w_pos = _camera.width() / 2; _camera.move_left(); _camera.dump(); Ray ray = _camera.emit_ray(w_pos, h_pos); std::cout << ray << std::endl; EXPECT_TRUE(sphere.hit(ray).is_hitted()); _camera.move_right(); _camera.move_right(); _camera.dump(); ray = _camera.emit_ray(w_pos, h_pos); std::cout << ray << std::endl; EXPECT_TRUE(sphere.hit(ray).is_hitted()); _camera.update_position(z_positive); _camera.move_up(); _camera.dump(); ray = _camera.emit_ray(w_pos, h_pos); std::cout << ray << std::endl; EXPECT_TRUE(sphere.hit(ray).is_hitted()); for (size_t i = 0; i < 100; ++i) { _camera.move_down(); } EXPECT_TRUE(std::fabs(std::fabs(_camera.position().y) - 49.5) < 0.0000001); ray = _camera.emit_ray(w_pos, h_pos); std::cout << ray << std::endl; EXPECT_TRUE(sphere.hit(ray).is_hitted()); } static CudaRender& inst_render(int width = 0, int height = 0) { static CudaRender _render = make_render(width, height); return _render; } static void key_callback(GLFWwindow* window, int key, int scancode, int action, int mods); int main(int argc, char** argv) { atexit(post_processing); testing::InitGoogleTest(&argc, argv); int test_run_result = RUN_ALL_TESTS(); const int width = 860; const int height = 640; GLFWwindow* window = load_glfw("cuda raytracer", width, height); glfwSetKeyCallback(window, key_callback); load_opengl(); CudaRender& render = inst_render(width, height); while (!glfwWindowShouldClose(window)) { render.render(); glfwSwapBuffers(window); render.draw(); glfwPollEvents(); } exit(test_run_result); } static void key_callback(GLFWwindow* window, int key, int scancode, int action, int mods) { switch (key) { case GLFW_KEY_ESCAPE: glfwSetWindowShouldClose(window, true); break; case GLFW_KEY_W: inst_render().camera()->move_forward(); break; case GLFW_KEY_S: inst_render().camera()->move_backward(); break; case GLFW_KEY_A: inst_render().camera()->move_left(); break; case GLFW_KEY_D: inst_render().camera()->move_right(); break; case GLFW_KEY_UP: inst_render().camera()->move_up(); break; case GLFW_KEY_DOWN: inst_render().camera()->move_down(); break; case GLFW_KEY_KP_8: inst_render().camera()->update_position(Point(0.0, 100.0, 0.0)); break; case GLFW_KEY_KP_2: inst_render().camera()->update_position(Point(0.0, -100.0, 0.0)); break; case GLFW_KEY_KP_5: { if (action != GLFW_PRESS) return; static bool is_front = true; is_front ? inst_render().camera()->update_position(Point(0.0, 0.0, -100.0)) : inst_render().camera()->update_position(Point(0.0, 0.0, 100.0)); is_front = !is_front; break; } case GLFW_KEY_KP_4: inst_render().camera()->update_position(Point(-100.0, 0.0, 0.0)); break; case GLFW_KEY_KP_6: inst_render().camera()->update_position(Point(100.0, 0.0, 0.0)); break; default: std::cout << key << std::endl; } }
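// inst_render above is a function-local-static ("Meyers") singleton:
// make_render runs only on the very first call, so the width/height passed
// on any later call are ignored. A minimal sketch of the idiom (names are
// illustrative):
int& lazy_value(int init = 0) {
  static int value = init;  // initializer runs exactly once, on first use
  return value;
}
// lazy_value(5);  // first call: value becomes 5
// lazy_value(9);  // still returns 5; the initializer no longer runs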
b4593637d09d6b96ceb1207887d71338e36b01b6.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // // auto-generated by ops.py // __constant__ int xdim0_initialise_chunk_kernel_celly; int xdim0_initialise_chunk_kernel_celly_h = -1; int ydim0_initialise_chunk_kernel_celly_h = -1; __constant__ int xdim1_initialise_chunk_kernel_celly; int xdim1_initialise_chunk_kernel_celly_h = -1; int ydim1_initialise_chunk_kernel_celly_h = -1; __constant__ int xdim2_initialise_chunk_kernel_celly; int xdim2_initialise_chunk_kernel_celly_h = -1; int ydim2_initialise_chunk_kernel_celly_h = -1; #undef OPS_ACC0 #undef OPS_ACC1 #undef OPS_ACC2 #define OPS_ACC0(x,y) (x+xdim0_initialise_chunk_kernel_celly*(y)) #define OPS_ACC1(x,y) (x+xdim1_initialise_chunk_kernel_celly*(y)) #define OPS_ACC2(x,y) (x+xdim2_initialise_chunk_kernel_celly*(y)) //user function __device__ void initialise_chunk_kernel_celly_gpu(const double *vertexy, double *celly, double *celldy) { double d_y; d_y = (grid.ymax - grid.ymin)/(double)grid.y_cells; celly[OPS_ACC1(0,0)] = 0.5*( vertexy[OPS_ACC0(0,0)]+ vertexy[OPS_ACC0(0,1)] ); celldy[OPS_ACC2(0,0)] = d_y; } #undef OPS_ACC0 #undef OPS_ACC1 #undef OPS_ACC2 __global__ void ops_initialise_chunk_kernel_celly( const double* __restrict arg0, double* __restrict arg1, double* __restrict arg2, int size0, int size1 ){ int idx_y = blockDim.y * blockIdx.y + threadIdx.y; int idx_x = blockDim.x * blockIdx.x + threadIdx.x; arg0 += idx_x * 0*1 + idx_y * 1*1 * xdim0_initialise_chunk_kernel_celly; arg1 += idx_x * 0*1 + idx_y * 1*1 * xdim1_initialise_chunk_kernel_celly; arg2 += idx_x * 0*1 + idx_y * 1*1 * xdim2_initialise_chunk_kernel_celly; if (idx_x < size0 && idx_y < size1) { initialise_chunk_kernel_celly_gpu(arg0, arg1, arg2); } } // host stub function void ops_par_loop_initialise_chunk_kernel_celly(char const *name, ops_block block, int dim, int* range, ops_arg arg0, ops_arg arg1, ops_arg arg2) { //Timing double t1,t2,c1,c2; ops_arg args[3] = { arg0, arg1, arg2}; #ifdef CHECKPOINTING if (!ops_checkpointing_before(args,3,range,13)) return; #endif if (OPS_diags > 1) { ops_timing_realloc(13,"initialise_chunk_kernel_celly"); OPS_kernels[13].count++; ops_timers_core(&c1,&t1); } //compute locally allocated range for the sub-block int start[2]; int end[2]; #ifdef OPS_MPI sub_block_list sb = OPS_sub_block_list[block->index]; if (!sb->owned) return; for ( int n=0; n<2; n++ ){ start[n] = sb->decomp_disp[n];end[n] = sb->decomp_disp[n]+sb->decomp_size[n]; if (start[n] >= range[2*n]) { start[n] = 0; } else { start[n] = range[2*n] - start[n]; } if (sb->id_m[n]==MPI_PROC_NULL && range[2*n] < 0) start[n] = range[2*n]; if (end[n] >= range[2*n+1]) { end[n] = range[2*n+1] - sb->decomp_disp[n]; } else { end[n] = sb->decomp_size[n]; } if (sb->id_p[n]==MPI_PROC_NULL && (range[2*n+1] > sb->decomp_disp[n]+sb->decomp_size[n])) end[n] += (range[2*n+1]-sb->decomp_disp[n]-sb->decomp_size[n]); } #else for ( int n=0; n<2; n++ ){ start[n] = range[2*n];end[n] = range[2*n+1]; } #endif int x_size = MAX(0,end[0]-start[0]); int y_size = MAX(0,end[1]-start[1]); int xdim0 = args[0].dat->size[0]; int xdim1 = args[1].dat->size[0]; int xdim2 = args[2].dat->size[0]; if (xdim0 != xdim0_initialise_chunk_kernel_celly_h || xdim1 != xdim1_initialise_chunk_kernel_celly_h || xdim2 != xdim2_initialise_chunk_kernel_celly_h) { hipMemcpyToSymbol( xdim0_initialise_chunk_kernel_celly, &xdim0, sizeof(int) ); xdim0_initialise_chunk_kernel_celly_h = xdim0; hipMemcpyToSymbol( xdim1_initialise_chunk_kernel_celly, &xdim1, sizeof(int) ); xdim1_initialise_chunk_kernel_celly_h 
= xdim1; hipMemcpyToSymbol( xdim2_initialise_chunk_kernel_celly, &xdim2, sizeof(int) ); xdim2_initialise_chunk_kernel_celly_h = xdim2; } dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, 1); dim3 tblock(OPS_block_size_x,OPS_block_size_y,1); int dat0 = args[0].dat->elem_size; int dat1 = args[1].dat->elem_size; int dat2 = args[2].dat->elem_size; char *p_a[3]; //set up initial pointers int d_m[OPS_MAX_DIM]; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d]; #endif int base0 = dat0 * 1 * (start[0] * args[0].stencil->stride[0] - args[0].dat->base[0] - d_m[0]); base0 = base0+ dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1] - args[0].dat->base[1] - d_m[1]); p_a[0] = (char *)args[0].data_d + base0; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d] + OPS_sub_dat_list[args[1].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d]; #endif int base1 = dat1 * 1 * (start[0] * args[1].stencil->stride[0] - args[1].dat->base[0] - d_m[0]); base1 = base1+ dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1] - args[1].dat->base[1] - d_m[1]); p_a[1] = (char *)args[1].data_d + base1; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[2].dat->d_m[d] + OPS_sub_dat_list[args[2].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[2].dat->d_m[d]; #endif int base2 = dat2 * 1 * (start[0] * args[2].stencil->stride[0] - args[2].dat->base[0] - d_m[0]); base2 = base2+ dat2 * args[2].dat->size[0] * (start[1] * args[2].stencil->stride[1] - args[2].dat->base[1] - d_m[1]); p_a[2] = (char *)args[2].data_d + base2; ops_H_D_exchanges_device(args, 3); ops_halo_exchanges(args,3,range); if (OPS_diags > 1) { ops_timers_core(&c2,&t2); OPS_kernels[13].mpi_time += t2-t1; } //call kernel wrapper function, passing in pointers to data hipLaunchKernelGGL(( ops_initialise_chunk_kernel_celly), dim3(grid), dim3(tblock) , 0, 0, (double *)p_a[0], (double *)p_a[1], (double *)p_a[2],x_size, y_size); if (OPS_diags>1) { cutilSafeCall(hipDeviceSynchronize()); ops_timers_core(&c1,&t1); OPS_kernels[13].time += t1-t2; } ops_set_dirtybit_device(args, 3); ops_set_halo_dirtybit3(&args[1],range); ops_set_halo_dirtybit3(&args[2],range); if (OPS_diags > 1) { //Update kernel record ops_timers_core(&c2,&t2); OPS_kernels[13].mpi_time += t2-t1; OPS_kernels[13].transfer += ops_compute_transfer(dim, start, end, &arg0); OPS_kernels[13].transfer += ops_compute_transfer(dim, start, end, &arg1); OPS_kernels[13].transfer += ops_compute_transfer(dim, start, end, &arg2); } }
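// The launch configuration above sizes the grid with the ceiling-division
// idiom (n - 1) / b + 1, so blocks of b threads cover n items with at most
// b - 1 idle threads in the last block. A minimal sketch, valid for n >= 1
// (the helper name is illustrative):
inline unsigned int div_up(unsigned int n, unsigned int b) {
  return (n - 1) / b + 1;  // == ceil(n / (double)b) when n >= 1
}
// dim3 grid(div_up(x_size, OPS_block_size_x), div_up(y_size, OPS_block_size_y), 1);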
b4593637d09d6b96ceb1207887d71338e36b01b6.cu
// // auto-generated by ops.py // __constant__ int xdim0_initialise_chunk_kernel_celly; int xdim0_initialise_chunk_kernel_celly_h = -1; int ydim0_initialise_chunk_kernel_celly_h = -1; __constant__ int xdim1_initialise_chunk_kernel_celly; int xdim1_initialise_chunk_kernel_celly_h = -1; int ydim1_initialise_chunk_kernel_celly_h = -1; __constant__ int xdim2_initialise_chunk_kernel_celly; int xdim2_initialise_chunk_kernel_celly_h = -1; int ydim2_initialise_chunk_kernel_celly_h = -1; #undef OPS_ACC0 #undef OPS_ACC1 #undef OPS_ACC2 #define OPS_ACC0(x,y) (x+xdim0_initialise_chunk_kernel_celly*(y)) #define OPS_ACC1(x,y) (x+xdim1_initialise_chunk_kernel_celly*(y)) #define OPS_ACC2(x,y) (x+xdim2_initialise_chunk_kernel_celly*(y)) //user function __device__ void initialise_chunk_kernel_celly_gpu(const double *vertexy, double *celly, double *celldy) { double d_y; d_y = (grid.ymax - grid.ymin)/(double)grid.y_cells; celly[OPS_ACC1(0,0)] = 0.5*( vertexy[OPS_ACC0(0,0)]+ vertexy[OPS_ACC0(0,1)] ); celldy[OPS_ACC2(0,0)] = d_y; } #undef OPS_ACC0 #undef OPS_ACC1 #undef OPS_ACC2 __global__ void ops_initialise_chunk_kernel_celly( const double* __restrict arg0, double* __restrict arg1, double* __restrict arg2, int size0, int size1 ){ int idx_y = blockDim.y * blockIdx.y + threadIdx.y; int idx_x = blockDim.x * blockIdx.x + threadIdx.x; arg0 += idx_x * 0*1 + idx_y * 1*1 * xdim0_initialise_chunk_kernel_celly; arg1 += idx_x * 0*1 + idx_y * 1*1 * xdim1_initialise_chunk_kernel_celly; arg2 += idx_x * 0*1 + idx_y * 1*1 * xdim2_initialise_chunk_kernel_celly; if (idx_x < size0 && idx_y < size1) { initialise_chunk_kernel_celly_gpu(arg0, arg1, arg2); } } // host stub function void ops_par_loop_initialise_chunk_kernel_celly(char const *name, ops_block block, int dim, int* range, ops_arg arg0, ops_arg arg1, ops_arg arg2) { //Timing double t1,t2,c1,c2; ops_arg args[3] = { arg0, arg1, arg2}; #ifdef CHECKPOINTING if (!ops_checkpointing_before(args,3,range,13)) return; #endif if (OPS_diags > 1) { ops_timing_realloc(13,"initialise_chunk_kernel_celly"); OPS_kernels[13].count++; ops_timers_core(&c1,&t1); } //compute locally allocated range for the sub-block int start[2]; int end[2]; #ifdef OPS_MPI sub_block_list sb = OPS_sub_block_list[block->index]; if (!sb->owned) return; for ( int n=0; n<2; n++ ){ start[n] = sb->decomp_disp[n];end[n] = sb->decomp_disp[n]+sb->decomp_size[n]; if (start[n] >= range[2*n]) { start[n] = 0; } else { start[n] = range[2*n] - start[n]; } if (sb->id_m[n]==MPI_PROC_NULL && range[2*n] < 0) start[n] = range[2*n]; if (end[n] >= range[2*n+1]) { end[n] = range[2*n+1] - sb->decomp_disp[n]; } else { end[n] = sb->decomp_size[n]; } if (sb->id_p[n]==MPI_PROC_NULL && (range[2*n+1] > sb->decomp_disp[n]+sb->decomp_size[n])) end[n] += (range[2*n+1]-sb->decomp_disp[n]-sb->decomp_size[n]); } #else for ( int n=0; n<2; n++ ){ start[n] = range[2*n];end[n] = range[2*n+1]; } #endif int x_size = MAX(0,end[0]-start[0]); int y_size = MAX(0,end[1]-start[1]); int xdim0 = args[0].dat->size[0]; int xdim1 = args[1].dat->size[0]; int xdim2 = args[2].dat->size[0]; if (xdim0 != xdim0_initialise_chunk_kernel_celly_h || xdim1 != xdim1_initialise_chunk_kernel_celly_h || xdim2 != xdim2_initialise_chunk_kernel_celly_h) { cudaMemcpyToSymbol( xdim0_initialise_chunk_kernel_celly, &xdim0, sizeof(int) ); xdim0_initialise_chunk_kernel_celly_h = xdim0; cudaMemcpyToSymbol( xdim1_initialise_chunk_kernel_celly, &xdim1, sizeof(int) ); xdim1_initialise_chunk_kernel_celly_h = xdim1; cudaMemcpyToSymbol( xdim2_initialise_chunk_kernel_celly, &xdim2, sizeof(int) 
); xdim2_initialise_chunk_kernel_celly_h = xdim2; } dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, 1); dim3 tblock(OPS_block_size_x,OPS_block_size_y,1); int dat0 = args[0].dat->elem_size; int dat1 = args[1].dat->elem_size; int dat2 = args[2].dat->elem_size; char *p_a[3]; //set up initial pointers int d_m[OPS_MAX_DIM]; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d]; #endif int base0 = dat0 * 1 * (start[0] * args[0].stencil->stride[0] - args[0].dat->base[0] - d_m[0]); base0 = base0+ dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1] - args[0].dat->base[1] - d_m[1]); p_a[0] = (char *)args[0].data_d + base0; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d] + OPS_sub_dat_list[args[1].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d]; #endif int base1 = dat1 * 1 * (start[0] * args[1].stencil->stride[0] - args[1].dat->base[0] - d_m[0]); base1 = base1+ dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1] - args[1].dat->base[1] - d_m[1]); p_a[1] = (char *)args[1].data_d + base1; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[2].dat->d_m[d] + OPS_sub_dat_list[args[2].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[2].dat->d_m[d]; #endif int base2 = dat2 * 1 * (start[0] * args[2].stencil->stride[0] - args[2].dat->base[0] - d_m[0]); base2 = base2+ dat2 * args[2].dat->size[0] * (start[1] * args[2].stencil->stride[1] - args[2].dat->base[1] - d_m[1]); p_a[2] = (char *)args[2].data_d + base2; ops_H_D_exchanges_device(args, 3); ops_halo_exchanges(args,3,range); if (OPS_diags > 1) { ops_timers_core(&c2,&t2); OPS_kernels[13].mpi_time += t2-t1; } //call kernel wrapper function, passing in pointers to data ops_initialise_chunk_kernel_celly<<<grid, tblock >>> ( (double *)p_a[0], (double *)p_a[1], (double *)p_a[2],x_size, y_size); if (OPS_diags>1) { cutilSafeCall(cudaDeviceSynchronize()); ops_timers_core(&c1,&t1); OPS_kernels[13].time += t1-t2; } ops_set_dirtybit_device(args, 3); ops_set_halo_dirtybit3(&args[1],range); ops_set_halo_dirtybit3(&args[2],range); if (OPS_diags > 1) { //Update kernel record ops_timers_core(&c2,&t2); OPS_kernels[13].mpi_time += t2-t1; OPS_kernels[13].transfer += ops_compute_transfer(dim, start, end, &arg0); OPS_kernels[13].transfer += ops_compute_transfer(dim, start, end, &arg1); OPS_kernels[13].transfer += ops_compute_transfer(dim, start, end, &arg2); } }
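Each record in this dump pairs a hipified source (.hip) with its CUDA original (.cu); the one systematic difference in the host code is the kernel-launch form, as in the OPS stub above. A minimal sketch of that mapping, using an illustrative kernel named scale that does not come from any file here:

#include "hip/hip_runtime.h"

__global__ void scale(float *x, float a, int n) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) x[i] *= a;
}

void launch(float *d_x, float a, int n) {
  dim3 grid((n + 255) / 256), block(256);
  // CUDA original:  scale<<<grid, block>>>(d_x, a, n);
  // hipify form: explicit grid and block, then dynamic shared-memory bytes
  // (0) and stream (0, the default stream), then the kernel arguments.
  hipLaunchKernelGGL(scale, grid, block, 0, 0, d_x, a, n);
}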
7e837848271744bfba0e02e5f5f25263784f1bde.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Julian Gutierrez * Northeastern University * High Performance Computing * * Sobel Algorithm Implementation * */ #include "sobel.h" /*******************************************************/ /* Cuda Error Function */ /*******************************************************/ inline hipError_t checkCuda(hipError_t result) { #if defined(DEBUG) || defined(_DEBUG) if (result != hipSuccess) { fprintf(stderr, "CUDA Runtime Error: %s\n", hipGetErrorString(result)); exit(-1); } #endif return result; } using namespace std; void modThreshold (unsigned int value){ threshold = value; } /* * Sobel Kernel */ __global__ void sobelAlgorithm(unsigned int *intensity, unsigned int *result, unsigned int threshold){ int tx = threadIdx.x; int ty = threadIdx.y; int bx = blockIdx.x; int by = blockIdx.y; // Including border __shared__ unsigned char inTile[TILE_SIZE+2][TILE_SIZE+2]; // input __shared__ unsigned char outTile[TILE_SIZE+2][TILE_SIZE+2]; // output // Read Input Data into Shared Memory ///////////////////////////////////////////////////////////////////////////////////// int x = bx<<BTSB; x = x + tx; x = x<<TTSB; int y = by<<BTSB; y = y + ty; y = y<<TTSB; int location = (((x>>TTSB)&BTSMask) ) | (((y>>TTSB)&BTSMask) << BTSB ) | ((x>>TSB) << (BTSB+BTSB) ) ; location += ((y>>TSB)<<(BTSB+BTSB))*gridDim.x; int intensityData = intensity[location]; int sharedX = tx*THREAD_TILE_SIZE+1; int sharedY = ty*THREAD_TILE_SIZE+1; inTile[sharedY ][sharedX ] = intensityData & 0xFF; inTile[sharedY ][sharedX+1] = (intensityData >> 8) & 0xFF; inTile[sharedY+1][sharedX ] = (intensityData >> 16) & 0xFF; inTile[sharedY+1][sharedX+1] = (intensityData >> 24) & 0xFF; // Read Border Data into Shared Memory ///////////////////////////////////////////////////////////////////////////////////// // Registers meant for speed. Two given each thread will update 2 pixels. 
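// Layout note: each 32-bit word of `intensity` packs a 2x2 quad of 8-bit
// pixels (THREAD_TILE_SIZE appears to be 2 here): bits 0-7 and 8-15 are the
// quad's top row, bits 16-23 and 24-31 its bottom row, matching the four
// shared-memory stores above. The border code below therefore needs, per
// edge thread, two shift amounts selecting which bytes of a neighboring
// quad fall inside this block's halo.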
int shiftTileReg1 = 0; int shiftTileReg2 = 0; int borderXLoc = 0; int borderYLoc = 0; // Needed Variables int bLocation; int borderIntData; // Update horizontal border borderXLoc = sharedX; if (ty == 0 ){ // Location to write in shared memory borderYLoc = 0; if (by != 0) { // Upper block border y-=THREAD_TILE_SIZE; shiftTileReg1 = 16; shiftTileReg2 = 24; } } else if (ty == BLOCK_TILE_SIZE-1){ // Location to write in shared memory borderYLoc = TILE_SIZE+1; if (by != gridDim.y-1) { // Lower block border y+=THREAD_TILE_SIZE; shiftTileReg1 = 0; shiftTileReg2 = 8; } } // Read from global and write to shared memory if (ty == 0 || ty == BLOCK_TILE_SIZE-1) { if ((by == 0 && ty == 0 ) || (by == gridDim.y-1 && ty == BLOCK_TILE_SIZE-1)){ inTile[borderYLoc][borderXLoc ] = 0; inTile[borderYLoc][borderXLoc+1] = 0; } else { bLocation = (((x>>TTSB)&BTSMask) ) | (((y>>TTSB)&BTSMask) << BTSB ) | ((x>>TSB) << (BTSB+BTSB) ) ; bLocation += ((y>>TSB)<<(BTSB+BTSB))*gridDim.x; borderIntData = intensity[bLocation]; inTile[borderYLoc][borderXLoc ] = ( borderIntData >> shiftTileReg1 ) & 0xFF; inTile[borderYLoc][borderXLoc+1] = ( borderIntData >> shiftTileReg2 ) & 0xFF; } } // Update vertical border x = bx<<BTSB; x = x + tx; x = x<<TTSB; y = by<<BTSB; y = y + ty; y = y<<TTSB; borderYLoc = sharedY; if (tx == 0 ){ // Location to write in shared memory borderXLoc = 0; if (bx != 0) { // Upper block border x-=THREAD_TILE_SIZE; shiftTileReg1 = 8; shiftTileReg2 = 24; } } else if (tx == BLOCK_TILE_SIZE-1){ // Location to write in shared memory borderXLoc = TILE_SIZE+1; if (bx != gridDim.x-1) { // Lower block border x+=THREAD_TILE_SIZE; shiftTileReg1 = 0; shiftTileReg2 = 16; } } // Read from global and write to shared memory if (tx == 0 || tx == BLOCK_TILE_SIZE-1) { if ((bx == 0 && tx == 0 ) || (bx == gridDim.x-1 && tx == BLOCK_TILE_SIZE-1)){ inTile[borderYLoc][borderXLoc ] = 0; inTile[borderYLoc+1][borderXLoc] = 0; } else { bLocation = (((x>>TTSB)&BTSMask) ) | (((y>>TTSB)&BTSMask) << BTSB ) | ((x>>TSB) << (BTSB+BTSB) ) ; bLocation += ((y>>TSB)<<(BTSB+BTSB))*gridDim.x; borderIntData = intensity[bLocation]; inTile[borderYLoc][borderXLoc ] = ( borderIntData >> shiftTileReg1 ) & 0xFF; inTile[borderYLoc+1][borderXLoc] = ( borderIntData >> shiftTileReg2 ) & 0xFF; } } x = bx<<BTSB; x = x + tx; x = x<<TTSB; y = by<<BTSB; y = y + ty; y = y<<TTSB; // Corners for Border shiftTileReg1 = 0; if ((tx == 0 || tx == BLOCK_TILE_SIZE-1) && (ty == 0 || ty == BLOCK_TILE_SIZE-1)){ if (tx == 0) { borderXLoc = 0; x-=THREAD_TILE_SIZE; } else { borderXLoc = TILE_SIZE+1; x+=THREAD_TILE_SIZE; shiftTileReg1 += 16; } if (ty == 0) { borderYLoc = 0; y-=THREAD_TILE_SIZE; } else { borderYLoc = TILE_SIZE+1; y+=THREAD_TILE_SIZE; shiftTileReg1 += 8; } if ( ((tx == 0 && ty == 0 ) && (bx == 0 || by == 0 )) || ((tx == 0 && ty == BLOCK_TILE_SIZE-1) && (bx == 0 || by == gridDim.y-1 )) || ((tx == BLOCK_TILE_SIZE-1 && ty == 0 ) && (bx == gridDim.x-1 || by == 0 )) || ((tx == BLOCK_TILE_SIZE-1 && ty == BLOCK_TILE_SIZE-1) && (bx == gridDim.x-1 || by == gridDim.y-1 )) ){ inTile[borderYLoc][borderXLoc] = 0; } else { bLocation = (((x>>TTSB)&BTSMask) ) | (((y>>TTSB)&BTSMask) << BTSB ) | ((x>>TSB) << (BTSB+BTSB) ) ; bLocation +=((y>>TSB) << (BTSB+BTSB) )*gridDim.x; intensityData = intensity [bLocation]; inTile[borderYLoc][borderXLoc] = (intensityData >> shiftTileReg1 ) & 0xFF; } } __syncthreads(); // Algorithm ///////////////////////////////////////////////////////////////////// for (int tempY = ty+1; tempY <= TILE_SIZE; tempY+=BLOCK_TILE_SIZE ){ for (int tempX = tx+1; tempX 
<= TILE_SIZE; tempX+=BLOCK_TILE_SIZE ){ int sum1 = inTile[tempY-1][tempX+1] - inTile[tempY-1][tempX-1] + 2 * (inTile[tempY ][tempX+1] - inTile[tempY ][tempX-1]) + inTile[tempY+1][tempX+1] - inTile[tempY+1][tempX-1]; int sum2 = inTile[tempY-1][tempX-1] + inTile[tempY-1][tempX+1] + 2 * (inTile[tempY-1][tempX ] - inTile[tempY+1][tempX ]) - inTile[tempY+1][tempX-1] - inTile[tempY+1][tempX+1]; int magnitude = sum1*sum1+sum2*sum2; if(magnitude > threshold) outTile[tempY][tempX] = 255; else outTile[tempY][tempX] = 0; } } __syncthreads(); // Write back result int intData1 = outTile[sharedY ][sharedX ] & 0xFF; int intData2 = outTile[sharedY ][sharedX+1] & 0xFF; int intData3 = outTile[sharedY+1][sharedX ] & 0xFF; int intData4 = outTile[sharedY+1][sharedX+1] & 0xFF; int intReturnData = intData1 | (intData2 << 8 ) | (intData3 << 16) | (intData4 << 24); result[location] = intReturnData; } unsigned char *sobel(unsigned char *intensity, unsigned int height, unsigned int width){ #if defined(DEBUG) printf("Printing input data\n"); printf("Height: %d\n", height); printf("Width: %d\n", width); #endif int gridXSize = 1 + (( width - 1) / TILE_SIZE); int gridYSize = 1 + ((height - 1) / TILE_SIZE); int XSize = gridXSize*TILE_SIZE; int YSize = gridYSize*TILE_SIZE; // Both are the same size (CPU/GPU). gpu.size = XSize*YSize; // Allocate arrays in GPU memory #if defined(VERBOSE) printf ("Allocating arrays in GPU memory.\n"); #endif #if defined(CUDA_TIMING) float Ttime; TIMER_CREATE(Ttime); TIMER_START(Ttime); #endif checkCuda(hipMalloc((void**)&gpu.intensity , gpu.size*sizeof(char))); checkCuda(hipMalloc((void**)&gpu.result , gpu.size*sizeof(char))); checkCuda(hipMemset(gpu.result , 0 , gpu.size)); checkCuda(hipMemset(gpu.intensity , 0 , gpu.size)); // Allocate result array in CPU memory gpu.resultOnCPU = new unsigned char[gpu.size]; checkCuda(hipMemcpy(gpu.intensity, intensity, gpu.size*sizeof(char), hipMemcpyHostToDevice)); checkCuda(hipDeviceSynchronize()); #if defined(CUDA_TIMING) float Ktime; TIMER_CREATE(Ktime); TIMER_START(Ktime); #endif #if defined(VERBOSE) printf("Running algorithm on GPU.\n"); #endif dim3 dimGrid(gridXSize, gridYSize); dim3 dimBlock(BLOCK_TILE_SIZE, BLOCK_TILE_SIZE); // Launch kernel to begin image segmenation hipLaunchKernelGGL(( sobelAlgorithm), dim3(dimGrid), dim3(dimBlock), 0, 0, (unsigned int *)gpu.intensity, (unsigned int *)gpu.result, threshold); checkCuda(hipDeviceSynchronize()); #if defined(CUDA_TIMING) TIMER_END(Ktime); printf("Kernel Execution Time: %f ms\n", Ktime); #endif // Retrieve results from the GPU checkCuda(hipMemcpy(gpu.resultOnCPU, gpu.result, gpu.size*sizeof(char), hipMemcpyDeviceToHost)); // Free resources and end the program checkCuda(hipFree(gpu.intensity)); checkCuda(hipFree(gpu.result)); #if defined(CUDA_TIMING) TIMER_END(Ttime); printf("Total GPU Execution Time: %f ms\n", Ttime); #endif return(gpu.resultOnCPU); } unsigned char *sobelWarmup(unsigned char *intensity, unsigned int height, unsigned int width){ int gridXSize = 1 + (( width - 1) / TILE_SIZE); int gridYSize = 1 + ((height - 1) / TILE_SIZE); int XSize = gridXSize*TILE_SIZE; int YSize = gridYSize*TILE_SIZE; // Both are the same size (CPU/GPU). 
gpu.size = XSize*YSize; // Allocate arrays in GPU memory checkCuda(hipMalloc((void**)&gpu.intensity , gpu.size*sizeof(char))); checkCuda(hipMalloc((void**)&gpu.result , gpu.size*sizeof(char))); // Allocate result array in CPU memory gpu.resultOnCPU = new unsigned char[gpu.size]; checkCuda(hipMemcpy(gpu.intensity, intensity, gpu.size*sizeof(char), hipMemcpyHostToDevice)); checkCuda(hipDeviceSynchronize()); dim3 dimGrid(gridXSize, gridYSize); dim3 dimBlock(BLOCK_TILE_SIZE, BLOCK_TILE_SIZE); // Launch kernel to begin image segmentation hipLaunchKernelGGL(( sobelAlgorithm), dim3(dimGrid), dim3(dimBlock), 0, 0, (unsigned int *)gpu.intensity, (unsigned int *)gpu.result, threshold); checkCuda(hipDeviceSynchronize()); // Retrieve results from the GPU checkCuda(hipMemcpy(gpu.resultOnCPU, gpu.result, gpu.size*sizeof(char), hipMemcpyDeviceToHost)); // Free resources and end the program checkCuda(hipFree(gpu.intensity)); checkCuda(hipFree(gpu.result)); return(gpu.resultOnCPU); }
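The kernel above thresholds the squared gradient magnitude (sum1*sum1 + sum2*sum2), so the value passed to modThreshold must be in squared units. As a cross-check against the tiled kernel, here is a minimal scalar reference for one interior pixel; the names sobel_pixel, img, and w are illustrative, not from the sources:

// Minimal scalar reference for one interior pixel (x, y); illustrative only.
// `img` is a row-major 8-bit image of width `w`; the caller guarantees
// 1 <= x < w-1 and 1 <= y < h-1.
static unsigned char sobel_pixel(const unsigned char *img, int w,
                                 int x, int y, unsigned int threshold) {
  #define P(dx, dy) (int)img[(y + (dy)) * w + (x + (dx))]
  int gx = P(1,-1) - P(-1,-1) + 2 * (P(1,0) - P(-1,0)) + P(1,1) - P(-1,1);
  int gy = P(-1,-1) + P(1,-1) + 2 * (P(0,-1) - P(0,1)) - P(-1,1) - P(1,1);
  #undef P
  // Like the kernel, skip the sqrt and threshold the squared magnitude.
  return (unsigned int)(gx * gx + gy * gy) > threshold ? 255 : 0;
}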
7e837848271744bfba0e02e5f5f25263784f1bde.cu
/* Julian Gutierrez * Northeastern University * High Performance Computing * * Sobel Algorithm Implementation * */ #include "sobel.h" /*******************************************************/ /* Cuda Error Function */ /*******************************************************/ inline cudaError_t checkCuda(cudaError_t result) { #if defined(DEBUG) || defined(_DEBUG) if (result != cudaSuccess) { fprintf(stderr, "CUDA Runtime Error: %s\n", cudaGetErrorString(result)); exit(-1); } #endif return result; } using namespace std; void modThreshold (unsigned int value){ threshold = value; } /* * Sobel Kernel */ __global__ void sobelAlgorithm(unsigned int *intensity, unsigned int *result, unsigned int threshold){ int tx = threadIdx.x; int ty = threadIdx.y; int bx = blockIdx.x; int by = blockIdx.y; // Including border __shared__ unsigned char inTile[TILE_SIZE+2][TILE_SIZE+2]; // input __shared__ unsigned char outTile[TILE_SIZE+2][TILE_SIZE+2]; // output // Read Input Data into Shared Memory ///////////////////////////////////////////////////////////////////////////////////// int x = bx<<BTSB; x = x + tx; x = x<<TTSB; int y = by<<BTSB; y = y + ty; y = y<<TTSB; int location = (((x>>TTSB)&BTSMask) ) | (((y>>TTSB)&BTSMask) << BTSB ) | ((x>>TSB) << (BTSB+BTSB) ) ; location += ((y>>TSB)<<(BTSB+BTSB))*gridDim.x; int intensityData = intensity[location]; int sharedX = tx*THREAD_TILE_SIZE+1; int sharedY = ty*THREAD_TILE_SIZE+1; inTile[sharedY ][sharedX ] = intensityData & 0xFF; inTile[sharedY ][sharedX+1] = (intensityData >> 8) & 0xFF; inTile[sharedY+1][sharedX ] = (intensityData >> 16) & 0xFF; inTile[sharedY+1][sharedX+1] = (intensityData >> 24) & 0xFF; // Read Border Data into Shared Memory ///////////////////////////////////////////////////////////////////////////////////// // Registers meant for speed. Two given each thread will update 2 pixels. 
int shiftTileReg1 = 0; int shiftTileReg2 = 0; int borderXLoc = 0; int borderYLoc = 0; // Needed Variables int bLocation; int borderIntData; // Update horizontal border borderXLoc = sharedX; if (ty == 0 ){ // Location to write in shared memory borderYLoc = 0; if (by != 0) { // Upper block border y-=THREAD_TILE_SIZE; shiftTileReg1 = 16; shiftTileReg2 = 24; } } else if (ty == BLOCK_TILE_SIZE-1){ // Location to write in shared memory borderYLoc = TILE_SIZE+1; if (by != gridDim.y-1) { // Lower block border y+=THREAD_TILE_SIZE; shiftTileReg1 = 0; shiftTileReg2 = 8; } } // Read from global and write to shared memory if (ty == 0 || ty == BLOCK_TILE_SIZE-1) { if ((by == 0 && ty == 0 ) || (by == gridDim.y-1 && ty == BLOCK_TILE_SIZE-1)){ inTile[borderYLoc][borderXLoc ] = 0; inTile[borderYLoc][borderXLoc+1] = 0; } else { bLocation = (((x>>TTSB)&BTSMask) ) | (((y>>TTSB)&BTSMask) << BTSB ) | ((x>>TSB) << (BTSB+BTSB) ) ; bLocation += ((y>>TSB)<<(BTSB+BTSB))*gridDim.x; borderIntData = intensity[bLocation]; inTile[borderYLoc][borderXLoc ] = ( borderIntData >> shiftTileReg1 ) & 0xFF; inTile[borderYLoc][borderXLoc+1] = ( borderIntData >> shiftTileReg2 ) & 0xFF; } } // Update vertical border x = bx<<BTSB; x = x + tx; x = x<<TTSB; y = by<<BTSB; y = y + ty; y = y<<TTSB; borderYLoc = sharedY; if (tx == 0 ){ // Location to write in shared memory borderXLoc = 0; if (bx != 0) { // Upper block border x-=THREAD_TILE_SIZE; shiftTileReg1 = 8; shiftTileReg2 = 24; } } else if (tx == BLOCK_TILE_SIZE-1){ // Location to write in shared memory borderXLoc = TILE_SIZE+1; if (bx != gridDim.x-1) { // Lower block border x+=THREAD_TILE_SIZE; shiftTileReg1 = 0; shiftTileReg2 = 16; } } // Read from global and write to shared memory if (tx == 0 || tx == BLOCK_TILE_SIZE-1) { if ((bx == 0 && tx == 0 ) || (bx == gridDim.x-1 && tx == BLOCK_TILE_SIZE-1)){ inTile[borderYLoc][borderXLoc ] = 0; inTile[borderYLoc+1][borderXLoc] = 0; } else { bLocation = (((x>>TTSB)&BTSMask) ) | (((y>>TTSB)&BTSMask) << BTSB ) | ((x>>TSB) << (BTSB+BTSB) ) ; bLocation += ((y>>TSB)<<(BTSB+BTSB))*gridDim.x; borderIntData = intensity[bLocation]; inTile[borderYLoc][borderXLoc ] = ( borderIntData >> shiftTileReg1 ) & 0xFF; inTile[borderYLoc+1][borderXLoc] = ( borderIntData >> shiftTileReg2 ) & 0xFF; } } x = bx<<BTSB; x = x + tx; x = x<<TTSB; y = by<<BTSB; y = y + ty; y = y<<TTSB; // Corners for Border shiftTileReg1 = 0; if ((tx == 0 || tx == BLOCK_TILE_SIZE-1) && (ty == 0 || ty == BLOCK_TILE_SIZE-1)){ if (tx == 0) { borderXLoc = 0; x-=THREAD_TILE_SIZE; } else { borderXLoc = TILE_SIZE+1; x+=THREAD_TILE_SIZE; shiftTileReg1 += 16; } if (ty == 0) { borderYLoc = 0; y-=THREAD_TILE_SIZE; } else { borderYLoc = TILE_SIZE+1; y+=THREAD_TILE_SIZE; shiftTileReg1 += 8; } if ( ((tx == 0 && ty == 0 ) && (bx == 0 || by == 0 )) || ((tx == 0 && ty == BLOCK_TILE_SIZE-1) && (bx == 0 || by == gridDim.y-1 )) || ((tx == BLOCK_TILE_SIZE-1 && ty == 0 ) && (bx == gridDim.x-1 || by == 0 )) || ((tx == BLOCK_TILE_SIZE-1 && ty == BLOCK_TILE_SIZE-1) && (bx == gridDim.x-1 || by == gridDim.y-1 )) ){ inTile[borderYLoc][borderXLoc] = 0; } else { bLocation = (((x>>TTSB)&BTSMask) ) | (((y>>TTSB)&BTSMask) << BTSB ) | ((x>>TSB) << (BTSB+BTSB) ) ; bLocation +=((y>>TSB) << (BTSB+BTSB) )*gridDim.x; intensityData = intensity [bLocation]; inTile[borderYLoc][borderXLoc] = (intensityData >> shiftTileReg1 ) & 0xFF; } } __syncthreads(); // Algorithm ///////////////////////////////////////////////////////////////////// for (int tempY = ty+1; tempY <= TILE_SIZE; tempY+=BLOCK_TILE_SIZE ){ for (int tempX = tx+1; tempX 
<= TILE_SIZE; tempX+=BLOCK_TILE_SIZE ){ int sum1 = inTile[tempY-1][tempX+1] - inTile[tempY-1][tempX-1] + 2 * (inTile[tempY ][tempX+1] - inTile[tempY ][tempX-1]) + inTile[tempY+1][tempX+1] - inTile[tempY+1][tempX-1]; int sum2 = inTile[tempY-1][tempX-1] + inTile[tempY-1][tempX+1] + 2 * (inTile[tempY-1][tempX ] - inTile[tempY+1][tempX ]) - inTile[tempY+1][tempX-1] - inTile[tempY+1][tempX+1]; int magnitude = sum1*sum1+sum2*sum2; if(magnitude > threshold) outTile[tempY][tempX] = 255; else outTile[tempY][tempX] = 0; } } __syncthreads(); // Write back result int intData1 = outTile[sharedY ][sharedX ] & 0xFF; int intData2 = outTile[sharedY ][sharedX+1] & 0xFF; int intData3 = outTile[sharedY+1][sharedX ] & 0xFF; int intData4 = outTile[sharedY+1][sharedX+1] & 0xFF; int intReturnData = intData1 | (intData2 << 8 ) | (intData3 << 16) | (intData4 << 24); result[location] = intReturnData; } unsigned char *sobel(unsigned char *intensity, unsigned int height, unsigned int width){ #if defined(DEBUG) printf("Printing input data\n"); printf("Height: %d\n", height); printf("Width: %d\n", width); #endif int gridXSize = 1 + (( width - 1) / TILE_SIZE); int gridYSize = 1 + ((height - 1) / TILE_SIZE); int XSize = gridXSize*TILE_SIZE; int YSize = gridYSize*TILE_SIZE; // Both are the same size (CPU/GPU). gpu.size = XSize*YSize; // Allocate arrays in GPU memory #if defined(VERBOSE) printf ("Allocating arrays in GPU memory.\n"); #endif #if defined(CUDA_TIMING) float Ttime; TIMER_CREATE(Ttime); TIMER_START(Ttime); #endif checkCuda(cudaMalloc((void**)&gpu.intensity , gpu.size*sizeof(char))); checkCuda(cudaMalloc((void**)&gpu.result , gpu.size*sizeof(char))); checkCuda(cudaMemset(gpu.result , 0 , gpu.size)); checkCuda(cudaMemset(gpu.intensity , 0 , gpu.size)); // Allocate result array in CPU memory gpu.resultOnCPU = new unsigned char[gpu.size]; checkCuda(cudaMemcpy(gpu.intensity, intensity, gpu.size*sizeof(char), cudaMemcpyHostToDevice)); checkCuda(cudaDeviceSynchronize()); #if defined(CUDA_TIMING) float Ktime; TIMER_CREATE(Ktime); TIMER_START(Ktime); #endif #if defined(VERBOSE) printf("Running algorithm on GPU.\n"); #endif dim3 dimGrid(gridXSize, gridYSize); dim3 dimBlock(BLOCK_TILE_SIZE, BLOCK_TILE_SIZE); // Launch kernel to begin image segmenation sobelAlgorithm<<<dimGrid, dimBlock>>>((unsigned int *)gpu.intensity, (unsigned int *)gpu.result, threshold); checkCuda(cudaDeviceSynchronize()); #if defined(CUDA_TIMING) TIMER_END(Ktime); printf("Kernel Execution Time: %f ms\n", Ktime); #endif // Retrieve results from the GPU checkCuda(cudaMemcpy(gpu.resultOnCPU, gpu.result, gpu.size*sizeof(char), cudaMemcpyDeviceToHost)); // Free resources and end the program checkCuda(cudaFree(gpu.intensity)); checkCuda(cudaFree(gpu.result)); #if defined(CUDA_TIMING) TIMER_END(Ttime); printf("Total GPU Execution Time: %f ms\n", Ttime); #endif return(gpu.resultOnCPU); } unsigned char *sobelWarmup(unsigned char *intensity, unsigned int height, unsigned int width){ int gridXSize = 1 + (( width - 1) / TILE_SIZE); int gridYSize = 1 + ((height - 1) / TILE_SIZE); int XSize = gridXSize*TILE_SIZE; int YSize = gridYSize*TILE_SIZE; // Both are the same size (CPU/GPU). 
gpu.size = XSize*YSize; // Allocate arrays in GPU memory checkCuda(cudaMalloc((void**)&gpu.intensity , gpu.size*sizeof(char))); checkCuda(cudaMalloc((void**)&gpu.result , gpu.size*sizeof(char))); // Allocate result array in CPU memory gpu.resultOnCPU = new unsigned char[gpu.size]; checkCuda(cudaMemcpy(gpu.intensity, intensity, gpu.size*sizeof(char), cudaMemcpyHostToDevice)); checkCuda(cudaDeviceSynchronize()); dim3 dimGrid(gridXSize, gridYSize); dim3 dimBlock(BLOCK_TILE_SIZE, BLOCK_TILE_SIZE); // Launch kernel to begin image segmentation sobelAlgorithm<<<dimGrid, dimBlock>>>((unsigned int *)gpu.intensity, (unsigned int *)gpu.result, threshold); checkCuda(cudaDeviceSynchronize()); // Retrieve results from the GPU checkCuda(cudaMemcpy(gpu.resultOnCPU, gpu.result, gpu.size*sizeof(char), cudaMemcpyDeviceToHost)); // Free resources and end the program checkCuda(cudaFree(gpu.intensity)); checkCuda(cudaFree(gpu.result)); return(gpu.resultOnCPU); }
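One caveat in both sobel variants: checkCuda inspects the return code only when DEBUG or _DEBUG is defined, so release builds silently discard cudaMalloc/cudaMemcpy failures. A sketch, not part of the original sources, of an unconditional variant; CUDA_CHECK is an illustrative name:

#include <cstdio>
#include <cstdlib>
#include <cuda_runtime.h>

// Report the failing call site unconditionally, not just in debug builds.
#define CUDA_CHECK(call)                                              \
  do {                                                                \
    cudaError_t err_ = (call);                                        \
    if (err_ != cudaSuccess) {                                        \
      fprintf(stderr, "CUDA error %s at %s:%d: %s\n",                 \
              cudaGetErrorName(err_), __FILE__, __LINE__,             \
              cudaGetErrorString(err_));                              \
      exit(EXIT_FAILURE);                                             \
    }                                                                 \
  } while (0)

// Usage mirrors the calls in sobel():
//   CUDA_CHECK(cudaMalloc((void**)&gpu.intensity, gpu.size));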
4e0a91bfa7c98937d92a04d80ebcfa567bf558f2.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* ----------------------------------------------------------------- * Programmer(s): David J. Gardner @ LLNL * ----------------------------------------------------------------- * SUNDIALS Copyright Start * Copyright (c) 2002-2022, Lawrence Livermore National Security * and Southern Methodist University. * All rights reserved. * * See the top-level LICENSE and NOTICE files for details. * * SPDX-License-Identifier: BSD-3-Clause * SUNDIALS Copyright End * ----------------------------------------------------------------- * This is the testing routine to check the performance of the * NVECTOR CUDA module implementation. * -----------------------------------------------------------------*/ #include <stdio.h> #include <stdlib.h> #include <stddef.h> #include <sundials/sundials_types.h> #include <nvector/nvector_cuda.h> #include <sundials/sundials_math.h> #include "test_nvector_performance.h" /* private functions */ static int InitializeClearCache(int cachesize); static int FinalizeClearCache(); /* private data for clearing cache */ static sunindextype N; /* data length */ static realtype* h_data; /* host data */ static realtype* h_sum; /* host sum */ static realtype* d_data; /* device data */ static realtype* d_sum; /* device sum */ static int blocksPerGrid; /* cuda reduction kernel to clearing cache between tests */ __global__ void ClearCacheKernel(sunindextype N, realtype* data, realtype* out) { __shared__ realtype shared[256]; int sharedidx = blockIdx.x; int tid = blockIdx.x * blockDim.x + threadIdx.x; realtype tmp = 0; while (tid < N) { tmp += data[tid]; tid += blockDim.x * gridDim.x; } shared[sharedidx] = tmp; __syncthreads(); /* assues blockDim is a power of 2 */ int i = blockDim.x/2; while (i != 0) { if (sharedidx < i) shared[sharedidx] += shared[sharedidx + i]; __syncthreads(); i /= 2; } if (sharedidx == 0) out[sharedidx] = shared[0]; } /* ---------------------------------------------------------------------- * Main NVector Testing Routine * --------------------------------------------------------------------*/ int main(int argc, char *argv[]) { SUNContext ctx = NULL; /* SUNDIALS context */ N_Vector X = NULL; /* test vector */ sunindextype veclen; /* vector length */ int print_timing; /* output timings */ int ntests; /* number of tests */ int nvecs; /* number of tests */ int nsums; /* number of sums */ int cachesize; /* size of cache (MB) */ int flag; /* return flag */ printf("\nStart Tests\n"); printf("Vector Name: Cuda\n"); /* check input and set vector length */ if (argc < 7){ printf("ERROR: SIX (6) arguments required: "); printf("<vector length> <number of vectors> <number of sums> <number of tests> "); printf("<cache size (MB)> <print timing>\n"); return(-1); } veclen = atol(argv[1]); if (veclen <= 0) { printf("ERROR: length of vector must be a positive integer \n"); return(-1); } nvecs = atol(argv[2]); if (nvecs < 1) { printf("WARNING: Fused operation tests disabled\n"); } nsums = atol(argv[3]); if (nsums < 1) { printf("WARNING: Some fused operation tests disabled\n"); } ntests = atol(argv[4]); if (ntests <= 0) { printf("ERROR: number of tests must be a positive integer \n"); return(-1); } cachesize = atol(argv[5]); if (cachesize < 0) { printf("ERROR: cache size (MB) must be a non-negative integer \n"); return(-1); } InitializeClearCache(cachesize); print_timing = atoi(argv[6]); SetTiming(print_timing, 0); printf("\nRunning with: \n"); printf(" vector length %ld \n", (long int) veclen); printf(" 
max number of vectors %d \n", nvecs); printf(" max number of sums %d \n", nsums); printf(" number of tests %d \n", ntests); printf(" timing on/off %d \n", print_timing); flag = SUNContext_Create(NULL, &ctx); if (flag) return flag; /* Create vectors */ X = N_VNew_Cuda(veclen, ctx); /* run tests */ if (print_timing) printf("\n\n standard operations:\n"); if (print_timing) PrintTableHeader(1); flag = Test_N_VLinearSum(X, veclen, ntests); flag = Test_N_VConst(X, veclen, ntests); flag = Test_N_VProd(X, veclen, ntests); flag = Test_N_VDiv(X, veclen, ntests); flag = Test_N_VScale(X, veclen, ntests); flag = Test_N_VAbs(X, veclen, ntests); flag = Test_N_VInv(X, veclen, ntests); flag = Test_N_VAddConst(X, veclen, ntests); flag = Test_N_VDotProd(X, veclen, ntests); flag = Test_N_VMaxNorm(X, veclen, ntests); flag = Test_N_VWrmsNorm(X, veclen, ntests); flag = Test_N_VWrmsNormMask(X, veclen, ntests); flag = Test_N_VMin(X, veclen, ntests); flag = Test_N_VWL2Norm(X, veclen, ntests); flag = Test_N_VL1Norm(X, veclen, ntests); flag = Test_N_VCompare(X, veclen, ntests); flag = Test_N_VInvTest(X, veclen, ntests); flag = Test_N_VConstrMask(X, veclen, ntests); flag = Test_N_VMinQuotient(X, veclen, ntests); if (nvecs > 0) { if (print_timing) printf("\n\n fused operations 1: nvecs= %d\n", nvecs); if (print_timing) PrintTableHeader(2); flag = Test_N_VLinearCombination(X, veclen, nvecs, ntests); flag = Test_N_VScaleAddMulti(X, veclen, nvecs, ntests); flag = Test_N_VDotProdMulti(X, veclen, nvecs, ntests); flag = Test_N_VLinearSumVectorArray(X, veclen, nvecs, ntests); flag = Test_N_VScaleVectorArray(X, veclen, nvecs, ntests); flag = Test_N_VConstVectorArray(X, veclen, nvecs, ntests); flag = Test_N_VWrmsNormVectorArray(X, veclen, nvecs, ntests); flag = Test_N_VWrmsNormMaskVectorArray(X, veclen, nvecs, ntests); if (nsums > 0) { if (print_timing) printf("\n\n fused operations 2: nvecs= %d nsums= %d\n", nvecs, nsums); if (print_timing) PrintTableHeader(2); flag = Test_N_VScaleAddMultiVectorArray(X, veclen, nvecs, nsums, ntests); flag = Test_N_VLinearCombinationVectorArray(X, veclen, nvecs, nsums, ntests); } } /* Free vectors */ N_VDestroy(X); FinalizeClearCache(); flag = SUNContext_Free(&ctx); if (flag) return flag; printf("\nFinished Tests\n"); return(flag); } /* ---------------------------------------------------------------------- * Functions required by testing routines to fill vector data * --------------------------------------------------------------------*/ /* random data between lower and upper */ void N_VRand(N_Vector Xvec, sunindextype Xlen, realtype lower, realtype upper) { rand_realtype(N_VGetHostArrayPointer_Cuda(Xvec), Xlen, lower, upper); N_VCopyToDevice_Cuda(Xvec); } /* series of 0 and 1 */ void N_VRandZeroOne(N_Vector Xvec, sunindextype Xlen) { rand_realtype_zero_one(N_VGetHostArrayPointer_Cuda(Xvec), Xlen); N_VCopyToDevice_Cuda(Xvec); } /* random values for constraint array */ void N_VRandConstraints(N_Vector Xvec, sunindextype Xlen) { rand_realtype_constraints(N_VGetHostArrayPointer_Cuda(Xvec), Xlen); N_VCopyToDevice_Cuda(Xvec); } /* ---------------------------------------------------------------------- * Functions required for MPI or GPU testing * --------------------------------------------------------------------*/ void collect_times(N_Vector X, double *times, int ntimes) { /* not running with MPI, just return */ return; } void sync_device(N_Vector x) { hipDeviceSynchronize(); return; } /* ---------------------------------------------------------------------- * Functions required for clearing cache 
* --------------------------------------------------------------------*/ static int InitializeClearCache(int cachesize) { hipError_t err; /* cuda error flag */ size_t nbytes; /* cache size in bytes */ /* determine size of vector to clear cache, N = ceil(2 * nbytes/realtype) */ nbytes = (size_t) (2 * cachesize * 1024 * 1024); N = (sunindextype) ((nbytes + sizeof(realtype) - 1)/sizeof(realtype)); /* allocate host data */ blocksPerGrid = SUNMIN(32,(N+255)/256); h_data = (realtype*) malloc(N*sizeof(realtype)); h_sum = (realtype*) malloc(blocksPerGrid*sizeof(realtype)); /* allocate device data */ err = hipMalloc((void**) &d_data, N*sizeof(realtype)); if (err != hipSuccess) { fprintf(stderr,"Failed to allocate device vector (error code %d )!\n",err); return(-1); } err = hipMalloc((void**) &d_sum, blocksPerGrid*sizeof(realtype)); if (err != hipSuccess) { fprintf(stderr,"Failed to allocate device vector (error code %d )!\n",err); return(-1); } /* fill host vector with random data and copy to device */ rand_realtype(h_data, N, RCONST(-1.0), RCONST(1.0)); err = hipMemcpy(d_data, h_data, N*sizeof(realtype), hipMemcpyHostToDevice); if (err != hipSuccess) { fprintf(stderr,"Failed to copy data from host to device (error code %d )!\n",err); return(-1); } return(0); } static int FinalizeClearCache() { hipError_t err; /* cuda error flag */ free(h_data); free(h_sum); err = hipFree(d_data); if (err != hipSuccess) { fprintf(stderr,"Failed to free device data (error code %d )!\n",err); return(-1); } err = hipFree(d_sum); if (err != hipSuccess) { fprintf(stderr,"Failed to free device data (error code %d )!\n",err); return(-1); } return(0); } void ClearCache() { /* call cuda kernel to clear the cache */ hipLaunchKernelGGL(( ClearCacheKernel), dim3(SUNMIN(32,(N+255)/256)), dim3(256), 0, 0, N, d_data, d_sum); hipMemcpy(h_sum, d_sum, blocksPerGrid*sizeof(realtype), hipMemcpyDeviceToHost); hipDeviceSynchronize(); return; }
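A detail worth flagging in ClearCacheKernel above (both the .hip and .cu copies): shared memory is indexed by blockIdx.x, so every thread of a block writes the same slot and the tree loop reads a constant index per block. Because the kernel exists only to flush the cache between timed runs, the unused sum is harmless, but the conventional block reduction indexes by threadIdx.x. A minimal sketch of that standard pattern, with illustrative names, assuming blockDim.x is a power of two no larger than 256 (the original comment makes the same power-of-two assumption):

__global__ void BlockSumKernel(long n, const double *data, double *out) {
  __shared__ double shared[256];
  const int tid = threadIdx.x;              // per-thread slot, not blockIdx.x
  double tmp = 0.0;
  // Grid-stride accumulation, same shape as the loop in ClearCacheKernel.
  for (long i = (long)blockIdx.x * blockDim.x + tid; i < n;
       i += (long)blockDim.x * gridDim.x)
    tmp += data[i];
  shared[tid] = tmp;
  __syncthreads();
  // Tree reduction over the block's partial sums.
  for (int s = blockDim.x / 2; s > 0; s >>= 1) {
    if (tid < s) shared[tid] += shared[tid + s];
    __syncthreads();
  }
  if (tid == 0) out[blockIdx.x] = shared[0]; // one partial sum per block
}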
4e0a91bfa7c98937d92a04d80ebcfa567bf558f2.cu
/* ----------------------------------------------------------------- * Programmer(s): David J. Gardner @ LLNL * ----------------------------------------------------------------- * SUNDIALS Copyright Start * Copyright (c) 2002-2022, Lawrence Livermore National Security * and Southern Methodist University. * All rights reserved. * * See the top-level LICENSE and NOTICE files for details. * * SPDX-License-Identifier: BSD-3-Clause * SUNDIALS Copyright End * ----------------------------------------------------------------- * This is the testing routine to check the performance of the * NVECTOR CUDA module implementation. * -----------------------------------------------------------------*/ #include <stdio.h> #include <stdlib.h> #include <stddef.h> #include <sundials/sundials_types.h> #include <nvector/nvector_cuda.h> #include <sundials/sundials_math.h> #include "test_nvector_performance.h" /* private functions */ static int InitializeClearCache(int cachesize); static int FinalizeClearCache(); /* private data for clearing cache */ static sunindextype N; /* data length */ static realtype* h_data; /* host data */ static realtype* h_sum; /* host sum */ static realtype* d_data; /* device data */ static realtype* d_sum; /* device sum */ static int blocksPerGrid; /* cuda reduction kernel to clearing cache between tests */ __global__ void ClearCacheKernel(sunindextype N, realtype* data, realtype* out) { __shared__ realtype shared[256]; int sharedidx = blockIdx.x; int tid = blockIdx.x * blockDim.x + threadIdx.x; realtype tmp = 0; while (tid < N) { tmp += data[tid]; tid += blockDim.x * gridDim.x; } shared[sharedidx] = tmp; __syncthreads(); /* assues blockDim is a power of 2 */ int i = blockDim.x/2; while (i != 0) { if (sharedidx < i) shared[sharedidx] += shared[sharedidx + i]; __syncthreads(); i /= 2; } if (sharedidx == 0) out[sharedidx] = shared[0]; } /* ---------------------------------------------------------------------- * Main NVector Testing Routine * --------------------------------------------------------------------*/ int main(int argc, char *argv[]) { SUNContext ctx = NULL; /* SUNDIALS context */ N_Vector X = NULL; /* test vector */ sunindextype veclen; /* vector length */ int print_timing; /* output timings */ int ntests; /* number of tests */ int nvecs; /* number of tests */ int nsums; /* number of sums */ int cachesize; /* size of cache (MB) */ int flag; /* return flag */ printf("\nStart Tests\n"); printf("Vector Name: Cuda\n"); /* check input and set vector length */ if (argc < 7){ printf("ERROR: SIX (6) arguments required: "); printf("<vector length> <number of vectors> <number of sums> <number of tests> "); printf("<cache size (MB)> <print timing>\n"); return(-1); } veclen = atol(argv[1]); if (veclen <= 0) { printf("ERROR: length of vector must be a positive integer \n"); return(-1); } nvecs = atol(argv[2]); if (nvecs < 1) { printf("WARNING: Fused operation tests disabled\n"); } nsums = atol(argv[3]); if (nsums < 1) { printf("WARNING: Some fused operation tests disabled\n"); } ntests = atol(argv[4]); if (ntests <= 0) { printf("ERROR: number of tests must be a positive integer \n"); return(-1); } cachesize = atol(argv[5]); if (cachesize < 0) { printf("ERROR: cache size (MB) must be a non-negative integer \n"); return(-1); } InitializeClearCache(cachesize); print_timing = atoi(argv[6]); SetTiming(print_timing, 0); printf("\nRunning with: \n"); printf(" vector length %ld \n", (long int) veclen); printf(" max number of vectors %d \n", nvecs); printf(" max number of sums %d \n", nsums); 
printf(" number of tests %d \n", ntests); printf(" timing on/off %d \n", print_timing); flag = SUNContext_Create(NULL, &ctx); if (flag) return flag; /* Create vectors */ X = N_VNew_Cuda(veclen, ctx); /* run tests */ if (print_timing) printf("\n\n standard operations:\n"); if (print_timing) PrintTableHeader(1); flag = Test_N_VLinearSum(X, veclen, ntests); flag = Test_N_VConst(X, veclen, ntests); flag = Test_N_VProd(X, veclen, ntests); flag = Test_N_VDiv(X, veclen, ntests); flag = Test_N_VScale(X, veclen, ntests); flag = Test_N_VAbs(X, veclen, ntests); flag = Test_N_VInv(X, veclen, ntests); flag = Test_N_VAddConst(X, veclen, ntests); flag = Test_N_VDotProd(X, veclen, ntests); flag = Test_N_VMaxNorm(X, veclen, ntests); flag = Test_N_VWrmsNorm(X, veclen, ntests); flag = Test_N_VWrmsNormMask(X, veclen, ntests); flag = Test_N_VMin(X, veclen, ntests); flag = Test_N_VWL2Norm(X, veclen, ntests); flag = Test_N_VL1Norm(X, veclen, ntests); flag = Test_N_VCompare(X, veclen, ntests); flag = Test_N_VInvTest(X, veclen, ntests); flag = Test_N_VConstrMask(X, veclen, ntests); flag = Test_N_VMinQuotient(X, veclen, ntests); if (nvecs > 0) { if (print_timing) printf("\n\n fused operations 1: nvecs= %d\n", nvecs); if (print_timing) PrintTableHeader(2); flag = Test_N_VLinearCombination(X, veclen, nvecs, ntests); flag = Test_N_VScaleAddMulti(X, veclen, nvecs, ntests); flag = Test_N_VDotProdMulti(X, veclen, nvecs, ntests); flag = Test_N_VLinearSumVectorArray(X, veclen, nvecs, ntests); flag = Test_N_VScaleVectorArray(X, veclen, nvecs, ntests); flag = Test_N_VConstVectorArray(X, veclen, nvecs, ntests); flag = Test_N_VWrmsNormVectorArray(X, veclen, nvecs, ntests); flag = Test_N_VWrmsNormMaskVectorArray(X, veclen, nvecs, ntests); if (nsums > 0) { if (print_timing) printf("\n\n fused operations 2: nvecs= %d nsums= %d\n", nvecs, nsums); if (print_timing) PrintTableHeader(2); flag = Test_N_VScaleAddMultiVectorArray(X, veclen, nvecs, nsums, ntests); flag = Test_N_VLinearCombinationVectorArray(X, veclen, nvecs, nsums, ntests); } } /* Free vectors */ N_VDestroy(X); FinalizeClearCache(); flag = SUNContext_Free(&ctx); if (flag) return flag; printf("\nFinished Tests\n"); return(flag); } /* ---------------------------------------------------------------------- * Functions required by testing routines to fill vector data * --------------------------------------------------------------------*/ /* random data between lower and upper */ void N_VRand(N_Vector Xvec, sunindextype Xlen, realtype lower, realtype upper) { rand_realtype(N_VGetHostArrayPointer_Cuda(Xvec), Xlen, lower, upper); N_VCopyToDevice_Cuda(Xvec); } /* series of 0 and 1 */ void N_VRandZeroOne(N_Vector Xvec, sunindextype Xlen) { rand_realtype_zero_one(N_VGetHostArrayPointer_Cuda(Xvec), Xlen); N_VCopyToDevice_Cuda(Xvec); } /* random values for constraint array */ void N_VRandConstraints(N_Vector Xvec, sunindextype Xlen) { rand_realtype_constraints(N_VGetHostArrayPointer_Cuda(Xvec), Xlen); N_VCopyToDevice_Cuda(Xvec); } /* ---------------------------------------------------------------------- * Functions required for MPI or GPU testing * --------------------------------------------------------------------*/ void collect_times(N_Vector X, double *times, int ntimes) { /* not running with MPI, just return */ return; } void sync_device(N_Vector x) { cudaDeviceSynchronize(); return; } /* ---------------------------------------------------------------------- * Functions required for clearing cache * --------------------------------------------------------------------*/ static 
int InitializeClearCache(int cachesize) { cudaError_t err; /* cuda error flag */ size_t nbytes; /* cache size in bytes */ /* determine size of vector to clear cache, N = ceil(2 * nbytes/realtype) */ nbytes = (size_t) (2 * cachesize * 1024 * 1024); N = (sunindextype) ((nbytes + sizeof(realtype) - 1)/sizeof(realtype)); /* allocate host data */ blocksPerGrid = SUNMIN(32,(N+255)/256); h_data = (realtype*) malloc(N*sizeof(realtype)); h_sum = (realtype*) malloc(blocksPerGrid*sizeof(realtype)); /* allocate device data */ err = cudaMalloc((void**) &d_data, N*sizeof(realtype)); if (err != cudaSuccess) { fprintf(stderr,"Failed to allocate device vector (error code %d )!\n",err); return(-1); } err = cudaMalloc((void**) &d_sum, blocksPerGrid*sizeof(realtype)); if (err != cudaSuccess) { fprintf(stderr,"Failed to allocate device vector (error code %d )!\n",err); return(-1); } /* fill host vector with random data and copy to device */ rand_realtype(h_data, N, RCONST(-1.0), RCONST(1.0)); err = cudaMemcpy(d_data, h_data, N*sizeof(realtype), cudaMemcpyHostToDevice); if (err != cudaSuccess) { fprintf(stderr,"Failed to copy data from host to device (error code %d )!\n",err); return(-1); } return(0); } static int FinalizeClearCache() { cudaError_t err; /* cuda error flag */ free(h_data); free(h_sum); err = cudaFree(d_data); if (err != cudaSuccess) { fprintf(stderr,"Failed to free device data (error code %d )!\n",err); return(-1); } err = cudaFree(d_sum); if (err != cudaSuccess) { fprintf(stderr,"Failed to free device data (error code %d )!\n",err); return(-1); } return(0); } void ClearCache() { /* call cuda kernel to clear the cache */ ClearCacheKernel<<<SUNMIN(32,(N+255)/256), 256>>>(N, d_data, d_sum); cudaMemcpy(h_sum, d_sum, blocksPerGrid*sizeof(realtype), cudaMemcpyDeviceToHost); cudaDeviceSynchronize(); return; }
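The launch in ClearCache clamps the grid to SUNMIN(32, (N+255)/256) blocks of 256 threads and lets the kernel's grid-stride loop cover the remaining elements, which also bounds the d_sum/h_sum partial-sum buffers at blocksPerGrid entries. A small host-side sketch of that sizing rule; capped_blocks is an illustrative name:

// Clamp the grid: enough blocks to cover n at one element per thread,
// but never more than max_blocks; the grid-stride loop handles the excess.
static inline int capped_blocks(long n, int block_size, int max_blocks) {
  long needed = (n + block_size - 1) / block_size;  // ceil(n / block_size)
  return (int)(needed < max_blocks ? needed : max_blocks);
}

// Usage, mirroring the original launch and copy-back:
//   int blocks = capped_blocks(N, 256, 32);  // == SUNMIN(32, (N+255)/256)
//   ClearCacheKernel<<<blocks, 256>>>(N, d_data, d_sum);
//   cudaMemcpy(h_sum, d_sum, blocks * sizeof(realtype), cudaMemcpyDeviceToHost);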
914d16091cce64fd5f68c08d0499a0a6ba9845ef.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "box2d2r-512-9-512_kernel.hu" __device__ double __sbref_wrap(double *sb, size_t index) { return sb[index]; } __global__ void kernel0_9(double *A, int dimsize, int timestep, int c0) { #ifndef AN5D_TYPE #define AN5D_TYPE unsigned #endif const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0); #define __c0 c0 const AN5D_TYPE __c1Len = (dimsize - 2 - 2); const AN5D_TYPE __c1Pad = (2); #define __c1 c1 const AN5D_TYPE __c2Len = (dimsize - 2 - 2); const AN5D_TYPE __c2Pad = (2); #define __c2 c2 const AN5D_TYPE __halo1 = 2; const AN5D_TYPE __halo2 = 2; const AN5D_TYPE __side0Len = 9; const AN5D_TYPE __side1Len = 512; const AN5D_TYPE __side2Len = 476; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len; const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len; const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x; const AN5D_TYPE __local_c2 = __tid; const AN5D_TYPE __c1Id = blockIdx.x / __side2Num; const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2; double __reg_0; double __reg_1_0; double __reg_1_1; double __reg_1_2; double __reg_1_3; double __reg_1_4; double __reg_2_0; double __reg_2_1; double __reg_2_2; double __reg_2_3; double __reg_2_4; double __reg_3_0; double __reg_3_1; double __reg_3_2; double __reg_3_3; double __reg_3_4; double __reg_4_0; double __reg_4_1; double __reg_4_2; double __reg_4_3; double __reg_4_4; double __reg_5_0; double __reg_5_1; double __reg_5_2; double __reg_5_3; double __reg_5_4; double __reg_6_0; double __reg_6_1; double __reg_6_2; double __reg_6_3; double __reg_6_4; double __reg_7_0; double __reg_7_1; double __reg_7_2; double __reg_7_3; double __reg_7_4; double __reg_8_0; double __reg_8_1; double __reg_8_2; double __reg_8_3; double __reg_8_4; double __reg_9_0; double __reg_9_1; double __reg_9_2; double __reg_9_3; double __reg_9_4; __shared__ double __a_sb_double[__blockSize * 2]; double *__a_sb = __a_sb_double; const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2; const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len; const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1); const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2); const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3); const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4); const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5); const AN5D_TYPE __writeValid6 = __updateValid && __local_c2 >= (__halo2 * 6) && __local_c2 < __side2LenOl - (__halo2 * 6); const AN5D_TYPE __writeValid7 = __updateValid && __local_c2 >= (__halo2 * 7) && __local_c2 < __side2LenOl - (__halo2 * 7); const AN5D_TYPE __writeValid8 = __updateValid && __local_c2 >= (__halo2 * 8) && __local_c2 < __side2LenOl - (__halo2 * 8); const AN5D_TYPE __writeValid9 = __updateValid && __local_c2 
>= (__halo2 * 9) && __local_c2 < __side2LenOl - (__halo2 * 9); const AN5D_TYPE __storeValid = __writeValid9; AN5D_TYPE __c1; AN5D_TYPE __h; const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id; #define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0) #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR_0_wrap(__rn0, __a) do { __rn0 = (((((((((((((((((((((((((0.03125f * (__SBREF(__a_sb, -2))) + (0.03126f * (__SBREF(__a_sb, -1)))) + (0.03127f * (__REGREF(__a, 0)))) + (0.03128f * (__SBREF(__a_sb, 1)))) + (0.03129f * (__SBREF(__a_sb, 2)))))))))))))))))))))))); } while (0) #define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0) #define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0) #define __CALCEXPR_0(out, a) do { __CALCEXPR_0_wrap(out, a); } while (0); #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR_1_wrap(__rn0, __a) do { __rn0 = (((((((((((((((((((((0.03130f * (__SBREF(__a_sb, -2)))) + (0.03131f * (__SBREF(__a_sb, -1)))) + (0.03132f * (__REGREF(__a, 0)))) + (0.03133f * (__SBREF(__a_sb, 1)))) + (0.03134f * (__SBREF(__a_sb, 2))))))))))))))))))); } while (0) #define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0) #define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0) #define __CALCEXPR_1(out, a) do { double etmp; __CALCEXPR_1_wrap(etmp, a); out += etmp; } while (0); #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR_2_wrap(__rn0, __a) do { __rn0 = ((((((((((((((((0.03135f * (__SBREF(__a_sb, -2)))) + (0.03136f * (__SBREF(__a_sb, -1)))) + (0.24712f * (__REGREF(__a, 0)))) + (0.03138f * (__SBREF(__a_sb, 1)))) + (0.03139f * (__SBREF(__a_sb, 2)))))))))))))); } while (0) #define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0) #define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0) #define __CALCEXPR_2(out, a) do { double etmp; __CALCEXPR_2_wrap(etmp, a); out += etmp; } while (0); #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR_3_wrap(__rn0, __a) do { __rn0 = (((((((((((0.03140f * (__SBREF(__a_sb, -2)))) + (0.03141f * (__SBREF(__a_sb, -1)))) + (0.03142f * (__REGREF(__a, 0)))) + (0.03143f * (__SBREF(__a_sb, 1)))) + (0.03144f * (__SBREF(__a_sb, 2))))))))); } while (0) #define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? 
__blockSize : 0]; } while (0) #define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0) #define __CALCEXPR_3(out, a) do { double etmp; __CALCEXPR_3_wrap(etmp, a); out += etmp; } while (0); #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR_4_wrap(__rn0, __a) do { __rn0 = ((((((0.03145f * (__SBREF(__a_sb, -2)))) + (0.03146f * (__SBREF(__a_sb, -1)))) + (0.03147f * (__REGREF(__a, 0)))) + (0.03148f * (__SBREF(__a_sb, 1)))) + (0.03149f * (__SBREF(__a_sb, 2)))); } while (0) #define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0) #define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0) #define __CALCEXPR_4(out, a) do { double etmp; __CALCEXPR_4_wrap(etmp, a); out += etmp; } while (0); #define __CALCEXPR(out0, out1, out2, out3, out4, reg) do { __CALCEXPR_0(out0, reg); __CALCEXPR_1(out1, reg); __CALCEXPR_2(out2, reg); __CALCEXPR_3(out3, reg); __CALCEXPR_4(out4, reg); } while (0); #define __CALC1(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid1) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0) #define __CALC2(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid2) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0) #define __CALC3(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid3) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0) #define __CALC4(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid4) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0) #define __CALC5(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid5) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0) #define __CALC6(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid6) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0) #define __CALC7(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid7) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0) #define __CALC8(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid8) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0) #define __CALC9(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid9) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0) #define __STORE(h, out) do { if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __DEST = out; }} while (0) if (__c1Id == 0) { __LOAD(__reg_0, 0); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_0); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_0); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_0); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_0); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_0); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_0); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_0); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_0); __LOAD(__reg_0, 1); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, 
__reg_1_0, __reg_1_4, __reg_0); __CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_0); __CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_0); __CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_0); __CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_0); __CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_0); __CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_0); __CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_0); __CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_0); __LOAD(__reg_0, 2); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __LOAD(__reg_0, 3); __CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); __LOAD(__reg_0, 4); __CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); __CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2); __LOAD(__reg_0, 5); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0); __CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3); __LOAD(__reg_0, 6); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4); __CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2); __LOAD(__reg_0, 7); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0); __CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3); __LOAD(__reg_0, 8); __CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4); __CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2); __LOAD(__reg_0, 9); __CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); __CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0); __CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3); __LOAD(__reg_0, 10); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0); __CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3); __CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4); __CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2); __LOAD(__reg_0, 11); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4); __CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0); __CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3); __LOAD(__reg_0, 12); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0); __CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3); __CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4); __CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2); __LOAD(__reg_0, 13); 
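// Structure of this generated code: the kernel fuses __side0Len == 9 time
// steps of a radius-2 (5x5) box stencil. Each stage k keeps a five-row
// sliding window in registers (__reg_k_0..__reg_k_4); __CALCEXPR_0..4 split
// the 5x5 box into its five row contributions, accumulated as rows stream
// through shared memory. The __LOAD/__CALC sequence here is still priming
// the nine-stage pipeline; __STORE begins once stage 9 holds a full window.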
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4); __CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0); __CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3); __LOAD(__reg_0, 14); __CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); __CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0); __CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3); __CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4); __CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2); __LOAD(__reg_0, 15); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0); __CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3); __CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4); __CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0); __CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3); __LOAD(__reg_0, 16); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4); __CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0); __CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3); __CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4); __CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2); __LOAD(__reg_0, 17); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0); __CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3); __CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4); __CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0); __CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3); __LOAD(__reg_0, 18); __CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4); __CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0); __CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3); __CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4); __CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2); __LOAD(__reg_0, 19); __CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, 
__reg_0); __CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0); __CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3); __CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4); __CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0); __CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3); __LOAD(__reg_0, 20); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0); __CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3); __CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4); __CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0); __CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3); __CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4); __STORE(2, __reg_9_2); __LOAD(__reg_0, 21); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4); __CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0); __CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3); __CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4); __CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0); __STORE(3, __reg_9_3); __LOAD(__reg_0, 22); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0); __CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3); __CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4); __CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0); __CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3); __CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1); __STORE(4, __reg_9_4); __LOAD(__reg_0, 23); __CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4); __CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0); __CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3); __CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4); __CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2); __STORE(5, __reg_9_0); __LOAD(__reg_0, 24); __CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, 
__reg_1_2, __reg_0); __CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0); __CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3); __CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4); __CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0); __CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3); __STORE(6, __reg_9_1); __LOAD(__reg_0, 25); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0); __CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3); __CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4); __CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0); __CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3); __CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4); __STORE(7, __reg_9_2); __LOAD(__reg_0, 26); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4); __CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0); __CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3); __CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4); __CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0); __STORE(8, __reg_9_3); __LOAD(__reg_0, 27); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0); __CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3); __CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4); __CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0); __CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3); __CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1); __STORE(9, __reg_9_4); __LOAD(__reg_0, 28); __CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4); __CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0); __CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3); __CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4); __CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2); __STORE(10, __reg_9_0); __LOAD(__reg_0, 29); __CALC1(__reg_1_1, 
__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); __CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0); __CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3); __CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4); __CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0); __CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3); __STORE(11, __reg_9_1); __LOAD(__reg_0, 30); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0); __CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3); __CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4); __CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0); __CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3); __CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4); __STORE(12, __reg_9_2); __LOAD(__reg_0, 31); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4); __CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0); __CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3); __CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4); __CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0); __STORE(13, __reg_9_3); __LOAD(__reg_0, 32); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0); __CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3); __CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4); __CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0); __CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3); __CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1); __STORE(14, __reg_9_4); __LOAD(__reg_0, 33); __CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4); __CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0); __CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3); __CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4); __CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2); __STORE(15, __reg_9_0); 
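// Final priming rows (planes 34-36) of this branch follow. Note that this
// branch emits output rows 2 through 18 while the pipeline fills, whereas
// the else branch below primes silently and stores only row 18 at the end.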
__LOAD(__reg_0, 34); __CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); __CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0); __CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3); __CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4); __CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0); __CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3); __STORE(16, __reg_9_1); __LOAD(__reg_0, 35); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0); __CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3); __CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4); __CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0); __CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3); __CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4); __STORE(17, __reg_9_2); __LOAD(__reg_0, 36); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4); __CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0); __CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3); __CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4); __CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0); __STORE(18, __reg_9_3); } else { __LOAD(__reg_0, 0); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0); __LOAD(__reg_0, 1); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0); __LOAD(__reg_0, 2); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __LOAD(__reg_0, 3); __CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); __LOAD(__reg_0, 4); __CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); __CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2); __LOAD(__reg_0, 5); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0); __CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3); __LOAD(__reg_0, 6); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4); __LOAD(__reg_0, 7); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0); __LOAD(__reg_0, 8); __CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4); __LOAD(__reg_0, 9); __CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); 
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0); __LOAD(__reg_0, 10); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0); __CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3); __CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1); __LOAD(__reg_0, 11); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4); __CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2); __LOAD(__reg_0, 12); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0); __CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3); __CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1); __LOAD(__reg_0, 13); __CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4); __CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2); __LOAD(__reg_0, 14); __CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); __CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0); __CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3); __LOAD(__reg_0, 15); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0); __CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3); __CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4); __LOAD(__reg_0, 16); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4); __CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0); __CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3); __LOAD(__reg_0, 17); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0); __CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3); __CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4); __LOAD(__reg_0, 18); __CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4); __CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0); __LOAD(__reg_0, 19); __CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); __CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0); __CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3); __CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1); __LOAD(__reg_0, 20); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, 
__reg_1_4, __reg_1_3, __reg_0); __CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3); __CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4); __CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0); __LOAD(__reg_0, 21); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4); __CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0); __CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3); __CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1); __LOAD(__reg_0, 22); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0); __CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3); __CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4); __CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2); __LOAD(__reg_0, 23); __CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4); __CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0); __CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3); __LOAD(__reg_0, 24); __CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); __CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0); __CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3); __CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4); __CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2); __LOAD(__reg_0, 25); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0); __CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3); __CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4); __CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0); __CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3); __LOAD(__reg_0, 26); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4); __CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0); __CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3); __CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4); __LOAD(__reg_0, 27); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); 
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0); __CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3); __CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4); __CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0); __LOAD(__reg_0, 28); __CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4); __CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0); __CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3); __CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4); __LOAD(__reg_0, 29); __CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); __CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0); __CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3); __CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4); __CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0); __LOAD(__reg_0, 30); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0); __CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3); __CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4); __CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0); __CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3); __CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1); __LOAD(__reg_0, 31); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4); __CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0); __CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3); __CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4); __CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2); __LOAD(__reg_0, 32); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0); __CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3); __CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4); __CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0); __CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3); 
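// Interior prologue: each stage first fires one full 5-plane window (4
// planes) after its predecessor (__CALC2 at plane 4, __CALC3 at 8, ...,
// __CALC8 at 28), so no clamped windows are needed while the pipeline
// fills; __CALC9, the last stage, first fires at plane 32 just below.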
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1); __LOAD(__reg_0, 33); __CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4); __CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0); __CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3); __CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4); __CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2); __LOAD(__reg_0, 34); __CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); __CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0); __CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3); __CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4); __CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0); __CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3); __LOAD(__reg_0, 35); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0); __CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3); __CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4); __CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0); __CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3); __CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4); __LOAD(__reg_0, 36); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4); __CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0); __CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3); __CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4); __CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0); __STORE(18, __reg_9_3); } __a_sb = __a_sb_double + __blockSize * 1; if (__c1Id == __side1Num - 1) { for (__h = 37; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 7;) { __LOAD(__reg_0, __h); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0); __CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3); __CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4); __CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, 
__reg_7_3, __reg_6_0); __CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3); __CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1); __STORE(__h - 18, __reg_9_4); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4); __CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0); __CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3); __CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4); __CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2); __STORE(__h - 18, __reg_9_0); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); __CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0); __CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3); __CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4); __CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0); __CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3); __STORE(__h - 18, __reg_9_1); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0); __CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3); __CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4); __CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0); __CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3); __CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4); __STORE(__h - 18, __reg_9_2); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4); __CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0); __CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3); __CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4); __CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0); __STORE(__h - 18, __reg_9_3); __h++; __DB_SWITCH(); __syncthreads(); } if (0) {} else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0, __h + 0); __CALC1(__reg_1_4, __reg_1_4, __reg_1_4, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0); __CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3); __CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, 
__reg_4_4, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4); __CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0); __CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3); __CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1); __STORE(__h - 18, __reg_9_4); __reg_1_2 = __reg_0; __LOAD(__reg_0, __h + 1); __CALC1(__reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_1, __reg_0); __CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4); __CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0); __CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3); __CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4); __CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2); __STORE(__h - 17, __reg_9_0); __reg_1_3 = __reg_0; __CALC2(__reg_2_4, __reg_2_4, __reg_2_4, __reg_2_1, __reg_2_0, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0); __CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3); __CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4); __CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0); __CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3); __STORE(__h - 16, __reg_9_1); __reg_2_2 = __reg_1_2; __CALC2(__reg_2_0, __reg_2_0, __reg_2_0, __reg_2_0, __reg_2_1, __reg_1_3); __CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4); __CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0); __CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3); __CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4); __STORE(__h - 15, __reg_9_2); __reg_2_3 = __reg_1_3; __CALC3(__reg_3_4, __reg_3_4, __reg_3_4, __reg_3_1, __reg_3_0, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0); __CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3); __CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4); __CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0); __STORE(__h - 14, __reg_9_3); __reg_3_2 = __reg_2_2; __CALC3(__reg_3_0, __reg_3_0, __reg_3_0, __reg_3_0, __reg_3_1, __reg_2_3); __CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4); __CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0); __CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3); __CALC9(__reg_9_3, 
__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1); __STORE(__h - 13, __reg_9_4); __reg_3_3 = __reg_2_3; __CALC4(__reg_4_4, __reg_4_4, __reg_4_4, __reg_4_1, __reg_4_0, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0); __CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3); __CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4); __CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2); __STORE(__h - 12, __reg_9_0); __reg_4_2 = __reg_3_2; __CALC4(__reg_4_0, __reg_4_0, __reg_4_0, __reg_4_0, __reg_4_1, __reg_3_3); __CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4); __CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0); __CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3); __STORE(__h - 11, __reg_9_1); __reg_4_3 = __reg_3_3; __CALC5(__reg_5_4, __reg_5_4, __reg_5_4, __reg_5_1, __reg_5_0, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0); __CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3); __CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4); __STORE(__h - 10, __reg_9_2); __reg_5_2 = __reg_4_2; __CALC5(__reg_5_0, __reg_5_0, __reg_5_0, __reg_5_0, __reg_5_1, __reg_4_3); __CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4); __CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0); __STORE(__h - 9, __reg_9_3); __reg_5_3 = __reg_4_3; __CALC6(__reg_6_4, __reg_6_4, __reg_6_4, __reg_6_1, __reg_6_0, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0); __CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3); __CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1); __STORE(__h - 8, __reg_9_4); __reg_6_2 = __reg_5_2; __CALC6(__reg_6_0, __reg_6_0, __reg_6_0, __reg_6_0, __reg_6_1, __reg_5_3); __CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4); __CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2); __STORE(__h - 7, __reg_9_0); __reg_6_3 = __reg_5_3; __CALC7(__reg_7_4, __reg_7_4, __reg_7_4, __reg_7_1, __reg_7_0, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0); __CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3); __STORE(__h - 6, __reg_9_1); __reg_7_2 = __reg_6_2; __CALC7(__reg_7_0, __reg_7_0, __reg_7_0, __reg_7_0, __reg_7_1, __reg_6_3); __CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4); __STORE(__h - 5, __reg_9_2); __reg_7_3 = __reg_6_3; __CALC8(__reg_8_4, __reg_8_4, __reg_8_4, __reg_8_1, __reg_8_0, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0); __STORE(__h - 4, __reg_9_3); __reg_8_2 = __reg_7_2; __CALC8(__reg_8_0, __reg_8_0, __reg_8_0, __reg_8_0, __reg_8_1, __reg_7_3); __CALC9(__reg_9_3, __reg_9_2, __reg_9_1, 
__reg_9_0, __reg_9_4, __reg_8_1); __STORE(__h - 3, __reg_9_4); __reg_8_3 = __reg_7_3; __CALC9(__reg_9_4, __reg_9_4, __reg_9_4, __reg_9_1, __reg_9_0, __reg_8_2); __STORE(__h - 2, __reg_9_0); __CALC9(__reg_9_0, __reg_9_0, __reg_9_0, __reg_9_0, __reg_9_1, __reg_8_3); __STORE(__h - 1, __reg_9_1); } else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0, __h + 0); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0); __CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3); __CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4); __CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0); __CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3); __CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1); __STORE(__h - 18, __reg_9_4); __LOAD(__reg_0, __h + 1); __CALC1(__reg_1_0, __reg_1_0, __reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4); __CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0); __CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3); __CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4); __CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2); __STORE(__h - 17, __reg_9_0); __reg_1_3 = __reg_0; __LOAD(__reg_0, __h + 2); __CALC1(__reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_2, __reg_0); __CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0); __CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3); __CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4); __CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0); __CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3); __STORE(__h - 16, __reg_9_1); __reg_1_4 = __reg_0; __CALC2(__reg_2_0, __reg_2_0, __reg_2_0, __reg_2_2, __reg_2_1, __reg_1_3); __CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4); __CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0); __CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3); __CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4); __STORE(__h - 15, __reg_9_2); __reg_2_3 = __reg_1_3; __CALC2(__reg_2_1, __reg_2_1, __reg_2_1, __reg_2_1, __reg_2_2, __reg_1_4); __CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0); __CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3); 
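// Epilogue drain (one `else if` case per possible count of remaining input
// planes): once the input is exhausted, each stage is flushed with
// boundary-clamped windows; the repeated register arguments, e.g.
// __CALC1(r, r, r, r, s, in), appear to pin the window at the domain edge
// while the deeper stages keep consuming and __STORE catches up to __h.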
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4); __CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0); __STORE(__h - 14, __reg_9_3); __reg_2_4 = __reg_1_4; __CALC3(__reg_3_0, __reg_3_0, __reg_3_0, __reg_3_2, __reg_3_1, __reg_2_3); __CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4); __CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0); __CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3); __CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1); __STORE(__h - 13, __reg_9_4); __reg_3_3 = __reg_2_3; __CALC3(__reg_3_1, __reg_3_1, __reg_3_1, __reg_3_1, __reg_3_2, __reg_2_4); __CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0); __CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3); __CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4); __CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2); __STORE(__h - 12, __reg_9_0); __reg_3_4 = __reg_2_4; __CALC4(__reg_4_0, __reg_4_0, __reg_4_0, __reg_4_2, __reg_4_1, __reg_3_3); __CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4); __CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0); __CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3); __STORE(__h - 11, __reg_9_1); __reg_4_3 = __reg_3_3; __CALC4(__reg_4_1, __reg_4_1, __reg_4_1, __reg_4_1, __reg_4_2, __reg_3_4); __CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0); __CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3); __CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4); __STORE(__h - 10, __reg_9_2); __reg_4_4 = __reg_3_4; __CALC5(__reg_5_0, __reg_5_0, __reg_5_0, __reg_5_2, __reg_5_1, __reg_4_3); __CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4); __CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0); __STORE(__h - 9, __reg_9_3); __reg_5_3 = __reg_4_3; __CALC5(__reg_5_1, __reg_5_1, __reg_5_1, __reg_5_1, __reg_5_2, __reg_4_4); __CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0); __CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3); __CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1); __STORE(__h - 8, __reg_9_4); __reg_5_4 = __reg_4_4; __CALC6(__reg_6_0, __reg_6_0, __reg_6_0, __reg_6_2, __reg_6_1, __reg_5_3); __CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, 
__reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4); __CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2); __STORE(__h - 7, __reg_9_0); __reg_6_3 = __reg_5_3; __CALC6(__reg_6_1, __reg_6_1, __reg_6_1, __reg_6_1, __reg_6_2, __reg_5_4); __CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0); __CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3); __STORE(__h - 6, __reg_9_1); __reg_6_4 = __reg_5_4; __CALC7(__reg_7_0, __reg_7_0, __reg_7_0, __reg_7_2, __reg_7_1, __reg_6_3); __CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4); __STORE(__h - 5, __reg_9_2); __reg_7_3 = __reg_6_3; __CALC7(__reg_7_1, __reg_7_1, __reg_7_1, __reg_7_1, __reg_7_2, __reg_6_4); __CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0); __STORE(__h - 4, __reg_9_3); __reg_7_4 = __reg_6_4; __CALC8(__reg_8_0, __reg_8_0, __reg_8_0, __reg_8_2, __reg_8_1, __reg_7_3); __CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1); __STORE(__h - 3, __reg_9_4); __reg_8_3 = __reg_7_3; __CALC8(__reg_8_1, __reg_8_1, __reg_8_1, __reg_8_1, __reg_8_2, __reg_7_4); __CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2); __STORE(__h - 2, __reg_9_0); __reg_8_4 = __reg_7_4; __CALC9(__reg_9_0, __reg_9_0, __reg_9_0, __reg_9_2, __reg_9_1, __reg_8_3); __STORE(__h - 1, __reg_9_1); __CALC9(__reg_9_1, __reg_9_1, __reg_9_1, __reg_9_1, __reg_9_2, __reg_8_4); __STORE(__h + 0, __reg_9_2); } else if (__h + 4 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0, __h + 0); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0); __CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3); __CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4); __CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0); __CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3); __CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1); __STORE(__h - 18, __reg_9_4); __LOAD(__reg_0, __h + 1); __CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4); __CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0); __CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3); __CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4); __CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2); __STORE(__h - 17, __reg_9_0); __LOAD(__reg_0, __h + 2); __CALC1(__reg_1_1, __reg_1_1, __reg_1_1, __reg_1_3, __reg_1_2, __reg_0); __CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0); __CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3); 
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4); __CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0); __CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3); __STORE(__h - 16, __reg_9_1); __reg_1_4 = __reg_0; __LOAD(__reg_0, __h + 3); __CALC1(__reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_3, __reg_0); __CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3); __CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4); __CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0); __CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3); __CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4); __STORE(__h - 15, __reg_9_2); __reg_1_0 = __reg_0; __CALC2(__reg_2_1, __reg_2_1, __reg_2_1, __reg_2_3, __reg_2_2, __reg_1_4); __CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0); __CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3); __CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4); __CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0); __STORE(__h - 14, __reg_9_3); __reg_2_4 = __reg_1_4; __CALC2(__reg_2_2, __reg_2_2, __reg_2_2, __reg_2_2, __reg_2_3, __reg_1_0); __CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3); __CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4); __CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0); __CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3); __CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1); __STORE(__h - 13, __reg_9_4); __reg_2_0 = __reg_1_0; __CALC3(__reg_3_1, __reg_3_1, __reg_3_1, __reg_3_3, __reg_3_2, __reg_2_4); __CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0); __CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3); __CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4); __CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2); __STORE(__h - 12, __reg_9_0); __reg_3_4 = __reg_2_4; __CALC3(__reg_3_2, __reg_3_2, __reg_3_2, __reg_3_2, __reg_3_3, __reg_2_0); __CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3); __CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4); __CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0); __CALC9(__reg_9_0, __reg_9_4, __reg_9_3, 
__reg_9_2, __reg_9_1, __reg_8_3); __STORE(__h - 11, __reg_9_1); __reg_3_0 = __reg_2_0; __CALC4(__reg_4_1, __reg_4_1, __reg_4_1, __reg_4_3, __reg_4_2, __reg_3_4); __CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0); __CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3); __CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4); __STORE(__h - 10, __reg_9_2); __reg_4_4 = __reg_3_4; __CALC4(__reg_4_2, __reg_4_2, __reg_4_2, __reg_4_2, __reg_4_3, __reg_3_0); __CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3); __CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4); __CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0); __STORE(__h - 9, __reg_9_3); __reg_4_0 = __reg_3_0; __CALC5(__reg_5_1, __reg_5_1, __reg_5_1, __reg_5_3, __reg_5_2, __reg_4_4); __CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0); __CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3); __CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1); __STORE(__h - 8, __reg_9_4); __reg_5_4 = __reg_4_4; __CALC5(__reg_5_2, __reg_5_2, __reg_5_2, __reg_5_2, __reg_5_3, __reg_4_0); __CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3); __CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4); __CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2); __STORE(__h - 7, __reg_9_0); __reg_5_0 = __reg_4_0; __CALC6(__reg_6_1, __reg_6_1, __reg_6_1, __reg_6_3, __reg_6_2, __reg_5_4); __CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0); __CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3); __STORE(__h - 6, __reg_9_1); __reg_6_4 = __reg_5_4; __CALC6(__reg_6_2, __reg_6_2, __reg_6_2, __reg_6_2, __reg_6_3, __reg_5_0); __CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3); __CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4); __STORE(__h - 5, __reg_9_2); __reg_6_0 = __reg_5_0; __CALC7(__reg_7_1, __reg_7_1, __reg_7_1, __reg_7_3, __reg_7_2, __reg_6_4); __CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0); __STORE(__h - 4, __reg_9_3); __reg_7_4 = __reg_6_4; __CALC7(__reg_7_2, __reg_7_2, __reg_7_2, __reg_7_2, __reg_7_3, __reg_6_0); __CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3); __CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1); __STORE(__h - 3, __reg_9_4); __reg_7_0 = __reg_6_0; __CALC8(__reg_8_1, __reg_8_1, __reg_8_1, __reg_8_3, __reg_8_2, __reg_7_4); __CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2); __STORE(__h - 2, __reg_9_0); __reg_8_4 = __reg_7_4; __CALC8(__reg_8_2, __reg_8_2, __reg_8_2, __reg_8_2, __reg_8_3, __reg_7_0); __CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, 
__reg_8_3); __STORE(__h - 1, __reg_9_1); __reg_8_0 = __reg_7_0; __CALC9(__reg_9_1, __reg_9_1, __reg_9_1, __reg_9_3, __reg_9_2, __reg_8_4); __STORE(__h + 0, __reg_9_2); __CALC9(__reg_9_2, __reg_9_2, __reg_9_2, __reg_9_2, __reg_9_3, __reg_8_0); __STORE(__h + 1, __reg_9_3); } else if (__h + 5 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0, __h + 0); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0); __CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3); __CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4); __CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0); __CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3); __CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1); __STORE(__h - 18, __reg_9_4); __LOAD(__reg_0, __h + 1); __CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4); __CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0); __CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3); __CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4); __CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2); __STORE(__h - 17, __reg_9_0); __LOAD(__reg_0, __h + 2); __CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); __CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0); __CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3); __CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4); __CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0); __CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3); __STORE(__h - 16, __reg_9_1); __LOAD(__reg_0, __h + 3); __CALC1(__reg_1_2, __reg_1_2, __reg_1_2, __reg_1_4, __reg_1_3, __reg_0); __CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3); __CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4); __CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0); __CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3); __CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4); __STORE(__h - 15, __reg_9_2); __reg_1_0 = __reg_0; __LOAD(__reg_0, __h + 4); __CALC1(__reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_4, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4); __CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2); __CALC4(__reg_4_2, 
__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0); __CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3); __CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4); __CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0); __STORE(__h - 14, __reg_9_3); __reg_1_1 = __reg_0; __CALC2(__reg_2_2, __reg_2_2, __reg_2_2, __reg_2_4, __reg_2_3, __reg_1_0); __CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3); __CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4); __CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0); __CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3); __CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1); __STORE(__h - 13, __reg_9_4); __reg_2_0 = __reg_1_0; __CALC2(__reg_2_3, __reg_2_3, __reg_2_3, __reg_2_3, __reg_2_4, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4); __CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0); __CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3); __CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4); __CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2); __STORE(__h - 12, __reg_9_0); __reg_2_1 = __reg_1_1; __CALC3(__reg_3_2, __reg_3_2, __reg_3_2, __reg_3_4, __reg_3_3, __reg_2_0); __CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3); __CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4); __CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0); __CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3); __STORE(__h - 11, __reg_9_1); __reg_3_0 = __reg_2_0; __CALC3(__reg_3_3, __reg_3_3, __reg_3_3, __reg_3_3, __reg_3_4, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4); __CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0); __CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3); __CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4); __STORE(__h - 10, __reg_9_2); __reg_3_1 = __reg_2_1; __CALC4(__reg_4_2, __reg_4_2, __reg_4_2, __reg_4_4, __reg_4_3, __reg_3_0); __CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3); __CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4); __CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0); __STORE(__h - 9, __reg_9_3); __reg_4_0 = __reg_3_0; __CALC4(__reg_4_3, __reg_4_3, __reg_4_3, __reg_4_3, __reg_4_4, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, 
__reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4); __CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0); __CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3); __CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1); __STORE(__h - 8, __reg_9_4); __reg_4_1 = __reg_3_1; __CALC5(__reg_5_2, __reg_5_2, __reg_5_2, __reg_5_4, __reg_5_3, __reg_4_0); __CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3); __CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4); __CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2); __STORE(__h - 7, __reg_9_0); __reg_5_0 = __reg_4_0; __CALC5(__reg_5_3, __reg_5_3, __reg_5_3, __reg_5_3, __reg_5_4, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4); __CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0); __CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3); __STORE(__h - 6, __reg_9_1); __reg_5_1 = __reg_4_1; __CALC6(__reg_6_2, __reg_6_2, __reg_6_2, __reg_6_4, __reg_6_3, __reg_5_0); __CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3); __CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4); __STORE(__h - 5, __reg_9_2); __reg_6_0 = __reg_5_0; __CALC6(__reg_6_3, __reg_6_3, __reg_6_3, __reg_6_3, __reg_6_4, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4); __CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0); __STORE(__h - 4, __reg_9_3); __reg_6_1 = __reg_5_1; __CALC7(__reg_7_2, __reg_7_2, __reg_7_2, __reg_7_4, __reg_7_3, __reg_6_0); __CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3); __CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1); __STORE(__h - 3, __reg_9_4); __reg_7_0 = __reg_6_0; __CALC7(__reg_7_3, __reg_7_3, __reg_7_3, __reg_7_3, __reg_7_4, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4); __CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2); __STORE(__h - 2, __reg_9_0); __reg_7_1 = __reg_6_1; __CALC8(__reg_8_2, __reg_8_2, __reg_8_2, __reg_8_4, __reg_8_3, __reg_7_0); __CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3); __STORE(__h - 1, __reg_9_1); __reg_8_0 = __reg_7_0; __CALC8(__reg_8_3, __reg_8_3, __reg_8_3, __reg_8_3, __reg_8_4, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4); __STORE(__h + 0, __reg_9_2); __reg_8_1 = __reg_7_1; __CALC9(__reg_9_2, __reg_9_2, __reg_9_2, __reg_9_4, __reg_9_3, __reg_8_0); __STORE(__h + 1, __reg_9_3); __CALC9(__reg_9_3, __reg_9_3, __reg_9_3, __reg_9_3, __reg_9_4, __reg_8_1); __STORE(__h + 2, __reg_9_4); } else if (__h + 6 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0, __h + 0); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0); __CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3); __CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_4, 
__reg_5_3, __reg_5_2, __reg_4_4); __CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0); __CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3); __CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1); __STORE(__h - 18, __reg_9_4); __LOAD(__reg_0, __h + 1); __CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4); __CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0); __CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3); __CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4); __CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2); __STORE(__h - 17, __reg_9_0); __LOAD(__reg_0, __h + 2); __CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); __CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0); __CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3); __CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4); __CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0); __CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3); __STORE(__h - 16, __reg_9_1); __LOAD(__reg_0, __h + 3); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0); __CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3); __CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4); __CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0); __CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3); __CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4); __STORE(__h - 15, __reg_9_2); __LOAD(__reg_0, __h + 4); __CALC1(__reg_1_3, __reg_1_3, __reg_1_3, __reg_1_0, __reg_1_4, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4); __CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0); __CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3); __CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4); __CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0); __STORE(__h - 14, __reg_9_3); __reg_1_1 = __reg_0; __LOAD(__reg_0, __h + 5); __CALC1(__reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_0, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0); __CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3); 
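// Last load for this tile variant: row __h + 5. The repeated-register form of
// __CALC1 above (__reg_1_4 passed for several slots) realizes the bottom-boundary
// update for stage 1; the deeper stages are drained the same way below.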
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4); __CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0); __CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3); __CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1); __STORE(__h - 13, __reg_9_4); __reg_1_2 = __reg_0; __CALC2(__reg_2_3, __reg_2_3, __reg_2_3, __reg_2_0, __reg_2_4, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4); __CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0); __CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3); __CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4); __CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2); __STORE(__h - 12, __reg_9_0); __reg_2_1 = __reg_1_1; __CALC2(__reg_2_4, __reg_2_4, __reg_2_4, __reg_2_4, __reg_2_0, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0); __CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3); __CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4); __CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0); __CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3); __STORE(__h - 11, __reg_9_1); __reg_2_2 = __reg_1_2; __CALC3(__reg_3_3, __reg_3_3, __reg_3_3, __reg_3_0, __reg_3_4, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4); __CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0); __CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3); __CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4); __STORE(__h - 10, __reg_9_2); __reg_3_1 = __reg_2_1; __CALC3(__reg_3_4, __reg_3_4, __reg_3_4, __reg_3_4, __reg_3_0, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0); __CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3); __CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4); __CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0); __STORE(__h - 9, __reg_9_3); __reg_3_2 = __reg_2_2; __CALC4(__reg_4_3, __reg_4_3, __reg_4_3, __reg_4_0, __reg_4_4, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4); __CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0); __CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3); __CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1); __STORE(__h - 8, __reg_9_4); __reg_4_1 = __reg_3_1; __CALC4(__reg_4_4, __reg_4_4, __reg_4_4, __reg_4_4, __reg_4_0, __reg_3_2); 
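// Pipeline drain: no further __LOADs happen in this branch, so each remaining
// __CALCk/__STORE pair below flushes one in-flight row through the deeper stages
// until __STORE(__h + 3, ...) emits the final output row.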
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0); __CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3); __CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4); __CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2); __STORE(__h - 7, __reg_9_0); __reg_4_2 = __reg_3_2; __CALC5(__reg_5_3, __reg_5_3, __reg_5_3, __reg_5_0, __reg_5_4, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4); __CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0); __CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3); __STORE(__h - 6, __reg_9_1); __reg_5_1 = __reg_4_1; __CALC5(__reg_5_4, __reg_5_4, __reg_5_4, __reg_5_4, __reg_5_0, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0); __CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3); __CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4); __STORE(__h - 5, __reg_9_2); __reg_5_2 = __reg_4_2; __CALC6(__reg_6_3, __reg_6_3, __reg_6_3, __reg_6_0, __reg_6_4, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4); __CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0); __STORE(__h - 4, __reg_9_3); __reg_6_1 = __reg_5_1; __CALC6(__reg_6_4, __reg_6_4, __reg_6_4, __reg_6_4, __reg_6_0, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0); __CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3); __CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1); __STORE(__h - 3, __reg_9_4); __reg_6_2 = __reg_5_2; __CALC7(__reg_7_3, __reg_7_3, __reg_7_3, __reg_7_0, __reg_7_4, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4); __CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2); __STORE(__h - 2, __reg_9_0); __reg_7_1 = __reg_6_1; __CALC7(__reg_7_4, __reg_7_4, __reg_7_4, __reg_7_4, __reg_7_0, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0); __CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3); __STORE(__h - 1, __reg_9_1); __reg_7_2 = __reg_6_2; __CALC8(__reg_8_3, __reg_8_3, __reg_8_3, __reg_8_0, __reg_8_4, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4); __STORE(__h + 0, __reg_9_2); __reg_8_1 = __reg_7_1; __CALC8(__reg_8_4, __reg_8_4, __reg_8_4, __reg_8_4, __reg_8_0, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0); __STORE(__h + 1, __reg_9_3); __reg_8_2 = __reg_7_2; __CALC9(__reg_9_3, __reg_9_3, __reg_9_3, __reg_9_0, __reg_9_4, __reg_8_1); __STORE(__h + 2, __reg_9_4); __CALC9(__reg_9_4, __reg_9_4, __reg_9_4, __reg_9_4, __reg_9_0, __reg_8_2); __STORE(__h + 3, __reg_9_0); } } else { for (__h = 37; __h <= __side1LenOl - 5;) { __LOAD(__reg_0, __h); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0); __CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3); __CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, 
__reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4); __CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0); __CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3); __CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1); __STORE(__h - 18, __reg_9_4); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4); __CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0); __CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3); __CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4); __CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2); __STORE(__h - 18, __reg_9_0); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); __CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0); __CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3); __CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4); __CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0); __CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3); __STORE(__h - 18, __reg_9_1); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0); __CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3); __CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4); __CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0); __CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3); __CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4); __STORE(__h - 18, __reg_9_2); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4); __CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0); __CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3); __CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4); __CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0); __STORE(__h - 18, __reg_9_3); __h++; __DB_SWITCH(); __syncthreads(); } if (__h == __side1LenOl) return; __LOAD(__reg_0, __h); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0); 
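// Tail of the 5x-unrolled steady-state loop: every remaining row is processed
// one at a time, re-checking "if (__h == __side1LenOl) return;" so the block
// stops exactly at the end of its tile.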
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3); __CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4); __CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0); __CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3); __CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1); __STORE(__h - 18, __reg_9_4); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0, __h); __CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4); __CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0); __CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3); __CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4); __CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2); __STORE(__h - 18, __reg_9_0); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0, __h); __CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); __CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0); __CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3); __CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4); __CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0); __CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3); __STORE(__h - 18, __reg_9_1); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0, __h); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0); __CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3); __CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4); __CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0); __CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3); __CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4); __STORE(__h - 18, __reg_9_2); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0, __h); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4); __CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0); __CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3); __CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4); __CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, 
__reg_9_4, __reg_9_3, __reg_8_0); __STORE(__h - 18, __reg_9_3); __h++; } }

// kernel0_8: 2D radius-2 box stencil (5x5 neighborhood; the 25 weights sum to 1.0)
// with 8 time steps fused per kernel invocation; __CALC1..__CALC8 are the per-step stages.
__global__ void kernel0_8(double *A, int dimsize, int timestep, int c0) {
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
  const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
  const AN5D_TYPE __c1Len = (dimsize - 2 - 2); const AN5D_TYPE __c1Pad = (2);
#define __c1 c1
  const AN5D_TYPE __c2Len = (dimsize - 2 - 2); const AN5D_TYPE __c2Pad = (2);
#define __c2 c2
  const AN5D_TYPE __halo1 = 2; const AN5D_TYPE __halo2 = 2;
  const AN5D_TYPE __side0Len = 8; const AN5D_TYPE __side1Len = 512; const AN5D_TYPE __side2Len = 480;
  const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
  const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
  const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
  const AN5D_TYPE __blockSize = 1 * __side2LenOl;
  const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
  const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
  const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
  const AN5D_TYPE __local_c2 = __tid;
  const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
  const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
  double __reg_0;
  double __reg_1_0; double __reg_1_1; double __reg_1_2; double __reg_1_3; double __reg_1_4;
  double __reg_2_0; double __reg_2_1; double __reg_2_2; double __reg_2_3; double __reg_2_4;
  double __reg_3_0; double __reg_3_1; double __reg_3_2; double __reg_3_3; double __reg_3_4;
  double __reg_4_0; double __reg_4_1; double __reg_4_2; double __reg_4_3; double __reg_4_4;
  double __reg_5_0; double __reg_5_1; double __reg_5_2; double __reg_5_3; double __reg_5_4;
  double __reg_6_0; double __reg_6_1; double __reg_6_2; double __reg_6_3; double __reg_6_4;
  double __reg_7_0; double __reg_7_1; double __reg_7_2; double __reg_7_3; double __reg_7_4;
  double __reg_8_0; double __reg_8_1; double __reg_8_2; double __reg_8_3; double __reg_8_4;
  // Double-buffered shared-memory row; __DB_SWITCH() flips between the two halves.
  __shared__ double __a_sb_double[__blockSize * 2];
  double *__a_sb = __a_sb_double;
  const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
  const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
  // Each fused step writes a region shrunk by one more halo width per stage.
  const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
  const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2);
  const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3);
  const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4);
  const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5);
  const AN5D_TYPE __writeValid6 = __updateValid && __local_c2 >= (__halo2 * 6) && __local_c2 < __side2LenOl - (__halo2 * 6);
  const AN5D_TYPE __writeValid7 = __updateValid && __local_c2 >= (__halo2 * 7) && __local_c2 < __side2LenOl - (__halo2 * 7);
  const AN5D_TYPE __writeValid8 = __updateValid && __local_c2 >= (__halo2 * 8) && __local_c2 < __side2LenOl - (__halo2 * 8);
  const AN5D_TYPE __storeValid = __writeValid8;
  AN5D_TYPE __c1;
  AN5D_TYPE __h;
  const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_0_wrap(__rn0, __a) do { __rn0 = (((((((((((((((((((((((((0.03125f * (__SBREF(__a_sb, -2))) + (0.03126f * (__SBREF(__a_sb, -1)))) + (0.03127f * (__REGREF(__a, 0)))) + (0.03128f * (__SBREF(__a_sb, 1)))) + (0.03129f * (__SBREF(__a_sb, 2)))))))))))))))))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_0(out, a) do { __CALCEXPR_0_wrap(out, a); } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_1_wrap(__rn0, __a) do { __rn0 = (((((((((((((((((((((0.03130f * (__SBREF(__a_sb, -2)))) + (0.03131f * (__SBREF(__a_sb, -1)))) + (0.03132f * (__REGREF(__a, 0)))) + (0.03133f * (__SBREF(__a_sb, 1)))) + (0.03134f * (__SBREF(__a_sb, 2))))))))))))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_1(out, a) do { double etmp; __CALCEXPR_1_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_2_wrap(__rn0, __a) do { __rn0 = ((((((((((((((((0.03135f * (__SBREF(__a_sb, -2)))) + (0.03136f * (__SBREF(__a_sb, -1)))) + (0.24712f * (__REGREF(__a, 0)))) + (0.03138f * (__SBREF(__a_sb, 1)))) + (0.03139f * (__SBREF(__a_sb, 2)))))))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_2(out, a) do { double etmp; __CALCEXPR_2_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_3_wrap(__rn0, __a) do { __rn0 = (((((((((((0.03140f * (__SBREF(__a_sb, -2)))) + (0.03141f * (__SBREF(__a_sb, -1)))) + (0.03142f * (__REGREF(__a, 0)))) + (0.03143f * (__SBREF(__a_sb, 1)))) + (0.03144f * (__SBREF(__a_sb, 2))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_3(out, a) do { double etmp; __CALCEXPR_3_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_4_wrap(__rn0, __a) do { __rn0 = ((((((0.03145f * (__SBREF(__a_sb, -2)))) + (0.03146f * (__SBREF(__a_sb, -1)))) + (0.03147f * (__REGREF(__a, 0)))) + (0.03148f * (__SBREF(__a_sb, 1)))) + (0.03149f * (__SBREF(__a_sb, 2)))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_4(out, a) do { double etmp; __CALCEXPR_4_wrap(etmp, a); out += etmp; } while (0);
#define __CALCEXPR(out0, out1, out2, out3, out4, reg) do { __CALCEXPR_0(out0, reg); __CALCEXPR_1(out1, reg); __CALCEXPR_2(out2, reg); __CALCEXPR_3(out3, reg); __CALCEXPR_4(out4, reg); } while (0);
#define __CALC1(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid1) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC2(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid2) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC3(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid3) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC4(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid4) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC5(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid5) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC6(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid6) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC7(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid7) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC8(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid8) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __STORE(h, out) do { if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __DEST = out; }} while (0)
  if (__c1Id == 0) { __LOAD(__reg_0, 0); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_0); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_0); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_0); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_0); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_0); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_0); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_0); __LOAD(__reg_0, 1); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0); __CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_0); __CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_0); __CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_0); __CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_0); __CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_0); __CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_0); __CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_0); __LOAD(__reg_0, 2); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __LOAD(__reg_0, 3); __CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); __LOAD(__reg_0, 4); __CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); __CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2); __LOAD(__reg_0, 5); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0,
__reg_1_4, __reg_1_3, __reg_0); __CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3); __LOAD(__reg_0, 6); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4); __CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2); __LOAD(__reg_0, 7); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0); __CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3); __LOAD(__reg_0, 8); __CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4); __CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2); __LOAD(__reg_0, 9); __CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); __CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0); __CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3); __LOAD(__reg_0, 10); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0); __CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3); __CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4); __CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2); __LOAD(__reg_0, 11); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4); __CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0); __CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3); __LOAD(__reg_0, 12); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0); __CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3); __CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4); __CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2); __LOAD(__reg_0, 13); __CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4); __CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0); __CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3); __LOAD(__reg_0, 14); __CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); __CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0); __CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3); __CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4); __CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2); __LOAD(__reg_0, 15); __CALC1(__reg_1_2, __reg_1_1, 
__reg_1_0, __reg_1_4, __reg_1_3, __reg_0); __CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3); __CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4); __CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0); __CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3); __LOAD(__reg_0, 16); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4); __CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0); __CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3); __CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4); __CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2); __LOAD(__reg_0, 17); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0); __CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3); __CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4); __CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0); __CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3); __LOAD(__reg_0, 18); __CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4); __CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0); __CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3); __CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4); __STORE(2, __reg_8_2); __LOAD(__reg_0, 19); __CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); __CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0); __CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3); __CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4); __CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0); __STORE(3, __reg_8_3); __LOAD(__reg_0, 20); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0); __CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3); __CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4); __CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0); __CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, 
__reg_6_3); __CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1); __STORE(4, __reg_8_4); __LOAD(__reg_0, 21); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4); __CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0); __CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3); __CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4); __CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2); __STORE(5, __reg_8_0); __LOAD(__reg_0, 22); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0); __CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3); __CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4); __CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0); __CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3); __STORE(6, __reg_8_1); __LOAD(__reg_0, 23); __CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4); __CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0); __CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3); __CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4); __STORE(7, __reg_8_2); __LOAD(__reg_0, 24); __CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); __CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0); __CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3); __CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4); __CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0); __STORE(8, __reg_8_3); __LOAD(__reg_0, 25); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0); __CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3); __CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4); __CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0); __CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3); __CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1); __STORE(9, __reg_8_4); __LOAD(__reg_0, 26); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4); __CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, 
__reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0); __CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3); __CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4); __CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2); __STORE(10, __reg_8_0); __LOAD(__reg_0, 27); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0); __CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3); __CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4); __CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0); __CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3); __STORE(11, __reg_8_1); __LOAD(__reg_0, 28); __CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4); __CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0); __CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3); __CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4); __STORE(12, __reg_8_2); __LOAD(__reg_0, 29); __CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); __CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0); __CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3); __CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4); __CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0); __STORE(13, __reg_8_3); __LOAD(__reg_0, 30); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0); __CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3); __CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4); __CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0); __CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3); __CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1); __STORE(14, __reg_8_4); __LOAD(__reg_0, 31); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4); __CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0); __CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3); __CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4); __CALC8(__reg_8_4, 
__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2); __STORE(15, __reg_8_0); __LOAD(__reg_0, 32); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0); __CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3); __CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4); __CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0); __CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3); __STORE(16, __reg_8_1); } else { __LOAD(__reg_0, 0); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0); __LOAD(__reg_0, 1); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0); __LOAD(__reg_0, 2); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __LOAD(__reg_0, 3); __CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); __LOAD(__reg_0, 4); __CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); __CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2); __LOAD(__reg_0, 5); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0); __CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3); __LOAD(__reg_0, 6); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4); __LOAD(__reg_0, 7); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0); __LOAD(__reg_0, 8); __CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4); __LOAD(__reg_0, 9); __CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); __CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0); __LOAD(__reg_0, 10); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0); __CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3); __CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1); __LOAD(__reg_0, 11); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4); __CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2); __LOAD(__reg_0, 12); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0); __CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3); __CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1); __LOAD(__reg_0, 13); __CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4); __CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2); __LOAD(__reg_0, 14); __CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); __CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, 
__reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0); __CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3); __LOAD(__reg_0, 15); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0); __CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3); __CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4); __LOAD(__reg_0, 16); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4); __CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0); __CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3); __LOAD(__reg_0, 17); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0); __CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3); __CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4); __LOAD(__reg_0, 18); __CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4); __CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0); __LOAD(__reg_0, 19); __CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); __CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0); __CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3); __CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1); __LOAD(__reg_0, 20); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0); __CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3); __CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4); __CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0); __LOAD(__reg_0, 21); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4); __CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0); __CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3); __CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1); __LOAD(__reg_0, 22); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0); __CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3); __CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4); __CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2); __LOAD(__reg_0, 23); __CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, 
__reg_0); __CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4); __CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0); __CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3); __LOAD(__reg_0, 24); __CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); __CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0); __CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3); __CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4); __CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2); __LOAD(__reg_0, 25); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0); __CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3); __CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4); __CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0); __CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3); __LOAD(__reg_0, 26); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4); __CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0); __CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3); __CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4); __LOAD(__reg_0, 27); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0); __CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3); __CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4); __CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0); __LOAD(__reg_0, 28); __CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4); __CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0); __CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3); __CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4); __LOAD(__reg_0, 29); __CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); __CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0); __CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3); __CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, 
__reg_5_4, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4); __CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0); __LOAD(__reg_0, 30); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0); __CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3); __CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4); __CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0); __CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3); __CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1); __LOAD(__reg_0, 31); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4); __CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0); __CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3); __CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4); __CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2); __LOAD(__reg_0, 32); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0); __CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3); __CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4); __CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0); __CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3); __STORE(16, __reg_8_1); } __a_sb = __a_sb_double + __blockSize * 0; if (__c1Id == __side1Num - 1) { for (__h = 33; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 7;) { __LOAD(__reg_0, __h); __CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4); __CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0); __CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3); __CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4); __STORE(__h - 16, __reg_8_2); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); __CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0); __CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3); __CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4); __CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, 
__reg_7_0); __STORE(__h - 16, __reg_8_3); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0); __CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3); __CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4); __CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0); __CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3); __CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1); __STORE(__h - 16, __reg_8_4); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4); __CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0); __CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3); __CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4); __CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2); __STORE(__h - 16, __reg_8_0); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0); __CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3); __CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4); __CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0); __CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3); __STORE(__h - 16, __reg_8_1); __h++; } if (0) {} else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0, __h + 0); __CALC1(__reg_1_0, __reg_1_0, __reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4); __CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0); __CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3); __CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4); __STORE(__h - 16, __reg_8_2); __reg_1_3 = __reg_0; __LOAD(__reg_0, __h + 1); __CALC1(__reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_2, __reg_0); __CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0); __CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3); __CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4); __CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0); __STORE(__h - 15, __reg_8_3); __reg_1_4 = __reg_0; __CALC2(__reg_2_0, __reg_2_0, __reg_2_0, __reg_2_2, __reg_2_1, __reg_1_3); __CALC3(__reg_3_3, __reg_3_2, 
__reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4); __CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0); __CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3); __CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1); __STORE(__h - 14, __reg_8_4); __reg_2_3 = __reg_1_3; __CALC2(__reg_2_1, __reg_2_1, __reg_2_1, __reg_2_1, __reg_2_2, __reg_1_4); __CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0); __CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3); __CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4); __CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2); __STORE(__h - 13, __reg_8_0); __reg_2_4 = __reg_1_4; __CALC3(__reg_3_0, __reg_3_0, __reg_3_0, __reg_3_2, __reg_3_1, __reg_2_3); __CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4); __CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0); __CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3); __STORE(__h - 12, __reg_8_1); __reg_3_3 = __reg_2_3; __CALC3(__reg_3_1, __reg_3_1, __reg_3_1, __reg_3_1, __reg_3_2, __reg_2_4); __CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0); __CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3); __CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4); __STORE(__h - 11, __reg_8_2); __reg_3_4 = __reg_2_4; __CALC4(__reg_4_0, __reg_4_0, __reg_4_0, __reg_4_2, __reg_4_1, __reg_3_3); __CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4); __CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0); __STORE(__h - 10, __reg_8_3); __reg_4_3 = __reg_3_3; __CALC4(__reg_4_1, __reg_4_1, __reg_4_1, __reg_4_1, __reg_4_2, __reg_3_4); __CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0); __CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3); __CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1); __STORE(__h - 9, __reg_8_4); __reg_4_4 = __reg_3_4; __CALC5(__reg_5_0, __reg_5_0, __reg_5_0, __reg_5_2, __reg_5_1, __reg_4_3); __CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4); __CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2); __STORE(__h - 8, __reg_8_0); __reg_5_3 = __reg_4_3; __CALC5(__reg_5_1, __reg_5_1, __reg_5_1, __reg_5_1, __reg_5_2, __reg_4_4); __CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0); __CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, 
__reg_7_3); __STORE(__h - 7, __reg_8_1); __reg_5_4 = __reg_4_4; __CALC6(__reg_6_0, __reg_6_0, __reg_6_0, __reg_6_2, __reg_6_1, __reg_5_3); __CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4); __STORE(__h - 6, __reg_8_2); __reg_6_3 = __reg_5_3; __CALC6(__reg_6_1, __reg_6_1, __reg_6_1, __reg_6_1, __reg_6_2, __reg_5_4); __CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0); __STORE(__h - 5, __reg_8_3); __reg_6_4 = __reg_5_4; __CALC7(__reg_7_0, __reg_7_0, __reg_7_0, __reg_7_2, __reg_7_1, __reg_6_3); __CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1); __STORE(__h - 4, __reg_8_4); __reg_7_3 = __reg_6_3; __CALC7(__reg_7_1, __reg_7_1, __reg_7_1, __reg_7_1, __reg_7_2, __reg_6_4); __CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2); __STORE(__h - 3, __reg_8_0); __reg_7_4 = __reg_6_4; __CALC8(__reg_8_0, __reg_8_0, __reg_8_0, __reg_8_2, __reg_8_1, __reg_7_3); __STORE(__h - 2, __reg_8_1); __CALC8(__reg_8_1, __reg_8_1, __reg_8_1, __reg_8_1, __reg_8_2, __reg_7_4); __STORE(__h - 1, __reg_8_2); } else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0, __h + 0); __CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4); __CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0); __CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3); __CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4); __STORE(__h - 16, __reg_8_2); __LOAD(__reg_0, __h + 1); __CALC1(__reg_1_1, __reg_1_1, __reg_1_1, __reg_1_3, __reg_1_2, __reg_0); __CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0); __CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3); __CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4); __CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0); __STORE(__h - 15, __reg_8_3); __reg_1_4 = __reg_0; __LOAD(__reg_0, __h + 2); __CALC1(__reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_3, __reg_0); __CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3); __CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4); __CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0); __CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3); __CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1); __STORE(__h - 14, __reg_8_4); __reg_1_0 = __reg_0; __CALC2(__reg_2_1, __reg_2_1, __reg_2_1, __reg_2_3, __reg_2_2, __reg_1_4); __CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0); __CALC5(__reg_5_0, 
__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3); __CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4); __CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2); __STORE(__h - 13, __reg_8_0); __reg_2_4 = __reg_1_4; __CALC2(__reg_2_2, __reg_2_2, __reg_2_2, __reg_2_2, __reg_2_3, __reg_1_0); __CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3); __CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4); __CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0); __CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3); __STORE(__h - 12, __reg_8_1); __reg_2_0 = __reg_1_0; __CALC3(__reg_3_1, __reg_3_1, __reg_3_1, __reg_3_3, __reg_3_2, __reg_2_4); __CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0); __CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3); __CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4); __STORE(__h - 11, __reg_8_2); __reg_3_4 = __reg_2_4; __CALC3(__reg_3_2, __reg_3_2, __reg_3_2, __reg_3_2, __reg_3_3, __reg_2_0); __CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3); __CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4); __CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0); __STORE(__h - 10, __reg_8_3); __reg_3_0 = __reg_2_0; __CALC4(__reg_4_1, __reg_4_1, __reg_4_1, __reg_4_3, __reg_4_2, __reg_3_4); __CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0); __CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3); __CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1); __STORE(__h - 9, __reg_8_4); __reg_4_4 = __reg_3_4; __CALC4(__reg_4_2, __reg_4_2, __reg_4_2, __reg_4_2, __reg_4_3, __reg_3_0); __CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3); __CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4); __CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2); __STORE(__h - 8, __reg_8_0); __reg_4_0 = __reg_3_0; __CALC5(__reg_5_1, __reg_5_1, __reg_5_1, __reg_5_3, __reg_5_2, __reg_4_4); __CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0); __CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3); __STORE(__h - 7, __reg_8_1); __reg_5_4 = __reg_4_4; __CALC5(__reg_5_2, __reg_5_2, __reg_5_2, __reg_5_2, __reg_5_3, __reg_4_0); __CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3); __CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4); __STORE(__h - 6, __reg_8_2); __reg_5_0 = __reg_4_0; __CALC6(__reg_6_1, __reg_6_1, __reg_6_1, __reg_6_3, __reg_6_2, __reg_5_4); 
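// Boundary drain: each `else if (__h + K == __c1Len - __side1Len * __c1Id + __halo1 * 2)`
// branch in this chain flushes the eight in-flight __CALC stages when the last block
// ends with K extra input rows beyond the unrolled steady-state loop; there is one
// branch per possible remainder, so every exit path empties the full time pipeline.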
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0); __STORE(__h - 5, __reg_8_3); __reg_6_4 = __reg_5_4; __CALC6(__reg_6_2, __reg_6_2, __reg_6_2, __reg_6_2, __reg_6_3, __reg_5_0); __CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3); __CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1); __STORE(__h - 4, __reg_8_4); __reg_6_0 = __reg_5_0; __CALC7(__reg_7_1, __reg_7_1, __reg_7_1, __reg_7_3, __reg_7_2, __reg_6_4); __CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2); __STORE(__h - 3, __reg_8_0); __reg_7_4 = __reg_6_4; __CALC7(__reg_7_2, __reg_7_2, __reg_7_2, __reg_7_2, __reg_7_3, __reg_6_0); __CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3); __STORE(__h - 2, __reg_8_1); __reg_7_0 = __reg_6_0; __CALC8(__reg_8_1, __reg_8_1, __reg_8_1, __reg_8_3, __reg_8_2, __reg_7_4); __STORE(__h - 1, __reg_8_2); __CALC8(__reg_8_2, __reg_8_2, __reg_8_2, __reg_8_2, __reg_8_3, __reg_7_0); __STORE(__h + 0, __reg_8_3); } else if (__h + 4 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0, __h + 0); __CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4); __CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0); __CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3); __CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4); __STORE(__h - 16, __reg_8_2); __LOAD(__reg_0, __h + 1); __CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); __CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0); __CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3); __CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4); __CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0); __STORE(__h - 15, __reg_8_3); __LOAD(__reg_0, __h + 2); __CALC1(__reg_1_2, __reg_1_2, __reg_1_2, __reg_1_4, __reg_1_3, __reg_0); __CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3); __CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4); __CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0); __CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3); __CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1); __STORE(__h - 14, __reg_8_4); __reg_1_0 = __reg_0; __LOAD(__reg_0, __h + 3); __CALC1(__reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_4, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4); __CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0); __CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3); 
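// Where a register name repeats across the leading output slots, e.g.
// __CALC2(__reg_2_3, __reg_2_3, __reg_2_3, __reg_2_3, __reg_2_4, __reg_1_1), the
// aliased slot appears to serve as a scratch sink for contributions to rows that
// fall outside the tile and are never stored; only the surviving slots stay live.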
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4); __CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2); __STORE(__h - 13, __reg_8_0); __reg_1_1 = __reg_0; __CALC2(__reg_2_2, __reg_2_2, __reg_2_2, __reg_2_4, __reg_2_3, __reg_1_0); __CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3); __CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4); __CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0); __CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3); __STORE(__h - 12, __reg_8_1); __reg_2_0 = __reg_1_0; __CALC2(__reg_2_3, __reg_2_3, __reg_2_3, __reg_2_3, __reg_2_4, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4); __CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0); __CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3); __CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4); __STORE(__h - 11, __reg_8_2); __reg_2_1 = __reg_1_1; __CALC3(__reg_3_2, __reg_3_2, __reg_3_2, __reg_3_4, __reg_3_3, __reg_2_0); __CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3); __CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4); __CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0); __STORE(__h - 10, __reg_8_3); __reg_3_0 = __reg_2_0; __CALC3(__reg_3_3, __reg_3_3, __reg_3_3, __reg_3_3, __reg_3_4, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4); __CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0); __CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3); __CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1); __STORE(__h - 9, __reg_8_4); __reg_3_1 = __reg_2_1; __CALC4(__reg_4_2, __reg_4_2, __reg_4_2, __reg_4_4, __reg_4_3, __reg_3_0); __CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3); __CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4); __CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2); __STORE(__h - 8, __reg_8_0); __reg_4_0 = __reg_3_0; __CALC4(__reg_4_3, __reg_4_3, __reg_4_3, __reg_4_3, __reg_4_4, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4); __CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0); __CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3); __STORE(__h - 7, __reg_8_1); __reg_4_1 = __reg_3_1; __CALC5(__reg_5_2, __reg_5_2, __reg_5_2, __reg_5_4, __reg_5_3, __reg_4_0); __CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3); __CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, 
__reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4); __STORE(__h - 6, __reg_8_2); __reg_5_0 = __reg_4_0; __CALC5(__reg_5_3, __reg_5_3, __reg_5_3, __reg_5_3, __reg_5_4, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4); __CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0); __STORE(__h - 5, __reg_8_3); __reg_5_1 = __reg_4_1; __CALC6(__reg_6_2, __reg_6_2, __reg_6_2, __reg_6_4, __reg_6_3, __reg_5_0); __CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3); __CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1); __STORE(__h - 4, __reg_8_4); __reg_6_0 = __reg_5_0; __CALC6(__reg_6_3, __reg_6_3, __reg_6_3, __reg_6_3, __reg_6_4, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4); __CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2); __STORE(__h - 3, __reg_8_0); __reg_6_1 = __reg_5_1; __CALC7(__reg_7_2, __reg_7_2, __reg_7_2, __reg_7_4, __reg_7_3, __reg_6_0); __CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3); __STORE(__h - 2, __reg_8_1); __reg_7_0 = __reg_6_0; __CALC7(__reg_7_3, __reg_7_3, __reg_7_3, __reg_7_3, __reg_7_4, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4); __STORE(__h - 1, __reg_8_2); __reg_7_1 = __reg_6_1; __CALC8(__reg_8_2, __reg_8_2, __reg_8_2, __reg_8_4, __reg_8_3, __reg_7_0); __STORE(__h + 0, __reg_8_3); __CALC8(__reg_8_3, __reg_8_3, __reg_8_3, __reg_8_3, __reg_8_4, __reg_7_1); __STORE(__h + 1, __reg_8_4); } else if (__h + 5 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0, __h + 0); __CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4); __CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0); __CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3); __CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4); __STORE(__h - 16, __reg_8_2); __LOAD(__reg_0, __h + 1); __CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); __CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0); __CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3); __CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4); __CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0); __STORE(__h - 15, __reg_8_3); __LOAD(__reg_0, __h + 2); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0); __CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3); __CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4); __CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0); __CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3); 
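// Each successively deeper remainder branch issues one more trailing __LOAD
// (__h + 0 through __h + K - 1) before switching to register-only drain steps,
// keeping the bottom halo rows flowing through the later pipeline stages.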
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1); __STORE(__h - 14, __reg_8_4); __LOAD(__reg_0, __h + 3); __CALC1(__reg_1_3, __reg_1_3, __reg_1_3, __reg_1_0, __reg_1_4, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4); __CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0); __CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3); __CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4); __CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2); __STORE(__h - 13, __reg_8_0); __reg_1_1 = __reg_0; __LOAD(__reg_0, __h + 4); __CALC1(__reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_0, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0); __CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3); __CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4); __CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0); __CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3); __STORE(__h - 12, __reg_8_1); __reg_1_2 = __reg_0; __CALC2(__reg_2_3, __reg_2_3, __reg_2_3, __reg_2_0, __reg_2_4, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4); __CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0); __CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3); __CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4); __STORE(__h - 11, __reg_8_2); __reg_2_1 = __reg_1_1; __CALC2(__reg_2_4, __reg_2_4, __reg_2_4, __reg_2_4, __reg_2_0, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0); __CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3); __CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4); __CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0); __STORE(__h - 10, __reg_8_3); __reg_2_2 = __reg_1_2; __CALC3(__reg_3_3, __reg_3_3, __reg_3_3, __reg_3_0, __reg_3_4, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4); __CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0); __CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3); __CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1); __STORE(__h - 9, __reg_8_4); __reg_3_1 = __reg_2_1; __CALC3(__reg_3_4, __reg_3_4, __reg_3_4, __reg_3_4, __reg_3_0, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0); __CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3); __CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4); __CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, 
__reg_7_2); __STORE(__h - 8, __reg_8_0); __reg_3_2 = __reg_2_2; __CALC4(__reg_4_3, __reg_4_3, __reg_4_3, __reg_4_0, __reg_4_4, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4); __CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0); __CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3); __STORE(__h - 7, __reg_8_1); __reg_4_1 = __reg_3_1; __CALC4(__reg_4_4, __reg_4_4, __reg_4_4, __reg_4_4, __reg_4_0, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0); __CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3); __CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4); __STORE(__h - 6, __reg_8_2); __reg_4_2 = __reg_3_2; __CALC5(__reg_5_3, __reg_5_3, __reg_5_3, __reg_5_0, __reg_5_4, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4); __CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0); __STORE(__h - 5, __reg_8_3); __reg_5_1 = __reg_4_1; __CALC5(__reg_5_4, __reg_5_4, __reg_5_4, __reg_5_4, __reg_5_0, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0); __CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3); __CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1); __STORE(__h - 4, __reg_8_4); __reg_5_2 = __reg_4_2; __CALC6(__reg_6_3, __reg_6_3, __reg_6_3, __reg_6_0, __reg_6_4, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4); __CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2); __STORE(__h - 3, __reg_8_0); __reg_6_1 = __reg_5_1; __CALC6(__reg_6_4, __reg_6_4, __reg_6_4, __reg_6_4, __reg_6_0, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0); __CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3); __STORE(__h - 2, __reg_8_1); __reg_6_2 = __reg_5_2; __CALC7(__reg_7_3, __reg_7_3, __reg_7_3, __reg_7_0, __reg_7_4, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4); __STORE(__h - 1, __reg_8_2); __reg_7_1 = __reg_6_1; __CALC7(__reg_7_4, __reg_7_4, __reg_7_4, __reg_7_4, __reg_7_0, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0); __STORE(__h + 0, __reg_8_3); __reg_7_2 = __reg_6_2; __CALC8(__reg_8_3, __reg_8_3, __reg_8_3, __reg_8_0, __reg_8_4, __reg_7_1); __STORE(__h + 1, __reg_8_4); __CALC8(__reg_8_4, __reg_8_4, __reg_8_4, __reg_8_4, __reg_8_0, __reg_7_2); __STORE(__h + 2, __reg_8_0); } else if (__h + 6 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0, __h + 0); __CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4); __CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0); __CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3); __CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4); __STORE(__h - 16, __reg_8_2); __LOAD(__reg_0, __h + 1); 
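// The 16-row gap between __LOAD(__reg_0, __h + k) and __STORE(__h - 16, ...) equals
// __halo1 (2) times the eight fused time steps of this kernel: an output row can be
// committed only after every pipelined step that reads its two-row halo has run.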
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); __CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0); __CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3); __CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4); __CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0); __STORE(__h - 15, __reg_8_3); __LOAD(__reg_0, __h + 2); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0); __CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3); __CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4); __CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0); __CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3); __CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1); __STORE(__h - 14, __reg_8_4); __LOAD(__reg_0, __h + 3); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4); __CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0); __CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3); __CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4); __CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2); __STORE(__h - 13, __reg_8_0); __LOAD(__reg_0, __h + 4); __CALC1(__reg_1_4, __reg_1_4, __reg_1_4, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0); __CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3); __CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4); __CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0); __CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3); __STORE(__h - 12, __reg_8_1); __reg_1_2 = __reg_0; __LOAD(__reg_0, __h + 5); __CALC1(__reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_1, __reg_0); __CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4); __CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0); __CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3); __CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4); __STORE(__h - 11, __reg_8_2); __reg_1_3 = __reg_0; __CALC2(__reg_2_4, __reg_2_4, __reg_2_4, __reg_2_1, __reg_2_0, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0); __CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3); __CALC5(__reg_5_3, __reg_5_2, __reg_5_1, 
__reg_5_0, __reg_5_4, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4); __CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0); __STORE(__h - 10, __reg_8_3); __reg_2_2 = __reg_1_2; __CALC2(__reg_2_0, __reg_2_0, __reg_2_0, __reg_2_0, __reg_2_1, __reg_1_3); __CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4); __CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0); __CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3); __CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1); __STORE(__h - 9, __reg_8_4); __reg_2_3 = __reg_1_3; __CALC3(__reg_3_4, __reg_3_4, __reg_3_4, __reg_3_1, __reg_3_0, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0); __CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3); __CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4); __CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2); __STORE(__h - 8, __reg_8_0); __reg_3_2 = __reg_2_2; __CALC3(__reg_3_0, __reg_3_0, __reg_3_0, __reg_3_0, __reg_3_1, __reg_2_3); __CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4); __CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0); __CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3); __STORE(__h - 7, __reg_8_1); __reg_3_3 = __reg_2_3; __CALC4(__reg_4_4, __reg_4_4, __reg_4_4, __reg_4_1, __reg_4_0, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0); __CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3); __CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4); __STORE(__h - 6, __reg_8_2); __reg_4_2 = __reg_3_2; __CALC4(__reg_4_0, __reg_4_0, __reg_4_0, __reg_4_0, __reg_4_1, __reg_3_3); __CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4); __CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0); __STORE(__h - 5, __reg_8_3); __reg_4_3 = __reg_3_3; __CALC5(__reg_5_4, __reg_5_4, __reg_5_4, __reg_5_1, __reg_5_0, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0); __CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3); __CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1); __STORE(__h - 4, __reg_8_4); __reg_5_2 = __reg_4_2; __CALC5(__reg_5_0, __reg_5_0, __reg_5_0, __reg_5_0, __reg_5_1, __reg_4_3); __CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4); __CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2); __STORE(__h - 3, __reg_8_0); __reg_5_3 = __reg_4_3; __CALC6(__reg_6_4, __reg_6_4, __reg_6_4, __reg_6_1, __reg_6_0, __reg_5_2); __CALC7(__reg_7_2, 
__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0); __CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3); __STORE(__h - 2, __reg_8_1); __reg_6_2 = __reg_5_2; __CALC6(__reg_6_0, __reg_6_0, __reg_6_0, __reg_6_0, __reg_6_1, __reg_5_3); __CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4); __STORE(__h - 1, __reg_8_2); __reg_6_3 = __reg_5_3; __CALC7(__reg_7_4, __reg_7_4, __reg_7_4, __reg_7_1, __reg_7_0, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0); __STORE(__h + 0, __reg_8_3); __reg_7_2 = __reg_6_2; __CALC7(__reg_7_0, __reg_7_0, __reg_7_0, __reg_7_0, __reg_7_1, __reg_6_3); __CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1); __STORE(__h + 1, __reg_8_4); __reg_7_3 = __reg_6_3; __CALC8(__reg_8_4, __reg_8_4, __reg_8_4, __reg_8_1, __reg_8_0, __reg_7_2); __STORE(__h + 2, __reg_8_0); __CALC8(__reg_8_0, __reg_8_0, __reg_8_0, __reg_8_0, __reg_8_1, __reg_7_3); __STORE(__h + 3, __reg_8_1); } } else { for (__h = 33; __h <= __side1LenOl - 5;) { __LOAD(__reg_0, __h); __CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4); __CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0); __CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3); __CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4); __STORE(__h - 16, __reg_8_2); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); __CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0); __CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3); __CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4); __CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0); __STORE(__h - 16, __reg_8_3); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0); __CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3); __CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4); __CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0); __CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3); __CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1); __STORE(__h - 16, __reg_8_4); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4); __CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0); __CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3); __CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, 
__reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4); __CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2); __STORE(__h - 16, __reg_8_0); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0); __CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3); __CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4); __CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0); __CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3); __STORE(__h - 16, __reg_8_1); __h++; } if (__h == __side1LenOl) return; __LOAD(__reg_0, __h); __CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4); __CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0); __CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3); __CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4); __STORE(__h - 16, __reg_8_2); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0, __h); __CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); __CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0); __CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3); __CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4); __CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0); __STORE(__h - 16, __reg_8_3); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0, __h); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0); __CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3); __CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4); __CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0); __CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3); __CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1); __STORE(__h - 16, __reg_8_4); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0, __h); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4); __CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0); __CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3); __CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4); __CALC8(__reg_8_4, __reg_8_3, __reg_8_2, 
__reg_8_1, __reg_8_0, __reg_7_2); __STORE(__h - 16, __reg_8_0); __h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0); __CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3); __CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4); __CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0); __CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3); __STORE(__h - 16, __reg_8_1); __h++;
}
}

__global__ void kernel0_7(double *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
    const AN5D_TYPE __c0Len = (timestep - 0);
    const AN5D_TYPE __c0Pad = (0);
    #define __c0 c0
    const AN5D_TYPE __c1Len = (dimsize - 2 - 2);
    const AN5D_TYPE __c1Pad = (2);
    #define __c1 c1
    const AN5D_TYPE __c2Len = (dimsize - 2 - 2);
    const AN5D_TYPE __c2Pad = (2);
    #define __c2 c2
    const AN5D_TYPE __halo1 = 2;
    const AN5D_TYPE __halo2 = 2;
    const AN5D_TYPE __side0Len = 7;
    const AN5D_TYPE __side1Len = 512;
    const AN5D_TYPE __side2Len = 484;
    const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
    const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
    const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
    const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
    const AN5D_TYPE __blockSize = 1 * __side2LenOl;
    const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
    const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
    const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
    const AN5D_TYPE __local_c2 = __tid;
    const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
    const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
    double __reg_0;
    double __reg_1_0; double __reg_1_1; double __reg_1_2; double __reg_1_3; double __reg_1_4;
    double __reg_2_0; double __reg_2_1; double __reg_2_2; double __reg_2_3; double __reg_2_4;
    double __reg_3_0; double __reg_3_1; double __reg_3_2; double __reg_3_3; double __reg_3_4;
    double __reg_4_0; double __reg_4_1; double __reg_4_2; double __reg_4_3; double __reg_4_4;
    double __reg_5_0; double __reg_5_1; double __reg_5_2; double __reg_5_3; double __reg_5_4;
    double __reg_6_0; double __reg_6_1; double __reg_6_2; double __reg_6_3; double __reg_6_4;
    double __reg_7_0; double __reg_7_1; double __reg_7_2; double __reg_7_3; double __reg_7_4;
    __shared__ double __a_sb_double[__blockSize * 2];
    double *__a_sb = __a_sb_double;
    const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
    const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
    const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
    const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2);
    const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3);
    const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4);
    const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5);
    const AN5D_TYPE __writeValid6 = __updateValid && __local_c2 >= (__halo2 * 6) && __local_c2 < __side2LenOl - (__halo2 * 6);
    const AN5D_TYPE __writeValid7 = __updateValid && __local_c2 >= (__halo2 * 7) && __local_c2 < __side2LenOl - (__halo2 * 7);
    const AN5D_TYPE __storeValid = __writeValid7;
    AN5D_TYPE __c1;
    AN5D_TYPE __h;
    const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_0_wrap(__rn0, __a) do { __rn0 = (((((((((((((((((((((((((0.03125f * (__SBREF(__a_sb, -2))) + (0.03126f * (__SBREF(__a_sb, -1)))) + (0.03127f * (__REGREF(__a, 0)))) + (0.03128f * (__SBREF(__a_sb, 1)))) + (0.03129f * (__SBREF(__a_sb, 2)))))))))))))))))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_0(out, a) do { __CALCEXPR_0_wrap(out, a); } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_1_wrap(__rn0, __a) do { __rn0 = (((((((((((((((((((((0.03130f * (__SBREF(__a_sb, -2)))) + (0.03131f * (__SBREF(__a_sb, -1)))) + (0.03132f * (__REGREF(__a, 0)))) + (0.03133f * (__SBREF(__a_sb, 1)))) + (0.03134f * (__SBREF(__a_sb, 2))))))))))))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_1(out, a) do { double etmp; __CALCEXPR_1_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_2_wrap(__rn0, __a) do { __rn0 = ((((((((((((((((0.03135f * (__SBREF(__a_sb, -2)))) + (0.03136f * (__SBREF(__a_sb, -1)))) + (0.24712f * (__REGREF(__a, 0)))) + (0.03138f * (__SBREF(__a_sb, 1)))) + (0.03139f * (__SBREF(__a_sb, 2)))))))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_2(out, a) do { double etmp; __CALCEXPR_2_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_3_wrap(__rn0, __a) do { __rn0 = (((((((((((0.03140f * (__SBREF(__a_sb, -2)))) + (0.03141f * (__SBREF(__a_sb, -1)))) + (0.03142f * (__REGREF(__a, 0)))) + (0.03143f * (__SBREF(__a_sb, 1)))) + (0.03144f * (__SBREF(__a_sb, 2))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_3(out, a) do { double etmp; __CALCEXPR_3_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_4_wrap(__rn0, __a) do { __rn0 = ((((((0.03145f * (__SBREF(__a_sb, -2)))) + (0.03146f * (__SBREF(__a_sb, -1)))) + (0.03147f * (__REGREF(__a, 0)))) + (0.03148f * (__SBREF(__a_sb, 1)))) + (0.03149f * (__SBREF(__a_sb, 2)))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_4(out, a) do { double etmp; __CALCEXPR_4_wrap(etmp, a); out += etmp; } while (0);
#define __CALCEXPR(out0, out1, out2, out3, out4, reg) do { __CALCEXPR_0(out0, reg); __CALCEXPR_1(out1, reg); __CALCEXPR_2(out2, reg); __CALCEXPR_3(out3, reg); __CALCEXPR_4(out4, reg); } while (0);
#define __CALC1(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid1) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC2(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid2) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC3(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid3) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC4(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid4) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC5(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid5) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC6(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid6) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC7(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid7) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __STORE(h, out) do { if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __DEST = out; }} while (0)
    if (__c1Id == 0)
    {
      __LOAD(__reg_0, 0); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_0); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_0); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_0); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_0); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_0); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_0);
      __LOAD(__reg_0, 1); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0); __CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_0); __CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_0); __CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_0); __CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_0); __CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_0); __CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_0);
      __LOAD(__reg_0, 2);
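// The coefficient tables in __CALCEXPR_0..4 (0.03125f..0.03149f read row-major, with
// centre weight 0.24712f) suggest a dense 5x5 box stencil applied __side0Len times per
// kernel launch:
//   A[(c0+1)%2][c1][c2] = sum over r, c in [-2, 2] of w[r][c] * A[c0%2][c1+r][c2+c]
// Blocks with __c1Id == 0 sit against the top halo, so the pipeline is primed row by
// row: each freshly loaded row enters __CALC1, and the deeper stages join only once
// enough rows are buffered in the rotating registers.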
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __LOAD(__reg_0, 3); __CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); __LOAD(__reg_0, 4); __CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); __CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2); __LOAD(__reg_0, 5); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0); __CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3); __LOAD(__reg_0, 6); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4); __CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2); __LOAD(__reg_0, 7); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0); __CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3); __LOAD(__reg_0, 8); __CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4); __CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2); __LOAD(__reg_0, 9); __CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); __CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0); __CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3); __LOAD(__reg_0, 10); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0); __CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3); __CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4); __CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2); __LOAD(__reg_0, 11); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4); __CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0); __CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3); __LOAD(__reg_0, 12); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0); __CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3); __CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4); __CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2); __LOAD(__reg_0, 13); __CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4); __CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0); __CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3); __LOAD(__reg_0, 14); __CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); __CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, 
__reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0); __CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3); __CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4); __CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2); __LOAD(__reg_0, 15); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0); __CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3); __CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4); __CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0); __CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3); __LOAD(__reg_0, 16); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4); __CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0); __CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3); __CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4); __STORE(2, __reg_7_2); __LOAD(__reg_0, 17); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0); __CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3); __CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4); __CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0); __STORE(3, __reg_7_3); __LOAD(__reg_0, 18); __CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4); __CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0); __CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3); __CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1); __STORE(4, __reg_7_4); __LOAD(__reg_0, 19); __CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); __CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0); __CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3); __CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4); __CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2); __STORE(5, __reg_7_0); __LOAD(__reg_0, 20); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0); __CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3); __CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4); __CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2); 
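/* Pipeline full from __LOAD 16 onward: each load of row h retires one
   result row through __STORE(h - 14) -- a latency of 14 rows, i.e. 7 fused
   time steps times a halo of 2 rows per step. */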
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0); __CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3); __STORE(6, __reg_7_1); __LOAD(__reg_0, 21); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4); __CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0); __CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3); __CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4); __STORE(7, __reg_7_2); __LOAD(__reg_0, 22); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0); __CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3); __CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4); __CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0); __STORE(8, __reg_7_3); __LOAD(__reg_0, 23); __CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4); __CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0); __CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3); __CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1); __STORE(9, __reg_7_4); __LOAD(__reg_0, 24); __CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); __CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0); __CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3); __CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4); __CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2); __STORE(10, __reg_7_0); __LOAD(__reg_0, 25); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0); __CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3); __CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4); __CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0); __CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3); __STORE(11, __reg_7_1); __LOAD(__reg_0, 26); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4); __CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0); __CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3); __CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4); 
__STORE(12, __reg_7_2); __LOAD(__reg_0, 27); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0); __CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3); __CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4); __CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0); __STORE(13, __reg_7_3); __LOAD(__reg_0, 28); __CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4); __CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0); __CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3); __CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1); __STORE(14, __reg_7_4); } else { __LOAD(__reg_0, 0); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0); __LOAD(__reg_0, 1); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0); __LOAD(__reg_0, 2); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __LOAD(__reg_0, 3); __CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); __LOAD(__reg_0, 4); __CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); __CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2); __LOAD(__reg_0, 5); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0); __CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3); __LOAD(__reg_0, 6); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4); __LOAD(__reg_0, 7); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0); __LOAD(__reg_0, 8); __CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4); __LOAD(__reg_0, 9); __CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); __CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0); __LOAD(__reg_0, 10); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0); __CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3); __CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1); __LOAD(__reg_0, 11); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4); __CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2); __LOAD(__reg_0, 12); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0); __CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3); __CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1); __LOAD(__reg_0, 13); __CALC1(__reg_1_0, 
__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4); __CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2); __LOAD(__reg_0, 14); __CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); __CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0); __CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3); __LOAD(__reg_0, 15); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0); __CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3); __CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4); __LOAD(__reg_0, 16); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4); __CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0); __CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3); __LOAD(__reg_0, 17); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0); __CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3); __CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4); __LOAD(__reg_0, 18); __CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4); __CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0); __LOAD(__reg_0, 19); __CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); __CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0); __CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3); __CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1); __LOAD(__reg_0, 20); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0); __CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3); __CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4); __CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0); __LOAD(__reg_0, 21); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4); __CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0); __CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3); __CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1); __LOAD(__reg_0, 22); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_2, __reg_2_1, 
__reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0); __CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3); __CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4); __CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2); __LOAD(__reg_0, 23); __CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4); __CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0); __CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3); __LOAD(__reg_0, 24); __CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); __CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0); __CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3); __CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4); __CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2); __LOAD(__reg_0, 25); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0); __CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3); __CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4); __CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0); __CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3); __LOAD(__reg_0, 26); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4); __CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0); __CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3); __CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4); __LOAD(__reg_0, 27); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0); __CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3); __CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4); __CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0); __LOAD(__reg_0, 28); __CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4); __CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0); __CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3); __CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1); __STORE(14, 
__reg_7_4); } __a_sb = __a_sb_double + __blockSize * 1; if (__c1Id == __side1Num - 1) { for (__h = 29; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 7;) { __LOAD(__reg_0, __h); __CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); __CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0); __CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3); __CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4); __CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2); __STORE(__h - 14, __reg_7_0); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0); __CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3); __CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4); __CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0); __CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3); __STORE(__h - 14, __reg_7_1); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4); __CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0); __CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3); __CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4); __STORE(__h - 14, __reg_7_2); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0); __CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3); __CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4); __CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0); __STORE(__h - 14, __reg_7_3); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4); __CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0); __CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3); __CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1); __STORE(__h - 14, __reg_7_4); __h++; __DB_SWITCH(); __syncthreads(); } if (0) {} else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0, __h + 0); __CALC1(__reg_1_1, __reg_1_1, __reg_1_1, __reg_1_3, __reg_1_2, __reg_0); __CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0); __CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3); __CALC5(__reg_5_3, __reg_5_2, __reg_5_1, 
__reg_5_0, __reg_5_4, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4); __CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2); __STORE(__h - 14, __reg_7_0); __reg_1_4 = __reg_0; __LOAD(__reg_0, __h + 1); __CALC1(__reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_3, __reg_0); __CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3); __CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4); __CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0); __CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3); __STORE(__h - 13, __reg_7_1); __reg_1_0 = __reg_0; __CALC2(__reg_2_1, __reg_2_1, __reg_2_1, __reg_2_3, __reg_2_2, __reg_1_4); __CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0); __CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3); __CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4); __STORE(__h - 12, __reg_7_2); __reg_2_4 = __reg_1_4; __CALC2(__reg_2_2, __reg_2_2, __reg_2_2, __reg_2_2, __reg_2_3, __reg_1_0); __CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3); __CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4); __CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0); __STORE(__h - 11, __reg_7_3); __reg_2_0 = __reg_1_0; __CALC3(__reg_3_1, __reg_3_1, __reg_3_1, __reg_3_3, __reg_3_2, __reg_2_4); __CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0); __CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3); __CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1); __STORE(__h - 10, __reg_7_4); __reg_3_4 = __reg_2_4; __CALC3(__reg_3_2, __reg_3_2, __reg_3_2, __reg_3_2, __reg_3_3, __reg_2_0); __CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3); __CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4); __CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2); __STORE(__h - 9, __reg_7_0); __reg_3_0 = __reg_2_0; __CALC4(__reg_4_1, __reg_4_1, __reg_4_1, __reg_4_3, __reg_4_2, __reg_3_4); __CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0); __CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3); __STORE(__h - 8, __reg_7_1); __reg_4_4 = __reg_3_4; __CALC4(__reg_4_2, __reg_4_2, __reg_4_2, __reg_4_2, __reg_4_3, __reg_3_0); __CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3); __CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4); __STORE(__h - 7, __reg_7_2); __reg_4_0 = __reg_3_0; __CALC5(__reg_5_1, __reg_5_1, __reg_5_1, __reg_5_3, __reg_5_2, __reg_4_4); __CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2); 
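/* Bottom-boundary drain of the __h + 2 remainder case: after the final two
   loads, __CALCk invocations that repeat a register (e.g.
   __CALC2(__reg_2_2, __reg_2_2, __reg_2_2, __reg_2_2, ...)) alias the
   outputs that would fall below the tile, and the saved __reg_k_* copies
   stand in for the missing halo rows while the stages flush. */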
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0); __STORE(__h - 6, __reg_7_3); __reg_5_4 = __reg_4_4; __CALC5(__reg_5_2, __reg_5_2, __reg_5_2, __reg_5_2, __reg_5_3, __reg_4_0); __CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3); __CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1); __STORE(__h - 5, __reg_7_4); __reg_5_0 = __reg_4_0; __CALC6(__reg_6_1, __reg_6_1, __reg_6_1, __reg_6_3, __reg_6_2, __reg_5_4); __CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2); __STORE(__h - 4, __reg_7_0); __reg_6_4 = __reg_5_4; __CALC6(__reg_6_2, __reg_6_2, __reg_6_2, __reg_6_2, __reg_6_3, __reg_5_0); __CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3); __STORE(__h - 3, __reg_7_1); __reg_6_0 = __reg_5_0; __CALC7(__reg_7_1, __reg_7_1, __reg_7_1, __reg_7_3, __reg_7_2, __reg_6_4); __STORE(__h - 2, __reg_7_2); __CALC7(__reg_7_2, __reg_7_2, __reg_7_2, __reg_7_2, __reg_7_3, __reg_6_0); __STORE(__h - 1, __reg_7_3); } else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0, __h + 0); __CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); __CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0); __CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3); __CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4); __CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2); __STORE(__h - 14, __reg_7_0); __LOAD(__reg_0, __h + 1); __CALC1(__reg_1_2, __reg_1_2, __reg_1_2, __reg_1_4, __reg_1_3, __reg_0); __CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3); __CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4); __CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0); __CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3); __STORE(__h - 13, __reg_7_1); __reg_1_0 = __reg_0; __LOAD(__reg_0, __h + 2); __CALC1(__reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_4, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4); __CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0); __CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3); __CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4); __STORE(__h - 12, __reg_7_2); __reg_1_1 = __reg_0; __CALC2(__reg_2_2, __reg_2_2, __reg_2_2, __reg_2_4, __reg_2_3, __reg_1_0); __CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3); __CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4); __CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0); __STORE(__h - 11, __reg_7_3); __reg_2_0 = __reg_1_0; __CALC2(__reg_2_3, __reg_2_3, __reg_2_3, __reg_2_3, __reg_2_4, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4); __CALC4(__reg_4_4, __reg_4_3, 
__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0); __CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3); __CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1); __STORE(__h - 10, __reg_7_4); __reg_2_1 = __reg_1_1; __CALC3(__reg_3_2, __reg_3_2, __reg_3_2, __reg_3_4, __reg_3_3, __reg_2_0); __CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3); __CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4); __CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2); __STORE(__h - 9, __reg_7_0); __reg_3_0 = __reg_2_0; __CALC3(__reg_3_3, __reg_3_3, __reg_3_3, __reg_3_3, __reg_3_4, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4); __CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0); __CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3); __STORE(__h - 8, __reg_7_1); __reg_3_1 = __reg_2_1; __CALC4(__reg_4_2, __reg_4_2, __reg_4_2, __reg_4_4, __reg_4_3, __reg_3_0); __CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3); __CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4); __STORE(__h - 7, __reg_7_2); __reg_4_0 = __reg_3_0; __CALC4(__reg_4_3, __reg_4_3, __reg_4_3, __reg_4_3, __reg_4_4, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4); __CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0); __STORE(__h - 6, __reg_7_3); __reg_4_1 = __reg_3_1; __CALC5(__reg_5_2, __reg_5_2, __reg_5_2, __reg_5_4, __reg_5_3, __reg_4_0); __CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3); __CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1); __STORE(__h - 5, __reg_7_4); __reg_5_0 = __reg_4_0; __CALC5(__reg_5_3, __reg_5_3, __reg_5_3, __reg_5_3, __reg_5_4, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4); __CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2); __STORE(__h - 4, __reg_7_0); __reg_5_1 = __reg_4_1; __CALC6(__reg_6_2, __reg_6_2, __reg_6_2, __reg_6_4, __reg_6_3, __reg_5_0); __CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3); __STORE(__h - 3, __reg_7_1); __reg_6_0 = __reg_5_0; __CALC6(__reg_6_3, __reg_6_3, __reg_6_3, __reg_6_3, __reg_6_4, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4); __STORE(__h - 2, __reg_7_2); __reg_6_1 = __reg_5_1; __CALC7(__reg_7_2, __reg_7_2, __reg_7_2, __reg_7_4, __reg_7_3, __reg_6_0); __STORE(__h - 1, __reg_7_3); __CALC7(__reg_7_3, __reg_7_3, __reg_7_3, __reg_7_3, __reg_7_4, __reg_6_1); __STORE(__h + 0, __reg_7_4); } else if (__h + 4 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0, __h + 0); __CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); __CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0); __CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3); __CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, 
__reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4); __CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2); __STORE(__h - 14, __reg_7_0); __LOAD(__reg_0, __h + 1); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0); __CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3); __CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4); __CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0); __CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3); __STORE(__h - 13, __reg_7_1); __LOAD(__reg_0, __h + 2); __CALC1(__reg_1_3, __reg_1_3, __reg_1_3, __reg_1_0, __reg_1_4, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4); __CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0); __CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3); __CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4); __STORE(__h - 12, __reg_7_2); __reg_1_1 = __reg_0; __LOAD(__reg_0, __h + 3); __CALC1(__reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_0, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0); __CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3); __CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4); __CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0); __STORE(__h - 11, __reg_7_3); __reg_1_2 = __reg_0; __CALC2(__reg_2_3, __reg_2_3, __reg_2_3, __reg_2_0, __reg_2_4, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4); __CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0); __CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3); __CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1); __STORE(__h - 10, __reg_7_4); __reg_2_1 = __reg_1_1; __CALC2(__reg_2_4, __reg_2_4, __reg_2_4, __reg_2_4, __reg_2_0, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0); __CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3); __CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4); __CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2); __STORE(__h - 9, __reg_7_0); __reg_2_2 = __reg_1_2; __CALC3(__reg_3_3, __reg_3_3, __reg_3_3, __reg_3_0, __reg_3_4, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4); __CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0); __CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3); __STORE(__h - 8, __reg_7_1); __reg_3_1 = __reg_2_1; __CALC3(__reg_3_4, __reg_3_4, __reg_3_4, __reg_3_4, __reg_3_0, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0); __CALC5(__reg_5_0, __reg_5_4, __reg_5_3, 
__reg_5_2, __reg_5_1, __reg_4_3); __CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4); __STORE(__h - 7, __reg_7_2); __reg_3_2 = __reg_2_2; __CALC4(__reg_4_3, __reg_4_3, __reg_4_3, __reg_4_0, __reg_4_4, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4); __CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0); __STORE(__h - 6, __reg_7_3); __reg_4_1 = __reg_3_1; __CALC4(__reg_4_4, __reg_4_4, __reg_4_4, __reg_4_4, __reg_4_0, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0); __CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3); __CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1); __STORE(__h - 5, __reg_7_4); __reg_4_2 = __reg_3_2; __CALC5(__reg_5_3, __reg_5_3, __reg_5_3, __reg_5_0, __reg_5_4, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4); __CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2); __STORE(__h - 4, __reg_7_0); __reg_5_1 = __reg_4_1; __CALC5(__reg_5_4, __reg_5_4, __reg_5_4, __reg_5_4, __reg_5_0, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0); __CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3); __STORE(__h - 3, __reg_7_1); __reg_5_2 = __reg_4_2; __CALC6(__reg_6_3, __reg_6_3, __reg_6_3, __reg_6_0, __reg_6_4, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4); __STORE(__h - 2, __reg_7_2); __reg_6_1 = __reg_5_1; __CALC6(__reg_6_4, __reg_6_4, __reg_6_4, __reg_6_4, __reg_6_0, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0); __STORE(__h - 1, __reg_7_3); __reg_6_2 = __reg_5_2; __CALC7(__reg_7_3, __reg_7_3, __reg_7_3, __reg_7_0, __reg_7_4, __reg_6_1); __STORE(__h + 0, __reg_7_4); __CALC7(__reg_7_4, __reg_7_4, __reg_7_4, __reg_7_4, __reg_7_0, __reg_6_2); __STORE(__h + 1, __reg_7_0); } else if (__h + 5 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0, __h + 0); __CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); __CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0); __CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3); __CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4); __CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2); __STORE(__h - 14, __reg_7_0); __LOAD(__reg_0, __h + 1); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0); __CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3); __CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4); __CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0); __CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3); __STORE(__h - 13, __reg_7_1); __LOAD(__reg_0, __h + 2); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4); __CALC3(__reg_3_4, __reg_3_3, __reg_3_2, 
__reg_3_1, __reg_3_0, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0); __CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3); __CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4); __STORE(__h - 12, __reg_7_2); __LOAD(__reg_0, __h + 3); __CALC1(__reg_1_4, __reg_1_4, __reg_1_4, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0); __CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3); __CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4); __CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0); __STORE(__h - 11, __reg_7_3); __reg_1_2 = __reg_0; __LOAD(__reg_0, __h + 4); __CALC1(__reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_1, __reg_0); __CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4); __CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0); __CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3); __CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1); __STORE(__h - 10, __reg_7_4); __reg_1_3 = __reg_0; __CALC2(__reg_2_4, __reg_2_4, __reg_2_4, __reg_2_1, __reg_2_0, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0); __CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3); __CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4); __CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2); __STORE(__h - 9, __reg_7_0); __reg_2_2 = __reg_1_2; __CALC2(__reg_2_0, __reg_2_0, __reg_2_0, __reg_2_0, __reg_2_1, __reg_1_3); __CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4); __CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0); __CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3); __STORE(__h - 8, __reg_7_1); __reg_2_3 = __reg_1_3; __CALC3(__reg_3_4, __reg_3_4, __reg_3_4, __reg_3_1, __reg_3_0, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0); __CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3); __CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4); __STORE(__h - 7, __reg_7_2); __reg_3_2 = __reg_2_2; __CALC3(__reg_3_0, __reg_3_0, __reg_3_0, __reg_3_0, __reg_3_1, __reg_2_3); __CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4); __CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0); __STORE(__h - 6, __reg_7_3); __reg_3_3 = __reg_2_3; __CALC4(__reg_4_4, __reg_4_4, __reg_4_4, __reg_4_1, __reg_4_0, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, 
__reg_5_3, __reg_4_0); __CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3); __CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1); __STORE(__h - 5, __reg_7_4); __reg_4_2 = __reg_3_2; __CALC4(__reg_4_0, __reg_4_0, __reg_4_0, __reg_4_0, __reg_4_1, __reg_3_3); __CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4); __CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2); __STORE(__h - 4, __reg_7_0); __reg_4_3 = __reg_3_3; __CALC5(__reg_5_4, __reg_5_4, __reg_5_4, __reg_5_1, __reg_5_0, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0); __CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3); __STORE(__h - 3, __reg_7_1); __reg_5_2 = __reg_4_2; __CALC5(__reg_5_0, __reg_5_0, __reg_5_0, __reg_5_0, __reg_5_1, __reg_4_3); __CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4); __STORE(__h - 2, __reg_7_2); __reg_5_3 = __reg_4_3; __CALC6(__reg_6_4, __reg_6_4, __reg_6_4, __reg_6_1, __reg_6_0, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0); __STORE(__h - 1, __reg_7_3); __reg_6_2 = __reg_5_2; __CALC6(__reg_6_0, __reg_6_0, __reg_6_0, __reg_6_0, __reg_6_1, __reg_5_3); __CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1); __STORE(__h + 0, __reg_7_4); __reg_6_3 = __reg_5_3; __CALC7(__reg_7_4, __reg_7_4, __reg_7_4, __reg_7_1, __reg_7_0, __reg_6_2); __STORE(__h + 1, __reg_7_0); __CALC7(__reg_7_0, __reg_7_0, __reg_7_0, __reg_7_0, __reg_7_1, __reg_6_3); __STORE(__h + 2, __reg_7_1); } else if (__h + 6 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0, __h + 0); __CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); __CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0); __CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3); __CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4); __CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2); __STORE(__h - 14, __reg_7_0); __LOAD(__reg_0, __h + 1); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0); __CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3); __CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4); __CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0); __CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3); __STORE(__h - 13, __reg_7_1); __LOAD(__reg_0, __h + 2); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4); __CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0); __CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3); __CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4); __STORE(__h - 12, __reg_7_2); 
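/* Remainder dispatch: one unrolled drain per possible row count left in
   the tile (__h + 2 up to __h + 6); this is the __h + 6 case, which still
   loads rows __h + 0 .. __h + 5 before flushing the deeper stages. */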
__LOAD(__reg_0, __h + 3); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0); __CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3); __CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4); __CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0); __STORE(__h - 11, __reg_7_3); __LOAD(__reg_0, __h + 4); __CALC1(__reg_1_0, __reg_1_0, __reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4); __CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0); __CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3); __CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1); __STORE(__h - 10, __reg_7_4); __reg_1_3 = __reg_0; __LOAD(__reg_0, __h + 5); __CALC1(__reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_2, __reg_0); __CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0); __CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3); __CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4); __CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2); __STORE(__h - 9, __reg_7_0); __reg_1_4 = __reg_0; __CALC2(__reg_2_0, __reg_2_0, __reg_2_0, __reg_2_2, __reg_2_1, __reg_1_3); __CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4); __CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0); __CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3); __STORE(__h - 8, __reg_7_1); __reg_2_3 = __reg_1_3; __CALC2(__reg_2_1, __reg_2_1, __reg_2_1, __reg_2_1, __reg_2_2, __reg_1_4); __CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0); __CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3); __CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4); __STORE(__h - 7, __reg_7_2); __reg_2_4 = __reg_1_4; __CALC3(__reg_3_0, __reg_3_0, __reg_3_0, __reg_3_2, __reg_3_1, __reg_2_3); __CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4); __CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0); __STORE(__h - 6, __reg_7_3); __reg_3_3 = __reg_2_3; __CALC3(__reg_3_1, __reg_3_1, __reg_3_1, __reg_3_1, __reg_3_2, __reg_2_4); __CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0); __CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3); __CALC7(__reg_7_3, __reg_7_2, __reg_7_1, 
__reg_7_0, __reg_7_4, __reg_6_1); __STORE(__h - 5, __reg_7_4); __reg_3_4 = __reg_2_4; __CALC4(__reg_4_0, __reg_4_0, __reg_4_0, __reg_4_2, __reg_4_1, __reg_3_3); __CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4); __CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2); __STORE(__h - 4, __reg_7_0); __reg_4_3 = __reg_3_3; __CALC4(__reg_4_1, __reg_4_1, __reg_4_1, __reg_4_1, __reg_4_2, __reg_3_4); __CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0); __CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3); __STORE(__h - 3, __reg_7_1); __reg_4_4 = __reg_3_4; __CALC5(__reg_5_0, __reg_5_0, __reg_5_0, __reg_5_2, __reg_5_1, __reg_4_3); __CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4); __STORE(__h - 2, __reg_7_2); __reg_5_3 = __reg_4_3; __CALC5(__reg_5_1, __reg_5_1, __reg_5_1, __reg_5_1, __reg_5_2, __reg_4_4); __CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0); __STORE(__h - 1, __reg_7_3); __reg_5_4 = __reg_4_4; __CALC6(__reg_6_0, __reg_6_0, __reg_6_0, __reg_6_2, __reg_6_1, __reg_5_3); __CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1); __STORE(__h + 0, __reg_7_4); __reg_6_3 = __reg_5_3; __CALC6(__reg_6_1, __reg_6_1, __reg_6_1, __reg_6_1, __reg_6_2, __reg_5_4); __CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2); __STORE(__h + 1, __reg_7_0); __reg_6_4 = __reg_5_4; __CALC7(__reg_7_0, __reg_7_0, __reg_7_0, __reg_7_2, __reg_7_1, __reg_6_3); __STORE(__h + 2, __reg_7_1); __CALC7(__reg_7_1, __reg_7_1, __reg_7_1, __reg_7_1, __reg_7_2, __reg_6_4); __STORE(__h + 3, __reg_7_2); } } else { for (__h = 29; __h <= __side1LenOl - 5;) { __LOAD(__reg_0, __h); __CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); __CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0); __CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3); __CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4); __CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2); __STORE(__h - 14, __reg_7_0); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0); __CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3); __CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4); __CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0); __CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3); __STORE(__h - 14, __reg_7_1); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4); __CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0); __CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, 
__reg_5_1, __reg_4_3); __CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4); __STORE(__h - 14, __reg_7_2); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0); __CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3); __CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4); __CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0); __STORE(__h - 14, __reg_7_3); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4); __CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0); __CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3); __CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1); __STORE(__h - 14, __reg_7_4); __h++; __DB_SWITCH(); __syncthreads(); } if (__h == __side1LenOl) return; __LOAD(__reg_0, __h); __CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); __CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0); __CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3); __CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4); __CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2); __STORE(__h - 14, __reg_7_0); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0, __h); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0); __CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3); __CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4); __CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0); __CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3); __STORE(__h - 14, __reg_7_1); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0, __h); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4); __CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0); __CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3); __CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4); __STORE(__h - 14, __reg_7_2); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0, __h); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0); __CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3); __CALC4(__reg_4_3, 
__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4); __CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0); __STORE(__h - 14, __reg_7_3); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0, __h); __CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4); __CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0); __CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3); __CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1); __STORE(__h - 14, __reg_7_4); __h++; } }
__global__ void kernel0_6(double *A, int dimsize, int timestep, int c0) {
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 2 - 2); const AN5D_TYPE __c1Pad = (2);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 2 - 2); const AN5D_TYPE __c2Pad = (2);
#define __c2 c2
const AN5D_TYPE __halo1 = 2; const AN5D_TYPE __halo2 = 2; const AN5D_TYPE __side0Len = 6; const AN5D_TYPE __side1Len = 512; const AN5D_TYPE __side2Len = 488; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len; const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len; const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x; const AN5D_TYPE __local_c2 = __tid; const AN5D_TYPE __c1Id = blockIdx.x / __side2Num; const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
double __reg_0; double __reg_1_0; double __reg_1_1; double __reg_1_2; double __reg_1_3; double __reg_1_4; double __reg_2_0; double __reg_2_1; double __reg_2_2; double __reg_2_3; double __reg_2_4; double __reg_3_0; double __reg_3_1; double __reg_3_2; double __reg_3_3; double __reg_3_4; double __reg_4_0; double __reg_4_1; double __reg_4_2; double __reg_4_3; double __reg_4_4; double __reg_5_0; double __reg_5_1; double __reg_5_2; double __reg_5_3; double __reg_5_4; double __reg_6_0; double __reg_6_1; double __reg_6_2; double __reg_6_3; double __reg_6_4;
__shared__ double __a_sb_double[__blockSize * 2]; double *__a_sb = __a_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2; const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len; const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1); const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2); const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3); const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4); const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5); const AN5D_TYPE __writeValid6 = __updateValid && __local_c2 >= (__halo2 * 6) && __local_c2 < __side2LenOl - (__halo2 * 6); const AN5D_TYPE __storeValid = __writeValid6; AN5D_TYPE __c1; AN5D_TYPE __h; const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_0_wrap(__rn0, __a) do { __rn0 = (((((((((((((((((((((((((0.03125f * (__SBREF(__a_sb, -2))) + (0.03126f * (__SBREF(__a_sb, -1)))) + (0.03127f * (__REGREF(__a, 0)))) + (0.03128f * (__SBREF(__a_sb, 1)))) + (0.03129f * (__SBREF(__a_sb, 2)))))))))))))))))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_0(out, a) do { __CALCEXPR_0_wrap(out, a); } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_1_wrap(__rn0, __a) do { __rn0 = (((((((((((((((((((((0.03130f * (__SBREF(__a_sb, -2)))) + (0.03131f * (__SBREF(__a_sb, -1)))) + (0.03132f * (__REGREF(__a, 0)))) + (0.03133f * (__SBREF(__a_sb, 1)))) + (0.03134f * (__SBREF(__a_sb, 2))))))))))))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_1(out, a) do { double etmp; __CALCEXPR_1_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_2_wrap(__rn0, __a) do { __rn0 = ((((((((((((((((0.03135f * (__SBREF(__a_sb, -2)))) + (0.03136f * (__SBREF(__a_sb, -1)))) + (0.24712f * (__REGREF(__a, 0)))) + (0.03138f * (__SBREF(__a_sb, 1)))) + (0.03139f * (__SBREF(__a_sb, 2)))))))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_2(out, a) do { double etmp; __CALCEXPR_2_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_3_wrap(__rn0, __a) do { __rn0 = (((((((((((0.03140f * (__SBREF(__a_sb, -2)))) + (0.03141f * (__SBREF(__a_sb, -1)))) + (0.03142f * (__REGREF(__a, 0)))) + (0.03143f * (__SBREF(__a_sb, 1)))) + (0.03144f * (__SBREF(__a_sb, 2))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_3(out, a) do { double etmp; __CALCEXPR_3_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_4_wrap(__rn0, __a) do { __rn0 = ((((((0.03145f * (__SBREF(__a_sb, -2)))) + (0.03146f * (__SBREF(__a_sb, -1)))) + (0.03147f * (__REGREF(__a, 0)))) + (0.03148f * (__SBREF(__a_sb, 1)))) + (0.03149f * (__SBREF(__a_sb, 2)))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_4(out, a) do { double etmp; __CALCEXPR_4_wrap(etmp, a); out += etmp; } while (0);
#define __CALCEXPR(out0, out1, out2, out3, out4, reg) do { __CALCEXPR_0(out0, reg); __CALCEXPR_1(out1, reg); __CALCEXPR_2(out2, reg); __CALCEXPR_3(out3, reg); __CALCEXPR_4(out4, reg); } while (0);
#define __CALC1(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid1) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC2(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid2) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC3(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid3) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC4(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid4) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC5(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid5) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC6(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid6) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __STORE(h, out) do { if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __DEST = out; }} while (0)
if (__c1Id == 0) { __LOAD(__reg_0, 0); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_0); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_0); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_0); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_0); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_0); __LOAD(__reg_0, 1); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0); __CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_0); __CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_0); __CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_0); __CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_0); __CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_0); __LOAD(__reg_0, 2); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __LOAD(__reg_0, 3); __CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); __LOAD(__reg_0, 4); __CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); __CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0,
__reg_1_2); __LOAD(__reg_0, 5); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0); __CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3); __LOAD(__reg_0, 6); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4); __CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2); __LOAD(__reg_0, 7); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0); __CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3); __LOAD(__reg_0, 8); __CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4); __CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2); __LOAD(__reg_0, 9); __CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); __CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0); __CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3); __LOAD(__reg_0, 10); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0); __CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3); __CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4); __CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2); __LOAD(__reg_0, 11); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4); __CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0); __CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3); __LOAD(__reg_0, 12); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0); __CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3); __CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4); __CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2); __LOAD(__reg_0, 13); __CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4); __CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0); __CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3); __LOAD(__reg_0, 14); __CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); __CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0); __CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3); __CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4); __STORE(2, __reg_6_2); __LOAD(__reg_0, 15); 
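/* Editorial note (comment added for readability; not part of the generated
 * source): the kernel0_N family applies the same radius-2 2D stencil
 * (5x5 taps, coefficients 0.03125f..0.03149f with centre weight 0.24712f)
 * for N fused time steps per sweep, where N = __side0Len (6 here, 5 in
 * kernel0_5 below). __LOAD(reg, h) reads row h of the active plane of A,
 * selected by the time parity (__c0 % 2), into a register; each
 * __CALCk(out0..out4, in) advances pipeline stage k by staging the row
 * through shared memory (__CALCSETUP) and accumulating the five per-row
 * contributions __CALCEXPR_0..__CALCEXPR_4 into the five rotating output
 * registers __reg_k_0..__reg_k_4. */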
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0); __CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3); __CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4); __CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0); __STORE(3, __reg_6_3); __LOAD(__reg_0, 16); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4); __CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0); __CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3); __CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1); __STORE(4, __reg_6_4); __LOAD(__reg_0, 17); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0); __CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3); __CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4); __CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2); __STORE(5, __reg_6_0); __LOAD(__reg_0, 18); __CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4); __CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0); __CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3); __STORE(6, __reg_6_1); __LOAD(__reg_0, 19); __CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); __CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0); __CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3); __CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4); __STORE(7, __reg_6_2); __LOAD(__reg_0, 20); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0); __CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3); __CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4); __CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0); __STORE(8, __reg_6_3); __LOAD(__reg_0, 21); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4); __CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0); __CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3); __CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1); __STORE(9, __reg_6_4); __LOAD(__reg_0, 22); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_2, __reg_2_1, 
__reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0); __CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3); __CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4); __CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2); __STORE(10, __reg_6_0); __LOAD(__reg_0, 23); __CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4); __CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0); __CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3); __STORE(11, __reg_6_1); __LOAD(__reg_0, 24); __CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); __CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0); __CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3); __CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4); __STORE(12, __reg_6_2); } else { __LOAD(__reg_0, 0); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0); __LOAD(__reg_0, 1); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0); __LOAD(__reg_0, 2); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __LOAD(__reg_0, 3); __CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); __LOAD(__reg_0, 4); __CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); __CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2); __LOAD(__reg_0, 5); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0); __CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3); __LOAD(__reg_0, 6); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4); __LOAD(__reg_0, 7); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0); __LOAD(__reg_0, 8); __CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4); __LOAD(__reg_0, 9); __CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); __CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0); __LOAD(__reg_0, 10); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0); __CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3); __CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1); __LOAD(__reg_0, 11); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4); __CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2); __LOAD(__reg_0, 12); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0); 
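/* Editorial note: this else-branch primes the same pipeline for tiles with
 * __c1Id != 0. The __c1Id == 0 branch above appears to seed every stage
 * directly from rows 0 and 1 (the fixed boundary rows), whereas here each
 * stage k is switched on only after stage k-1 has produced enough valid
 * rows, so the first __STORE comes later in the load sequence. */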
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3); __CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1); __LOAD(__reg_0, 13); __CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4); __CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2); __LOAD(__reg_0, 14); __CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); __CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0); __CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3); __LOAD(__reg_0, 15); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0); __CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3); __CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4); __LOAD(__reg_0, 16); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4); __CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0); __CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3); __LOAD(__reg_0, 17); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0); __CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3); __CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4); __LOAD(__reg_0, 18); __CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4); __CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0); __LOAD(__reg_0, 19); __CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); __CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0); __CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3); __CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1); __LOAD(__reg_0, 20); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0); __CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3); __CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4); __CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0); __LOAD(__reg_0, 21); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4); __CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0); __CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3); 
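/* Editorial note: the __writeValidk guards defined above shrink the active
 * column window by __halo2 = 2 threads per pipeline stage, so a block of
 * __side2LenOl = 512 threads keeps only __side2Len = 488 fully valid
 * columns after all __side0Len = 6 stages (512 - 2*2*6 = 488); the outer
 * columns are redundantly computed overlap between neighbouring tiles. */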
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1); __LOAD(__reg_0, 22); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0); __CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3); __CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4); __CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2); __LOAD(__reg_0, 23); __CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4); __CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0); __CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3); __LOAD(__reg_0, 24); __CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); __CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0); __CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3); __CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4); __STORE(12, __reg_6_2); } __a_sb = __a_sb_double + __blockSize * 0; if (__c1Id == __side1Num - 1) { for (__h = 25; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 7;) { __LOAD(__reg_0, __h); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0); __CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3); __CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4); __CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0); __STORE(__h - 12, __reg_6_3); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4); __CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0); __CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3); __CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1); __STORE(__h - 12, __reg_6_4); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0); __CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3); __CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4); __CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2); __STORE(__h - 12, __reg_6_0); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4); __CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, 
__reg_5_3, __reg_4_0); __CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3); __STORE(__h - 12, __reg_6_1); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); __CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0); __CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3); __CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4); __STORE(__h - 12, __reg_6_2); __h++; } if (0) {} else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0, __h + 0); __CALC1(__reg_1_2, __reg_1_2, __reg_1_2, __reg_1_4, __reg_1_3, __reg_0); __CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3); __CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4); __CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0); __STORE(__h - 12, __reg_6_3); __reg_1_0 = __reg_0; __LOAD(__reg_0, __h + 1); __CALC1(__reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_4, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4); __CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0); __CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3); __CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1); __STORE(__h - 11, __reg_6_4); __reg_1_1 = __reg_0; __CALC2(__reg_2_2, __reg_2_2, __reg_2_2, __reg_2_4, __reg_2_3, __reg_1_0); __CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3); __CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4); __CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2); __STORE(__h - 10, __reg_6_0); __reg_2_0 = __reg_1_0; __CALC2(__reg_2_3, __reg_2_3, __reg_2_3, __reg_2_3, __reg_2_4, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4); __CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0); __CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3); __STORE(__h - 9, __reg_6_1); __reg_2_1 = __reg_1_1; __CALC3(__reg_3_2, __reg_3_2, __reg_3_2, __reg_3_4, __reg_3_3, __reg_2_0); __CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3); __CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4); __STORE(__h - 8, __reg_6_2); __reg_3_0 = __reg_2_0; __CALC3(__reg_3_3, __reg_3_3, __reg_3_3, __reg_3_3, __reg_3_4, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4); __CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0); __STORE(__h - 7, __reg_6_3); __reg_3_1 = __reg_2_1; __CALC4(__reg_4_2, __reg_4_2, __reg_4_2, __reg_4_4, __reg_4_3, __reg_3_0); __CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3); __CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1); 
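/* Editorial note: the else-if ladder on
 *   __h + K == __c1Len - __side1Len * __c1Id + __halo1 * 2
 * selects how many rows remain in the last tile and drains the register
 * pipeline accordingly; calls with repeated arguments, e.g.
 * __CALC4(__reg_4_0, __reg_4_0, __reg_4_0, __reg_4_2, __reg_4_1, ...),
 * appear to discard outputs that would fall outside the bottom boundary. */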
__STORE(__h - 6, __reg_6_4); __reg_4_0 = __reg_3_0; __CALC4(__reg_4_3, __reg_4_3, __reg_4_3, __reg_4_3, __reg_4_4, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4); __CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2); __STORE(__h - 5, __reg_6_0); __reg_4_1 = __reg_3_1; __CALC5(__reg_5_2, __reg_5_2, __reg_5_2, __reg_5_4, __reg_5_3, __reg_4_0); __CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3); __STORE(__h - 4, __reg_6_1); __reg_5_0 = __reg_4_0; __CALC5(__reg_5_3, __reg_5_3, __reg_5_3, __reg_5_3, __reg_5_4, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4); __STORE(__h - 3, __reg_6_2); __reg_5_1 = __reg_4_1; __CALC6(__reg_6_2, __reg_6_2, __reg_6_2, __reg_6_4, __reg_6_3, __reg_5_0); __STORE(__h - 2, __reg_6_3); __CALC6(__reg_6_3, __reg_6_3, __reg_6_3, __reg_6_3, __reg_6_4, __reg_5_1); __STORE(__h - 1, __reg_6_4); } else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0, __h + 0); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0); __CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3); __CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4); __CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0); __STORE(__h - 12, __reg_6_3); __LOAD(__reg_0, __h + 1); __CALC1(__reg_1_3, __reg_1_3, __reg_1_3, __reg_1_0, __reg_1_4, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4); __CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0); __CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3); __CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1); __STORE(__h - 11, __reg_6_4); __reg_1_1 = __reg_0; __LOAD(__reg_0, __h + 2); __CALC1(__reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_0, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0); __CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3); __CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4); __CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2); __STORE(__h - 10, __reg_6_0); __reg_1_2 = __reg_0; __CALC2(__reg_2_3, __reg_2_3, __reg_2_3, __reg_2_0, __reg_2_4, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4); __CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0); __CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3); __STORE(__h - 9, __reg_6_1); __reg_2_1 = __reg_1_1; __CALC2(__reg_2_4, __reg_2_4, __reg_2_4, __reg_2_4, __reg_2_0, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0); __CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3); __CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4); __STORE(__h - 8, __reg_6_2); __reg_2_2 = __reg_1_2; __CALC3(__reg_3_3, __reg_3_3, __reg_3_3, __reg_3_0, __reg_3_4, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, 
__reg_4_2, __reg_3_4); __CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0); __STORE(__h - 7, __reg_6_3); __reg_3_1 = __reg_2_1; __CALC3(__reg_3_4, __reg_3_4, __reg_3_4, __reg_3_4, __reg_3_0, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0); __CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3); __CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1); __STORE(__h - 6, __reg_6_4); __reg_3_2 = __reg_2_2; __CALC4(__reg_4_3, __reg_4_3, __reg_4_3, __reg_4_0, __reg_4_4, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4); __CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2); __STORE(__h - 5, __reg_6_0); __reg_4_1 = __reg_3_1; __CALC4(__reg_4_4, __reg_4_4, __reg_4_4, __reg_4_4, __reg_4_0, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0); __CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3); __STORE(__h - 4, __reg_6_1); __reg_4_2 = __reg_3_2; __CALC5(__reg_5_3, __reg_5_3, __reg_5_3, __reg_5_0, __reg_5_4, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4); __STORE(__h - 3, __reg_6_2); __reg_5_1 = __reg_4_1; __CALC5(__reg_5_4, __reg_5_4, __reg_5_4, __reg_5_4, __reg_5_0, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0); __STORE(__h - 2, __reg_6_3); __reg_5_2 = __reg_4_2; __CALC6(__reg_6_3, __reg_6_3, __reg_6_3, __reg_6_0, __reg_6_4, __reg_5_1); __STORE(__h - 1, __reg_6_4); __CALC6(__reg_6_4, __reg_6_4, __reg_6_4, __reg_6_4, __reg_6_0, __reg_5_2); __STORE(__h + 0, __reg_6_0); } else if (__h + 4 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0, __h + 0); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0); __CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3); __CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4); __CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0); __STORE(__h - 12, __reg_6_3); __LOAD(__reg_0, __h + 1); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4); __CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0); __CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3); __CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1); __STORE(__h - 11, __reg_6_4); __LOAD(__reg_0, __h + 2); __CALC1(__reg_1_4, __reg_1_4, __reg_1_4, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0); __CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3); __CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4); __CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2); __STORE(__h - 10, __reg_6_0); __reg_1_2 = __reg_0; __LOAD(__reg_0, __h + 3); __CALC1(__reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_1, __reg_0); __CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, 
__reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4); __CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0); __CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3); __STORE(__h - 9, __reg_6_1); __reg_1_3 = __reg_0; __CALC2(__reg_2_4, __reg_2_4, __reg_2_4, __reg_2_1, __reg_2_0, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0); __CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3); __CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4); __STORE(__h - 8, __reg_6_2); __reg_2_2 = __reg_1_2; __CALC2(__reg_2_0, __reg_2_0, __reg_2_0, __reg_2_0, __reg_2_1, __reg_1_3); __CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4); __CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0); __STORE(__h - 7, __reg_6_3); __reg_2_3 = __reg_1_3; __CALC3(__reg_3_4, __reg_3_4, __reg_3_4, __reg_3_1, __reg_3_0, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0); __CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3); __CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1); __STORE(__h - 6, __reg_6_4); __reg_3_2 = __reg_2_2; __CALC3(__reg_3_0, __reg_3_0, __reg_3_0, __reg_3_0, __reg_3_1, __reg_2_3); __CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4); __CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2); __STORE(__h - 5, __reg_6_0); __reg_3_3 = __reg_2_3; __CALC4(__reg_4_4, __reg_4_4, __reg_4_4, __reg_4_1, __reg_4_0, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0); __CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3); __STORE(__h - 4, __reg_6_1); __reg_4_2 = __reg_3_2; __CALC4(__reg_4_0, __reg_4_0, __reg_4_0, __reg_4_0, __reg_4_1, __reg_3_3); __CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4); __STORE(__h - 3, __reg_6_2); __reg_4_3 = __reg_3_3; __CALC5(__reg_5_4, __reg_5_4, __reg_5_4, __reg_5_1, __reg_5_0, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0); __STORE(__h - 2, __reg_6_3); __reg_5_2 = __reg_4_2; __CALC5(__reg_5_0, __reg_5_0, __reg_5_0, __reg_5_0, __reg_5_1, __reg_4_3); __CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1); __STORE(__h - 1, __reg_6_4); __reg_5_3 = __reg_4_3; __CALC6(__reg_6_4, __reg_6_4, __reg_6_4, __reg_6_1, __reg_6_0, __reg_5_2); __STORE(__h + 0, __reg_6_0); __CALC6(__reg_6_0, __reg_6_0, __reg_6_0, __reg_6_0, __reg_6_1, __reg_5_3); __STORE(__h + 1, __reg_6_1); } else if (__h + 5 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0, __h + 0); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0); __CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3); __CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4); __CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, 
__reg_6_4, __reg_6_3, __reg_5_0); __STORE(__h - 12, __reg_6_3); __LOAD(__reg_0, __h + 1); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4); __CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0); __CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3); __CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1); __STORE(__h - 11, __reg_6_4); __LOAD(__reg_0, __h + 2); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0); __CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3); __CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4); __CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2); __STORE(__h - 10, __reg_6_0); __LOAD(__reg_0, __h + 3); __CALC1(__reg_1_0, __reg_1_0, __reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4); __CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0); __CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3); __STORE(__h - 9, __reg_6_1); __reg_1_3 = __reg_0; __LOAD(__reg_0, __h + 4); __CALC1(__reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_2, __reg_0); __CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0); __CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3); __CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4); __STORE(__h - 8, __reg_6_2); __reg_1_4 = __reg_0; __CALC2(__reg_2_0, __reg_2_0, __reg_2_0, __reg_2_2, __reg_2_1, __reg_1_3); __CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4); __CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0); __STORE(__h - 7, __reg_6_3); __reg_2_3 = __reg_1_3; __CALC2(__reg_2_1, __reg_2_1, __reg_2_1, __reg_2_1, __reg_2_2, __reg_1_4); __CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0); __CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3); __CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1); __STORE(__h - 6, __reg_6_4); __reg_2_4 = __reg_1_4; __CALC3(__reg_3_0, __reg_3_0, __reg_3_0, __reg_3_2, __reg_3_1, __reg_2_3); __CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4); __CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2); __STORE(__h - 5, __reg_6_0); __reg_3_3 = __reg_2_3; __CALC3(__reg_3_1, __reg_3_1, __reg_3_1, __reg_3_1, __reg_3_2, __reg_2_4); __CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0); 
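/* Editorial note: __a_sb_double holds two __blockSize-sized halves, and
 * __DB_SWITCH() (invoked from __CALCSETUP) flips __a_sb between them, so
 * every staged row gets a fresh buffer and a single __syncthreads() per
 * __CALCk suffices; __SBREF(sb, i2) then reads the -2..+2 column
 * neighbours of the staged row via __sbref_wrap. */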
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3); __STORE(__h - 4, __reg_6_1); __reg_3_4 = __reg_2_4; __CALC4(__reg_4_0, __reg_4_0, __reg_4_0, __reg_4_2, __reg_4_1, __reg_3_3); __CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4); __STORE(__h - 3, __reg_6_2); __reg_4_3 = __reg_3_3; __CALC4(__reg_4_1, __reg_4_1, __reg_4_1, __reg_4_1, __reg_4_2, __reg_3_4); __CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0); __STORE(__h - 2, __reg_6_3); __reg_4_4 = __reg_3_4; __CALC5(__reg_5_0, __reg_5_0, __reg_5_0, __reg_5_2, __reg_5_1, __reg_4_3); __CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1); __STORE(__h - 1, __reg_6_4); __reg_5_3 = __reg_4_3; __CALC5(__reg_5_1, __reg_5_1, __reg_5_1, __reg_5_1, __reg_5_2, __reg_4_4); __CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2); __STORE(__h + 0, __reg_6_0); __reg_5_4 = __reg_4_4; __CALC6(__reg_6_0, __reg_6_0, __reg_6_0, __reg_6_2, __reg_6_1, __reg_5_3); __STORE(__h + 1, __reg_6_1); __CALC6(__reg_6_1, __reg_6_1, __reg_6_1, __reg_6_1, __reg_6_2, __reg_5_4); __STORE(__h + 2, __reg_6_2); } else if (__h + 6 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0, __h + 0); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0); __CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3); __CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4); __CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0); __STORE(__h - 12, __reg_6_3); __LOAD(__reg_0, __h + 1); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4); __CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0); __CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3); __CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1); __STORE(__h - 11, __reg_6_4); __LOAD(__reg_0, __h + 2); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0); __CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3); __CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4); __CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2); __STORE(__h - 10, __reg_6_0); __LOAD(__reg_0, __h + 3); __CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4); __CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0); __CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3); __STORE(__h - 9, __reg_6_1); __LOAD(__reg_0, __h + 4); __CALC1(__reg_1_1, __reg_1_1, __reg_1_1, __reg_1_3, __reg_1_2, __reg_0); __CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2); 
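/* Editorial note: in the steady-state loops (see the else-branch below),
 * five rows are processed per iteration because each stage rotates through
 * five registers, and the stored row trails the loaded row by
 * __halo1 * __side0Len rows: __STORE(__h - 12, ...) here versus
 * __STORE(__h - 14, ...) in the seven-stage kernel above. In outline
 * (sketch only; names as in the generated code):
 *
 *   for (__h = 25; __h <= __side1LenOl - 5;) {
 *     // x5 unrolled: load row __h, run stages 1..6, emit row __h - 12
 *     __LOAD(__reg_0, __h); ... __STORE(__h - 12, ...); __h++;
 *   }
 */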
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0); __CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3); __CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4); __STORE(__h - 8, __reg_6_2); __reg_1_4 = __reg_0; __LOAD(__reg_0, __h + 5); __CALC1(__reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_3, __reg_0); __CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3); __CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4); __CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0); __STORE(__h - 7, __reg_6_3); __reg_1_0 = __reg_0; __CALC2(__reg_2_1, __reg_2_1, __reg_2_1, __reg_2_3, __reg_2_2, __reg_1_4); __CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0); __CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3); __CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1); __STORE(__h - 6, __reg_6_4); __reg_2_4 = __reg_1_4; __CALC2(__reg_2_2, __reg_2_2, __reg_2_2, __reg_2_2, __reg_2_3, __reg_1_0); __CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3); __CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4); __CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2); __STORE(__h - 5, __reg_6_0); __reg_2_0 = __reg_1_0; __CALC3(__reg_3_1, __reg_3_1, __reg_3_1, __reg_3_3, __reg_3_2, __reg_2_4); __CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0); __CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3); __STORE(__h - 4, __reg_6_1); __reg_3_4 = __reg_2_4; __CALC3(__reg_3_2, __reg_3_2, __reg_3_2, __reg_3_2, __reg_3_3, __reg_2_0); __CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3); __CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4); __STORE(__h - 3, __reg_6_2); __reg_3_0 = __reg_2_0; __CALC4(__reg_4_1, __reg_4_1, __reg_4_1, __reg_4_3, __reg_4_2, __reg_3_4); __CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0); __STORE(__h - 2, __reg_6_3); __reg_4_4 = __reg_3_4; __CALC4(__reg_4_2, __reg_4_2, __reg_4_2, __reg_4_2, __reg_4_3, __reg_3_0); __CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3); __CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1); __STORE(__h - 1, __reg_6_4); __reg_4_0 = __reg_3_0; __CALC5(__reg_5_1, __reg_5_1, __reg_5_1, __reg_5_3, __reg_5_2, __reg_4_4); __CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2); __STORE(__h + 0, __reg_6_0); __reg_5_4 = __reg_4_4; __CALC5(__reg_5_2, __reg_5_2, __reg_5_2, __reg_5_2, __reg_5_3, __reg_4_0); __CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3); __STORE(__h + 1, __reg_6_1); __reg_5_0 = __reg_4_0; __CALC6(__reg_6_1, __reg_6_1, __reg_6_1, __reg_6_3, __reg_6_2, __reg_5_4); __STORE(__h + 2, __reg_6_2); __CALC6(__reg_6_2, __reg_6_2, __reg_6_2, __reg_6_2, 
__reg_6_3, __reg_5_0); __STORE(__h + 3, __reg_6_3); } } else { for (__h = 25; __h <= __side1LenOl - 5;) { __LOAD(__reg_0, __h); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0); __CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3); __CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4); __CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0); __STORE(__h - 12, __reg_6_3); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4); __CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0); __CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3); __CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1); __STORE(__h - 12, __reg_6_4); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0); __CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3); __CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4); __CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2); __STORE(__h - 12, __reg_6_0); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4); __CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0); __CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3); __STORE(__h - 12, __reg_6_1); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); __CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0); __CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3); __CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4); __STORE(__h - 12, __reg_6_2); __h++; } if (__h == __side1LenOl) return; __LOAD(__reg_0, __h); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0); __CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3); __CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4); __CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0); __STORE(__h - 12, __reg_6_3); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0, __h); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4); __CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0); __CALC5(__reg_5_0, 
__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3); __CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1); __STORE(__h - 12, __reg_6_4); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0, __h); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0); __CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3); __CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4); __CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2); __STORE(__h - 12, __reg_6_0); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0, __h); __CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4); __CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0); __CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3); __STORE(__h - 12, __reg_6_1); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0, __h); __CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); __CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0); __CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3); __CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4); __STORE(__h - 12, __reg_6_2); __h++; } } __global__ void kernel0_5(double *A, int dimsize, int timestep, int c0) { #ifndef AN5D_TYPE #define AN5D_TYPE unsigned #endif const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0); #define __c0 c0 const AN5D_TYPE __c1Len = (dimsize - 2 - 2); const AN5D_TYPE __c1Pad = (2); #define __c1 c1 const AN5D_TYPE __c2Len = (dimsize - 2 - 2); const AN5D_TYPE __c2Pad = (2); #define __c2 c2 const AN5D_TYPE __halo1 = 2; const AN5D_TYPE __halo2 = 2; const AN5D_TYPE __side0Len = 5; const AN5D_TYPE __side1Len = 512; const AN5D_TYPE __side2Len = 492; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len; const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len; const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x; const AN5D_TYPE __local_c2 = __tid; const AN5D_TYPE __c1Id = blockIdx.x / __side2Num; const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2; double __reg_0; double __reg_1_0; double __reg_1_1; double __reg_1_2; double __reg_1_3; double __reg_1_4; double __reg_2_0; double __reg_2_1; double __reg_2_2; double __reg_2_3; double __reg_2_4; double __reg_3_0; double __reg_3_1; double __reg_3_2; double __reg_3_3; double __reg_3_4; double __reg_4_0; double __reg_4_1; double __reg_4_2; double __reg_4_3; double __reg_4_4; double __reg_5_0; double __reg_5_1; double __reg_5_2; double __reg_5_3; double __reg_5_4; __shared__ double __a_sb_double[__blockSize * 2]; double *__a_sb = __a_sb_double; 
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2);
const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3);
const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4);
const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5);
const AN5D_TYPE __storeValid = __writeValid5;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_0_wrap(__rn0, __a) do { __rn0 = (0.03125f * (__SBREF(__a_sb, -2))) + (0.03126f * (__SBREF(__a_sb, -1))) + (0.03127f * (__REGREF(__a, 0))) + (0.03128f * (__SBREF(__a_sb, 1))) + (0.03129f * (__SBREF(__a_sb, 2))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_0(out, a) do { __CALCEXPR_0_wrap(out, a); } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_1_wrap(__rn0, __a) do { __rn0 = (0.03130f * (__SBREF(__a_sb, -2))) + (0.03131f * (__SBREF(__a_sb, -1))) + (0.03132f * (__REGREF(__a, 0))) + (0.03133f * (__SBREF(__a_sb, 1))) + (0.03134f * (__SBREF(__a_sb, 2))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_1(out, a) do { double etmp; __CALCEXPR_1_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_2_wrap(__rn0, __a) do { __rn0 = (0.03135f * (__SBREF(__a_sb, -2))) + (0.03136f * (__SBREF(__a_sb, -1))) + (0.24712f * (__REGREF(__a, 0))) + (0.03138f * (__SBREF(__a_sb, 1))) + (0.03139f * (__SBREF(__a_sb, 2))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_2(out, a) do { double etmp; __CALCEXPR_2_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_3_wrap(__rn0, __a) do { __rn0 = (0.03140f * (__SBREF(__a_sb, -2))) + (0.03141f * (__SBREF(__a_sb, -1))) + (0.03142f * (__REGREF(__a, 0))) + (0.03143f * (__SBREF(__a_sb, 1))) + (0.03144f * (__SBREF(__a_sb, 2))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_3(out, a) do { double etmp; __CALCEXPR_3_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_4_wrap(__rn0, __a) do { __rn0 = (0.03145f * (__SBREF(__a_sb, -2))) + (0.03146f * (__SBREF(__a_sb, -1))) + (0.03147f * (__REGREF(__a, 0))) + (0.03148f * (__SBREF(__a_sb, 1))) + (0.03149f * (__SBREF(__a_sb, 2))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_4(out, a) do { double etmp; __CALCEXPR_4_wrap(etmp, a); out += etmp; } while (0);
#define __CALCEXPR(out0, out1, out2, out3, out4, reg) do { __CALCEXPR_0(out0, reg); __CALCEXPR_1(out1, reg); __CALCEXPR_2(out2, reg); __CALCEXPR_3(out3, reg); __CALCEXPR_4(out4, reg); } while (0);
#define __CALC1(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid1) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC2(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid2) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC3(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid3) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC4(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid4) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC5(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid5) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __STORE(h, out) do { if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __DEST = out; }} while (0)
if (__c1Id == 0)
{
__LOAD(__reg_0, 0); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_0); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_0); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_0); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_0);
__LOAD(__reg_0, 1); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0); __CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_0); __CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_0); __CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_0); __CALC5(__reg_5_3,
__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_0); __LOAD(__reg_0, 2); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __LOAD(__reg_0, 3); __CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); __LOAD(__reg_0, 4); __CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); __CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2); __LOAD(__reg_0, 5); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0); __CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3); __LOAD(__reg_0, 6); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4); __CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2); __LOAD(__reg_0, 7); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0); __CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3); __LOAD(__reg_0, 8); __CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4); __CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2); __LOAD(__reg_0, 9); __CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); __CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0); __CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3); __LOAD(__reg_0, 10); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0); __CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3); __CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4); __CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2); __LOAD(__reg_0, 11); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4); __CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0); __CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3); __LOAD(__reg_0, 12); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0); __CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3); __CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4); __STORE(2, __reg_5_2); __LOAD(__reg_0, 13); __CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4); __CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0); __STORE(3, __reg_5_3); __LOAD(__reg_0, 14); __CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); __CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, 
__reg_2_0); __CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3); __CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1); __STORE(4, __reg_5_4); __LOAD(__reg_0, 15); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0); __CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3); __CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4); __CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2); __STORE(5, __reg_5_0); __LOAD(__reg_0, 16); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4); __CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0); __CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3); __STORE(6, __reg_5_1); __LOAD(__reg_0, 17); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0); __CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3); __CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4); __STORE(7, __reg_5_2); __LOAD(__reg_0, 18); __CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4); __CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0); __STORE(8, __reg_5_3); __LOAD(__reg_0, 19); __CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); __CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0); __CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3); __CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1); __STORE(9, __reg_5_4); __LOAD(__reg_0, 20); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0); __CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3); __CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4); __CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2); __STORE(10, __reg_5_0); } else { __LOAD(__reg_0, 0); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0); __LOAD(__reg_0, 1); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0); __LOAD(__reg_0, 2); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __LOAD(__reg_0, 3); __CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); __LOAD(__reg_0, 4); __CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); __CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2); __LOAD(__reg_0, 5); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0); __CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3); __LOAD(__reg_0, 6); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, 
__reg_2_2, __reg_1_4); __LOAD(__reg_0, 7); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0); __LOAD(__reg_0, 8); __CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4); __LOAD(__reg_0, 9); __CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); __CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0); __LOAD(__reg_0, 10); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0); __CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3); __CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1); __LOAD(__reg_0, 11); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4); __CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2); __LOAD(__reg_0, 12); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0); __CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3); __CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1); __LOAD(__reg_0, 13); __CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4); __CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2); __LOAD(__reg_0, 14); __CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); __CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0); __CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3); __LOAD(__reg_0, 15); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0); __CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3); __CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4); __LOAD(__reg_0, 16); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4); __CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0); __CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3); __LOAD(__reg_0, 17); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0); __CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3); __CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4); __LOAD(__reg_0, 18); __CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4); __CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2); 
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0); __LOAD(__reg_0, 19); __CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); __CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0); __CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3); __CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1); __LOAD(__reg_0, 20); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0); __CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3); __CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4); __CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2); __STORE(10, __reg_5_0); } __a_sb = __a_sb_double + __blockSize * 1; if (__c1Id == __side1Num - 1) { for (__h = 21; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 7;) { __LOAD(__reg_0, __h); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4); __CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0); __CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3); __STORE(__h - 10, __reg_5_1); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0); __CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3); __CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4); __STORE(__h - 10, __reg_5_2); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4); __CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0); __STORE(__h - 10, __reg_5_3); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); __CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0); __CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3); __CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1); __STORE(__h - 10, __reg_5_4); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0); __CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3); __CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4); __CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2); __STORE(__h - 10, __reg_5_0); __h++; __DB_SWITCH(); __syncthreads(); } if (0) {} else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0, __h + 0); __CALC1(__reg_1_3, __reg_1_3, __reg_1_3, __reg_1_0, __reg_1_4, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4); __CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, 
__reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0); __CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3); __STORE(__h - 10, __reg_5_1); __reg_1_1 = __reg_0; __LOAD(__reg_0, __h + 1); __CALC1(__reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_0, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0); __CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3); __CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4); __STORE(__h - 9, __reg_5_2); __reg_1_2 = __reg_0; __CALC2(__reg_2_3, __reg_2_3, __reg_2_3, __reg_2_0, __reg_2_4, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4); __CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0); __STORE(__h - 8, __reg_5_3); __reg_2_1 = __reg_1_1; __CALC2(__reg_2_4, __reg_2_4, __reg_2_4, __reg_2_4, __reg_2_0, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0); __CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3); __CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1); __STORE(__h - 7, __reg_5_4); __reg_2_2 = __reg_1_2; __CALC3(__reg_3_3, __reg_3_3, __reg_3_3, __reg_3_0, __reg_3_4, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4); __CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2); __STORE(__h - 6, __reg_5_0); __reg_3_1 = __reg_2_1; __CALC3(__reg_3_4, __reg_3_4, __reg_3_4, __reg_3_4, __reg_3_0, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0); __CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3); __STORE(__h - 5, __reg_5_1); __reg_3_2 = __reg_2_2; __CALC4(__reg_4_3, __reg_4_3, __reg_4_3, __reg_4_0, __reg_4_4, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4); __STORE(__h - 4, __reg_5_2); __reg_4_1 = __reg_3_1; __CALC4(__reg_4_4, __reg_4_4, __reg_4_4, __reg_4_4, __reg_4_0, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0); __STORE(__h - 3, __reg_5_3); __reg_4_2 = __reg_3_2; __CALC5(__reg_5_3, __reg_5_3, __reg_5_3, __reg_5_0, __reg_5_4, __reg_4_1); __STORE(__h - 2, __reg_5_4); __CALC5(__reg_5_4, __reg_5_4, __reg_5_4, __reg_5_4, __reg_5_0, __reg_4_2); __STORE(__h - 1, __reg_5_0); } else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0, __h + 0); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4); __CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0); __CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3); __STORE(__h - 10, __reg_5_1); __LOAD(__reg_0, __h + 1); __CALC1(__reg_1_4, __reg_1_4, __reg_1_4, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0); __CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3); __CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4); __STORE(__h - 9, __reg_5_2); __reg_1_2 = __reg_0; __LOAD(__reg_0, __h + 2); __CALC1(__reg_1_0, __reg_1_0, __reg_1_0, 
__reg_1_0, __reg_1_1, __reg_0); __CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4); __CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0); __STORE(__h - 8, __reg_5_3); __reg_1_3 = __reg_0; __CALC2(__reg_2_4, __reg_2_4, __reg_2_4, __reg_2_1, __reg_2_0, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0); __CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3); __CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1); __STORE(__h - 7, __reg_5_4); __reg_2_2 = __reg_1_2; __CALC2(__reg_2_0, __reg_2_0, __reg_2_0, __reg_2_0, __reg_2_1, __reg_1_3); __CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4); __CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2); __STORE(__h - 6, __reg_5_0); __reg_2_3 = __reg_1_3; __CALC3(__reg_3_4, __reg_3_4, __reg_3_4, __reg_3_1, __reg_3_0, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0); __CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3); __STORE(__h - 5, __reg_5_1); __reg_3_2 = __reg_2_2; __CALC3(__reg_3_0, __reg_3_0, __reg_3_0, __reg_3_0, __reg_3_1, __reg_2_3); __CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4); __STORE(__h - 4, __reg_5_2); __reg_3_3 = __reg_2_3; __CALC4(__reg_4_4, __reg_4_4, __reg_4_4, __reg_4_1, __reg_4_0, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0); __STORE(__h - 3, __reg_5_3); __reg_4_2 = __reg_3_2; __CALC4(__reg_4_0, __reg_4_0, __reg_4_0, __reg_4_0, __reg_4_1, __reg_3_3); __CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1); __STORE(__h - 2, __reg_5_4); __reg_4_3 = __reg_3_3; __CALC5(__reg_5_4, __reg_5_4, __reg_5_4, __reg_5_1, __reg_5_0, __reg_4_2); __STORE(__h - 1, __reg_5_0); __CALC5(__reg_5_0, __reg_5_0, __reg_5_0, __reg_5_0, __reg_5_1, __reg_4_3); __STORE(__h + 0, __reg_5_1); } else if (__h + 4 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0, __h + 0); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4); __CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0); __CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3); __STORE(__h - 10, __reg_5_1); __LOAD(__reg_0, __h + 1); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0); __CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3); __CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4); __STORE(__h - 9, __reg_5_2); __LOAD(__reg_0, __h + 2); __CALC1(__reg_1_0, __reg_1_0, __reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4); __CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, 
__reg_5_3, __reg_4_0); __STORE(__h - 8, __reg_5_3); __reg_1_3 = __reg_0; __LOAD(__reg_0, __h + 3); __CALC1(__reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_2, __reg_0); __CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0); __CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3); __CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1); __STORE(__h - 7, __reg_5_4); __reg_1_4 = __reg_0; __CALC2(__reg_2_0, __reg_2_0, __reg_2_0, __reg_2_2, __reg_2_1, __reg_1_3); __CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4); __CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2); __STORE(__h - 6, __reg_5_0); __reg_2_3 = __reg_1_3; __CALC2(__reg_2_1, __reg_2_1, __reg_2_1, __reg_2_1, __reg_2_2, __reg_1_4); __CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0); __CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3); __STORE(__h - 5, __reg_5_1); __reg_2_4 = __reg_1_4; __CALC3(__reg_3_0, __reg_3_0, __reg_3_0, __reg_3_2, __reg_3_1, __reg_2_3); __CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4); __STORE(__h - 4, __reg_5_2); __reg_3_3 = __reg_2_3; __CALC3(__reg_3_1, __reg_3_1, __reg_3_1, __reg_3_1, __reg_3_2, __reg_2_4); __CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0); __STORE(__h - 3, __reg_5_3); __reg_3_4 = __reg_2_4; __CALC4(__reg_4_0, __reg_4_0, __reg_4_0, __reg_4_2, __reg_4_1, __reg_3_3); __CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1); __STORE(__h - 2, __reg_5_4); __reg_4_3 = __reg_3_3; __CALC4(__reg_4_1, __reg_4_1, __reg_4_1, __reg_4_1, __reg_4_2, __reg_3_4); __CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2); __STORE(__h - 1, __reg_5_0); __reg_4_4 = __reg_3_4; __CALC5(__reg_5_0, __reg_5_0, __reg_5_0, __reg_5_2, __reg_5_1, __reg_4_3); __STORE(__h + 0, __reg_5_1); __CALC5(__reg_5_1, __reg_5_1, __reg_5_1, __reg_5_1, __reg_5_2, __reg_4_4); __STORE(__h + 1, __reg_5_2); } else if (__h + 5 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0, __h + 0); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4); __CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0); __CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3); __STORE(__h - 10, __reg_5_1); __LOAD(__reg_0, __h + 1); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0); __CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3); __CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4); __STORE(__h - 9, __reg_5_2); __LOAD(__reg_0, __h + 2); __CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, 
__reg_2_4); __CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0); __STORE(__h - 8, __reg_5_3); __LOAD(__reg_0, __h + 3); __CALC1(__reg_1_1, __reg_1_1, __reg_1_1, __reg_1_3, __reg_1_2, __reg_0); __CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0); __CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3); __CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1); __STORE(__h - 7, __reg_5_4); __reg_1_4 = __reg_0; __LOAD(__reg_0, __h + 4); __CALC1(__reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_3, __reg_0); __CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3); __CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4); __CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2); __STORE(__h - 6, __reg_5_0); __reg_1_0 = __reg_0; __CALC2(__reg_2_1, __reg_2_1, __reg_2_1, __reg_2_3, __reg_2_2, __reg_1_4); __CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0); __CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3); __STORE(__h - 5, __reg_5_1); __reg_2_4 = __reg_1_4; __CALC2(__reg_2_2, __reg_2_2, __reg_2_2, __reg_2_2, __reg_2_3, __reg_1_0); __CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3); __CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4); __STORE(__h - 4, __reg_5_2); __reg_2_0 = __reg_1_0; __CALC3(__reg_3_1, __reg_3_1, __reg_3_1, __reg_3_3, __reg_3_2, __reg_2_4); __CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0); __STORE(__h - 3, __reg_5_3); __reg_3_4 = __reg_2_4; __CALC3(__reg_3_2, __reg_3_2, __reg_3_2, __reg_3_2, __reg_3_3, __reg_2_0); __CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3); __CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1); __STORE(__h - 2, __reg_5_4); __reg_3_0 = __reg_2_0; __CALC4(__reg_4_1, __reg_4_1, __reg_4_1, __reg_4_3, __reg_4_2, __reg_3_4); __CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2); __STORE(__h - 1, __reg_5_0); __reg_4_4 = __reg_3_4; __CALC4(__reg_4_2, __reg_4_2, __reg_4_2, __reg_4_2, __reg_4_3, __reg_3_0); __CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3); __STORE(__h + 0, __reg_5_1); __reg_4_0 = __reg_3_0; __CALC5(__reg_5_1, __reg_5_1, __reg_5_1, __reg_5_3, __reg_5_2, __reg_4_4); __STORE(__h + 1, __reg_5_2); __CALC5(__reg_5_2, __reg_5_2, __reg_5_2, __reg_5_2, __reg_5_3, __reg_4_0); __STORE(__h + 2, __reg_5_3); } else if (__h + 6 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0, __h + 0); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4); __CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0); __CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3); __STORE(__h - 10, __reg_5_1); __LOAD(__reg_0, __h + 1); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, 
__reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0); __CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3); __CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4); __STORE(__h - 9, __reg_5_2); __LOAD(__reg_0, __h + 2); __CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4); __CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0); __STORE(__h - 8, __reg_5_3); __LOAD(__reg_0, __h + 3); __CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); __CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0); __CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3); __CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1); __STORE(__h - 7, __reg_5_4); __LOAD(__reg_0, __h + 4); __CALC1(__reg_1_2, __reg_1_2, __reg_1_2, __reg_1_4, __reg_1_3, __reg_0); __CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3); __CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4); __CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2); __STORE(__h - 6, __reg_5_0); __reg_1_0 = __reg_0; __LOAD(__reg_0, __h + 5); __CALC1(__reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_4, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4); __CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0); __CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3); __STORE(__h - 5, __reg_5_1); __reg_1_1 = __reg_0; __CALC2(__reg_2_2, __reg_2_2, __reg_2_2, __reg_2_4, __reg_2_3, __reg_1_0); __CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3); __CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4); __STORE(__h - 4, __reg_5_2); __reg_2_0 = __reg_1_0; __CALC2(__reg_2_3, __reg_2_3, __reg_2_3, __reg_2_3, __reg_2_4, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4); __CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0); __STORE(__h - 3, __reg_5_3); __reg_2_1 = __reg_1_1; __CALC3(__reg_3_2, __reg_3_2, __reg_3_2, __reg_3_4, __reg_3_3, __reg_2_0); __CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3); __CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1); __STORE(__h - 2, __reg_5_4); __reg_3_0 = __reg_2_0; __CALC3(__reg_3_3, __reg_3_3, __reg_3_3, __reg_3_3, __reg_3_4, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4); __CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2); __STORE(__h - 1, __reg_5_0); __reg_3_1 = __reg_2_1; __CALC4(__reg_4_2, __reg_4_2, __reg_4_2, __reg_4_4, __reg_4_3, __reg_3_0); __CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3); __STORE(__h + 0, __reg_5_1); __reg_4_0 = __reg_3_0; 
__CALC4(__reg_4_3, __reg_4_3, __reg_4_3, __reg_4_3, __reg_4_4, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4); __STORE(__h + 1, __reg_5_2); __reg_4_1 = __reg_3_1; __CALC5(__reg_5_2, __reg_5_2, __reg_5_2, __reg_5_4, __reg_5_3, __reg_4_0); __STORE(__h + 2, __reg_5_3); __CALC5(__reg_5_3, __reg_5_3, __reg_5_3, __reg_5_3, __reg_5_4, __reg_4_1); __STORE(__h + 3, __reg_5_4); } } else { for (__h = 21; __h <= __side1LenOl - 5;) { __LOAD(__reg_0, __h); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4); __CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0); __CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3); __STORE(__h - 10, __reg_5_1); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0); __CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3); __CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4); __STORE(__h - 10, __reg_5_2); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4); __CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0); __STORE(__h - 10, __reg_5_3); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); __CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0); __CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3); __CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1); __STORE(__h - 10, __reg_5_4); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0); __CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3); __CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4); __CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2); __STORE(__h - 10, __reg_5_0); __h++; __DB_SWITCH(); __syncthreads(); } if (__h == __side1LenOl) return; __LOAD(__reg_0, __h); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4); __CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0); __CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3); __STORE(__h - 10, __reg_5_1); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0, __h); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0); __CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3); __CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4); 
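/* Software-pipeline drain (interior tiles): each LOAD/CALC/STORE step below
   retires one more in-flight row, stored __halo1 * __side0Len = 10 rows
   behind the load cursor, until __h reaches the overlapped tile height
   __side1LenOl. */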
__STORE(__h - 10, __reg_5_2); __h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h); __CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4); __CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0); __STORE(__h - 10, __reg_5_3); __h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h); __CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); __CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0); __CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3); __CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1); __STORE(__h - 10, __reg_5_4); __h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0); __CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3); __CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4); __CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2); __STORE(__h - 10, __reg_5_0); __h++;
}
}

__global__ void kernel0_4(double *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 2 - 2);
const AN5D_TYPE __c1Pad = (2);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 2 - 2);
const AN5D_TYPE __c2Pad = (2);
#define __c2 c2
const AN5D_TYPE __halo1 = 2;
const AN5D_TYPE __halo2 = 2;
const AN5D_TYPE __side0Len = 4;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 496;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
double __reg_0;
double __reg_1_0; double __reg_1_1; double __reg_1_2; double __reg_1_3; double __reg_1_4;
double __reg_2_0; double __reg_2_1; double __reg_2_2; double __reg_2_3; double __reg_2_4;
double __reg_3_0; double __reg_3_1; double __reg_3_2; double __reg_3_3; double __reg_3_4;
double __reg_4_0; double __reg_4_1; double __reg_4_2; double __reg_4_3; double __reg_4_4;
__shared__ double __a_sb_double[__blockSize * 2];
double *__a_sb = __a_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2);
const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3);
const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4);
const AN5D_TYPE __storeValid = __writeValid4;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_0_wrap(__rn0, __a) do { __rn0 = (0.03125f * (__SBREF(__a_sb, -2))) + (0.03126f * (__SBREF(__a_sb, -1))) + (0.03127f * (__REGREF(__a, 0))) + (0.03128f * (__SBREF(__a_sb, 1))) + (0.03129f * (__SBREF(__a_sb, 2))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_0(out, a) do { __CALCEXPR_0_wrap(out, a); } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_1_wrap(__rn0, __a) do { __rn0 = (0.03130f * (__SBREF(__a_sb, -2))) + (0.03131f * (__SBREF(__a_sb, -1))) + (0.03132f * (__REGREF(__a, 0))) + (0.03133f * (__SBREF(__a_sb, 1))) + (0.03134f * (__SBREF(__a_sb, 2))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_1(out, a) do { double etmp; __CALCEXPR_1_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_2_wrap(__rn0, __a) do { __rn0 = (0.03135f * (__SBREF(__a_sb, -2))) + (0.03136f * (__SBREF(__a_sb, -1))) + (0.24712f * (__REGREF(__a, 0))) + (0.03138f * (__SBREF(__a_sb, 1))) + (0.03139f * (__SBREF(__a_sb, 2))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_2(out, a) do { double etmp; __CALCEXPR_2_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_3_wrap(__rn0, __a) do { __rn0 = (0.03140f * (__SBREF(__a_sb, -2))) + (0.03141f * (__SBREF(__a_sb, -1))) + (0.03142f * (__REGREF(__a, 0))) + (0.03143f * (__SBREF(__a_sb, 1))) + (0.03144f * (__SBREF(__a_sb, 2))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_3(out, a) do { double etmp; __CALCEXPR_3_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_4_wrap(__rn0, __a) do { __rn0 = (0.03145f * (__SBREF(__a_sb, -2))) + (0.03146f * (__SBREF(__a_sb, -1))) + (0.03147f * (__REGREF(__a, 0))) + (0.03148f * (__SBREF(__a_sb, 1))) + (0.03149f * (__SBREF(__a_sb, 2))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_4(out, a) do { double etmp; __CALCEXPR_4_wrap(etmp, a); out += etmp; } while (0);
#define __CALCEXPR(out0, out1, out2, out3, out4, reg) do { __CALCEXPR_0(out0, reg); __CALCEXPR_1(out1, reg); __CALCEXPR_2(out2, reg); __CALCEXPR_3(out3, reg); __CALCEXPR_4(out4, reg); } while (0);
#define __CALC1(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid1) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC2(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid2) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC3(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid3) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC4(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid4) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __STORE(h, out) do { if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __DEST = out; }} while (0)
if (__c1Id == 0)
{
__LOAD(__reg_0, 0); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_0); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_0); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_0);
__LOAD(__reg_0, 1); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0); __CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_0); __CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_0); __CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_0);
__LOAD(__reg_0, 2); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__LOAD(__reg_0, 3); __CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__LOAD(__reg_0, 4); __CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); __CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__LOAD(__reg_0, 5); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0); __CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__LOAD(__reg_0, 6); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4); __CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__LOAD(__reg_0, 7); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0); __CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2,
__reg_3_1, __reg_2_3); __LOAD(__reg_0, 8); __CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4); __CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2); __LOAD(__reg_0, 9); __CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); __CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0); __CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3); __LOAD(__reg_0, 10); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0); __CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3); __CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4); __STORE(2, __reg_4_2); __LOAD(__reg_0, 11); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4); __CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0); __STORE(3, __reg_4_3); __LOAD(__reg_0, 12); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0); __CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3); __CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1); __STORE(4, __reg_4_4); __LOAD(__reg_0, 13); __CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4); __CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2); __STORE(5, __reg_4_0); __LOAD(__reg_0, 14); __CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); __CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0); __CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3); __STORE(6, __reg_4_1); __LOAD(__reg_0, 15); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0); __CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3); __CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4); __STORE(7, __reg_4_2); __LOAD(__reg_0, 16); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4); __CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0); __STORE(8, __reg_4_3); } else { __LOAD(__reg_0, 0); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0); __LOAD(__reg_0, 1); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0); __LOAD(__reg_0, 2); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __LOAD(__reg_0, 3); __CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); __LOAD(__reg_0, 4); __CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); __CALC2(__reg_2_4, 
__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2); __LOAD(__reg_0, 5); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0); __CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3); __LOAD(__reg_0, 6); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4); __LOAD(__reg_0, 7); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0); __LOAD(__reg_0, 8); __CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4); __LOAD(__reg_0, 9); __CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); __CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0); __LOAD(__reg_0, 10); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0); __CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3); __CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1); __LOAD(__reg_0, 11); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4); __CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2); __LOAD(__reg_0, 12); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0); __CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3); __CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1); __LOAD(__reg_0, 13); __CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4); __CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2); __LOAD(__reg_0, 14); __CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); __CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0); __CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3); __LOAD(__reg_0, 15); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0); __CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3); __CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4); __LOAD(__reg_0, 16); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4); __CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0); __STORE(8, __reg_4_3); } __a_sb = __a_sb_double + __blockSize * 0; if (__c1Id == __side1Num - 1) { for (__h = 17; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 7;) { __LOAD(__reg_0, __h); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0); __CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3); 
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1); __STORE(__h - 8, __reg_4_4); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4); __CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2); __STORE(__h - 8, __reg_4_0); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); __CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0); __CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3); __STORE(__h - 8, __reg_4_1); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0); __CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3); __CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4); __STORE(__h - 8, __reg_4_2); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4); __CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0); __STORE(__h - 8, __reg_4_3); __h++; } if (0) {} else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0, __h + 0); __CALC1(__reg_1_4, __reg_1_4, __reg_1_4, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0); __CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3); __CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1); __STORE(__h - 8, __reg_4_4); __reg_1_2 = __reg_0; __LOAD(__reg_0, __h + 1); __CALC1(__reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_1, __reg_0); __CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4); __CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2); __STORE(__h - 7, __reg_4_0); __reg_1_3 = __reg_0; __CALC2(__reg_2_4, __reg_2_4, __reg_2_4, __reg_2_1, __reg_2_0, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0); __CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3); __STORE(__h - 6, __reg_4_1); __reg_2_2 = __reg_1_2; __CALC2(__reg_2_0, __reg_2_0, __reg_2_0, __reg_2_0, __reg_2_1, __reg_1_3); __CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4); __STORE(__h - 5, __reg_4_2); __reg_2_3 = __reg_1_3; __CALC3(__reg_3_4, __reg_3_4, __reg_3_4, __reg_3_1, __reg_3_0, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0); __STORE(__h - 4, __reg_4_3); __reg_3_2 = __reg_2_2; __CALC3(__reg_3_0, __reg_3_0, __reg_3_0, __reg_3_0, __reg_3_1, __reg_2_3); __CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1); __STORE(__h - 3, __reg_4_4); __reg_3_3 = __reg_2_3; __CALC4(__reg_4_4, __reg_4_4, __reg_4_4, __reg_4_1, __reg_4_0, __reg_3_2); __STORE(__h - 2, __reg_4_0); __CALC4(__reg_4_0, __reg_4_0, __reg_4_0, __reg_4_0, __reg_4_1, __reg_3_3); __STORE(__h - 1, __reg_4_1); } else if (__h + 3 == 
__c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0, __h + 0); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0); __CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3); __CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1); __STORE(__h - 8, __reg_4_4); __LOAD(__reg_0, __h + 1); __CALC1(__reg_1_0, __reg_1_0, __reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4); __CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2); __STORE(__h - 7, __reg_4_0); __reg_1_3 = __reg_0; __LOAD(__reg_0, __h + 2); __CALC1(__reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_2, __reg_0); __CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0); __CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3); __STORE(__h - 6, __reg_4_1); __reg_1_4 = __reg_0; __CALC2(__reg_2_0, __reg_2_0, __reg_2_0, __reg_2_2, __reg_2_1, __reg_1_3); __CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4); __STORE(__h - 5, __reg_4_2); __reg_2_3 = __reg_1_3; __CALC2(__reg_2_1, __reg_2_1, __reg_2_1, __reg_2_1, __reg_2_2, __reg_1_4); __CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0); __STORE(__h - 4, __reg_4_3); __reg_2_4 = __reg_1_4; __CALC3(__reg_3_0, __reg_3_0, __reg_3_0, __reg_3_2, __reg_3_1, __reg_2_3); __CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1); __STORE(__h - 3, __reg_4_4); __reg_3_3 = __reg_2_3; __CALC3(__reg_3_1, __reg_3_1, __reg_3_1, __reg_3_1, __reg_3_2, __reg_2_4); __CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2); __STORE(__h - 2, __reg_4_0); __reg_3_4 = __reg_2_4; __CALC4(__reg_4_0, __reg_4_0, __reg_4_0, __reg_4_2, __reg_4_1, __reg_3_3); __STORE(__h - 1, __reg_4_1); __CALC4(__reg_4_1, __reg_4_1, __reg_4_1, __reg_4_1, __reg_4_2, __reg_3_4); __STORE(__h + 0, __reg_4_2); } else if (__h + 4 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0, __h + 0); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0); __CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3); __CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1); __STORE(__h - 8, __reg_4_4); __LOAD(__reg_0, __h + 1); __CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4); __CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2); __STORE(__h - 7, __reg_4_0); __LOAD(__reg_0, __h + 2); __CALC1(__reg_1_1, __reg_1_1, __reg_1_1, __reg_1_3, __reg_1_2, __reg_0); __CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0); __CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3); __STORE(__h - 6, __reg_4_1); __reg_1_4 = __reg_0; __LOAD(__reg_0, __h + 3); __CALC1(__reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_3, 
__reg_0); __CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3); __CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4); __STORE(__h - 5, __reg_4_2); __reg_1_0 = __reg_0; __CALC2(__reg_2_1, __reg_2_1, __reg_2_1, __reg_2_3, __reg_2_2, __reg_1_4); __CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0); __STORE(__h - 4, __reg_4_3); __reg_2_4 = __reg_1_4; __CALC2(__reg_2_2, __reg_2_2, __reg_2_2, __reg_2_2, __reg_2_3, __reg_1_0); __CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3); __CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1); __STORE(__h - 3, __reg_4_4); __reg_2_0 = __reg_1_0; __CALC3(__reg_3_1, __reg_3_1, __reg_3_1, __reg_3_3, __reg_3_2, __reg_2_4); __CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2); __STORE(__h - 2, __reg_4_0); __reg_3_4 = __reg_2_4; __CALC3(__reg_3_2, __reg_3_2, __reg_3_2, __reg_3_2, __reg_3_3, __reg_2_0); __CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3); __STORE(__h - 1, __reg_4_1); __reg_3_0 = __reg_2_0; __CALC4(__reg_4_1, __reg_4_1, __reg_4_1, __reg_4_3, __reg_4_2, __reg_3_4); __STORE(__h + 0, __reg_4_2); __CALC4(__reg_4_2, __reg_4_2, __reg_4_2, __reg_4_2, __reg_4_3, __reg_3_0); __STORE(__h + 1, __reg_4_3); } else if (__h + 5 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0, __h + 0); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0); __CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3); __CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1); __STORE(__h - 8, __reg_4_4); __LOAD(__reg_0, __h + 1); __CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4); __CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2); __STORE(__h - 7, __reg_4_0); __LOAD(__reg_0, __h + 2); __CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); __CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0); __CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3); __STORE(__h - 6, __reg_4_1); __LOAD(__reg_0, __h + 3); __CALC1(__reg_1_2, __reg_1_2, __reg_1_2, __reg_1_4, __reg_1_3, __reg_0); __CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3); __CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4); __STORE(__h - 5, __reg_4_2); __reg_1_0 = __reg_0; __LOAD(__reg_0, __h + 4); __CALC1(__reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_4, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4); __CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0); __STORE(__h - 4, __reg_4_3); __reg_1_1 = __reg_0; __CALC2(__reg_2_2, __reg_2_2, __reg_2_2, __reg_2_4, __reg_2_3, __reg_1_0); __CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3); __CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1); 
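/* Tile-bottom drain: the surrounding (__h + k == ...) branches appear to
   handle the last few rows of a tile, where __CALC* invocations with repeated
   register arguments (e.g. __CALC1(__reg_1_2, __reg_1_2, __reg_1_2,
   __reg_1_2, __reg_1_3, __reg_0)) discard outputs falling outside the tile
   while the remaining __STORE calls flush rows still buffered in the deeper
   pipeline stages. */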
__STORE(__h - 3, __reg_4_4); __reg_2_0 = __reg_1_0; __CALC2(__reg_2_3, __reg_2_3, __reg_2_3, __reg_2_3, __reg_2_4, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4); __CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2); __STORE(__h - 2, __reg_4_0); __reg_2_1 = __reg_1_1; __CALC3(__reg_3_2, __reg_3_2, __reg_3_2, __reg_3_4, __reg_3_3, __reg_2_0); __CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3); __STORE(__h - 1, __reg_4_1); __reg_3_0 = __reg_2_0; __CALC3(__reg_3_3, __reg_3_3, __reg_3_3, __reg_3_3, __reg_3_4, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4); __STORE(__h + 0, __reg_4_2); __reg_3_1 = __reg_2_1; __CALC4(__reg_4_2, __reg_4_2, __reg_4_2, __reg_4_4, __reg_4_3, __reg_3_0); __STORE(__h + 1, __reg_4_3); __CALC4(__reg_4_3, __reg_4_3, __reg_4_3, __reg_4_3, __reg_4_4, __reg_3_1); __STORE(__h + 2, __reg_4_4); } else if (__h + 6 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0, __h + 0); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0); __CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3); __CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1); __STORE(__h - 8, __reg_4_4); __LOAD(__reg_0, __h + 1); __CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4); __CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2); __STORE(__h - 7, __reg_4_0); __LOAD(__reg_0, __h + 2); __CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); __CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0); __CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3); __STORE(__h - 6, __reg_4_1); __LOAD(__reg_0, __h + 3); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0); __CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3); __CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4); __STORE(__h - 5, __reg_4_2); __LOAD(__reg_0, __h + 4); __CALC1(__reg_1_3, __reg_1_3, __reg_1_3, __reg_1_0, __reg_1_4, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4); __CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0); __STORE(__h - 4, __reg_4_3); __reg_1_1 = __reg_0; __LOAD(__reg_0, __h + 5); __CALC1(__reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_0, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0); __CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3); __CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1); __STORE(__h - 3, __reg_4_4); __reg_1_2 = __reg_0; __CALC2(__reg_2_3, __reg_2_3, __reg_2_3, __reg_2_0, __reg_2_4, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4); __CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2); __STORE(__h - 2, __reg_4_0); __reg_2_1 = __reg_1_1; __CALC2(__reg_2_4, __reg_2_4, __reg_2_4, __reg_2_4, __reg_2_0, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, 
__reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0); __CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3); __STORE(__h - 1, __reg_4_1); __reg_2_2 = __reg_1_2; __CALC3(__reg_3_3, __reg_3_3, __reg_3_3, __reg_3_0, __reg_3_4, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4); __STORE(__h + 0, __reg_4_2); __reg_3_1 = __reg_2_1; __CALC3(__reg_3_4, __reg_3_4, __reg_3_4, __reg_3_4, __reg_3_0, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0); __STORE(__h + 1, __reg_4_3); __reg_3_2 = __reg_2_2; __CALC4(__reg_4_3, __reg_4_3, __reg_4_3, __reg_4_0, __reg_4_4, __reg_3_1); __STORE(__h + 2, __reg_4_4); __CALC4(__reg_4_4, __reg_4_4, __reg_4_4, __reg_4_4, __reg_4_0, __reg_3_2); __STORE(__h + 3, __reg_4_0); } } else { for (__h = 17; __h <= __side1LenOl - 5;) { __LOAD(__reg_0, __h); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0); __CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3); __CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1); __STORE(__h - 8, __reg_4_4); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4); __CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2); __STORE(__h - 8, __reg_4_0); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); __CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0); __CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3); __STORE(__h - 8, __reg_4_1); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0); __CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3); __CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4); __STORE(__h - 8, __reg_4_2); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4); __CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0); __STORE(__h - 8, __reg_4_3); __h++; } if (__h == __side1LenOl) return; __LOAD(__reg_0, __h); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0); __CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3); __CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1); __STORE(__h - 8, __reg_4_4); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0, __h); __CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4); __CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2); __STORE(__h - 8, __reg_4_0); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0, __h); __CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); __CALC2(__reg_2_4, 
__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0); __CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3); __STORE(__h - 8, __reg_4_1); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0, __h); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0); __CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3); __CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4); __STORE(__h - 8, __reg_4_2); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0, __h); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4); __CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0); __STORE(__h - 8, __reg_4_3); __h++; } } __global__ void kernel0_3(double *A, int dimsize, int timestep, int c0) { #ifndef AN5D_TYPE #define AN5D_TYPE unsigned #endif const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0); #define __c0 c0 const AN5D_TYPE __c1Len = (dimsize - 2 - 2); const AN5D_TYPE __c1Pad = (2); #define __c1 c1 const AN5D_TYPE __c2Len = (dimsize - 2 - 2); const AN5D_TYPE __c2Pad = (2); #define __c2 c2 const AN5D_TYPE __halo1 = 2; const AN5D_TYPE __halo2 = 2; const AN5D_TYPE __side0Len = 3; const AN5D_TYPE __side1Len = 512; const AN5D_TYPE __side2Len = 500; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len; const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len; const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x; const AN5D_TYPE __local_c2 = __tid; const AN5D_TYPE __c1Id = blockIdx.x / __side2Num; const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2; double __reg_0; double __reg_1_0; double __reg_1_1; double __reg_1_2; double __reg_1_3; double __reg_1_4; double __reg_2_0; double __reg_2_1; double __reg_2_2; double __reg_2_3; double __reg_2_4; double __reg_3_0; double __reg_3_1; double __reg_3_2; double __reg_3_3; double __reg_3_4; __shared__ double __a_sb_double[__blockSize * 2]; double *__a_sb = __a_sb_double; const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2; const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len; const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1); const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2); const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3); const AN5D_TYPE __storeValid = __writeValid3; AN5D_TYPE __c1; AN5D_TYPE __h; const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id; #define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0) #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define 
__SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR_0_wrap(__rn0, __a) do { __rn0 = (((((((((((((((((((((((((0.03125f * (__SBREF(__a_sb, -2))) + (0.03126f * (__SBREF(__a_sb, -1)))) + (0.03127f * (__REGREF(__a, 0)))) + (0.03128f * (__SBREF(__a_sb, 1)))) + (0.03129f * (__SBREF(__a_sb, 2)))))))))))))))))))))))); } while (0) #define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0) #define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0) #define __CALCEXPR_0(out, a) do { __CALCEXPR_0_wrap(out, a); } while (0); #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR_1_wrap(__rn0, __a) do { __rn0 = (((((((((((((((((((((0.03130f * (__SBREF(__a_sb, -2)))) + (0.03131f * (__SBREF(__a_sb, -1)))) + (0.03132f * (__REGREF(__a, 0)))) + (0.03133f * (__SBREF(__a_sb, 1)))) + (0.03134f * (__SBREF(__a_sb, 2))))))))))))))))))); } while (0) #define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0) #define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0) #define __CALCEXPR_1(out, a) do { double etmp; __CALCEXPR_1_wrap(etmp, a); out += etmp; } while (0); #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR_2_wrap(__rn0, __a) do { __rn0 = ((((((((((((((((0.03135f * (__SBREF(__a_sb, -2)))) + (0.03136f * (__SBREF(__a_sb, -1)))) + (0.24712f * (__REGREF(__a, 0)))) + (0.03138f * (__SBREF(__a_sb, 1)))) + (0.03139f * (__SBREF(__a_sb, 2)))))))))))))); } while (0) #define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0) #define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0) #define __CALCEXPR_2(out, a) do { double etmp; __CALCEXPR_2_wrap(etmp, a); out += etmp; } while (0); #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR_3_wrap(__rn0, __a) do { __rn0 = (((((((((((0.03140f * (__SBREF(__a_sb, -2)))) + (0.03141f * (__SBREF(__a_sb, -1)))) + (0.03142f * (__REGREF(__a, 0)))) + (0.03143f * (__SBREF(__a_sb, 1)))) + (0.03144f * (__SBREF(__a_sb, 2))))))))); } while (0) #define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0) #define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0) #define __CALCEXPR_3(out, a) do { double etmp; __CALCEXPR_3_wrap(etmp, a); out += etmp; } while (0); #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR_4_wrap(__rn0, __a) do { __rn0 = ((((((0.03145f * (__SBREF(__a_sb, -2)))) + (0.03146f * (__SBREF(__a_sb, -1)))) + (0.03147f * (__REGREF(__a, 0)))) + (0.03148f * (__SBREF(__a_sb, 1)))) + (0.03149f * (__SBREF(__a_sb, 2)))); } while (0) #define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? 
__blockSize : 0]; } while (0) #define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0) #define __CALCEXPR_4(out, a) do { double etmp; __CALCEXPR_4_wrap(etmp, a); out += etmp; } while (0); #define __CALCEXPR(out0, out1, out2, out3, out4, reg) do { __CALCEXPR_0(out0, reg); __CALCEXPR_1(out1, reg); __CALCEXPR_2(out2, reg); __CALCEXPR_3(out3, reg); __CALCEXPR_4(out4, reg); } while (0); #define __CALC1(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid1) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0) #define __CALC2(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid2) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0) #define __CALC3(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid3) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0) #define __STORE(h, out) do { if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __DEST = out; }} while (0) if (__c1Id == 0) { __LOAD(__reg_0, 0); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_0); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_0); __LOAD(__reg_0, 1); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0); __CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_0); __CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_0); __LOAD(__reg_0, 2); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __LOAD(__reg_0, 3); __CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); __LOAD(__reg_0, 4); __CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); __CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2); __LOAD(__reg_0, 5); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0); __CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3); __LOAD(__reg_0, 6); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4); __CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2); __LOAD(__reg_0, 7); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0); __CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3); __LOAD(__reg_0, 8); __CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4); __STORE(2, __reg_3_2); __LOAD(__reg_0, 9); __CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); __CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0); __STORE(3, __reg_3_3); __LOAD(__reg_0, 10); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0); __CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3); __CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1); __STORE(4, __reg_3_4); __LOAD(__reg_0, 11); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4); __CALC3(__reg_3_4, __reg_3_3, 
__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2); __STORE(5, __reg_3_0); __LOAD(__reg_0, 12); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0); __CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3); __STORE(6, __reg_3_1); } else { __LOAD(__reg_0, 0); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0); __LOAD(__reg_0, 1); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0); __LOAD(__reg_0, 2); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __LOAD(__reg_0, 3); __CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); __LOAD(__reg_0, 4); __CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); __CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2); __LOAD(__reg_0, 5); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0); __CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3); __LOAD(__reg_0, 6); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4); __LOAD(__reg_0, 7); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0); __LOAD(__reg_0, 8); __CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4); __LOAD(__reg_0, 9); __CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); __CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0); __LOAD(__reg_0, 10); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0); __CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3); __CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1); __LOAD(__reg_0, 11); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4); __CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2); __LOAD(__reg_0, 12); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0); __CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3); __STORE(6, __reg_3_1); } __a_sb = __a_sb_double + __blockSize * 1; if (__c1Id == __side1Num - 1) { for (__h = 13; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 7;) { __LOAD(__reg_0, __h); __CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4); __STORE(__h - 6, __reg_3_2); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); __CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0); __STORE(__h - 6, __reg_3_3); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0); __CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3); __CALC3(__reg_3_3, __reg_3_2, __reg_3_1, 
__reg_3_0, __reg_3_4, __reg_2_1); __STORE(__h - 6, __reg_3_4); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4); __CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2); __STORE(__h - 6, __reg_3_0); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0); __CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3); __STORE(__h - 6, __reg_3_1); __h++; __DB_SWITCH(); __syncthreads(); } if (0) {} else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0, __h + 0); __CALC1(__reg_1_0, __reg_1_0, __reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4); __STORE(__h - 6, __reg_3_2); __reg_1_3 = __reg_0; __LOAD(__reg_0, __h + 1); __CALC1(__reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_2, __reg_0); __CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0); __STORE(__h - 5, __reg_3_3); __reg_1_4 = __reg_0; __CALC2(__reg_2_0, __reg_2_0, __reg_2_0, __reg_2_2, __reg_2_1, __reg_1_3); __CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1); __STORE(__h - 4, __reg_3_4); __reg_2_3 = __reg_1_3; __CALC2(__reg_2_1, __reg_2_1, __reg_2_1, __reg_2_1, __reg_2_2, __reg_1_4); __CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2); __STORE(__h - 3, __reg_3_0); __reg_2_4 = __reg_1_4; __CALC3(__reg_3_0, __reg_3_0, __reg_3_0, __reg_3_2, __reg_3_1, __reg_2_3); __STORE(__h - 2, __reg_3_1); __CALC3(__reg_3_1, __reg_3_1, __reg_3_1, __reg_3_1, __reg_3_2, __reg_2_4); __STORE(__h - 1, __reg_3_2); } else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0, __h + 0); __CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4); __STORE(__h - 6, __reg_3_2); __LOAD(__reg_0, __h + 1); __CALC1(__reg_1_1, __reg_1_1, __reg_1_1, __reg_1_3, __reg_1_2, __reg_0); __CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0); __STORE(__h - 5, __reg_3_3); __reg_1_4 = __reg_0; __LOAD(__reg_0, __h + 2); __CALC1(__reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_3, __reg_0); __CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3); __CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1); __STORE(__h - 4, __reg_3_4); __reg_1_0 = __reg_0; __CALC2(__reg_2_1, __reg_2_1, __reg_2_1, __reg_2_3, __reg_2_2, __reg_1_4); __CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2); __STORE(__h - 3, __reg_3_0); __reg_2_4 = __reg_1_4; __CALC2(__reg_2_2, __reg_2_2, __reg_2_2, __reg_2_2, __reg_2_3, __reg_1_0); __CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3); __STORE(__h - 2, __reg_3_1); __reg_2_0 = __reg_1_0; __CALC3(__reg_3_1, __reg_3_1, __reg_3_1, __reg_3_3, __reg_3_2, __reg_2_4); __STORE(__h - 1, __reg_3_2); __CALC3(__reg_3_2, __reg_3_2, __reg_3_2, __reg_3_2, __reg_3_3, __reg_2_0); __STORE(__h + 0, __reg_3_3); } else if (__h + 4 == __c1Len - __side1Len * 
__c1Id + __halo1 * 2) { __LOAD(__reg_0, __h + 0); __CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4); __STORE(__h - 6, __reg_3_2); __LOAD(__reg_0, __h + 1); __CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); __CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0); __STORE(__h - 5, __reg_3_3); __LOAD(__reg_0, __h + 2); __CALC1(__reg_1_2, __reg_1_2, __reg_1_2, __reg_1_4, __reg_1_3, __reg_0); __CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3); __CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1); __STORE(__h - 4, __reg_3_4); __reg_1_0 = __reg_0; __LOAD(__reg_0, __h + 3); __CALC1(__reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_4, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4); __CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2); __STORE(__h - 3, __reg_3_0); __reg_1_1 = __reg_0; __CALC2(__reg_2_2, __reg_2_2, __reg_2_2, __reg_2_4, __reg_2_3, __reg_1_0); __CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3); __STORE(__h - 2, __reg_3_1); __reg_2_0 = __reg_1_0; __CALC2(__reg_2_3, __reg_2_3, __reg_2_3, __reg_2_3, __reg_2_4, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4); __STORE(__h - 1, __reg_3_2); __reg_2_1 = __reg_1_1; __CALC3(__reg_3_2, __reg_3_2, __reg_3_2, __reg_3_4, __reg_3_3, __reg_2_0); __STORE(__h + 0, __reg_3_3); __CALC3(__reg_3_3, __reg_3_3, __reg_3_3, __reg_3_3, __reg_3_4, __reg_2_1); __STORE(__h + 1, __reg_3_4); } else if (__h + 5 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0, __h + 0); __CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4); __STORE(__h - 6, __reg_3_2); __LOAD(__reg_0, __h + 1); __CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); __CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0); __STORE(__h - 5, __reg_3_3); __LOAD(__reg_0, __h + 2); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0); __CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3); __CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1); __STORE(__h - 4, __reg_3_4); __LOAD(__reg_0, __h + 3); __CALC1(__reg_1_3, __reg_1_3, __reg_1_3, __reg_1_0, __reg_1_4, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4); __CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2); __STORE(__h - 3, __reg_3_0); __reg_1_1 = __reg_0; __LOAD(__reg_0, __h + 4); __CALC1(__reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_0, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0); __CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3); __STORE(__h - 2, __reg_3_1); __reg_1_2 = __reg_0; __CALC2(__reg_2_3, __reg_2_3, __reg_2_3, __reg_2_0, __reg_2_4, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4); __STORE(__h - 1, __reg_3_2); __reg_2_1 = __reg_1_1; __CALC2(__reg_2_4, __reg_2_4, __reg_2_4, __reg_2_4, 
__reg_2_0, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0); __STORE(__h + 0, __reg_3_3); __reg_2_2 = __reg_1_2; __CALC3(__reg_3_3, __reg_3_3, __reg_3_3, __reg_3_0, __reg_3_4, __reg_2_1); __STORE(__h + 1, __reg_3_4); __CALC3(__reg_3_4, __reg_3_4, __reg_3_4, __reg_3_4, __reg_3_0, __reg_2_2); __STORE(__h + 2, __reg_3_0); } else if (__h + 6 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0, __h + 0); __CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4); __STORE(__h - 6, __reg_3_2); __LOAD(__reg_0, __h + 1); __CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); __CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0); __STORE(__h - 5, __reg_3_3); __LOAD(__reg_0, __h + 2); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0); __CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3); __CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1); __STORE(__h - 4, __reg_3_4); __LOAD(__reg_0, __h + 3); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4); __CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2); __STORE(__h - 3, __reg_3_0); __LOAD(__reg_0, __h + 4); __CALC1(__reg_1_4, __reg_1_4, __reg_1_4, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0); __CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3); __STORE(__h - 2, __reg_3_1); __reg_1_2 = __reg_0; __LOAD(__reg_0, __h + 5); __CALC1(__reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_1, __reg_0); __CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4); __STORE(__h - 1, __reg_3_2); __reg_1_3 = __reg_0; __CALC2(__reg_2_4, __reg_2_4, __reg_2_4, __reg_2_1, __reg_2_0, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0); __STORE(__h + 0, __reg_3_3); __reg_2_2 = __reg_1_2; __CALC2(__reg_2_0, __reg_2_0, __reg_2_0, __reg_2_0, __reg_2_1, __reg_1_3); __CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1); __STORE(__h + 1, __reg_3_4); __reg_2_3 = __reg_1_3; __CALC3(__reg_3_4, __reg_3_4, __reg_3_4, __reg_3_1, __reg_3_0, __reg_2_2); __STORE(__h + 2, __reg_3_0); __CALC3(__reg_3_0, __reg_3_0, __reg_3_0, __reg_3_0, __reg_3_1, __reg_2_3); __STORE(__h + 3, __reg_3_1); } } else { for (__h = 13; __h <= __side1LenOl - 5;) { __LOAD(__reg_0, __h); __CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4); __STORE(__h - 6, __reg_3_2); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); __CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0); __STORE(__h - 6, __reg_3_3); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0); __CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3); __CALC3(__reg_3_3, 
__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1); __STORE(__h - 6, __reg_3_4); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4); __CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2); __STORE(__h - 6, __reg_3_0); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0); __CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3); __STORE(__h - 6, __reg_3_1); __h++; __DB_SWITCH(); __syncthreads(); } if (__h == __side1LenOl) return; __LOAD(__reg_0, __h); __CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4); __STORE(__h - 6, __reg_3_2); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0, __h); __CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); __CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0); __STORE(__h - 6, __reg_3_3); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0, __h); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0); __CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3); __CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1); __STORE(__h - 6, __reg_3_4); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0, __h); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4); __CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2); __STORE(__h - 6, __reg_3_0); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0, __h); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0); __CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3); __STORE(__h - 6, __reg_3_1); __h++; } } __global__ void kernel0_2(double *A, int dimsize, int timestep, int c0) { #ifndef AN5D_TYPE #define AN5D_TYPE unsigned #endif const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0); #define __c0 c0 const AN5D_TYPE __c1Len = (dimsize - 2 - 2); const AN5D_TYPE __c1Pad = (2); #define __c1 c1 const AN5D_TYPE __c2Len = (dimsize - 2 - 2); const AN5D_TYPE __c2Pad = (2); #define __c2 c2 const AN5D_TYPE __halo1 = 2; const AN5D_TYPE __halo2 = 2; const AN5D_TYPE __side0Len = 2; const AN5D_TYPE __side1Len = 512; const AN5D_TYPE __side2Len = 504; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len; const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len; const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x; const AN5D_TYPE __local_c2 = __tid; const AN5D_TYPE __c1Id = blockIdx.x / __side2Num; const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2; double __reg_0; double __reg_1_0; double __reg_1_1; double __reg_1_2; double 
__reg_1_3; double __reg_1_4; double __reg_2_0; double __reg_2_1; double __reg_2_2; double __reg_2_3; double __reg_2_4; __shared__ double __a_sb_double[__blockSize * 2]; double *__a_sb = __a_sb_double; const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2; const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len; const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1); const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2); const AN5D_TYPE __storeValid = __writeValid2; AN5D_TYPE __c1; AN5D_TYPE __h; const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id; #define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0) #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR_0_wrap(__rn0, __a) do { __rn0 = (((((((((((((((((((((((((0.03125f * (__SBREF(__a_sb, -2))) + (0.03126f * (__SBREF(__a_sb, -1)))) + (0.03127f * (__REGREF(__a, 0)))) + (0.03128f * (__SBREF(__a_sb, 1)))) + (0.03129f * (__SBREF(__a_sb, 2)))))))))))))))))))))))); } while (0) #define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0) #define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0) #define __CALCEXPR_0(out, a) do { __CALCEXPR_0_wrap(out, a); } while (0); #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR_1_wrap(__rn0, __a) do { __rn0 = (((((((((((((((((((((0.03130f * (__SBREF(__a_sb, -2)))) + (0.03131f * (__SBREF(__a_sb, -1)))) + (0.03132f * (__REGREF(__a, 0)))) + (0.03133f * (__SBREF(__a_sb, 1)))) + (0.03134f * (__SBREF(__a_sb, 2))))))))))))))))))); } while (0) #define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0) #define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0) #define __CALCEXPR_1(out, a) do { double etmp; __CALCEXPR_1_wrap(etmp, a); out += etmp; } while (0); #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR_2_wrap(__rn0, __a) do { __rn0 = ((((((((((((((((0.03135f * (__SBREF(__a_sb, -2)))) + (0.03136f * (__SBREF(__a_sb, -1)))) + (0.24712f * (__REGREF(__a, 0)))) + (0.03138f * (__SBREF(__a_sb, 1)))) + (0.03139f * (__SBREF(__a_sb, 2)))))))))))))); } while (0) #define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? 
__blockSize : 0]; } while (0) #define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0) #define __CALCEXPR_2(out, a) do { double etmp; __CALCEXPR_2_wrap(etmp, a); out += etmp; } while (0); #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR_3_wrap(__rn0, __a) do { __rn0 = (((((((((((0.03140f * (__SBREF(__a_sb, -2)))) + (0.03141f * (__SBREF(__a_sb, -1)))) + (0.03142f * (__REGREF(__a, 0)))) + (0.03143f * (__SBREF(__a_sb, 1)))) + (0.03144f * (__SBREF(__a_sb, 2))))))))); } while (0) #define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0) #define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0) #define __CALCEXPR_3(out, a) do { double etmp; __CALCEXPR_3_wrap(etmp, a); out += etmp; } while (0); #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR_4_wrap(__rn0, __a) do { __rn0 = ((((((0.03145f * (__SBREF(__a_sb, -2)))) + (0.03146f * (__SBREF(__a_sb, -1)))) + (0.03147f * (__REGREF(__a, 0)))) + (0.03148f * (__SBREF(__a_sb, 1)))) + (0.03149f * (__SBREF(__a_sb, 2)))); } while (0) #define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0) #define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0) #define __CALCEXPR_4(out, a) do { double etmp; __CALCEXPR_4_wrap(etmp, a); out += etmp; } while (0); #define __CALCEXPR(out0, out1, out2, out3, out4, reg) do { __CALCEXPR_0(out0, reg); __CALCEXPR_1(out1, reg); __CALCEXPR_2(out2, reg); __CALCEXPR_3(out3, reg); __CALCEXPR_4(out4, reg); } while (0); #define __CALC1(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid1) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0) #define __CALC2(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid2) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0) #define __STORE(h, out) do { if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __DEST = out; }} while (0) if (__c1Id == 0) { __LOAD(__reg_0, 0); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_0); __LOAD(__reg_0, 1); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0); __CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_0); __LOAD(__reg_0, 2); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __LOAD(__reg_0, 3); __CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); __LOAD(__reg_0, 4); __CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); __CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2); __LOAD(__reg_0, 5); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0); __CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3); __LOAD(__reg_0, 6); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4); __STORE(2, __reg_2_2); __LOAD(__reg_0, 7); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0); __STORE(3, __reg_2_3); 
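/* Warm-up for this two-step kernel's first tile (__c1Id == 0): rows 0..8 are
   loaded and fed through both fused time steps before the steady-state loop;
   with __side0Len = 2 and __halo1 = 2 the stores trail the loads by 4 rows,
   so the prologue emits __STORE for rows 2..4 only. */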
__LOAD(__reg_0, 8); __CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1); __STORE(4, __reg_2_4); } else { __LOAD(__reg_0, 0); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0); __LOAD(__reg_0, 1); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0); __LOAD(__reg_0, 2); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __LOAD(__reg_0, 3); __CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); __LOAD(__reg_0, 4); __CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); __CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2); __LOAD(__reg_0, 5); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0); __CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3); __LOAD(__reg_0, 6); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4); __LOAD(__reg_0, 7); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0); __LOAD(__reg_0, 8); __CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1); __STORE(4, __reg_2_4); } __a_sb = __a_sb_double + __blockSize * 0; if (__c1Id == __side1Num - 1) { for (__h = 9; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 7;) { __LOAD(__reg_0, __h); __CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); __CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2); __STORE(__h - 4, __reg_2_0); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0); __CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3); __STORE(__h - 4, __reg_2_1); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4); __STORE(__h - 4, __reg_2_2); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0); __STORE(__h - 4, __reg_2_3); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1); __STORE(__h - 4, __reg_2_4); __h++; } if (0) {} else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0, __h + 0); __CALC1(__reg_1_1, __reg_1_1, __reg_1_1, __reg_1_3, __reg_1_2, __reg_0); __CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2); __STORE(__h - 4, __reg_2_0); __reg_1_4 = __reg_0; __LOAD(__reg_0, __h + 1); __CALC1(__reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_3, __reg_0); __CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3); __STORE(__h - 3, __reg_2_1); __reg_1_0 = __reg_0; __CALC2(__reg_2_1, __reg_2_1, __reg_2_1, __reg_2_3, __reg_2_2, __reg_1_4); __STORE(__h - 2, __reg_2_2); __CALC2(__reg_2_2, __reg_2_2, __reg_2_2, __reg_2_2, __reg_2_3, __reg_1_0); __STORE(__h - 1, __reg_2_3); } else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0, __h + 0); __CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); __CALC2(__reg_2_4, __reg_2_3, 
__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2); __STORE(__h - 4, __reg_2_0); __LOAD(__reg_0, __h + 1); __CALC1(__reg_1_2, __reg_1_2, __reg_1_2, __reg_1_4, __reg_1_3, __reg_0); __CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3); __STORE(__h - 3, __reg_2_1); __reg_1_0 = __reg_0; __LOAD(__reg_0, __h + 2); __CALC1(__reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_4, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4); __STORE(__h - 2, __reg_2_2); __reg_1_1 = __reg_0; __CALC2(__reg_2_2, __reg_2_2, __reg_2_2, __reg_2_4, __reg_2_3, __reg_1_0); __STORE(__h - 1, __reg_2_3); __CALC2(__reg_2_3, __reg_2_3, __reg_2_3, __reg_2_3, __reg_2_4, __reg_1_1); __STORE(__h + 0, __reg_2_4); } else if (__h + 4 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0, __h + 0); __CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); __CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2); __STORE(__h - 4, __reg_2_0); __LOAD(__reg_0, __h + 1); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0); __CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3); __STORE(__h - 3, __reg_2_1); __LOAD(__reg_0, __h + 2); __CALC1(__reg_1_3, __reg_1_3, __reg_1_3, __reg_1_0, __reg_1_4, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4); __STORE(__h - 2, __reg_2_2); __reg_1_1 = __reg_0; __LOAD(__reg_0, __h + 3); __CALC1(__reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_0, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0); __STORE(__h - 1, __reg_2_3); __reg_1_2 = __reg_0; __CALC2(__reg_2_3, __reg_2_3, __reg_2_3, __reg_2_0, __reg_2_4, __reg_1_1); __STORE(__h + 0, __reg_2_4); __CALC2(__reg_2_4, __reg_2_4, __reg_2_4, __reg_2_4, __reg_2_0, __reg_1_2); __STORE(__h + 1, __reg_2_0); } else if (__h + 5 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0, __h + 0); __CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); __CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2); __STORE(__h - 4, __reg_2_0); __LOAD(__reg_0, __h + 1); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0); __CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3); __STORE(__h - 3, __reg_2_1); __LOAD(__reg_0, __h + 2); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4); __STORE(__h - 2, __reg_2_2); __LOAD(__reg_0, __h + 3); __CALC1(__reg_1_4, __reg_1_4, __reg_1_4, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0); __STORE(__h - 1, __reg_2_3); __reg_1_2 = __reg_0; __LOAD(__reg_0, __h + 4); __CALC1(__reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_1, __reg_0); __CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1); __STORE(__h + 0, __reg_2_4); __reg_1_3 = __reg_0; __CALC2(__reg_2_4, __reg_2_4, __reg_2_4, __reg_2_1, __reg_2_0, __reg_1_2); __STORE(__h + 1, __reg_2_0); __CALC2(__reg_2_0, __reg_2_0, __reg_2_0, __reg_2_0, __reg_2_1, __reg_1_3); __STORE(__h + 2, __reg_2_1); } else if (__h + 6 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0, __h + 0); __CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); __CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2); __STORE(__h - 4, __reg_2_0); __LOAD(__reg_0, __h + 1); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, 
__reg_1_3, __reg_0); __CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3); __STORE(__h - 3, __reg_2_1); __LOAD(__reg_0, __h + 2); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4); __STORE(__h - 2, __reg_2_2); __LOAD(__reg_0, __h + 3); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0); __STORE(__h - 1, __reg_2_3); __LOAD(__reg_0, __h + 4); __CALC1(__reg_1_0, __reg_1_0, __reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1); __STORE(__h + 0, __reg_2_4); __reg_1_3 = __reg_0; __LOAD(__reg_0, __h + 5); __CALC1(__reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_2, __reg_0); __CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2); __STORE(__h + 1, __reg_2_0); __reg_1_4 = __reg_0; __CALC2(__reg_2_0, __reg_2_0, __reg_2_0, __reg_2_2, __reg_2_1, __reg_1_3); __STORE(__h + 2, __reg_2_1); __CALC2(__reg_2_1, __reg_2_1, __reg_2_1, __reg_2_1, __reg_2_2, __reg_1_4); __STORE(__h + 3, __reg_2_2); } } else { for (__h = 9; __h <= __side1LenOl - 5;) { __LOAD(__reg_0, __h); __CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); __CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2); __STORE(__h - 4, __reg_2_0); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0); __CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3); __STORE(__h - 4, __reg_2_1); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4); __STORE(__h - 4, __reg_2_2); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0); __STORE(__h - 4, __reg_2_3); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1); __STORE(__h - 4, __reg_2_4); __h++; } if (__h == __side1LenOl) return; __LOAD(__reg_0, __h); __CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); __CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2); __STORE(__h - 4, __reg_2_0); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0, __h); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0); __CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3); __STORE(__h - 4, __reg_2_1); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0, __h); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4); __STORE(__h - 4, __reg_2_2); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0, __h); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0); __STORE(__h - 4, __reg_2_3); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0, __h); __CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1); __STORE(__h - 4, __reg_2_4); __h++; } } __global__ void kernel0_1(double *A, int dimsize, int timestep, int c0) { #ifndef 
AN5D_TYPE #define AN5D_TYPE unsigned #endif const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0); #define __c0 c0 const AN5D_TYPE __c1Len = (dimsize - 2 - 2); const AN5D_TYPE __c1Pad = (2); #define __c1 c1 const AN5D_TYPE __c2Len = (dimsize - 2 - 2); const AN5D_TYPE __c2Pad = (2); #define __c2 c2 const AN5D_TYPE __halo1 = 2; const AN5D_TYPE __halo2 = 2; const AN5D_TYPE __side0Len = 1; const AN5D_TYPE __side1Len = 512; const AN5D_TYPE __side2Len = 508; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len; const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len; const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x; const AN5D_TYPE __local_c2 = __tid; const AN5D_TYPE __c1Id = blockIdx.x / __side2Num; const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2; double __reg_0; double __reg_1_0; double __reg_1_1; double __reg_1_2; double __reg_1_3; double __reg_1_4; __shared__ double __a_sb_double[__blockSize * 2]; double *__a_sb = __a_sb_double; const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2; const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len; const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1); const AN5D_TYPE __storeValid = __writeValid1; AN5D_TYPE __c1; AN5D_TYPE __h; const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id; #define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0) #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR_0_wrap(__rn0, __a) do { __rn0 = (((((((((((((((((((((((((0.03125f * (__SBREF(__a_sb, -2))) + (0.03126f * (__SBREF(__a_sb, -1)))) + (0.03127f * (__REGREF(__a, 0)))) + (0.03128f * (__SBREF(__a_sb, 1)))) + (0.03129f * (__SBREF(__a_sb, 2)))))))))))))))))))))))); } while (0) #define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0) #define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0) #define __CALCEXPR_0(out, a) do { __CALCEXPR_0_wrap(out, a); } while (0); #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR_1_wrap(__rn0, __a) do { __rn0 = (((((((((((((((((((((0.03130f * (__SBREF(__a_sb, -2)))) + (0.03131f * (__SBREF(__a_sb, -1)))) + (0.03132f * (__REGREF(__a, 0)))) + (0.03133f * (__SBREF(__a_sb, 1)))) + (0.03134f * (__SBREF(__a_sb, 2))))))))))))))))))); } while (0) #define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? 
__blockSize : 0]; } while (0) #define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0) #define __CALCEXPR_1(out, a) do { double etmp; __CALCEXPR_1_wrap(etmp, a); out += etmp; } while (0); #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR_2_wrap(__rn0, __a) do { __rn0 = ((((((((((((((((0.03135f * (__SBREF(__a_sb, -2)))) + (0.03136f * (__SBREF(__a_sb, -1)))) + (0.24712f * (__REGREF(__a, 0)))) + (0.03138f * (__SBREF(__a_sb, 1)))) + (0.03139f * (__SBREF(__a_sb, 2)))))))))))))); } while (0) #define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0) #define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0) #define __CALCEXPR_2(out, a) do { double etmp; __CALCEXPR_2_wrap(etmp, a); out += etmp; } while (0); #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR_3_wrap(__rn0, __a) do { __rn0 = (((((((((((0.03140f * (__SBREF(__a_sb, -2)))) + (0.03141f * (__SBREF(__a_sb, -1)))) + (0.03142f * (__REGREF(__a, 0)))) + (0.03143f * (__SBREF(__a_sb, 1)))) + (0.03144f * (__SBREF(__a_sb, 2))))))))); } while (0) #define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0) #define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0) #define __CALCEXPR_3(out, a) do { double etmp; __CALCEXPR_3_wrap(etmp, a); out += etmp; } while (0); #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR_4_wrap(__rn0, __a) do { __rn0 = ((((((0.03145f * (__SBREF(__a_sb, -2)))) + (0.03146f * (__SBREF(__a_sb, -1)))) + (0.03147f * (__REGREF(__a, 0)))) + (0.03148f * (__SBREF(__a_sb, 1)))) + (0.03149f * (__SBREF(__a_sb, 2)))); } while (0) #define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? 
__blockSize : 0]; } while (0) #define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0) #define __CALCEXPR_4(out, a) do { double etmp; __CALCEXPR_4_wrap(etmp, a); out += etmp; } while (0); #define __CALCEXPR(out0, out1, out2, out3, out4, reg) do { __CALCEXPR_0(out0, reg); __CALCEXPR_1(out1, reg); __CALCEXPR_2(out2, reg); __CALCEXPR_3(out3, reg); __CALCEXPR_4(out4, reg); } while (0); #define __CALC1(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid1) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0) #define __STORE(h, out) do { if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __DEST = out; }} while (0) if (__c1Id == 0) { __LOAD(__reg_0, 0); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0); __LOAD(__reg_0, 1); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0); __LOAD(__reg_0, 2); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __LOAD(__reg_0, 3); __CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); __LOAD(__reg_0, 4); __CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); __STORE(2, __reg_1_2); } else { __LOAD(__reg_0, 0); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0); __LOAD(__reg_0, 1); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0); __LOAD(__reg_0, 2); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __LOAD(__reg_0, 3); __CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); __LOAD(__reg_0, 4); __CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); __STORE(2, __reg_1_2); } __a_sb = __a_sb_double + __blockSize * 1; if (__c1Id == __side1Num - 1) { for (__h = 5; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 7;) { __LOAD(__reg_0, __h); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0); __STORE(__h - 2, __reg_1_3); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0); __STORE(__h - 2, __reg_1_4); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __STORE(__h - 2, __reg_1_0); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); __STORE(__h - 2, __reg_1_1); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); __STORE(__h - 2, __reg_1_2); __h++; __DB_SWITCH(); __syncthreads(); } if (0) {} else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0, __h + 0); __CALC1(__reg_1_2, __reg_1_2, __reg_1_2, __reg_1_4, __reg_1_3, __reg_0); __STORE(__h - 2, __reg_1_3); __LOAD(__reg_0, __h + 1); __CALC1(__reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_4, __reg_0); __STORE(__h - 1, __reg_1_4); } else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0, __h + 0); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0); __STORE(__h - 2, __reg_1_3); __LOAD(__reg_0, __h + 1); __CALC1(__reg_1_3, __reg_1_3, __reg_1_3, __reg_1_0, __reg_1_4, __reg_0); __STORE(__h - 1, __reg_1_4); __LOAD(__reg_0, __h + 2); __CALC1(__reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_0, __reg_0); __STORE(__h + 0, __reg_1_0); } else if (__h + 4 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0, __h + 0); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0); __STORE(__h - 2, __reg_1_3); __LOAD(__reg_0, __h + 1); 
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0); __STORE(__h - 1, __reg_1_4); __LOAD(__reg_0, __h + 2); __CALC1(__reg_1_4, __reg_1_4, __reg_1_4, __reg_1_1, __reg_1_0, __reg_0); __STORE(__h + 0, __reg_1_0); __LOAD(__reg_0, __h + 3); __CALC1(__reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_1, __reg_0); __STORE(__h + 1, __reg_1_1); } else if (__h + 5 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0, __h + 0); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0); __STORE(__h - 2, __reg_1_3); __LOAD(__reg_0, __h + 1); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0); __STORE(__h - 1, __reg_1_4); __LOAD(__reg_0, __h + 2); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __STORE(__h + 0, __reg_1_0); __LOAD(__reg_0, __h + 3); __CALC1(__reg_1_0, __reg_1_0, __reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __STORE(__h + 1, __reg_1_1); __LOAD(__reg_0, __h + 4); __CALC1(__reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_2, __reg_0); __STORE(__h + 2, __reg_1_2); } else if (__h + 6 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0, __h + 0); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0); __STORE(__h - 2, __reg_1_3); __LOAD(__reg_0, __h + 1); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0); __STORE(__h - 1, __reg_1_4); __LOAD(__reg_0, __h + 2); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __STORE(__h + 0, __reg_1_0); __LOAD(__reg_0, __h + 3); __CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); __STORE(__h + 1, __reg_1_1); __LOAD(__reg_0, __h + 4); __CALC1(__reg_1_1, __reg_1_1, __reg_1_1, __reg_1_3, __reg_1_2, __reg_0); __STORE(__h + 2, __reg_1_2); __LOAD(__reg_0, __h + 5); __CALC1(__reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_3, __reg_0); __STORE(__h + 3, __reg_1_3); } } else { for (__h = 5; __h <= __side1LenOl - 5;) { __LOAD(__reg_0, __h); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0); __STORE(__h - 2, __reg_1_3); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0); __STORE(__h - 2, __reg_1_4); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __STORE(__h - 2, __reg_1_0); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); __STORE(__h - 2, __reg_1_1); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); __STORE(__h - 2, __reg_1_2); __h++; __DB_SWITCH(); __syncthreads(); } if (__h == __side1LenOl) return; __LOAD(__reg_0, __h); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0); __STORE(__h - 2, __reg_1_3); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0, __h); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0); __STORE(__h - 2, __reg_1_4); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0, __h); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __STORE(__h - 2, __reg_1_0); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0, __h); __CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); __STORE(__h - 2, __reg_1_1); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0, __h); __CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); __STORE(__h - 2, __reg_1_2); __h++; } }
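/* Editorial note (assumption: AN5D-style temporal-blocking codegen): kernel0_1
   above is the degree-1 variant of the same stencil (__side0Len = 1, a single
   __CALC1 stage), presumably used to mop up leftover time steps that the more
   deeply fused kernels in this file do not cover. */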
914d16091cce64fd5f68c08d0499a0a6ba9845ef.cu
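/* Overview (editorial comment; every constant below is read off the generated
   code itself): box2d2r-512-9-512 is a 2D 5x5 box stencil (halo radius 2 in
   both dimensions) that ping-pongs between two time planes of A selected by
   c0 % 2. kernel0_9 fuses 9 time steps per launch (__side0Len = 9): each step
   k keeps a five-register rotation __reg_k_0..__reg_k_4 over rows of
   dimension 1, while dimension-2 neighbours are served from the
   double-buffered shared-memory row __a_sb_double (swapped by __DB_SWITCH).
   Each fused step shrinks the valid thread range by one halo
   (__writeValid1..__writeValid9), and __STORE trails __LOAD by
   __halo1 * __side0Len = 18 rows once the pipeline is full.

   Hypothetical host-side launch consistent with this indexing (a sketch, not
   part of the original file): blockDim.x must equal
   __side2LenOl = 476 + 2*18 = 512, and gridDim.x = __side1Num * __side2Num:

     const int len = dimsize - 4;                  // __c1Len == __c2Len
     dim3 block(512, 1);                           // __side2LenOl threads
     dim3 grid(((len + 511) / 512) * ((len + 475) / 476));
     kernel0_9<<<grid, block>>>(A, dimsize, timestep, c0);

   A is assumed to hold two dimsize * dimsize planes of doubles. */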
#include "box2d2r-512-9-512_kernel.hu" __device__ double __sbref_wrap(double *sb, size_t index) { return sb[index]; } __global__ void kernel0_9(double *A, int dimsize, int timestep, int c0) { #ifndef AN5D_TYPE #define AN5D_TYPE unsigned #endif const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0); #define __c0 c0 const AN5D_TYPE __c1Len = (dimsize - 2 - 2); const AN5D_TYPE __c1Pad = (2); #define __c1 c1 const AN5D_TYPE __c2Len = (dimsize - 2 - 2); const AN5D_TYPE __c2Pad = (2); #define __c2 c2 const AN5D_TYPE __halo1 = 2; const AN5D_TYPE __halo2 = 2; const AN5D_TYPE __side0Len = 9; const AN5D_TYPE __side1Len = 512; const AN5D_TYPE __side2Len = 476; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len; const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len; const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x; const AN5D_TYPE __local_c2 = __tid; const AN5D_TYPE __c1Id = blockIdx.x / __side2Num; const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2; double __reg_0; double __reg_1_0; double __reg_1_1; double __reg_1_2; double __reg_1_3; double __reg_1_4; double __reg_2_0; double __reg_2_1; double __reg_2_2; double __reg_2_3; double __reg_2_4; double __reg_3_0; double __reg_3_1; double __reg_3_2; double __reg_3_3; double __reg_3_4; double __reg_4_0; double __reg_4_1; double __reg_4_2; double __reg_4_3; double __reg_4_4; double __reg_5_0; double __reg_5_1; double __reg_5_2; double __reg_5_3; double __reg_5_4; double __reg_6_0; double __reg_6_1; double __reg_6_2; double __reg_6_3; double __reg_6_4; double __reg_7_0; double __reg_7_1; double __reg_7_2; double __reg_7_3; double __reg_7_4; double __reg_8_0; double __reg_8_1; double __reg_8_2; double __reg_8_3; double __reg_8_4; double __reg_9_0; double __reg_9_1; double __reg_9_2; double __reg_9_3; double __reg_9_4; __shared__ double __a_sb_double[__blockSize * 2]; double *__a_sb = __a_sb_double; const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2; const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len; const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1); const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2); const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3); const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4); const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5); const AN5D_TYPE __writeValid6 = __updateValid && __local_c2 >= (__halo2 * 6) && __local_c2 < __side2LenOl - (__halo2 * 6); const AN5D_TYPE __writeValid7 = __updateValid && __local_c2 >= (__halo2 * 7) && __local_c2 < __side2LenOl - (__halo2 * 7); const AN5D_TYPE __writeValid8 = __updateValid && __local_c2 >= (__halo2 * 8) && __local_c2 < __side2LenOl - (__halo2 * 8); const AN5D_TYPE __writeValid9 = __updateValid && __local_c2 >= (__halo2 * 9) && __local_c2 < __side2LenOl - (__halo2 * 9); const AN5D_TYPE 
__storeValid = __writeValid9; AN5D_TYPE __c1; AN5D_TYPE __h; const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id; #define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0) #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR_0_wrap(__rn0, __a) do { __rn0 = (((((((((((((((((((((((((0.03125f * (__SBREF(__a_sb, -2))) + (0.03126f * (__SBREF(__a_sb, -1)))) + (0.03127f * (__REGREF(__a, 0)))) + (0.03128f * (__SBREF(__a_sb, 1)))) + (0.03129f * (__SBREF(__a_sb, 2)))))))))))))))))))))))); } while (0) #define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0) #define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0) #define __CALCEXPR_0(out, a) do { __CALCEXPR_0_wrap(out, a); } while (0); #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR_1_wrap(__rn0, __a) do { __rn0 = (((((((((((((((((((((0.03130f * (__SBREF(__a_sb, -2)))) + (0.03131f * (__SBREF(__a_sb, -1)))) + (0.03132f * (__REGREF(__a, 0)))) + (0.03133f * (__SBREF(__a_sb, 1)))) + (0.03134f * (__SBREF(__a_sb, 2))))))))))))))))))); } while (0) #define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0) #define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0) #define __CALCEXPR_1(out, a) do { double etmp; __CALCEXPR_1_wrap(etmp, a); out += etmp; } while (0); #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR_2_wrap(__rn0, __a) do { __rn0 = ((((((((((((((((0.03135f * (__SBREF(__a_sb, -2)))) + (0.03136f * (__SBREF(__a_sb, -1)))) + (0.24712f * (__REGREF(__a, 0)))) + (0.03138f * (__SBREF(__a_sb, 1)))) + (0.03139f * (__SBREF(__a_sb, 2)))))))))))))); } while (0) #define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0) #define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0) #define __CALCEXPR_2(out, a) do { double etmp; __CALCEXPR_2_wrap(etmp, a); out += etmp; } while (0); #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR_3_wrap(__rn0, __a) do { __rn0 = (((((((((((0.03140f * (__SBREF(__a_sb, -2)))) + (0.03141f * (__SBREF(__a_sb, -1)))) + (0.03142f * (__REGREF(__a, 0)))) + (0.03143f * (__SBREF(__a_sb, 1)))) + (0.03144f * (__SBREF(__a_sb, 2))))))))); } while (0) #define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? 
__blockSize : 0]; } while (0) #define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0) #define __CALCEXPR_3(out, a) do { double etmp; __CALCEXPR_3_wrap(etmp, a); out += etmp; } while (0); #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR_4_wrap(__rn0, __a) do { __rn0 = ((((((0.03145f * (__SBREF(__a_sb, -2)))) + (0.03146f * (__SBREF(__a_sb, -1)))) + (0.03147f * (__REGREF(__a, 0)))) + (0.03148f * (__SBREF(__a_sb, 1)))) + (0.03149f * (__SBREF(__a_sb, 2)))); } while (0) #define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0) #define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0) #define __CALCEXPR_4(out, a) do { double etmp; __CALCEXPR_4_wrap(etmp, a); out += etmp; } while (0); #define __CALCEXPR(out0, out1, out2, out3, out4, reg) do { __CALCEXPR_0(out0, reg); __CALCEXPR_1(out1, reg); __CALCEXPR_2(out2, reg); __CALCEXPR_3(out3, reg); __CALCEXPR_4(out4, reg); } while (0); #define __CALC1(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid1) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0) #define __CALC2(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid2) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0) #define __CALC3(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid3) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0) #define __CALC4(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid4) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0) #define __CALC5(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid5) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0) #define __CALC6(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid6) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0) #define __CALC7(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid7) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0) #define __CALC8(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid8) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0) #define __CALC9(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid9) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0) #define __STORE(h, out) do { if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __DEST = out; }} while (0) if (__c1Id == 0) { __LOAD(__reg_0, 0); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_0); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_0); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_0); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_0); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_0); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_0); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_0); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_0); __LOAD(__reg_0, 1); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, 
__reg_1_0, __reg_1_4, __reg_0); __CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_0); __CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_0); __CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_0); __CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_0); __CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_0); __CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_0); __CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_0); __CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_0); __LOAD(__reg_0, 2); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __LOAD(__reg_0, 3); __CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); __LOAD(__reg_0, 4); __CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); __CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2); __LOAD(__reg_0, 5); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0); __CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3); __LOAD(__reg_0, 6); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4); __CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2); __LOAD(__reg_0, 7); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0); __CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3); __LOAD(__reg_0, 8); __CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4); __CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2); __LOAD(__reg_0, 9); __CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); __CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0); __CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3); __LOAD(__reg_0, 10); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0); __CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3); __CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4); __CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2); __LOAD(__reg_0, 11); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4); __CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0); __CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3); __LOAD(__reg_0, 12); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0); __CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3); __CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4); __CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2); __LOAD(__reg_0, 13); 
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4); __CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0); __CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3); __LOAD(__reg_0, 14); __CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); __CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0); __CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3); __CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4); __CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2); __LOAD(__reg_0, 15); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0); __CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3); __CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4); __CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0); __CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3); __LOAD(__reg_0, 16); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4); __CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0); __CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3); __CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4); __CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2); __LOAD(__reg_0, 17); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0); __CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3); __CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4); __CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0); __CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3); __LOAD(__reg_0, 18); __CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4); __CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0); __CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3); __CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4); __CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2); __LOAD(__reg_0, 19); __CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, 
__reg_0); __CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0); __CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3); __CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4); __CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0); __CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3); __LOAD(__reg_0, 20); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0); __CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3); __CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4); __CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0); __CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3); __CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4); __STORE(2, __reg_9_2); __LOAD(__reg_0, 21); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4); __CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0); __CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3); __CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4); __CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0); __STORE(3, __reg_9_3); __LOAD(__reg_0, 22); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0); __CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3); __CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4); __CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0); __CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3); __CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1); __STORE(4, __reg_9_4); __LOAD(__reg_0, 23); __CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4); __CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0); __CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3); __CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4); __CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2); __STORE(5, __reg_9_0); __LOAD(__reg_0, 24); __CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, 
__reg_1_2, __reg_0); __CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0); __CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3); __CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4); __CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0); __CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3); __STORE(6, __reg_9_1); __LOAD(__reg_0, 25); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0); __CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3); __CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4); __CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0); __CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3); __CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4); __STORE(7, __reg_9_2); __LOAD(__reg_0, 26); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4); __CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0); __CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3); __CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4); __CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0); __STORE(8, __reg_9_3); __LOAD(__reg_0, 27); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0); __CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3); __CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4); __CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0); __CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3); __CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1); __STORE(9, __reg_9_4); __LOAD(__reg_0, 28); __CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4); __CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0); __CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3); __CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4); __CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2); __STORE(10, __reg_9_0); __LOAD(__reg_0, 29); __CALC1(__reg_1_1, 
__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); __CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0); __CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3); __CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4); __CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0); __CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3); __STORE(11, __reg_9_1); __LOAD(__reg_0, 30); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0); __CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3); __CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4); __CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0); __CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3); __CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4); __STORE(12, __reg_9_2); __LOAD(__reg_0, 31); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4); __CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0); __CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3); __CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4); __CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0); __STORE(13, __reg_9_3); __LOAD(__reg_0, 32); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0); __CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3); __CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4); __CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0); __CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3); __CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1); __STORE(14, __reg_9_4); __LOAD(__reg_0, 33); __CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4); __CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0); __CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3); __CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4); __CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2); __STORE(15, __reg_9_0); 
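/* Prologue warm-up: from __LOAD(.., 20) onward every load pushes one fully
   processed row out of the nine-stage pipeline, so each __STORE(h, ..) here
   writes row h = load_index - 18 until the steady-state loop at __h = 37
   takes over. */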
__LOAD(__reg_0, 34); __CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); __CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0); __CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3); __CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4); __CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0); __CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3); __STORE(16, __reg_9_1); __LOAD(__reg_0, 35); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0); __CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3); __CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4); __CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0); __CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3); __CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4); __STORE(17, __reg_9_2); __LOAD(__reg_0, 36); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4); __CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0); __CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3); __CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4); __CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0); __STORE(18, __reg_9_3); } else { __LOAD(__reg_0, 0); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0); __LOAD(__reg_0, 1); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0); __LOAD(__reg_0, 2); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __LOAD(__reg_0, 3); __CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); __LOAD(__reg_0, 4); __CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); __CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2); __LOAD(__reg_0, 5); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0); __CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3); __LOAD(__reg_0, 6); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4); __LOAD(__reg_0, 7); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0); __LOAD(__reg_0, 8); __CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4); __LOAD(__reg_0, 9); __CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); 
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0); __LOAD(__reg_0, 10); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0); __CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3); __CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1); __LOAD(__reg_0, 11); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4); __CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2); __LOAD(__reg_0, 12); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0); __CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3); __CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1); __LOAD(__reg_0, 13); __CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4); __CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2); __LOAD(__reg_0, 14); __CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); __CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0); __CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3); __LOAD(__reg_0, 15); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0); __CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3); __CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4); __LOAD(__reg_0, 16); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4); __CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0); __CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3); __LOAD(__reg_0, 17); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0); __CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3); __CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4); __LOAD(__reg_0, 18); __CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4); __CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0); __LOAD(__reg_0, 19); __CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); __CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0); __CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3); __CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1); __LOAD(__reg_0, 20); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, 
__reg_1_4, __reg_1_3, __reg_0); __CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3); __CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4); __CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0); __LOAD(__reg_0, 21); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4); __CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0); __CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3); __CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1); __LOAD(__reg_0, 22); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0); __CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3); __CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4); __CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2); __LOAD(__reg_0, 23); __CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4); __CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0); __CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3); __LOAD(__reg_0, 24); __CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); __CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0); __CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3); __CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4); __CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2); __LOAD(__reg_0, 25); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0); __CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3); __CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4); __CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0); __CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3); __LOAD(__reg_0, 26); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4); __CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0); __CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3); __CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4); __LOAD(__reg_0, 27); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); 
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0); __CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3); __CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4); __CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0); __LOAD(__reg_0, 28); __CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4); __CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0); __CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3); __CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4); __LOAD(__reg_0, 29); __CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); __CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0); __CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3); __CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4); __CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0); __LOAD(__reg_0, 30); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0); __CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3); __CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4); __CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0); __CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3); __CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1); __LOAD(__reg_0, 31); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4); __CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0); __CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3); __CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4); __CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2); __LOAD(__reg_0, 32); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0); __CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3); __CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4); __CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0); __CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3); 
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1); __LOAD(__reg_0, 33); __CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4); __CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0); __CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3); __CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4); __CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2); __LOAD(__reg_0, 34); __CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); __CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0); __CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3); __CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4); __CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0); __CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3); __LOAD(__reg_0, 35); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0); __CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3); __CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4); __CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0); __CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3); __CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4); __LOAD(__reg_0, 36); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4); __CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0); __CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3); __CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4); __CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0); __STORE(18, __reg_9_3); } __a_sb = __a_sb_double + __blockSize * 1; if (__c1Id == __side1Num - 1) { for (__h = 37; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 7;) { __LOAD(__reg_0, __h); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0); __CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3); __CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4); __CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, 
__reg_7_3, __reg_6_0); __CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3); __CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1); __STORE(__h - 18, __reg_9_4); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4); __CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0); __CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3); __CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4); __CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2); __STORE(__h - 18, __reg_9_0); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); __CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0); __CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3); __CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4); __CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0); __CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3); __STORE(__h - 18, __reg_9_1); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0); __CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3); __CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4); __CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0); __CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3); __CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4); __STORE(__h - 18, __reg_9_2); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4); __CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0); __CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3); __CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4); __CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0); __STORE(__h - 18, __reg_9_3); __h++; __DB_SWITCH(); __syncthreads(); } if (0) {} else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0, __h + 0); __CALC1(__reg_1_4, __reg_1_4, __reg_1_4, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0); __CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3); __CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, 
__reg_4_4, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4); __CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0); __CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3); __CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1); __STORE(__h - 18, __reg_9_4); __reg_1_2 = __reg_0; __LOAD(__reg_0, __h + 1); __CALC1(__reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_1, __reg_0); __CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4); __CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0); __CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3); __CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4); __CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2); __STORE(__h - 17, __reg_9_0); __reg_1_3 = __reg_0; __CALC2(__reg_2_4, __reg_2_4, __reg_2_4, __reg_2_1, __reg_2_0, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0); __CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3); __CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4); __CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0); __CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3); __STORE(__h - 16, __reg_9_1); __reg_2_2 = __reg_1_2; __CALC2(__reg_2_0, __reg_2_0, __reg_2_0, __reg_2_0, __reg_2_1, __reg_1_3); __CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4); __CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0); __CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3); __CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4); __STORE(__h - 15, __reg_9_2); __reg_2_3 = __reg_1_3; __CALC3(__reg_3_4, __reg_3_4, __reg_3_4, __reg_3_1, __reg_3_0, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0); __CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3); __CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4); __CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0); __STORE(__h - 14, __reg_9_3); __reg_3_2 = __reg_2_2; __CALC3(__reg_3_0, __reg_3_0, __reg_3_0, __reg_3_0, __reg_3_1, __reg_2_3); __CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4); __CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0); __CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3); __CALC9(__reg_9_3, 
__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1); __STORE(__h - 13, __reg_9_4); __reg_3_3 = __reg_2_3; __CALC4(__reg_4_4, __reg_4_4, __reg_4_4, __reg_4_1, __reg_4_0, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0); __CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3); __CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4); __CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2); __STORE(__h - 12, __reg_9_0); __reg_4_2 = __reg_3_2; __CALC4(__reg_4_0, __reg_4_0, __reg_4_0, __reg_4_0, __reg_4_1, __reg_3_3); __CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4); __CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0); __CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3); __STORE(__h - 11, __reg_9_1); __reg_4_3 = __reg_3_3; __CALC5(__reg_5_4, __reg_5_4, __reg_5_4, __reg_5_1, __reg_5_0, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0); __CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3); __CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4); __STORE(__h - 10, __reg_9_2); __reg_5_2 = __reg_4_2; __CALC5(__reg_5_0, __reg_5_0, __reg_5_0, __reg_5_0, __reg_5_1, __reg_4_3); __CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4); __CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0); __STORE(__h - 9, __reg_9_3); __reg_5_3 = __reg_4_3; __CALC6(__reg_6_4, __reg_6_4, __reg_6_4, __reg_6_1, __reg_6_0, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0); __CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3); __CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1); __STORE(__h - 8, __reg_9_4); __reg_6_2 = __reg_5_2; __CALC6(__reg_6_0, __reg_6_0, __reg_6_0, __reg_6_0, __reg_6_1, __reg_5_3); __CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4); __CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2); __STORE(__h - 7, __reg_9_0); __reg_6_3 = __reg_5_3; __CALC7(__reg_7_4, __reg_7_4, __reg_7_4, __reg_7_1, __reg_7_0, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0); __CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3); __STORE(__h - 6, __reg_9_1); __reg_7_2 = __reg_6_2; __CALC7(__reg_7_0, __reg_7_0, __reg_7_0, __reg_7_0, __reg_7_1, __reg_6_3); __CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4); __STORE(__h - 5, __reg_9_2); __reg_7_3 = __reg_6_3; __CALC8(__reg_8_4, __reg_8_4, __reg_8_4, __reg_8_1, __reg_8_0, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0); __STORE(__h - 4, __reg_9_3); __reg_8_2 = __reg_7_2; __CALC8(__reg_8_0, __reg_8_0, __reg_8_0, __reg_8_0, __reg_8_1, __reg_7_3); __CALC9(__reg_9_3, __reg_9_2, __reg_9_1, 
__reg_9_0, __reg_9_4, __reg_8_1); __STORE(__h - 3, __reg_9_4); __reg_8_3 = __reg_7_3; __CALC9(__reg_9_4, __reg_9_4, __reg_9_4, __reg_9_1, __reg_9_0, __reg_8_2); __STORE(__h - 2, __reg_9_0); __CALC9(__reg_9_0, __reg_9_0, __reg_9_0, __reg_9_0, __reg_9_1, __reg_8_3); __STORE(__h - 1, __reg_9_1); } else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0, __h + 0); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0); __CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3); __CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4); __CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0); __CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3); __CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1); __STORE(__h - 18, __reg_9_4); __LOAD(__reg_0, __h + 1); __CALC1(__reg_1_0, __reg_1_0, __reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4); __CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0); __CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3); __CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4); __CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2); __STORE(__h - 17, __reg_9_0); __reg_1_3 = __reg_0; __LOAD(__reg_0, __h + 2); __CALC1(__reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_2, __reg_0); __CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0); __CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3); __CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4); __CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0); __CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3); __STORE(__h - 16, __reg_9_1); __reg_1_4 = __reg_0; __CALC2(__reg_2_0, __reg_2_0, __reg_2_0, __reg_2_2, __reg_2_1, __reg_1_3); __CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4); __CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0); __CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3); __CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4); __STORE(__h - 15, __reg_9_2); __reg_2_3 = __reg_1_3; __CALC2(__reg_2_1, __reg_2_1, __reg_2_1, __reg_2_1, __reg_2_2, __reg_1_4); __CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0); __CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3); 
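/* Tile-bottom drain: the if/else chain selects a branch by how many rows
   remain (__h + 2 .. __h + 6 == __c1Len - __side1Len * __c1Id + __halo1 * 2).
   Once the last row has been loaded, the remaining stages are flushed with
   clamped arguments: repeated registers, as in __CALC1(__reg_1_0, __reg_1_0,
   __reg_1_0, __reg_1_0, __reg_1_1, __reg_0), stand in for rows beyond the
   tile edge. */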
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4); __CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0); __STORE(__h - 14, __reg_9_3); __reg_2_4 = __reg_1_4; __CALC3(__reg_3_0, __reg_3_0, __reg_3_0, __reg_3_2, __reg_3_1, __reg_2_3); __CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4); __CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0); __CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3); __CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1); __STORE(__h - 13, __reg_9_4); __reg_3_3 = __reg_2_3; __CALC3(__reg_3_1, __reg_3_1, __reg_3_1, __reg_3_1, __reg_3_2, __reg_2_4); __CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0); __CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3); __CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4); __CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2); __STORE(__h - 12, __reg_9_0); __reg_3_4 = __reg_2_4; __CALC4(__reg_4_0, __reg_4_0, __reg_4_0, __reg_4_2, __reg_4_1, __reg_3_3); __CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4); __CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0); __CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3); __STORE(__h - 11, __reg_9_1); __reg_4_3 = __reg_3_3; __CALC4(__reg_4_1, __reg_4_1, __reg_4_1, __reg_4_1, __reg_4_2, __reg_3_4); __CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0); __CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3); __CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4); __STORE(__h - 10, __reg_9_2); __reg_4_4 = __reg_3_4; __CALC5(__reg_5_0, __reg_5_0, __reg_5_0, __reg_5_2, __reg_5_1, __reg_4_3); __CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4); __CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0); __STORE(__h - 9, __reg_9_3); __reg_5_3 = __reg_4_3; __CALC5(__reg_5_1, __reg_5_1, __reg_5_1, __reg_5_1, __reg_5_2, __reg_4_4); __CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0); __CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3); __CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1); __STORE(__h - 8, __reg_9_4); __reg_5_4 = __reg_4_4; __CALC6(__reg_6_0, __reg_6_0, __reg_6_0, __reg_6_2, __reg_6_1, __reg_5_3); __CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, 
__reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4); __CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2); __STORE(__h - 7, __reg_9_0); __reg_6_3 = __reg_5_3; __CALC6(__reg_6_1, __reg_6_1, __reg_6_1, __reg_6_1, __reg_6_2, __reg_5_4); __CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0); __CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3); __STORE(__h - 6, __reg_9_1); __reg_6_4 = __reg_5_4; __CALC7(__reg_7_0, __reg_7_0, __reg_7_0, __reg_7_2, __reg_7_1, __reg_6_3); __CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4); __STORE(__h - 5, __reg_9_2); __reg_7_3 = __reg_6_3; __CALC7(__reg_7_1, __reg_7_1, __reg_7_1, __reg_7_1, __reg_7_2, __reg_6_4); __CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0); __STORE(__h - 4, __reg_9_3); __reg_7_4 = __reg_6_4; __CALC8(__reg_8_0, __reg_8_0, __reg_8_0, __reg_8_2, __reg_8_1, __reg_7_3); __CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1); __STORE(__h - 3, __reg_9_4); __reg_8_3 = __reg_7_3; __CALC8(__reg_8_1, __reg_8_1, __reg_8_1, __reg_8_1, __reg_8_2, __reg_7_4); __CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2); __STORE(__h - 2, __reg_9_0); __reg_8_4 = __reg_7_4; __CALC9(__reg_9_0, __reg_9_0, __reg_9_0, __reg_9_2, __reg_9_1, __reg_8_3); __STORE(__h - 1, __reg_9_1); __CALC9(__reg_9_1, __reg_9_1, __reg_9_1, __reg_9_1, __reg_9_2, __reg_8_4); __STORE(__h + 0, __reg_9_2); } else if (__h + 4 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0, __h + 0); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0); __CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3); __CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4); __CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0); __CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3); __CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1); __STORE(__h - 18, __reg_9_4); __LOAD(__reg_0, __h + 1); __CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4); __CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0); __CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3); __CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4); __CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2); __STORE(__h - 17, __reg_9_0); __LOAD(__reg_0, __h + 2); __CALC1(__reg_1_1, __reg_1_1, __reg_1_1, __reg_1_3, __reg_1_2, __reg_0); __CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0); __CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3); 
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4); __CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0); __CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3); __STORE(__h - 16, __reg_9_1); __reg_1_4 = __reg_0; __LOAD(__reg_0, __h + 3); __CALC1(__reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_3, __reg_0); __CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3); __CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4); __CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0); __CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3); __CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4); __STORE(__h - 15, __reg_9_2); __reg_1_0 = __reg_0; __CALC2(__reg_2_1, __reg_2_1, __reg_2_1, __reg_2_3, __reg_2_2, __reg_1_4); __CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0); __CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3); __CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4); __CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0); __STORE(__h - 14, __reg_9_3); __reg_2_4 = __reg_1_4; __CALC2(__reg_2_2, __reg_2_2, __reg_2_2, __reg_2_2, __reg_2_3, __reg_1_0); __CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3); __CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4); __CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0); __CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3); __CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1); __STORE(__h - 13, __reg_9_4); __reg_2_0 = __reg_1_0; __CALC3(__reg_3_1, __reg_3_1, __reg_3_1, __reg_3_3, __reg_3_2, __reg_2_4); __CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0); __CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3); __CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4); __CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2); __STORE(__h - 12, __reg_9_0); __reg_3_4 = __reg_2_4; __CALC3(__reg_3_2, __reg_3_2, __reg_3_2, __reg_3_2, __reg_3_3, __reg_2_0); __CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3); __CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4); __CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0); __CALC9(__reg_9_0, __reg_9_4, __reg_9_3, 
__reg_9_2, __reg_9_1, __reg_8_3); __STORE(__h - 11, __reg_9_1); __reg_3_0 = __reg_2_0; __CALC4(__reg_4_1, __reg_4_1, __reg_4_1, __reg_4_3, __reg_4_2, __reg_3_4); __CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0); __CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3); __CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4); __STORE(__h - 10, __reg_9_2); __reg_4_4 = __reg_3_4; __CALC4(__reg_4_2, __reg_4_2, __reg_4_2, __reg_4_2, __reg_4_3, __reg_3_0); __CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3); __CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4); __CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0); __STORE(__h - 9, __reg_9_3); __reg_4_0 = __reg_3_0; __CALC5(__reg_5_1, __reg_5_1, __reg_5_1, __reg_5_3, __reg_5_2, __reg_4_4); __CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0); __CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3); __CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1); __STORE(__h - 8, __reg_9_4); __reg_5_4 = __reg_4_4; __CALC5(__reg_5_2, __reg_5_2, __reg_5_2, __reg_5_2, __reg_5_3, __reg_4_0); __CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3); __CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4); __CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2); __STORE(__h - 7, __reg_9_0); __reg_5_0 = __reg_4_0; __CALC6(__reg_6_1, __reg_6_1, __reg_6_1, __reg_6_3, __reg_6_2, __reg_5_4); __CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0); __CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3); __STORE(__h - 6, __reg_9_1); __reg_6_4 = __reg_5_4; __CALC6(__reg_6_2, __reg_6_2, __reg_6_2, __reg_6_2, __reg_6_3, __reg_5_0); __CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3); __CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4); __STORE(__h - 5, __reg_9_2); __reg_6_0 = __reg_5_0; __CALC7(__reg_7_1, __reg_7_1, __reg_7_1, __reg_7_3, __reg_7_2, __reg_6_4); __CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0); __STORE(__h - 4, __reg_9_3); __reg_7_4 = __reg_6_4; __CALC7(__reg_7_2, __reg_7_2, __reg_7_2, __reg_7_2, __reg_7_3, __reg_6_0); __CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3); __CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1); __STORE(__h - 3, __reg_9_4); __reg_7_0 = __reg_6_0; __CALC8(__reg_8_1, __reg_8_1, __reg_8_1, __reg_8_3, __reg_8_2, __reg_7_4); __CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2); __STORE(__h - 2, __reg_9_0); __reg_8_4 = __reg_7_4; __CALC8(__reg_8_2, __reg_8_2, __reg_8_2, __reg_8_2, __reg_8_3, __reg_7_0); __CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, 
__reg_8_3); __STORE(__h - 1, __reg_9_1); __reg_8_0 = __reg_7_0; __CALC9(__reg_9_1, __reg_9_1, __reg_9_1, __reg_9_3, __reg_9_2, __reg_8_4); __STORE(__h + 0, __reg_9_2); __CALC9(__reg_9_2, __reg_9_2, __reg_9_2, __reg_9_2, __reg_9_3, __reg_8_0); __STORE(__h + 1, __reg_9_3); } else if (__h + 5 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0, __h + 0); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0); __CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3); __CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4); __CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0); __CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3); __CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1); __STORE(__h - 18, __reg_9_4); __LOAD(__reg_0, __h + 1); __CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4); __CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0); __CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3); __CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4); __CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2); __STORE(__h - 17, __reg_9_0); __LOAD(__reg_0, __h + 2); __CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); __CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0); __CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3); __CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4); __CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0); __CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3); __STORE(__h - 16, __reg_9_1); __LOAD(__reg_0, __h + 3); __CALC1(__reg_1_2, __reg_1_2, __reg_1_2, __reg_1_4, __reg_1_3, __reg_0); __CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3); __CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4); __CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0); __CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3); __CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4); __STORE(__h - 15, __reg_9_2); __reg_1_0 = __reg_0; __LOAD(__reg_0, __h + 4); __CALC1(__reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_4, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4); __CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2); __CALC4(__reg_4_2, 
__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0); __CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3); __CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4); __CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0); __STORE(__h - 14, __reg_9_3); __reg_1_1 = __reg_0; __CALC2(__reg_2_2, __reg_2_2, __reg_2_2, __reg_2_4, __reg_2_3, __reg_1_0); __CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3); __CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4); __CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0); __CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3); __CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1); __STORE(__h - 13, __reg_9_4); __reg_2_0 = __reg_1_0; __CALC2(__reg_2_3, __reg_2_3, __reg_2_3, __reg_2_3, __reg_2_4, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4); __CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0); __CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3); __CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4); __CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2); __STORE(__h - 12, __reg_9_0); __reg_2_1 = __reg_1_1; __CALC3(__reg_3_2, __reg_3_2, __reg_3_2, __reg_3_4, __reg_3_3, __reg_2_0); __CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3); __CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4); __CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0); __CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3); __STORE(__h - 11, __reg_9_1); __reg_3_0 = __reg_2_0; __CALC3(__reg_3_3, __reg_3_3, __reg_3_3, __reg_3_3, __reg_3_4, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4); __CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0); __CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3); __CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4); __STORE(__h - 10, __reg_9_2); __reg_3_1 = __reg_2_1; __CALC4(__reg_4_2, __reg_4_2, __reg_4_2, __reg_4_4, __reg_4_3, __reg_3_0); __CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3); __CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4); __CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0); __STORE(__h - 9, __reg_9_3); __reg_4_0 = __reg_3_0; __CALC4(__reg_4_3, __reg_4_3, __reg_4_3, __reg_4_3, __reg_4_4, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, 
__reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4); __CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0); __CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3); __CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1); __STORE(__h - 8, __reg_9_4); __reg_4_1 = __reg_3_1; __CALC5(__reg_5_2, __reg_5_2, __reg_5_2, __reg_5_4, __reg_5_3, __reg_4_0); __CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3); __CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4); __CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2); __STORE(__h - 7, __reg_9_0); __reg_5_0 = __reg_4_0; __CALC5(__reg_5_3, __reg_5_3, __reg_5_3, __reg_5_3, __reg_5_4, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4); __CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0); __CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3); __STORE(__h - 6, __reg_9_1); __reg_5_1 = __reg_4_1; __CALC6(__reg_6_2, __reg_6_2, __reg_6_2, __reg_6_4, __reg_6_3, __reg_5_0); __CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3); __CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4); __STORE(__h - 5, __reg_9_2); __reg_6_0 = __reg_5_0; __CALC6(__reg_6_3, __reg_6_3, __reg_6_3, __reg_6_3, __reg_6_4, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4); __CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0); __STORE(__h - 4, __reg_9_3); __reg_6_1 = __reg_5_1; __CALC7(__reg_7_2, __reg_7_2, __reg_7_2, __reg_7_4, __reg_7_3, __reg_6_0); __CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3); __CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1); __STORE(__h - 3, __reg_9_4); __reg_7_0 = __reg_6_0; __CALC7(__reg_7_3, __reg_7_3, __reg_7_3, __reg_7_3, __reg_7_4, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4); __CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2); __STORE(__h - 2, __reg_9_0); __reg_7_1 = __reg_6_1; __CALC8(__reg_8_2, __reg_8_2, __reg_8_2, __reg_8_4, __reg_8_3, __reg_7_0); __CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3); __STORE(__h - 1, __reg_9_1); __reg_8_0 = __reg_7_0; __CALC8(__reg_8_3, __reg_8_3, __reg_8_3, __reg_8_3, __reg_8_4, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4); __STORE(__h + 0, __reg_9_2); __reg_8_1 = __reg_7_1; __CALC9(__reg_9_2, __reg_9_2, __reg_9_2, __reg_9_4, __reg_9_3, __reg_8_0); __STORE(__h + 1, __reg_9_3); __CALC9(__reg_9_3, __reg_9_3, __reg_9_3, __reg_9_3, __reg_9_4, __reg_8_1); __STORE(__h + 2, __reg_9_4); } else if (__h + 6 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0, __h + 0); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0); __CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3); __CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_4, 
__reg_5_3, __reg_5_2, __reg_4_4); __CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0); __CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3); __CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1); __STORE(__h - 18, __reg_9_4); __LOAD(__reg_0, __h + 1); __CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4); __CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0); __CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3); __CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4); __CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2); __STORE(__h - 17, __reg_9_0); __LOAD(__reg_0, __h + 2); __CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); __CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0); __CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3); __CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4); __CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0); __CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3); __STORE(__h - 16, __reg_9_1); __LOAD(__reg_0, __h + 3); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0); __CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3); __CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4); __CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0); __CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3); __CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4); __STORE(__h - 15, __reg_9_2); __LOAD(__reg_0, __h + 4); __CALC1(__reg_1_3, __reg_1_3, __reg_1_3, __reg_1_0, __reg_1_4, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4); __CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0); __CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3); __CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4); __CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0); __STORE(__h - 14, __reg_9_3); __reg_1_1 = __reg_0; __LOAD(__reg_0, __h + 5); __CALC1(__reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_0, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0); __CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3); 
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4); __CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0); __CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3); __CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1); __STORE(__h - 13, __reg_9_4); __reg_1_2 = __reg_0; __CALC2(__reg_2_3, __reg_2_3, __reg_2_3, __reg_2_0, __reg_2_4, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4); __CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0); __CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3); __CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4); __CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2); __STORE(__h - 12, __reg_9_0); __reg_2_1 = __reg_1_1; __CALC2(__reg_2_4, __reg_2_4, __reg_2_4, __reg_2_4, __reg_2_0, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0); __CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3); __CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4); __CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0); __CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3); __STORE(__h - 11, __reg_9_1); __reg_2_2 = __reg_1_2; __CALC3(__reg_3_3, __reg_3_3, __reg_3_3, __reg_3_0, __reg_3_4, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4); __CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0); __CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3); __CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4); __STORE(__h - 10, __reg_9_2); __reg_3_1 = __reg_2_1; __CALC3(__reg_3_4, __reg_3_4, __reg_3_4, __reg_3_4, __reg_3_0, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0); __CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3); __CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4); __CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0); __STORE(__h - 9, __reg_9_3); __reg_3_2 = __reg_2_2; __CALC4(__reg_4_3, __reg_4_3, __reg_4_3, __reg_4_0, __reg_4_4, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4); __CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0); __CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3); __CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1); __STORE(__h - 8, __reg_9_4); __reg_4_1 = __reg_3_1; __CALC4(__reg_4_4, __reg_4_4, __reg_4_4, __reg_4_4, __reg_4_0, __reg_3_2); 
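/* Copies such as __reg_3_1 = __reg_2_1 preserve a stage's freshest outputs so
   that the next stage can finish its clamped updates after loading has
   stopped; each __STORE(__h - K, ...) then retires one row, with K stepping
   down toward the tile edge as the pipeline empties. */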
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0); __CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3); __CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4); __CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2); __STORE(__h - 7, __reg_9_0); __reg_4_2 = __reg_3_2; __CALC5(__reg_5_3, __reg_5_3, __reg_5_3, __reg_5_0, __reg_5_4, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4); __CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0); __CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3); __STORE(__h - 6, __reg_9_1); __reg_5_1 = __reg_4_1; __CALC5(__reg_5_4, __reg_5_4, __reg_5_4, __reg_5_4, __reg_5_0, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0); __CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3); __CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4); __STORE(__h - 5, __reg_9_2); __reg_5_2 = __reg_4_2; __CALC6(__reg_6_3, __reg_6_3, __reg_6_3, __reg_6_0, __reg_6_4, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4); __CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0); __STORE(__h - 4, __reg_9_3); __reg_6_1 = __reg_5_1; __CALC6(__reg_6_4, __reg_6_4, __reg_6_4, __reg_6_4, __reg_6_0, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0); __CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3); __CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1); __STORE(__h - 3, __reg_9_4); __reg_6_2 = __reg_5_2; __CALC7(__reg_7_3, __reg_7_3, __reg_7_3, __reg_7_0, __reg_7_4, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4); __CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2); __STORE(__h - 2, __reg_9_0); __reg_7_1 = __reg_6_1; __CALC7(__reg_7_4, __reg_7_4, __reg_7_4, __reg_7_4, __reg_7_0, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0); __CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3); __STORE(__h - 1, __reg_9_1); __reg_7_2 = __reg_6_2; __CALC8(__reg_8_3, __reg_8_3, __reg_8_3, __reg_8_0, __reg_8_4, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4); __STORE(__h + 0, __reg_9_2); __reg_8_1 = __reg_7_1; __CALC8(__reg_8_4, __reg_8_4, __reg_8_4, __reg_8_4, __reg_8_0, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0); __STORE(__h + 1, __reg_9_3); __reg_8_2 = __reg_7_2; __CALC9(__reg_9_3, __reg_9_3, __reg_9_3, __reg_9_0, __reg_9_4, __reg_8_1); __STORE(__h + 2, __reg_9_4); __CALC9(__reg_9_4, __reg_9_4, __reg_9_4, __reg_9_4, __reg_9_0, __reg_8_2); __STORE(__h + 3, __reg_9_0); } } else { for (__h = 37; __h <= __side1LenOl - 5;) { __LOAD(__reg_0, __h); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0); __CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3); __CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, 
__reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4); __CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0); __CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3); __CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1); __STORE(__h - 18, __reg_9_4); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4); __CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0); __CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3); __CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4); __CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2); __STORE(__h - 18, __reg_9_0); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); __CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0); __CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3); __CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4); __CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0); __CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3); __STORE(__h - 18, __reg_9_1); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0); __CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3); __CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4); __CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0); __CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3); __CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4); __STORE(__h - 18, __reg_9_2); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4); __CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0); __CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3); __CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4); __CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0); __STORE(__h - 18, __reg_9_3); __h++; __DB_SWITCH(); __syncthreads(); } if (__h == __side1LenOl) return; __LOAD(__reg_0, __h); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0); 
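/* Interior tiles take this path: the steady loop runs against __side1LenOl
   (the overlapped tile height) rather than the true array edge, swapping the
   shared-memory line buffer (__DB_SWITCH) and synchronizing after every five
   unrolled rows. The rows after the loop are emitted one at a time, each
   prefixed by "if (__h == __side1LenOl) return;" so the block exits as soon
   as its overlap region is exhausted; no edge clamping is needed here. */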
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3); __CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4); __CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0); __CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3); __CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1); __STORE(__h - 18, __reg_9_4); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0, __h); __CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4); __CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0); __CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3); __CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4); __CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2); __STORE(__h - 18, __reg_9_0); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0, __h); __CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); __CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0); __CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3); __CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4); __CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0); __CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3); __STORE(__h - 18, __reg_9_1); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0, __h); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0); __CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3); __CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4); __CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0); __CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3); __CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4); __STORE(__h - 18, __reg_9_2); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0, __h); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4); __CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0); __CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3); __CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4); __CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, 
__reg_9_4, __reg_9_3, __reg_8_0); __STORE(__h - 18, __reg_9_3); __h++;
  }
}

/* AN5D-style temporally blocked kernel: 8 fused time steps of a 5x5 box stencil
   (double precision, halo of 2 in both dimensions). Each __CALCk stage applies one
   time step; the __reg_k_* rows stream through registers while __a_sb
   double-buffers the current input row in shared memory. */
__global__ void kernel0_8(double *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
  const AN5D_TYPE __c0Len = (timestep - 0);
  const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
  const AN5D_TYPE __c1Len = (dimsize - 2 - 2);
  const AN5D_TYPE __c1Pad = (2);
#define __c1 c1
  const AN5D_TYPE __c2Len = (dimsize - 2 - 2);
  const AN5D_TYPE __c2Pad = (2);
#define __c2 c2
  const AN5D_TYPE __halo1 = 2;
  const AN5D_TYPE __halo2 = 2;
  const AN5D_TYPE __side0Len = 8;
  const AN5D_TYPE __side1Len = 512;
  const AN5D_TYPE __side2Len = 480;
  const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
  const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
  const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
  const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
  const AN5D_TYPE __blockSize = 1 * __side2LenOl;
  const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
  const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
  const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
  const AN5D_TYPE __local_c2 = __tid;
  const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
  const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
  double __reg_0;
  double __reg_1_0; double __reg_1_1; double __reg_1_2; double __reg_1_3; double __reg_1_4;
  double __reg_2_0; double __reg_2_1; double __reg_2_2; double __reg_2_3; double __reg_2_4;
  double __reg_3_0; double __reg_3_1; double __reg_3_2; double __reg_3_3; double __reg_3_4;
  double __reg_4_0; double __reg_4_1; double __reg_4_2; double __reg_4_3; double __reg_4_4;
  double __reg_5_0; double __reg_5_1; double __reg_5_2; double __reg_5_3; double __reg_5_4;
  double __reg_6_0; double __reg_6_1; double __reg_6_2; double __reg_6_3; double __reg_6_4;
  double __reg_7_0; double __reg_7_1; double __reg_7_2; double __reg_7_3; double __reg_7_4;
  double __reg_8_0; double __reg_8_1; double __reg_8_2; double __reg_8_3; double __reg_8_4;
  __shared__ double __a_sb_double[__blockSize * 2];
  double *__a_sb = __a_sb_double;
  const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
  const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
  /* Stage k may only write where the thread sits at least k halo widths inside the
     overlapped tile; elsewhere __CALCk passes its input through unchanged. */
  const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
  const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2);
  const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3);
  const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4);
  const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5);
  const AN5D_TYPE __writeValid6 = __updateValid && __local_c2 >= (__halo2 * 6) && __local_c2 < __side2LenOl - (__halo2 * 6);
  const AN5D_TYPE __writeValid7 = __updateValid && __local_c2 >= (__halo2 * 7) && __local_c2 < __side2LenOl - (__halo2 * 7);
  const AN5D_TYPE __writeValid8 = __updateValid && __local_c2 >= (__halo2 * 8) && __local_c2 < __side2LenOl - (__halo2 * 8);
  const AN5D_TYPE __storeValid = __writeValid8;
  AN5D_TYPE __c1;
  AN5D_TYPE __h;
  const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
/* __CALCEXPR_k accumulates the current input row's contribution to one of the five
   output rows in flight (the 5x5 box spans five rows); _0 assigns, _1.._4 add.
   Coefficients run 0.03125f..0.03149f with 0.24712f at the center point. */
#define __CALCEXPR_0_wrap(__rn0, __a) do { __rn0 = (((((((((((((((((((((((((0.03125f * (__SBREF(__a_sb, -2))) + (0.03126f * (__SBREF(__a_sb, -1)))) + (0.03127f * (__REGREF(__a, 0)))) + (0.03128f * (__SBREF(__a_sb, 1)))) + (0.03129f * (__SBREF(__a_sb, 2)))))))))))))))))))))))); } while (0)
#define __CALCEXPR_0(out, a) do { __CALCEXPR_0_wrap(out, a); } while (0);
#define __CALCEXPR_1_wrap(__rn0, __a) do { __rn0 = (((((((((((((((((((((0.03130f * (__SBREF(__a_sb, -2)))) + (0.03131f * (__SBREF(__a_sb, -1)))) + (0.03132f * (__REGREF(__a, 0)))) + (0.03133f * (__SBREF(__a_sb, 1)))) + (0.03134f * (__SBREF(__a_sb, 2))))))))))))))))))); } while (0)
#define __CALCEXPR_1(out, a) do { double etmp; __CALCEXPR_1_wrap(etmp, a); out += etmp; } while (0);
#define __CALCEXPR_2_wrap(__rn0, __a) do { __rn0 = ((((((((((((((((0.03135f * (__SBREF(__a_sb, -2)))) + (0.03136f * (__SBREF(__a_sb, -1)))) + (0.24712f * (__REGREF(__a, 0)))) + (0.03138f * (__SBREF(__a_sb, 1)))) + (0.03139f * (__SBREF(__a_sb, 2)))))))))))))); } while (0)
#define __CALCEXPR_2(out, a) do { double etmp; __CALCEXPR_2_wrap(etmp, a); out += etmp; } while (0);
#define __CALCEXPR_3_wrap(__rn0, __a) do { __rn0 = (((((((((((0.03140f * (__SBREF(__a_sb, -2)))) + (0.03141f * (__SBREF(__a_sb, -1)))) + (0.03142f * (__REGREF(__a, 0)))) + (0.03143f * (__SBREF(__a_sb, 1)))) + (0.03144f * (__SBREF(__a_sb, 2))))))))); } while (0)
#define __CALCEXPR_3(out, a) do { double etmp; __CALCEXPR_3_wrap(etmp, a); out += etmp; } while (0);
#define __CALCEXPR_4_wrap(__rn0, __a) do { __rn0 = ((((((0.03145f * (__SBREF(__a_sb, -2)))) + (0.03146f * (__SBREF(__a_sb, -1)))) + (0.03147f * (__REGREF(__a, 0)))) + (0.03148f * (__SBREF(__a_sb, 1)))) + (0.03149f * (__SBREF(__a_sb, 2)))); } while (0)
#define __CALCEXPR_4(out, a) do { double etmp; __CALCEXPR_4_wrap(etmp, a); out += etmp; } while (0);
#define __CALCEXPR(out0, out1, out2, out3, out4, reg) do { __CALCEXPR_0(out0, reg); __CALCEXPR_1(out1, reg); __CALCEXPR_2(out2, reg); __CALCEXPR_3(out3, reg); __CALCEXPR_4(out4, reg); } while (0);
#define __CALC1(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid1) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC2(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid2) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC3(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid3) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC4(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid4) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC5(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid5) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC6(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid6) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC7(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid7) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC8(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid8) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __STORE(h, out) do { if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __DEST = out; }} while (0)
  /* Prologue for the first tile row (__c1Id == 0): top boundary rows prime the
     eight pipeline stages before the steady-state loop takes over. */
  if (__c1Id == 0)
  {
    __LOAD(__reg_0, 0); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_0); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_0); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_0); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_0); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_0); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_0); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_0);
    __LOAD(__reg_0, 1); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0); __CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_0); __CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_0); __CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_0); __CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_0); __CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_0); __CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_0); __CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_0);
    __LOAD(__reg_0, 2); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
    __LOAD(__reg_0, 3); __CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
    __LOAD(__reg_0, 4); __CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); __CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
    __LOAD(__reg_0, 5); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0,
__reg_1_4, __reg_1_3, __reg_0); __CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3); __LOAD(__reg_0, 6); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4); __CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2); __LOAD(__reg_0, 7); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0); __CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3); __LOAD(__reg_0, 8); __CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4); __CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2); __LOAD(__reg_0, 9); __CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); __CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0); __CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3); __LOAD(__reg_0, 10); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0); __CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3); __CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4); __CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2); __LOAD(__reg_0, 11); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4); __CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0); __CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3); __LOAD(__reg_0, 12); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0); __CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3); __CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4); __CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2); __LOAD(__reg_0, 13); __CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4); __CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0); __CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3); __LOAD(__reg_0, 14); __CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); __CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0); __CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3); __CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4); __CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2); __LOAD(__reg_0, 15); __CALC1(__reg_1_2, __reg_1_1, 
__reg_1_0, __reg_1_4, __reg_1_3, __reg_0); __CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3); __CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4); __CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0); __CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3); __LOAD(__reg_0, 16); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4); __CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0); __CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3); __CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4); __CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2); __LOAD(__reg_0, 17); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0); __CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3); __CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4); __CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0); __CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3); __LOAD(__reg_0, 18); __CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4); __CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0); __CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3); __CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4); __STORE(2, __reg_8_2); __LOAD(__reg_0, 19); __CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); __CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0); __CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3); __CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4); __CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0); __STORE(3, __reg_8_3); __LOAD(__reg_0, 20); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0); __CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3); __CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4); __CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0); __CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, 
__reg_6_3); __CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1); __STORE(4, __reg_8_4); __LOAD(__reg_0, 21); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4); __CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0); __CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3); __CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4); __CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2); __STORE(5, __reg_8_0); __LOAD(__reg_0, 22); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0); __CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3); __CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4); __CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0); __CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3); __STORE(6, __reg_8_1); __LOAD(__reg_0, 23); __CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4); __CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0); __CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3); __CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4); __STORE(7, __reg_8_2); __LOAD(__reg_0, 24); __CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); __CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0); __CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3); __CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4); __CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0); __STORE(8, __reg_8_3); __LOAD(__reg_0, 25); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0); __CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3); __CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4); __CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0); __CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3); __CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1); __STORE(9, __reg_8_4); __LOAD(__reg_0, 26); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4); __CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, 
__reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0); __CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3); __CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4); __CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2); __STORE(10, __reg_8_0); __LOAD(__reg_0, 27); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0); __CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3); __CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4); __CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0); __CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3); __STORE(11, __reg_8_1); __LOAD(__reg_0, 28); __CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4); __CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0); __CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3); __CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4); __STORE(12, __reg_8_2); __LOAD(__reg_0, 29); __CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); __CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0); __CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3); __CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4); __CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0); __STORE(13, __reg_8_3); __LOAD(__reg_0, 30); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0); __CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3); __CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4); __CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0); __CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3); __CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1); __STORE(14, __reg_8_4); __LOAD(__reg_0, 31); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4); __CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0); __CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3); __CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4); __CALC8(__reg_8_4, 
__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2); __STORE(15, __reg_8_0); __LOAD(__reg_0, 32); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0); __CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3); __CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4); __CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0); __CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3); __STORE(16, __reg_8_1); } else { __LOAD(__reg_0, 0); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0); __LOAD(__reg_0, 1); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0); __LOAD(__reg_0, 2); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __LOAD(__reg_0, 3); __CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); __LOAD(__reg_0, 4); __CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); __CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2); __LOAD(__reg_0, 5); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0); __CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3); __LOAD(__reg_0, 6); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4); __LOAD(__reg_0, 7); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0); __LOAD(__reg_0, 8); __CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4); __LOAD(__reg_0, 9); __CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); __CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0); __LOAD(__reg_0, 10); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0); __CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3); __CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1); __LOAD(__reg_0, 11); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4); __CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2); __LOAD(__reg_0, 12); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0); __CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3); __CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1); __LOAD(__reg_0, 13); __CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4); __CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2); __LOAD(__reg_0, 14); __CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); __CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, 
__reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0); __CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3); __LOAD(__reg_0, 15); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0); __CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3); __CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4); __LOAD(__reg_0, 16); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4); __CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0); __CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3); __LOAD(__reg_0, 17); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0); __CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3); __CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4); __LOAD(__reg_0, 18); __CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4); __CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0); __LOAD(__reg_0, 19); __CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); __CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0); __CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3); __CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1); __LOAD(__reg_0, 20); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0); __CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3); __CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4); __CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0); __LOAD(__reg_0, 21); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4); __CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0); __CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3); __CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1); __LOAD(__reg_0, 22); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0); __CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3); __CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4); __CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2); __LOAD(__reg_0, 23); __CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, 
__reg_0); __CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4); __CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0); __CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3); __LOAD(__reg_0, 24); __CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); __CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0); __CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3); __CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4); __CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2); __LOAD(__reg_0, 25); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0); __CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3); __CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4); __CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0); __CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3); __LOAD(__reg_0, 26); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4); __CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0); __CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3); __CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4); __LOAD(__reg_0, 27); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0); __CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3); __CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4); __CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0); __LOAD(__reg_0, 28); __CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4); __CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0); __CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3); __CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4); __LOAD(__reg_0, 29); __CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); __CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0); __CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3); __CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, 
__reg_5_4, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4); __CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0); __LOAD(__reg_0, 30); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0); __CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3); __CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4); __CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0); __CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3); __CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1); __LOAD(__reg_0, 31); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4); __CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0); __CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3); __CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4); __CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2); __LOAD(__reg_0, 32); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0); __CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3); __CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4); __CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0); __CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3); __STORE(16, __reg_8_1); } __a_sb = __a_sb_double + __blockSize * 0; if (__c1Id == __side1Num - 1) { for (__h = 33; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 7;) { __LOAD(__reg_0, __h); __CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4); __CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0); __CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3); __CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4); __STORE(__h - 16, __reg_8_2); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); __CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0); __CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3); __CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4); __CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, 
__reg_7_0); __STORE(__h - 16, __reg_8_3); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0); __CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3); __CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4); __CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0); __CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3); __CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1); __STORE(__h - 16, __reg_8_4); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4); __CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0); __CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3); __CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4); __CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2); __STORE(__h - 16, __reg_8_0); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0); __CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3); __CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4); __CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0); __CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3); __STORE(__h - 16, __reg_8_1); __h++; } if (0) {} else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0, __h + 0); __CALC1(__reg_1_0, __reg_1_0, __reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4); __CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0); __CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3); __CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4); __STORE(__h - 16, __reg_8_2); __reg_1_3 = __reg_0; __LOAD(__reg_0, __h + 1); __CALC1(__reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_2, __reg_0); __CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0); __CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3); __CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4); __CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0); __STORE(__h - 15, __reg_8_3); __reg_1_4 = __reg_0; __CALC2(__reg_2_0, __reg_2_0, __reg_2_0, __reg_2_2, __reg_2_1, __reg_1_3); __CALC3(__reg_3_3, __reg_3_2, 
__reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4); __CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0); __CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3); __CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1); __STORE(__h - 14, __reg_8_4); __reg_2_3 = __reg_1_3; __CALC2(__reg_2_1, __reg_2_1, __reg_2_1, __reg_2_1, __reg_2_2, __reg_1_4); __CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0); __CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3); __CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4); __CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2); __STORE(__h - 13, __reg_8_0); __reg_2_4 = __reg_1_4; __CALC3(__reg_3_0, __reg_3_0, __reg_3_0, __reg_3_2, __reg_3_1, __reg_2_3); __CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4); __CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0); __CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3); __STORE(__h - 12, __reg_8_1); __reg_3_3 = __reg_2_3; __CALC3(__reg_3_1, __reg_3_1, __reg_3_1, __reg_3_1, __reg_3_2, __reg_2_4); __CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0); __CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3); __CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4); __STORE(__h - 11, __reg_8_2); __reg_3_4 = __reg_2_4; __CALC4(__reg_4_0, __reg_4_0, __reg_4_0, __reg_4_2, __reg_4_1, __reg_3_3); __CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4); __CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0); __STORE(__h - 10, __reg_8_3); __reg_4_3 = __reg_3_3; __CALC4(__reg_4_1, __reg_4_1, __reg_4_1, __reg_4_1, __reg_4_2, __reg_3_4); __CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0); __CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3); __CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1); __STORE(__h - 9, __reg_8_4); __reg_4_4 = __reg_3_4; __CALC5(__reg_5_0, __reg_5_0, __reg_5_0, __reg_5_2, __reg_5_1, __reg_4_3); __CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4); __CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2); __STORE(__h - 8, __reg_8_0); __reg_5_3 = __reg_4_3; __CALC5(__reg_5_1, __reg_5_1, __reg_5_1, __reg_5_1, __reg_5_2, __reg_4_4); __CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0); __CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, 
__reg_7_3); __STORE(__h - 7, __reg_8_1); __reg_5_4 = __reg_4_4; __CALC6(__reg_6_0, __reg_6_0, __reg_6_0, __reg_6_2, __reg_6_1, __reg_5_3); __CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4); __STORE(__h - 6, __reg_8_2); __reg_6_3 = __reg_5_3; __CALC6(__reg_6_1, __reg_6_1, __reg_6_1, __reg_6_1, __reg_6_2, __reg_5_4); __CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0); __STORE(__h - 5, __reg_8_3); __reg_6_4 = __reg_5_4; __CALC7(__reg_7_0, __reg_7_0, __reg_7_0, __reg_7_2, __reg_7_1, __reg_6_3); __CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1); __STORE(__h - 4, __reg_8_4); __reg_7_3 = __reg_6_3; __CALC7(__reg_7_1, __reg_7_1, __reg_7_1, __reg_7_1, __reg_7_2, __reg_6_4); __CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2); __STORE(__h - 3, __reg_8_0); __reg_7_4 = __reg_6_4; __CALC8(__reg_8_0, __reg_8_0, __reg_8_0, __reg_8_2, __reg_8_1, __reg_7_3); __STORE(__h - 2, __reg_8_1); __CALC8(__reg_8_1, __reg_8_1, __reg_8_1, __reg_8_1, __reg_8_2, __reg_7_4); __STORE(__h - 1, __reg_8_2); } else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0, __h + 0); __CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4); __CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0); __CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3); __CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4); __STORE(__h - 16, __reg_8_2); __LOAD(__reg_0, __h + 1); __CALC1(__reg_1_1, __reg_1_1, __reg_1_1, __reg_1_3, __reg_1_2, __reg_0); __CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0); __CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3); __CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4); __CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0); __STORE(__h - 15, __reg_8_3); __reg_1_4 = __reg_0; __LOAD(__reg_0, __h + 2); __CALC1(__reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_3, __reg_0); __CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3); __CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4); __CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0); __CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3); __CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1); __STORE(__h - 14, __reg_8_4); __reg_1_0 = __reg_0; __CALC2(__reg_2_1, __reg_2_1, __reg_2_1, __reg_2_3, __reg_2_2, __reg_1_4); __CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0); __CALC5(__reg_5_0, 
__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3); __CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4); __CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2); __STORE(__h - 13, __reg_8_0); __reg_2_4 = __reg_1_4; __CALC2(__reg_2_2, __reg_2_2, __reg_2_2, __reg_2_2, __reg_2_3, __reg_1_0); __CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3); __CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4); __CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0); __CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3); __STORE(__h - 12, __reg_8_1); __reg_2_0 = __reg_1_0; __CALC3(__reg_3_1, __reg_3_1, __reg_3_1, __reg_3_3, __reg_3_2, __reg_2_4); __CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0); __CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3); __CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4); __STORE(__h - 11, __reg_8_2); __reg_3_4 = __reg_2_4; __CALC3(__reg_3_2, __reg_3_2, __reg_3_2, __reg_3_2, __reg_3_3, __reg_2_0); __CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3); __CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4); __CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0); __STORE(__h - 10, __reg_8_3); __reg_3_0 = __reg_2_0; __CALC4(__reg_4_1, __reg_4_1, __reg_4_1, __reg_4_3, __reg_4_2, __reg_3_4); __CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0); __CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3); __CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1); __STORE(__h - 9, __reg_8_4); __reg_4_4 = __reg_3_4; __CALC4(__reg_4_2, __reg_4_2, __reg_4_2, __reg_4_2, __reg_4_3, __reg_3_0); __CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3); __CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4); __CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2); __STORE(__h - 8, __reg_8_0); __reg_4_0 = __reg_3_0; __CALC5(__reg_5_1, __reg_5_1, __reg_5_1, __reg_5_3, __reg_5_2, __reg_4_4); __CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0); __CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3); __STORE(__h - 7, __reg_8_1); __reg_5_4 = __reg_4_4; __CALC5(__reg_5_2, __reg_5_2, __reg_5_2, __reg_5_2, __reg_5_3, __reg_4_0); __CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3); __CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4); __STORE(__h - 6, __reg_8_2); __reg_5_0 = __reg_4_0; __CALC6(__reg_6_1, __reg_6_1, __reg_6_1, __reg_6_3, __reg_6_2, __reg_5_4); 
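// Boundary flush continues: no further __LOADs occur in this branch, so the statements below only propagate rows already in the register pipeline through stages 6-8 and store the final outputs.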
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0); __STORE(__h - 5, __reg_8_3); __reg_6_4 = __reg_5_4; __CALC6(__reg_6_2, __reg_6_2, __reg_6_2, __reg_6_2, __reg_6_3, __reg_5_0); __CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3); __CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1); __STORE(__h - 4, __reg_8_4); __reg_6_0 = __reg_5_0; __CALC7(__reg_7_1, __reg_7_1, __reg_7_1, __reg_7_3, __reg_7_2, __reg_6_4); __CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2); __STORE(__h - 3, __reg_8_0); __reg_7_4 = __reg_6_4; __CALC7(__reg_7_2, __reg_7_2, __reg_7_2, __reg_7_2, __reg_7_3, __reg_6_0); __CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3); __STORE(__h - 2, __reg_8_1); __reg_7_0 = __reg_6_0; __CALC8(__reg_8_1, __reg_8_1, __reg_8_1, __reg_8_3, __reg_8_2, __reg_7_4); __STORE(__h - 1, __reg_8_2); __CALC8(__reg_8_2, __reg_8_2, __reg_8_2, __reg_8_2, __reg_8_3, __reg_7_0); __STORE(__h + 0, __reg_8_3); } else if (__h + 4 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0, __h + 0); __CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4); __CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0); __CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3); __CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4); __STORE(__h - 16, __reg_8_2); __LOAD(__reg_0, __h + 1); __CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); __CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0); __CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3); __CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4); __CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0); __STORE(__h - 15, __reg_8_3); __LOAD(__reg_0, __h + 2); __CALC1(__reg_1_2, __reg_1_2, __reg_1_2, __reg_1_4, __reg_1_3, __reg_0); __CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3); __CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4); __CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0); __CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3); __CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1); __STORE(__h - 14, __reg_8_4); __reg_1_0 = __reg_0; __LOAD(__reg_0, __h + 3); __CALC1(__reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_4, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4); __CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0); __CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3); 
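// __h + 3 is the last row this branch loads; the repeated-argument __CALC1 form above mirrors it into the bottom halo, and the remaining statements drain the later stages.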
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4); __CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2); __STORE(__h - 13, __reg_8_0); __reg_1_1 = __reg_0; __CALC2(__reg_2_2, __reg_2_2, __reg_2_2, __reg_2_4, __reg_2_3, __reg_1_0); __CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3); __CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4); __CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0); __CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3); __STORE(__h - 12, __reg_8_1); __reg_2_0 = __reg_1_0; __CALC2(__reg_2_3, __reg_2_3, __reg_2_3, __reg_2_3, __reg_2_4, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4); __CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0); __CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3); __CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4); __STORE(__h - 11, __reg_8_2); __reg_2_1 = __reg_1_1; __CALC3(__reg_3_2, __reg_3_2, __reg_3_2, __reg_3_4, __reg_3_3, __reg_2_0); __CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3); __CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4); __CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0); __STORE(__h - 10, __reg_8_3); __reg_3_0 = __reg_2_0; __CALC3(__reg_3_3, __reg_3_3, __reg_3_3, __reg_3_3, __reg_3_4, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4); __CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0); __CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3); __CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1); __STORE(__h - 9, __reg_8_4); __reg_3_1 = __reg_2_1; __CALC4(__reg_4_2, __reg_4_2, __reg_4_2, __reg_4_4, __reg_4_3, __reg_3_0); __CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3); __CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4); __CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2); __STORE(__h - 8, __reg_8_0); __reg_4_0 = __reg_3_0; __CALC4(__reg_4_3, __reg_4_3, __reg_4_3, __reg_4_3, __reg_4_4, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4); __CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0); __CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3); __STORE(__h - 7, __reg_8_1); __reg_4_1 = __reg_3_1; __CALC5(__reg_5_2, __reg_5_2, __reg_5_2, __reg_5_4, __reg_5_3, __reg_4_0); __CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3); __CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, 
__reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4); __STORE(__h - 6, __reg_8_2); __reg_5_0 = __reg_4_0; __CALC5(__reg_5_3, __reg_5_3, __reg_5_3, __reg_5_3, __reg_5_4, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4); __CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0); __STORE(__h - 5, __reg_8_3); __reg_5_1 = __reg_4_1; __CALC6(__reg_6_2, __reg_6_2, __reg_6_2, __reg_6_4, __reg_6_3, __reg_5_0); __CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3); __CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1); __STORE(__h - 4, __reg_8_4); __reg_6_0 = __reg_5_0; __CALC6(__reg_6_3, __reg_6_3, __reg_6_3, __reg_6_3, __reg_6_4, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4); __CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2); __STORE(__h - 3, __reg_8_0); __reg_6_1 = __reg_5_1; __CALC7(__reg_7_2, __reg_7_2, __reg_7_2, __reg_7_4, __reg_7_3, __reg_6_0); __CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3); __STORE(__h - 2, __reg_8_1); __reg_7_0 = __reg_6_0; __CALC7(__reg_7_3, __reg_7_3, __reg_7_3, __reg_7_3, __reg_7_4, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4); __STORE(__h - 1, __reg_8_2); __reg_7_1 = __reg_6_1; __CALC8(__reg_8_2, __reg_8_2, __reg_8_2, __reg_8_4, __reg_8_3, __reg_7_0); __STORE(__h + 0, __reg_8_3); __CALC8(__reg_8_3, __reg_8_3, __reg_8_3, __reg_8_3, __reg_8_4, __reg_7_1); __STORE(__h + 1, __reg_8_4); } else if (__h + 5 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0, __h + 0); __CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4); __CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0); __CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3); __CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4); __STORE(__h - 16, __reg_8_2); __LOAD(__reg_0, __h + 1); __CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); __CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0); __CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3); __CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4); __CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0); __STORE(__h - 15, __reg_8_3); __LOAD(__reg_0, __h + 2); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0); __CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3); __CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4); __CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0); __CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3); 
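// Same drain pattern, one row deeper (__h + 5 rows remaining): each additional
// remaining row shifts the boundary clamping up by one pipeline stage.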
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1); __STORE(__h - 14, __reg_8_4); __LOAD(__reg_0, __h + 3); __CALC1(__reg_1_3, __reg_1_3, __reg_1_3, __reg_1_0, __reg_1_4, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4); __CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0); __CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3); __CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4); __CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2); __STORE(__h - 13, __reg_8_0); __reg_1_1 = __reg_0; __LOAD(__reg_0, __h + 4); __CALC1(__reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_0, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0); __CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3); __CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4); __CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0); __CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3); __STORE(__h - 12, __reg_8_1); __reg_1_2 = __reg_0; __CALC2(__reg_2_3, __reg_2_3, __reg_2_3, __reg_2_0, __reg_2_4, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4); __CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0); __CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3); __CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4); __STORE(__h - 11, __reg_8_2); __reg_2_1 = __reg_1_1; __CALC2(__reg_2_4, __reg_2_4, __reg_2_4, __reg_2_4, __reg_2_0, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0); __CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3); __CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4); __CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0); __STORE(__h - 10, __reg_8_3); __reg_2_2 = __reg_1_2; __CALC3(__reg_3_3, __reg_3_3, __reg_3_3, __reg_3_0, __reg_3_4, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4); __CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0); __CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3); __CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1); __STORE(__h - 9, __reg_8_4); __reg_3_1 = __reg_2_1; __CALC3(__reg_3_4, __reg_3_4, __reg_3_4, __reg_3_4, __reg_3_0, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0); __CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3); __CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4); __CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, 
__reg_7_2); __STORE(__h - 8, __reg_8_0); __reg_3_2 = __reg_2_2; __CALC4(__reg_4_3, __reg_4_3, __reg_4_3, __reg_4_0, __reg_4_4, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4); __CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0); __CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3); __STORE(__h - 7, __reg_8_1); __reg_4_1 = __reg_3_1; __CALC4(__reg_4_4, __reg_4_4, __reg_4_4, __reg_4_4, __reg_4_0, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0); __CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3); __CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4); __STORE(__h - 6, __reg_8_2); __reg_4_2 = __reg_3_2; __CALC5(__reg_5_3, __reg_5_3, __reg_5_3, __reg_5_0, __reg_5_4, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4); __CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0); __STORE(__h - 5, __reg_8_3); __reg_5_1 = __reg_4_1; __CALC5(__reg_5_4, __reg_5_4, __reg_5_4, __reg_5_4, __reg_5_0, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0); __CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3); __CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1); __STORE(__h - 4, __reg_8_4); __reg_5_2 = __reg_4_2; __CALC6(__reg_6_3, __reg_6_3, __reg_6_3, __reg_6_0, __reg_6_4, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4); __CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2); __STORE(__h - 3, __reg_8_0); __reg_6_1 = __reg_5_1; __CALC6(__reg_6_4, __reg_6_4, __reg_6_4, __reg_6_4, __reg_6_0, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0); __CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3); __STORE(__h - 2, __reg_8_1); __reg_6_2 = __reg_5_2; __CALC7(__reg_7_3, __reg_7_3, __reg_7_3, __reg_7_0, __reg_7_4, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4); __STORE(__h - 1, __reg_8_2); __reg_7_1 = __reg_6_1; __CALC7(__reg_7_4, __reg_7_4, __reg_7_4, __reg_7_4, __reg_7_0, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0); __STORE(__h + 0, __reg_8_3); __reg_7_2 = __reg_6_2; __CALC8(__reg_8_3, __reg_8_3, __reg_8_3, __reg_8_0, __reg_8_4, __reg_7_1); __STORE(__h + 1, __reg_8_4); __CALC8(__reg_8_4, __reg_8_4, __reg_8_4, __reg_8_4, __reg_8_0, __reg_7_2); __STORE(__h + 2, __reg_8_0); } else if (__h + 6 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0, __h + 0); __CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4); __CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0); __CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3); __CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4); __STORE(__h - 16, __reg_8_2); __LOAD(__reg_0, __h + 1); 
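// Drain case for __h + 6 remaining rows. The ladder of cases covers every possible
// tile remainder, so the deep register pipeline always empties without reading or
// writing outside the c1 tile.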
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); __CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0); __CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3); __CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4); __CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0); __STORE(__h - 15, __reg_8_3); __LOAD(__reg_0, __h + 2); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0); __CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3); __CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4); __CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0); __CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3); __CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1); __STORE(__h - 14, __reg_8_4); __LOAD(__reg_0, __h + 3); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4); __CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0); __CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3); __CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4); __CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2); __STORE(__h - 13, __reg_8_0); __LOAD(__reg_0, __h + 4); __CALC1(__reg_1_4, __reg_1_4, __reg_1_4, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0); __CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3); __CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4); __CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0); __CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3); __STORE(__h - 12, __reg_8_1); __reg_1_2 = __reg_0; __LOAD(__reg_0, __h + 5); __CALC1(__reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_1, __reg_0); __CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4); __CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0); __CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3); __CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4); __STORE(__h - 11, __reg_8_2); __reg_1_3 = __reg_0; __CALC2(__reg_2_4, __reg_2_4, __reg_2_4, __reg_2_1, __reg_2_0, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0); __CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3); __CALC5(__reg_5_3, __reg_5_2, __reg_5_1, 
__reg_5_0, __reg_5_4, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4); __CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0); __STORE(__h - 10, __reg_8_3); __reg_2_2 = __reg_1_2; __CALC2(__reg_2_0, __reg_2_0, __reg_2_0, __reg_2_0, __reg_2_1, __reg_1_3); __CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4); __CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0); __CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3); __CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1); __STORE(__h - 9, __reg_8_4); __reg_2_3 = __reg_1_3; __CALC3(__reg_3_4, __reg_3_4, __reg_3_4, __reg_3_1, __reg_3_0, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0); __CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3); __CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4); __CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2); __STORE(__h - 8, __reg_8_0); __reg_3_2 = __reg_2_2; __CALC3(__reg_3_0, __reg_3_0, __reg_3_0, __reg_3_0, __reg_3_1, __reg_2_3); __CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4); __CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0); __CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3); __STORE(__h - 7, __reg_8_1); __reg_3_3 = __reg_2_3; __CALC4(__reg_4_4, __reg_4_4, __reg_4_4, __reg_4_1, __reg_4_0, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0); __CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3); __CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4); __STORE(__h - 6, __reg_8_2); __reg_4_2 = __reg_3_2; __CALC4(__reg_4_0, __reg_4_0, __reg_4_0, __reg_4_0, __reg_4_1, __reg_3_3); __CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4); __CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0); __STORE(__h - 5, __reg_8_3); __reg_4_3 = __reg_3_3; __CALC5(__reg_5_4, __reg_5_4, __reg_5_4, __reg_5_1, __reg_5_0, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0); __CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3); __CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1); __STORE(__h - 4, __reg_8_4); __reg_5_2 = __reg_4_2; __CALC5(__reg_5_0, __reg_5_0, __reg_5_0, __reg_5_0, __reg_5_1, __reg_4_3); __CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4); __CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2); __STORE(__h - 3, __reg_8_0); __reg_5_3 = __reg_4_3; __CALC6(__reg_6_4, __reg_6_4, __reg_6_4, __reg_6_1, __reg_6_0, __reg_5_2); __CALC7(__reg_7_2, 
__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0); __CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3); __STORE(__h - 2, __reg_8_1); __reg_6_2 = __reg_5_2; __CALC6(__reg_6_0, __reg_6_0, __reg_6_0, __reg_6_0, __reg_6_1, __reg_5_3); __CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4); __STORE(__h - 1, __reg_8_2); __reg_6_3 = __reg_5_3; __CALC7(__reg_7_4, __reg_7_4, __reg_7_4, __reg_7_1, __reg_7_0, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0); __STORE(__h + 0, __reg_8_3); __reg_7_2 = __reg_6_2; __CALC7(__reg_7_0, __reg_7_0, __reg_7_0, __reg_7_0, __reg_7_1, __reg_6_3); __CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1); __STORE(__h + 1, __reg_8_4); __reg_7_3 = __reg_6_3; __CALC8(__reg_8_4, __reg_8_4, __reg_8_4, __reg_8_1, __reg_8_0, __reg_7_2); __STORE(__h + 2, __reg_8_0); __CALC8(__reg_8_0, __reg_8_0, __reg_8_0, __reg_8_0, __reg_8_1, __reg_7_3); __STORE(__h + 3, __reg_8_1); } } else { for (__h = 33; __h <= __side1LenOl - 5;) { __LOAD(__reg_0, __h); __CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4); __CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0); __CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3); __CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4); __STORE(__h - 16, __reg_8_2); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); __CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0); __CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3); __CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4); __CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0); __STORE(__h - 16, __reg_8_3); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0); __CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3); __CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4); __CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0); __CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3); __CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1); __STORE(__h - 16, __reg_8_4); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4); __CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0); __CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3); __CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, 
__reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4); __CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2); __STORE(__h - 16, __reg_8_0); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0); __CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3); __CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4); __CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0); __CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3); __STORE(__h - 16, __reg_8_1); __h++; } if (__h == __side1LenOl) return; __LOAD(__reg_0, __h); __CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4); __CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0); __CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3); __CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4); __STORE(__h - 16, __reg_8_2); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0, __h); __CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); __CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0); __CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3); __CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4); __CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0); __STORE(__h - 16, __reg_8_3); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0, __h); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0); __CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3); __CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4); __CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0); __CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3); __CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1); __STORE(__h - 16, __reg_8_4); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0, __h); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4); __CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0); __CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3); __CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4); __CALC8(__reg_8_4, __reg_8_3, __reg_8_2, 
__reg_8_1, __reg_8_0, __reg_7_2); __STORE(__h - 16, __reg_8_0); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0, __h); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0); __CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3); __CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4); __CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0); __CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3); __STORE(__h - 16, __reg_8_1); __h++; } } __global__ void kernel0_7(double *A, int dimsize, int timestep, int c0) { #ifndef AN5D_TYPE #define AN5D_TYPE unsigned #endif const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0); #define __c0 c0 const AN5D_TYPE __c1Len = (dimsize - 2 - 2); const AN5D_TYPE __c1Pad = (2); #define __c1 c1 const AN5D_TYPE __c2Len = (dimsize - 2 - 2); const AN5D_TYPE __c2Pad = (2); #define __c2 c2 const AN5D_TYPE __halo1 = 2; const AN5D_TYPE __halo2 = 2; const AN5D_TYPE __side0Len = 7; const AN5D_TYPE __side1Len = 512; const AN5D_TYPE __side2Len = 484; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len; const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len; const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x; const AN5D_TYPE __local_c2 = __tid; const AN5D_TYPE __c1Id = blockIdx.x / __side2Num; const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2; double __reg_0; double __reg_1_0; double __reg_1_1; double __reg_1_2; double __reg_1_3; double __reg_1_4; double __reg_2_0; double __reg_2_1; double __reg_2_2; double __reg_2_3; double __reg_2_4; double __reg_3_0; double __reg_3_1; double __reg_3_2; double __reg_3_3; double __reg_3_4; double __reg_4_0; double __reg_4_1; double __reg_4_2; double __reg_4_3; double __reg_4_4; double __reg_5_0; double __reg_5_1; double __reg_5_2; double __reg_5_3; double __reg_5_4; double __reg_6_0; double __reg_6_1; double __reg_6_2; double __reg_6_3; double __reg_6_4; double __reg_7_0; double __reg_7_1; double __reg_7_2; double __reg_7_3; double __reg_7_4; __shared__ double __a_sb_double[__blockSize * 2]; double *__a_sb = __a_sb_double; const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2; const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len; const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1); const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2); const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3); const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4); const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5); const AN5D_TYPE __writeValid6 = __updateValid && 
__local_c2 >= (__halo2 * 6) && __local_c2 < __side2LenOl - (__halo2 * 6); const AN5D_TYPE __writeValid7 = __updateValid && __local_c2 >= (__halo2 * 7) && __local_c2 < __side2LenOl - (__halo2 * 7); const AN5D_TYPE __storeValid = __writeValid7; AN5D_TYPE __c1; AN5D_TYPE __h; const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id; #define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0) #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR_0_wrap(__rn0, __a) do { __rn0 = (((((((((((((((((((((((((0.03125f * (__SBREF(__a_sb, -2))) + (0.03126f * (__SBREF(__a_sb, -1)))) + (0.03127f * (__REGREF(__a, 0)))) + (0.03128f * (__SBREF(__a_sb, 1)))) + (0.03129f * (__SBREF(__a_sb, 2)))))))))))))))))))))))); } while (0) #define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0) #define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0) #define __CALCEXPR_0(out, a) do { __CALCEXPR_0_wrap(out, a); } while (0); #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR_1_wrap(__rn0, __a) do { __rn0 = (((((((((((((((((((((0.03130f * (__SBREF(__a_sb, -2)))) + (0.03131f * (__SBREF(__a_sb, -1)))) + (0.03132f * (__REGREF(__a, 0)))) + (0.03133f * (__SBREF(__a_sb, 1)))) + (0.03134f * (__SBREF(__a_sb, 2))))))))))))))))))); } while (0) #define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0) #define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0) #define __CALCEXPR_1(out, a) do { double etmp; __CALCEXPR_1_wrap(etmp, a); out += etmp; } while (0); #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR_2_wrap(__rn0, __a) do { __rn0 = ((((((((((((((((0.03135f * (__SBREF(__a_sb, -2)))) + (0.03136f * (__SBREF(__a_sb, -1)))) + (0.24712f * (__REGREF(__a, 0)))) + (0.03138f * (__SBREF(__a_sb, 1)))) + (0.03139f * (__SBREF(__a_sb, 2)))))))))))))); } while (0) #define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0) #define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0) #define __CALCEXPR_2(out, a) do { double etmp; __CALCEXPR_2_wrap(etmp, a); out += etmp; } while (0); #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR_3_wrap(__rn0, __a) do { __rn0 = (((((((((((0.03140f * (__SBREF(__a_sb, -2)))) + (0.03141f * (__SBREF(__a_sb, -1)))) + (0.03142f * (__REGREF(__a, 0)))) + (0.03143f * (__SBREF(__a_sb, 1)))) + (0.03144f * (__SBREF(__a_sb, 2))))))))); } while (0) #define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? 
__blockSize : 0]; } while (0) #define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0) #define __CALCEXPR_3(out, a) do { double etmp; __CALCEXPR_3_wrap(etmp, a); out += etmp; } while (0); #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR_4_wrap(__rn0, __a) do { __rn0 = ((((((0.03145f * (__SBREF(__a_sb, -2)))) + (0.03146f * (__SBREF(__a_sb, -1)))) + (0.03147f * (__REGREF(__a, 0)))) + (0.03148f * (__SBREF(__a_sb, 1)))) + (0.03149f * (__SBREF(__a_sb, 2)))); } while (0) #define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0) #define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0) #define __CALCEXPR_4(out, a) do { double etmp; __CALCEXPR_4_wrap(etmp, a); out += etmp; } while (0); #define __CALCEXPR(out0, out1, out2, out3, out4, reg) do { __CALCEXPR_0(out0, reg); __CALCEXPR_1(out1, reg); __CALCEXPR_2(out2, reg); __CALCEXPR_3(out3, reg); __CALCEXPR_4(out4, reg); } while (0); #define __CALC1(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid1) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0) #define __CALC2(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid2) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0) #define __CALC3(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid3) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0) #define __CALC4(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid4) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0) #define __CALC5(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid5) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0) #define __CALC6(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid6) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0) #define __CALC7(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid7) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0) #define __STORE(h, out) do { if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __DEST = out; }} while (0) if (__c1Id == 0) { __LOAD(__reg_0, 0); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_0); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_0); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_0); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_0); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_0); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_0); __LOAD(__reg_0, 1); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0); __CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_0); __CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_0); __CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_0); __CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_0); __CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_0); __CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_0); __LOAD(__reg_0, 2); 
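// kernel0_7 prologue for the first tile along c1 (__c1Id == 0): halo rows 0 and 1
// are fed to all seven stages up front (__CALC1..__CALC7 on the same __reg_0),
// seeding the software pipeline at the top boundary before row-by-row loading starts.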
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __LOAD(__reg_0, 3); __CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); __LOAD(__reg_0, 4); __CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); __CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2); __LOAD(__reg_0, 5); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0); __CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3); __LOAD(__reg_0, 6); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4); __CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2); __LOAD(__reg_0, 7); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0); __CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3); __LOAD(__reg_0, 8); __CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4); __CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2); __LOAD(__reg_0, 9); __CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); __CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0); __CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3); __LOAD(__reg_0, 10); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0); __CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3); __CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4); __CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2); __LOAD(__reg_0, 11); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4); __CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0); __CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3); __LOAD(__reg_0, 12); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0); __CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3); __CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4); __CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2); __LOAD(__reg_0, 13); __CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4); __CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0); __CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3); __LOAD(__reg_0, 14); __CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); __CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, 
__reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0); __CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3); __CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4); __CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2); __LOAD(__reg_0, 15); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0); __CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3); __CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4); __CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0); __CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3); __LOAD(__reg_0, 16); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4); __CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0); __CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3); __CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4); __STORE(2, __reg_7_2); __LOAD(__reg_0, 17); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0); __CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3); __CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4); __CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0); __STORE(3, __reg_7_3); __LOAD(__reg_0, 18); __CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4); __CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0); __CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3); __CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1); __STORE(4, __reg_7_4); __LOAD(__reg_0, 19); __CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); __CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0); __CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3); __CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4); __CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2); __STORE(5, __reg_7_0); __LOAD(__reg_0, 20); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0); __CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3); __CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4); __CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2); 
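// From row 16 onward the pipeline is full: each __LOAD retires one output row that
// has passed through all seven fused time steps (__STORE row = load row - 14, i.e.
// minus __halo1 * __side0Len).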
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0); __CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3); __STORE(6, __reg_7_1); __LOAD(__reg_0, 21); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4); __CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0); __CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3); __CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4); __STORE(7, __reg_7_2); __LOAD(__reg_0, 22); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0); __CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3); __CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4); __CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0); __STORE(8, __reg_7_3); __LOAD(__reg_0, 23); __CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4); __CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0); __CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3); __CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1); __STORE(9, __reg_7_4); __LOAD(__reg_0, 24); __CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); __CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0); __CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3); __CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4); __CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2); __STORE(10, __reg_7_0); __LOAD(__reg_0, 25); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0); __CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3); __CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4); __CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0); __CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3); __STORE(11, __reg_7_1); __LOAD(__reg_0, 26); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4); __CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0); __CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3); __CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4); 
__STORE(12, __reg_7_2); __LOAD(__reg_0, 27); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0); __CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3); __CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4); __CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0); __STORE(13, __reg_7_3); __LOAD(__reg_0, 28); __CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4); __CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0); __CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3); __CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1); __STORE(14, __reg_7_4); } else { __LOAD(__reg_0, 0); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0); __LOAD(__reg_0, 1); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0); __LOAD(__reg_0, 2); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __LOAD(__reg_0, 3); __CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); __LOAD(__reg_0, 4); __CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); __CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2); __LOAD(__reg_0, 5); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0); __CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3); __LOAD(__reg_0, 6); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4); __LOAD(__reg_0, 7); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0); __LOAD(__reg_0, 8); __CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4); __LOAD(__reg_0, 9); __CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); __CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0); __LOAD(__reg_0, 10); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0); __CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3); __CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1); __LOAD(__reg_0, 11); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4); __CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2); __LOAD(__reg_0, 12); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0); __CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3); __CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1); __LOAD(__reg_0, 13); __CALC1(__reg_1_0, 
__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4); __CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2); __LOAD(__reg_0, 14); __CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); __CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0); __CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3); __LOAD(__reg_0, 15); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0); __CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3); __CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4); __LOAD(__reg_0, 16); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4); __CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0); __CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3); __LOAD(__reg_0, 17); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0); __CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3); __CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4); __LOAD(__reg_0, 18); __CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4); __CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0); __LOAD(__reg_0, 19); __CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); __CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0); __CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3); __CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1); __LOAD(__reg_0, 20); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0); __CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3); __CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4); __CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0); __LOAD(__reg_0, 21); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4); __CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0); __CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3); __CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1); __LOAD(__reg_0, 22); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_2, __reg_2_1, 
__reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0); __CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3); __CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4); __CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2); __LOAD(__reg_0, 23); __CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4); __CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0); __CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3); __LOAD(__reg_0, 24); __CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); __CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0); __CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3); __CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4); __CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2); __LOAD(__reg_0, 25); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0); __CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3); __CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4); __CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0); __CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3); __LOAD(__reg_0, 26); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4); __CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0); __CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3); __CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4); __LOAD(__reg_0, 27); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0); __CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3); __CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4); __CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0); __LOAD(__reg_0, 28); __CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4); __CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0); __CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3); __CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1); __STORE(14, 
__reg_7_4); } __a_sb = __a_sb_double + __blockSize * 1; if (__c1Id == __side1Num - 1) { for (__h = 29; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 7;) { __LOAD(__reg_0, __h); __CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); __CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0); __CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3); __CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4); __CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2); __STORE(__h - 14, __reg_7_0); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0); __CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3); __CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4); __CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0); __CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3); __STORE(__h - 14, __reg_7_1); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4); __CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0); __CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3); __CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4); __STORE(__h - 14, __reg_7_2); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0); __CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3); __CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4); __CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0); __STORE(__h - 14, __reg_7_3); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4); __CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0); __CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3); __CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1); __STORE(__h - 14, __reg_7_4); __h++; __DB_SWITCH(); __syncthreads(); } if (0) {} else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0, __h + 0); __CALC1(__reg_1_1, __reg_1_1, __reg_1_1, __reg_1_3, __reg_1_2, __reg_0); __CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0); __CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3); __CALC5(__reg_5_3, __reg_5_2, __reg_5_1, 
__reg_5_0, __reg_5_4, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4); __CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2); __STORE(__h - 14, __reg_7_0); __reg_1_4 = __reg_0; __LOAD(__reg_0, __h + 1); __CALC1(__reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_3, __reg_0); __CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3); __CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4); __CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0); __CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3); __STORE(__h - 13, __reg_7_1); __reg_1_0 = __reg_0; __CALC2(__reg_2_1, __reg_2_1, __reg_2_1, __reg_2_3, __reg_2_2, __reg_1_4); __CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0); __CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3); __CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4); __STORE(__h - 12, __reg_7_2); __reg_2_4 = __reg_1_4; __CALC2(__reg_2_2, __reg_2_2, __reg_2_2, __reg_2_2, __reg_2_3, __reg_1_0); __CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3); __CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4); __CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0); __STORE(__h - 11, __reg_7_3); __reg_2_0 = __reg_1_0; __CALC3(__reg_3_1, __reg_3_1, __reg_3_1, __reg_3_3, __reg_3_2, __reg_2_4); __CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0); __CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3); __CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1); __STORE(__h - 10, __reg_7_4); __reg_3_4 = __reg_2_4; __CALC3(__reg_3_2, __reg_3_2, __reg_3_2, __reg_3_2, __reg_3_3, __reg_2_0); __CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3); __CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4); __CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2); __STORE(__h - 9, __reg_7_0); __reg_3_0 = __reg_2_0; __CALC4(__reg_4_1, __reg_4_1, __reg_4_1, __reg_4_3, __reg_4_2, __reg_3_4); __CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0); __CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3); __STORE(__h - 8, __reg_7_1); __reg_4_4 = __reg_3_4; __CALC4(__reg_4_2, __reg_4_2, __reg_4_2, __reg_4_2, __reg_4_3, __reg_3_0); __CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3); __CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4); __STORE(__h - 7, __reg_7_2); __reg_4_0 = __reg_3_0; __CALC5(__reg_5_1, __reg_5_1, __reg_5_1, __reg_5_3, __reg_5_2, __reg_4_4); __CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2); 
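/* Bottom-edge drain of the 7-stage fused pipeline: no rows remain to load
   in this (__h + 2) case, and the repeated register arguments in the
   __CALCn calls above appear to implement the bottom-boundary treatment,
   retiring accumulators that receive no further row contributions while
   the deeper stages flush their in-flight rows to __STORE. */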
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0); __STORE(__h - 6, __reg_7_3); __reg_5_4 = __reg_4_4; __CALC5(__reg_5_2, __reg_5_2, __reg_5_2, __reg_5_2, __reg_5_3, __reg_4_0); __CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3); __CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1); __STORE(__h - 5, __reg_7_4); __reg_5_0 = __reg_4_0; __CALC6(__reg_6_1, __reg_6_1, __reg_6_1, __reg_6_3, __reg_6_2, __reg_5_4); __CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2); __STORE(__h - 4, __reg_7_0); __reg_6_4 = __reg_5_4; __CALC6(__reg_6_2, __reg_6_2, __reg_6_2, __reg_6_2, __reg_6_3, __reg_5_0); __CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3); __STORE(__h - 3, __reg_7_1); __reg_6_0 = __reg_5_0; __CALC7(__reg_7_1, __reg_7_1, __reg_7_1, __reg_7_3, __reg_7_2, __reg_6_4); __STORE(__h - 2, __reg_7_2); __CALC7(__reg_7_2, __reg_7_2, __reg_7_2, __reg_7_2, __reg_7_3, __reg_6_0); __STORE(__h - 1, __reg_7_3); } else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0, __h + 0); __CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); __CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0); __CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3); __CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4); __CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2); __STORE(__h - 14, __reg_7_0); __LOAD(__reg_0, __h + 1); __CALC1(__reg_1_2, __reg_1_2, __reg_1_2, __reg_1_4, __reg_1_3, __reg_0); __CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3); __CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4); __CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0); __CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3); __STORE(__h - 13, __reg_7_1); __reg_1_0 = __reg_0; __LOAD(__reg_0, __h + 2); __CALC1(__reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_4, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4); __CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0); __CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3); __CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4); __STORE(__h - 12, __reg_7_2); __reg_1_1 = __reg_0; __CALC2(__reg_2_2, __reg_2_2, __reg_2_2, __reg_2_4, __reg_2_3, __reg_1_0); __CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3); __CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4); __CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0); __STORE(__h - 11, __reg_7_3); __reg_2_0 = __reg_1_0; __CALC2(__reg_2_3, __reg_2_3, __reg_2_3, __reg_2_3, __reg_2_4, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4); __CALC4(__reg_4_4, __reg_4_3, 
__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0); __CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3); __CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1); __STORE(__h - 10, __reg_7_4); __reg_2_1 = __reg_1_1; __CALC3(__reg_3_2, __reg_3_2, __reg_3_2, __reg_3_4, __reg_3_3, __reg_2_0); __CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3); __CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4); __CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2); __STORE(__h - 9, __reg_7_0); __reg_3_0 = __reg_2_0; __CALC3(__reg_3_3, __reg_3_3, __reg_3_3, __reg_3_3, __reg_3_4, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4); __CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0); __CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3); __STORE(__h - 8, __reg_7_1); __reg_3_1 = __reg_2_1; __CALC4(__reg_4_2, __reg_4_2, __reg_4_2, __reg_4_4, __reg_4_3, __reg_3_0); __CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3); __CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4); __STORE(__h - 7, __reg_7_2); __reg_4_0 = __reg_3_0; __CALC4(__reg_4_3, __reg_4_3, __reg_4_3, __reg_4_3, __reg_4_4, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4); __CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0); __STORE(__h - 6, __reg_7_3); __reg_4_1 = __reg_3_1; __CALC5(__reg_5_2, __reg_5_2, __reg_5_2, __reg_5_4, __reg_5_3, __reg_4_0); __CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3); __CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1); __STORE(__h - 5, __reg_7_4); __reg_5_0 = __reg_4_0; __CALC5(__reg_5_3, __reg_5_3, __reg_5_3, __reg_5_3, __reg_5_4, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4); __CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2); __STORE(__h - 4, __reg_7_0); __reg_5_1 = __reg_4_1; __CALC6(__reg_6_2, __reg_6_2, __reg_6_2, __reg_6_4, __reg_6_3, __reg_5_0); __CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3); __STORE(__h - 3, __reg_7_1); __reg_6_0 = __reg_5_0; __CALC6(__reg_6_3, __reg_6_3, __reg_6_3, __reg_6_3, __reg_6_4, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4); __STORE(__h - 2, __reg_7_2); __reg_6_1 = __reg_5_1; __CALC7(__reg_7_2, __reg_7_2, __reg_7_2, __reg_7_4, __reg_7_3, __reg_6_0); __STORE(__h - 1, __reg_7_3); __CALC7(__reg_7_3, __reg_7_3, __reg_7_3, __reg_7_3, __reg_7_4, __reg_6_1); __STORE(__h + 0, __reg_7_4); } else if (__h + 4 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0, __h + 0); __CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); __CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0); __CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3); __CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, 
__reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4); __CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2); __STORE(__h - 14, __reg_7_0); __LOAD(__reg_0, __h + 1); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0); __CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3); __CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4); __CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0); __CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3); __STORE(__h - 13, __reg_7_1); __LOAD(__reg_0, __h + 2); __CALC1(__reg_1_3, __reg_1_3, __reg_1_3, __reg_1_0, __reg_1_4, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4); __CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0); __CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3); __CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4); __STORE(__h - 12, __reg_7_2); __reg_1_1 = __reg_0; __LOAD(__reg_0, __h + 3); __CALC1(__reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_0, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0); __CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3); __CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4); __CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0); __STORE(__h - 11, __reg_7_3); __reg_1_2 = __reg_0; __CALC2(__reg_2_3, __reg_2_3, __reg_2_3, __reg_2_0, __reg_2_4, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4); __CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0); __CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3); __CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1); __STORE(__h - 10, __reg_7_4); __reg_2_1 = __reg_1_1; __CALC2(__reg_2_4, __reg_2_4, __reg_2_4, __reg_2_4, __reg_2_0, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0); __CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3); __CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4); __CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2); __STORE(__h - 9, __reg_7_0); __reg_2_2 = __reg_1_2; __CALC3(__reg_3_3, __reg_3_3, __reg_3_3, __reg_3_0, __reg_3_4, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4); __CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0); __CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3); __STORE(__h - 8, __reg_7_1); __reg_3_1 = __reg_2_1; __CALC3(__reg_3_4, __reg_3_4, __reg_3_4, __reg_3_4, __reg_3_0, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0); __CALC5(__reg_5_0, __reg_5_4, __reg_5_3, 
__reg_5_2, __reg_5_1, __reg_4_3); __CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4); __STORE(__h - 7, __reg_7_2); __reg_3_2 = __reg_2_2; __CALC4(__reg_4_3, __reg_4_3, __reg_4_3, __reg_4_0, __reg_4_4, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4); __CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0); __STORE(__h - 6, __reg_7_3); __reg_4_1 = __reg_3_1; __CALC4(__reg_4_4, __reg_4_4, __reg_4_4, __reg_4_4, __reg_4_0, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0); __CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3); __CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1); __STORE(__h - 5, __reg_7_4); __reg_4_2 = __reg_3_2; __CALC5(__reg_5_3, __reg_5_3, __reg_5_3, __reg_5_0, __reg_5_4, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4); __CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2); __STORE(__h - 4, __reg_7_0); __reg_5_1 = __reg_4_1; __CALC5(__reg_5_4, __reg_5_4, __reg_5_4, __reg_5_4, __reg_5_0, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0); __CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3); __STORE(__h - 3, __reg_7_1); __reg_5_2 = __reg_4_2; __CALC6(__reg_6_3, __reg_6_3, __reg_6_3, __reg_6_0, __reg_6_4, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4); __STORE(__h - 2, __reg_7_2); __reg_6_1 = __reg_5_1; __CALC6(__reg_6_4, __reg_6_4, __reg_6_4, __reg_6_4, __reg_6_0, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0); __STORE(__h - 1, __reg_7_3); __reg_6_2 = __reg_5_2; __CALC7(__reg_7_3, __reg_7_3, __reg_7_3, __reg_7_0, __reg_7_4, __reg_6_1); __STORE(__h + 0, __reg_7_4); __CALC7(__reg_7_4, __reg_7_4, __reg_7_4, __reg_7_4, __reg_7_0, __reg_6_2); __STORE(__h + 1, __reg_7_0); } else if (__h + 5 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0, __h + 0); __CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); __CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0); __CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3); __CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4); __CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2); __STORE(__h - 14, __reg_7_0); __LOAD(__reg_0, __h + 1); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0); __CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3); __CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4); __CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0); __CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3); __STORE(__h - 13, __reg_7_1); __LOAD(__reg_0, __h + 2); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4); __CALC3(__reg_3_4, __reg_3_3, __reg_3_2, 
__reg_3_1, __reg_3_0, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0); __CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3); __CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4); __STORE(__h - 12, __reg_7_2); __LOAD(__reg_0, __h + 3); __CALC1(__reg_1_4, __reg_1_4, __reg_1_4, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0); __CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3); __CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4); __CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0); __STORE(__h - 11, __reg_7_3); __reg_1_2 = __reg_0; __LOAD(__reg_0, __h + 4); __CALC1(__reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_1, __reg_0); __CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4); __CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0); __CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3); __CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1); __STORE(__h - 10, __reg_7_4); __reg_1_3 = __reg_0; __CALC2(__reg_2_4, __reg_2_4, __reg_2_4, __reg_2_1, __reg_2_0, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0); __CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3); __CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4); __CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2); __STORE(__h - 9, __reg_7_0); __reg_2_2 = __reg_1_2; __CALC2(__reg_2_0, __reg_2_0, __reg_2_0, __reg_2_0, __reg_2_1, __reg_1_3); __CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4); __CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0); __CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3); __STORE(__h - 8, __reg_7_1); __reg_2_3 = __reg_1_3; __CALC3(__reg_3_4, __reg_3_4, __reg_3_4, __reg_3_1, __reg_3_0, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0); __CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3); __CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4); __STORE(__h - 7, __reg_7_2); __reg_3_2 = __reg_2_2; __CALC3(__reg_3_0, __reg_3_0, __reg_3_0, __reg_3_0, __reg_3_1, __reg_2_3); __CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4); __CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0); __STORE(__h - 6, __reg_7_3); __reg_3_3 = __reg_2_3; __CALC4(__reg_4_4, __reg_4_4, __reg_4_4, __reg_4_1, __reg_4_0, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, 
__reg_5_3, __reg_4_0); __CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3); __CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1); __STORE(__h - 5, __reg_7_4); __reg_4_2 = __reg_3_2; __CALC4(__reg_4_0, __reg_4_0, __reg_4_0, __reg_4_0, __reg_4_1, __reg_3_3); __CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4); __CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2); __STORE(__h - 4, __reg_7_0); __reg_4_3 = __reg_3_3; __CALC5(__reg_5_4, __reg_5_4, __reg_5_4, __reg_5_1, __reg_5_0, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0); __CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3); __STORE(__h - 3, __reg_7_1); __reg_5_2 = __reg_4_2; __CALC5(__reg_5_0, __reg_5_0, __reg_5_0, __reg_5_0, __reg_5_1, __reg_4_3); __CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4); __STORE(__h - 2, __reg_7_2); __reg_5_3 = __reg_4_3; __CALC6(__reg_6_4, __reg_6_4, __reg_6_4, __reg_6_1, __reg_6_0, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0); __STORE(__h - 1, __reg_7_3); __reg_6_2 = __reg_5_2; __CALC6(__reg_6_0, __reg_6_0, __reg_6_0, __reg_6_0, __reg_6_1, __reg_5_3); __CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1); __STORE(__h + 0, __reg_7_4); __reg_6_3 = __reg_5_3; __CALC7(__reg_7_4, __reg_7_4, __reg_7_4, __reg_7_1, __reg_7_0, __reg_6_2); __STORE(__h + 1, __reg_7_0); __CALC7(__reg_7_0, __reg_7_0, __reg_7_0, __reg_7_0, __reg_7_1, __reg_6_3); __STORE(__h + 2, __reg_7_1); } else if (__h + 6 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0, __h + 0); __CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); __CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0); __CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3); __CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4); __CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2); __STORE(__h - 14, __reg_7_0); __LOAD(__reg_0, __h + 1); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0); __CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3); __CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4); __CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0); __CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3); __STORE(__h - 13, __reg_7_1); __LOAD(__reg_0, __h + 2); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4); __CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0); __CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3); __CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4); __STORE(__h - 12, __reg_7_2); 
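/* (__h + 6) epilogue: rows __h+3 .. __h+5 are still loaded and fed through
   the stage chain below; register duplication then progressively clamps
   each stage as its last real input row passes through. */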
__LOAD(__reg_0, __h + 3); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0); __CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3); __CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4); __CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0); __STORE(__h - 11, __reg_7_3); __LOAD(__reg_0, __h + 4); __CALC1(__reg_1_0, __reg_1_0, __reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4); __CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0); __CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3); __CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1); __STORE(__h - 10, __reg_7_4); __reg_1_3 = __reg_0; __LOAD(__reg_0, __h + 5); __CALC1(__reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_2, __reg_0); __CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0); __CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3); __CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4); __CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2); __STORE(__h - 9, __reg_7_0); __reg_1_4 = __reg_0; __CALC2(__reg_2_0, __reg_2_0, __reg_2_0, __reg_2_2, __reg_2_1, __reg_1_3); __CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4); __CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0); __CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3); __STORE(__h - 8, __reg_7_1); __reg_2_3 = __reg_1_3; __CALC2(__reg_2_1, __reg_2_1, __reg_2_1, __reg_2_1, __reg_2_2, __reg_1_4); __CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0); __CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3); __CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4); __STORE(__h - 7, __reg_7_2); __reg_2_4 = __reg_1_4; __CALC3(__reg_3_0, __reg_3_0, __reg_3_0, __reg_3_2, __reg_3_1, __reg_2_3); __CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4); __CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0); __STORE(__h - 6, __reg_7_3); __reg_3_3 = __reg_2_3; __CALC3(__reg_3_1, __reg_3_1, __reg_3_1, __reg_3_1, __reg_3_2, __reg_2_4); __CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0); __CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3); __CALC7(__reg_7_3, __reg_7_2, __reg_7_1, 
__reg_7_0, __reg_7_4, __reg_6_1); __STORE(__h - 5, __reg_7_4); __reg_3_4 = __reg_2_4; __CALC4(__reg_4_0, __reg_4_0, __reg_4_0, __reg_4_2, __reg_4_1, __reg_3_3); __CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4); __CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2); __STORE(__h - 4, __reg_7_0); __reg_4_3 = __reg_3_3; __CALC4(__reg_4_1, __reg_4_1, __reg_4_1, __reg_4_1, __reg_4_2, __reg_3_4); __CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0); __CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3); __STORE(__h - 3, __reg_7_1); __reg_4_4 = __reg_3_4; __CALC5(__reg_5_0, __reg_5_0, __reg_5_0, __reg_5_2, __reg_5_1, __reg_4_3); __CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4); __STORE(__h - 2, __reg_7_2); __reg_5_3 = __reg_4_3; __CALC5(__reg_5_1, __reg_5_1, __reg_5_1, __reg_5_1, __reg_5_2, __reg_4_4); __CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0); __STORE(__h - 1, __reg_7_3); __reg_5_4 = __reg_4_4; __CALC6(__reg_6_0, __reg_6_0, __reg_6_0, __reg_6_2, __reg_6_1, __reg_5_3); __CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1); __STORE(__h + 0, __reg_7_4); __reg_6_3 = __reg_5_3; __CALC6(__reg_6_1, __reg_6_1, __reg_6_1, __reg_6_1, __reg_6_2, __reg_5_4); __CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2); __STORE(__h + 1, __reg_7_0); __reg_6_4 = __reg_5_4; __CALC7(__reg_7_0, __reg_7_0, __reg_7_0, __reg_7_2, __reg_7_1, __reg_6_3); __STORE(__h + 2, __reg_7_1); __CALC7(__reg_7_1, __reg_7_1, __reg_7_1, __reg_7_1, __reg_7_2, __reg_6_4); __STORE(__h + 3, __reg_7_2); } } else { for (__h = 29; __h <= __side1LenOl - 5;) { __LOAD(__reg_0, __h); __CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); __CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0); __CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3); __CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4); __CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2); __STORE(__h - 14, __reg_7_0); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0); __CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3); __CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4); __CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0); __CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3); __STORE(__h - 14, __reg_7_1); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4); __CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0); __CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, 
__reg_5_1, __reg_4_3); __CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4); __STORE(__h - 14, __reg_7_2); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0); __CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3); __CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4); __CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0); __STORE(__h - 14, __reg_7_3); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4); __CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0); __CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3); __CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1); __STORE(__h - 14, __reg_7_4); __h++; __DB_SWITCH(); __syncthreads(); } if (__h == __side1LenOl) return; __LOAD(__reg_0, __h); __CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); __CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0); __CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3); __CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4); __CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2); __STORE(__h - 14, __reg_7_0); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0, __h); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0); __CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3); __CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4); __CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0); __CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3); __STORE(__h - 14, __reg_7_1); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0, __h); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4); __CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0); __CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3); __CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4); __STORE(__h - 14, __reg_7_2); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0, __h); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0); __CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3); __CALC4(__reg_4_3, 
__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4); __CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0); __STORE(__h - 14, __reg_7_3); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0, __h); __CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4); __CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0); __CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3); __CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1); __STORE(__h - 14, __reg_7_4); __h++; } } __global__ void kernel0_6(double *A, int dimsize, int timestep, int c0) { #ifndef AN5D_TYPE #define AN5D_TYPE unsigned #endif const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0); #define __c0 c0 const AN5D_TYPE __c1Len = (dimsize - 2 - 2); const AN5D_TYPE __c1Pad = (2); #define __c1 c1 const AN5D_TYPE __c2Len = (dimsize - 2 - 2); const AN5D_TYPE __c2Pad = (2); #define __c2 c2 const AN5D_TYPE __halo1 = 2; const AN5D_TYPE __halo2 = 2; const AN5D_TYPE __side0Len = 6; const AN5D_TYPE __side1Len = 512; const AN5D_TYPE __side2Len = 488; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len; const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len; const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x; const AN5D_TYPE __local_c2 = __tid; const AN5D_TYPE __c1Id = blockIdx.x / __side2Num; const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2; double __reg_0; double __reg_1_0; double __reg_1_1; double __reg_1_2; double __reg_1_3; double __reg_1_4; double __reg_2_0; double __reg_2_1; double __reg_2_2; double __reg_2_3; double __reg_2_4; double __reg_3_0; double __reg_3_1; double __reg_3_2; double __reg_3_3; double __reg_3_4; double __reg_4_0; double __reg_4_1; double __reg_4_2; double __reg_4_3; double __reg_4_4; double __reg_5_0; double __reg_5_1; double __reg_5_2; double __reg_5_3; double __reg_5_4; double __reg_6_0; double __reg_6_1; double __reg_6_2; double __reg_6_3; double __reg_6_4; __shared__ double __a_sb_double[__blockSize * 2]; double *__a_sb = __a_sb_double; const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2; const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len; const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1); const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2); const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3); const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4); const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 
< __side2LenOl - (__halo2 * 5); const AN5D_TYPE __writeValid6 = __updateValid && __local_c2 >= (__halo2 * 6) && __local_c2 < __side2LenOl - (__halo2 * 6); const AN5D_TYPE __storeValid = __writeValid6; AN5D_TYPE __c1; AN5D_TYPE __h; const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id; #define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0) #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR_0_wrap(__rn0, __a) do { __rn0 = (((((((((((((((((((((((((0.03125f * (__SBREF(__a_sb, -2))) + (0.03126f * (__SBREF(__a_sb, -1)))) + (0.03127f * (__REGREF(__a, 0)))) + (0.03128f * (__SBREF(__a_sb, 1)))) + (0.03129f * (__SBREF(__a_sb, 2)))))))))))))))))))))))); } while (0) #define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0) #define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0) #define __CALCEXPR_0(out, a) do { __CALCEXPR_0_wrap(out, a); } while (0); #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR_1_wrap(__rn0, __a) do { __rn0 = (((((((((((((((((((((0.03130f * (__SBREF(__a_sb, -2)))) + (0.03131f * (__SBREF(__a_sb, -1)))) + (0.03132f * (__REGREF(__a, 0)))) + (0.03133f * (__SBREF(__a_sb, 1)))) + (0.03134f * (__SBREF(__a_sb, 2))))))))))))))))))); } while (0) #define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0) #define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0) #define __CALCEXPR_1(out, a) do { double etmp; __CALCEXPR_1_wrap(etmp, a); out += etmp; } while (0); #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR_2_wrap(__rn0, __a) do { __rn0 = ((((((((((((((((0.03135f * (__SBREF(__a_sb, -2)))) + (0.03136f * (__SBREF(__a_sb, -1)))) + (0.24712f * (__REGREF(__a, 0)))) + (0.03138f * (__SBREF(__a_sb, 1)))) + (0.03139f * (__SBREF(__a_sb, 2)))))))))))))); } while (0) #define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0) #define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0) #define __CALCEXPR_2(out, a) do { double etmp; __CALCEXPR_2_wrap(etmp, a); out += etmp; } while (0); #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR_3_wrap(__rn0, __a) do { __rn0 = (((((((((((0.03140f * (__SBREF(__a_sb, -2)))) + (0.03141f * (__SBREF(__a_sb, -1)))) + (0.03142f * (__REGREF(__a, 0)))) + (0.03143f * (__SBREF(__a_sb, 1)))) + (0.03144f * (__SBREF(__a_sb, 2))))))))); } while (0) #define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? 
__blockSize : 0]; } while (0) #define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0) #define __CALCEXPR_3(out, a) do { double etmp; __CALCEXPR_3_wrap(etmp, a); out += etmp; } while (0); #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR_4_wrap(__rn0, __a) do { __rn0 = ((((((0.03145f * (__SBREF(__a_sb, -2)))) + (0.03146f * (__SBREF(__a_sb, -1)))) + (0.03147f * (__REGREF(__a, 0)))) + (0.03148f * (__SBREF(__a_sb, 1)))) + (0.03149f * (__SBREF(__a_sb, 2)))); } while (0) #define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0) #define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0) #define __CALCEXPR_4(out, a) do { double etmp; __CALCEXPR_4_wrap(etmp, a); out += etmp; } while (0); #define __CALCEXPR(out0, out1, out2, out3, out4, reg) do { __CALCEXPR_0(out0, reg); __CALCEXPR_1(out1, reg); __CALCEXPR_2(out2, reg); __CALCEXPR_3(out3, reg); __CALCEXPR_4(out4, reg); } while (0); #define __CALC1(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid1) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0) #define __CALC2(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid2) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0) #define __CALC3(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid3) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0) #define __CALC4(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid4) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0) #define __CALC5(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid5) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0) #define __CALC6(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid6) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0) #define __STORE(h, out) do { if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __DEST = out; }} while (0) if (__c1Id == 0) { __LOAD(__reg_0, 0); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_0); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_0); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_0); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_0); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_0); __LOAD(__reg_0, 1); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0); __CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_0); __CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_0); __CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_0); __CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_0); __CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_0); __LOAD(__reg_0, 2); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __LOAD(__reg_0, 3); __CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); __LOAD(__reg_0, 4); __CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); __CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, 
__reg_1_2); __LOAD(__reg_0, 5); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0); __CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3); __LOAD(__reg_0, 6); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4); __CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2); __LOAD(__reg_0, 7); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0); __CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3); __LOAD(__reg_0, 8); __CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4); __CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2); __LOAD(__reg_0, 9); __CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); __CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0); __CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3); __LOAD(__reg_0, 10); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0); __CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3); __CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4); __CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2); __LOAD(__reg_0, 11); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4); __CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0); __CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3); __LOAD(__reg_0, 12); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0); __CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3); __CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4); __CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2); __LOAD(__reg_0, 13); __CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4); __CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0); __CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3); __LOAD(__reg_0, 14); __CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); __CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0); __CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3); __CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4); __STORE(2, __reg_6_2); __LOAD(__reg_0, 15); 
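/* Warm-up of the 6-step kernel on the first c1 tile: with __halo1 = 2 and
   __side0Len = 6, stores trail loads by __halo1 * __side0Len = 12 rows,
   so loading row 14 above produced output row 2, and each further load
   now retires one more output row. */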
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0); __CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3); __CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4); __CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0); __STORE(3, __reg_6_3); __LOAD(__reg_0, 16); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4); __CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0); __CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3); __CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1); __STORE(4, __reg_6_4); __LOAD(__reg_0, 17); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0); __CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3); __CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4); __CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2); __STORE(5, __reg_6_0); __LOAD(__reg_0, 18); __CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4); __CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0); __CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3); __STORE(6, __reg_6_1); __LOAD(__reg_0, 19); __CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); __CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0); __CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3); __CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4); __STORE(7, __reg_6_2); __LOAD(__reg_0, 20); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0); __CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3); __CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4); __CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0); __STORE(8, __reg_6_3); __LOAD(__reg_0, 21); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4); __CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0); __CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3); __CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1); __STORE(9, __reg_6_4); __LOAD(__reg_0, 22); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_2, __reg_2_1, 
__reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0); __CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3); __CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4); __CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2); __STORE(10, __reg_6_0); __LOAD(__reg_0, 23); __CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4); __CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0); __CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3); __STORE(11, __reg_6_1); __LOAD(__reg_0, 24); __CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); __CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0); __CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3); __CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4); __STORE(12, __reg_6_2); } else { __LOAD(__reg_0, 0); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0); __LOAD(__reg_0, 1); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0); __LOAD(__reg_0, 2); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __LOAD(__reg_0, 3); __CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); __LOAD(__reg_0, 4); __CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); __CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2); __LOAD(__reg_0, 5); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0); __CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3); __LOAD(__reg_0, 6); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4); __LOAD(__reg_0, 7); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0); __LOAD(__reg_0, 8); __CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4); __LOAD(__reg_0, 9); __CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); __CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0); __LOAD(__reg_0, 10); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0); __CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3); __CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1); __LOAD(__reg_0, 11); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4); __CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2); __LOAD(__reg_0, 12); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0); 
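/* Interior c1 tiles (the else branch entered above) repeat the same
   warm-up over rows 0..24 but skip the first tile's clamped top-boundary
   stores; the single warm-up __STORE(12, ...) fires only once the full
   6-stage pipeline is primed at row 24. */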
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3); __CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1); __LOAD(__reg_0, 13); __CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4); __CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2); __LOAD(__reg_0, 14); __CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); __CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0); __CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3); __LOAD(__reg_0, 15); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0); __CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3); __CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4); __LOAD(__reg_0, 16); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4); __CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0); __CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3); __LOAD(__reg_0, 17); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0); __CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3); __CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4); __LOAD(__reg_0, 18); __CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4); __CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0); __LOAD(__reg_0, 19); __CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); __CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0); __CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3); __CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1); __LOAD(__reg_0, 20); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0); __CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3); __CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4); __CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0); __LOAD(__reg_0, 21); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4); __CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0); __CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3); 
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1); __LOAD(__reg_0, 22); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0); __CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3); __CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4); __CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2); __LOAD(__reg_0, 23); __CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4); __CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0); __CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3); __LOAD(__reg_0, 24); __CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); __CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0); __CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3); __CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4); __STORE(12, __reg_6_2); } __a_sb = __a_sb_double + __blockSize * 0; if (__c1Id == __side1Num - 1) { for (__h = 25; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 7;) { __LOAD(__reg_0, __h); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0); __CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3); __CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4); __CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0); __STORE(__h - 12, __reg_6_3); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4); __CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0); __CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3); __CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1); __STORE(__h - 12, __reg_6_4); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0); __CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3); __CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4); __CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2); __STORE(__h - 12, __reg_6_0); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4); __CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, 
__reg_5_3, __reg_4_0); __CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3); __STORE(__h - 12, __reg_6_1); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); __CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0); __CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3); __CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4); __STORE(__h - 12, __reg_6_2); __h++; } if (0) {} else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0, __h + 0); __CALC1(__reg_1_2, __reg_1_2, __reg_1_2, __reg_1_4, __reg_1_3, __reg_0); __CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3); __CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4); __CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0); __STORE(__h - 12, __reg_6_3); __reg_1_0 = __reg_0; __LOAD(__reg_0, __h + 1); __CALC1(__reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_4, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4); __CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0); __CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3); __CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1); __STORE(__h - 11, __reg_6_4); __reg_1_1 = __reg_0; __CALC2(__reg_2_2, __reg_2_2, __reg_2_2, __reg_2_4, __reg_2_3, __reg_1_0); __CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3); __CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4); __CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2); __STORE(__h - 10, __reg_6_0); __reg_2_0 = __reg_1_0; __CALC2(__reg_2_3, __reg_2_3, __reg_2_3, __reg_2_3, __reg_2_4, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4); __CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0); __CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3); __STORE(__h - 9, __reg_6_1); __reg_2_1 = __reg_1_1; __CALC3(__reg_3_2, __reg_3_2, __reg_3_2, __reg_3_4, __reg_3_3, __reg_2_0); __CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3); __CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4); __STORE(__h - 8, __reg_6_2); __reg_3_0 = __reg_2_0; __CALC3(__reg_3_3, __reg_3_3, __reg_3_3, __reg_3_3, __reg_3_4, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4); __CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0); __STORE(__h - 7, __reg_6_3); __reg_3_1 = __reg_2_1; __CALC4(__reg_4_2, __reg_4_2, __reg_4_2, __reg_4_4, __reg_4_3, __reg_3_0); __CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3); __CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1); 
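/* Epilogue (__h + 2 rows remaining): no further rows exist to load, so
   the boundary-clamped __CALCn calls in this branch simply flush the rows
   still in flight (__h - 12 .. __h - 1) out of the deeper stages. */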
__STORE(__h - 6, __reg_6_4); __reg_4_0 = __reg_3_0; __CALC4(__reg_4_3, __reg_4_3, __reg_4_3, __reg_4_3, __reg_4_4, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4); __CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2); __STORE(__h - 5, __reg_6_0); __reg_4_1 = __reg_3_1; __CALC5(__reg_5_2, __reg_5_2, __reg_5_2, __reg_5_4, __reg_5_3, __reg_4_0); __CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3); __STORE(__h - 4, __reg_6_1); __reg_5_0 = __reg_4_0; __CALC5(__reg_5_3, __reg_5_3, __reg_5_3, __reg_5_3, __reg_5_4, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4); __STORE(__h - 3, __reg_6_2); __reg_5_1 = __reg_4_1; __CALC6(__reg_6_2, __reg_6_2, __reg_6_2, __reg_6_4, __reg_6_3, __reg_5_0); __STORE(__h - 2, __reg_6_3); __CALC6(__reg_6_3, __reg_6_3, __reg_6_3, __reg_6_3, __reg_6_4, __reg_5_1); __STORE(__h - 1, __reg_6_4); } else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0, __h + 0); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0); __CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3); __CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4); __CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0); __STORE(__h - 12, __reg_6_3); __LOAD(__reg_0, __h + 1); __CALC1(__reg_1_3, __reg_1_3, __reg_1_3, __reg_1_0, __reg_1_4, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4); __CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0); __CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3); __CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1); __STORE(__h - 11, __reg_6_4); __reg_1_1 = __reg_0; __LOAD(__reg_0, __h + 2); __CALC1(__reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_0, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0); __CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3); __CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4); __CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2); __STORE(__h - 10, __reg_6_0); __reg_1_2 = __reg_0; __CALC2(__reg_2_3, __reg_2_3, __reg_2_3, __reg_2_0, __reg_2_4, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4); __CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0); __CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3); __STORE(__h - 9, __reg_6_1); __reg_2_1 = __reg_1_1; __CALC2(__reg_2_4, __reg_2_4, __reg_2_4, __reg_2_4, __reg_2_0, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0); __CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3); __CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4); __STORE(__h - 8, __reg_6_2); __reg_2_2 = __reg_1_2; __CALC3(__reg_3_3, __reg_3_3, __reg_3_3, __reg_3_0, __reg_3_4, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, 
__reg_4_2, __reg_3_4); __CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0); __STORE(__h - 7, __reg_6_3); __reg_3_1 = __reg_2_1; __CALC3(__reg_3_4, __reg_3_4, __reg_3_4, __reg_3_4, __reg_3_0, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0); __CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3); __CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1); __STORE(__h - 6, __reg_6_4); __reg_3_2 = __reg_2_2; __CALC4(__reg_4_3, __reg_4_3, __reg_4_3, __reg_4_0, __reg_4_4, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4); __CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2); __STORE(__h - 5, __reg_6_0); __reg_4_1 = __reg_3_1; __CALC4(__reg_4_4, __reg_4_4, __reg_4_4, __reg_4_4, __reg_4_0, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0); __CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3); __STORE(__h - 4, __reg_6_1); __reg_4_2 = __reg_3_2; __CALC5(__reg_5_3, __reg_5_3, __reg_5_3, __reg_5_0, __reg_5_4, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4); __STORE(__h - 3, __reg_6_2); __reg_5_1 = __reg_4_1; __CALC5(__reg_5_4, __reg_5_4, __reg_5_4, __reg_5_4, __reg_5_0, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0); __STORE(__h - 2, __reg_6_3); __reg_5_2 = __reg_4_2; __CALC6(__reg_6_3, __reg_6_3, __reg_6_3, __reg_6_0, __reg_6_4, __reg_5_1); __STORE(__h - 1, __reg_6_4); __CALC6(__reg_6_4, __reg_6_4, __reg_6_4, __reg_6_4, __reg_6_0, __reg_5_2); __STORE(__h + 0, __reg_6_0); } else if (__h + 4 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0, __h + 0); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0); __CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3); __CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4); __CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0); __STORE(__h - 12, __reg_6_3); __LOAD(__reg_0, __h + 1); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4); __CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0); __CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3); __CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1); __STORE(__h - 11, __reg_6_4); __LOAD(__reg_0, __h + 2); __CALC1(__reg_1_4, __reg_1_4, __reg_1_4, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0); __CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3); __CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4); __CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2); __STORE(__h - 10, __reg_6_0); __reg_1_2 = __reg_0; __LOAD(__reg_0, __h + 3); __CALC1(__reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_1, __reg_0); __CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, 
__reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4); __CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0); __CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3); __STORE(__h - 9, __reg_6_1); __reg_1_3 = __reg_0; __CALC2(__reg_2_4, __reg_2_4, __reg_2_4, __reg_2_1, __reg_2_0, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0); __CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3); __CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4); __STORE(__h - 8, __reg_6_2); __reg_2_2 = __reg_1_2; __CALC2(__reg_2_0, __reg_2_0, __reg_2_0, __reg_2_0, __reg_2_1, __reg_1_3); __CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4); __CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0); __STORE(__h - 7, __reg_6_3); __reg_2_3 = __reg_1_3; __CALC3(__reg_3_4, __reg_3_4, __reg_3_4, __reg_3_1, __reg_3_0, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0); __CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3); __CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1); __STORE(__h - 6, __reg_6_4); __reg_3_2 = __reg_2_2; __CALC3(__reg_3_0, __reg_3_0, __reg_3_0, __reg_3_0, __reg_3_1, __reg_2_3); __CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4); __CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2); __STORE(__h - 5, __reg_6_0); __reg_3_3 = __reg_2_3; __CALC4(__reg_4_4, __reg_4_4, __reg_4_4, __reg_4_1, __reg_4_0, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0); __CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3); __STORE(__h - 4, __reg_6_1); __reg_4_2 = __reg_3_2; __CALC4(__reg_4_0, __reg_4_0, __reg_4_0, __reg_4_0, __reg_4_1, __reg_3_3); __CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4); __STORE(__h - 3, __reg_6_2); __reg_4_3 = __reg_3_3; __CALC5(__reg_5_4, __reg_5_4, __reg_5_4, __reg_5_1, __reg_5_0, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0); __STORE(__h - 2, __reg_6_3); __reg_5_2 = __reg_4_2; __CALC5(__reg_5_0, __reg_5_0, __reg_5_0, __reg_5_0, __reg_5_1, __reg_4_3); __CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1); __STORE(__h - 1, __reg_6_4); __reg_5_3 = __reg_4_3; __CALC6(__reg_6_4, __reg_6_4, __reg_6_4, __reg_6_1, __reg_6_0, __reg_5_2); __STORE(__h + 0, __reg_6_0); __CALC6(__reg_6_0, __reg_6_0, __reg_6_0, __reg_6_0, __reg_6_1, __reg_5_3); __STORE(__h + 1, __reg_6_1); } else if (__h + 5 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0, __h + 0); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0); __CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3); __CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4); __CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, 
__reg_6_4, __reg_6_3, __reg_5_0); __STORE(__h - 12, __reg_6_3); __LOAD(__reg_0, __h + 1); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4); __CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0); __CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3); __CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1); __STORE(__h - 11, __reg_6_4); __LOAD(__reg_0, __h + 2); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0); __CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3); __CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4); __CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2); __STORE(__h - 10, __reg_6_0); __LOAD(__reg_0, __h + 3); __CALC1(__reg_1_0, __reg_1_0, __reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4); __CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0); __CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3); __STORE(__h - 9, __reg_6_1); __reg_1_3 = __reg_0; __LOAD(__reg_0, __h + 4); __CALC1(__reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_2, __reg_0); __CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0); __CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3); __CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4); __STORE(__h - 8, __reg_6_2); __reg_1_4 = __reg_0; __CALC2(__reg_2_0, __reg_2_0, __reg_2_0, __reg_2_2, __reg_2_1, __reg_1_3); __CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4); __CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0); __STORE(__h - 7, __reg_6_3); __reg_2_3 = __reg_1_3; __CALC2(__reg_2_1, __reg_2_1, __reg_2_1, __reg_2_1, __reg_2_2, __reg_1_4); __CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0); __CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3); __CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1); __STORE(__h - 6, __reg_6_4); __reg_2_4 = __reg_1_4; __CALC3(__reg_3_0, __reg_3_0, __reg_3_0, __reg_3_2, __reg_3_1, __reg_2_3); __CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4); __CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2); __STORE(__h - 5, __reg_6_0); __reg_3_3 = __reg_2_3; __CALC3(__reg_3_1, __reg_3_1, __reg_3_1, __reg_3_1, __reg_3_2, __reg_2_4); __CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0); 
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3); __STORE(__h - 4, __reg_6_1); __reg_3_4 = __reg_2_4; __CALC4(__reg_4_0, __reg_4_0, __reg_4_0, __reg_4_2, __reg_4_1, __reg_3_3); __CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4); __STORE(__h - 3, __reg_6_2); __reg_4_3 = __reg_3_3; __CALC4(__reg_4_1, __reg_4_1, __reg_4_1, __reg_4_1, __reg_4_2, __reg_3_4); __CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0); __STORE(__h - 2, __reg_6_3); __reg_4_4 = __reg_3_4; __CALC5(__reg_5_0, __reg_5_0, __reg_5_0, __reg_5_2, __reg_5_1, __reg_4_3); __CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1); __STORE(__h - 1, __reg_6_4); __reg_5_3 = __reg_4_3; __CALC5(__reg_5_1, __reg_5_1, __reg_5_1, __reg_5_1, __reg_5_2, __reg_4_4); __CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2); __STORE(__h + 0, __reg_6_0); __reg_5_4 = __reg_4_4; __CALC6(__reg_6_0, __reg_6_0, __reg_6_0, __reg_6_2, __reg_6_1, __reg_5_3); __STORE(__h + 1, __reg_6_1); __CALC6(__reg_6_1, __reg_6_1, __reg_6_1, __reg_6_1, __reg_6_2, __reg_5_4); __STORE(__h + 2, __reg_6_2); } else if (__h + 6 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0, __h + 0); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0); __CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3); __CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4); __CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0); __STORE(__h - 12, __reg_6_3); __LOAD(__reg_0, __h + 1); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4); __CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0); __CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3); __CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1); __STORE(__h - 11, __reg_6_4); __LOAD(__reg_0, __h + 2); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0); __CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3); __CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4); __CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2); __STORE(__h - 10, __reg_6_0); __LOAD(__reg_0, __h + 3); __CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4); __CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0); __CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3); __STORE(__h - 9, __reg_6_1); __LOAD(__reg_0, __h + 4); __CALC1(__reg_1_1, __reg_1_1, __reg_1_1, __reg_1_3, __reg_1_2, __reg_0); __CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2); 
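/* Widest epilogue case: six trailing rows (__h .. __h + 5) are still
   loaded before the drain begins, so this branch both feeds and flushes
   the pipeline, finishing with stores up to row __h + 3. */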
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0); __CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3); __CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4); __STORE(__h - 8, __reg_6_2); __reg_1_4 = __reg_0; __LOAD(__reg_0, __h + 5); __CALC1(__reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_3, __reg_0); __CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3); __CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4); __CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0); __STORE(__h - 7, __reg_6_3); __reg_1_0 = __reg_0; __CALC2(__reg_2_1, __reg_2_1, __reg_2_1, __reg_2_3, __reg_2_2, __reg_1_4); __CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0); __CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3); __CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1); __STORE(__h - 6, __reg_6_4); __reg_2_4 = __reg_1_4; __CALC2(__reg_2_2, __reg_2_2, __reg_2_2, __reg_2_2, __reg_2_3, __reg_1_0); __CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3); __CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4); __CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2); __STORE(__h - 5, __reg_6_0); __reg_2_0 = __reg_1_0; __CALC3(__reg_3_1, __reg_3_1, __reg_3_1, __reg_3_3, __reg_3_2, __reg_2_4); __CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0); __CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3); __STORE(__h - 4, __reg_6_1); __reg_3_4 = __reg_2_4; __CALC3(__reg_3_2, __reg_3_2, __reg_3_2, __reg_3_2, __reg_3_3, __reg_2_0); __CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3); __CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4); __STORE(__h - 3, __reg_6_2); __reg_3_0 = __reg_2_0; __CALC4(__reg_4_1, __reg_4_1, __reg_4_1, __reg_4_3, __reg_4_2, __reg_3_4); __CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0); __STORE(__h - 2, __reg_6_3); __reg_4_4 = __reg_3_4; __CALC4(__reg_4_2, __reg_4_2, __reg_4_2, __reg_4_2, __reg_4_3, __reg_3_0); __CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3); __CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1); __STORE(__h - 1, __reg_6_4); __reg_4_0 = __reg_3_0; __CALC5(__reg_5_1, __reg_5_1, __reg_5_1, __reg_5_3, __reg_5_2, __reg_4_4); __CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2); __STORE(__h + 0, __reg_6_0); __reg_5_4 = __reg_4_4; __CALC5(__reg_5_2, __reg_5_2, __reg_5_2, __reg_5_2, __reg_5_3, __reg_4_0); __CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3); __STORE(__h + 1, __reg_6_1); __reg_5_0 = __reg_4_0; __CALC6(__reg_6_1, __reg_6_1, __reg_6_1, __reg_6_3, __reg_6_2, __reg_5_4); __STORE(__h + 2, __reg_6_2); __CALC6(__reg_6_2, __reg_6_2, __reg_6_2, __reg_6_2, 
__reg_6_3, __reg_5_0); __STORE(__h + 3, __reg_6_3); } } else { for (__h = 25; __h <= __side1LenOl - 5;) { __LOAD(__reg_0, __h); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0); __CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3); __CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4); __CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0); __STORE(__h - 12, __reg_6_3); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4); __CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0); __CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3); __CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1); __STORE(__h - 12, __reg_6_4); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0); __CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3); __CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4); __CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2); __STORE(__h - 12, __reg_6_0); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4); __CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0); __CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3); __STORE(__h - 12, __reg_6_1); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); __CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0); __CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3); __CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4); __STORE(__h - 12, __reg_6_2); __h++; } if (__h == __side1LenOl) return; __LOAD(__reg_0, __h); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0); __CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3); __CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4); __CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0); __STORE(__h - 12, __reg_6_3); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0, __h); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4); __CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0); __CALC5(__reg_5_0, 
__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3); __CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1); __STORE(__h - 12, __reg_6_4); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0, __h); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0); __CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3); __CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4); __CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2); __STORE(__h - 12, __reg_6_0); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0, __h); __CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4); __CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0); __CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3); __STORE(__h - 12, __reg_6_1); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0, __h); __CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); __CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0); __CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3); __CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4); __STORE(__h - 12, __reg_6_2); __h++; } } __global__ void kernel0_5(double *A, int dimsize, int timestep, int c0) { #ifndef AN5D_TYPE #define AN5D_TYPE unsigned #endif const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0); #define __c0 c0 const AN5D_TYPE __c1Len = (dimsize - 2 - 2); const AN5D_TYPE __c1Pad = (2); #define __c1 c1 const AN5D_TYPE __c2Len = (dimsize - 2 - 2); const AN5D_TYPE __c2Pad = (2); #define __c2 c2 const AN5D_TYPE __halo1 = 2; const AN5D_TYPE __halo2 = 2; const AN5D_TYPE __side0Len = 5; const AN5D_TYPE __side1Len = 512; const AN5D_TYPE __side2Len = 492; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len; const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len; const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x; const AN5D_TYPE __local_c2 = __tid; const AN5D_TYPE __c1Id = blockIdx.x / __side2Num; const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2; double __reg_0; double __reg_1_0; double __reg_1_1; double __reg_1_2; double __reg_1_3; double __reg_1_4; double __reg_2_0; double __reg_2_1; double __reg_2_2; double __reg_2_3; double __reg_2_4; double __reg_3_0; double __reg_3_1; double __reg_3_2; double __reg_3_3; double __reg_3_4; double __reg_4_0; double __reg_4_1; double __reg_4_2; double __reg_4_3; double __reg_4_4; double __reg_5_0; double __reg_5_1; double __reg_5_2; double __reg_5_3; double __reg_5_4; __shared__ double __a_sb_double[__blockSize * 2]; double *__a_sb = __a_sb_double; 
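/* kernel0_5 fuses five time steps per sweep (__side0Len = 5). The
   predicates below gate each pipeline depth: __loadValid covers the
   halo-extended load range, __writeValidK requires __halo2 * K cells of
   slack on both sides for stage K, and only __writeValid5 may store. */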
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2; const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len; const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1); const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2); const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3); const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4); const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5); const AN5D_TYPE __storeValid = __writeValid5; AN5D_TYPE __c1; AN5D_TYPE __h; const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id; #define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0) #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR_0_wrap(__rn0, __a) do { __rn0 = (((((((((((((((((((((((((0.03125f * (__SBREF(__a_sb, -2))) + (0.03126f * (__SBREF(__a_sb, -1)))) + (0.03127f * (__REGREF(__a, 0)))) + (0.03128f * (__SBREF(__a_sb, 1)))) + (0.03129f * (__SBREF(__a_sb, 2)))))))))))))))))))))))); } while (0) #define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0) #define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0) #define __CALCEXPR_0(out, a) do { __CALCEXPR_0_wrap(out, a); } while (0); #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR_1_wrap(__rn0, __a) do { __rn0 = (((((((((((((((((((((0.03130f * (__SBREF(__a_sb, -2)))) + (0.03131f * (__SBREF(__a_sb, -1)))) + (0.03132f * (__REGREF(__a, 0)))) + (0.03133f * (__SBREF(__a_sb, 1)))) + (0.03134f * (__SBREF(__a_sb, 2))))))))))))))))))); } while (0) #define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0) #define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0) #define __CALCEXPR_1(out, a) do { double etmp; __CALCEXPR_1_wrap(etmp, a); out += etmp; } while (0); #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR_2_wrap(__rn0, __a) do { __rn0 = ((((((((((((((((0.03135f * (__SBREF(__a_sb, -2)))) + (0.03136f * (__SBREF(__a_sb, -1)))) + (0.24712f * (__REGREF(__a, 0)))) + (0.03138f * (__SBREF(__a_sb, 1)))) + (0.03139f * (__SBREF(__a_sb, 2)))))))))))))); } while (0) #define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? 
__blockSize : 0]; } while (0) #define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0) #define __CALCEXPR_2(out, a) do { double etmp; __CALCEXPR_2_wrap(etmp, a); out += etmp; } while (0); #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR_3_wrap(__rn0, __a) do { __rn0 = (((((((((((0.03140f * (__SBREF(__a_sb, -2)))) + (0.03141f * (__SBREF(__a_sb, -1)))) + (0.03142f * (__REGREF(__a, 0)))) + (0.03143f * (__SBREF(__a_sb, 1)))) + (0.03144f * (__SBREF(__a_sb, 2))))))))); } while (0) #define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0) #define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0) #define __CALCEXPR_3(out, a) do { double etmp; __CALCEXPR_3_wrap(etmp, a); out += etmp; } while (0); #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR_4_wrap(__rn0, __a) do { __rn0 = ((((((0.03145f * (__SBREF(__a_sb, -2)))) + (0.03146f * (__SBREF(__a_sb, -1)))) + (0.03147f * (__REGREF(__a, 0)))) + (0.03148f * (__SBREF(__a_sb, 1)))) + (0.03149f * (__SBREF(__a_sb, 2)))); } while (0) #define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0) #define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0) #define __CALCEXPR_4(out, a) do { double etmp; __CALCEXPR_4_wrap(etmp, a); out += etmp; } while (0); #define __CALCEXPR(out0, out1, out2, out3, out4, reg) do { __CALCEXPR_0(out0, reg); __CALCEXPR_1(out1, reg); __CALCEXPR_2(out2, reg); __CALCEXPR_3(out3, reg); __CALCEXPR_4(out4, reg); } while (0); #define __CALC1(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid1) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0) #define __CALC2(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid2) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0) #define __CALC3(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid3) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0) #define __CALC4(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid4) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0) #define __CALC5(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid5) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0) #define __STORE(h, out) do { if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __DEST = out; }} while (0) if (__c1Id == 0) { __LOAD(__reg_0, 0); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_0); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_0); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_0); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_0); __LOAD(__reg_0, 1); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0); __CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_0); __CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_0); __CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_0); __CALC5(__reg_5_3, 
__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_0); __LOAD(__reg_0, 2); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __LOAD(__reg_0, 3); __CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); __LOAD(__reg_0, 4); __CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); __CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2); __LOAD(__reg_0, 5); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0); __CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3); __LOAD(__reg_0, 6); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4); __CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2); __LOAD(__reg_0, 7); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0); __CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3); __LOAD(__reg_0, 8); __CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4); __CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2); __LOAD(__reg_0, 9); __CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); __CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0); __CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3); __LOAD(__reg_0, 10); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0); __CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3); __CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4); __CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2); __LOAD(__reg_0, 11); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4); __CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0); __CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3); __LOAD(__reg_0, 12); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0); __CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3); __CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4); __STORE(2, __reg_5_2); __LOAD(__reg_0, 13); __CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4); __CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0); __STORE(3, __reg_5_3); __LOAD(__reg_0, 14); __CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); __CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, 
__reg_2_0); __CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3); __CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1); __STORE(4, __reg_5_4); __LOAD(__reg_0, 15); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0); __CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3); __CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4); __CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2); __STORE(5, __reg_5_0); __LOAD(__reg_0, 16); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4); __CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0); __CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3); __STORE(6, __reg_5_1); __LOAD(__reg_0, 17); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0); __CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3); __CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4); __STORE(7, __reg_5_2); __LOAD(__reg_0, 18); __CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4); __CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0); __STORE(8, __reg_5_3); __LOAD(__reg_0, 19); __CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); __CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0); __CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3); __CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1); __STORE(9, __reg_5_4); __LOAD(__reg_0, 20); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0); __CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3); __CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4); __CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2); __STORE(10, __reg_5_0); } else { __LOAD(__reg_0, 0); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0); __LOAD(__reg_0, 1); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0); __LOAD(__reg_0, 2); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __LOAD(__reg_0, 3); __CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); __LOAD(__reg_0, 4); __CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); __CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2); __LOAD(__reg_0, 5); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0); __CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3); __LOAD(__reg_0, 6); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, 
__reg_2_2, __reg_1_4); __LOAD(__reg_0, 7); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0); __LOAD(__reg_0, 8); __CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4); __LOAD(__reg_0, 9); __CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); __CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0); __LOAD(__reg_0, 10); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0); __CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3); __CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1); __LOAD(__reg_0, 11); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4); __CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2); __LOAD(__reg_0, 12); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0); __CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3); __CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1); __LOAD(__reg_0, 13); __CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4); __CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2); __LOAD(__reg_0, 14); __CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); __CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0); __CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3); __LOAD(__reg_0, 15); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0); __CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3); __CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4); __LOAD(__reg_0, 16); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4); __CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0); __CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3); __LOAD(__reg_0, 17); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0); __CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3); __CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4); __LOAD(__reg_0, 18); __CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4); __CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2); 
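/* Five-stage warm-up for interior blocks: rows 0..20 fill
   __reg_1_* .. __reg_5_* and the branch ends with the single store of
   row 10; one pipeline stage fewer than the kernel above saves
   2 * __halo1 warm-up rows. */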
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0); __LOAD(__reg_0, 19); __CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); __CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0); __CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3); __CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1); __LOAD(__reg_0, 20); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0); __CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3); __CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4); __CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2); __STORE(10, __reg_5_0); } __a_sb = __a_sb_double + __blockSize * 1; if (__c1Id == __side1Num - 1) { for (__h = 21; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 7;) { __LOAD(__reg_0, __h); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4); __CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0); __CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3); __STORE(__h - 10, __reg_5_1); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0); __CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3); __CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4); __STORE(__h - 10, __reg_5_2); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4); __CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0); __STORE(__h - 10, __reg_5_3); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); __CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0); __CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3); __CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1); __STORE(__h - 10, __reg_5_4); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0); __CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3); __CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4); __CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2); __STORE(__h - 10, __reg_5_0); __h++; __DB_SWITCH(); __syncthreads(); } if (0) {} else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0, __h + 0); __CALC1(__reg_1_3, __reg_1_3, __reg_1_3, __reg_1_0, __reg_1_4, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4); __CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, 
__reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0); __CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3); __STORE(__h - 10, __reg_5_1); __reg_1_1 = __reg_0; __LOAD(__reg_0, __h + 1); __CALC1(__reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_0, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0); __CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3); __CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4); __STORE(__h - 9, __reg_5_2); __reg_1_2 = __reg_0; __CALC2(__reg_2_3, __reg_2_3, __reg_2_3, __reg_2_0, __reg_2_4, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4); __CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0); __STORE(__h - 8, __reg_5_3); __reg_2_1 = __reg_1_1; __CALC2(__reg_2_4, __reg_2_4, __reg_2_4, __reg_2_4, __reg_2_0, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0); __CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3); __CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1); __STORE(__h - 7, __reg_5_4); __reg_2_2 = __reg_1_2; __CALC3(__reg_3_3, __reg_3_3, __reg_3_3, __reg_3_0, __reg_3_4, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4); __CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2); __STORE(__h - 6, __reg_5_0); __reg_3_1 = __reg_2_1; __CALC3(__reg_3_4, __reg_3_4, __reg_3_4, __reg_3_4, __reg_3_0, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0); __CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3); __STORE(__h - 5, __reg_5_1); __reg_3_2 = __reg_2_2; __CALC4(__reg_4_3, __reg_4_3, __reg_4_3, __reg_4_0, __reg_4_4, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4); __STORE(__h - 4, __reg_5_2); __reg_4_1 = __reg_3_1; __CALC4(__reg_4_4, __reg_4_4, __reg_4_4, __reg_4_4, __reg_4_0, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0); __STORE(__h - 3, __reg_5_3); __reg_4_2 = __reg_3_2; __CALC5(__reg_5_3, __reg_5_3, __reg_5_3, __reg_5_0, __reg_5_4, __reg_4_1); __STORE(__h - 2, __reg_5_4); __CALC5(__reg_5_4, __reg_5_4, __reg_5_4, __reg_5_4, __reg_5_0, __reg_4_2); __STORE(__h - 1, __reg_5_0); } else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0, __h + 0); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4); __CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0); __CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3); __STORE(__h - 10, __reg_5_1); __LOAD(__reg_0, __h + 1); __CALC1(__reg_1_4, __reg_1_4, __reg_1_4, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0); __CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3); __CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4); __STORE(__h - 9, __reg_5_2); __reg_1_2 = __reg_0; __LOAD(__reg_0, __h + 2); __CALC1(__reg_1_0, __reg_1_0, __reg_1_0, 
__reg_1_0, __reg_1_1, __reg_0); __CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4); __CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0); __STORE(__h - 8, __reg_5_3); __reg_1_3 = __reg_0; __CALC2(__reg_2_4, __reg_2_4, __reg_2_4, __reg_2_1, __reg_2_0, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0); __CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3); __CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1); __STORE(__h - 7, __reg_5_4); __reg_2_2 = __reg_1_2; __CALC2(__reg_2_0, __reg_2_0, __reg_2_0, __reg_2_0, __reg_2_1, __reg_1_3); __CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4); __CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2); __STORE(__h - 6, __reg_5_0); __reg_2_3 = __reg_1_3; __CALC3(__reg_3_4, __reg_3_4, __reg_3_4, __reg_3_1, __reg_3_0, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0); __CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3); __STORE(__h - 5, __reg_5_1); __reg_3_2 = __reg_2_2; __CALC3(__reg_3_0, __reg_3_0, __reg_3_0, __reg_3_0, __reg_3_1, __reg_2_3); __CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4); __STORE(__h - 4, __reg_5_2); __reg_3_3 = __reg_2_3; __CALC4(__reg_4_4, __reg_4_4, __reg_4_4, __reg_4_1, __reg_4_0, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0); __STORE(__h - 3, __reg_5_3); __reg_4_2 = __reg_3_2; __CALC4(__reg_4_0, __reg_4_0, __reg_4_0, __reg_4_0, __reg_4_1, __reg_3_3); __CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1); __STORE(__h - 2, __reg_5_4); __reg_4_3 = __reg_3_3; __CALC5(__reg_5_4, __reg_5_4, __reg_5_4, __reg_5_1, __reg_5_0, __reg_4_2); __STORE(__h - 1, __reg_5_0); __CALC5(__reg_5_0, __reg_5_0, __reg_5_0, __reg_5_0, __reg_5_1, __reg_4_3); __STORE(__h + 0, __reg_5_1); } else if (__h + 4 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0, __h + 0); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4); __CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0); __CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3); __STORE(__h - 10, __reg_5_1); __LOAD(__reg_0, __h + 1); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0); __CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3); __CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4); __STORE(__h - 9, __reg_5_2); __LOAD(__reg_0, __h + 2); __CALC1(__reg_1_0, __reg_1_0, __reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4); __CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, 
__reg_5_3, __reg_4_0); __STORE(__h - 8, __reg_5_3); __reg_1_3 = __reg_0; __LOAD(__reg_0, __h + 3); __CALC1(__reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_2, __reg_0); __CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0); __CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3); __CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1); __STORE(__h - 7, __reg_5_4); __reg_1_4 = __reg_0; __CALC2(__reg_2_0, __reg_2_0, __reg_2_0, __reg_2_2, __reg_2_1, __reg_1_3); __CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4); __CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2); __STORE(__h - 6, __reg_5_0); __reg_2_3 = __reg_1_3; __CALC2(__reg_2_1, __reg_2_1, __reg_2_1, __reg_2_1, __reg_2_2, __reg_1_4); __CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0); __CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3); __STORE(__h - 5, __reg_5_1); __reg_2_4 = __reg_1_4; __CALC3(__reg_3_0, __reg_3_0, __reg_3_0, __reg_3_2, __reg_3_1, __reg_2_3); __CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4); __STORE(__h - 4, __reg_5_2); __reg_3_3 = __reg_2_3; __CALC3(__reg_3_1, __reg_3_1, __reg_3_1, __reg_3_1, __reg_3_2, __reg_2_4); __CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0); __STORE(__h - 3, __reg_5_3); __reg_3_4 = __reg_2_4; __CALC4(__reg_4_0, __reg_4_0, __reg_4_0, __reg_4_2, __reg_4_1, __reg_3_3); __CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1); __STORE(__h - 2, __reg_5_4); __reg_4_3 = __reg_3_3; __CALC4(__reg_4_1, __reg_4_1, __reg_4_1, __reg_4_1, __reg_4_2, __reg_3_4); __CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2); __STORE(__h - 1, __reg_5_0); __reg_4_4 = __reg_3_4; __CALC5(__reg_5_0, __reg_5_0, __reg_5_0, __reg_5_2, __reg_5_1, __reg_4_3); __STORE(__h + 0, __reg_5_1); __CALC5(__reg_5_1, __reg_5_1, __reg_5_1, __reg_5_1, __reg_5_2, __reg_4_4); __STORE(__h + 1, __reg_5_2); } else if (__h + 5 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0, __h + 0); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4); __CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0); __CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3); __STORE(__h - 10, __reg_5_1); __LOAD(__reg_0, __h + 1); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0); __CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3); __CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4); __STORE(__h - 9, __reg_5_2); __LOAD(__reg_0, __h + 2); __CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, 
__reg_2_4); __CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0); __STORE(__h - 8, __reg_5_3); __LOAD(__reg_0, __h + 3); __CALC1(__reg_1_1, __reg_1_1, __reg_1_1, __reg_1_3, __reg_1_2, __reg_0); __CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0); __CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3); __CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1); __STORE(__h - 7, __reg_5_4); __reg_1_4 = __reg_0; __LOAD(__reg_0, __h + 4); __CALC1(__reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_3, __reg_0); __CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3); __CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4); __CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2); __STORE(__h - 6, __reg_5_0); __reg_1_0 = __reg_0; __CALC2(__reg_2_1, __reg_2_1, __reg_2_1, __reg_2_3, __reg_2_2, __reg_1_4); __CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0); __CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3); __STORE(__h - 5, __reg_5_1); __reg_2_4 = __reg_1_4; __CALC2(__reg_2_2, __reg_2_2, __reg_2_2, __reg_2_2, __reg_2_3, __reg_1_0); __CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3); __CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4); __STORE(__h - 4, __reg_5_2); __reg_2_0 = __reg_1_0; __CALC3(__reg_3_1, __reg_3_1, __reg_3_1, __reg_3_3, __reg_3_2, __reg_2_4); __CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0); __STORE(__h - 3, __reg_5_3); __reg_3_4 = __reg_2_4; __CALC3(__reg_3_2, __reg_3_2, __reg_3_2, __reg_3_2, __reg_3_3, __reg_2_0); __CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3); __CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1); __STORE(__h - 2, __reg_5_4); __reg_3_0 = __reg_2_0; __CALC4(__reg_4_1, __reg_4_1, __reg_4_1, __reg_4_3, __reg_4_2, __reg_3_4); __CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2); __STORE(__h - 1, __reg_5_0); __reg_4_4 = __reg_3_4; __CALC4(__reg_4_2, __reg_4_2, __reg_4_2, __reg_4_2, __reg_4_3, __reg_3_0); __CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3); __STORE(__h + 0, __reg_5_1); __reg_4_0 = __reg_3_0; __CALC5(__reg_5_1, __reg_5_1, __reg_5_1, __reg_5_3, __reg_5_2, __reg_4_4); __STORE(__h + 1, __reg_5_2); __CALC5(__reg_5_2, __reg_5_2, __reg_5_2, __reg_5_2, __reg_5_3, __reg_4_0); __STORE(__h + 2, __reg_5_3); } else if (__h + 6 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0, __h + 0); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4); __CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0); __CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3); __STORE(__h - 10, __reg_5_1); __LOAD(__reg_0, __h + 1); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, 
__reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0); __CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3); __CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4); __STORE(__h - 9, __reg_5_2); __LOAD(__reg_0, __h + 2); __CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4); __CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0); __STORE(__h - 8, __reg_5_3); __LOAD(__reg_0, __h + 3); __CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); __CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0); __CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3); __CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1); __STORE(__h - 7, __reg_5_4); __LOAD(__reg_0, __h + 4); __CALC1(__reg_1_2, __reg_1_2, __reg_1_2, __reg_1_4, __reg_1_3, __reg_0); __CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3); __CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4); __CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2); __STORE(__h - 6, __reg_5_0); __reg_1_0 = __reg_0; __LOAD(__reg_0, __h + 5); __CALC1(__reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_4, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4); __CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0); __CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3); __STORE(__h - 5, __reg_5_1); __reg_1_1 = __reg_0; __CALC2(__reg_2_2, __reg_2_2, __reg_2_2, __reg_2_4, __reg_2_3, __reg_1_0); __CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3); __CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4); __STORE(__h - 4, __reg_5_2); __reg_2_0 = __reg_1_0; __CALC2(__reg_2_3, __reg_2_3, __reg_2_3, __reg_2_3, __reg_2_4, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4); __CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0); __STORE(__h - 3, __reg_5_3); __reg_2_1 = __reg_1_1; __CALC3(__reg_3_2, __reg_3_2, __reg_3_2, __reg_3_4, __reg_3_3, __reg_2_0); __CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3); __CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1); __STORE(__h - 2, __reg_5_4); __reg_3_0 = __reg_2_0; __CALC3(__reg_3_3, __reg_3_3, __reg_3_3, __reg_3_3, __reg_3_4, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4); __CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2); __STORE(__h - 1, __reg_5_0); __reg_3_1 = __reg_2_1; __CALC4(__reg_4_2, __reg_4_2, __reg_4_2, __reg_4_4, __reg_4_3, __reg_3_0); __CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3); __STORE(__h + 0, __reg_5_1); __reg_4_0 = __reg_3_0; 
__CALC4(__reg_4_3, __reg_4_3, __reg_4_3, __reg_4_3, __reg_4_4, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4); __STORE(__h + 1, __reg_5_2); __reg_4_1 = __reg_3_1; __CALC5(__reg_5_2, __reg_5_2, __reg_5_2, __reg_5_4, __reg_5_3, __reg_4_0); __STORE(__h + 2, __reg_5_3); __CALC5(__reg_5_3, __reg_5_3, __reg_5_3, __reg_5_3, __reg_5_4, __reg_4_1); __STORE(__h + 3, __reg_5_4); } } else { for (__h = 21; __h <= __side1LenOl - 5;) { __LOAD(__reg_0, __h); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4); __CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0); __CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3); __STORE(__h - 10, __reg_5_1); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0); __CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3); __CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4); __STORE(__h - 10, __reg_5_2); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4); __CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0); __STORE(__h - 10, __reg_5_3); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); __CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0); __CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3); __CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1); __STORE(__h - 10, __reg_5_4); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0); __CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3); __CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4); __CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2); __STORE(__h - 10, __reg_5_0); __h++; __DB_SWITCH(); __syncthreads(); } if (__h == __side1LenOl) return; __LOAD(__reg_0, __h); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4); __CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0); __CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3); __STORE(__h - 10, __reg_5_1); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0, __h); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0); __CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3); __CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4); 
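/*
 * Presumably AN5D-generated temporal-blocking code: the guarded single-row
 * steps below drain the five-stage software pipeline of the kernel above for
 * interior thread blocks, testing __h == __side1LenOl before each further row
 * load. kernel0_4, defined next, appears to be the same order-2 star stencil
 * with four fused time steps per sweep (__side0Len = 4), so its stores trail
 * its loads by __halo1 * __side0Len = 8 rows, versus 10 here.
 */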
__STORE(__h - 10, __reg_5_2); __h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h); __CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4); __CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0); __STORE(__h - 10, __reg_5_3); __h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h); __CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); __CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0); __CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3); __CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1); __STORE(__h - 10, __reg_5_4); __h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0); __CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3); __CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4); __CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2); __STORE(__h - 10, __reg_5_0); __h++;
}
}

__global__ void kernel0_4(double *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 2 - 2); const AN5D_TYPE __c1Pad = (2);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 2 - 2); const AN5D_TYPE __c2Pad = (2);
#define __c2 c2
const AN5D_TYPE __halo1 = 2; const AN5D_TYPE __halo2 = 2;
const AN5D_TYPE __side0Len = 4; const AN5D_TYPE __side1Len = 512; const AN5D_TYPE __side2Len = 496;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len; const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x; const AN5D_TYPE __local_c2 = __tid;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
double __reg_0;
double __reg_1_0; double __reg_1_1; double __reg_1_2; double __reg_1_3; double __reg_1_4;
double __reg_2_0; double __reg_2_1; double __reg_2_2; double __reg_2_3; double __reg_2_4;
double __reg_3_0; double __reg_3_1; double __reg_3_2; double __reg_3_3; double __reg_3_4;
double __reg_4_0; double __reg_4_1; double __reg_4_2; double __reg_4_3; double __reg_4_4;
__shared__ double __a_sb_double[__blockSize * 2];
double *__a_sb = __a_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2);
const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3);
const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4);
const AN5D_TYPE __storeValid = __writeValid4;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_0_wrap(__rn0, __a) do { __rn0 = (((((((((((((((((((((((((0.03125f * (__SBREF(__a_sb, -2))) + (0.03126f * (__SBREF(__a_sb, -1)))) + (0.03127f * (__REGREF(__a, 0)))) + (0.03128f * (__SBREF(__a_sb, 1)))) + (0.03129f * (__SBREF(__a_sb, 2)))))))))))))))))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_0(out, a) do { __CALCEXPR_0_wrap(out, a); } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_1_wrap(__rn0, __a) do { __rn0 = (((((((((((((((((((((0.03130f * (__SBREF(__a_sb, -2)))) + (0.03131f * (__SBREF(__a_sb, -1)))) + (0.03132f * (__REGREF(__a, 0)))) + (0.03133f * (__SBREF(__a_sb, 1)))) + (0.03134f * (__SBREF(__a_sb, 2))))))))))))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_1(out, a) do { double etmp; __CALCEXPR_1_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_2_wrap(__rn0, __a) do { __rn0 = ((((((((((((((((0.03135f * (__SBREF(__a_sb, -2)))) + (0.03136f * (__SBREF(__a_sb, -1)))) + (0.24712f * (__REGREF(__a, 0)))) + (0.03138f * (__SBREF(__a_sb, 1)))) + (0.03139f * (__SBREF(__a_sb, 2)))))))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_2(out, a) do { double etmp; __CALCEXPR_2_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_3_wrap(__rn0, __a) do { __rn0 = (((((((((((0.03140f * (__SBREF(__a_sb, -2)))) + (0.03141f * (__SBREF(__a_sb, -1)))) + (0.03142f * (__REGREF(__a, 0)))) + (0.03143f * (__SBREF(__a_sb, 1)))) + (0.03144f * (__SBREF(__a_sb, 2))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_3(out, a) do { double etmp; __CALCEXPR_3_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_4_wrap(__rn0, __a) do { __rn0 = ((((((0.03145f * (__SBREF(__a_sb, -2)))) + (0.03146f * (__SBREF(__a_sb, -1)))) + (0.03147f * (__REGREF(__a, 0)))) + (0.03148f * (__SBREF(__a_sb, 1)))) + (0.03149f * (__SBREF(__a_sb, 2)))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_4(out, a) do { double etmp; __CALCEXPR_4_wrap(etmp, a); out += etmp; } while (0);
#define __CALCEXPR(out0, out1, out2, out3, out4, reg) do { __CALCEXPR_0(out0, reg); __CALCEXPR_1(out1, reg); __CALCEXPR_2(out2, reg); __CALCEXPR_3(out3, reg); __CALCEXPR_4(out4, reg); } while (0);
#define __CALC1(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid1) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC2(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid2) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC3(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid3) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC4(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid4) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __STORE(h, out) do { if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __DEST = out; }} while (0)
if (__c1Id == 0)
{
__LOAD(__reg_0, 0); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_0); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_0); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_0);
__LOAD(__reg_0, 1); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0); __CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_0); __CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_0); __CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_0);
__LOAD(__reg_0, 2); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__LOAD(__reg_0, 3); __CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__LOAD(__reg_0, 4); __CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); __CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__LOAD(__reg_0, 5); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0); __CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__LOAD(__reg_0, 6); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4); __CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__LOAD(__reg_0, 7); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0); __CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2,
__reg_3_1, __reg_2_3); __LOAD(__reg_0, 8); __CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4); __CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2); __LOAD(__reg_0, 9); __CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); __CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0); __CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3); __LOAD(__reg_0, 10); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0); __CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3); __CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4); __STORE(2, __reg_4_2); __LOAD(__reg_0, 11); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4); __CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0); __STORE(3, __reg_4_3); __LOAD(__reg_0, 12); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0); __CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3); __CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1); __STORE(4, __reg_4_4); __LOAD(__reg_0, 13); __CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4); __CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2); __STORE(5, __reg_4_0); __LOAD(__reg_0, 14); __CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); __CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0); __CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3); __STORE(6, __reg_4_1); __LOAD(__reg_0, 15); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0); __CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3); __CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4); __STORE(7, __reg_4_2); __LOAD(__reg_0, 16); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4); __CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0); __STORE(8, __reg_4_3); } else { __LOAD(__reg_0, 0); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0); __LOAD(__reg_0, 1); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0); __LOAD(__reg_0, 2); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __LOAD(__reg_0, 3); __CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); __LOAD(__reg_0, 4); __CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); __CALC2(__reg_2_4, 
__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2); __LOAD(__reg_0, 5); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0); __CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3); __LOAD(__reg_0, 6); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4); __LOAD(__reg_0, 7); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0); __LOAD(__reg_0, 8); __CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4); __LOAD(__reg_0, 9); __CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); __CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0); __LOAD(__reg_0, 10); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0); __CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3); __CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1); __LOAD(__reg_0, 11); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4); __CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2); __LOAD(__reg_0, 12); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0); __CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3); __CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1); __LOAD(__reg_0, 13); __CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4); __CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2); __LOAD(__reg_0, 14); __CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); __CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0); __CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3); __LOAD(__reg_0, 15); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0); __CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3); __CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4); __LOAD(__reg_0, 16); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4); __CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0); __STORE(8, __reg_4_3); } __a_sb = __a_sb_double + __blockSize * 0; if (__c1Id == __side1Num - 1) { for (__h = 17; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 7;) { __LOAD(__reg_0, __h); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0); __CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3); 
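/*
 * Steady-state iteration of the boundary (last) block in kernel0_4: each step
 * appears to load one row, run it through the four fused time steps
 * (__CALC1..__CALC4, each rotating its five-register window), and emit the
 * fully updated row __halo1 * __side0Len = 8 rows behind the load front via
 * __STORE(__h - 8, ...).
 */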
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1); __STORE(__h - 8, __reg_4_4); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4); __CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2); __STORE(__h - 8, __reg_4_0); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); __CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0); __CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3); __STORE(__h - 8, __reg_4_1); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0); __CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3); __CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4); __STORE(__h - 8, __reg_4_2); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4); __CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0); __STORE(__h - 8, __reg_4_3); __h++; } if (0) {} else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0, __h + 0); __CALC1(__reg_1_4, __reg_1_4, __reg_1_4, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0); __CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3); __CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1); __STORE(__h - 8, __reg_4_4); __reg_1_2 = __reg_0; __LOAD(__reg_0, __h + 1); __CALC1(__reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_1, __reg_0); __CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4); __CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2); __STORE(__h - 7, __reg_4_0); __reg_1_3 = __reg_0; __CALC2(__reg_2_4, __reg_2_4, __reg_2_4, __reg_2_1, __reg_2_0, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0); __CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3); __STORE(__h - 6, __reg_4_1); __reg_2_2 = __reg_1_2; __CALC2(__reg_2_0, __reg_2_0, __reg_2_0, __reg_2_0, __reg_2_1, __reg_1_3); __CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4); __STORE(__h - 5, __reg_4_2); __reg_2_3 = __reg_1_3; __CALC3(__reg_3_4, __reg_3_4, __reg_3_4, __reg_3_1, __reg_3_0, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0); __STORE(__h - 4, __reg_4_3); __reg_3_2 = __reg_2_2; __CALC3(__reg_3_0, __reg_3_0, __reg_3_0, __reg_3_0, __reg_3_1, __reg_2_3); __CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1); __STORE(__h - 3, __reg_4_4); __reg_3_3 = __reg_2_3; __CALC4(__reg_4_4, __reg_4_4, __reg_4_4, __reg_4_1, __reg_4_0, __reg_3_2); __STORE(__h - 2, __reg_4_0); __CALC4(__reg_4_0, __reg_4_0, __reg_4_0, __reg_4_0, __reg_4_1, __reg_3_3); __STORE(__h - 1, __reg_4_1); } else if (__h + 3 == 
__c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0, __h + 0); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0); __CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3); __CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1); __STORE(__h - 8, __reg_4_4); __LOAD(__reg_0, __h + 1); __CALC1(__reg_1_0, __reg_1_0, __reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4); __CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2); __STORE(__h - 7, __reg_4_0); __reg_1_3 = __reg_0; __LOAD(__reg_0, __h + 2); __CALC1(__reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_2, __reg_0); __CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0); __CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3); __STORE(__h - 6, __reg_4_1); __reg_1_4 = __reg_0; __CALC2(__reg_2_0, __reg_2_0, __reg_2_0, __reg_2_2, __reg_2_1, __reg_1_3); __CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4); __STORE(__h - 5, __reg_4_2); __reg_2_3 = __reg_1_3; __CALC2(__reg_2_1, __reg_2_1, __reg_2_1, __reg_2_1, __reg_2_2, __reg_1_4); __CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0); __STORE(__h - 4, __reg_4_3); __reg_2_4 = __reg_1_4; __CALC3(__reg_3_0, __reg_3_0, __reg_3_0, __reg_3_2, __reg_3_1, __reg_2_3); __CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1); __STORE(__h - 3, __reg_4_4); __reg_3_3 = __reg_2_3; __CALC3(__reg_3_1, __reg_3_1, __reg_3_1, __reg_3_1, __reg_3_2, __reg_2_4); __CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2); __STORE(__h - 2, __reg_4_0); __reg_3_4 = __reg_2_4; __CALC4(__reg_4_0, __reg_4_0, __reg_4_0, __reg_4_2, __reg_4_1, __reg_3_3); __STORE(__h - 1, __reg_4_1); __CALC4(__reg_4_1, __reg_4_1, __reg_4_1, __reg_4_1, __reg_4_2, __reg_3_4); __STORE(__h + 0, __reg_4_2); } else if (__h + 4 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0, __h + 0); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0); __CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3); __CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1); __STORE(__h - 8, __reg_4_4); __LOAD(__reg_0, __h + 1); __CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4); __CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2); __STORE(__h - 7, __reg_4_0); __LOAD(__reg_0, __h + 2); __CALC1(__reg_1_1, __reg_1_1, __reg_1_1, __reg_1_3, __reg_1_2, __reg_0); __CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0); __CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3); __STORE(__h - 6, __reg_4_1); __reg_1_4 = __reg_0; __LOAD(__reg_0, __h + 3); __CALC1(__reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_3, 
__reg_0); __CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3); __CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4); __STORE(__h - 5, __reg_4_2); __reg_1_0 = __reg_0; __CALC2(__reg_2_1, __reg_2_1, __reg_2_1, __reg_2_3, __reg_2_2, __reg_1_4); __CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0); __STORE(__h - 4, __reg_4_3); __reg_2_4 = __reg_1_4; __CALC2(__reg_2_2, __reg_2_2, __reg_2_2, __reg_2_2, __reg_2_3, __reg_1_0); __CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3); __CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1); __STORE(__h - 3, __reg_4_4); __reg_2_0 = __reg_1_0; __CALC3(__reg_3_1, __reg_3_1, __reg_3_1, __reg_3_3, __reg_3_2, __reg_2_4); __CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2); __STORE(__h - 2, __reg_4_0); __reg_3_4 = __reg_2_4; __CALC3(__reg_3_2, __reg_3_2, __reg_3_2, __reg_3_2, __reg_3_3, __reg_2_0); __CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3); __STORE(__h - 1, __reg_4_1); __reg_3_0 = __reg_2_0; __CALC4(__reg_4_1, __reg_4_1, __reg_4_1, __reg_4_3, __reg_4_2, __reg_3_4); __STORE(__h + 0, __reg_4_2); __CALC4(__reg_4_2, __reg_4_2, __reg_4_2, __reg_4_2, __reg_4_3, __reg_3_0); __STORE(__h + 1, __reg_4_3); } else if (__h + 5 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0, __h + 0); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0); __CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3); __CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1); __STORE(__h - 8, __reg_4_4); __LOAD(__reg_0, __h + 1); __CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4); __CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2); __STORE(__h - 7, __reg_4_0); __LOAD(__reg_0, __h + 2); __CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); __CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0); __CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3); __STORE(__h - 6, __reg_4_1); __LOAD(__reg_0, __h + 3); __CALC1(__reg_1_2, __reg_1_2, __reg_1_2, __reg_1_4, __reg_1_3, __reg_0); __CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3); __CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4); __STORE(__h - 5, __reg_4_2); __reg_1_0 = __reg_0; __LOAD(__reg_0, __h + 4); __CALC1(__reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_4, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4); __CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0); __STORE(__h - 4, __reg_4_3); __reg_1_1 = __reg_0; __CALC2(__reg_2_2, __reg_2_2, __reg_2_2, __reg_2_4, __reg_2_3, __reg_1_0); __CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3); __CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1); 
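/*
 * One of the end-of-range tail branches (__h + 2 .. __h + 6 rows remaining):
 * these appear to flush the remaining pipeline contents, feeding duplicated
 * boundary registers into __CALC1..__CALC4 instead of loading rows past the
 * top edge of the c1 range.
 */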
__STORE(__h - 3, __reg_4_4); __reg_2_0 = __reg_1_0; __CALC2(__reg_2_3, __reg_2_3, __reg_2_3, __reg_2_3, __reg_2_4, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4); __CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2); __STORE(__h - 2, __reg_4_0); __reg_2_1 = __reg_1_1; __CALC3(__reg_3_2, __reg_3_2, __reg_3_2, __reg_3_4, __reg_3_3, __reg_2_0); __CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3); __STORE(__h - 1, __reg_4_1); __reg_3_0 = __reg_2_0; __CALC3(__reg_3_3, __reg_3_3, __reg_3_3, __reg_3_3, __reg_3_4, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4); __STORE(__h + 0, __reg_4_2); __reg_3_1 = __reg_2_1; __CALC4(__reg_4_2, __reg_4_2, __reg_4_2, __reg_4_4, __reg_4_3, __reg_3_0); __STORE(__h + 1, __reg_4_3); __CALC4(__reg_4_3, __reg_4_3, __reg_4_3, __reg_4_3, __reg_4_4, __reg_3_1); __STORE(__h + 2, __reg_4_4); } else if (__h + 6 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0, __h + 0); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0); __CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3); __CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1); __STORE(__h - 8, __reg_4_4); __LOAD(__reg_0, __h + 1); __CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4); __CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2); __STORE(__h - 7, __reg_4_0); __LOAD(__reg_0, __h + 2); __CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); __CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0); __CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3); __STORE(__h - 6, __reg_4_1); __LOAD(__reg_0, __h + 3); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0); __CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3); __CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4); __STORE(__h - 5, __reg_4_2); __LOAD(__reg_0, __h + 4); __CALC1(__reg_1_3, __reg_1_3, __reg_1_3, __reg_1_0, __reg_1_4, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4); __CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0); __STORE(__h - 4, __reg_4_3); __reg_1_1 = __reg_0; __LOAD(__reg_0, __h + 5); __CALC1(__reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_0, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0); __CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3); __CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1); __STORE(__h - 3, __reg_4_4); __reg_1_2 = __reg_0; __CALC2(__reg_2_3, __reg_2_3, __reg_2_3, __reg_2_0, __reg_2_4, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4); __CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2); __STORE(__h - 2, __reg_4_0); __reg_2_1 = __reg_1_1; __CALC2(__reg_2_4, __reg_2_4, __reg_2_4, __reg_2_4, __reg_2_0, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, 
__reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0); __CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3); __STORE(__h - 1, __reg_4_1); __reg_2_2 = __reg_1_2; __CALC3(__reg_3_3, __reg_3_3, __reg_3_3, __reg_3_0, __reg_3_4, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4); __STORE(__h + 0, __reg_4_2); __reg_3_1 = __reg_2_1; __CALC3(__reg_3_4, __reg_3_4, __reg_3_4, __reg_3_4, __reg_3_0, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0); __STORE(__h + 1, __reg_4_3); __reg_3_2 = __reg_2_2; __CALC4(__reg_4_3, __reg_4_3, __reg_4_3, __reg_4_0, __reg_4_4, __reg_3_1); __STORE(__h + 2, __reg_4_4); __CALC4(__reg_4_4, __reg_4_4, __reg_4_4, __reg_4_4, __reg_4_0, __reg_3_2); __STORE(__h + 3, __reg_4_0); } } else { for (__h = 17; __h <= __side1LenOl - 5;) { __LOAD(__reg_0, __h); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0); __CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3); __CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1); __STORE(__h - 8, __reg_4_4); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4); __CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2); __STORE(__h - 8, __reg_4_0); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); __CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0); __CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3); __STORE(__h - 8, __reg_4_1); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0); __CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3); __CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4); __STORE(__h - 8, __reg_4_2); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4); __CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0); __STORE(__h - 8, __reg_4_3); __h++; } if (__h == __side1LenOl) return; __LOAD(__reg_0, __h); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0); __CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3); __CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1); __STORE(__h - 8, __reg_4_4); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0, __h); __CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4); __CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2); __STORE(__h - 8, __reg_4_0); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0, __h); __CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); __CALC2(__reg_2_4, 
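/*
 * Drain of kernel0_4's interior-block path continues below; kernel0_3,
 * defined next, appears to be the same stencil fusing three time steps
 * (__side0Len = 3), with stores trailing loads by 6 rows
 * (__STORE(__h - 6, ...)).
 */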
__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0); __CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3); __STORE(__h - 8, __reg_4_1); __h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0); __CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3); __CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4); __STORE(__h - 8, __reg_4_2); __h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4); __CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0); __STORE(__h - 8, __reg_4_3); __h++;
}
}

__global__ void kernel0_3(double *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 2 - 2); const AN5D_TYPE __c1Pad = (2);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 2 - 2); const AN5D_TYPE __c2Pad = (2);
#define __c2 c2
const AN5D_TYPE __halo1 = 2; const AN5D_TYPE __halo2 = 2;
const AN5D_TYPE __side0Len = 3; const AN5D_TYPE __side1Len = 512; const AN5D_TYPE __side2Len = 500;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len; const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x; const AN5D_TYPE __local_c2 = __tid;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
double __reg_0;
double __reg_1_0; double __reg_1_1; double __reg_1_2; double __reg_1_3; double __reg_1_4;
double __reg_2_0; double __reg_2_1; double __reg_2_2; double __reg_2_3; double __reg_2_4;
double __reg_3_0; double __reg_3_1; double __reg_3_2; double __reg_3_3; double __reg_3_4;
__shared__ double __a_sb_double[__blockSize * 2];
double *__a_sb = __a_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2);
const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3);
const AN5D_TYPE __storeValid = __writeValid3;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_0_wrap(__rn0, __a) do { __rn0 = (((((((((((((((((((((((((0.03125f * (__SBREF(__a_sb, -2))) + (0.03126f * (__SBREF(__a_sb, -1)))) + (0.03127f * (__REGREF(__a, 0)))) + (0.03128f * (__SBREF(__a_sb, 1)))) + (0.03129f * (__SBREF(__a_sb, 2)))))))))))))))))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_0(out, a) do { __CALCEXPR_0_wrap(out, a); } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_1_wrap(__rn0, __a) do { __rn0 = (((((((((((((((((((((0.03130f * (__SBREF(__a_sb, -2)))) + (0.03131f * (__SBREF(__a_sb, -1)))) + (0.03132f * (__REGREF(__a, 0)))) + (0.03133f * (__SBREF(__a_sb, 1)))) + (0.03134f * (__SBREF(__a_sb, 2))))))))))))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_1(out, a) do { double etmp; __CALCEXPR_1_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_2_wrap(__rn0, __a) do { __rn0 = ((((((((((((((((0.03135f * (__SBREF(__a_sb, -2)))) + (0.03136f * (__SBREF(__a_sb, -1)))) + (0.24712f * (__REGREF(__a, 0)))) + (0.03138f * (__SBREF(__a_sb, 1)))) + (0.03139f * (__SBREF(__a_sb, 2)))))))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_2(out, a) do { double etmp; __CALCEXPR_2_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_3_wrap(__rn0, __a) do { __rn0 = (((((((((((0.03140f * (__SBREF(__a_sb, -2)))) + (0.03141f * (__SBREF(__a_sb, -1)))) + (0.03142f * (__REGREF(__a, 0)))) + (0.03143f * (__SBREF(__a_sb, 1)))) + (0.03144f * (__SBREF(__a_sb, 2))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_3(out, a) do { double etmp; __CALCEXPR_3_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_4_wrap(__rn0, __a) do { __rn0 = ((((((0.03145f * (__SBREF(__a_sb, -2)))) + (0.03146f * (__SBREF(__a_sb, -1)))) + (0.03147f * (__REGREF(__a, 0)))) + (0.03148f * (__SBREF(__a_sb, 1)))) + (0.03149f * (__SBREF(__a_sb, 2)))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_4(out, a) do { double etmp; __CALCEXPR_4_wrap(etmp, a); out += etmp; } while (0);
#define __CALCEXPR(out0, out1, out2, out3, out4, reg) do { __CALCEXPR_0(out0, reg); __CALCEXPR_1(out1, reg); __CALCEXPR_2(out2, reg); __CALCEXPR_3(out3, reg); __CALCEXPR_4(out4, reg); } while (0);
#define __CALC1(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid1) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC2(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid2) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC3(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid3) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __STORE(h, out) do { if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __DEST = out; }} while (0)
if (__c1Id == 0)
{
__LOAD(__reg_0, 0); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_0); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_0);
__LOAD(__reg_0, 1); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0); __CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_0); __CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_0);
__LOAD(__reg_0, 2); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__LOAD(__reg_0, 3); __CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__LOAD(__reg_0, 4); __CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); __CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__LOAD(__reg_0, 5); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0); __CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__LOAD(__reg_0, 6); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4); __CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__LOAD(__reg_0, 7); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0); __CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__LOAD(__reg_0, 8); __CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4); __STORE(2, __reg_3_2);
__LOAD(__reg_0, 9); __CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); __CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0); __STORE(3, __reg_3_3);
__LOAD(__reg_0, 10); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0); __CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3); __CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1); __STORE(4, __reg_3_4);
__LOAD(__reg_0, 11); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4); __CALC3(__reg_3_4, __reg_3_3,
__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2); __STORE(5, __reg_3_0); __LOAD(__reg_0, 12); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0); __CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3); __STORE(6, __reg_3_1); } else { __LOAD(__reg_0, 0); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0); __LOAD(__reg_0, 1); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0); __LOAD(__reg_0, 2); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __LOAD(__reg_0, 3); __CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); __LOAD(__reg_0, 4); __CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); __CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2); __LOAD(__reg_0, 5); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0); __CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3); __LOAD(__reg_0, 6); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4); __LOAD(__reg_0, 7); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0); __LOAD(__reg_0, 8); __CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4); __LOAD(__reg_0, 9); __CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); __CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0); __LOAD(__reg_0, 10); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0); __CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3); __CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1); __LOAD(__reg_0, 11); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4); __CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2); __LOAD(__reg_0, 12); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0); __CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3); __STORE(6, __reg_3_1); } __a_sb = __a_sb_double + __blockSize * 1; if (__c1Id == __side1Num - 1) { for (__h = 13; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 7;) { __LOAD(__reg_0, __h); __CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4); __STORE(__h - 6, __reg_3_2); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); __CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0); __STORE(__h - 6, __reg_3_3); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0); __CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3); __CALC3(__reg_3_3, __reg_3_2, __reg_3_1, 
__reg_3_0, __reg_3_4, __reg_2_1); __STORE(__h - 6, __reg_3_4); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4); __CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2); __STORE(__h - 6, __reg_3_0); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0); __CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3); __STORE(__h - 6, __reg_3_1); __h++; __DB_SWITCH(); __syncthreads(); } if (0) {} else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0, __h + 0); __CALC1(__reg_1_0, __reg_1_0, __reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4); __STORE(__h - 6, __reg_3_2); __reg_1_3 = __reg_0; __LOAD(__reg_0, __h + 1); __CALC1(__reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_2, __reg_0); __CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0); __STORE(__h - 5, __reg_3_3); __reg_1_4 = __reg_0; __CALC2(__reg_2_0, __reg_2_0, __reg_2_0, __reg_2_2, __reg_2_1, __reg_1_3); __CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1); __STORE(__h - 4, __reg_3_4); __reg_2_3 = __reg_1_3; __CALC2(__reg_2_1, __reg_2_1, __reg_2_1, __reg_2_1, __reg_2_2, __reg_1_4); __CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2); __STORE(__h - 3, __reg_3_0); __reg_2_4 = __reg_1_4; __CALC3(__reg_3_0, __reg_3_0, __reg_3_0, __reg_3_2, __reg_3_1, __reg_2_3); __STORE(__h - 2, __reg_3_1); __CALC3(__reg_3_1, __reg_3_1, __reg_3_1, __reg_3_1, __reg_3_2, __reg_2_4); __STORE(__h - 1, __reg_3_2); } else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0, __h + 0); __CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4); __STORE(__h - 6, __reg_3_2); __LOAD(__reg_0, __h + 1); __CALC1(__reg_1_1, __reg_1_1, __reg_1_1, __reg_1_3, __reg_1_2, __reg_0); __CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0); __STORE(__h - 5, __reg_3_3); __reg_1_4 = __reg_0; __LOAD(__reg_0, __h + 2); __CALC1(__reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_3, __reg_0); __CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3); __CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1); __STORE(__h - 4, __reg_3_4); __reg_1_0 = __reg_0; __CALC2(__reg_2_1, __reg_2_1, __reg_2_1, __reg_2_3, __reg_2_2, __reg_1_4); __CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2); __STORE(__h - 3, __reg_3_0); __reg_2_4 = __reg_1_4; __CALC2(__reg_2_2, __reg_2_2, __reg_2_2, __reg_2_2, __reg_2_3, __reg_1_0); __CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3); __STORE(__h - 2, __reg_3_1); __reg_2_0 = __reg_1_0; __CALC3(__reg_3_1, __reg_3_1, __reg_3_1, __reg_3_3, __reg_3_2, __reg_2_4); __STORE(__h - 1, __reg_3_2); __CALC3(__reg_3_2, __reg_3_2, __reg_3_2, __reg_3_2, __reg_3_3, __reg_2_0); __STORE(__h + 0, __reg_3_3); } else if (__h + 4 == __c1Len - __side1Len * 
__c1Id + __halo1 * 2) { __LOAD(__reg_0, __h + 0); __CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4); __STORE(__h - 6, __reg_3_2); __LOAD(__reg_0, __h + 1); __CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); __CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0); __STORE(__h - 5, __reg_3_3); __LOAD(__reg_0, __h + 2); __CALC1(__reg_1_2, __reg_1_2, __reg_1_2, __reg_1_4, __reg_1_3, __reg_0); __CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3); __CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1); __STORE(__h - 4, __reg_3_4); __reg_1_0 = __reg_0; __LOAD(__reg_0, __h + 3); __CALC1(__reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_4, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4); __CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2); __STORE(__h - 3, __reg_3_0); __reg_1_1 = __reg_0; __CALC2(__reg_2_2, __reg_2_2, __reg_2_2, __reg_2_4, __reg_2_3, __reg_1_0); __CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3); __STORE(__h - 2, __reg_3_1); __reg_2_0 = __reg_1_0; __CALC2(__reg_2_3, __reg_2_3, __reg_2_3, __reg_2_3, __reg_2_4, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4); __STORE(__h - 1, __reg_3_2); __reg_2_1 = __reg_1_1; __CALC3(__reg_3_2, __reg_3_2, __reg_3_2, __reg_3_4, __reg_3_3, __reg_2_0); __STORE(__h + 0, __reg_3_3); __CALC3(__reg_3_3, __reg_3_3, __reg_3_3, __reg_3_3, __reg_3_4, __reg_2_1); __STORE(__h + 1, __reg_3_4); } else if (__h + 5 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0, __h + 0); __CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4); __STORE(__h - 6, __reg_3_2); __LOAD(__reg_0, __h + 1); __CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); __CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0); __STORE(__h - 5, __reg_3_3); __LOAD(__reg_0, __h + 2); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0); __CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3); __CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1); __STORE(__h - 4, __reg_3_4); __LOAD(__reg_0, __h + 3); __CALC1(__reg_1_3, __reg_1_3, __reg_1_3, __reg_1_0, __reg_1_4, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4); __CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2); __STORE(__h - 3, __reg_3_0); __reg_1_1 = __reg_0; __LOAD(__reg_0, __h + 4); __CALC1(__reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_0, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0); __CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3); __STORE(__h - 2, __reg_3_1); __reg_1_2 = __reg_0; __CALC2(__reg_2_3, __reg_2_3, __reg_2_3, __reg_2_0, __reg_2_4, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4); __STORE(__h - 1, __reg_3_2); __reg_2_1 = __reg_1_1; __CALC2(__reg_2_4, __reg_2_4, __reg_2_4, __reg_2_4, 
__reg_2_0, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0); __STORE(__h + 0, __reg_3_3); __reg_2_2 = __reg_1_2; __CALC3(__reg_3_3, __reg_3_3, __reg_3_3, __reg_3_0, __reg_3_4, __reg_2_1); __STORE(__h + 1, __reg_3_4); __CALC3(__reg_3_4, __reg_3_4, __reg_3_4, __reg_3_4, __reg_3_0, __reg_2_2); __STORE(__h + 2, __reg_3_0); } else if (__h + 6 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0, __h + 0); __CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4); __STORE(__h - 6, __reg_3_2); __LOAD(__reg_0, __h + 1); __CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); __CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0); __STORE(__h - 5, __reg_3_3); __LOAD(__reg_0, __h + 2); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0); __CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3); __CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1); __STORE(__h - 4, __reg_3_4); __LOAD(__reg_0, __h + 3); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4); __CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2); __STORE(__h - 3, __reg_3_0); __LOAD(__reg_0, __h + 4); __CALC1(__reg_1_4, __reg_1_4, __reg_1_4, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0); __CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3); __STORE(__h - 2, __reg_3_1); __reg_1_2 = __reg_0; __LOAD(__reg_0, __h + 5); __CALC1(__reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_1, __reg_0); __CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4); __STORE(__h - 1, __reg_3_2); __reg_1_3 = __reg_0; __CALC2(__reg_2_4, __reg_2_4, __reg_2_4, __reg_2_1, __reg_2_0, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0); __STORE(__h + 0, __reg_3_3); __reg_2_2 = __reg_1_2; __CALC2(__reg_2_0, __reg_2_0, __reg_2_0, __reg_2_0, __reg_2_1, __reg_1_3); __CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1); __STORE(__h + 1, __reg_3_4); __reg_2_3 = __reg_1_3; __CALC3(__reg_3_4, __reg_3_4, __reg_3_4, __reg_3_1, __reg_3_0, __reg_2_2); __STORE(__h + 2, __reg_3_0); __CALC3(__reg_3_0, __reg_3_0, __reg_3_0, __reg_3_0, __reg_3_1, __reg_2_3); __STORE(__h + 3, __reg_3_1); } } else { for (__h = 13; __h <= __side1LenOl - 5;) { __LOAD(__reg_0, __h); __CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4); __STORE(__h - 6, __reg_3_2); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); __CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0); __STORE(__h - 6, __reg_3_3); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0); __CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3); __CALC3(__reg_3_3, 
__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1); __STORE(__h - 6, __reg_3_4); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4); __CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2); __STORE(__h - 6, __reg_3_0); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0); __CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3); __STORE(__h - 6, __reg_3_1); __h++; __DB_SWITCH(); __syncthreads(); } if (__h == __side1LenOl) return; __LOAD(__reg_0, __h); __CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4); __STORE(__h - 6, __reg_3_2); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0, __h); __CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); __CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0); __STORE(__h - 6, __reg_3_3); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0, __h); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0); __CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3); __CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1); __STORE(__h - 6, __reg_3_4); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0, __h); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4); __CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2); __STORE(__h - 6, __reg_3_0); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0, __h); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0); __CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3); __STORE(__h - 6, __reg_3_1); __h++; } } __global__ void kernel0_2(double *A, int dimsize, int timestep, int c0) { #ifndef AN5D_TYPE #define AN5D_TYPE unsigned #endif const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0); #define __c0 c0 const AN5D_TYPE __c1Len = (dimsize - 2 - 2); const AN5D_TYPE __c1Pad = (2); #define __c1 c1 const AN5D_TYPE __c2Len = (dimsize - 2 - 2); const AN5D_TYPE __c2Pad = (2); #define __c2 c2 const AN5D_TYPE __halo1 = 2; const AN5D_TYPE __halo2 = 2; const AN5D_TYPE __side0Len = 2; const AN5D_TYPE __side1Len = 512; const AN5D_TYPE __side2Len = 504; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len; const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len; const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x; const AN5D_TYPE __local_c2 = __tid; const AN5D_TYPE __c1Id = blockIdx.x / __side2Num; const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2; double __reg_0; double __reg_1_0; double __reg_1_1; double __reg_1_2; double 
__reg_1_3; double __reg_1_4; double __reg_2_0; double __reg_2_1; double __reg_2_2; double __reg_2_3; double __reg_2_4; __shared__ double __a_sb_double[__blockSize * 2]; double *__a_sb = __a_sb_double; const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2; const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len; const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1); const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2); const AN5D_TYPE __storeValid = __writeValid2; AN5D_TYPE __c1; AN5D_TYPE __h; const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id; #define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0) #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR_0_wrap(__rn0, __a) do { __rn0 = (((((((((((((((((((((((((0.03125f * (__SBREF(__a_sb, -2))) + (0.03126f * (__SBREF(__a_sb, -1)))) + (0.03127f * (__REGREF(__a, 0)))) + (0.03128f * (__SBREF(__a_sb, 1)))) + (0.03129f * (__SBREF(__a_sb, 2)))))))))))))))))))))))); } while (0) #define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0) #define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0) #define __CALCEXPR_0(out, a) do { __CALCEXPR_0_wrap(out, a); } while (0); #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR_1_wrap(__rn0, __a) do { __rn0 = (((((((((((((((((((((0.03130f * (__SBREF(__a_sb, -2)))) + (0.03131f * (__SBREF(__a_sb, -1)))) + (0.03132f * (__REGREF(__a, 0)))) + (0.03133f * (__SBREF(__a_sb, 1)))) + (0.03134f * (__SBREF(__a_sb, 2))))))))))))))))))); } while (0) #define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0) #define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0) #define __CALCEXPR_1(out, a) do { double etmp; __CALCEXPR_1_wrap(etmp, a); out += etmp; } while (0); #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR_2_wrap(__rn0, __a) do { __rn0 = ((((((((((((((((0.03135f * (__SBREF(__a_sb, -2)))) + (0.03136f * (__SBREF(__a_sb, -1)))) + (0.24712f * (__REGREF(__a, 0)))) + (0.03138f * (__SBREF(__a_sb, 1)))) + (0.03139f * (__SBREF(__a_sb, 2)))))))))))))); } while (0) #define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? 
__blockSize : 0]; } while (0) #define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0) #define __CALCEXPR_2(out, a) do { double etmp; __CALCEXPR_2_wrap(etmp, a); out += etmp; } while (0); #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR_3_wrap(__rn0, __a) do { __rn0 = (((((((((((0.03140f * (__SBREF(__a_sb, -2)))) + (0.03141f * (__SBREF(__a_sb, -1)))) + (0.03142f * (__REGREF(__a, 0)))) + (0.03143f * (__SBREF(__a_sb, 1)))) + (0.03144f * (__SBREF(__a_sb, 2))))))))); } while (0) #define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0) #define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0) #define __CALCEXPR_3(out, a) do { double etmp; __CALCEXPR_3_wrap(etmp, a); out += etmp; } while (0); #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR_4_wrap(__rn0, __a) do { __rn0 = ((((((0.03145f * (__SBREF(__a_sb, -2)))) + (0.03146f * (__SBREF(__a_sb, -1)))) + (0.03147f * (__REGREF(__a, 0)))) + (0.03148f * (__SBREF(__a_sb, 1)))) + (0.03149f * (__SBREF(__a_sb, 2)))); } while (0) #define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0) #define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0) #define __CALCEXPR_4(out, a) do { double etmp; __CALCEXPR_4_wrap(etmp, a); out += etmp; } while (0); #define __CALCEXPR(out0, out1, out2, out3, out4, reg) do { __CALCEXPR_0(out0, reg); __CALCEXPR_1(out1, reg); __CALCEXPR_2(out2, reg); __CALCEXPR_3(out3, reg); __CALCEXPR_4(out4, reg); } while (0); #define __CALC1(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid1) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0) #define __CALC2(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid2) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0) #define __STORE(h, out) do { if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __DEST = out; }} while (0) if (__c1Id == 0) { __LOAD(__reg_0, 0); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_0); __LOAD(__reg_0, 1); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0); __CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_0); __LOAD(__reg_0, 2); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __LOAD(__reg_0, 3); __CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); __LOAD(__reg_0, 4); __CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); __CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2); __LOAD(__reg_0, 5); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0); __CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3); __LOAD(__reg_0, 6); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4); __STORE(2, __reg_2_2); __LOAD(__reg_0, 7); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0); __STORE(3, __reg_2_3); 
__LOAD(__reg_0, 8); __CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1); __STORE(4, __reg_2_4); } else { __LOAD(__reg_0, 0); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0); __LOAD(__reg_0, 1); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0); __LOAD(__reg_0, 2); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __LOAD(__reg_0, 3); __CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); __LOAD(__reg_0, 4); __CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); __CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2); __LOAD(__reg_0, 5); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0); __CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3); __LOAD(__reg_0, 6); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4); __LOAD(__reg_0, 7); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0); __LOAD(__reg_0, 8); __CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1); __STORE(4, __reg_2_4); } __a_sb = __a_sb_double + __blockSize * 0; if (__c1Id == __side1Num - 1) { for (__h = 9; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 7;) { __LOAD(__reg_0, __h); __CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); __CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2); __STORE(__h - 4, __reg_2_0); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0); __CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3); __STORE(__h - 4, __reg_2_1); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4); __STORE(__h - 4, __reg_2_2); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0); __STORE(__h - 4, __reg_2_3); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1); __STORE(__h - 4, __reg_2_4); __h++; } if (0) {} else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0, __h + 0); __CALC1(__reg_1_1, __reg_1_1, __reg_1_1, __reg_1_3, __reg_1_2, __reg_0); __CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2); __STORE(__h - 4, __reg_2_0); __reg_1_4 = __reg_0; __LOAD(__reg_0, __h + 1); __CALC1(__reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_3, __reg_0); __CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3); __STORE(__h - 3, __reg_2_1); __reg_1_0 = __reg_0; __CALC2(__reg_2_1, __reg_2_1, __reg_2_1, __reg_2_3, __reg_2_2, __reg_1_4); __STORE(__h - 2, __reg_2_2); __CALC2(__reg_2_2, __reg_2_2, __reg_2_2, __reg_2_2, __reg_2_3, __reg_1_0); __STORE(__h - 1, __reg_2_3); } else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0, __h + 0); __CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); __CALC2(__reg_2_4, __reg_2_3, 
__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2); __STORE(__h - 4, __reg_2_0); __LOAD(__reg_0, __h + 1); __CALC1(__reg_1_2, __reg_1_2, __reg_1_2, __reg_1_4, __reg_1_3, __reg_0); __CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3); __STORE(__h - 3, __reg_2_1); __reg_1_0 = __reg_0; __LOAD(__reg_0, __h + 2); __CALC1(__reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_4, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4); __STORE(__h - 2, __reg_2_2); __reg_1_1 = __reg_0; __CALC2(__reg_2_2, __reg_2_2, __reg_2_2, __reg_2_4, __reg_2_3, __reg_1_0); __STORE(__h - 1, __reg_2_3); __CALC2(__reg_2_3, __reg_2_3, __reg_2_3, __reg_2_3, __reg_2_4, __reg_1_1); __STORE(__h + 0, __reg_2_4); } else if (__h + 4 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0, __h + 0); __CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); __CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2); __STORE(__h - 4, __reg_2_0); __LOAD(__reg_0, __h + 1); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0); __CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3); __STORE(__h - 3, __reg_2_1); __LOAD(__reg_0, __h + 2); __CALC1(__reg_1_3, __reg_1_3, __reg_1_3, __reg_1_0, __reg_1_4, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4); __STORE(__h - 2, __reg_2_2); __reg_1_1 = __reg_0; __LOAD(__reg_0, __h + 3); __CALC1(__reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_0, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0); __STORE(__h - 1, __reg_2_3); __reg_1_2 = __reg_0; __CALC2(__reg_2_3, __reg_2_3, __reg_2_3, __reg_2_0, __reg_2_4, __reg_1_1); __STORE(__h + 0, __reg_2_4); __CALC2(__reg_2_4, __reg_2_4, __reg_2_4, __reg_2_4, __reg_2_0, __reg_1_2); __STORE(__h + 1, __reg_2_0); } else if (__h + 5 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0, __h + 0); __CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); __CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2); __STORE(__h - 4, __reg_2_0); __LOAD(__reg_0, __h + 1); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0); __CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3); __STORE(__h - 3, __reg_2_1); __LOAD(__reg_0, __h + 2); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4); __STORE(__h - 2, __reg_2_2); __LOAD(__reg_0, __h + 3); __CALC1(__reg_1_4, __reg_1_4, __reg_1_4, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0); __STORE(__h - 1, __reg_2_3); __reg_1_2 = __reg_0; __LOAD(__reg_0, __h + 4); __CALC1(__reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_1, __reg_0); __CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1); __STORE(__h + 0, __reg_2_4); __reg_1_3 = __reg_0; __CALC2(__reg_2_4, __reg_2_4, __reg_2_4, __reg_2_1, __reg_2_0, __reg_1_2); __STORE(__h + 1, __reg_2_0); __CALC2(__reg_2_0, __reg_2_0, __reg_2_0, __reg_2_0, __reg_2_1, __reg_1_3); __STORE(__h + 2, __reg_2_1); } else if (__h + 6 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0, __h + 0); __CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); __CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2); __STORE(__h - 4, __reg_2_0); __LOAD(__reg_0, __h + 1); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, 
__reg_1_3, __reg_0); __CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3); __STORE(__h - 3, __reg_2_1); __LOAD(__reg_0, __h + 2); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4); __STORE(__h - 2, __reg_2_2); __LOAD(__reg_0, __h + 3); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0); __STORE(__h - 1, __reg_2_3); __LOAD(__reg_0, __h + 4); __CALC1(__reg_1_0, __reg_1_0, __reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1); __STORE(__h + 0, __reg_2_4); __reg_1_3 = __reg_0; __LOAD(__reg_0, __h + 5); __CALC1(__reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_2, __reg_0); __CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2); __STORE(__h + 1, __reg_2_0); __reg_1_4 = __reg_0; __CALC2(__reg_2_0, __reg_2_0, __reg_2_0, __reg_2_2, __reg_2_1, __reg_1_3); __STORE(__h + 2, __reg_2_1); __CALC2(__reg_2_1, __reg_2_1, __reg_2_1, __reg_2_1, __reg_2_2, __reg_1_4); __STORE(__h + 3, __reg_2_2); } } else { for (__h = 9; __h <= __side1LenOl - 5;) { __LOAD(__reg_0, __h); __CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); __CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2); __STORE(__h - 4, __reg_2_0); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0); __CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3); __STORE(__h - 4, __reg_2_1); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4); __STORE(__h - 4, __reg_2_2); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0); __STORE(__h - 4, __reg_2_3); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1); __STORE(__h - 4, __reg_2_4); __h++; } if (__h == __side1LenOl) return; __LOAD(__reg_0, __h); __CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); __CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2); __STORE(__h - 4, __reg_2_0); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0, __h); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0); __CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3); __STORE(__h - 4, __reg_2_1); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0, __h); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4); __STORE(__h - 4, __reg_2_2); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0, __h); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0); __STORE(__h - 4, __reg_2_3); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0, __h); __CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1); __STORE(__h - 4, __reg_2_4); __h++; } } __global__ void kernel0_1(double *A, int dimsize, int timestep, int c0) { #ifndef 
AN5D_TYPE #define AN5D_TYPE unsigned #endif const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0); #define __c0 c0 const AN5D_TYPE __c1Len = (dimsize - 2 - 2); const AN5D_TYPE __c1Pad = (2); #define __c1 c1 const AN5D_TYPE __c2Len = (dimsize - 2 - 2); const AN5D_TYPE __c2Pad = (2); #define __c2 c2 const AN5D_TYPE __halo1 = 2; const AN5D_TYPE __halo2 = 2; const AN5D_TYPE __side0Len = 1; const AN5D_TYPE __side1Len = 512; const AN5D_TYPE __side2Len = 508; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len; const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len; const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x; const AN5D_TYPE __local_c2 = __tid; const AN5D_TYPE __c1Id = blockIdx.x / __side2Num; const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2; double __reg_0; double __reg_1_0; double __reg_1_1; double __reg_1_2; double __reg_1_3; double __reg_1_4; __shared__ double __a_sb_double[__blockSize * 2]; double *__a_sb = __a_sb_double; const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2; const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len; const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1); const AN5D_TYPE __storeValid = __writeValid1; AN5D_TYPE __c1; AN5D_TYPE __h; const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id; #define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0) #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR_0_wrap(__rn0, __a) do { __rn0 = (((((((((((((((((((((((((0.03125f * (__SBREF(__a_sb, -2))) + (0.03126f * (__SBREF(__a_sb, -1)))) + (0.03127f * (__REGREF(__a, 0)))) + (0.03128f * (__SBREF(__a_sb, 1)))) + (0.03129f * (__SBREF(__a_sb, 2)))))))))))))))))))))))); } while (0) #define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0) #define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0) #define __CALCEXPR_0(out, a) do { __CALCEXPR_0_wrap(out, a); } while (0); #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR_1_wrap(__rn0, __a) do { __rn0 = (((((((((((((((((((((0.03130f * (__SBREF(__a_sb, -2)))) + (0.03131f * (__SBREF(__a_sb, -1)))) + (0.03132f * (__REGREF(__a, 0)))) + (0.03133f * (__SBREF(__a_sb, 1)))) + (0.03134f * (__SBREF(__a_sb, 2))))))))))))))))))); } while (0) #define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? 
__blockSize : 0]; } while (0) #define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0) #define __CALCEXPR_1(out, a) do { double etmp; __CALCEXPR_1_wrap(etmp, a); out += etmp; } while (0); #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR_2_wrap(__rn0, __a) do { __rn0 = ((((((((((((((((0.03135f * (__SBREF(__a_sb, -2)))) + (0.03136f * (__SBREF(__a_sb, -1)))) + (0.24712f * (__REGREF(__a, 0)))) + (0.03138f * (__SBREF(__a_sb, 1)))) + (0.03139f * (__SBREF(__a_sb, 2)))))))))))))); } while (0) #define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0) #define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0) #define __CALCEXPR_2(out, a) do { double etmp; __CALCEXPR_2_wrap(etmp, a); out += etmp; } while (0); #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR_3_wrap(__rn0, __a) do { __rn0 = (((((((((((0.03140f * (__SBREF(__a_sb, -2)))) + (0.03141f * (__SBREF(__a_sb, -1)))) + (0.03142f * (__REGREF(__a, 0)))) + (0.03143f * (__SBREF(__a_sb, 1)))) + (0.03144f * (__SBREF(__a_sb, 2))))))))); } while (0) #define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0) #define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0) #define __CALCEXPR_3(out, a) do { double etmp; __CALCEXPR_3_wrap(etmp, a); out += etmp; } while (0); #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR_4_wrap(__rn0, __a) do { __rn0 = ((((((0.03145f * (__SBREF(__a_sb, -2)))) + (0.03146f * (__SBREF(__a_sb, -1)))) + (0.03147f * (__REGREF(__a, 0)))) + (0.03148f * (__SBREF(__a_sb, 1)))) + (0.03149f * (__SBREF(__a_sb, 2)))); } while (0) #define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? 
__blockSize : 0]; } while (0) #define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0) #define __CALCEXPR_4(out, a) do { double etmp; __CALCEXPR_4_wrap(etmp, a); out += etmp; } while (0); #define __CALCEXPR(out0, out1, out2, out3, out4, reg) do { __CALCEXPR_0(out0, reg); __CALCEXPR_1(out1, reg); __CALCEXPR_2(out2, reg); __CALCEXPR_3(out3, reg); __CALCEXPR_4(out4, reg); } while (0); #define __CALC1(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid1) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0) #define __STORE(h, out) do { if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __DEST = out; }} while (0) if (__c1Id == 0) { __LOAD(__reg_0, 0); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0); __LOAD(__reg_0, 1); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0); __LOAD(__reg_0, 2); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __LOAD(__reg_0, 3); __CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); __LOAD(__reg_0, 4); __CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); __STORE(2, __reg_1_2); } else { __LOAD(__reg_0, 0); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0); __LOAD(__reg_0, 1); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0); __LOAD(__reg_0, 2); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __LOAD(__reg_0, 3); __CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); __LOAD(__reg_0, 4); __CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); __STORE(2, __reg_1_2); } __a_sb = __a_sb_double + __blockSize * 1; if (__c1Id == __side1Num - 1) { for (__h = 5; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 7;) { __LOAD(__reg_0, __h); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0); __STORE(__h - 2, __reg_1_3); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0); __STORE(__h - 2, __reg_1_4); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __STORE(__h - 2, __reg_1_0); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); __STORE(__h - 2, __reg_1_1); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); __STORE(__h - 2, __reg_1_2); __h++; __DB_SWITCH(); __syncthreads(); } if (0) {} else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0, __h + 0); __CALC1(__reg_1_2, __reg_1_2, __reg_1_2, __reg_1_4, __reg_1_3, __reg_0); __STORE(__h - 2, __reg_1_3); __LOAD(__reg_0, __h + 1); __CALC1(__reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_4, __reg_0); __STORE(__h - 1, __reg_1_4); } else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0, __h + 0); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0); __STORE(__h - 2, __reg_1_3); __LOAD(__reg_0, __h + 1); __CALC1(__reg_1_3, __reg_1_3, __reg_1_3, __reg_1_0, __reg_1_4, __reg_0); __STORE(__h - 1, __reg_1_4); __LOAD(__reg_0, __h + 2); __CALC1(__reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_0, __reg_0); __STORE(__h + 0, __reg_1_0); } else if (__h + 4 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0, __h + 0); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0); __STORE(__h - 2, __reg_1_3); __LOAD(__reg_0, __h + 1); 
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0); __STORE(__h - 1, __reg_1_4); __LOAD(__reg_0, __h + 2); __CALC1(__reg_1_4, __reg_1_4, __reg_1_4, __reg_1_1, __reg_1_0, __reg_0); __STORE(__h + 0, __reg_1_0); __LOAD(__reg_0, __h + 3); __CALC1(__reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_1, __reg_0); __STORE(__h + 1, __reg_1_1); } else if (__h + 5 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0, __h + 0); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0); __STORE(__h - 2, __reg_1_3); __LOAD(__reg_0, __h + 1); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0); __STORE(__h - 1, __reg_1_4); __LOAD(__reg_0, __h + 2); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __STORE(__h + 0, __reg_1_0); __LOAD(__reg_0, __h + 3); __CALC1(__reg_1_0, __reg_1_0, __reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __STORE(__h + 1, __reg_1_1); __LOAD(__reg_0, __h + 4); __CALC1(__reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_2, __reg_0); __STORE(__h + 2, __reg_1_2); } else if (__h + 6 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0, __h + 0); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0); __STORE(__h - 2, __reg_1_3); __LOAD(__reg_0, __h + 1); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0); __STORE(__h - 1, __reg_1_4); __LOAD(__reg_0, __h + 2); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __STORE(__h + 0, __reg_1_0); __LOAD(__reg_0, __h + 3); __CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); __STORE(__h + 1, __reg_1_1); __LOAD(__reg_0, __h + 4); __CALC1(__reg_1_1, __reg_1_1, __reg_1_1, __reg_1_3, __reg_1_2, __reg_0); __STORE(__h + 2, __reg_1_2); __LOAD(__reg_0, __h + 5); __CALC1(__reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_3, __reg_0); __STORE(__h + 3, __reg_1_3); } } else { for (__h = 5; __h <= __side1LenOl - 5;) { __LOAD(__reg_0, __h); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0); __STORE(__h - 2, __reg_1_3); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0); __STORE(__h - 2, __reg_1_4); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __STORE(__h - 2, __reg_1_0); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); __STORE(__h - 2, __reg_1_1); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); __STORE(__h - 2, __reg_1_2); __h++; __DB_SWITCH(); __syncthreads(); } if (__h == __side1LenOl) return; __LOAD(__reg_0, __h); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0); __STORE(__h - 2, __reg_1_3); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0, __h); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0); __STORE(__h - 2, __reg_1_4); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0, __h); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __STORE(__h - 2, __reg_1_0); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0, __h); __CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); __STORE(__h - 2, __reg_1_1); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0, __h); __CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); __STORE(__h - 2, __reg_1_2); __h++; } }
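/*
 * Note on the kernel0_* family above: this has the shape of machine-generated
 * temporal-blocking stencil code (AN5D-style) for a radius-2 2D box stencil
 * (25 taps, split row-wise across __CALCEXPR_0..__CALCEXPR_4).  kernel0_k
 * fuses k time steps per sweep (__side0Len == k) in a register pipeline
 * (__reg_1_* ... __reg_k_*), paying __OlLen = __halo * __side0Len rows and
 * columns of redundant halo work per block in exchange for fewer global-memory
 * round trips.  The store front trails the load front by __halo1 * __side0Len
 * rows, which is why kernel0_3 stores at __h - 6, kernel0_2 at __h - 4, and
 * kernel0_1 at __h - 2.
 */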
scanLabels.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"

/*
 * CCL3D.cu
 */
#define CCL_BLOCK_SIZE_X 8
#define CCL_BLOCK_SIZE_Y 8
#define CCL_BLOCK_SIZE_Z 8

// Device-side flag: set to 1 whenever any thread lowers a label, so the
// host knows another scan pass is needed.
__device__ int d_isNotDone;

__global__ void scanLabels(int* labels, int w, int h, int d)
{
    const int x = blockIdx.x * CCL_BLOCK_SIZE_X + threadIdx.x;
    const int y = blockIdx.y * CCL_BLOCK_SIZE_Y + threadIdx.y;
    const int z = blockIdx.z * CCL_BLOCK_SIZE_Z + threadIdx.z;
    const int index = (z*h + y)*w + x;
    if (x >= w || y >= h || z >= d) return;
    const int Z1 = w*h; // linear stride between z-slices
    const int Y1 = w;   // linear stride between rows
    int lcur = labels[index];
    if (lcur) {
        int lmin = index; // own index is a safe upper bound for the minimum
        // Scan the 26-neighborhood for the smallest non-zero label.
        int lne, pos;
        for (int Zdif = -Z1; Zdif <= Z1; Zdif += Z1) {
            for (int Ydif = -Y1; Ydif <= Y1; Ydif += Y1) {
                for (int Xdif = -1; Xdif <= 1; Xdif += 1) {
                    pos = index + Zdif + Ydif + Xdif;
                    // Only a linear bounds check, so the neighborhood wraps
                    // across row/slice edges ("circular boundary").
                    lne = (pos >= 0 && pos < w*h*d) ? labels[pos] : 0;
                    if (lne && lne < lmin) lmin = lne;
                }
            }
        }
        // The (Xdif,Ydif,Zdif)=(0,0,0) offset compares the voxel with
        // itself, which is unnecessary but harmless.
        if (lmin < lcur) {
            // Label-equivalence update: point the parent of the current
            // label at the smaller label found in the neighborhood.
            int lpa = labels[lcur];
            labels[lpa] = min(lpa, lmin);
            d_isNotDone = 1;
        }
    }
}
scanLabels.cu
#include "includes.h" /* * CCL3D.cu */ #define CCL_BLOCK_SIZE_X 8 #define CCL_BLOCK_SIZE_Y 8 #define CCL_BLOCK_SIZE_Z 8 __device__ int d_isNotDone; __global__ void scanLabels(int* labels, int w, int h, int d) { const int x = blockIdx.x * CCL_BLOCK_SIZE_X + threadIdx.x; const int y = blockIdx.y * CCL_BLOCK_SIZE_Y + threadIdx.y; const int z = blockIdx.z * CCL_BLOCK_SIZE_Z + threadIdx.z; const int index = (z*h + y)*w + x; if (x >= w || y >= h || z >= d) return; const int Z1 = w*h; const int Y1 = w; int lcur = labels[index]; if (lcur) { int lmin = index; // MAX // 26-neighbors int lne, pos; for (int Zdif = -Z1; Zdif <= Z1; Zdif += Z1) { for (int Ydif = -Y1; Ydif <= Y1; Ydif += Y1) { for (int Xdif = -1; Xdif <= 1; Xdif += 1) { pos = index + Zdif + Ydif + Xdif; lne = (pos >= 0 && pos < w*h*d) ? labels[pos] : 0; // circular boundary if (lne && lne < lmin) lmin = lne; } } } // need not (Xdif,Ydif,Zdif)=(0,0,0) but no problem if (lmin < lcur) { int lpa = labels[lcur]; labels[lpa] = min(lpa, lmin); d_isNotDone = 1; } } }
7b95bfe2039ec32488504c0f1f85ab071f8304fa.hip
// !!! This is a file automatically generated by hipify!!! /* * Contar cuerpos celestes * * Asignatura Computacin Paralela (Grado Ingeniera Informtica) * Cdigo secuencial base * * @author Ana Moretn Fernndez, Arturo Gonzalez-Escribano * @author Luis Higuero Casado, Esther Cuervo Fernndez * @version v1.3 * * (c) 2017, Grupo Trasgo, Universidad de Valladolid */ #include <stdio.h> #include <unistd.h> #include <stdlib.h> #include <time.h> #include <hip/hip_runtime.h> #include "cputils.h" /* Substituir min por el operador */ #define min(x,y) ((x) < (y)? (x) : (y)) #define THREADSPORBLOQUE 128 /** * Funcion secuencial para la busqueda de mi bloque */ __device__ int computation(int x, int y, int columns, int* matrixData, int *matrixResult, int *matrixResultCopy){ // Inicialmente cojo mi indice int result=matrixResultCopy[x*columns+y]; if( result!= -1){ //Si es de mi mismo grupo, entonces actualizo if(matrixData[(x-1)*columns+y] == matrixData[x*columns+y]) { result = min (result, matrixResultCopy[(x-1)*columns+y]); } if(matrixData[(x+1)*columns+y] == matrixData[x*columns+y]) { result = min (result, matrixResultCopy[(x+1)*columns+y]); } if(matrixData[x*columns+y-1] == matrixData[x*columns+y]) { result = min (result, matrixResultCopy[x*columns+y-1]); } if(matrixData[x*columns+y+1] == matrixData[x*columns+y]) { result = min (result, matrixResultCopy[x*columns+y+1]); } // Si el indice no ha cambiado retorna 0 if(matrixResult[x*columns+y] == result){ return 0; } // Si el indice cambia, actualizo matrix de resultados con el indice adecuado y retorno 1 else { matrixResult[x*columns+y]=result; return 1;} } return 0; } /*Definicion de kernels*/ __global__ void etiquetadoInicial(int *matrixResult, int *matrixResultCopy, int *matrixData, int tamMatriz){ int indiceThread = (blockIdx.x*blockDim.x)+(threadIdx.x); if(indiceThread < tamMatriz){ matrixResultCopy[indiceThread] = -1; matrixResult[indiceThread] = -1; if(matrixData[indiceThread] != 0){ matrixResult[indiceThread] = indiceThread; } } } __global__ void actualizacionCopia(int *matrixResult, int *matrixResultCopy, int tamMatriz){ int indiceThread = (blockIdx.x*blockDim.x)+(threadIdx.x); if(indiceThread < tamMatriz){ if(matrixResult[indiceThread] != -1){ matrixResultCopy[indiceThread] = matrixResult[indiceThread]; } } } __global__ void computo(int *matrixResult, int *matrixResultCopy, int *matrixData, int filas, int columnas, int *arrayCambio){ int indiceThread = (blockIdx.x*blockDim.x)+(threadIdx.x); if(indiceThread < columnas*filas){ int x,y; x = indiceThread/columnas; //la fila en la que est la posicion es el entero resultante de dividir el indice por el tamao de la fila... y = indiceThread%columnas; //y la columna es el resto arrayCambio[indiceThread] = computation(x, y, columnas, matrixData, matrixResult, matrixResultCopy); } } //Esta funcion suma todo su bloque en su posicion de auxiliar __global__ void recuento(int *arrayCambio, int *arrayAux, int rows , int columns){ int numbloque = blockIdx.x; int indiceThreadGlobal = (blockIdx.x*blockDim.x)+(threadIdx.x); int i; for (i=2;i<=blockDim.x;i=i*2){ arrayCambio[indiceThreadGlobal]=arrayCambio[indiceThreadGlobal]+arrayCambio[indiceThreadGlobal+i/2]; __syncthreads(); } if((indiceThreadGlobal%blockDim.x)==0){ arrayAux[numbloque]=arrayCambio[indiceThreadGlobal]; } } /*Fin de kernels*/ /** * Funcion principal */ int main (int argc, char* argv[]) { /* 1. 
Leer argumento y declaraciones */ if (argc < 2) { printf("Uso: %s <imagen_a_procesar>\n", argv[0]); return(EXIT_SUCCESS); } char* image_filename = argv[1]; int rows=-1; int columns =-1; int *matrixData=NULL; int *matrixResult=NULL; int *matrixResultCopy=NULL; int numBlocks=-1; /* 2. Leer Fichero de entrada e inicializar datos */ /* 2.1 Abrir fichero */ FILE *f = cp_abrir_fichero(image_filename); // Compruebo que no ha habido errores if (f==NULL) { perror ("Error al abrir fichero.txt"); return -1; } /* 2.2 Leo valores del fichero */ int i,j; fscanf (f, "%d\n", &rows); fscanf (f, "%d\n", &columns); // Aado dos filas y dos columnas mas para los bordes rows=rows+2; columns = columns+2; /* 2.3 Reservo la memoria necesaria para la matriz de datos */ matrixData= (int *)malloc( rows*(columns) * sizeof(int) ); if ( (matrixData == NULL) ) { perror ("Error reservando memoria"); return -1; } /* 2.4 Inicializo matrices */ for(i=0;i< rows; i++){ for(j=0;j< columns; j++){ matrixData[i*(columns)+j]=-1; } } /* 2.5 Relleno bordes de la matriz */ for(i=1;i<rows-1;i++){ matrixData[i*(columns)+0]=0; matrixData[i*(columns)+columns-1]=0; } for(i=1;i<columns-1;i++){ matrixData[0*(columns)+i]=0; matrixData[(rows-1)*(columns)+i]=0; } /* 2.6 Relleno la matriz con los datos del fichero */ for(i=1;i<rows-1;i++){ for(j=1;j<columns-1;j++){ fscanf (f, "%d\n", &matrixData[i*(columns)+j]); } } fclose(f); #ifdef WRITE printf("Inicializacion \n"); for(i=0;i<rows;i++){ for(j=0;j<columns;j++){ printf ("%d\t", matrixData[i*(columns)+j]); } printf("\n"); } #endif hipSetDevice(0); hipDeviceSynchronize(); /* PUNTO DE INICIO MEDIDA DE TIEMPO */ double t_ini = cp_Wtime(); // // EL CODIGO A PARALELIZAR COMIENZA AQUI // hipError_t err1, err2, err3, err4, err5; //variables para comprobacin de errores. int tamMatriz = rows*columns; int numbloques=tamMatriz/THREADSPORBLOQUE + (tamMatriz%THREADSPORBLOQUE != 0); matrixResult= (int *)malloc( (rows)*(columns) * sizeof(int) ); int *arrayCambio = (int *)malloc(numbloques*sizeof(int)); if ( (matrixResult == NULL) || (arrayCambio==NULL) ) { perror ("Error reservando memoria"); return -1; } /*Envio de las matrices a la GPU*/ //Inicializacion int *GPUmatrixResult; int *GPUmatrixResultCopy; int *GPUmatrixData; int *GPUArrayCambio; int *GPUArrayCambioAux; err1 = hipMalloc(&GPUmatrixResult, rows*columns*sizeof(int)); err2 = hipMalloc(&GPUmatrixResultCopy, rows*columns*sizeof(int)); err3 = hipMalloc(&GPUmatrixData, rows*columns*sizeof(int)); err4 = hipMalloc(&GPUArrayCambio, rows*columns*sizeof(int)); err5 = hipMalloc(&GPUArrayCambioAux,numbloques*sizeof(int)); if(err1 != hipSuccess || err2 != hipSuccess || err3 != hipSuccess || err4 != hipSuccess || err5!=hipSuccess){ printf("Error en el reservado de memoria GPU\n"); return -1; } //Envio a GPU err1 = hipMemcpy(GPUmatrixData, matrixData, rows*columns*sizeof(int), hipMemcpyHostToDevice); if(err1 != hipSuccess){ printf("Error enviando las matrices a la GPU\n"); return -1; } /*Definicion de grids*/ dim3 bloque(THREADSPORBLOQUE,1); dim3 grid(numbloques,1); /* 3. Etiquetado inicial */ hipLaunchKernelGGL(( etiquetadoInicial), dim3(grid),dim3(bloque), 0, 0, GPUmatrixResult, GPUmatrixResultCopy, GPUmatrixData,tamMatriz); /* 4. 
Computacion */ int t=0; /* 4.1 Flag para ver si ha habido cambios y si se continua la ejecucion */ int flagCambio=1; /* 4.2 Busqueda de los bloques similiares */ for(t=0; flagCambio !=0; t++){ flagCambio=0; /* 4.2.1 Actualizacion copia */ hipLaunchKernelGGL(( actualizacionCopia), dim3(grid),dim3(bloque), 0, 0, GPUmatrixResult,GPUmatrixResultCopy,tamMatriz); /* 4.2.2 Computo y detecto si ha habido cambios */ hipLaunchKernelGGL(( computo), dim3(grid),dim3(bloque), 0, 0, GPUmatrixResult, GPUmatrixResultCopy, GPUmatrixData, rows, columns, GPUArrayCambio); hipLaunchKernelGGL(( recuento), dim3(grid),dim3(bloque), 0, 0, GPUArrayCambio,GPUArrayCambioAux,rows,columns); //El resultado de flagCambio se guarda en un array, hacemos reduccion en el host err1 = hipMemcpy(arrayCambio,GPUArrayCambioAux,numbloques*sizeof(int),hipMemcpyDeviceToHost); if(err1 != hipSuccess){ printf("Error copiando memoria al host: %s\n", hipGetErrorString(err1)); return -1; } for(i=0;i<numbloques;i++){ flagCambio = arrayCambio[i]; if(flagCambio != 0) break; } #ifdef DEBUG printf("\nResultados iter %d: \n", t); for(i=0;i<rows;i++){ for(j=0;j<columns;j++){ printf ("%d\t", matrixResult[i*columns+j]); } printf("\n"); } #endif //printf("FlagCambio%d\n",flagCambio); } //Una vez terminada la computación, se habrá generado el matrixResult final en la GPU err1 = hipMemcpy(matrixResult,GPUmatrixResult,rows*columns*sizeof(int),hipMemcpyDeviceToHost); if(err1 != hipSuccess){ printf("Error copiando memoria al host: %s\n", hipGetErrorString(err1)); return -1; } /* 4.3 Inicio cuenta del numero de bloques */ numBlocks=0; for(i=1;i<rows-1;i++){ for(j=1;j<columns-1;j++){ if(matrixResult[i*columns+j] == i*columns+j) numBlocks++; } } /* Liberacion de memoria*/ hipFree(GPUmatrixResult); hipFree(GPUmatrixData); hipFree(GPUmatrixResultCopy); hipFree(GPUArrayCambio); hipFree(GPUArrayCambioAux); // // EL CODIGO A PARALELIZAR TERMINA AQUI // /* PUNTO DE FINAL DE MEDIDA DE TIEMPO */ hipDeviceSynchronize(); double t_fin = cp_Wtime(); /* 5. Comprobación de resultados */ double t_total = (double)(t_fin - t_ini); printf("Result: %d:%d\n", numBlocks, t); printf("Time: %lf\n", t_total); #ifdef WRITE printf("Resultado: \n"); for(i=0;i<rows;i++){ for(j=0;j<columns;j++){ printf ("%d\t", matrixResult[i*columns+j]); } printf("\n"); } #endif /* 6. Liberacion de memoria */ free(matrixData); free(matrixResult); free(matrixResultCopy); return 0; }
7b95bfe2039ec32488504c0f1f85ab071f8304fa.cu
/* * Contar cuerpos celestes * * Asignatura Computación Paralela (Grado Ingeniería Informática) * Código secuencial base * * @author Ana Moretón Fernández, Arturo Gonzalez-Escribano * @author Luis Higuero Casado, Esther Cuervo Fernández * @version v1.3 * * (c) 2017, Grupo Trasgo, Universidad de Valladolid */ #include <stdio.h> #include <unistd.h> #include <stdlib.h> #include <time.h> #include <cuda.h> #include "cputils.h" /* Substituir min por el operador */ #define min(x,y) ((x) < (y)? (x) : (y)) #define THREADSPORBLOQUE 128 /** * Funcion secuencial para la busqueda de mi bloque */ __device__ int computation(int x, int y, int columns, int* matrixData, int *matrixResult, int *matrixResultCopy){ // Inicialmente cojo mi indice int result=matrixResultCopy[x*columns+y]; if( result!= -1){ //Si es de mi mismo grupo, entonces actualizo if(matrixData[(x-1)*columns+y] == matrixData[x*columns+y]) { result = min (result, matrixResultCopy[(x-1)*columns+y]); } if(matrixData[(x+1)*columns+y] == matrixData[x*columns+y]) { result = min (result, matrixResultCopy[(x+1)*columns+y]); } if(matrixData[x*columns+y-1] == matrixData[x*columns+y]) { result = min (result, matrixResultCopy[x*columns+y-1]); } if(matrixData[x*columns+y+1] == matrixData[x*columns+y]) { result = min (result, matrixResultCopy[x*columns+y+1]); } // Si el indice no ha cambiado retorna 0 if(matrixResult[x*columns+y] == result){ return 0; } // Si el indice cambia, actualizo matrix de resultados con el indice adecuado y retorno 1 else { matrixResult[x*columns+y]=result; return 1;} } return 0; } /*Definicion de kernels*/ __global__ void etiquetadoInicial(int *matrixResult, int *matrixResultCopy, int *matrixData, int tamMatriz){ int indiceThread = (blockIdx.x*blockDim.x)+(threadIdx.x); if(indiceThread < tamMatriz){ matrixResultCopy[indiceThread] = -1; matrixResult[indiceThread] = -1; if(matrixData[indiceThread] != 0){ matrixResult[indiceThread] = indiceThread; } } } __global__ void actualizacionCopia(int *matrixResult, int *matrixResultCopy, int tamMatriz){ int indiceThread = (blockIdx.x*blockDim.x)+(threadIdx.x); if(indiceThread < tamMatriz){ if(matrixResult[indiceThread] != -1){ matrixResultCopy[indiceThread] = matrixResult[indiceThread]; } } } __global__ void computo(int *matrixResult, int *matrixResultCopy, int *matrixData, int filas, int columnas, int *arrayCambio){ int indiceThread = (blockIdx.x*blockDim.x)+(threadIdx.x); if(indiceThread < columnas*filas){ int x,y; x = indiceThread/columnas; //la fila en la que está la posicion es el entero resultante de dividir el indice por el tamaño de la fila... y = indiceThread%columnas; //y la columna es el resto arrayCambio[indiceThread] = computation(x, y, columnas, matrixData, matrixResult, matrixResultCopy); } } //Esta funcion suma todo su bloque en su posicion de auxiliar __global__ void recuento(int *arrayCambio, int *arrayAux, int rows , int columns){ int numbloque = blockIdx.x; int indiceThreadGlobal = (blockIdx.x*blockDim.x)+(threadIdx.x); int i; for (i=2;i<=blockDim.x;i=i*2){ arrayCambio[indiceThreadGlobal]=arrayCambio[indiceThreadGlobal]+arrayCambio[indiceThreadGlobal+i/2]; __syncthreads(); } if((indiceThreadGlobal%blockDim.x)==0){ arrayAux[numbloque]=arrayCambio[indiceThreadGlobal]; } } /*Fin de kernels*/ /** * Funcion principal */ int main (int argc, char* argv[]) { /* 1. 
Leer argumento y declaraciones */ if (argc < 2) { printf("Uso: %s <imagen_a_procesar>\n", argv[0]); return(EXIT_SUCCESS); } char* image_filename = argv[1]; int rows=-1; int columns =-1; int *matrixData=NULL; int *matrixResult=NULL; int *matrixResultCopy=NULL; int numBlocks=-1; /* 2. Leer Fichero de entrada e inicializar datos */ /* 2.1 Abrir fichero */ FILE *f = cp_abrir_fichero(image_filename); // Compruebo que no ha habido errores if (f==NULL) { perror ("Error al abrir fichero.txt"); return -1; } /* 2.2 Leo valores del fichero */ int i,j; fscanf (f, "%d\n", &rows); fscanf (f, "%d\n", &columns); // Añado dos filas y dos columnas mas para los bordes rows=rows+2; columns = columns+2; /* 2.3 Reservo la memoria necesaria para la matriz de datos */ matrixData= (int *)malloc( rows*(columns) * sizeof(int) ); if ( (matrixData == NULL) ) { perror ("Error reservando memoria"); return -1; } /* 2.4 Inicializo matrices */ for(i=0;i< rows; i++){ for(j=0;j< columns; j++){ matrixData[i*(columns)+j]=-1; } } /* 2.5 Relleno bordes de la matriz */ for(i=1;i<rows-1;i++){ matrixData[i*(columns)+0]=0; matrixData[i*(columns)+columns-1]=0; } for(i=1;i<columns-1;i++){ matrixData[0*(columns)+i]=0; matrixData[(rows-1)*(columns)+i]=0; } /* 2.6 Relleno la matriz con los datos del fichero */ for(i=1;i<rows-1;i++){ for(j=1;j<columns-1;j++){ fscanf (f, "%d\n", &matrixData[i*(columns)+j]); } } fclose(f); #ifdef WRITE printf("Inicializacion \n"); for(i=0;i<rows;i++){ for(j=0;j<columns;j++){ printf ("%d\t", matrixData[i*(columns)+j]); } printf("\n"); } #endif cudaSetDevice(0); cudaDeviceSynchronize(); /* PUNTO DE INICIO MEDIDA DE TIEMPO */ double t_ini = cp_Wtime(); // // EL CODIGO A PARALELIZAR COMIENZA AQUI // cudaError_t err1, err2, err3, err4, err5; //variables para comprobación de errores. int tamMatriz = rows*columns; int numbloques=tamMatriz/THREADSPORBLOQUE + (tamMatriz%THREADSPORBLOQUE != 0); matrixResult= (int *)malloc( (rows)*(columns) * sizeof(int) ); int *arrayCambio = (int *)malloc(numbloques*sizeof(int)); if ( (matrixResult == NULL) || (arrayCambio==NULL) ) { perror ("Error reservando memoria"); return -1; } /*Envio de las matrices a la GPU*/ //Inicializacion int *GPUmatrixResult; int *GPUmatrixResultCopy; int *GPUmatrixData; int *GPUArrayCambio; int *GPUArrayCambioAux; err1 = cudaMalloc(&GPUmatrixResult, rows*columns*sizeof(int)); err2 = cudaMalloc(&GPUmatrixResultCopy, rows*columns*sizeof(int)); err3 = cudaMalloc(&GPUmatrixData, rows*columns*sizeof(int)); err4 = cudaMalloc(&GPUArrayCambio, rows*columns*sizeof(int)); err5 = cudaMalloc(&GPUArrayCambioAux,numbloques*sizeof(int)); if(err1 != cudaSuccess || err2 != cudaSuccess || err3 != cudaSuccess || err4 != cudaSuccess || err5!=cudaSuccess){ printf("Error en el reservado de memoria GPU\n"); return -1; } //Envio a GPU err1 = cudaMemcpy(GPUmatrixData, matrixData, rows*columns*sizeof(int), cudaMemcpyHostToDevice); if(err1 != cudaSuccess){ printf("Error enviando las matrices a la GPU\n"); return -1; } /*Definicion de grids*/ dim3 bloque(THREADSPORBLOQUE,1); dim3 grid(numbloques,1); /* 3. Etiquetado inicial */ etiquetadoInicial<<<grid,bloque>>>(GPUmatrixResult, GPUmatrixResultCopy, GPUmatrixData,tamMatriz); /* 4. 
Computacion */ int t=0; /* 4.1 Flag para ver si ha habido cambios y si se continua la ejecucion */ int flagCambio=1; /* 4.2 Busqueda de los bloques similiares */ for(t=0; flagCambio !=0; t++){ flagCambio=0; /* 4.2.1 Actualizacion copia */ actualizacionCopia<<<grid,bloque>>>(GPUmatrixResult,GPUmatrixResultCopy,tamMatriz); /* 4.2.2 Computo y detecto si ha habido cambios */ computo<<<grid,bloque>>>(GPUmatrixResult, GPUmatrixResultCopy, GPUmatrixData, rows, columns, GPUArrayCambio); recuento<<<grid,bloque>>>(GPUArrayCambio,GPUArrayCambioAux,rows,columns); //El resultado de flagCambio se guarda en un array, hacemos reduccion en el host err1 = cudaMemcpy(arrayCambio,GPUArrayCambioAux,numbloques*sizeof(int),cudaMemcpyDeviceToHost); if(err1 != cudaSuccess){ printf("Error copiando memoria al host: %s\n", cudaGetErrorString(err1)); return -1; } for(i=0;i<numbloques;i++){ flagCambio = arrayCambio[i]; if(flagCambio != 0) break; } #ifdef DEBUG printf("\nResultados iter %d: \n", t); for(i=0;i<rows;i++){ for(j=0;j<columns;j++){ printf ("%d\t", matrixResult[i*columns+j]); } printf("\n"); } #endif //printf("FlagCambio%d\n",flagCambio); } //Una vez terminada la computación, se habrá generado el matrixResult final en la GPU err1 = cudaMemcpy(matrixResult,GPUmatrixResult,rows*columns*sizeof(int),cudaMemcpyDeviceToHost); if(err1 != cudaSuccess){ printf("Error copiando memoria al host: %s\n", cudaGetErrorString(err1)); return -1; } /* 4.3 Inicio cuenta del numero de bloques */ numBlocks=0; for(i=1;i<rows-1;i++){ for(j=1;j<columns-1;j++){ if(matrixResult[i*columns+j] == i*columns+j) numBlocks++; } } /* Liberacion de memoria*/ cudaFree(GPUmatrixResult); cudaFree(GPUmatrixData); cudaFree(GPUmatrixResultCopy); cudaFree(GPUArrayCambio); cudaFree(GPUArrayCambioAux); // // EL CODIGO A PARALELIZAR TERMINA AQUI // /* PUNTO DE FINAL DE MEDIDA DE TIEMPO */ cudaDeviceSynchronize(); double t_fin = cp_Wtime(); /* 5. Comprobación de resultados */ double t_total = (double)(t_fin - t_ini); printf("Result: %d:%d\n", numBlocks, t); printf("Time: %lf\n", t_total); #ifdef WRITE printf("Resultado: \n"); for(i=0;i<rows;i++){ for(j=0;j<columns;j++){ printf ("%d\t", matrixResult[i*columns+j]); } printf("\n"); } #endif /* 6. Liberacion de memoria */ free(matrixData); free(matrixResult); free(matrixResultCopy); return 0; }
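Note on the pair above: the recuento kernel reduces arrayCambio in place, reading arrayCambio[indiceThreadGlobal + i/2] while the neighbouring thread may be rewriting that cell in the same iteration, and it only synchronizes after the read-modify-write; the last partial block can also read past the end of the array. Below is a minimal race-free per-block reduction for the same THREADSPORBLOQUE = 128 configuration. It is an illustrative sketch, not the original authors' code; block_reduce, d_flags and d_blockSums are hypothetical names.

#define THREADSPORBLOQUE 128  // power of two, as in the files above

// Hypothetical replacement for recuento: each block sums its slice of
// d_flags into one cell of d_blockSums through shared memory.
__global__ void block_reduce(const int *d_flags, int *d_blockSums, int n) {
    __shared__ int s[THREADSPORBLOQUE];
    int g = blockIdx.x * blockDim.x + threadIdx.x;
    s[threadIdx.x] = (g < n) ? d_flags[g] : 0;  // guard the partial last block
    __syncthreads();
    for (int stride = blockDim.x / 2; stride > 0; stride >>= 1) {
        if (threadIdx.x < stride)
            s[threadIdx.x] += s[threadIdx.x + stride];
        __syncthreads();  // synchronize BEFORE the next round reads s[]
    }
    if (threadIdx.x == 0)
        d_blockSums[blockIdx.x] = s[0];
}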
a30f3b343e95744fe94c36702f08ef0af55fd079.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "func.hpp" __global__ void ke_add(float* a, float* b, float* dest) { dest[threadIdx.x] = a[threadIdx.x] + b[threadIdx.x]; } __host__ void cu_add(float* a, float* b, float* dest, size_t size) { hipLaunchKernelGGL(( ke_add) , dim3(1), dim3(size), 0, 0, a, b, dest); }
a30f3b343e95744fe94c36702f08ef0af55fd079.cu
#include "func.hpp" __global__ void ke_add(float* a, float* b, float* dest) { dest[threadIdx.x] = a[threadIdx.x] + b[threadIdx.x]; } __host__ void cu_add(float* a, float* b, float* dest, size_t size) { ke_add <<<1, size>>> (a, b, dest); }
54543bc9c4300c372c4c1509302795fc69f03371.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "hashtable.cuh" #include "errorcheck.h" #include "lock_hip.cuh" #include <stdio.h> #include <cstring> __global__ void init_table::init_empty_table(Data * table, int size) { int i = blockDim.x * blockIdx.x + threadIdx.x; while (i < size) { auto ptr = table + i; ptr->lock.init(); ptr->key = 0; ptr->state = EMPTY; i += gridDim.x * blockDim.x; } } __device__ int HashFunction::h2(ULL x, int size) { x = ((x >> 16) ^ x) * 0x45d9f3b; x = ((x >> 16) ^ x) * 0x45d9f3b; x = (x >> 16) ^ x; return x % size; } __device__ int HashFunction::h1(ULL a, int size) { a = (a+0x7ed55d16) + (a<<12); a = (a^0xc761c23c) ^ (a>>19); a = (a+0x165667b1) + (a<<5); a = (a+0xd3a2646c) ^ (a<<9); a = (a+0xfd7046c5) + (a<<3); a = (a^0xb55a4f09) ^ (a>>16); return a % size; } HashTable::HashTable(int size) { this->size = size; gpuErrchk( hipMalloc(&table, size * sizeof(Data)) ); int threads_per_block = 32, blocks = (size/threads_per_block) + (size % threads_per_block != 0); hipLaunchKernelGGL(( init_table::init_empty_table), dim3(blocks), dim3(threads_per_block), 0, 0, table, size); } __device__ void HashTable::insert(ULL key, ThreadLog * status) { int N = this->size, h1 = HashFunction::h1(key, size), h2 = HashFunction::h2(key, size); int index = h1; while(N > 0){ auto current = (table+index); if (status) ++(status -> iterations[index]); if(current->state == FULL) { index = (index + h2) % size; N--; continue; } Thread oldThread = current->lock.lock(Thread::Insert); switch(oldThread) { case Thread::Null: break; default: index = (index + h2) % size; N--; continue; } __threadfence(); // Not sure if it is needed if(current->state != FULL) { current->state = FULL; current->key = key; current->lock.unlock(Thread::Insert); if (status) { status->final_index = index; status->returned = true; } return; // Can't guarantee that the element will be there after insert returns... 
} index = (index + h2) % size; N--; current->lock.unlock(Thread::Insert); } if (status) { status->final_index = index; status->returned = false; } } __device__ void HashTable::deleteKey(ULL key, ThreadLog * status) { int N = this->size; int h1 = HashFunction::h1(key, size); int h2 = HashFunction::h2(key, size); int index = h1; while (N > 0) { Data *current = table + index; if(status) ++(status -> iterations[index]); ULL currst = current->state; if( currst != FULL ) { if(currst == EMPTY) break; index = (index + h2) % size; N--; continue; } Thread oldThread = current->lock.lock(Thread::Delete); switch(oldThread) { case Thread::Null: break; case Thread::Insert: index = (index + h2) % size; N--; case Thread::Delete: case Thread::Find: continue; } __threadfence(); // Not sure if it is needed switch(current->state) { case FULL: if (current->key == key) { current->state = DELETED; current->lock.unlock(Thread::Delete); if (status) { status->final_index = index; status->returned = true; } return; } index = (index + h2) % size; N--; break; case DELETED: index = (index + h2) % size; N--; break; case EMPTY: current->lock.unlock(Thread::Delete); if (status) { status->final_index = index; status->returned = false; } return; default: printf("Unrecognized thread type\n"); } current->lock.unlock(Thread::Delete); } if (status) { status->final_index = index; status->returned = false; } } __device__ void HashTable::findKey(ULL key, ThreadLog * status) { int N = this->size; int h1 = HashFunction::h1(key, size); int h2 = HashFunction::h2(key, size); int index = h1; while (N > 0) { Data *current = table + index; if(status) ++(status -> iterations[index]); ULL currst = current->state; if( currst != FULL ) { if(currst == EMPTY) break; index = (index + h2) % size; N--; continue; } Thread oldThread = current->lock.lock(Thread::Find); switch(oldThread) { case Thread::Null: case Thread::Find: break; case Thread::Insert: index = (index + h2) % size; N--; case Thread::Delete: continue; } switch(current->state) { case FULL: if (current->key == key) { current->lock.unlock(Thread::Find); if (status) { status->final_index = index; status->returned = true; } return; } // No break; moves to next case case DELETED: current->lock.unlock(Thread::Find); index = (index + h2) % size; N--; break; case EMPTY: current->lock.unlock(Thread::Find); if (status) { status->final_index = index; status->returned = false; } return; } } if (status) { status->final_index = index; status->returned = false; } } void HashTable::performInstructs(HashTable *table, Instruction *ins, int numIns, ThreadLog * status) { int threads_per_block = 32; int blocks = (numIns + threads_per_block - 1) / threads_per_block; ThreadLog * d_status = nullptr; if (status) { gpuErrchk( hipMalloc(&d_status, numIns*sizeof(ThreadLog)) ); gpuErrchk( hipMemcpy(d_status, status, numIns*sizeof(ThreadLog), hipMemcpyDefault) ); } hipLaunchKernelGGL(( cu::performInstructs), dim3(blocks), dim3(threads_per_block), 0, 0, table, ins, numIns, d_status); if (status) { gpuErrchk( hipMemcpy(status, d_status, numIns*sizeof(ThreadLog), hipMemcpyDefault) ); gpuErrchk( hipFree(d_status) ); for (int i = 0; i < numIns; ++i) { (status + i)->fillhostarray(); } } } __global__ void cu::performInstructs( HashTable * table, Instruction *instructions, int numInstructions, ThreadLog * status) { for(int id = blockIdx.x * blockDim.x + threadIdx.x; id < numInstructions; id += blockDim.x * gridDim.x) { auto curr_status = status ? 
status + id : nullptr; switch(instructions[id].type) { case Instruction::Insert: table -> insert(instructions[id].key, curr_status); break; case Instruction::Delete: table -> deleteKey(instructions[id].key, curr_status); break; case Instruction::Find: table -> findKey(instructions[id].key, curr_status); break; } } } __global__ void printtt(HashTable *hashTable) { Data *table = hashTable->table; int size = hashTable->size; for (int i = 0; i < size; i++) { switch(table[i].state) { case FULL: printf("Idx%d: %lld\n", i, table[i].key); break; case DELETED: printf("Idx%d: DELETED\n", i); break; } } } void HashTable::print(HashTable *d_hashTable, ThreadLog * statuses, int statuses_size, std::ostream & out) { gpuErrchk( hipDeviceSynchronize() ); hipLaunchKernelGGL(( printtt), dim3(1), dim3(1), 0, 0, d_hashTable); for (int i = 0; i < statuses_size; ++i) { (statuses+i)->to_string(out << std::endl << i << ". \n"); } } HashTable::~HashTable() { gpuErrchk( hipFree(table) ); } ThreadLog::ThreadLog(int size, Instruction ins) { this->size = size; gpuErrchk( hipMalloc(&iterations, size*sizeof(int)) ); gpuErrchk( hipMemset(iterations, 0, size*sizeof(int)) ); final_index = -1; returned = false; h_iterations = new int[size]; instruction = ins; } ThreadLog::~ThreadLog() { hipFree(this->iterations); delete [] h_iterations; } void ThreadLog::to_string(std::ostream & out) { out << "Instruction: "; switch (instruction.type) { case Instruction::Insert : out << "INSERT "; break; case Instruction::Delete: out << "DELETE "; break; case Instruction::Find: out << "FIND "; break; default: out << "Unrecognized instruction given to thread!!\n"; return; } out << instruction.key << "\n" << (returned ? "Success\n" : "Failure\n"); out << "Iterations this thread spent per index:\n"; for(int i = 0; i < size; ++i) { out << h_iterations[i] << " | "; } out << "\nFinal Index = "; out << final_index << std::endl; } void ThreadLog::fillhostarray() { gpuErrchk( hipMemcpy(h_iterations, iterations, size*sizeof(int), hipMemcpyDefault) ); }
54543bc9c4300c372c4c1509302795fc69f03371.cu
#include "hashtable.cuh" #include "errorcheck.h" #include "lock.cuh" #include <stdio.h> #include <cstring> __global__ void init_table::init_empty_table(Data * table, int size) { int i = blockDim.x * blockIdx.x + threadIdx.x; while (i < size) { auto ptr = table + i; ptr->lock.init(); ptr->key = 0; ptr->state = EMPTY; i += gridDim.x * blockDim.x; } } __device__ int HashFunction::h2(ULL x, int size) { x = ((x >> 16) ^ x) * 0x45d9f3b; x = ((x >> 16) ^ x) * 0x45d9f3b; x = (x >> 16) ^ x; return x % size; } __device__ int HashFunction::h1(ULL a, int size) { a = (a+0x7ed55d16) + (a<<12); a = (a^0xc761c23c) ^ (a>>19); a = (a+0x165667b1) + (a<<5); a = (a+0xd3a2646c) ^ (a<<9); a = (a+0xfd7046c5) + (a<<3); a = (a^0xb55a4f09) ^ (a>>16); return a % size; } HashTable::HashTable(int size) { this->size = size; gpuErrchk( cudaMalloc(&table, size * sizeof(Data)) ); int threads_per_block = 32, blocks = (size/threads_per_block) + (size % threads_per_block != 0); init_table::init_empty_table<<<blocks, threads_per_block>>>(table, size); } __device__ void HashTable::insert(ULL key, ThreadLog * status) { int N = this->size, h1 = HashFunction::h1(key, size), h2 = HashFunction::h2(key, size); int index = h1; while(N > 0){ auto current = (table+index); if (status) ++(status -> iterations[index]); if(current->state == FULL) { index = (index + h2) % size; N--; continue; } Thread oldThread = current->lock.lock(Thread::Insert); switch(oldThread) { case Thread::Null: break; default: index = (index + h2) % size; N--; continue; } __threadfence(); // Not sure if it is needed if(current->state != FULL) { current->state = FULL; current->key = key; current->lock.unlock(Thread::Insert); if (status) { status->final_index = index; status->returned = true; } return; // Can't guarantee that the element will be there after insert returns... 
} index = (index + h2) % size; N--; current->lock.unlock(Thread::Insert); } if (status) { status->final_index = index; status->returned = false; } } __device__ void HashTable::deleteKey(ULL key, ThreadLog * status) { int N = this->size; int h1 = HashFunction::h1(key, size); int h2 = HashFunction::h2(key, size); int index = h1; while (N > 0) { Data *current = table + index; if(status) ++(status -> iterations[index]); ULL currst = current->state; if( currst != FULL ) { if(currst == EMPTY) break; index = (index + h2) % size; N--; continue; } Thread oldThread = current->lock.lock(Thread::Delete); switch(oldThread) { case Thread::Null: break; case Thread::Insert: index = (index + h2) % size; N--; case Thread::Delete: case Thread::Find: continue; } __threadfence(); // Not sure if it is needed switch(current->state) { case FULL: if (current->key == key) { current->state = DELETED; current->lock.unlock(Thread::Delete); if (status) { status->final_index = index; status->returned = true; } return; } index = (index + h2) % size; N--; break; case DELETED: index = (index + h2) % size; N--; break; case EMPTY: current->lock.unlock(Thread::Delete); if (status) { status->final_index = index; status->returned = false; } return; default: printf("Unrecognized thread type\n"); } current->lock.unlock(Thread::Delete); } if (status) { status->final_index = index; status->returned = false; } } __device__ void HashTable::findKey(ULL key, ThreadLog * status) { int N = this->size; int h1 = HashFunction::h1(key, size); int h2 = HashFunction::h2(key, size); int index = h1; while (N > 0) { Data *current = table + index; if(status) ++(status -> iterations[index]); ULL currst = current->state; if( currst != FULL ) { if(currst == EMPTY) break; index = (index + h2) % size; N--; continue; } Thread oldThread = current->lock.lock(Thread::Find); switch(oldThread) { case Thread::Null: case Thread::Find: break; case Thread::Insert: index = (index + h2) % size; N--; case Thread::Delete: continue; } switch(current->state) { case FULL: if (current->key == key) { current->lock.unlock(Thread::Find); if (status) { status->final_index = index; status->returned = true; } return; } // No break; moves to next case case DELETED: current->lock.unlock(Thread::Find); index = (index + h2) % size; N--; break; case EMPTY: current->lock.unlock(Thread::Find); if (status) { status->final_index = index; status->returned = false; } return; } } if (status) { status->final_index = index; status->returned = false; } } void HashTable::performInstructs(HashTable *table, Instruction *ins, int numIns, ThreadLog * status) { int threads_per_block = 32; int blocks = (numIns + threads_per_block - 1) / threads_per_block; ThreadLog * d_status = nullptr; if (status) { gpuErrchk( cudaMalloc(&d_status, numIns*sizeof(ThreadLog)) ); gpuErrchk( cudaMemcpy(d_status, status, numIns*sizeof(ThreadLog), cudaMemcpyDefault) ); } cu::performInstructs<<<blocks, threads_per_block>>>(table, ins, numIns, d_status); if (status) { gpuErrchk( cudaMemcpy(status, d_status, numIns*sizeof(ThreadLog), cudaMemcpyDefault) ); gpuErrchk( cudaFree(d_status) ); for (int i = 0; i < numIns; ++i) { (status + i)->fillhostarray(); } } } __global__ void cu::performInstructs( HashTable * table, Instruction *instructions, int numInstructions, ThreadLog * status) { for(int id = blockIdx.x * blockDim.x + threadIdx.x; id < numInstructions; id += blockDim.x * gridDim.x) { auto curr_status = status ? 
status + id : nullptr; switch(instructions[id].type) { case Instruction::Insert: table -> insert(instructions[id].key, curr_status); break; case Instruction::Delete: table -> deleteKey(instructions[id].key, curr_status); break; case Instruction::Find: table -> findKey(instructions[id].key, curr_status); break; } } } __global__ void printtt(HashTable *hashTable) { Data *table = hashTable->table; int size = hashTable->size; for (int i = 0; i < size; i++) { switch(table[i].state) { case FULL: printf("Idx%d: %lld\n", i, table[i].key); break; case DELETED: printf("Idx%d: DELETED\n", i); break; } } } void HashTable::print(HashTable *d_hashTable, ThreadLog * statuses, int statuses_size, std::ostream & out) { gpuErrchk( cudaDeviceSynchronize() ); printtt<<<1, 1>>>(d_hashTable); for (int i = 0; i < statuses_size; ++i) { (statuses+i)->to_string(out << std::endl << i << ". \n"); } } HashTable::~HashTable() { gpuErrchk( cudaFree(table) ); } ThreadLog::ThreadLog(int size, Instruction ins) { this->size = size; gpuErrchk( cudaMalloc(&iterations, size*sizeof(int)) ); gpuErrchk( cudaMemset(iterations, 0, size*sizeof(int)) ); final_index = -1; returned = false; h_iterations = new int[size]; instruction = ins; } ThreadLog::~ThreadLog() { cudaFree(this->iterations); delete [] h_iterations; } void ThreadLog::to_string(std::ostream & out) { out << "Instruction: "; switch (instruction.type) { case Instruction::Insert : out << "INSERT "; break; case Instruction::Delete: out << "DELETE "; break; case Instruction::Find: out << "FIND "; break; default: out << "Unrecognized instruction given to thread!!\n"; return; } out << instruction.key << "\n" << (returned ? "Success\n" : "Failure\n"); out << "Iterations this thread spent per index:\n"; for(int i = 0; i < size; ++i) { out << h_iterations[i] << " | "; } out << "\nFinal Index = "; out << final_index << std::endl; } void ThreadLog::fillhostarray() { gpuErrchk( cudaMemcpy(h_iterations, iterations, size*sizeof(int), cudaMemcpyDefault) ); }
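A hedged host-driver sketch for the hashtable pair above. It assumes hashtable.cuh declares HashTable and Instruction with the public type/key fields used in ThreadLog::to_string, that performInstructs is callable as a static member, and that errorcheck.h provides gpuErrchk; the table size and keys are arbitrary examples, and passing status = nullptr disables logging as the kernels above allow.

#include "hashtable.cuh"
#include "errorcheck.h"

int main() {
    HashTable h_table(101);  // ctor allocates the device-side table
    HashTable *d_table;
    gpuErrchk( cudaMalloc(&d_table, sizeof(HashTable)) );
    gpuErrchk( cudaMemcpy(d_table, &h_table, sizeof(HashTable),
                          cudaMemcpyHostToDevice) );

    Instruction h_ins[2];
    h_ins[0].type = Instruction::Insert; h_ins[0].key = 42;
    h_ins[1].type = Instruction::Find;   h_ins[1].key = 42;
    Instruction *d_ins;
    gpuErrchk( cudaMalloc(&d_ins, sizeof(h_ins)) );
    gpuErrchk( cudaMemcpy(d_ins, h_ins, sizeof(h_ins), cudaMemcpyHostToDevice) );

    HashTable::performInstructs(d_table, d_ins, 2, nullptr);  // no ThreadLog
    gpuErrchk( cudaDeviceSynchronize() );

    gpuErrchk( cudaFree(d_ins) );
    gpuErrchk( cudaFree(d_table) );
    return 0;
}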
310d12488f124b3f63b74a2b830d645588bbebc4.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <cfloat> #include <vector> #include "caffe/layers/rob_layer.hpp" #include "caffe/layers/downsample_layer.hpp" #include "caffe/util/math_functions.hpp" #include "caffe/util/rng.hpp" #include <iostream> #include <ctime> #include <math.h> using namespace std; // inputs: 320 * 768 // mode 0: 320 * 768 --> 128 * 384 // mode 1: 160 * 384 --> 128 * 384 // mode 2: 80 * 192 --> 128 * 384 // output: 128 * 384 namespace caffe { template <typename Dtype> __global__ void crop_copy_gpu(const int nthreads, int N, int C, int bH, int bW, int tH, int tW, const Dtype* bottom_data, Dtype* top_data, const int offsets_w, const int offsets_h, const Dtype pad_value) { CUDA_KERNEL_LOOP(index, nthreads) { const int t = index % tW; // W index const int s = (index / tW) % tH; // H index const int j = (index / (tW * tH)) % C; // C index const int i = index / (tW * tH * C); // N index int nt = t + offsets_w; int ns = s + offsets_h; int b_index = i * (C * bH * bW) + j * (bH * bW) + bW * ns + nt; if((0<= nt & nt < bW) && (0<= ns & ns < bH)) top_data[index] = bottom_data[b_index]; //top_data[index] = Dtype(0); else top_data[index] = pad_value; } } template <typename Dtype> void ROBLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { for (int i = 0; i < bottom.size(); ++i) { caffe_copy(bottom[i]->count(), bottom[i]->gpu_data(), bottom_copy_[i].get()->mutable_gpu_data()); } std::srand ((unsigned)time(NULL)); // Random select segmentation point Dtype seg = (static_cast<Dtype>(caffe_rng_rand()) / RAND_MAX); seg = seg - int(seg); cout<<"seg = "<<seg<<endl; int dataset = num_dataset_ - 1; for(int i=0; i < num_dataset_; i++){ if(coeffs_[i] <= seg && seg < coeffs_[i+1]){ dataset = i; break; } } // Random select operation mode Dtype select_mode = static_cast<Dtype>(caffe_rng_rand()) / RAND_MAX; select_mode = select_mode - int(select_mode); cout<<"select_mode = "<<select_mode<<endl; int mode = 2; for(int i=0; i < 3; i++){ if(i/Dtype(3.0) <= select_mode && select_mode < (i+1)/Dtype(3.0)){ mode = i; break; } } // In our implementation, there are 4 datasets, i.e., SceneFlow, KITTI, MiddleBury, eth3d. // There 3 modes. // in mode 0, we crop image in the original resolution, and output image whose resolution is // in mode 1, we first downsample the images to 1/2, then crop. The output images' resolution is // in mode 2, we first downsample the images to 1/4, then crop. 
The output images' resolution is // Downsample // mode = 0; cout<<"Choosing dataset = "<<dataset<<endl; cout<<"Operation mode = "<<mode<<endl; downsample_left_half_layer_[dataset]->Forward(downsample_left_half_bottom_vec_[dataset], downsample_left_half_top_vec_[dataset]); downsample_right_half_layer_[dataset]->Forward(downsample_right_half_bottom_vec_[dataset], downsample_right_half_top_vec_[dataset]); downsample_disp_half_layer_[dataset]->Forward(downsample_disp_half_bottom_vec_[dataset], downsample_disp_half_top_vec_[dataset]); downsample_left_quarter_layer_[dataset]->Forward(downsample_left_quarter_bottom_vec_[dataset], downsample_left_quarter_top_vec_[dataset]); downsample_right_quarter_layer_[dataset]->Forward(downsample_right_quarter_bottom_vec_[dataset], downsample_right_quarter_top_vec_[dataset]); downsample_disp_quarter_layer_[dataset]->Forward(downsample_disp_quarter_bottom_vec_[dataset], downsample_disp_quarter_top_vec_[dataset]); const Dtype* tmp_left_data; const Dtype* tmp_right_data; const Dtype* tmp_disp_data; if(mode == 1){ cout<<"half!!"<<endl; tmp_left_data = downsampled_left_half_[dataset].get()->gpu_data(); tmp_right_data = downsampled_right_half_[dataset].get()->gpu_data(); tmp_disp_data = downsampled_disp_half_[dataset].get()->gpu_data(); } if(mode == 2){ cout<<"quarter!!"<<endl; tmp_left_data = downsampled_left_quarter_[dataset].get()->gpu_data(); tmp_right_data = downsampled_right_quarter_[dataset].get()->gpu_data(); tmp_disp_data = downsampled_disp_quarter_[dataset].get()->gpu_data(); } if(mode == 0){ tmp_left_data = bottom[dataset*3]->gpu_data(); tmp_right_data = bottom[dataset*3+1]->gpu_data(); tmp_disp_data = bottom[dataset*3+2]->gpu_data(); } LOG(INFO) << ("crop"); // Random crop int bW = bottom[0]->width(); int bH = bottom[0]->height(); if(mode == 1){ bW = width_half_; bH = height_half_;} if(mode == 2){ bW = width_quarter_; bH = height_quarter_; } const int tW = target_width_; const int tH = target_height_; const int iC = bottom[0]->channels(); // image const int dC = bottom[2]->channels(); // disp const int N = bottom[0]->num(); const int image_count = top[0]->count(); const int disp_count = top[2]->count(); // Random offsets offsets[0] = int(caffe_rng_rand()); offsets[1] = int(caffe_rng_rand()); // offsets[1] = static_cast<int>(abs(caffe_rng_rand())); cout<<"random offsets = ("<<offsets[0]<<","<<offsets[1]<<")"<<endl; if(bW > tW){ offsets[0] = offsets[0] % (bW - tW); offsets[0] = abs(offsets[0]); }else if( bW == tW){ offsets[0] = 0; }else{ offsets[0] = static_cast<int>((bW - tW)/2); } if(bH > tH){ offsets[1] = offsets[1] % (bH - tH); offsets[1] = abs(offsets[1]); }else if( bH == tH){ offsets[1] = 0; }else{ offsets[1] = static_cast<int>((bH - tH)/2); } cout<<"dC, bH, bW, tH, tW = (" <<dC<<", "<<bH<<", "<<bW<<", "<<tH<<", "<<tW<<")"<<endl; cout<<"final offsets = ("<<offsets[0]<<","<<offsets[1]<<")"<<endl; caffe_gpu_set(image_count, (Dtype)0., top[0]->mutable_gpu_data()); caffe_gpu_set(image_count, (Dtype)0., top[1]->mutable_gpu_data()); caffe_gpu_set(disp_count, (Dtype)0., top[2]->mutable_gpu_data()); //LOG(INFO) << "Bottom shape: " << downsampled_left_half_[dataset].get()->shape_string(); //LOG(INFO) << "Top shape: " << top[0]->shape_string(); hipLaunchKernelGGL(( crop_copy_gpu<Dtype>), dim3(CAFFE_GET_BLOCKS(image_count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, image_count, N, iC, bH, bW, tH, tW, tmp_left_data, top[0]->mutable_gpu_data(), offsets[0], offsets[1], Dtype(0.)); hipLaunchKernelGGL(( crop_copy_gpu<Dtype>), dim3(CAFFE_GET_BLOCKS(image_count)), 
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, image_count, N, iC, bH, bW, tH, tW, tmp_right_data, top[1]->mutable_gpu_data(), offsets[0], offsets[1], Dtype(0.)); hipLaunchKernelGGL(( crop_copy_gpu<Dtype>), dim3(CAFFE_GET_BLOCKS(disp_count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, disp_count, N, dC, bH, bW, tH, tW, tmp_disp_data, top[2]->mutable_gpu_data(), offsets[0], offsets[1], std::numeric_limits<Dtype>::signaling_NaN()); //LOG(INFO) << ("Done."); } template <typename Dtype> void ROBLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { return; } INSTANTIATE_LAYER_GPU_FUNCS(ROBLayer); } // namespace caffe
310d12488f124b3f63b74a2b830d645588bbebc4.cu
#include <cfloat> #include <vector> #include "caffe/layers/rob_layer.hpp" #include "caffe/layers/downsample_layer.hpp" #include "caffe/util/math_functions.hpp" #include "caffe/util/rng.hpp" #include <iostream> #include <ctime> #include <math.h> using namespace std; // inputs: 320 * 768 // mode 0: 320 * 768 --> 128 * 384 // mode 1: 160 * 384 --> 128 * 384 // mode 2: 80 * 192 --> 128 * 384 // output: 128 * 384 namespace caffe { template <typename Dtype> __global__ void crop_copy_gpu(const int nthreads, int N, int C, int bH, int bW, int tH, int tW, const Dtype* bottom_data, Dtype* top_data, const int offsets_w, const int offsets_h, const Dtype pad_value) { CUDA_KERNEL_LOOP(index, nthreads) { const int t = index % tW; // W index const int s = (index / tW) % tH; // H index const int j = (index / (tW * tH)) % C; // C index const int i = index / (tW * tH * C); // N index int nt = t + offsets_w; int ns = s + offsets_h; int b_index = i * (C * bH * bW) + j * (bH * bW) + bW * ns + nt; if((0<= nt & nt < bW) && (0<= ns & ns < bH)) top_data[index] = bottom_data[b_index]; //top_data[index] = Dtype(0); else top_data[index] = pad_value; } } template <typename Dtype> void ROBLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { for (int i = 0; i < bottom.size(); ++i) { caffe_copy(bottom[i]->count(), bottom[i]->gpu_data(), bottom_copy_[i].get()->mutable_gpu_data()); } std::srand ((unsigned)time(NULL)); // Random select segmentation point Dtype seg = (static_cast<Dtype>(caffe_rng_rand()) / RAND_MAX); seg = seg - int(seg); cout<<"seg = "<<seg<<endl; int dataset = num_dataset_ - 1; for(int i=0; i < num_dataset_; i++){ if(coeffs_[i] <= seg && seg < coeffs_[i+1]){ dataset = i; break; } } // Random select operation mode Dtype select_mode = static_cast<Dtype>(caffe_rng_rand()) / RAND_MAX; select_mode = select_mode - int(select_mode); cout<<"select_mode = "<<select_mode<<endl; int mode = 2; for(int i=0; i < 3; i++){ if(i/Dtype(3.0) <= select_mode && select_mode < (i+1)/Dtype(3.0)){ mode = i; break; } } // In our implementation, there are 4 datasets, i.e., SceneFlow, KITTI, MiddleBury, eth3d. // There 3 modes. // in mode 0, we crop image in the original resolution, and output image whose resolution is // in mode 1, we first downsample the images to 1/2, then crop. The output images' resolution is // in mode 2, we first downsample the images to 1/4, then crop. 
The output images' resolution is // Downsample // mode = 0; cout<<"Choosing dataset = "<<dataset<<endl; cout<<"Operation mode = "<<mode<<endl; downsample_left_half_layer_[dataset]->Forward(downsample_left_half_bottom_vec_[dataset], downsample_left_half_top_vec_[dataset]); downsample_right_half_layer_[dataset]->Forward(downsample_right_half_bottom_vec_[dataset], downsample_right_half_top_vec_[dataset]); downsample_disp_half_layer_[dataset]->Forward(downsample_disp_half_bottom_vec_[dataset], downsample_disp_half_top_vec_[dataset]); downsample_left_quarter_layer_[dataset]->Forward(downsample_left_quarter_bottom_vec_[dataset], downsample_left_quarter_top_vec_[dataset]); downsample_right_quarter_layer_[dataset]->Forward(downsample_right_quarter_bottom_vec_[dataset], downsample_right_quarter_top_vec_[dataset]); downsample_disp_quarter_layer_[dataset]->Forward(downsample_disp_quarter_bottom_vec_[dataset], downsample_disp_quarter_top_vec_[dataset]); const Dtype* tmp_left_data; const Dtype* tmp_right_data; const Dtype* tmp_disp_data; if(mode == 1){ cout<<"half!!"<<endl; tmp_left_data = downsampled_left_half_[dataset].get()->gpu_data(); tmp_right_data = downsampled_right_half_[dataset].get()->gpu_data(); tmp_disp_data = downsampled_disp_half_[dataset].get()->gpu_data(); } if(mode == 2){ cout<<"quarter!!"<<endl; tmp_left_data = downsampled_left_quarter_[dataset].get()->gpu_data(); tmp_right_data = downsampled_right_quarter_[dataset].get()->gpu_data(); tmp_disp_data = downsampled_disp_quarter_[dataset].get()->gpu_data(); } if(mode == 0){ tmp_left_data = bottom[dataset*3]->gpu_data(); tmp_right_data = bottom[dataset*3+1]->gpu_data(); tmp_disp_data = bottom[dataset*3+2]->gpu_data(); } LOG(INFO) << ("crop"); // Random crop int bW = bottom[0]->width(); int bH = bottom[0]->height(); if(mode == 1){ bW = width_half_; bH = height_half_;} if(mode == 2){ bW = width_quarter_; bH = height_quarter_; } const int tW = target_width_; const int tH = target_height_; const int iC = bottom[0]->channels(); // image const int dC = bottom[2]->channels(); // disp const int N = bottom[0]->num(); const int image_count = top[0]->count(); const int disp_count = top[2]->count(); // Random offsets offsets[0] = int(caffe_rng_rand()); offsets[1] = int(caffe_rng_rand()); // offsets[1] = static_cast<int>(abs(caffe_rng_rand())); cout<<"random offsets = ("<<offsets[0]<<","<<offsets[1]<<")"<<endl; if(bW > tW){ offsets[0] = offsets[0] % (bW - tW); offsets[0] = abs(offsets[0]); }else if( bW == tW){ offsets[0] = 0; }else{ offsets[0] = static_cast<int>((bW - tW)/2); } if(bH > tH){ offsets[1] = offsets[1] % (bH - tH); offsets[1] = abs(offsets[1]); }else if( bH == tH){ offsets[1] = 0; }else{ offsets[1] = static_cast<int>((bH - tH)/2); } cout<<"dC, bH, bW, tH, tW = (" <<dC<<", "<<bH<<", "<<bW<<", "<<tH<<", "<<tW<<")"<<endl; cout<<"final offsets = ("<<offsets[0]<<","<<offsets[1]<<")"<<endl; caffe_gpu_set(image_count, (Dtype)0., top[0]->mutable_gpu_data()); caffe_gpu_set(image_count, (Dtype)0., top[1]->mutable_gpu_data()); caffe_gpu_set(disp_count, (Dtype)0., top[2]->mutable_gpu_data()); //LOG(INFO) << "Bottom shape: " << downsampled_left_half_[dataset].get()->shape_string(); //LOG(INFO) << "Top shape: " << top[0]->shape_string(); crop_copy_gpu<Dtype><<<CAFFE_GET_BLOCKS(image_count), CAFFE_CUDA_NUM_THREADS>>>(image_count, N, iC, bH, bW, tH, tW, tmp_left_data, top[0]->mutable_gpu_data(), offsets[0], offsets[1], Dtype(0.)); crop_copy_gpu<Dtype><<<CAFFE_GET_BLOCKS(image_count), CAFFE_CUDA_NUM_THREADS>>>(image_count, N, iC, bH, bW, tH, tW, 
tmp_right_data, top[1]->mutable_gpu_data(), offsets[0], offsets[1], Dtype(0.)); crop_copy_gpu<Dtype><<<CAFFE_GET_BLOCKS(disp_count), CAFFE_CUDA_NUM_THREADS>>>(disp_count, N, dC, bH, bW, tH, tW, tmp_disp_data, top[2]->mutable_gpu_data(), offsets[0], offsets[1], std::numeric_limits<Dtype>::signaling_NaN()); //LOG(INFO) << ("Done."); } template <typename Dtype> void ROBLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { return; } INSTANTIATE_LAYER_GPU_FUNCS(ROBLayer); } // namespace caffe
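The crop_copy_gpu kernel above flattens an NCHW tensor as index = ((i*C + j)*tH + s)*tW + t and recovers the four coordinates with mod/div. A small self-contained check of that decomposition; the 128x384 shape matches the crop target mentioned in the comments, and C = 3 is an assumed channel count.

#include <cassert>

int main() {
    const int C = 3, tH = 128, tW = 384;  // illustrative NCHW crop shape
    for (int index = 0; index < 2 * C * tH * tW; ++index) {
        int t = index % tW;               // width coordinate
        int s = (index / tW) % tH;        // height coordinate
        int j = (index / (tW * tH)) % C;  // channel
        int i = index / (tW * tH * C);    // batch element
        assert(index == ((i * C + j) * tH + s) * tW + t);
    }
    return 0;
}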
5ce9015c883120092e2cb21e285870833966d19f.hip
// !!! This is a file automatically generated by hipify!!! /* Copyright 2020 Stanford * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "simulator.h" #include "model.h" //#include "realm/runtime_impl.h" //#include "realm/cuda/cuda_module.h" #include "cuda_helper.h" typedef long long int coord_t; typedef Realm::Point<1, coord_t> Point1; typedef Realm::Rect<1, coord_t> Rect1; Simulator::Simulator(const FFModel* model, FFHandler _handler, Memory _memory, MachineModel *machine) : memory(_memory), handler(_handler), offset(0), warmup_times(5), repeat_times(10), computationMode(model->config.computationMode) { // Allocate simulator memory Rect1 bounds(Point1(0), Point1(0)); std::vector<size_t> field_sizes; field_sizes.push_back(model->config.simulator_work_space_size); Realm::RegionInstance::create_instance(simulatorInst, memory, bounds, field_sizes, 0, Realm::ProfilingRequestSet()).wait(); base_ptr = (char*)simulatorInst.pointer_untyped(0, sizeof(char)); capacity = model->config.simulator_work_space_size; size_t max_num_tasks = 1024 * 1024; hipEventCreate(&start_event); hipEventCreate(&end_event); conv2d_meta = new Conv2DMeta(handler); linear_meta = new LinearMeta(handler, 4096); pool2d_meta = new Pool2DMeta(handler); ele_unary_meta = new ElementUnaryMeta(handler); ele_binary_meta = new ElementBinaryMeta(handler); //softmax_meta = new SoftmaxMeta(handler); batch_matmul_meta = new BatchMatmulMeta(handler); concat_meta = new ConcatMeta(handler); //dropout_meta = new DropoutMeta(handler); transpose_meta = new TransposeMeta(handler); this->machine = machine; segment_size = model->config.simulator_segment_size; max_num_segments = model->config.simulator_max_num_segments; // Initialize task manager task_manager = new TaskManager(max_num_tasks); } Simulator::~Simulator(void) { simulatorInst.destroy(); } __host__ void Simulator::strategy_search_task(const Task *task, const std::vector<PhysicalRegion> &regions, Context ctx, Runtime *runtime) { const FFModel* model = *((FFModel**) task->args); Memory gpu_mem = Machine::MemoryQuery(Machine::get_machine()) .only_kind(Memory::GPU_FB_MEM).best_affinity_to(task->target_proc).first(); // Realm::MemoryImpl* memImpl = // Realm::get_runtime()->get_memory_impl(gpu_mem); // Realm::Cuda::GPUFBMemory* memFBImpl = (Realm::Cuda::GPUFBMemory*) memImpl; // off_t offset = memFBImpl->alloc_bytes_local(model->config.simulator_work_space_size); // void* base_ptr = memFBImpl->get_direct_ptr(offset, 0); MachineModel *machine; if (model->config.machine_model_version == 0) { machine = (MachineModel *) new SimpleMachineModel(model->config.numNodes, model->config.workersPerNode, gpu_mem.capacity()); } else if (model->config.machine_model_version == 1 and !model->config.machine_model_file.empty()) { machine = (MachineModel *) new EnhancedMachineModel(model->config.machine_model_file, gpu_mem.capacity()); } else { assert(false && "machine model creation error: currently only support machine-model-version = 0 or 1. 
When machine-model-version = 1, machine-model-file should not be empty."); } // Assume this task is running on GPU0 Simulator* simulator = new Simulator(model, model->handlers[0], gpu_mem, machine); // Set cublas/cudnn streams to allow Realm catch the events #ifndef DISABLE_LEGION_CUDA_HIJACK hipStream_t stream; checkCUDA(hipStreamCreate(&stream)); checkCUDA(hipblasSetStream(simulator->handler.blas, stream)); checkCUDNN(cudnnSetStream(simulator->handler.dnn, stream)); #endif std::map<Op*, ParallelConfig> strategies; if (model->config.import_strategy_file.length() > 0) { // Load the strategy from config.strategies for (size_t l = 0; l < model->layers.size(); l++) { MappingTagID key = FFConfig::get_hash_id(std::string(model->layers[l]->name)); std::map<MappingTagID, ParallelConfig>::const_iterator iter; iter = model->config.strategies.find(key); if (iter == model->config.strategies.end()) { fprintf(stderr, "ERROR: Cannot find strategy for operator %s in " "strategy file %s\n", model->layers[l]->name, model->config.import_strategy_file.c_str()); } strategies[model->layers[l]] = iter->second; } } else { // Start from data parallel for (size_t l = 0; l < model->layers.size(); l++) { strategies[model->layers[l]] = model->layers[l]->get_data_parallel_config(*model); } } if (model->config.computationMode == COMP_MODE_TRAINING) { fprintf(stderr, "MCMC search configuration: budget(%zu) alpha(%.8lf) mode(TRAINING)\n", model->config.search_budget, model->config.search_alpha); } else { fprintf(stderr, "MCMC search configuration: budget(%zu) alpha(%.8lf) mode(INFERENCE)\n", model->config.search_budget, model->config.search_alpha); } model->optimize(simulator, strategies, model->config.search_budget, model->config.search_alpha, model->config.computationMode, model->config.enable_propagation); if (model->config.export_strategy_file.length() > 0) { fprintf(stderr, "Exporting the best discovered strategy to %s.\n", model->config.export_strategy_file.c_str()); std::map<Op*, ParallelConfig>::const_iterator iter; std::map<std::string, ParallelConfig> strategy_output; for (iter = strategies.begin(); iter != strategies.end(); iter++) { strategy_output[iter->first->name] = iter->second; } save_strategies_to_file(model->config.export_strategy_file, strategy_output); fprintf(stderr, "To use the strategy for distributed training, restart" " FlexFlow and import the strategy (i.e., --import %s)\n", model->config.export_strategy_file.c_str()); exit(0); } else { fprintf(stderr, "The best discovered strategy is not exported.\n" "Please set a path to export the strategy using --export or --export-strategy.\n"); exit(0); } // Start from data // memFBImpl->free_bytes_local(offset, model->config.simulator_work_space_size); delete(simulator); delete(machine); }
5ce9015c883120092e2cb21e285870833966d19f.cu
/* Copyright 2020 Stanford * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "simulator.h" #include "model.h" //#include "realm/runtime_impl.h" //#include "realm/cuda/cuda_module.h" #include "cuda_helper.h" typedef long long int coord_t; typedef Realm::Point<1, coord_t> Point1; typedef Realm::Rect<1, coord_t> Rect1; Simulator::Simulator(const FFModel* model, FFHandler _handler, Memory _memory, MachineModel *machine) : memory(_memory), handler(_handler), offset(0), warmup_times(5), repeat_times(10), computationMode(model->config.computationMode) { // Allocate simulator memory Rect1 bounds(Point1(0), Point1(0)); std::vector<size_t> field_sizes; field_sizes.push_back(model->config.simulator_work_space_size); Realm::RegionInstance::create_instance(simulatorInst, memory, bounds, field_sizes, 0, Realm::ProfilingRequestSet()).wait(); base_ptr = (char*)simulatorInst.pointer_untyped(0, sizeof(char)); capacity = model->config.simulator_work_space_size; size_t max_num_tasks = 1024 * 1024; cudaEventCreate(&start_event); cudaEventCreate(&end_event); conv2d_meta = new Conv2DMeta(handler); linear_meta = new LinearMeta(handler, 4096); pool2d_meta = new Pool2DMeta(handler); ele_unary_meta = new ElementUnaryMeta(handler); ele_binary_meta = new ElementBinaryMeta(handler); //softmax_meta = new SoftmaxMeta(handler); batch_matmul_meta = new BatchMatmulMeta(handler); concat_meta = new ConcatMeta(handler); //dropout_meta = new DropoutMeta(handler); transpose_meta = new TransposeMeta(handler); this->machine = machine; segment_size = model->config.simulator_segment_size; max_num_segments = model->config.simulator_max_num_segments; // Initialize task manager task_manager = new TaskManager(max_num_tasks); } Simulator::~Simulator(void) { simulatorInst.destroy(); } __host__ void Simulator::strategy_search_task(const Task *task, const std::vector<PhysicalRegion> &regions, Context ctx, Runtime *runtime) { const FFModel* model = *((FFModel**) task->args); Memory gpu_mem = Machine::MemoryQuery(Machine::get_machine()) .only_kind(Memory::GPU_FB_MEM).best_affinity_to(task->target_proc).first(); // Realm::MemoryImpl* memImpl = // Realm::get_runtime()->get_memory_impl(gpu_mem); // Realm::Cuda::GPUFBMemory* memFBImpl = (Realm::Cuda::GPUFBMemory*) memImpl; // off_t offset = memFBImpl->alloc_bytes_local(model->config.simulator_work_space_size); // void* base_ptr = memFBImpl->get_direct_ptr(offset, 0); MachineModel *machine; if (model->config.machine_model_version == 0) { machine = (MachineModel *) new SimpleMachineModel(model->config.numNodes, model->config.workersPerNode, gpu_mem.capacity()); } else if (model->config.machine_model_version == 1 and !model->config.machine_model_file.empty()) { machine = (MachineModel *) new EnhancedMachineModel(model->config.machine_model_file, gpu_mem.capacity()); } else { assert(false && "machine model creation error: currently only support machine-model-version = 0 or 1. 
When machine-model-version = 1, machine-model-file should not be empty."); } // Assume this task is running on GPU0 Simulator* simulator = new Simulator(model, model->handlers[0], gpu_mem, machine); // Set cublas/cudnn streams to allow Realm catch the events #ifndef DISABLE_LEGION_CUDA_HIJACK cudaStream_t stream; checkCUDA(cudaStreamCreate(&stream)); checkCUDA(cublasSetStream(simulator->handler.blas, stream)); checkCUDNN(cudnnSetStream(simulator->handler.dnn, stream)); #endif std::map<Op*, ParallelConfig> strategies; if (model->config.import_strategy_file.length() > 0) { // Load the strategy from config.strategies for (size_t l = 0; l < model->layers.size(); l++) { MappingTagID key = FFConfig::get_hash_id(std::string(model->layers[l]->name)); std::map<MappingTagID, ParallelConfig>::const_iterator iter; iter = model->config.strategies.find(key); if (iter == model->config.strategies.end()) { fprintf(stderr, "ERROR: Cannot find strategy for operator %s in " "strategy file %s\n", model->layers[l]->name, model->config.import_strategy_file.c_str()); } strategies[model->layers[l]] = iter->second; } } else { // Start from data parallel for (size_t l = 0; l < model->layers.size(); l++) { strategies[model->layers[l]] = model->layers[l]->get_data_parallel_config(*model); } } if (model->config.computationMode == COMP_MODE_TRAINING) { fprintf(stderr, "MCMC search configuration: budget(%zu) alpha(%.8lf) mode(TRAINING)\n", model->config.search_budget, model->config.search_alpha); } else { fprintf(stderr, "MCMC search configuration: budget(%zu) alpha(%.8lf) mode(INFERENCE)\n", model->config.search_budget, model->config.search_alpha); } model->optimize(simulator, strategies, model->config.search_budget, model->config.search_alpha, model->config.computationMode, model->config.enable_propagation); if (model->config.export_strategy_file.length() > 0) { fprintf(stderr, "Exporting the best discovered strategy to %s.\n", model->config.export_strategy_file.c_str()); std::map<Op*, ParallelConfig>::const_iterator iter; std::map<std::string, ParallelConfig> strategy_output; for (iter = strategies.begin(); iter != strategies.end(); iter++) { strategy_output[iter->first->name] = iter->second; } save_strategies_to_file(model->config.export_strategy_file, strategy_output); fprintf(stderr, "To use the strategy for distributed training, restart" " FlexFlow and import the strategy (i.e., --import %s)\n", model->config.export_strategy_file.c_str()); exit(0); } else { fprintf(stderr, "The best discovered strategy is not exported.\n" "Please set a path to export the strategy using --export or --export-strategy.\n"); exit(0); } // Start from data // memFBImpl->free_bytes_local(offset, model->config.simulator_work_space_size); delete(simulator); delete(machine); }
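The Simulator above creates start_event/end_event and carries warmup_times = 5 and repeat_times = 10. A minimal sketch of the event-based timing loop those fields suggest; my_kernel and the launch shape are placeholders, not FlexFlow code.

__global__ void my_kernel() { /* placeholder workload */ }

// Average per-launch time in milliseconds, after warm-up launches.
float time_kernel(int warmup_times, int repeat_times) {
    cudaEvent_t start_event, end_event;
    cudaEventCreate(&start_event);
    cudaEventCreate(&end_event);
    for (int i = 0; i < warmup_times; ++i) my_kernel<<<1, 32>>>();
    cudaEventRecord(start_event);
    for (int i = 0; i < repeat_times; ++i) my_kernel<<<1, 32>>>();
    cudaEventRecord(end_event);
    cudaEventSynchronize(end_event);   // wait for the timed launches
    float ms = 0.0f;
    cudaEventElapsedTime(&ms, start_event, end_event);
    cudaEventDestroy(start_event);
    cudaEventDestroy(end_event);
    return ms / repeat_times;
}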
e9dcca4c6b1111c64c3f372cdfa0100c8dd8abb4.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" //pass //--gridDim=1 --blockDim=2 --no-inline //This kernel is racy. // //The memcpy resolves to a non-integer number of element writes so we have to //handle the arrays in and out at the byte-level. #define memcpy(dst, src, len) __builtin_memcpy(dst, src, len) typedef struct { short x; short y; } s_t; //< sizeof(s_t) == 4 __global__ void k(s_t *in, s_t *out) { size_t len = 5; memcpy(&out[threadIdx.x], &in[threadIdx.x], len); }
e9dcca4c6b1111c64c3f372cdfa0100c8dd8abb4.cu
//pass //--gridDim=1 --blockDim=2 --no-inline //This kernel is racy. // //The memcpy resolves to a non-integer number of element writes so we have to //handle the arrays in and out at the byte-level. #define memcpy(dst, src, len) __builtin_memcpy(dst, src, len) typedef struct { short x; short y; } s_t; //< sizeof(s_t) == 4 __global__ void k(s_t *in, s_t *out) { size_t len = 5; memcpy(&out[threadIdx.x], &in[threadIdx.x], len); }
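Why the pair above is racy: sizeof(s_t) is 4, but each thread copies len = 5 bytes starting at element threadIdx.x, so thread 0 writes the first byte of out[1] while thread 1 writes all of out[1]. Copying whole elements removes the overlap; a minimal race-free variant for contrast:

typedef struct { short x; short y; } s_t;  // sizeof(s_t) == 4, as above

// Race-free: each thread touches exactly its own 4-byte element.
__global__ void k_fixed(s_t *in, s_t *out) {
    out[threadIdx.x] = in[threadIdx.x];
}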
9e654a631345ab0bc1cea259792fb5aade122115.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" extern "C" __global__ __launch_bounds__(256) void sgemm_tn_128x128_vec( const float *param_A, const float *param_B, float *param_C, float param_alpha, float param_beta, int param_lda8, int param_ldb8, int param_ldc, int param_m, int param_n, int param_k) { __shared__ float share[128 * 8 * 4 + 32]; int tid = threadIdx.x; share[tid] = 1; }
9e654a631345ab0bc1cea259792fb5aade122115.cu
extern "C" __global__ __launch_bounds__(256) void sgemm_tn_128x128_vec( const float *param_A, const float *param_B, float *param_C, float param_alpha, float param_beta, int param_lda8, int param_ldb8, int param_ldc, int param_m, int param_n, int param_k) { __shared__ float share[128 * 8 * 4 + 32]; int tid = threadIdx.x; share[tid] = 1; }
54c502f1c2983566b59b4049c15dae9c61b553a5.hip
// !!! This is a file automatically generated by hipify!!! #include "GPUErrors.h" bool HandleCUDAError(hipError_t t) { if (t != hipSuccess) { cout << hipGetErrorString(t) << endl; return false; } return true; } bool GetCUDARunTimeError() { hipError_t t = hipGetLastError(); if (t != hipSuccess) { cout << hipGetErrorString(t) << endl; return false; } return true; }
54c502f1c2983566b59b4049c15dae9c61b553a5.cu
#include "GPUErrors.h" bool HandleCUDAError(cudaError_t t) { if (t != cudaSuccess) { cout << cudaGetErrorString(cudaGetLastError()); return false; } return true; } bool GetCUDARunTimeError() { cudaError_t t = cudaGetLastError(); if (t != cudaSuccess) { cout << cudaGetErrorString(t) << endl; return false; } return true; }
6712a762c1464c027b229b961b620f126048eb2e.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <cutil.h> #include <sys/time.h> #include "../lib/bed.h" #include "../lib/set_intersect.h" #include "radixsort.h" //#include "gpu.hpp" #include "random.hpp" #include "../lib/timer.h" #include "set_intersect_cuda.h" int main(int argc, char *argv[]) { if (argc < 6) { fprintf(stderr, "usage: order <u> <a> <b> " "<reps> <inter N> <sum N> <device>\n" "e.g., order U.bed A.bed B.bed 10000 1 1024 1\n"); return 1; } int chrom_num = 24; CUDA_SAFE_CALL( hipSetDevice( atoi(argv[7] ) ) ); /***********************REPLACE WITH INPUT FILE************************/ char *chrom_names[] = { "chr1", "chr2", "chr3", "chr4", "chr5", "chr6", "chr7", "chr8", "chr9", "chr10", "chr11", "chr12", "chr13", "chr14", "chr15", "chr16", "chr17", "chr18", "chr19", "chr20", "chr21", "chr22", "chrX", "chrY" }; /**********************************************************************/ struct chr_list *U, *A, *B; char *U_file = argv[1], *A_file = argv[2], *B_file = argv[3]; int reps = atoi(argv[4]); int inter_threads = atoi(argv[5]); int sum_threads = atoi(argv[6]); if ( ( chr_list_from_bed_file(&U, chrom_names, chrom_num, U_file) == 1) || ( chr_list_from_bed_file(&A, chrom_names, chrom_num, A_file) == 1) || ( chr_list_from_bed_file(&B, chrom_names, chrom_num, B_file) == 1) ) { fprintf(stderr, "Error parsing bed files.\n"); return 1; } unsigned int max = add_offsets(U, chrom_num); trim(U, A, chrom_num); trim(U, B, chrom_num); int A_size, B_size, U_size; struct bed_line *U_array, *A_array, *B_array; U_size = chr_array_from_list(U, &U_array, chrom_num); A_size = chr_array_from_list(A, &A_array, chrom_num); B_size = chr_array_from_list(B, &B_array, chrom_num); unsigned int *A_key_h = (unsigned int *) malloc( (A_size) * sizeof(unsigned int)); unsigned int *A_val_h = (unsigned int *) malloc( (A_size) * sizeof(unsigned int)); unsigned int *B_key_h = (unsigned int *) malloc( (B_size) * sizeof(unsigned int)); unsigned int *B_val_h = (unsigned int *) malloc( (B_size) * sizeof(unsigned int)); /* * In CUDA we can sort key value pairs, * the key can be the offset, and the value can be the length */ set_start_len( U_array, U_size, A_array, A_key_h, A_val_h, A_size ); set_start_len( U_array, U_size, B_array, B_key_h, B_val_h, B_size ); // Move A and B to deviceB unsigned int *A_key_d, *A_val_d, *B_key_d, *B_val_d; hipMalloc((void **)&A_key_d, (A_size)*sizeof(unsigned int)); hipMalloc((void **)&A_val_d, (A_size)*sizeof(unsigned int)); hipMalloc((void **)&B_key_d, (B_size)*sizeof(unsigned int)); hipMalloc((void **)&B_val_d, (B_size)*sizeof(unsigned int)); start(); hipMemcpy(A_key_d, A_key_h, (A_size) * sizeof(unsigned int), hipMemcpyHostToDevice); hipMemcpy(A_val_d, A_val_h, (A_size) * sizeof(unsigned int), hipMemcpyHostToDevice); hipMemcpy(B_key_d, B_key_h, (B_size) * sizeof(unsigned int), hipMemcpyHostToDevice); hipMemcpy(B_val_d, B_val_h, (B_size) * sizeof(unsigned int), hipMemcpyHostToDevice); stop(); // R will hold the results of the intersection, for each interval A[i], // R[i] will be the number of intervals in B that A[i] intersects, unsigned int *R_d; hipMalloc((void **)&R_d, (A_size)*sizeof(unsigned int)); unsigned long memup_time = report(); int block_size = 256; dim3 dimBlock(block_size); // *_key_d holds the start position, and *_val_d holds the length, // the end position is *_key_d + *_val_d // // Each thread will search |reps| items in A, we will keep the blocksize // fixed at 256, but we will 
need to adjust the grid size int grid_size = ( A_size + block_size - 1) / (block_size * 1); dim3 dimGridSearch( grid_size ); hipError_t err; // Sort A nvRadixSort::RadixSort radixsortA(A_size, false); radixsortA.sort((unsigned int*)A_key_d, (unsigned int*)A_val_d, A_size, 32); // Sort B nvRadixSort::RadixSort radixsortB(B_size, false); radixsortB.sort((unsigned int*)B_key_d, (unsigned int*)B_val_d, B_size, 32); hipDeviceSynchronize(); stop(); unsigned long sort_time = report(); unsigned int *R_h = (unsigned int *) malloc( A_size * sizeof(unsigned int)); err = hipGetLastError(); if(err != hipSuccess) fprintf(stderr, "Sort: %s.\n", hipGetErrorString( err) ); start(); hipLaunchKernelGGL(( intersection_b_search_sm) , dim3(dimGridSearch), dim3( dimBlock), 2000 * sizeof(unsigned int), 0, A_key_d, A_val_d, A_size, B_key_d, B_val_d, B_size, R_d, inter_threads); hipDeviceSynchronize(); stop(); unsigned long search_time = report(); start(); parallel_sum(R_d, block_size, A_size, sum_threads); stop(); unsigned long sum_time = report(); unsigned int O; start(); hipMemcpy(&O, R_d, 1 * sizeof(unsigned int), hipMemcpyDeviceToHost); stop(); unsigned long memdown_time = report(); unsigned long total = memup_time + sort_time + search_time + sum_time + memdown_time; fprintf(stderr,"O:%d\n", O); printf( "t:%ld\t" "up:%ld,%G\t" "sort:%ld,%G\t" "search:%ld,%G\t" "sum:%ld,%G\t" "down:%ld,%G\n", total, memup_time, (double)memup_time / (double)total, sort_time, (double)sort_time / (double)total, search_time, (double)search_time / (double)total, sum_time, (double)sum_time / (double)total, memdown_time, (double)memdown_time / (double)total ); hipFree(A_key_d); hipFree(B_key_d); hipFree(R_d); return 0; }
6712a762c1464c027b229b961b620f126048eb2e.cu
#include <stdio.h> #include <stdlib.h> #include <cuda.h> #include <cutil.h> #include <sys/time.h> #include "../lib/bed.h" #include "../lib/set_intersect.h" #include "radixsort.h" //#include "gpu.hpp" #include "random.hpp" #include "../lib/timer.h" #include "set_intersect_cuda.h" int main(int argc, char *argv[]) { if (argc < 8) { fprintf(stderr, "usage: order <u> <a> <b> " "<reps> <inter N> <sum N> <device>\n" "e.g., order U.bed A.bed B.bed 10000 1 1024 1\n"); return 1; } int chrom_num = 24; CUDA_SAFE_CALL( cudaSetDevice( atoi(argv[7] ) ) ); /***********************REPLACE WITH INPUT FILE************************/ char *chrom_names[] = { "chr1", "chr2", "chr3", "chr4", "chr5", "chr6", "chr7", "chr8", "chr9", "chr10", "chr11", "chr12", "chr13", "chr14", "chr15", "chr16", "chr17", "chr18", "chr19", "chr20", "chr21", "chr22", "chrX", "chrY" }; /**********************************************************************/ struct chr_list *U, *A, *B; char *U_file = argv[1], *A_file = argv[2], *B_file = argv[3]; int reps = atoi(argv[4]); int inter_threads = atoi(argv[5]); int sum_threads = atoi(argv[6]); if ( ( chr_list_from_bed_file(&U, chrom_names, chrom_num, U_file) == 1) || ( chr_list_from_bed_file(&A, chrom_names, chrom_num, A_file) == 1) || ( chr_list_from_bed_file(&B, chrom_names, chrom_num, B_file) == 1) ) { fprintf(stderr, "Error parsing bed files.\n"); return 1; } unsigned int max = add_offsets(U, chrom_num); trim(U, A, chrom_num); trim(U, B, chrom_num); int A_size, B_size, U_size; struct bed_line *U_array, *A_array, *B_array; U_size = chr_array_from_list(U, &U_array, chrom_num); A_size = chr_array_from_list(A, &A_array, chrom_num); B_size = chr_array_from_list(B, &B_array, chrom_num); unsigned int *A_key_h = (unsigned int *) malloc( (A_size) * sizeof(unsigned int)); unsigned int *A_val_h = (unsigned int *) malloc( (A_size) * sizeof(unsigned int)); unsigned int *B_key_h = (unsigned int *) malloc( (B_size) * sizeof(unsigned int)); unsigned int *B_val_h = (unsigned int *) malloc( (B_size) * sizeof(unsigned int)); /* * In CUDA we can sort key value pairs, * the key can be the offset, and the value can be the length */ set_start_len( U_array, U_size, A_array, A_key_h, A_val_h, A_size ); set_start_len( U_array, U_size, B_array, B_key_h, B_val_h, B_size ); // Move A and B to device unsigned int *A_key_d, *A_val_d, *B_key_d, *B_val_d; cudaMalloc((void **)&A_key_d, (A_size)*sizeof(unsigned int)); cudaMalloc((void **)&A_val_d, (A_size)*sizeof(unsigned int)); cudaMalloc((void **)&B_key_d, (B_size)*sizeof(unsigned int)); cudaMalloc((void **)&B_val_d, (B_size)*sizeof(unsigned int)); start(); cudaMemcpy(A_key_d, A_key_h, (A_size) * sizeof(unsigned int), cudaMemcpyHostToDevice); cudaMemcpy(A_val_d, A_val_h, (A_size) * sizeof(unsigned int), cudaMemcpyHostToDevice); cudaMemcpy(B_key_d, B_key_h, (B_size) * sizeof(unsigned int), cudaMemcpyHostToDevice); cudaMemcpy(B_val_d, B_val_h, (B_size) * sizeof(unsigned int), cudaMemcpyHostToDevice); stop(); // R will hold the results of the intersection, for each interval A[i], // R[i] will be the number of intervals in B that A[i] intersects, unsigned int *R_d; cudaMalloc((void **)&R_d, (A_size)*sizeof(unsigned int)); unsigned long memup_time = report(); int block_size = 256; dim3 dimBlock(block_size); // *_key_d holds the start position, and *_val_d holds the length, // the end position is *_key_d + *_val_d // // Each thread will search |reps| items in A, we will keep the blocksize // fixed at 256, but we will need to adjust the grid size int grid_size = ( A_size + 
block_size - 1) / (block_size * 1); dim3 dimGridSearch( grid_size ); cudaError_t err; // Sort A nvRadixSort::RadixSort radixsortA(A_size, false); radixsortA.sort((unsigned int*)A_key_d, (unsigned int*)A_val_d, A_size, 32); // Sort B nvRadixSort::RadixSort radixsortB(B_size, false); radixsortB.sort((unsigned int*)B_key_d, (unsigned int*)B_val_d, B_size, 32); cudaThreadSynchronize(); stop(); unsigned long sort_time = report(); unsigned int *R_h = (unsigned int *) malloc( A_size * sizeof(unsigned int)); err = cudaGetLastError(); if(err != cudaSuccess) fprintf(stderr, "Sort: %s.\n", cudaGetErrorString( err) ); start(); intersection_b_search_sm <<< dimGridSearch, dimBlock, 2000 * sizeof(unsigned int)>>> ( A_key_d, A_val_d, A_size, B_key_d, B_val_d, B_size, R_d, inter_threads); cudaThreadSynchronize(); stop(); unsigned long search_time = report(); start(); parallel_sum(R_d, block_size, A_size, sum_threads); stop(); unsigned long sum_time = report(); unsigned int O; start(); cudaMemcpy(&O, R_d, 1 * sizeof(unsigned int), cudaMemcpyDeviceToHost); stop(); unsigned long memdown_time = report(); unsigned long total = memup_time + sort_time + search_time + sum_time + memdown_time; fprintf(stderr,"O:%d\n", O); printf( "t:%ld\t" "up:%ld,%G\t" "sort:%ld,%G\t" "search:%ld,%G\t" "sum:%ld,%G\t" "down:%ld,%G\n", total, memup_time, (double)memup_time / (double)total, sort_time, (double)sort_time / (double)total, search_time, (double)search_time / (double)total, sum_time, (double)sum_time / (double)total, memdown_time, (double)memdown_time / (double)total ); cudaFree(A_key_d); cudaFree(B_key_d); cudaFree(R_d); return 0; }
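The device kernel intersection_b_search_sm is declared in set_intersect_cuda.h and is not reproduced in this pair. For orientation, here is a minimal CUDA sketch of the underlying idea: one thread per interval of the sorted A array, a binary search over the sorted B starts to bound the candidates, and a per-thread overlap count written to R. All names in the sketch are hypothetical; it omits the shared-memory staging and the inter_threads work-splitting of the real kernel, treats intervals as half-open, and falls back to a naive candidate scan.

__device__ int lower_bound_u32(const unsigned int *arr, int n, unsigned int key)
{
    // first index whose value is >= key
    int lo = 0, hi = n;
    while (lo < hi) {
        int mid = lo + (hi - lo) / 2;
        if (arr[mid] < key) lo = mid + 1;
        else hi = mid;
    }
    return lo;
}

// Hedged sketch, not the actual intersection_b_search_sm: counts, for each
// interval A[i] = [A_start[i], A_start[i] + A_len[i]), how many intervals of
// B (sorted by start position) it overlaps.
__global__ void count_overlaps_sketch(const unsigned int *A_start,
                                      const unsigned int *A_len, int A_size,
                                      const unsigned int *B_start,
                                      const unsigned int *B_len, int B_size,
                                      unsigned int *R)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= A_size) return;
    unsigned int a_lo = A_start[i], a_hi = a_lo + A_len[i];
    // every overlapping B interval starts before a_hi; binary search bounds them
    int last = lower_bound_u32(B_start, B_size, a_hi);
    unsigned int count = 0;
    for (int j = 0; j < last; j++)   // naive candidate scan; O(B) worst case
        if (B_start[j] + B_len[j] > a_lo) count++;
    R[i] = count;
}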
4db793eab2d9c11e6316cff3c159445835ff86e7.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" __global__ void clean(unsigned int * e, int n) { e[threadIdx.x % n] = 0; }
4db793eab2d9c11e6316cff3c159445835ff86e7.cu
#include "includes.h" __global__ void clean(unsigned int * e, int n) { e[threadIdx.x % n] = 0; }
ad07f6bfbbaef169e54c286df34a4ed8c4d77cf3.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "src/cuda/transpose/transpose.cuh" #include "megdnn/dtype.h" #include "src/cuda/utils.cuh" namespace { // launch (16, 16) threads template <typename T> __global__ void kernel( const T* A, T* B, uint32_t m, uint32_t n, uint32_t LDA, uint32_t LDB) { __shared__ T cache[16][16]; { uint32_t y = threadIdx.y + blockIdx.y * 16; uint32_t x = threadIdx.x + blockIdx.x * 16; if (y < m && x < n) cache[threadIdx.y][threadIdx.x] = A[y * LDA + x]; } __syncthreads(); { // variable is idx wrt B rather than A (so x/y is swapped) uint32_t x = threadIdx.x + blockIdx.y * 16; uint32_t y = threadIdx.y + blockIdx.x * 16; if (y < n && x < m) B[y * LDB + x] = cache[threadIdx.x][threadIdx.y]; } } } // anonymous namespace namespace megdnn { namespace cuda { template <typename T> void transpose( const T* A, T* B, size_t m, size_t n, size_t LDA, size_t LDB, hipStream_t stream) { dim3 threads(16, 16); dim3 blocks(DIVUP(n, 16), DIVUP(m, 16)); hipLaunchKernelGGL(( kernel<T>), dim3(blocks), dim3(threads), 0, stream, A, B, m, n, LDA, LDB); after_kernel_launch(); } #define INST(T) \ template void transpose<T>( \ const T*, T*, size_t, size_t, size_t, size_t, hipStream_t); #define cb(DType) INST(typename DTypeTrait<DType>::ctype) MEGDNN_FOREACH_COMPUTING_DTYPE(cb) #undef cb #undef INST } // namespace cuda } // namespace megdnn // vim: syntax=cpp.doxygen
ad07f6bfbbaef169e54c286df34a4ed8c4d77cf3.cu
#include "src/cuda/transpose/transpose.cuh" #include "megdnn/dtype.h" #include "src/cuda/utils.cuh" namespace { // launch (16, 16) threads template <typename T> __global__ void kernel( const T* A, T* B, uint32_t m, uint32_t n, uint32_t LDA, uint32_t LDB) { __shared__ T cache[16][16]; { uint32_t y = threadIdx.y + blockIdx.y * 16; uint32_t x = threadIdx.x + blockIdx.x * 16; if (y < m && x < n) cache[threadIdx.y][threadIdx.x] = A[y * LDA + x]; } __syncthreads(); { // variable is idx wrt B rather than A (so x/y is swapped) uint32_t x = threadIdx.x + blockIdx.y * 16; uint32_t y = threadIdx.y + blockIdx.x * 16; if (y < n && x < m) B[y * LDB + x] = cache[threadIdx.x][threadIdx.y]; } } } // anonymous namespace namespace megdnn { namespace cuda { template <typename T> void transpose( const T* A, T* B, size_t m, size_t n, size_t LDA, size_t LDB, cudaStream_t stream) { dim3 threads(16, 16); dim3 blocks(DIVUP(n, 16), DIVUP(m, 16)); kernel<T><<<blocks, threads, 0, stream>>>(A, B, m, n, LDA, LDB); after_kernel_launch(); } #define INST(T) \ template void transpose<T>( \ const T*, T*, size_t, size_t, size_t, size_t, cudaStream_t); #define cb(DType) INST(typename DTypeTrait<DType>::ctype) MEGDNN_FOREACH_COMPUTING_DTYPE(cb) #undef cb #undef INST } // namespace cuda } // namespace megdnn // vim: syntax=cpp.doxygen
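One detail worth noting in the kernel above: the write phase reads the shared tile column-wise (cache[threadIdx.x][threadIdx.y]), so the 16 threads of a half-warp all hit the same shared-memory bank. The standard remedy, shown here as an assumed optimization rather than anything present in the file, is to pad the tile by one column:

template <typename T>
__global__ void transpose_padded(
        const T* A, T* B, uint32_t m, uint32_t n, uint32_t LDA, uint32_t LDB) {
    // 17 columns instead of 16: consecutive rows of one column land in
    // different banks, so the transposed read is conflict-free
    __shared__ T cache[16][17];
    {
        uint32_t y = threadIdx.y + blockIdx.y * 16;
        uint32_t x = threadIdx.x + blockIdx.x * 16;
        if (y < m && x < n) cache[threadIdx.y][threadIdx.x] = A[y * LDA + x];
    }
    __syncthreads();
    {
        // idx wrt B rather than A, as in the original kernel
        uint32_t x = threadIdx.x + blockIdx.y * 16;
        uint32_t y = threadIdx.y + blockIdx.x * 16;
        if (y < n && x < m) B[y * LDB + x] = cache[threadIdx.x][threadIdx.y];
    }
}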
219dcc4edbada0440af696a55044a38b5a300094.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include<stdio.h> // Matrices are stored in row-major order: // M(row, col) = *(M.elements + row * M.width + col) typedef struct { int width; int height; float* elements; } Matrix; // Thread block size #define BLOCK_SIZE 32 // Forward declaration of the matrix multiplication kernel __global__ void MatMulKernel(const Matrix, const Matrix, Matrix); // Matrix multiplication - Host code // Matrix dimensions are assumed to be multiples of BLOCK_SIZE void MatMul(const Matrix A, const Matrix B, Matrix C) { // Load A and B to device memory Matrix d_A; d_A.width = A.width; d_A.height = A.height; size_t size = A.width * A.height * sizeof(float); hipMalloc(&d_A.elements, size); hipMemcpy(d_A.elements, A.elements, size, hipMemcpyHostToDevice); Matrix d_B; d_B.width = B.width; d_B.height = B.height; size = B.width * B.height * sizeof(float); hipMalloc(&d_B.elements, size); hipMemcpy(d_B.elements, B.elements, size, hipMemcpyHostToDevice); // Allocate C in device memory Matrix d_C; d_C.width = C.width; d_C.height = C.height; size = C.width * C.height * sizeof(float); hipMalloc(&d_C.elements, size); // Invoke kernel dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE); dim3 dimGrid(B.width / dimBlock.x, A.height / dimBlock.y); hipLaunchKernelGGL(( MatMulKernel), dim3(dimGrid), dim3(dimBlock), 0, 0, d_A, d_B, d_C); // Read C from device memory hipMemcpy(C.elements, d_C.elements, size, hipMemcpyDeviceToHost); // Free device memory hipFree(d_A.elements); hipFree(d_B.elements); hipFree(d_C.elements); } // Matrix multiplication kernel called by MatMul() __global__ void MatMulKernel(Matrix A, Matrix B, Matrix C) { // Each thread computes one element of C // by accumulating results into Cvalue float Cvalue = 0; int row = blockIdx.y * blockDim.y + threadIdx.y; int col = blockIdx.x * blockDim.x + threadIdx.x; for (int e = 0; e < A.width; ++e) Cvalue += A.elements[row * A.width + e] * B.elements[e * B.width + col]; C.elements[row * C.width + col] = Cvalue; } void init (float * v, int n) { int i; for(i =0;i<n;i++) { v[i]=1; } } int main (int argc, char ** argv) { int n; scanf("%d",&n); if(n%BLOCK_SIZE !=0) { n = n + (BLOCK_SIZE - n%BLOCK_SIZE); } Matrix a,b,c; a.width = a.height=n; b.width = b.height=n; c.width = c.height=n; c.elements = (float*) malloc( sizeof(float)*n*n); a.elements =(float*) malloc( sizeof(float)*n*n); b.elements =(float*) malloc( sizeof(float)*n*n); init(a.elements,n*n); init(b.elements,n*n); MatMul(a,b,c); /*for(int i =0;i<n*n;i++) { printf("%f\n",c.elements[i]); }*/ return 0; }
219dcc4edbada0440af696a55044a38b5a300094.cu
#include<stdio.h> // Matrices are stored in row-major order: // M(row, col) = *(M.elements + row * M.width + col) typedef struct { int width; int height; float* elements; } Matrix; // Thread block size #define BLOCK_SIZE 32 // Forward declaration of the matrix multiplication kernel __global__ void MatMulKernel(const Matrix, const Matrix, Matrix); // Matrix multiplication - Host code // Matrix dimensions are assumed to be multiples of BLOCK_SIZE void MatMul(const Matrix A, const Matrix B, Matrix C) { // Load A and B to device memory Matrix d_A; d_A.width = A.width; d_A.height = A.height; size_t size = A.width * A.height * sizeof(float); cudaMalloc(&d_A.elements, size); cudaMemcpy(d_A.elements, A.elements, size, cudaMemcpyHostToDevice); Matrix d_B; d_B.width = B.width; d_B.height = B.height; size = B.width * B.height * sizeof(float); cudaMalloc(&d_B.elements, size); cudaMemcpy(d_B.elements, B.elements, size, cudaMemcpyHostToDevice); // Allocate C in device memory Matrix d_C; d_C.width = C.width; d_C.height = C.height; size = C.width * C.height * sizeof(float); cudaMalloc(&d_C.elements, size); // Invoke kernel dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE); dim3 dimGrid(B.width / dimBlock.x, A.height / dimBlock.y); MatMulKernel<<<dimGrid, dimBlock>>>(d_A, d_B, d_C); // Read C from device memory cudaMemcpy(C.elements, d_C.elements, size, cudaMemcpyDeviceToHost); // Free device memory cudaFree(d_A.elements); cudaFree(d_B.elements); cudaFree(d_C.elements); } // Matrix multiplication kernel called by MatMul() __global__ void MatMulKernel(Matrix A, Matrix B, Matrix C) { // Each thread computes one element of C // by accumulating results into Cvalue float Cvalue = 0; int row = blockIdx.y * blockDim.y + threadIdx.y; int col = blockIdx.x * blockDim.x + threadIdx.x; for (int e = 0; e < A.width; ++e) Cvalue += A.elements[row * A.width + e] * B.elements[e * B.width + col]; C.elements[row * C.width + col] = Cvalue; } void init (float * v, int n) { int i; for(i =0;i<n;i++) { v[i]=1; } } int main (int argc, char ** argv) { int n; scanf("%d",&n); if(n%BLOCK_SIZE !=0) { n = n + (BLOCK_SIZE - n%BLOCK_SIZE); } Matrix a,b,c; a.width = a.height=n; b.width = b.height=n; c.width = c.height=n; c.elements = (float*) malloc( sizeof(float)*n*n); a.elements =(float*) malloc( sizeof(float)*n*n); b.elements =(float*) malloc( sizeof(float)*n*n); init(a.elements,n*n); init(b.elements,n*n); MatMul(a,b,c); /*for(int i =0;i<n*n;i++) { printf("%f\n",c.elements[i]); }*/ return 0; }
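The kernel in this pair is the naive variant: every thread streams A and B straight from global memory, so each element is fetched BLOCK_SIZE times more often than necessary. A shared-memory tiled version is the usual next step for this example; the sketch below is an assumed extension (not part of the file) and, like the original, assumes all dimensions are multiples of BLOCK_SIZE.

__global__ void MatMulTiledKernel(Matrix A, Matrix B, Matrix C)
{
    __shared__ float As[BLOCK_SIZE][BLOCK_SIZE];
    __shared__ float Bs[BLOCK_SIZE][BLOCK_SIZE];
    int row = blockIdx.y * BLOCK_SIZE + threadIdx.y;
    int col = blockIdx.x * BLOCK_SIZE + threadIdx.x;
    float Cvalue = 0;
    // march tile-by-tile along the shared dimension
    for (int t = 0; t < A.width / BLOCK_SIZE; ++t) {
        As[threadIdx.y][threadIdx.x] =
            A.elements[row * A.width + t * BLOCK_SIZE + threadIdx.x];
        Bs[threadIdx.y][threadIdx.x] =
            B.elements[(t * BLOCK_SIZE + threadIdx.y) * B.width + col];
        __syncthreads();                 // tile fully loaded before use
        for (int e = 0; e < BLOCK_SIZE; ++e)
            Cvalue += As[threadIdx.y][e] * Bs[e][threadIdx.x];
        __syncthreads();                 // done with tile before next load
    }
    C.elements[row * C.width + col] = Cvalue;
}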
7c1e34c66fe8dab441a4c2beb860275eb83e7d41.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" //-------------------------------------------------------------------------// // // // This benchmark is a serial C version of the NPB SP code. This C // // version is developed by the Center for Manycore Programming at Seoul // // National University and derived from the serial Fortran versions in // // "NPB3.3-SER" developed by NAS. // // // // Permission to use, copy, distribute and modify this software for any // // purpose with or without fee is hereby granted. This software is // // provided "as is" without express or implied warranty. // // // // Information on NPB 3.3, including the technical report, the original // // specifications, source code, results and information on how to submit // // new results, is available at: // // // // http://www.nas.nasa.gov/Software/NPB/ // // // // Send comments or suggestions for this C version to [email protected] // // // // Center for Manycore Programming // // School of Computer Science and Engineering // // Seoul National University // // Seoul 151-744, Korea // // // // E-mail: [email protected] // // // //-------------------------------------------------------------------------// //-------------------------------------------------------------------------// // Authors: Sangmin Seo, Jungwon Kim, Jun Lee, Jeongho Nah, Gangwon Jo, // // and Jaejin Lee // //-------------------------------------------------------------------------// #include "header.h" #include <assert.h> #include "exact_solution.cu" __global__ void initialize_kernel( dim3 gridOffset, int* grid_points, double (*u)/*[KMAX]*/[5][JMAXP+1][IMAXP+1], double dnxm1, double dnym1, double dnzm1 ) { int i = blockDim.x * blockIdx.x + threadIdx.x + gridOffset.x; int j = blockDim.y * blockIdx.y + threadIdx.y + gridOffset.y; int k = blockDim.z * blockIdx.z + threadIdx.z + gridOffset.z; int m, ix, iy, iz; double xi, eta, zeta, Pface[2][3][5], Pxi, Peta, Pzeta, temp[5]; //--------------------------------------------------------------------- // Later (in compute_rhs) we compute 1/u for every element. A few of // the corner elements are not used, but it convenient (and faster) // to compute the whole thing with a simple loop. Make sure those // values are nonzero by initializing the whole thing here. 
//--------------------------------------------------------------------- if (k >= 0 && k <= grid_points[2]-1) { if (j >= 0 && j <= grid_points[1]-1) { if (i >= 0 && i <= grid_points[0]-1) { u[k][0][j][i] = 1.0; u[k][1][j][i] = 0.0; u[k][2][j][i] = 0.0; u[k][3][j][i] = 0.0; u[k][4][j][i] = 1.0; } } } //--------------------------------------------------------------------- // first store the "interpolated" values everywhere on the grid //--------------------------------------------------------------------- if (k >= 0 && k <= grid_points[2]-1) { zeta = (double)k * dnzm1; if (j >= 0 && j <= grid_points[1]-1) { eta = (double)j * dnym1; if (i >= 0 && i <= grid_points[0]-1) { xi = (double)i * dnxm1; for (ix = 0; ix < 2; ix++) { Pxi = (double)ix; exact_solution(Pxi, eta, zeta, &Pface[ix][0][0]); } for (iy = 0; iy < 2; iy++) { Peta = (double)iy; exact_solution(xi, Peta, zeta, &Pface[iy][1][0]); } for (iz = 0; iz < 2; iz++) { Pzeta = (double)iz; exact_solution(xi, eta, Pzeta, &Pface[iz][2][0]); } for (m = 0; m < 5; m++) { Pxi = xi * Pface[1][0][m] + (1.0-xi) * Pface[0][0][m]; Peta = eta * Pface[1][1][m] + (1.0-eta) * Pface[0][1][m]; Pzeta = zeta * Pface[1][2][m] + (1.0-zeta) * Pface[0][2][m]; u[k][m][j][i] = Pxi + Peta + Pzeta - Pxi*Peta - Pxi*Pzeta - Peta*Pzeta + Pxi*Peta*Pzeta; } } } } //--------------------------------------------------------------------- // now store the exact values on the boundaries //--------------------------------------------------------------------- //--------------------------------------------------------------------- // west face //--------------------------------------------------------------------- xi = 0.0; if (i == 0) if (k >= 0 && k <= grid_points[2]-1) { zeta = (double)k * dnzm1; if (j >= 0 && j <= grid_points[1]-1) { eta = (double)j * dnym1; exact_solution(xi, eta, zeta, temp); for (m = 0; m < 5; m++) { u[k][m][j][i] = temp[m]; } } } //--------------------------------------------------------------------- // east face //--------------------------------------------------------------------- xi = 1.0; if (i == grid_points[0]-1) if (k >= 0 && k <= grid_points[2]-1) { zeta = (double)k * dnzm1; if (j >= 0 && j <= grid_points[1]-1) { eta = (double)j * dnym1; exact_solution(xi, eta, zeta, temp); for (m = 0; m < 5; m++) { u[k][m][j][i] = temp[m]; } } } //--------------------------------------------------------------------- // south face //--------------------------------------------------------------------- eta = 0.0; if (j == 0) if (k >= 0 && k <= grid_points[2]-1) { zeta = (double)k * dnzm1; if (i >= 0 && i <= grid_points[0]-1) { xi = (double)i * dnxm1; exact_solution(xi, eta, zeta, temp); for (m = 0; m < 5; m++) { u[k][m][j][i] = temp[m]; } } } //--------------------------------------------------------------------- // north face //--------------------------------------------------------------------- eta = 1.0; if (j == grid_points[1]-1) if (k >= 0 && k <= grid_points[2]-1) { zeta = (double)k * dnzm1; if (i >= 0 && i <= grid_points[0]-1) { xi = (double)i * dnxm1; exact_solution(xi, eta, zeta, temp); for (m = 0; m < 5; m++) { u[k][m][j][i] = temp[m]; } } } //--------------------------------------------------------------------- // bottom face //--------------------------------------------------------------------- zeta = 0.0; if (k == 0) if (j >= 0 && j <= grid_points[1]-1) { eta = (double)j * dnym1; if (i >= 0 && i <= grid_points[0]-1) { xi = (double)i * dnxm1; exact_solution(xi, eta, zeta, temp); for (m = 0; m < 5; m++) { u[k][m][j][i] = temp[m]; } } } 
//--------------------------------------------------------------------- // top face //--------------------------------------------------------------------- zeta = 1.0; if (k == grid_points[2]-1) if (j >= 0 && j <= grid_points[1]-1) { eta = (double)j * dnym1; if (i >= 0 && i <= grid_points[0]-1) { xi = (double)i * dnxm1; exact_solution(xi, eta, zeta, temp); for (m = 0; m < 5; m++) { u[k][m][j][i] = temp[m]; } } } } //--------------------------------------------------------------------- // This subroutine initializes the field variable u using // tri-linear transfinite interpolation of the boundary values //--------------------------------------------------------------------- void initialize() { if (timeron) timer_start(t_init); hipLaunchKernelGGL(( initialize_kernel) , dim3(gridDim_), dim3(blockDim_) , 0, 0, gridOffset, dev_grid_points[device], dev_u[device], dnxm1, dnym1, dnzm1); if (timeron) timer_stop(t_init); }
7c1e34c66fe8dab441a4c2beb860275eb83e7d41.cu
//-------------------------------------------------------------------------// // // // This benchmark is a serial C version of the NPB SP code. This C // // version is developed by the Center for Manycore Programming at Seoul // // National University and derived from the serial Fortran versions in // // "NPB3.3-SER" developed by NAS. // // // // Permission to use, copy, distribute and modify this software for any // // purpose with or without fee is hereby granted. This software is // // provided "as is" without express or implied warranty. // // // // Information on NPB 3.3, including the technical report, the original // // specifications, source code, results and information on how to submit // // new results, is available at: // // // // http://www.nas.nasa.gov/Software/NPB/ // // // // Send comments or suggestions for this C version to [email protected] // // // // Center for Manycore Programming // // School of Computer Science and Engineering // // Seoul National University // // Seoul 151-744, Korea // // // // E-mail: [email protected] // // // //-------------------------------------------------------------------------// //-------------------------------------------------------------------------// // Authors: Sangmin Seo, Jungwon Kim, Jun Lee, Jeongho Nah, Gangwon Jo, // // and Jaejin Lee // //-------------------------------------------------------------------------// #include "header.h" #include <assert.h> #include "exact_solution.cu" __global__ void initialize_kernel( dim3 gridOffset, int* grid_points, double (*u)/*[KMAX]*/[5][JMAXP+1][IMAXP+1], double dnxm1, double dnym1, double dnzm1 ) { int i = blockDim.x * blockIdx.x + threadIdx.x + gridOffset.x; int j = blockDim.y * blockIdx.y + threadIdx.y + gridOffset.y; int k = blockDim.z * blockIdx.z + threadIdx.z + gridOffset.z; int m, ix, iy, iz; double xi, eta, zeta, Pface[2][3][5], Pxi, Peta, Pzeta, temp[5]; //--------------------------------------------------------------------- // Later (in compute_rhs) we compute 1/u for every element. A few of // the corner elements are not used, but it convenient (and faster) // to compute the whole thing with a simple loop. Make sure those // values are nonzero by initializing the whole thing here. 
//--------------------------------------------------------------------- if (k >= 0 && k <= grid_points[2]-1) { if (j >= 0 && j <= grid_points[1]-1) { if (i >= 0 && i <= grid_points[0]-1) { u[k][0][j][i] = 1.0; u[k][1][j][i] = 0.0; u[k][2][j][i] = 0.0; u[k][3][j][i] = 0.0; u[k][4][j][i] = 1.0; } } } //--------------------------------------------------------------------- // first store the "interpolated" values everywhere on the grid //--------------------------------------------------------------------- if (k >= 0 && k <= grid_points[2]-1) { zeta = (double)k * dnzm1; if (j >= 0 && j <= grid_points[1]-1) { eta = (double)j * dnym1; if (i >= 0 && i <= grid_points[0]-1) { xi = (double)i * dnxm1; for (ix = 0; ix < 2; ix++) { Pxi = (double)ix; exact_solution(Pxi, eta, zeta, &Pface[ix][0][0]); } for (iy = 0; iy < 2; iy++) { Peta = (double)iy; exact_solution(xi, Peta, zeta, &Pface[iy][1][0]); } for (iz = 0; iz < 2; iz++) { Pzeta = (double)iz; exact_solution(xi, eta, Pzeta, &Pface[iz][2][0]); } for (m = 0; m < 5; m++) { Pxi = xi * Pface[1][0][m] + (1.0-xi) * Pface[0][0][m]; Peta = eta * Pface[1][1][m] + (1.0-eta) * Pface[0][1][m]; Pzeta = zeta * Pface[1][2][m] + (1.0-zeta) * Pface[0][2][m]; u[k][m][j][i] = Pxi + Peta + Pzeta - Pxi*Peta - Pxi*Pzeta - Peta*Pzeta + Pxi*Peta*Pzeta; } } } } //--------------------------------------------------------------------- // now store the exact values on the boundaries //--------------------------------------------------------------------- //--------------------------------------------------------------------- // west face //--------------------------------------------------------------------- xi = 0.0; if (i == 0) if (k >= 0 && k <= grid_points[2]-1) { zeta = (double)k * dnzm1; if (j >= 0 && j <= grid_points[1]-1) { eta = (double)j * dnym1; exact_solution(xi, eta, zeta, temp); for (m = 0; m < 5; m++) { u[k][m][j][i] = temp[m]; } } } //--------------------------------------------------------------------- // east face //--------------------------------------------------------------------- xi = 1.0; if (i == grid_points[0]-1) if (k >= 0 && k <= grid_points[2]-1) { zeta = (double)k * dnzm1; if (j >= 0 && j <= grid_points[1]-1) { eta = (double)j * dnym1; exact_solution(xi, eta, zeta, temp); for (m = 0; m < 5; m++) { u[k][m][j][i] = temp[m]; } } } //--------------------------------------------------------------------- // south face //--------------------------------------------------------------------- eta = 0.0; if (j == 0) if (k >= 0 && k <= grid_points[2]-1) { zeta = (double)k * dnzm1; if (i >= 0 && i <= grid_points[0]-1) { xi = (double)i * dnxm1; exact_solution(xi, eta, zeta, temp); for (m = 0; m < 5; m++) { u[k][m][j][i] = temp[m]; } } } //--------------------------------------------------------------------- // north face //--------------------------------------------------------------------- eta = 1.0; if (j == grid_points[1]-1) if (k >= 0 && k <= grid_points[2]-1) { zeta = (double)k * dnzm1; if (i >= 0 && i <= grid_points[0]-1) { xi = (double)i * dnxm1; exact_solution(xi, eta, zeta, temp); for (m = 0; m < 5; m++) { u[k][m][j][i] = temp[m]; } } } //--------------------------------------------------------------------- // bottom face //--------------------------------------------------------------------- zeta = 0.0; if (k == 0) if (j >= 0 && j <= grid_points[1]-1) { eta = (double)j * dnym1; if (i >= 0 && i <= grid_points[0]-1) { xi = (double)i * dnxm1; exact_solution(xi, eta, zeta, temp); for (m = 0; m < 5; m++) { u[k][m][j][i] = temp[m]; } } } 
//--------------------------------------------------------------------- // top face //--------------------------------------------------------------------- zeta = 1.0; if (k == grid_points[2]-1) if (j >= 0 && j <= grid_points[1]-1) { eta = (double)j * dnym1; if (i >= 0 && i <= grid_points[0]-1) { xi = (double)i * dnxm1; exact_solution(xi, eta, zeta, temp); for (m = 0; m < 5; m++) { u[k][m][j][i] = temp[m]; } } } } //--------------------------------------------------------------------- // This subroutine initializes the field variable u using // tri-linear transfinite interpolation of the boundary values //--------------------------------------------------------------------- void initialize() { if (timeron) timer_start(t_init); initialize_kernel <<< gridDim_, blockDim_ >>> (gridOffset, dev_grid_points[device], dev_u[device], dnxm1, dnym1, dnzm1); if (timeron) timer_stop(t_init); }
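The interior fill in initialize_kernel above is trilinear transfinite interpolation of the six exact face solutions. With $P_\xi = \xi\,\mathrm{Pface}[1][0][m] + (1-\xi)\,\mathrm{Pface}[0][0][m]$ (and likewise for $\eta$ and $\zeta$), each component $m$ of $u$ is

$$ u_m = P_\xi + P_\eta + P_\zeta - P_\xi P_\eta - P_\xi P_\zeta - P_\eta P_\zeta + P_\xi P_\eta P_\zeta . $$

This blend is only approximate on the faces themselves, which is why the kernel afterwards overwrites all six boundary faces (west/east, south/north, bottom/top) with the exact solution.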
ea892c2bff7e4aa3407cccd23083915a7fe6fc06.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "THHUNN.h" #include "common.h" // kernels borrowed from Caffe template <typename Dtype> __global__ void MaxPoolForward(const int nthreads, const Dtype* bottom_data, const int num, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, Dtype* top_data, Dtype* top_mask) { CUDA_KERNEL_LOOP(index, nthreads) { int pw = index % pooled_width; int ph = (index / pooled_width) % pooled_height; int c = (index / pooled_width / pooled_height) % channels; int n = index / pooled_width / pooled_height / channels; int hstart = ph * stride_h - pad_h; int wstart = pw * stride_w - pad_w; int hend = min(hstart + kernel_h, height); int wend = min(wstart + kernel_w, width); hstart = max(hstart, 0); wstart = max(wstart, 0); Dtype maxval = -FLT_MAX; int maxidx = -1; bottom_data += (n * channels + c) * height * width; for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { if (bottom_data[h * width + w] > maxval) { maxidx = h * width + w; maxval = bottom_data[maxidx]; } } } top_data[index] = maxval; top_mask[index] = maxidx + 1; } } template <typename Dtype> __global__ void MaxPoolBackward(const int nthreads, const Dtype* top_diff, const Dtype* top_mask, const int num, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, Dtype* bottom_diff) { CUDA_KERNEL_LOOP(index, nthreads) { // find out the local index // find out the local offset int w = index % width; int h = (index / width) % height; int c = (index / width / height) % channels; int n = index / width / height / channels; int phstart = (h + pad_h < kernel_h) ? 0 : (h + pad_h - kernel_h) / stride_h + 1; int phend = min((h + pad_h) / stride_h + 1, pooled_height); int pwstart = (w + pad_w < kernel_w) ? 
0 : (w + pad_w - kernel_w) / stride_w + 1; int pwend = min((w + pad_w) / stride_w + 1, pooled_width); Dtype gradient = 0; int offset = (n * channels + c) * pooled_height * pooled_width; top_diff += offset; top_mask += offset; for (int ph = phstart; ph < phend; ++ph) { for (int pw = pwstart; pw < pwend; ++pw) { if (top_mask[ph * pooled_width + pw] - 1 == h * width + w) { gradient += top_diff[ph * pooled_width + pw]; } } } bottom_diff[index] = gradient; } } void THNN_CudaSpatialMaxPooling_updateOutput(THCState *state, THCudaTensor *input, THCudaTensor *output, THCudaTensor *indices, int kW, int kH, int dW, int dH, int padW, int padH, bool ceil_mode) { THCUNN_assertSameGPU(state, 3, input, output, indices); THArgCheck(input->nDimension == 3 || input->nDimension == 4, 2, "3D or 4D (batch) tensor expected"); long nInputCols, nInputRows, nInputPlane, batchSize; long nOutputCols, nOutputRows; if (input->nDimension == 3) { nInputCols = input->size[2]; nInputRows = input->size[1]; nInputPlane = input->size[0]; batchSize = 1; } else { nInputCols = input->size[3]; nInputRows = input->size[2]; nInputPlane = input->size[1]; batchSize = input->size[0]; } THArgCheck(nInputCols >= kW - padW && nInputRows >= kH - padH, 2, "input image smaller than kernel size"); THArgCheck(kW/2 >= padW && kH/2 >= padH, 2, "pad should be smaller than half of kernel size"); if(ceil_mode) { nOutputCols = ceil(float(nInputCols - kW + 2*padW) / float(dW)) + 1; nOutputRows = ceil(float(nInputRows - kH + 2*padH) / float(dH)) + 1; } else { nOutputCols = floor(float(nInputCols - kW + 2*padW) / float(dW)) + 1; nOutputRows = floor(float(nInputRows - kH + 2*padH) / float(dH)) + 1; } if (padW || padH) { // ensure that the last pooling starts inside the image if ((nOutputRows - 1)*dH >= nInputRows + padH) --nOutputRows; if ((nOutputCols - 1)*dW >= nInputCols + padW) --nOutputCols; } input = THCudaTensor_newContiguous(state, input); float* input_data = THCudaTensor_data(state, input); THCudaTensor_resize4d(state, output, batchSize, nInputPlane, nOutputRows, nOutputCols); THCudaTensor_resizeAs(state, indices, output); float* indices_data = THCudaTensor_data(state, indices); float* output_data = THCudaTensor_data(state, output); int count = THCudaTensor_nElement(state, output); hipLaunchKernelGGL(( MaxPoolForward) , dim3(GET_BLOCKS(count)), dim3(CUDA_NUM_THREADS), 0, THCState_getCurrentStream(state) , count, input_data, batchSize, nInputPlane, nInputRows, nInputCols, nOutputRows, nOutputCols, kH, kW, dH, dW, padH, padW, output_data, indices_data); if(input->nDimension == 3) THCudaTensor_resize3d(state, output, nInputPlane, nOutputRows, nOutputCols); THCudaTensor_free(state, input); // check for errors hipError_t err = hipGetLastError(); if (err != hipSuccess) { printf("error in SpatialMaxPooling.updateOutput: %s\n", hipGetErrorString(err)); THError("aborting"); } } void THNN_CudaSpatialMaxPooling_updateGradInput(THCState *state, THCudaTensor *input, THCudaTensor *gradOutput, THCudaTensor *gradInput, THCudaTensor *indices, int kW, int kH, int dW, int dH, int padW, int padH, bool ceil_mode) { THCUNN_assertSameGPU(state, 4, input, gradOutput, indices, gradInput); input = THCudaTensor_newContiguous(state, input); gradOutput = THCudaTensor_newContiguous(state, gradOutput); long nInputCols, nInputRows, nInputPlane, batchSize; long nOutputCols, nOutputRows; if (input->nDimension == 3) { nInputCols = input->size[2]; nInputRows = input->size[1]; nInputPlane = input->size[0]; batchSize = 1; } else { nInputCols = input->size[3]; nInputRows = 
input->size[2]; nInputPlane = input->size[1]; batchSize = input->size[0]; } if(ceil_mode) { nOutputCols = ceil(float(nInputCols - kW + 2*padW) / float(dW)) + 1; nOutputRows = ceil(float(nInputRows - kH + 2*padH) / float(dH)) + 1; } else { nOutputCols = floor(float(nInputCols - kW + 2*padW) / float(dW)) + 1; nOutputRows = floor(float(nInputRows - kH + 2*padH) / float(dH)) + 1; } gradOutput = THCudaTensor_newContiguous(state, gradOutput); THCudaTensor_resizeAs(state, gradInput, input); int count = THCudaTensor_nElement(state, input); hipLaunchKernelGGL(( MaxPoolBackward) , dim3(GET_BLOCKS(count)), dim3(CUDA_NUM_THREADS), 0, THCState_getCurrentStream(state) , count, THCudaTensor_data(state, gradOutput), THCudaTensor_data(state, indices), batchSize, nInputPlane, nInputRows, nInputCols, nOutputRows, nOutputCols, kH, kW, dH, dW, padH, padW, THCudaTensor_data(state, gradInput)); THCudaTensor_free(state, gradOutput); // check for errors hipError_t err = hipGetLastError(); if (err != hipSuccess) { printf("error in SpatialMaxPooling.updateGradInput: %s\n", hipGetErrorString(err)); THError("aborting"); } // clean THCudaTensor_free(state, input); THCudaTensor_free(state, gradOutput); }
ea892c2bff7e4aa3407cccd23083915a7fe6fc06.cu
#include "THCUNN.h" #include "common.h" // kernels borrowed from Caffe template <typename Dtype> __global__ void MaxPoolForward(const int nthreads, const Dtype* bottom_data, const int num, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, Dtype* top_data, Dtype* top_mask) { CUDA_KERNEL_LOOP(index, nthreads) { int pw = index % pooled_width; int ph = (index / pooled_width) % pooled_height; int c = (index / pooled_width / pooled_height) % channels; int n = index / pooled_width / pooled_height / channels; int hstart = ph * stride_h - pad_h; int wstart = pw * stride_w - pad_w; int hend = min(hstart + kernel_h, height); int wend = min(wstart + kernel_w, width); hstart = max(hstart, 0); wstart = max(wstart, 0); Dtype maxval = -FLT_MAX; int maxidx = -1; bottom_data += (n * channels + c) * height * width; for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { if (bottom_data[h * width + w] > maxval) { maxidx = h * width + w; maxval = bottom_data[maxidx]; } } } top_data[index] = maxval; top_mask[index] = maxidx + 1; } } template <typename Dtype> __global__ void MaxPoolBackward(const int nthreads, const Dtype* top_diff, const Dtype* top_mask, const int num, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, Dtype* bottom_diff) { CUDA_KERNEL_LOOP(index, nthreads) { // find out the local index // find out the local offset int w = index % width; int h = (index / width) % height; int c = (index / width / height) % channels; int n = index / width / height / channels; int phstart = (h + pad_h < kernel_h) ? 0 : (h + pad_h - kernel_h) / stride_h + 1; int phend = min((h + pad_h) / stride_h + 1, pooled_height); int pwstart = (w + pad_w < kernel_w) ? 
0 : (w + pad_w - kernel_w) / stride_w + 1; int pwend = min((w + pad_w) / stride_w + 1, pooled_width); Dtype gradient = 0; int offset = (n * channels + c) * pooled_height * pooled_width; top_diff += offset; top_mask += offset; for (int ph = phstart; ph < phend; ++ph) { for (int pw = pwstart; pw < pwend; ++pw) { if (top_mask[ph * pooled_width + pw] - 1 == h * width + w) { gradient += top_diff[ph * pooled_width + pw]; } } } bottom_diff[index] = gradient; } } void THNN_CudaSpatialMaxPooling_updateOutput(THCState *state, THCudaTensor *input, THCudaTensor *output, THCudaTensor *indices, int kW, int kH, int dW, int dH, int padW, int padH, bool ceil_mode) { THCUNN_assertSameGPU(state, 3, input, output, indices); THArgCheck(input->nDimension == 3 || input->nDimension == 4, 2, "3D or 4D (batch) tensor expected"); long nInputCols, nInputRows, nInputPlane, batchSize; long nOutputCols, nOutputRows; if (input->nDimension == 3) { nInputCols = input->size[2]; nInputRows = input->size[1]; nInputPlane = input->size[0]; batchSize = 1; } else { nInputCols = input->size[3]; nInputRows = input->size[2]; nInputPlane = input->size[1]; batchSize = input->size[0]; } THArgCheck(nInputCols >= kW - padW && nInputRows >= kH - padH, 2, "input image smaller than kernel size"); THArgCheck(kW/2 >= padW && kH/2 >= padH, 2, "pad should be smaller than half of kernel size"); if(ceil_mode) { nOutputCols = ceil(float(nInputCols - kW + 2*padW) / float(dW)) + 1; nOutputRows = ceil(float(nInputRows - kH + 2*padH) / float(dH)) + 1; } else { nOutputCols = floor(float(nInputCols - kW + 2*padW) / float(dW)) + 1; nOutputRows = floor(float(nInputRows - kH + 2*padH) / float(dH)) + 1; } if (padW || padH) { // ensure that the last pooling starts inside the image if ((nOutputRows - 1)*dH >= nInputRows + padH) --nOutputRows; if ((nOutputCols - 1)*dW >= nInputCols + padW) --nOutputCols; } input = THCudaTensor_newContiguous(state, input); float* input_data = THCudaTensor_data(state, input); THCudaTensor_resize4d(state, output, batchSize, nInputPlane, nOutputRows, nOutputCols); THCudaTensor_resizeAs(state, indices, output); float* indices_data = THCudaTensor_data(state, indices); float* output_data = THCudaTensor_data(state, output); int count = THCudaTensor_nElement(state, output); MaxPoolForward <<< GET_BLOCKS(count), CUDA_NUM_THREADS, 0, THCState_getCurrentStream(state) >>> (count, input_data, batchSize, nInputPlane, nInputRows, nInputCols, nOutputRows, nOutputCols, kH, kW, dH, dW, padH, padW, output_data, indices_data); if(input->nDimension == 3) THCudaTensor_resize3d(state, output, nInputPlane, nOutputRows, nOutputCols); THCudaTensor_free(state, input); // check for errors cudaError_t err = cudaGetLastError(); if (err != cudaSuccess) { printf("error in SpatialMaxPooling.updateOutput: %s\n", cudaGetErrorString(err)); THError("aborting"); } } void THNN_CudaSpatialMaxPooling_updateGradInput(THCState *state, THCudaTensor *input, THCudaTensor *gradOutput, THCudaTensor *gradInput, THCudaTensor *indices, int kW, int kH, int dW, int dH, int padW, int padH, bool ceil_mode) { THCUNN_assertSameGPU(state, 4, input, gradOutput, indices, gradInput); input = THCudaTensor_newContiguous(state, input); gradOutput = THCudaTensor_newContiguous(state, gradOutput); long nInputCols, nInputRows, nInputPlane, batchSize; long nOutputCols, nOutputRows; if (input->nDimension == 3) { nInputCols = input->size[2]; nInputRows = input->size[1]; nInputPlane = input->size[0]; batchSize = 1; } else { nInputCols = input->size[3]; nInputRows = input->size[2]; nInputPlane = 
input->size[1]; batchSize = input->size[0]; } if(ceil_mode) { nOutputCols = ceil(float(nInputCols - kW + 2*padW) / float(dW)) + 1; nOutputRows = ceil(float(nInputRows - kH + 2*padH) / float(dH)) + 1; } else { nOutputCols = floor(float(nInputCols - kW + 2*padW) / float(dW)) + 1; nOutputRows = floor(float(nInputRows - kH + 2*padH) / float(dH)) + 1; } gradOutput = THCudaTensor_newContiguous(state, gradOutput); THCudaTensor_resizeAs(state, gradInput, input); int count = THCudaTensor_nElement(state, input); MaxPoolBackward <<< GET_BLOCKS(count), CUDA_NUM_THREADS, 0, THCState_getCurrentStream(state) >>> (count, THCudaTensor_data(state, gradOutput), THCudaTensor_data(state, indices), batchSize, nInputPlane, nInputRows, nInputCols, nOutputRows, nOutputCols, kH, kW, dH, dW, padH, padW, THCudaTensor_data(state, gradInput)); THCudaTensor_free(state, gradOutput); // check for errors cudaError_t err = cudaGetLastError(); if (err != cudaSuccess) { printf("error in SpatialMaxPooling.updateGradInput: %s\n", cudaGetErrorString(err)); THError("aborting"); } // clean THCudaTensor_free(state, input); THCudaTensor_free(state, gradOutput); }
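The window bounds in MaxPoolBackward come from inverting the forward indexing. Output window $ph$ reads input rows $[ph \cdot s - p,\; ph \cdot s - p + k)$ for stride $s$, pad $p$, kernel $k$, so the windows whose extent contains input row $h$ are exactly

$$ \left\lfloor \frac{h + p - k}{s} \right\rfloor + 1 \;\le\; ph \;\le\; \left\lfloor \frac{h + p}{s} \right\rfloor , $$

clamped to $[0, \text{pooled\_height})$; the `(h + pad_h < kernel_h) ? 0 : ...` test guards the case where the left numerator would go negative. The same bound with $w$, pad_w and kernel_w gives pwstart/pwend, and the stored one-based mask index lets each input element accumulate gradient only from windows where it was the argmax.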
d4b109f29b9dcf426df6d5107dbcb9a346bd4206.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime.h> #include <stdio.h> #define BLOCK_SIZE 16 __global__ void lud_diagonal(float *m, int matrix_dim, int offset) { int i,j; __shared__ float shadow[BLOCK_SIZE][BLOCK_SIZE]; int array_offset = offset*matrix_dim+offset; for(i=0; i < BLOCK_SIZE; i++){ shadow[i][threadIdx.x]=m[array_offset+threadIdx.x]; array_offset += matrix_dim; } __syncthreads(); for(i=0; i < BLOCK_SIZE-1; i++) { if (threadIdx.x>i){ for(j=0; j < i; j++) shadow[threadIdx.x][i] -= shadow[threadIdx.x][j]*shadow[j][i]; shadow[threadIdx.x][i] /= shadow[i][i]; } __syncthreads(); if (threadIdx.x>i) { for(j=0; j < i+1; j++) shadow[i+1][threadIdx.x] -= shadow[i+1][j]*shadow[j][threadIdx.x]; } __syncthreads(); } /* The first row is not modified, it is no need to write it back to the global memory */ array_offset = (offset+1)*matrix_dim+offset; for(i=1; i < BLOCK_SIZE; i++){ m[array_offset+threadIdx.x]=shadow[i][threadIdx.x]; array_offset += matrix_dim; } } __global__ void lud_perimeter(float *m, int matrix_dim, int offset) { __shared__ float dia[BLOCK_SIZE][BLOCK_SIZE]; __shared__ float peri_row[BLOCK_SIZE][BLOCK_SIZE]; __shared__ float peri_col[BLOCK_SIZE][BLOCK_SIZE]; int i,j, array_offset; int idx; if (threadIdx.x < BLOCK_SIZE) { idx = threadIdx.x; array_offset = offset*matrix_dim+offset; for (i=0; i < BLOCK_SIZE/2; i++){ dia[i][idx]=m[array_offset+idx]; array_offset += matrix_dim; } array_offset = offset*matrix_dim+offset; for (i=0; i < BLOCK_SIZE; i++) { peri_row[i][idx]=m[array_offset+(blockIdx.x+1)*BLOCK_SIZE+idx]; array_offset += matrix_dim; } } else { idx = threadIdx.x-BLOCK_SIZE; array_offset = (offset+BLOCK_SIZE/2)*matrix_dim+offset; for (i=BLOCK_SIZE/2; i < BLOCK_SIZE; i++){ dia[i][idx]=m[array_offset+idx]; array_offset += matrix_dim; } array_offset = (offset+(blockIdx.x+1)*BLOCK_SIZE)*matrix_dim+offset; for (i=0; i < BLOCK_SIZE; i++) { peri_col[i][idx] = m[array_offset+idx]; array_offset += matrix_dim; } } __syncthreads(); if (threadIdx.x < BLOCK_SIZE) { //peri-row idx=threadIdx.x; for(i=1; i < BLOCK_SIZE; i++){ for (j=0; j < i; j++) peri_row[i][idx]-=dia[i][j]*peri_row[j][idx]; } array_offset = (offset+1)*matrix_dim+offset; for(i=1; i < BLOCK_SIZE; i++){ m[array_offset+(blockIdx.x+1)*BLOCK_SIZE+idx] = peri_row[i][idx]; array_offset += matrix_dim; } } else { //peri-col idx=threadIdx.x - BLOCK_SIZE; for(i=0; i < BLOCK_SIZE; i++){ for(j=0; j < i; j++) peri_col[idx][i]-=peri_col[idx][j]*dia[j][i]; peri_col[idx][i] /= dia[i][i]; } } __syncthreads(); if (threadIdx.x >= BLOCK_SIZE) { array_offset = (offset+(blockIdx.x+1)*BLOCK_SIZE)*matrix_dim+offset; for(i=0; i < BLOCK_SIZE; i++){ m[array_offset+idx] = peri_col[i][idx]; array_offset += matrix_dim; } } } __global__ void lud_internal(float *m, int matrix_dim, int offset) { __shared__ float peri_row[BLOCK_SIZE][BLOCK_SIZE]; __shared__ float peri_col[BLOCK_SIZE][BLOCK_SIZE]; int i; float sum; int global_row_id = offset + (blockIdx.y+1)*BLOCK_SIZE; int global_col_id = offset + (blockIdx.x+1)*BLOCK_SIZE; peri_row[threadIdx.y][threadIdx.x] = m[(offset+threadIdx.y)*matrix_dim+global_col_id+threadIdx.x]; peri_col[threadIdx.y][threadIdx.x] = m[(global_row_id+threadIdx.y)*matrix_dim+offset+threadIdx.x]; __syncthreads(); sum = 0; for (i=0; i < BLOCK_SIZE; i++) sum += peri_col[threadIdx.y][i] * peri_row[i][threadIdx.x]; m[(global_row_id+threadIdx.y)*matrix_dim+global_col_id+threadIdx.x] -= sum; } void lud_cuda(float *m, int matrix_dim) { int i=0; dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE); float *m_debug = 
(float*)malloc(matrix_dim*matrix_dim*sizeof(float)); for (i=0; i < matrix_dim-BLOCK_SIZE; i += BLOCK_SIZE) { hipLaunchKernelGGL(( lud_diagonal), dim3(1), dim3(BLOCK_SIZE), 0, 0, m, matrix_dim, i); hipLaunchKernelGGL(( lud_perimeter), dim3((matrix_dim-i)/BLOCK_SIZE-1), dim3(BLOCK_SIZE*2), 0, 0, m, matrix_dim, i); dim3 dimGrid((matrix_dim-i)/BLOCK_SIZE-1, (matrix_dim-i)/BLOCK_SIZE-1); hipLaunchKernelGGL(( lud_internal), dim3(dimGrid), dim3(dimBlock), 0, 0, m, matrix_dim, i); } hipLaunchKernelGGL(( lud_diagonal), dim3(1),dim3(BLOCK_SIZE), 0, 0, m, matrix_dim, i); }
d4b109f29b9dcf426df6d5107dbcb9a346bd4206.cu
#include <cuda.h> #include <stdio.h> #define BLOCK_SIZE 16 __global__ void lud_diagonal(float *m, int matrix_dim, int offset) { int i,j; __shared__ float shadow[BLOCK_SIZE][BLOCK_SIZE]; int array_offset = offset*matrix_dim+offset; for(i=0; i < BLOCK_SIZE; i++){ shadow[i][threadIdx.x]=m[array_offset+threadIdx.x]; array_offset += matrix_dim; } __syncthreads(); for(i=0; i < BLOCK_SIZE-1; i++) { if (threadIdx.x>i){ for(j=0; j < i; j++) shadow[threadIdx.x][i] -= shadow[threadIdx.x][j]*shadow[j][i]; shadow[threadIdx.x][i] /= shadow[i][i]; } __syncthreads(); if (threadIdx.x>i) { for(j=0; j < i+1; j++) shadow[i+1][threadIdx.x] -= shadow[i+1][j]*shadow[j][threadIdx.x]; } __syncthreads(); } /* The first row is not modified, it is no need to write it back to the global memory */ array_offset = (offset+1)*matrix_dim+offset; for(i=1; i < BLOCK_SIZE; i++){ m[array_offset+threadIdx.x]=shadow[i][threadIdx.x]; array_offset += matrix_dim; } } __global__ void lud_perimeter(float *m, int matrix_dim, int offset) { __shared__ float dia[BLOCK_SIZE][BLOCK_SIZE]; __shared__ float peri_row[BLOCK_SIZE][BLOCK_SIZE]; __shared__ float peri_col[BLOCK_SIZE][BLOCK_SIZE]; int i,j, array_offset; int idx; if (threadIdx.x < BLOCK_SIZE) { idx = threadIdx.x; array_offset = offset*matrix_dim+offset; for (i=0; i < BLOCK_SIZE/2; i++){ dia[i][idx]=m[array_offset+idx]; array_offset += matrix_dim; } array_offset = offset*matrix_dim+offset; for (i=0; i < BLOCK_SIZE; i++) { peri_row[i][idx]=m[array_offset+(blockIdx.x+1)*BLOCK_SIZE+idx]; array_offset += matrix_dim; } } else { idx = threadIdx.x-BLOCK_SIZE; array_offset = (offset+BLOCK_SIZE/2)*matrix_dim+offset; for (i=BLOCK_SIZE/2; i < BLOCK_SIZE; i++){ dia[i][idx]=m[array_offset+idx]; array_offset += matrix_dim; } array_offset = (offset+(blockIdx.x+1)*BLOCK_SIZE)*matrix_dim+offset; for (i=0; i < BLOCK_SIZE; i++) { peri_col[i][idx] = m[array_offset+idx]; array_offset += matrix_dim; } } __syncthreads(); if (threadIdx.x < BLOCK_SIZE) { //peri-row idx=threadIdx.x; for(i=1; i < BLOCK_SIZE; i++){ for (j=0; j < i; j++) peri_row[i][idx]-=dia[i][j]*peri_row[j][idx]; } array_offset = (offset+1)*matrix_dim+offset; for(i=1; i < BLOCK_SIZE; i++){ m[array_offset+(blockIdx.x+1)*BLOCK_SIZE+idx] = peri_row[i][idx]; array_offset += matrix_dim; } } else { //peri-col idx=threadIdx.x - BLOCK_SIZE; for(i=0; i < BLOCK_SIZE; i++){ for(j=0; j < i; j++) peri_col[idx][i]-=peri_col[idx][j]*dia[j][i]; peri_col[idx][i] /= dia[i][i]; } } __syncthreads(); if (threadIdx.x >= BLOCK_SIZE) { array_offset = (offset+(blockIdx.x+1)*BLOCK_SIZE)*matrix_dim+offset; for(i=0; i < BLOCK_SIZE; i++){ m[array_offset+idx] = peri_col[i][idx]; array_offset += matrix_dim; } } } __global__ void lud_internal(float *m, int matrix_dim, int offset) { __shared__ float peri_row[BLOCK_SIZE][BLOCK_SIZE]; __shared__ float peri_col[BLOCK_SIZE][BLOCK_SIZE]; int i; float sum; int global_row_id = offset + (blockIdx.y+1)*BLOCK_SIZE; int global_col_id = offset + (blockIdx.x+1)*BLOCK_SIZE; peri_row[threadIdx.y][threadIdx.x] = m[(offset+threadIdx.y)*matrix_dim+global_col_id+threadIdx.x]; peri_col[threadIdx.y][threadIdx.x] = m[(global_row_id+threadIdx.y)*matrix_dim+offset+threadIdx.x]; __syncthreads(); sum = 0; for (i=0; i < BLOCK_SIZE; i++) sum += peri_col[threadIdx.y][i] * peri_row[i][threadIdx.x]; m[(global_row_id+threadIdx.y)*matrix_dim+global_col_id+threadIdx.x] -= sum; } void lud_cuda(float *m, int matrix_dim) { int i=0; dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE); float *m_debug = (float*)malloc(matrix_dim*matrix_dim*sizeof(float)); for (i=0; i < 
matrix_dim-BLOCK_SIZE; i += BLOCK_SIZE) { lud_diagonal<<<1, BLOCK_SIZE>>>(m, matrix_dim, i); lud_perimeter<<<(matrix_dim-i)/BLOCK_SIZE-1, BLOCK_SIZE*2>>>(m, matrix_dim, i); dim3 dimGrid((matrix_dim-i)/BLOCK_SIZE-1, (matrix_dim-i)/BLOCK_SIZE-1); lud_internal<<<dimGrid, dimBlock>>>(m, matrix_dim, i); } lud_diagonal<<<1,BLOCK_SIZE>>>(m, matrix_dim, i); }
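The three kernels in this pair implement one step of a right-looking blocked LU factorization without pivoting. Writing the trailing matrix at `offset` in 2x2 block form, with $A_{11}$ the BLOCK_SIZE-square diagonal tile, each loop iteration computes

$$ A_{11} = L_{11} U_{11}, \qquad U_{12} = L_{11}^{-1} A_{12}, \qquad L_{21} = A_{21} U_{11}^{-1}, \qquad A_{22} \leftarrow A_{22} - L_{21} U_{12} . $$

lud_diagonal factors $A_{11}$ in shared memory (Doolittle style: $U$ keeps the pivots, so only the $L$ entries are divided by shadow[i][i]); lud_perimeter produces the $U_{12}$ row panel and the $L_{21}$ column panel, one block per perimeter tile, with the first BLOCK_SIZE threads of the double-width block handling the row panel and the second BLOCK_SIZE the column panel; lud_internal applies the rank-BLOCK_SIZE trailing update before the loop advances `offset`.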
3ec1be89e16a2adf9a1ee699e067678176565402.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #define N 256 #define TPB 64 __global__ void printKernel() { // Get thread ID const int i = blockIdx.x*blockDim.x + threadIdx.x; // Print message printf("Hello World! My threadId is %d\n\n", i); } int main() { // Launch kernel to print hipLaunchKernelGGL(( printKernel), dim3(N/TPB), dim3(TPB), 0, 0, ); hipDeviceSynchronize(); return 0; }
3ec1be89e16a2adf9a1ee699e067678176565402.cu
#include <stdio.h> #define N 256 #define TPB 64 __global__ void printKernel() { // Get thread ID const int i = blockIdx.x*blockDim.x + threadIdx.x; // Print message printf("Hello World! My threadId is %d\n\n", i); } int main() { // Launch kernel to print printKernel<<<N/TPB, TPB>>>(); cudaDeviceSynchronize(); return 0; }
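One caveat on the launch configuration in this pair: N/TPB is integer division, which only covers all N threads because 256 is an exact multiple of 64. For arbitrary N the usual pattern is ceiling division plus an in-kernel bounds check; a generic sketch, not a change to the file above:

const int blocks = (N + TPB - 1) / TPB;  // round up so no element is missed
printKernel<<<blocks, TPB>>>();
// and in any kernel that indexes an array of length N:
//     if (i >= N) return;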
8197c07e2b3af86f805f8134467a4690728a3e5f.hip
// !!! This is a file automatically generated by hipify!!! #if !MEGDNN_TEGRA_X1 // generated by gen_cuda_conv_bias_kern_impls.py // ignore warning of cutlass #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Wunused-parameter" #pragma GCC diagnostic ignored "-Wstrict-aliasing" #include "src/cuda/conv_bias/int8/conv_bias_int8_implicit_gemm_cutlass_wrapper.cuinl" using LayoutSrc = cutlass::layout::TensorNCxHWx<32>; using LayoutFilter = cutlass::layout::TensorCxRSKx<32>; using LayoutDst = cutlass::layout::TensorNCxHWx<32>; using ThreadBlockShape = cutlass::gemm::GemmShape<128, 64, 64>; using WarpShape = cutlass::gemm::GemmShape<64, 32, 64>; using InstructionShape = cutlass::gemm::GemmShape<8, 8, 16>; using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombinationReluClamp< int8_t, 8, int32_t, int32_t, float>; using Convolution = cutlass::conv::device::Convolution< int8_t, LayoutSrc, int8_t, LayoutFilter, int8_t, LayoutDst, int32_t, LayoutDst, int32_t, cutlass::conv::ConvType::kConvolution, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm75, ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp, cutlass::conv::threadblock::ConvolutionFpropNCxHWxThreadblockSwizzle, 2, 16, 16, true, cutlass::arch::OpMultiplyAddSaturate>; template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>( const typename Convolution::ElementSrc* d_src, const typename Convolution::ElementFilter* d_filter, const typename Convolution::ElementBias* d_bias, const typename Convolution::ElementDst* d_z, typename Convolution::ElementDst* d_dst, int* workspace, typename Convolution::ConvolutionParameter const& conv_param, typename Convolution::EpilogueOutputOp::Params const& epilogue, hipStream_t stream); #pragma GCC diagnostic pop #endif
8197c07e2b3af86f805f8134467a4690728a3e5f.cu
#if !MEGDNN_TEGRA_X1 // generated by gen_cuda_conv_bias_kern_impls.py // ignore warning of cutlass #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Wunused-parameter" #pragma GCC diagnostic ignored "-Wstrict-aliasing" #include "src/cuda/conv_bias/int8/conv_bias_int8_implicit_gemm_cutlass_wrapper.cuinl" using LayoutSrc = cutlass::layout::TensorNCxHWx<32>; using LayoutFilter = cutlass::layout::TensorCxRSKx<32>; using LayoutDst = cutlass::layout::TensorNCxHWx<32>; using ThreadBlockShape = cutlass::gemm::GemmShape<128, 64, 64>; using WarpShape = cutlass::gemm::GemmShape<64, 32, 64>; using InstructionShape = cutlass::gemm::GemmShape<8, 8, 16>; using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombinationReluClamp< int8_t, 8, int32_t, int32_t, float>; using Convolution = cutlass::conv::device::Convolution< int8_t, LayoutSrc, int8_t, LayoutFilter, int8_t, LayoutDst, int32_t, LayoutDst, int32_t, cutlass::conv::ConvType::kConvolution, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm75, ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp, cutlass::conv::threadblock::ConvolutionFpropNCxHWxThreadblockSwizzle, 2, 16, 16, true, cutlass::arch::OpMultiplyAddSaturate>; template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>( const typename Convolution::ElementSrc* d_src, const typename Convolution::ElementFilter* d_filter, const typename Convolution::ElementBias* d_bias, const typename Convolution::ElementDst* d_z, typename Convolution::ElementDst* d_dst, int* workspace, typename Convolution::ConvolutionParameter const& conv_param, typename Convolution::EpilogueOutputOp::Params const& epilogue, cudaStream_t stream); #pragma GCC diagnostic pop #endif
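For readers decoding the template parameters in this generated instantiation, the three GemmShape arguments follow CUTLASS's usual tiling hierarchy (stated here as general CUTLASS background, not something the file itself spells out):

// ThreadBlockShape 128x64x64 : the output tile each thread block computes,
//                              advancing 64 at a time along the K dimension
// WarpShape        64x32x64  : the slice of that tile owned by one warp
// InstructionShape 8x8x16    : the int8 Tensor Core MMA shape targeted on
//                              Sm75 (Turing)
// EpilogueOp                 : bias add + linear combination + ReLU + clamp,
//                              fused after the GEMM main loop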
442d829f2f9542b3472780bbab2afb900958a95d.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "poisson2d.hpp" #include "timer.hpp" #include <algorithm> #include <iostream> #include <stdio.h> __global__ void scan_kernel_1(double const *X, double *Y, int N, double *carries) { __shared__ double shared_buffer[256]; double my_value; unsigned int work_per_thread = (N - 1) / (gridDim.x * blockDim.x) + 1; unsigned int block_start = work_per_thread * blockDim.x * blockIdx.x; unsigned int block_stop = work_per_thread * blockDim.x * (blockIdx.x + 1); unsigned int block_offset = 0; // run scan on each section for (unsigned int i = block_start + threadIdx.x; i < block_stop; i += blockDim.x) { // load data: my_value = (i < N) ? X[i] : 0; // inclusive scan in shared buffer: for(unsigned int stride = 1; stride < blockDim.x; stride *= 2) { __syncthreads(); shared_buffer[threadIdx.x] = my_value; __syncthreads(); if (threadIdx.x >= stride) my_value += shared_buffer[threadIdx.x - stride]; } __syncthreads(); shared_buffer[threadIdx.x] = my_value; __syncthreads(); // exclusive scan requires us to write a zero value at the beginning of each block my_value = (threadIdx.x > 0) ? shared_buffer[threadIdx.x - 1] : 0; // write to output array if (i < N) Y[i] = block_offset + my_value; block_offset += shared_buffer[blockDim.x-1]; } // write carry: if (threadIdx.x == 0) carries[blockIdx.x] = block_offset; } // exclusive-scan of carries __global__ void scan_kernel_2(double *carries) { __shared__ double shared_buffer[256]; // load data: double my_carry = carries[threadIdx.x]; // exclusive scan in shared buffer: for(unsigned int stride = 1; stride < blockDim.x; stride *= 2) { __syncthreads(); shared_buffer[threadIdx.x] = my_carry; __syncthreads(); if (threadIdx.x >= stride) my_carry += shared_buffer[threadIdx.x - stride]; } __syncthreads(); shared_buffer[threadIdx.x] = my_carry; __syncthreads(); // write to output array carries[threadIdx.x] = (threadIdx.x > 0) ? 
shared_buffer[threadIdx.x - 1] : 0; } __global__ void scan_kernel_3(double *Y, int N, double const *carries) { unsigned int work_per_thread = (N - 1) / (gridDim.x * blockDim.x) + 1; unsigned int block_start = work_per_thread * blockDim.x * blockIdx.x; unsigned int block_stop = work_per_thread * blockDim.x * (blockIdx.x + 1); __shared__ double shared_offset; if (threadIdx.x == 0) shared_offset = carries[blockIdx.x]; __syncthreads(); // add offset to each element in the block: for (unsigned int i = block_start + threadIdx.x; i < block_stop; i += blockDim.x) if (i < N) Y[i] += shared_offset; } __global__ void makeInclusive(double *Y, int N, const double *X) { /* incl[i] = excl[i] + X[i]; adding the input in place is race-free, unlike the former in-place shift Y[i] = Y[i+1], which raced with the neighboring thread's write */ for (int i = blockDim.x * blockIdx.x + threadIdx.x; i < N; i += gridDim.x * blockDim.x) { Y[i] += X[i]; } } void exclusive_scan(double const * input, double * output, int N) { int num_blocks = 256; int threads_per_block = 256; double *carries; hipMalloc(&carries, sizeof(double) * num_blocks); // First step: Scan within each thread group and write carries hipLaunchKernelGGL(( scan_kernel_1), dim3(num_blocks), dim3(threads_per_block), 0, 0, input, output, N, carries); // Second step: Compute offset for each thread group (exclusive scan for each thread group) hipLaunchKernelGGL(( scan_kernel_2), dim3(1), dim3(num_blocks), 0, 0, carries); // Third step: Offset each thread group accordingly hipLaunchKernelGGL(( scan_kernel_3), dim3(num_blocks), dim3(threads_per_block), 0, 0, output, N, carries); // Make inclusive hipLaunchKernelGGL(( makeInclusive), dim3(num_blocks), dim3(threads_per_block), 0, 0, output, N, input); hipFree(carries); } int main() { int N = 200; // // Allocate host arrays for reference // double *x = (double *)malloc(sizeof(double) * N); double *y = (double *)malloc(sizeof(double) * N); double *z = (double *)malloc(sizeof(double) * N); std::fill(x, x + N, 1); // reference calculation: y[0] = 0; for (int i=1; i<N; ++i) y[i] = y[i-1] + x[i-1]; // // Allocate CUDA-arrays // double *cuda_x, *cuda_y; hipMalloc(&cuda_x, sizeof(double) * N); hipMalloc(&cuda_y, sizeof(double) * N); hipMemcpy(cuda_x, x, sizeof(double) * N, hipMemcpyHostToDevice); // Perform the scan (exclusive, then shifted to inclusive) and obtain results exclusive_scan(cuda_x, cuda_y, N); hipMemcpy(z, cuda_y, sizeof(double) * N, hipMemcpyDeviceToHost); // // Print first few entries for reference // std::cout << "CPU y: "; for (int i=0; i<10; ++i) std::cout << y[i] << " "; std::cout << " ... "; for (int i=N-10; i<N; ++i) std::cout << y[i] << " "; std::cout << std::endl; std::cout << "GPU y: "; for (int i=0; i<10; ++i) std::cout << z[i] << " "; std::cout << " ... "; for (int i=N-10; i<N; ++i) std::cout << z[i] << " "; std::cout << std::endl; // // Clean up: // free(x); free(y); free(z); hipFree(cuda_x); hipFree(cuda_y); return EXIT_SUCCESS; }
442d829f2f9542b3472780bbab2afb900958a95d.cu
#include "poisson2d.hpp" #include "timer.hpp" #include <algorithm> #include <iostream> #include <stdio.h> __global__ void scan_kernel_1(double const *X, double *Y, int N, double *carries) { __shared__ double shared_buffer[256]; double my_value; unsigned int work_per_thread = (N - 1) / (gridDim.x * blockDim.x) + 1; unsigned int block_start = work_per_thread * blockDim.x * blockIdx.x; unsigned int block_stop = work_per_thread * blockDim.x * (blockIdx.x + 1); unsigned int block_offset = 0; // run scan on each section for (unsigned int i = block_start + threadIdx.x; i < block_stop; i += blockDim.x) { // load data: my_value = (i < N) ? X[i] : 0; // inclusive scan in shared buffer: for(unsigned int stride = 1; stride < blockDim.x; stride *= 2) { __syncthreads(); shared_buffer[threadIdx.x] = my_value; __syncthreads(); if (threadIdx.x >= stride) my_value += shared_buffer[threadIdx.x - stride]; } __syncthreads(); shared_buffer[threadIdx.x] = my_value; __syncthreads(); // exclusive scan requires us to write a zero value at the beginning of each block my_value = (threadIdx.x > 0) ? shared_buffer[threadIdx.x - 1] : 0; // write to output array if (i < N) Y[i] = block_offset + my_value; block_offset += shared_buffer[blockDim.x-1]; } // write carry: if (threadIdx.x == 0) carries[blockIdx.x] = block_offset; } // exclusive-scan of carries __global__ void scan_kernel_2(double *carries) { __shared__ double shared_buffer[256]; // load data: double my_carry = carries[threadIdx.x]; // exclusive scan in shared buffer: for(unsigned int stride = 1; stride < blockDim.x; stride *= 2) { __syncthreads(); shared_buffer[threadIdx.x] = my_carry; __syncthreads(); if (threadIdx.x >= stride) my_carry += shared_buffer[threadIdx.x - stride]; } __syncthreads(); shared_buffer[threadIdx.x] = my_carry; __syncthreads(); // write to output array carries[threadIdx.x] = (threadIdx.x > 0) ? 
shared_buffer[threadIdx.x - 1] : 0; } __global__ void scan_kernel_3(double *Y, int N, double const *carries) { unsigned int work_per_thread = (N - 1) / (gridDim.x * blockDim.x) + 1; unsigned int block_start = work_per_thread * blockDim.x * blockIdx.x; unsigned int block_stop = work_per_thread * blockDim.x * (blockIdx.x + 1); __shared__ double shared_offset; if (threadIdx.x == 0) shared_offset = carries[blockIdx.x]; __syncthreads(); // add offset to each element in the block: for (unsigned int i = block_start + threadIdx.x; i < block_stop; i += blockDim.x) if (i < N) Y[i] += shared_offset; } __global__ void makeInclusive(double *Y, int N, const double *X) { /* incl[i] = excl[i] + X[i]; adding the input in place is race-free, unlike the former in-place shift Y[i] = Y[i+1], which raced with the neighboring thread's write */ for (int i = blockDim.x * blockIdx.x + threadIdx.x; i < N; i += gridDim.x * blockDim.x) { Y[i] += X[i]; } } void exclusive_scan(double const * input, double * output, int N) { int num_blocks = 256; int threads_per_block = 256; double *carries; cudaMalloc(&carries, sizeof(double) * num_blocks); // First step: Scan within each thread group and write carries scan_kernel_1<<<num_blocks, threads_per_block>>>(input, output, N, carries); // Second step: Compute offset for each thread group (exclusive scan for each thread group) scan_kernel_2<<<1, num_blocks>>>(carries); // Third step: Offset each thread group accordingly scan_kernel_3<<<num_blocks, threads_per_block>>>(output, N, carries); // Make inclusive makeInclusive<<<num_blocks, threads_per_block>>>(output, N, input); cudaFree(carries); } int main() { int N = 200; // // Allocate host arrays for reference // double *x = (double *)malloc(sizeof(double) * N); double *y = (double *)malloc(sizeof(double) * N); double *z = (double *)malloc(sizeof(double) * N); std::fill(x, x + N, 1); // reference calculation: y[0] = 0; for (int i=1; i<N; ++i) y[i] = y[i-1] + x[i-1]; // // Allocate CUDA-arrays // double *cuda_x, *cuda_y; cudaMalloc(&cuda_x, sizeof(double) * N); cudaMalloc(&cuda_y, sizeof(double) * N); cudaMemcpy(cuda_x, x, sizeof(double) * N, cudaMemcpyHostToDevice); // Perform the scan (exclusive, then shifted to inclusive) and obtain results exclusive_scan(cuda_x, cuda_y, N); cudaMemcpy(z, cuda_y, sizeof(double) * N, cudaMemcpyDeviceToHost); // // Print first few entries for reference // std::cout << "CPU y: "; for (int i=0; i<10; ++i) std::cout << y[i] << " "; std::cout << " ... "; for (int i=N-10; i<N; ++i) std::cout << y[i] << " "; std::cout << std::endl; std::cout << "GPU y: "; for (int i=0; i<10; ++i) std::cout << z[i] << " "; std::cout << " ... "; for (int i=N-10; i<N; ++i) std::cout << z[i] << " "; std::cout << std::endl; // // Clean up: // free(x); free(y); free(z); cudaFree(cuda_x); cudaFree(cuda_y); return EXIT_SUCCESS; }
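// A minimal CPU sketch (double data, illustrative names) of the two scan
// flavors the pair above produces: exclusive_scan() first writes an exclusive
// scan, and makeInclusive() then converts it to the inclusive form, which is
// equivalent to adding the corresponding input element, incl[i] = excl[i] + x[i].
#include <cstddef>
#include <vector>

static void scan_reference(const std::vector<double> &x,
                           std::vector<double> &excl,
                           std::vector<double> &incl) {
    excl.assign(x.size(), 0.0);
    incl.assign(x.size(), 0.0);
    double run = 0.0;
    for (std::size_t i = 0; i < x.size(); ++i) {
        excl[i] = run;   // sum of x[0..i-1]
        run += x[i];
        incl[i] = run;   // sum of x[0..i] == excl[i] + x[i]
    }
}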
ed56cd3cb8b6758c7d52f0b35539d165850880ed.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <iostream> #include <chrono> #include <algorithm> #include "common_hip.cuh" #include "select.cuh" using namespace std; #ifndef NVAL #define NVAL (1024*64*1024*4) #endif constexpr int N = NVAL; uint32_t unstable_select_gpu2(int32_t *src, int32_t *dst, uint32_t N, int32_t pred, const dim3 &dimGrid, const dim3 &dimBlock, hipEvent_t &start, hipEvent_t &stop){ int32_t *buffer; int32_t grid_size = dimGrid.x * dimGrid.y * dimGrid.z; gpu(hipSetDevice(0)); gpu(hipMalloc((void**)&buffer, (grid_size * 4 * WARPSIZE + 2)* sizeof(int32_t))); uint32_t* counters = (uint32_t *) (buffer + grid_size * 4 * WARPSIZE); // initialize global counters gpu(hipMemset(buffer + grid_size * 4 * WARPSIZE - grid_size, 0, (grid_size + 2) * sizeof(int32_t))); int32_t *buffer2; int32_t *dst2; gpu(hipSetDevice(1)); hipMalloc((void**)&dst2, N/2*sizeof(int32_t)); gpu(hipMalloc((void**)&buffer2, (grid_size * 4 * WARPSIZE + 2)* sizeof(int32_t))); uint32_t* counters2 = (uint32_t *) (buffer2 + grid_size * 4 * WARPSIZE); // initialize global counters gpu(hipMemset(buffer2 + grid_size * 4 * WARPSIZE - grid_size, 0, (grid_size + 2) * sizeof(int32_t))); gpu(hipSetDevice(0)); hipEventRecord(start); size_t shared_mem = (9 * dimBlock.x * dimBlock.y + BRDCSTMEM(dimBlock) + ((dimBlock.x * dimBlock.y) / WARPSIZE))*sizeof(int32_t); // run kernel hipLaunchKernelGGL(( unstable_select), dim3(dimGrid), dim3(dimBlock), shared_mem, 0, src, dst, N/2, pred, buffer, counters, counters+1); gpu(hipSetDevice(1)); // run kernel hipStreamWaitEvent(NULL, start, 0); //only for correctly counting the time for both kernels hipLaunchKernelGGL(( unstable_select), dim3(dimGrid), dim3(dimBlock), shared_mem, 0, src+N/2, dst2, N/2, pred, buffer2, counters2, counters2+1); gpu(hipSetDevice(0)); #ifndef NDEBUG gpu(hipPeekAtLastError() ); gpu(hipDeviceSynchronize()); #endif // wait to read counters from device uint32_t h_counters[2]; gpu(hipMemcpy(h_counters, counters, 2 * sizeof(uint32_t), hipMemcpyDefault)); uint32_t h_output_size = h_counters[0]; uint32_t h_buffer_end = h_counters[1]; uint32_t h_buffer_start= (h_counters[1]/(4*WARPSIZE))*(4*WARPSIZE); uint32_t h_buffer_size = h_buffer_end - h_buffer_start; assert(h_buffer_start % (4*WARPSIZE) == 0); assert(h_buffer_end >= h_buffer_start); assert(h_buffer_size < 4*WARPSIZE); // combine results if (h_buffer_size > 0) hipMemcpy(dst+h_output_size, buffer+h_buffer_start, h_buffer_size * sizeof(int32_t), hipMemcpyDefault); gpu(hipSetDevice(1)); #ifndef NDEBUG gpu(hipPeekAtLastError() ); gpu(hipDeviceSynchronize()); #endif gpu(hipMemcpy(h_counters, counters2, 2 * sizeof(uint32_t), hipMemcpyDefault)); uint32_t h_output_size2 = h_counters[0]; uint32_t h_buffer_end2 = h_counters[1]; uint32_t h_buffer_start2= (h_counters[1]/(4*WARPSIZE))*(4*WARPSIZE); uint32_t h_buffer_size2 = h_buffer_end2 - h_buffer_start2; assert(h_buffer_start2 % (4*WARPSIZE) == 0); assert(h_buffer_end2 >= h_buffer_start2); assert(h_buffer_size2 < 4*WARPSIZE); // combine results if (h_buffer_size2 > 0) hipMemcpy(dst+h_output_size+h_buffer_size+h_output_size2, buffer2+h_buffer_start2, h_buffer_size2 * sizeof(int32_t), hipMemcpyDefault); gpu(hipSetDevice(1)); gpu(hipMemcpy(dst+h_output_size+h_buffer_size, dst2, h_output_size2*sizeof(int32_t), hipMemcpyDefault)); gpu(hipSetDevice(0)); hipEventRecord(stop); gpu(hipSetDevice(0)); gpu(hipFree(dst2)); return h_output_size+h_buffer_size+h_output_size2+h_buffer_size2; } int32_t *a; int32_t *b; int main(){ 
gpu(hipSetDevice(1)); gpu(hipFree(0)); //initialize devices on demand gpu(hipSetDevice(0)); gpu(hipFree(0)); //initialize devices on demand srand(time(0)); a = (int32_t*) malloc(N*sizeof(int32_t)); b = (int32_t*) malloc(N*sizeof(int32_t)); for (int i = 0 ; i < N ; ++i) a[i] = rand() % 100 + 1; // char *ad; // int *bd; // const int csize = N*sizeof(char); // const int isize = N*sizeof(int); double millis = 0; { auto start = chrono::high_resolution_clock::now(); stable_select_cpu(a, b, N); auto end = chrono::high_resolution_clock::now(); auto diff = end - start; millis = chrono::duration<double, milli>(diff).count(); cout << millis << " ms" << endl; } int32_t *a_pinned; int32_t *b_pinned; hipEvent_t start, stop, start1, stop1, start2, stop2; hipEventCreate(&start); hipEventCreate(&stop); hipEventCreate(&start1); hipEventCreate(&stop1); hipEventCreate(&start2); hipEventCreate(&stop2); dim3 dimBlock(1024, 1 ); dim3 dimGrid( 8, 1 ); hipHostMalloc((void**)&a_pinned, N*sizeof(int32_t)); hipHostMalloc((void**)&b_pinned, N*sizeof(int32_t)); memcpy(a_pinned, a, N*sizeof(int32_t)); #ifndef NTESTUVA int results1; { auto ts = chrono::high_resolution_clock::now(); results1 = unstable_select_gpu2(a_pinned, b_pinned, N, 50, dimGrid, dimBlock, start1, stop1); auto te = chrono::high_resolution_clock::now(); auto diff = te - ts; auto millis = chrono::duration<double, milli>(diff).count(); cout << millis << " ms" << endl; } #else int results1 = 0; #endif #ifndef NTESTMEMCPY int32_t *a_dev, *b_dev; gpu(hipMalloc( (void**)&a_dev, N*sizeof(int32_t))); gpu(hipMalloc( (void**)&b_dev, N*sizeof(int32_t))); hipEventRecord(start); gpu(hipMemcpy( a_dev, a_pinned, N*sizeof(int32_t), hipMemcpyDefault)); int results2 = unstable_select_gpu(a_dev, b_dev, N, 50, dimGrid, dimBlock, start2, stop2); gpu(hipMemcpy(a_pinned, b_dev, N*sizeof(int32_t), hipMemcpyDefault)); hipEventRecord(stop); gpu(hipFree(a_dev)); gpu(hipFree(b_dev)); #else int results2 = 0; #endif hipEventSynchronize(stop); hipDeviceSynchronize(); #ifndef NDEBUG int results = N; for (int i = 0 ; i < N ; ++i) { if (b[i] == -1) { results = i; break; } else { assert(b[i] <= 50); assert(b[i] > 0); } } #ifndef NTESTUVA for (int i = 0 ; i < results1 ; ++i) { if (b_pinned[i] <= 0 || b_pinned[i] > 50){ cout << b_pinned[i] << " " << i << endl; } // assert(b_pinned[i] <= 50); // assert(b_pinned[i] > 0); } #endif #ifndef NTESTMEMCPY for (int i = 0 ; i < results2 ; ++i) { if (a_pinned[i] <= 0 || a_pinned[i] > 50){ cout << a_pinned[i] << " " << i << endl; } // assert(b_pinned[i] <= 50); // assert(b_pinned[i] > 0); } #endif cout << results << " " << results1 << " " << results2 << " " << a_pinned[4] << endl; // assert(results1 == results2); if (results != results1){ cout << "Wrong results!!!!!!" << endl; } else { sort(b_pinned, b_pinned + results); sort(b , b + results); for (int i = 0 ; i < results ; ++i){ if (b[i] != b_pinned[i]){ cout << "Wrong result: " << b_pinned[i] << " (vs " << b[i] << ") @" << i << " !!!!!!" 
<< endl; exit(-1); } } } #endif gpu(hipHostFree(a_pinned)); gpu(hipHostFree(b_pinned)); float milliseconds1 = 0; hipEventElapsedTime(&milliseconds1, start, stop); cout << milliseconds1 << endl; float milliseconds2 = 0; hipEventElapsedTime(&milliseconds2, start1, stop1); cout << milliseconds2 << endl; float milliseconds3 = 0; hipEventElapsedTime(&milliseconds3, start2, stop2); cout << milliseconds3 << endl; cout << endl; cout << millis/milliseconds1 << endl; cout << millis/milliseconds2 << endl; cout << millis/milliseconds3 << endl; hipDeviceSynchronize(); hipDeviceReset(); return EXIT_SUCCESS; }
ed56cd3cb8b6758c7d52f0b35539d165850880ed.cu
#include <iostream> #include <chrono> #include <algorithm> #include "common.cuh" #include "select.cuh" using namespace std; #ifndef NVAL #define NVAL (1024*64*1024*4) #endif constexpr int N = NVAL; uint32_t unstable_select_gpu2(int32_t *src, int32_t *dst, uint32_t N, int32_t pred, const dim3 &dimGrid, const dim3 &dimBlock, cudaEvent_t &start, cudaEvent_t &stop){ int32_t *buffer; int32_t grid_size = dimGrid.x * dimGrid.y * dimGrid.z; gpu(cudaSetDevice(0)); gpu(cudaMalloc((void**)&buffer, (grid_size * 4 * WARPSIZE + 2)* sizeof(int32_t))); uint32_t* counters = (uint32_t *) (buffer + grid_size * 4 * WARPSIZE); // initialize global counters gpu(cudaMemset(buffer + grid_size * 4 * WARPSIZE - grid_size, 0, (grid_size + 2) * sizeof(int32_t))); int32_t *buffer2; int32_t *dst2; gpu(cudaSetDevice(1)); cudaMalloc((void**)&dst2, N/2*sizeof(int32_t)); gpu(cudaMalloc((void**)&buffer2, (grid_size * 4 * WARPSIZE + 2)* sizeof(int32_t))); uint32_t* counters2 = (uint32_t *) (buffer2 + grid_size * 4 * WARPSIZE); // initialize global counters gpu(cudaMemset(buffer2 + grid_size * 4 * WARPSIZE - grid_size, 0, (grid_size + 2) * sizeof(int32_t))); gpu(cudaSetDevice(0)); cudaEventRecord(start); size_t shared_mem = (9 * dimBlock.x * dimBlock.y + BRDCSTMEM(dimBlock) + ((dimBlock.x * dimBlock.y) / WARPSIZE))*sizeof(int32_t); // run kernel unstable_select<<<dimGrid, dimBlock, shared_mem>>>(src, dst, N/2, pred, buffer, counters, counters+1); gpu(cudaSetDevice(1)); // run kernel cudaStreamWaitEvent(NULL, start, 0); //only for correctly counting the time for both kernels unstable_select<<<dimGrid, dimBlock, shared_mem>>>(src+N/2, dst2, N/2, pred, buffer2, counters2, counters2+1); gpu(cudaSetDevice(0)); #ifndef NDEBUG gpu(cudaPeekAtLastError() ); gpu(cudaDeviceSynchronize()); #endif // wait to read counters from device uint32_t h_counters[2]; gpu(cudaMemcpy(h_counters, counters, 2 * sizeof(uint32_t), cudaMemcpyDefault)); uint32_t h_output_size = h_counters[0]; uint32_t h_buffer_end = h_counters[1]; uint32_t h_buffer_start= (h_counters[1]/(4*WARPSIZE))*(4*WARPSIZE); uint32_t h_buffer_size = h_buffer_end - h_buffer_start; assert(h_buffer_start % (4*WARPSIZE) == 0); assert(h_buffer_end >= h_buffer_start); assert(h_buffer_size < 4*WARPSIZE); // combine results if (h_buffer_size > 0) cudaMemcpy(dst+h_output_size, buffer+h_buffer_start, h_buffer_size * sizeof(int32_t), cudaMemcpyDefault); gpu(cudaSetDevice(1)); #ifndef NDEBUG gpu(cudaPeekAtLastError() ); gpu(cudaDeviceSynchronize()); #endif gpu(cudaMemcpy(h_counters, counters2, 2 * sizeof(uint32_t), cudaMemcpyDefault)); uint32_t h_output_size2 = h_counters[0]; uint32_t h_buffer_end2 = h_counters[1]; uint32_t h_buffer_start2= (h_counters[1]/(4*WARPSIZE))*(4*WARPSIZE); uint32_t h_buffer_size2 = h_buffer_end2 - h_buffer_start2; assert(h_buffer_start2 % (4*WARPSIZE) == 0); assert(h_buffer_end2 >= h_buffer_start2); assert(h_buffer_size2 < 4*WARPSIZE); // combine results if (h_buffer_size2 > 0) cudaMemcpy(dst+h_output_size+h_buffer_size+h_output_size2, buffer2+h_buffer_start2, h_buffer_size2 * sizeof(int32_t), cudaMemcpyDefault); gpu(cudaSetDevice(1)); gpu(cudaMemcpy(dst+h_output_size+h_buffer_size, dst2, h_output_size2*sizeof(int32_t), cudaMemcpyDefault)); gpu(cudaSetDevice(0)); cudaEventRecord(stop); gpu(cudaSetDevice(0)); gpu(cudaFree(dst2)); return h_output_size+h_buffer_size+h_output_size2+h_buffer_size2; } int32_t *a; int32_t *b; int main(){ gpu(cudaSetDevice(1)); gpu(cudaFree(0)); //initialize devices on demand gpu(cudaSetDevice(0)); gpu(cudaFree(0)); //initialize devices on 
demand srand(time(0)); a = (int32_t*) malloc(N*sizeof(int32_t)); b = (int32_t*) malloc(N*sizeof(int32_t)); for (int i = 0 ; i < N ; ++i) a[i] = rand() % 100 + 1; // char *ad; // int *bd; // const int csize = N*sizeof(char); // const int isize = N*sizeof(int); double millis = 0; { auto start = chrono::high_resolution_clock::now(); stable_select_cpu(a, b, N); auto end = chrono::high_resolution_clock::now(); auto diff = end - start; millis = chrono::duration<double, milli>(diff).count(); cout << millis << " ms" << endl; } int32_t *a_pinned; int32_t *b_pinned; cudaEvent_t start, stop, start1, stop1, start2, stop2; cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventCreate(&start1); cudaEventCreate(&stop1); cudaEventCreate(&start2); cudaEventCreate(&stop2); dim3 dimBlock(1024, 1 ); dim3 dimGrid( 8, 1 ); cudaMallocHost((void**)&a_pinned, N*sizeof(int32_t)); cudaMallocHost((void**)&b_pinned, N*sizeof(int32_t)); memcpy(a_pinned, a, N*sizeof(int32_t)); #ifndef NTESTUVA int results1; { auto ts = chrono::high_resolution_clock::now(); results1 = unstable_select_gpu2(a_pinned, b_pinned, N, 50, dimGrid, dimBlock, start1, stop1); auto te = chrono::high_resolution_clock::now(); auto diff = te - ts; auto millis = chrono::duration<double, milli>(diff).count(); cout << millis << " ms" << endl; } #else int results1 = 0; #endif #ifndef NTESTMEMCPY int32_t *a_dev, *b_dev; gpu(cudaMalloc( (void**)&a_dev, N*sizeof(int32_t))); gpu(cudaMalloc( (void**)&b_dev, N*sizeof(int32_t))); cudaEventRecord(start); gpu(cudaMemcpy( a_dev, a_pinned, N*sizeof(int32_t), cudaMemcpyDefault)); int results2 = unstable_select_gpu(a_dev, b_dev, N, 50, dimGrid, dimBlock, start2, stop2); gpu(cudaMemcpy(a_pinned, b_dev, N*sizeof(int32_t), cudaMemcpyDefault)); cudaEventRecord(stop); gpu(cudaFree(a_dev)); gpu(cudaFree(b_dev)); #else int results2 = 0; #endif cudaEventSynchronize(stop); cudaDeviceSynchronize(); #ifndef NDEBUG int results = N; for (int i = 0 ; i < N ; ++i) { if (b[i] == -1) { results = i; break; } else { assert(b[i] <= 50); assert(b[i] > 0); } } #ifndef NTESTUVA for (int i = 0 ; i < results1 ; ++i) { if (b_pinned[i] <= 0 || b_pinned[i] > 50){ cout << b_pinned[i] << " " << i << endl; } // assert(b_pinned[i] <= 50); // assert(b_pinned[i] > 0); } #endif #ifndef NTESTMEMCPY for (int i = 0 ; i < results2 ; ++i) { if (a_pinned[i] <= 0 || a_pinned[i] > 50){ cout << a_pinned[i] << " " << i << endl; } // assert(b_pinned[i] <= 50); // assert(b_pinned[i] > 0); } #endif cout << results << " " << results1 << " " << results2 << " " << a_pinned[4] << endl; // assert(results1 == results2); if (results != results1){ cout << "Wrong results!!!!!!" << endl; } else { sort(b_pinned, b_pinned + results); sort(b , b + results); for (int i = 0 ; i < results ; ++i){ if (b[i] != b_pinned[i]){ cout << "Wrong result: " << b_pinned[i] << " (vs " << b[i] << ") @" << i << " !!!!!!" << endl; exit(-1); } } } #endif gpu(cudaFreeHost(a_pinned)); gpu(cudaFreeHost(b_pinned)); float milliseconds1 = 0; cudaEventElapsedTime(&milliseconds1, start, stop); cout << milliseconds1 << endl; float milliseconds2 = 0; cudaEventElapsedTime(&milliseconds2, start1, stop1); cout << milliseconds2 << endl; float milliseconds3 = 0; cudaEventElapsedTime(&milliseconds3, start2, stop2); cout << milliseconds3 << endl; cout << endl; cout << millis/milliseconds1 << endl; cout << millis/milliseconds2 << endl; cout << millis/milliseconds3 << endl; cudaDeviceSynchronize(); cudaDeviceReset(); return EXIT_SUCCESS; }
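// A host-side sketch (names are illustrative, not from the pair above) of how
// unstable_select_gpu2 stitches the two GPUs' partial outputs: each device
// reports a fully compacted prefix of output_size elements plus a partially
// filled tail of fewer than 4*WARPSIZE elements, and dst ends up laid out as
// [out0][tail0][out1][tail1].
#include <cstdint>

struct PartialResult { uint32_t output_size; uint32_t buffer_end; };

static uint32_t stitched_size(PartialResult r0, PartialResult r1,
                              uint32_t warpsize) {
    uint32_t chunk = 4 * warpsize;                               // buffer granularity
    uint32_t tail0 = r0.buffer_end - (r0.buffer_end / chunk) * chunk;
    uint32_t tail1 = r1.buffer_end - (r1.buffer_end / chunk) * chunk;
    return r0.output_size + tail0 + r1.output_size + tail1;     // total selected elements
}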
0aa2752444d636441cff34c6d7e93ceb6fe1132c.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "topp_initialization_kernel.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); if (argc < 2) { fprintf(stderr, "Usage: %s <matrix_len>\n", argv[0]); return 1; } char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; bool *finished = NULL; hipMalloc(&finished, XSIZE*YSIZE*sizeof(bool)); int *sequence_length = NULL; hipMalloc(&sequence_length, XSIZE*YSIZE*sizeof(int)); int *word_ids = NULL; hipMalloc(&word_ids, XSIZE*YSIZE*sizeof(int)); int *topp_id_val_buf = NULL; hipMalloc(&topp_id_val_buf, XSIZE*YSIZE*sizeof(int)); int *topp_offset_buf = NULL; hipMalloc(&topp_offset_buf, XSIZE*YSIZE*sizeof(int)); const int batch_size = 1; const int vocab_size = 1; const int start_id = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( topp_initialization_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, finished,sequence_length,word_ids,topp_id_val_buf,topp_offset_buf,batch_size,vocab_size,start_id); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( topp_initialization_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, finished,sequence_length,word_ids,topp_id_val_buf,topp_offset_buf,batch_size,vocab_size,start_id); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( topp_initialization_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, finished,sequence_length,word_ids,topp_id_val_buf,topp_offset_buf,batch_size,vocab_size,start_id); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
0aa2752444d636441cff34c6d7e93ceb6fe1132c.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "topp_initialization_kernel.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); if (argc < 2) { fprintf(stderr, "Usage: %s <matrix_len>\n", argv[0]); return 1; } char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; bool *finished = NULL; cudaMalloc(&finished, XSIZE*YSIZE*sizeof(bool)); int *sequence_length = NULL; cudaMalloc(&sequence_length, XSIZE*YSIZE*sizeof(int)); int *word_ids = NULL; cudaMalloc(&word_ids, XSIZE*YSIZE*sizeof(int)); int *topp_id_val_buf = NULL; cudaMalloc(&topp_id_val_buf, XSIZE*YSIZE*sizeof(int)); int *topp_offset_buf = NULL; cudaMalloc(&topp_offset_buf, XSIZE*YSIZE*sizeof(int)); const int batch_size = 1; const int vocab_size = 1; const int start_id = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); topp_initialization_kernel<<<gridBlock,threadBlock>>>(finished,sequence_length,word_ids,topp_id_val_buf,topp_offset_buf,batch_size,vocab_size,start_id); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { topp_initialization_kernel<<<gridBlock,threadBlock>>>(finished,sequence_length,word_ids,topp_id_val_buf,topp_offset_buf,batch_size,vocab_size,start_id); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { topp_initialization_kernel<<<gridBlock,threadBlock>>>(finished,sequence_length,word_ids,topp_id_val_buf,topp_offset_buf,batch_size,vocab_size,start_id); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
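// The harness above pads XSIZE/YSIZE up to a multiple of the block size with
// an increment loop; a constant-time equivalent (a sketch, valid for positive
// sizes) is the usual ceiling division:
static inline int round_up(int size, int block) {
    return ((size + block - 1) / block) * block;  // smallest multiple of block >= size
}
// so gridBlock.x == round_up(XSIZE, BLOCKX) / BLOCKX == (XSIZE + BLOCKX - 1) / BLOCKX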
2df36f1b6d80dbad9100a21a89f8810a3204a564.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * cuda_time_array_v01.cu * * Created on: Nov 23, 2013 * Author: cuda */ #define DEBUG #include "libraries.h" #include "typedefs.h" #include "GPU_libraries.cuh" #include "math_func.h" #include "functions.h" #include "my_kernels.cuh" #define PRINT_RESULT_CPU_NO // Print the calculated results #define PRINT_RESULT_GPU_NO // Print the calculated results int main (int argc, char *argv[]) { int i; int threadsPerBlock = 256; // Threads per Block int blocksPerGrid; // Blocks per Grid distr_var *array = NULL; distr_var *d_array = NULL; struct timeval *Time_results = NULL; struct timeval *d_Time_results = NULL; struct timeval t0; // measuring the cpu time struct timeval t1; // measuring the cpu time long elapsed; unsigned long *sec; unsigned long *h_sec; unsigned long *ph_sec; unsigned long *usec; unsigned long *h_usec; unsigned long *ph_usec; float run_time; hipEvent_t start, stop; hiprandState_t *devStates; hipError_t err = hipSuccess; input_var *input = input_check(argc,argv); // grab the input from user /* Functions */ // mix the array and fix the distributions according user request array=distribution_mix(input->globalmix,input->numofdistr,input->accurate); // permutate the array. permutate(array, input->accurate); // create events for time measuring hipEventCreate(&start); hipEventCreate(&stop); /* Out put Times */ /* Allocate Space for the results */ // Time structs Time_results=(struct timeval *)malloc(sizeof(struct timeval)*input->accurate); // Array for the seconds h_sec=(unsigned long *)malloc(sizeof(unsigned long)*input->accurate); // Array for the uSeconds h_usec=(unsigned long *)malloc(sizeof(unsigned long)*input->accurate); printf("Size of the calculated ensemble : %i \n",input->accurate); /*CPU Calculation*/ gettimeofday(&t0,NULL); for(i=0;i<input->accurate;i++) { delay_fix(&Time_results[i],&array[i]); } gettimeofday(&t1,NULL); elapsed = (t1.tv_sec-t0.tv_sec)*1000000 + t1.tv_usec-t0.tv_usec; printf("The CPU processing elapsed : %f ms\n",(double)elapsed/1000); #ifdef PRINT_RESULT_CPU for(i=0;i<input->accurate;i++){ // Print the time for each printf("%4i) sec:%1li usec:%4li\n",i,Time_results[i].tv_sec,Time_results[i].tv_usec); } #endif /*Pinned Memory*/ // Allocate err = hipHostMalloc((void **)&ph_sec,(size_t)(sizeof(unsigned long)*input->accurate),hipHostMallocDefault); if (err != hipSuccess) { fprintf(stderr,"GPU Pinned Mem.: %s\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } // Allocate err = hipHostMalloc((void **)&ph_usec,(size_t)(sizeof(unsigned long)*input->accurate),hipHostMallocDefault); if (err != hipSuccess) { fprintf(stderr,"GPU Pinned Mem.: %s\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } // /*Allocate pinned memory and initalize it.*/ // err = hipHostMalloc((void **)&d_array,(size_t)(sizeof(distr_var)*input->accurate),hipHostMallocDefault); // if (err != hipSuccess) // { // fprintf(stderr,"GPU MEM\n", hipGetErrorString(err)); // exit(EXIT_FAILURE); // } // d_array=distribution_mix(input->globalmix,input->numofdistr,input->accurate); // ////////////////////////////////////////// // Allocate the seed.
err = hipMalloc((void **)&devStates,(size_t)(input->accurate*sizeof(hiprandState_t))); if (err != hipSuccess) { fprintf(stderr,"GPU States MEM: %s\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } // Allocate data array err = hipMalloc((void **)&d_array,(size_t)(sizeof(distr_var)*input->accurate)); if (err != hipSuccess) { fprintf(stderr,"GPU MEM: %s\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } // Allocate the seconds array err = hipMalloc((void **)&sec,(size_t)(sizeof(unsigned long)*input->accurate)); if (err != hipSuccess) { fprintf(stderr,"GPU MEM: %s\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } // Allocate the useconds array err = hipMalloc((void **)&usec,(size_t)(sizeof(unsigned long)*input->accurate)); if (err != hipSuccess) { fprintf(stderr,"GPU MEM: %s\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } // Memory copy the data array err = hipMemcpy(d_array, array, (size_t)(sizeof(distr_var)*input->accurate), hipMemcpyHostToDevice); if (err != hipSuccess) { fprintf(stderr,"GPU in MEMCPY: %s\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } // Initialize the kernel and calculate the blocks and threads per block blocksPerGrid=(input->accurate + threadsPerBlock - 1) / threadsPerBlock; printf("CUDA kernel launch with %d blocks of %d threads\n", blocksPerGrid, threadsPerBlock); hipEventRecord(start, 0); // start the timer. hipLaunchKernelGGL(( super_kernel), dim3(blocksPerGrid),dim3(threadsPerBlock), 0, 0, devStates,ph_sec,ph_usec,d_array,input->accurate); err = hipGetLastError(); if (err != hipSuccess) { fprintf(stderr, "Failed to launch super_kernel (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } hipEventRecord(stop, 0); // stop the timer hipEventSynchronize(stop); // sync the event in order to stop. hipEventElapsedTime(&run_time, start, stop); // calculate the time printf ("Time for the kernel: %f ms\n", run_time); // Retrieve data from the gpu. // Seconds section err = hipMemcpy(h_sec,sec,(size_t)(sizeof(unsigned long)*input->accurate),hipMemcpyDeviceToHost); if (err != hipSuccess) { fprintf(stderr,"GPU out MEMCPY: %s\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } // USeconds section err = hipMemcpy(h_usec,usec,(size_t)(sizeof(unsigned long)*input->accurate),hipMemcpyDeviceToHost); if (err != hipSuccess) { fprintf(stderr,"GPU out MEMCPY: %s\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } /* Print the Results for debug */ #ifdef PRINT_RESULT_GPU for(i=0;i<input->accurate;i++) { //printf("%4i) sec:%4li usec:%6li\n",i,h_sec[i],h_usec[i]); printf("%6li,\n",ph_usec[i]); fflush(stdout); } #endif /* Free Memory Section */ // GPU hipFree(d_array); hipFree(d_Time_results); hipFree(devStates); hipFree(sec); hipFree(usec); hipHostFree(ph_sec); hipHostFree(ph_usec); // CPU free(array); free(Time_results); free(h_sec); free(h_usec); free(input->globalmix); free(input); return 0; }
2df36f1b6d80dbad9100a21a89f8810a3204a564.cu
/* * cuda_time_array_v01.cu * * Created on: Nov 23, 2013 * Author: cuda */ #define DEBUG #include "libraries.h" #include "typedefs.h" #include "GPU_libraries.cuh" #include "math_func.h" #include "functions.h" #include "my_kernels.cuh" #define PRINT_RESULT_CPU_NO // Print the calculated results #define PRINT_RESULT_GPU_NO // Print the calculated results int main (int argc, char *argv[]) { int i; int threadsPerBlock = 256; // Threads per Block int blocksPerGrid; // Blocks per Grid distr_var *array = NULL; distr_var *d_array = NULL; struct timeval *Time_results = NULL; struct timeval *d_Time_results = NULL; struct timeval t0; // measuring the cpu time struct timeval t1; // measuring the cpu time long elapsed; unsigned long *sec; unsigned long *h_sec; unsigned long *ph_sec; unsigned long *usec; unsigned long *h_usec; unsigned long *ph_usec; float run_time; cudaEvent_t start, stop; curandState *devStates; cudaError_t err = cudaSuccess; input_var *input = input_check(argc,argv); // grab the input from user /* Functions */ // mix the array and fix the distributions according user request array=distribution_mix(input->globalmix,input->numofdistr,input->accurate); // permutate the array. permutate(array, input->accurate); // create events for time measuring cudaEventCreate(&start); cudaEventCreate(&stop); /* Out put Times */ /* Allocate Space for the results */ // Time structs Time_results=(struct timeval *)malloc(sizeof(struct timeval)*input->accurate); // Array for the seconds h_sec=(unsigned long *)malloc(sizeof(unsigned long)*input->accurate); // Array for the uSeconds h_usec=(unsigned long *)malloc(sizeof(unsigned long)*input->accurate); printf("Size of the calculated ensemble : %i \n",input->accurate); /*CPU Calculation*/ gettimeofday(&t0,NULL); for(i=0;i<input->accurate;i++) { delay_fix(&Time_results[i],&array[i]); } gettimeofday(&t1,NULL); elapsed = (t1.tv_sec-t0.tv_sec)*1000000 + t1.tv_usec-t0.tv_usec; printf("The CPU processing elapsed : %f ms\n",(double)elapsed/1000); #ifdef PRINT_RESULT_CPU for(i=0;i<input->accurate;i++){ // Print the time for each printf("%4i) sec:%1li usec:%4li\n",i,Time_results[i].tv_sec,Time_results[i].tv_usec); } #endif /*Pinned Memory*/ // Allocate err = cudaHostAlloc((void **)&ph_sec,(size_t)(sizeof(unsigned long)*input->accurate),cudaHostAllocDefault); if (err != cudaSuccess) { fprintf(stderr,"GPU Pinned Mem.: %s\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } // Allocate err = cudaHostAlloc((void **)&ph_usec,(size_t)(sizeof(unsigned long)*input->accurate),cudaHostAllocDefault); if (err != cudaSuccess) { fprintf(stderr,"GPU Pinned Mem.: %s\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } // /*Allocate pinned memory and initalize it.*/ // err = cudaHostAlloc((void **)&d_array,(size_t)(sizeof(distr_var)*input->accurate),cudaHostAllocDefault); // if (err != cudaSuccess) // { // fprintf(stderr,"GPU MEM\n", cudaGetErrorString(err)); // exit(EXIT_FAILURE); // } // d_array=distribution_mix(input->globalmix,input->numofdistr,input->accurate); // ////////////////////////////////////////// // Allocate the seed.
err = cudaMalloc((void **)&devStates,(size_t)(input->accurate*sizeof(curandState))); if (err != cudaSuccess) { fprintf(stderr,"GPU States MEM: %s\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } // Allocate data array err = cudaMalloc((void **)&d_array,(size_t)(sizeof(distr_var)*input->accurate)); if (err != cudaSuccess) { fprintf(stderr,"GPU MEM: %s\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } // Allocate the seconds array err = cudaMalloc((void **)&sec,(size_t)(sizeof(unsigned long)*input->accurate)); if (err != cudaSuccess) { fprintf(stderr,"GPU MEM: %s\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } // Allocate the useconds array err = cudaMalloc((void **)&usec,(size_t)(sizeof(unsigned long)*input->accurate)); if (err != cudaSuccess) { fprintf(stderr,"GPU MEM: %s\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } // Memory copy the data array err = cudaMemcpy(d_array, array, (size_t)(sizeof(distr_var)*input->accurate), cudaMemcpyHostToDevice); if (err != cudaSuccess) { fprintf(stderr,"GPU in MEMCPY: %s\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } // Initialize the kernel and calculate the blocks and threads per block blocksPerGrid=(input->accurate + threadsPerBlock - 1) / threadsPerBlock; printf("CUDA kernel launch with %d blocks of %d threads\n", blocksPerGrid, threadsPerBlock); cudaEventRecord(start, 0); // start the timer. super_kernel<<<blocksPerGrid,threadsPerBlock>>>(devStates,ph_sec,ph_usec,d_array,input->accurate); err = cudaGetLastError(); if (err != cudaSuccess) { fprintf(stderr, "Failed to launch super_kernel (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } cudaEventRecord(stop, 0); // stop the timer cudaEventSynchronize(stop); // sync the event in order to stop. cudaEventElapsedTime(&run_time, start, stop); // calculate the time printf ("Time for the kernel: %f ms\n", run_time); // Retrieve data from the gpu. // Seconds section err = cudaMemcpy(h_sec,sec,(size_t)(sizeof(unsigned long)*input->accurate),cudaMemcpyDeviceToHost); if (err != cudaSuccess) { fprintf(stderr,"GPU out MEMCPY: %s\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } // USeconds section err = cudaMemcpy(h_usec,usec,(size_t)(sizeof(unsigned long)*input->accurate),cudaMemcpyDeviceToHost); if (err != cudaSuccess) { fprintf(stderr,"GPU out MEMCPY: %s\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } /* Print the Results for debug */ #ifdef PRINT_RESULT_GPU for(i=0;i<input->accurate;i++) { //printf("%4i) sec:%4li usec:%6li\n",i,h_sec[i],h_usec[i]); printf("%6li,\n",ph_usec[i]); fflush(stdout); } #endif /* Free Memory Section */ // GPU cudaFree(d_array); cudaFree(d_Time_results); cudaFree(devStates); cudaFree(sec); cudaFree(usec); cudaFreeHost(ph_sec); cudaFreeHost(ph_usec); // CPU free(array); free(Time_results); free(h_sec); free(h_usec); free(input->globalmix); free(input); return 0; }
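// A small checking macro (a sketch, not used by the pair above) that folds the
// file's repeated error tests into one line and records where the failure
// happened; it relies only on the standard hipGetErrorString().
#include <cstdio>
#include <cstdlib>
#include <hip/hip_runtime.h>

#define HIP_CHECK(call)                                           \
    do {                                                          \
        hipError_t err_ = (call);                                 \
        if (err_ != hipSuccess) {                                 \
            fprintf(stderr, "%s:%d: %s\n", __FILE__, __LINE__,    \
                    hipGetErrorString(err_));                     \
            exit(EXIT_FAILURE);                                   \
        }                                                         \
    } while (0)

// usage: HIP_CHECK(hipMalloc((void **)&sec, sizeof(unsigned long) * n));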
bfcc7ef375fe73365c962cc39da155888c613ff8.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <hip/hip_runtime.h> #include <chrono> #define P1 55 #define P2 119 #define P3 179 #define P4 256 #define LWDR 32 #define LKNB 8 typedef unsigned int uint32_t; void LFIB4(uint32_t n, uint32_t *x) { for (uint32_t k = P4; k < n; k++) { x[k] = x[k - P1] + x[k - P2] + x[k - P3] + x[k - P4]; } } __global__ void firstColGPU(uint32_t *x, int s) { __shared__ uint32_t cx[2 * P4]; uint32_t *px = &cx[P4]; int myid = threadIdx.x; cx[myid] = x[myid]; __syncthreads(); for (int k = 1; k < s / P4; k++) { for (int i = 0; i < P4; i += LWDR) { if (myid < LWDR) { px[i + myid] = px[i + myid - P1] + px[i + myid - P2] + px[i + myid - P3] + px[i + myid - P4]; } __syncthreads(); } x[k * P4 + myid] = cx[myid] = px[myid]; __syncthreads(); } } __global__ void colYGPU(uint32_t *y, int s) { __shared__ uint32_t cy[3 * P4]; uint32_t *ay = &cy[P4 * 2]; int myid = threadIdx.x; ay[myid] = y[2 * P4 + myid]; __syncthreads(); for (int k = 0; k < s / P4; k++) { cy[myid] = cy[myid + P4]; cy[myid + P4] = ay[myid]; __syncthreads(); for (int i = 0; i < P4; i += LWDR) { if (myid < LWDR) { ay[i + myid] = ay[i + myid - P1] + ay[i + myid - P2] + ay[i + myid - P3] + ay[i + myid - P4]; } __syncthreads(); } } y[2 * P4 + myid] = cy[2 * P4 + myid]; y[P4 + myid] = cy[P4 + myid]; y[myid] = cy[myid]; } __global__ void lastEntGPU(uint32_t *__restrict__ x, uint32_t *__restrict__ y, int s, int r) { __shared__ uint32_t a0[3 * P4]; __shared__ uint32_t b0[2 * P4]; __shared__ uint32_t c0[2 * P4]; __shared__ uint32_t d0[2 * P4]; uint32_t *a = a0 + P4; uint32_t *b = b0 + P4; uint32_t *c = c0 + P4; uint32_t *d = d0 + P4; int myid = threadIdx.x; a0[myid] = y[myid]; __syncthreads(); if (myid < P4) a0[myid + P4 * 2] = y[myid + P4 * 2]; __syncthreads(); d0[myid] = c0[myid] = b0[myid] = a[myid]; __syncthreads(); b[myid - P4] += a[-(P4 - P3) + myid]; __syncthreads(); c[myid - P4] += (a[-(P3 - P2) + myid] + a[-(P4 - P2) + myid]); __syncthreads(); d[myid - P4] += (a[-(P2 - P1) + myid] + a[-(P3 - P1) + myid] + a[-(P4 - P1) + myid]); __syncthreads(); a += P4; for (int i = 1; i < r; i++) { uint32_t *xc = &x[i * s]; uint32_t tmp = 0; if (myid < P4) { for (int k = 0; k < P4 - P3; k++) tmp += xc[-P4 + k] * a[myid - k]; for (int k = 0; k < P3 - P2; k++) tmp += xc[-P3 + k] * b[myid - k]; for (int k = 0; k < P2 - P1; k++) tmp += xc[-P2 + k] * c[myid - k]; for (int k = 0; k < P1; k++) tmp += xc[-P1 + k] * d[myid - k]; xc[s - P4 + myid] = tmp; } __syncthreads(); } } __global__ void colsGPU(uint32_t *x, int s, int r) { int k0 = blockIdx.x * LKNB; // int k1 = threadIdx.x / LWDR; // int k2 = threadIdx.x % LWDR; // __shared__ uint32_t cx[LKNB][2 * P4]; int fcol = (blockIdx.x == 0) ? 1 : 0; int ecol = (blockIdx.x == gridDim.x - 1 && r % LKNB) ? 
r % LKNB : LKNB; for (int i = fcol; i < ecol; i++) cx[i][threadIdx.x] = x[(k0 + i) * s - P4 + threadIdx.x]; __syncthreads(); uint32_t *pcx = &cx[k1][P4]; for (int k = 0; k < s / P4 - 1; k++) { for (int i = 0; i < P4; i += LWDR) { if (!(blockIdx.x == 0 && threadIdx.x == 0) && !(blockIdx.x == gridDim.x - 1 && k1 >= ecol)) pcx[i + k2] = pcx[i + k2 - P1] + pcx[i + k2 - P2] + pcx[i + k2 - P3] + pcx[i + k2 - P4]; __syncthreads(); } for (int i = fcol; i < ecol; i++) x[(k0 + i) * s + k * P4 + threadIdx.x] = cx[i][threadIdx.x] = cx[i][P4 + threadIdx.x]; __syncthreads(); } } void gLFIB4(uint32_t n, uint32_t *x, int s, int r, uint32_t *seed) { hipMemcpy(x, seed, sizeof(uint32_t) * P4, hipMemcpyHostToDevice); uint32_t *y; uint32_t one = 1; hipMalloc((void **) &y, sizeof(uint32_t) * 3 * P4); hipMemset(y + P4 * 2, 0, P4 * sizeof(uint32_t)); hipMemcpy(y + P4 * 2, &one, sizeof(uint32_t), hipMemcpyHostToDevice); hipStream_t cstr1; hipStream_t cstr2; hipStreamCreate(&cstr1); hipStreamCreate(&cstr2); hipLaunchKernelGGL(firstColGPU, 1, P4, 0, cstr1, x, s); hipLaunchKernelGGL(colYGPU, 1, P4, 0, cstr2, y, s); hipStreamSynchronize(cstr1); hipStreamSynchronize(cstr2); hipLaunchKernelGGL(lastEntGPU, 1, 2 * P4, 0, 0, x, y, s, r); hipLaunchKernelGGL(( colsGPU), dim3(r / LKNB + (r % LKNB ? 1 : 0)), dim3(P4), 0, 0, x, s, r); hipStreamDestroy(cstr1); hipStreamDestroy(cstr2); hipFree(y); } int main(int argc, char**argv) { if (argc < 2) { printf("Usage: ./main <n>\n"); return 1; } uint32_t n = atoi(argv[1]); srand(1234); uint32_t *x = (uint32_t*) malloc(n * sizeof(uint32_t)); for (uint32_t r = 16; r <= 4096; r = r * 2) { uint32_t s = 0; if (s == 0) { s = n / r; s -= (s % 256 == 0 ? 0 : s % 256); while (s * r < n) r++; } printf("n=%d r=%d s=%d\n", n, r, s); uint32_t *z = (uint32_t*) malloc(r * s * sizeof(uint32_t)); for (uint32_t k = 0; k < P4; k++) x[k] = z[k] = rand(); // compute on the host auto start = std::chrono::steady_clock::now(); LFIB4(n, x); auto end = std::chrono::steady_clock::now(); std::chrono::duration<float> host_time = end - start; // compute on the device uint32_t *x_d; hipMalloc((void **) &x_d, sizeof(uint32_t) * r * s); start = std::chrono::steady_clock::now(); gLFIB4(n, x_d, s, r, z); end = std::chrono::steady_clock::now(); std::chrono::duration<float> device_time = end - start; printf("r = %d | host time = %lf | device time = %lf | speedup = %.1f ", r, host_time.count(), device_time.count(), host_time.count() / device_time.count()); // Verify hipMemcpy(z, x_d, sizeof(uint32_t) * n, hipMemcpyDeviceToHost); bool ok = true; for (uint32_t i = 0; i < n; i++) { if (x[i] != z[i]) { ok = false; break; } } printf("check = %s\n", ok ? "PASS" : "FAIL"); free(z); hipFree(x_d); } free(x); return 0; }
bfcc7ef375fe73365c962cc39da155888c613ff8.cu
#include <stdio.h> #include <hip/hip_runtime.h> #include <chrono> #define P1 55 #define P2 119 #define P3 179 #define P4 256 #define LWDR 32 #define LKNB 8 typedef unsigned int uint32_t; void LFIB4(uint32_t n, uint32_t *x) { for (uint32_t k = P4; k < n; k++) { x[k] = x[k - P1] + x[k - P2] + x[k - P3] + x[k - P4]; } } __global__ void firstColGPU(uint32_t *x, int s) { __shared__ uint32_t cx[2 * P4]; uint32_t *px = &cx[P4]; int myid = threadIdx.x; cx[myid] = x[myid]; __syncthreads(); for (int k = 1; k < s / P4; k++) { for (int i = 0; i < P4; i += LWDR) { if (myid < LWDR) { px[i + myid] = px[i + myid - P1] + px[i + myid - P2] + px[i + myid - P3] + px[i + myid - P4]; } __syncthreads(); } x[k * P4 + myid] = cx[myid] = px[myid]; __syncthreads(); } } __global__ void colYGPU(uint32_t *y, int s) { __shared__ uint32_t cy[3 * P4]; uint32_t *ay = &cy[P4 * 2]; int myid = threadIdx.x; ay[myid] = y[2 * P4 + myid]; __syncthreads(); for (int k = 0; k < s / P4; k++) { cy[myid] = cy[myid + P4]; cy[myid + P4] = ay[myid]; __syncthreads(); for (int i = 0; i < P4; i += LWDR) { if (myid < LWDR) { ay[i + myid] = ay[i + myid - P1] + ay[i + myid - P2] + ay[i + myid - P3] + ay[i + myid - P4]; } __syncthreads(); } } y[2 * P4 + myid] = cy[2 * P4 + myid]; y[P4 + myid] = cy[P4 + myid]; y[myid] = cy[myid]; } __global__ void lastEntGPU(uint32_t *__restrict__ x, uint32_t *__restrict__ y, int s, int r) { __shared__ uint32_t a0[3 * P4]; __shared__ uint32_t b0[2 * P4]; __shared__ uint32_t c0[2 * P4]; __shared__ uint32_t d0[2 * P4]; uint32_t *a = a0 + P4; uint32_t *b = b0 + P4; uint32_t *c = c0 + P4; uint32_t *d = d0 + P4; int myid = threadIdx.x; a0[myid] = y[myid]; __syncthreads(); if (myid < P4) a0[myid + P4 * 2] = y[myid + P4 * 2]; __syncthreads(); d0[myid] = c0[myid] = b0[myid] = a[myid]; __syncthreads(); b[myid - P4] += a[-(P4 - P3) + myid]; __syncthreads(); c[myid - P4] += (a[-(P3 - P2) + myid] + a[-(P4 - P2) + myid]); __syncthreads(); d[myid - P4] += (a[-(P2 - P1) + myid] + a[-(P3 - P1) + myid] + a[-(P4 - P1) + myid]); __syncthreads(); a += P4; for (int i = 1; i < r; i++) { uint32_t *xc = &x[i * s]; uint32_t tmp = 0; if (myid < P4) { for (int k = 0; k < P4 - P3; k++) tmp += xc[-P4 + k] * a[myid - k]; for (int k = 0; k < P3 - P2; k++) tmp += xc[-P3 + k] * b[myid - k]; for (int k = 0; k < P2 - P1; k++) tmp += xc[-P2 + k] * c[myid - k]; for (int k = 0; k < P1; k++) tmp += xc[-P1 + k] * d[myid - k]; xc[s - P4 + myid] = tmp; } __syncthreads(); } } __global__ void colsGPU(uint32_t *x, int s, int r) { int k0 = blockIdx.x * LKNB; // int k1 = threadIdx.x / LWDR; // int k2 = threadIdx.x % LWDR; // __shared__ uint32_t cx[LKNB][2 * P4]; int fcol = (blockIdx.x == 0) ? 1 : 0; int ecol = (blockIdx.x == gridDim.x - 1 && r % LKNB) ? 
r % LKNB : LKNB; for (int i = fcol; i < ecol; i++) cx[i][threadIdx.x] = x[(k0 + i) * s - P4 + threadIdx.x]; __syncthreads(); uint32_t *pcx = &cx[k1][P4]; for (int k = 0; k < s / P4 - 1; k++) { for (int i = 0; i < P4; i += LWDR) { if (!(blockIdx.x == 0 && threadIdx.x == 0) && !(blockIdx.x == gridDim.x - 1 && k1 >= ecol)) pcx[i + k2] = pcx[i + k2 - P1] + pcx[i + k2 - P2] + pcx[i + k2 - P3] + pcx[i + k2 - P4]; __syncthreads(); } for (int i = fcol; i < ecol; i++) x[(k0 + i) * s + k * P4 + threadIdx.x] = cx[i][threadIdx.x] = cx[i][P4 + threadIdx.x]; __syncthreads(); } } void gLFIB4(uint32_t n, uint32_t *x, int s, int r, uint32_t *seed) { hipMemcpy(x, seed, sizeof(uint32_t) * P4, hipMemcpyHostToDevice); uint32_t *y; uint32_t one = 1; hipMalloc((void **) &y, sizeof(uint32_t) * 3 * P4); hipMemset(y + P4 * 2, 0, P4 * sizeof(uint32_t)); hipMemcpy(y + P4 * 2, &one, sizeof(uint32_t), hipMemcpyHostToDevice); hipStream_t cstr1; hipStream_t cstr2; hipStreamCreate(&cstr1); hipStreamCreate(&cstr2); hipLaunchKernelGGL(firstColGPU, 1, P4, 0, cstr1, x, s); hipLaunchKernelGGL(colYGPU, 1, P4, 0, cstr2, y, s); hipStreamSynchronize(cstr1); hipStreamSynchronize(cstr2); hipLaunchKernelGGL(lastEntGPU, 1, 2 * P4, 0, 0, x, y, s, r); colsGPU<<<r / LKNB + (r % LKNB ? 1 : 0), P4>>>(x, s, r); hipStreamDestroy(cstr1); hipStreamDestroy(cstr2); hipFree(y); } int main(int argc, char**argv) { if (argc < 2) { printf("Usage: ./main <n>\n"); return 1; } uint32_t n = atoi(argv[1]); srand(1234); uint32_t *x = (uint32_t*) malloc(n * sizeof(uint32_t)); for (uint32_t r = 16; r <= 4096; r = r * 2) { uint32_t s = 0; if (s == 0) { s = n / r; s -= (s % 256 == 0 ? 0 : s % 256); while (s * r < n) r++; } printf("n=%d r=%d s=%d\n", n, r, s); uint32_t *z = (uint32_t*) malloc(r * s * sizeof(uint32_t)); for (uint32_t k = 0; k < P4; k++) x[k] = z[k] = rand(); // compute on the host auto start = std::chrono::steady_clock::now(); LFIB4(n, x); auto end = std::chrono::steady_clock::now(); std::chrono::duration<float> host_time = end - start; // compute on the device uint32_t *x_d; hipMalloc((void **) &x_d, sizeof(uint32_t) * r * s); start = std::chrono::steady_clock::now(); gLFIB4(n, x_d, s, r, z); end = std::chrono::steady_clock::now(); std::chrono::duration<float> device_time = end - start; printf("r = %d | host time = %lf | device time = %lf | speedup = %.1f ", r, host_time.count(), device_time.count(), host_time.count() / device_time.count()); // Verify hipMemcpy(z, x_d, sizeof(uint32_t) * n, hipMemcpyDeviceToHost); bool ok = true; for (uint32_t i = 0; i < n; i++) { if (x[i] != z[i]) { ok = false; break; } } printf("check = %s\n", ok ? "PASS" : "FAIL"); free(z); hipFree(x_d); } free(x); return 0; }
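// A sequential ring-buffer sketch of the additive lagged-Fibonacci recurrence
// used above, x[k] = x[k-55] + x[k-119] + x[k-179] + x[k-256] (mod 2^32 via
// uint32_t wraparound); seeded with the same 256 words x[0..255] in h, it
// reproduces the stream of LFIB4().
#include <cstdint>

struct LFib4 {
    uint32_t h[256];     // h[k & 255] holds x[k] for the most recent 256 indices
    uint32_t k = 256;    // index of the next value to produce
    uint32_t next() {
        uint32_t v = h[(k - 55) & 255] + h[(k - 119) & 255] +
                     h[(k - 179) & 255] + h[(k - 256) & 255];
        h[k & 255] = v;  // x[k-256] occupies this slot and is read just before the overwrite
        ++k;
        return v;
    }
};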
4541ec1c2f942a2457637b6fdd45aaefce019fed.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include "hip/hip_runtime.h" #define max(x,y) ((x) > (y)? (x) : (y)) #define min(x,y) ((x) < (y)? (x) : (y)) #define ceil(a,b) ((a) % (b) == 0 ? (a) / (b) : ((a) / (b)) + 1) void check_error (const char* message) { hipError_t error = hipGetLastError (); if (error != hipSuccess) { printf ("CUDA error : %s, %s\n", message, hipGetErrorString (error)); exit(-1); } } __global__ void hypterm (double * __restrict__ flux_in_0, double * __restrict__ flux_in_1, double * __restrict__ flux_in_2, double * __restrict__ flux_in_3, double * __restrict__ flux_in_4, double * __restrict__ cons_in_1, double * __restrict__ cons_in_2, double * __restrict__ cons_in_3, double * __restrict__ cons_in_4, double * __restrict__ q_in_1, double * __restrict__ q_in_2, double * __restrict__ q_in_3, double * __restrict__ q_in_4, double dxinv0, double dxinv1, double dxinv2, int L, int M, int N) { //Determing the block's indices int blockdim_i= (int)(blockDim.x); int i0 = (int)(blockIdx.x)*(blockdim_i); int i = max (i0, 0) + (int)(threadIdx.x); int blockdim_j= (int)(blockDim.y); int j0 = (int)(blockIdx.y)*(blockdim_j); int j = max (j0, 0) + (int)(threadIdx.y); int blockdim_k= (int)(blockDim.z); int k0 = (int)(blockIdx.z)*(blockdim_k); int k = max (k0, 0) + (int)(threadIdx.z); double (*flux_0)[308][308] = (double (*)[308][308])flux_in_0; double (*flux_1)[308][308] = (double (*)[308][308])flux_in_1; double (*flux_2)[308][308] = (double (*)[308][308])flux_in_2; double (*flux_3)[308][308] = (double (*)[308][308])flux_in_3; double (*flux_4)[308][308] = (double (*)[308][308])flux_in_4; double (*q_1)[308][308] = (double (*)[308][308])q_in_1; double (*q_2)[308][308] = (double (*)[308][308])q_in_2; double (*q_3)[308][308] = (double (*)[308][308])q_in_3; double (*q_4)[308][308] = (double (*)[308][308])q_in_4; double (*cons_1)[308][308] = (double (*)[308][308])cons_in_1; double (*cons_2)[308][308] = (double (*)[308][308])cons_in_2; double (*cons_3)[308][308] = (double (*)[308][308])cons_in_3; double (*cons_4)[308][308] = (double (*)[308][308])cons_in_4; if (i>=4 & j>=4 & k>=4 & i<=N-5 & j<=N-5 & k<=N-5) { double _t_1_ = cons_1[k][j][i+1]; _t_1_ -= cons_1[k][j][i-1]; double _t_0_ = 0.8 * _t_1_; double _t_2_ = cons_1[k][j][i+2]; _t_2_ -= cons_1[k][j][i-2]; _t_0_ -= 0.2 * _t_2_; double _t_3_ = cons_1[k][j][i+3]; _t_3_ -= cons_1[k][j][i-3]; _t_0_ += 0.038 * _t_3_; double _t_4_ = cons_1[k][j][i+4]; _t_4_ -= cons_1[k][j][i-4]; _t_0_ -= 0.0035 * _t_4_; double flux_0kc0jc0ic0 = _t_0_ * dxinv0; double _t_6_ = cons_1[k][j][i+1] * q_1[k][j][i+1]; _t_6_ -= cons_1[k][j][i-1] * q_1[k][j][i-1]; _t_6_ += q_4[k][j][i+1]; _t_6_ -= q_4[k][j][i-1]; double _t_5_ = 0.8 * _t_6_; double _t_7_ = cons_1[k][j][i+2] * q_1[k][j][i+2]; _t_7_ -= cons_1[k][j][i-2] * q_1[k][j][i-2]; _t_7_ += q_4[k][j][i+2]; _t_7_ -= q_4[k][j][i-2]; _t_5_ -= 0.2 * _t_7_; double _t_8_ = cons_1[k][j][i+3] * q_1[k][j][i+3]; _t_8_ -= cons_1[k][j][i-3] * q_1[k][j][i-3]; _t_8_ += q_4[k][j][i+3]; _t_8_ -= q_4[k][j][i-3]; _t_5_ += 0.038 * _t_8_; double _t_9_ = cons_1[k][j][i+4] * q_1[k][j][i+4]; _t_9_ -= cons_1[k][j][i-4] * q_1[k][j][i-4]; _t_9_ += q_4[k][j][i+4]; _t_9_ -= q_4[k][j][i-4]; _t_5_ -= 0.0035 * _t_9_; double flux_1kc0jc0ic0 = _t_5_ * dxinv0; double _t_11_ = cons_2[k][j][i+1] * q_1[k][j][i+1]; _t_11_ -= cons_2[k][j][i-1] * q_1[k][j][i-1]; double _t_10_ = 0.8 * _t_11_; double _t_12_ = cons_2[k][j][i+2] * q_1[k][j][i+2]; _t_12_ -= cons_2[k][j][i-2] * q_1[k][j][i-2]; _t_10_ -= 0.2 * _t_12_; double _t_13_ = 
cons_2[k][j][i+3] * q_1[k][j][i+3]; _t_13_ -= cons_2[k][j][i-3] * q_1[k][j][i-3]; _t_10_ += 0.038 * _t_13_; double _t_14_ = cons_2[k][j][i+4] * q_1[k][j][i+4]; _t_14_ -= cons_2[k][j][i-4] * q_1[k][j][i-4]; _t_10_ -= 0.0035 * _t_14_; double flux_2kc0jc0ic0 = _t_10_ * dxinv0; double _t_16_ = cons_3[k][j][i+1] * q_1[k][j][i+1]; _t_16_ -= cons_3[k][j][i-1] * q_1[k][j][i-1]; double _t_15_ = 0.8 * _t_16_; double _t_17_ = cons_3[k][j][i+2] * q_1[k][j][i+2]; _t_17_ -= cons_3[k][j][i-2] * q_1[k][j][i-2]; _t_15_ -= 0.2 * _t_17_; double _t_18_ = cons_3[k][j][i+3] * q_1[k][j][i+3]; _t_18_ -= cons_3[k][j][i-3] * q_1[k][j][i-3]; _t_15_ += 0.038 * _t_18_; double _t_19_ = cons_3[k][j][i+4] * q_1[k][j][i+4]; _t_19_ -= cons_3[k][j][i-4] * q_1[k][j][i-4]; _t_15_ -= 0.0035 * _t_19_; double flux_3kc0jc0ic0 = _t_15_ * dxinv0; double _t_21_ = q_4[k][j][i+1] * q_1[k][j][i+1]; double _v_24_ = cons_4[k][j][i+1] * q_1[k][j][i+1]; _t_21_ += _v_24_; _t_21_ -= cons_4[k][j][i-1] * q_1[k][j][i-1]; double _v_27_ = q_4[k][j][i-1] * q_1[k][j][i-1]; _t_21_ -= _v_27_; double _t_20_ = 0.8 * _t_21_; double _t_22_ = q_4[k][j][i+2] * q_1[k][j][i+2]; double _v_28_ = cons_4[k][j][i+2] * q_1[k][j][i+2]; _t_22_ += _v_28_; _t_22_ -= cons_4[k][j][i-2] * q_1[k][j][i-2]; double _v_31_ = q_4[k][j][i-2] * q_1[k][j][i-2]; _t_22_ -= _v_31_; _t_20_ -= 0.2 * _t_22_; double _t_23_ = q_4[k][j][i+3] * q_1[k][j][i+3]; double _v_32_ = cons_4[k][j][i+3] * q_1[k][j][i+3]; _t_23_ += _v_32_; _t_23_ -= cons_4[k][j][i-3] * q_1[k][j][i-3]; double _v_35_ = q_4[k][j][i-3] * q_1[k][j][i-3]; _t_23_ -= _v_35_; _t_20_ += 0.038 * _t_23_; double _t_24_ = q_4[k][j][i+4] * q_1[k][j][i+4]; double _v_36_ = cons_4[k][j][i+4] * q_1[k][j][i+4]; _t_24_ += _v_36_; _t_24_ -= cons_4[k][j][i-4] * q_1[k][j][i-4]; double _v_39_ = q_4[k][j][i-4] * q_1[k][j][i-4]; _t_24_ -= _v_39_; _t_20_ -= 0.0035 * _t_24_; double flux_4kc0jc0ic0 = _t_20_ * dxinv0; double _t_27_ = cons_2[k][j+1][i]; _t_27_ -= cons_2[k][j-1][i]; double _t_26_ = 0.8 * _t_27_; double _t_28_ = cons_2[k][j+2][i]; _t_28_ -= cons_2[k][j-2][i]; _t_26_ -= 0.2 * _t_28_; double _t_29_ = cons_2[k][j+3][i]; _t_29_ -= cons_2[k][j-3][i]; _t_26_ += 0.038 * _t_29_; double _t_30_ = cons_2[k][j+4][i]; _t_30_ -= cons_2[k][j-4][i]; _t_26_ -= 0.0035 * _t_30_; flux_0kc0jc0ic0 -= _t_26_ * dxinv1; double _t_33_ = cons_1[k][j+1][i] * q_2[k][j+1][i]; _t_33_ -= cons_1[k][j-1][i] * q_2[k][j-1][i]; double _t_32_ = 0.8 * _t_33_; double _t_34_ = cons_1[k][j+2][i] * q_2[k][j+2][i]; _t_34_ -= cons_1[k][j-2][i] * q_2[k][j-2][i]; _t_32_ -= 0.2 * _t_34_; double _t_35_ = cons_1[k][j+3][i] * q_2[k][j+3][i]; _t_35_ -= cons_1[k][j-3][i] * q_2[k][j-3][i]; _t_32_ += 0.038 * _t_35_; double _t_36_ = cons_1[k][j+4][i] * q_2[k][j+4][i]; _t_36_ -= cons_1[k][j-4][i] * q_2[k][j-4][i]; _t_32_ -= 0.0035 * _t_36_; flux_1kc0jc0ic0 -= _t_32_ * dxinv1; double _t_39_ = cons_2[k][j+1][i] * q_2[k][j+1][i]; _t_39_ -= cons_2[k][j-1][i] * q_2[k][j-1][i]; _t_39_ += q_4[k][j+1][i]; _t_39_ -= q_4[k][j-1][i]; double _t_38_ = 0.8 * _t_39_; double _t_40_ = cons_2[k][j+2][i] * q_2[k][j+2][i]; _t_40_ -= cons_2[k][j-2][i] * q_2[k][j-2][i]; _t_40_ += q_4[k][j+2][i]; _t_40_ -= q_4[k][j-2][i]; _t_38_ -= 0.2 * _t_40_; double _t_41_ = cons_2[k][j+3][i] * q_2[k][j+3][i]; _t_41_ -= cons_2[k][j-3][i] * q_2[k][j-3][i]; _t_41_ += q_4[k][j+3][i]; _t_41_ -= q_4[k][j-3][i]; _t_38_ += 0.038 * _t_41_; double _t_42_ = cons_2[k][j+4][i] * q_2[k][j+4][i]; _t_42_ -= cons_2[k][j-4][i] * q_2[k][j-4][i]; _t_42_ += q_4[k][j+4][i]; _t_42_ -= q_4[k][j-4][i]; _t_38_ -= 0.0035 * _t_42_; flux_2kc0jc0ic0 -= 
_t_38_ * dxinv1; double _t_45_ = cons_3[k][j+1][i] * q_2[k][j+1][i]; _t_45_ -= cons_3[k][j-1][i] * q_2[k][j-1][i]; double _t_44_ = 0.8 * _t_45_; double _t_46_ = cons_3[k][j+2][i] * q_2[k][j+2][i]; _t_46_ -= cons_3[k][j-2][i] * q_2[k][j-2][i]; _t_44_ -= 0.2 * _t_46_; double _t_47_ = cons_3[k][j+3][i] * q_2[k][j+3][i]; _t_47_ -= cons_3[k][j-3][i] * q_2[k][j-3][i]; _t_44_ += 0.038 * _t_47_; double _t_48_ = cons_3[k][j+4][i] * q_2[k][j+4][i]; _t_48_ -= cons_3[k][j-4][i] * q_2[k][j-4][i]; _t_44_ -= 0.0035 * _t_48_; flux_3kc0jc0ic0 -= _t_44_ * dxinv1; double _t_51_ = q_4[k][j+1][i] * q_2[k][j+1][i]; double _v_64_ = cons_4[k][j+1][i] * q_2[k][j+1][i]; _t_51_ += _v_64_; _t_51_ -= cons_4[k][j-1][i] * q_2[k][j-1][i]; double _v_67_ = q_4[k][j-1][i] * q_2[k][j-1][i]; _t_51_ -= _v_67_; double _t_50_ = 0.8 * _t_51_; double _t_52_ = q_4[k][j+2][i] * q_2[k][j+2][i]; double _v_68_ = cons_4[k][j+2][i] * q_2[k][j+2][i]; _t_52_ += _v_68_; _t_52_ -= cons_4[k][j-2][i] * q_2[k][j-2][i]; double _v_71_ = q_4[k][j-2][i] * q_2[k][j-2][i]; _t_52_ -= _v_71_; _t_50_ -= 0.2 * _t_52_; double _t_53_ = q_4[k][j+3][i] * q_2[k][j+3][i]; double _v_72_ = cons_4[k][j+3][i] * q_2[k][j+3][i]; _t_53_ += _v_72_; _t_53_ -= cons_4[k][j-3][i] * q_2[k][j-3][i]; double _v_75_ = q_4[k][j-3][i] * q_2[k][j-3][i]; _t_53_ -= _v_75_; _t_50_ += 0.038 * _t_53_; double _t_54_ = q_4[k][j+4][i] * q_2[k][j+4][i]; double _v_76_ = cons_4[k][j+4][i] * q_2[k][j+4][i]; _t_54_ += _v_76_; _t_54_ -= cons_4[k][j-4][i] * q_2[k][j-4][i]; double _v_79_ = q_4[k][j-4][i] * q_2[k][j-4][i]; _t_54_ -= _v_79_; _t_50_ -= 0.0035 * _t_54_; flux_4kc0jc0ic0 -= _t_50_ * dxinv1; double _t_57_ = cons_3[k+1][j][i]; _t_57_ -= cons_3[k-1][j][i]; double _t_56_ = 0.8 * _t_57_; double _t_58_ = cons_3[k+2][j][i]; _t_58_ -= cons_3[k-2][j][i]; _t_56_ -= 0.2 * _t_58_; double _t_59_ = cons_3[k+3][j][i]; _t_59_ -= cons_3[k-3][j][i]; _t_56_ += 0.038 * _t_59_; double _t_60_ = cons_3[k+4][j][i]; _t_60_ -= cons_3[k-4][j][i]; _t_56_ -= 0.0035 * _t_60_; flux_0kc0jc0ic0 -= _t_56_ * dxinv2; double _t_63_ = cons_1[k+1][j][i] * q_3[k+1][j][i]; _t_63_ -= cons_1[k-1][j][i] * q_3[k-1][j][i]; double _t_62_ = 0.8 * _t_63_; double _t_64_ = cons_1[k+2][j][i] * q_3[k+2][j][i]; _t_64_ -= cons_1[k-2][j][i] * q_3[k-2][j][i]; _t_62_ -= 0.2 * _t_64_; double _t_65_ = cons_1[k+3][j][i] * q_3[k+3][j][i]; _t_65_ -= cons_1[k-3][j][i] * q_3[k-3][j][i]; _t_62_ += 0.038 * _t_65_; double _t_66_ = cons_1[k+4][j][i] * q_3[k+4][j][i]; _t_66_ -= cons_1[k-4][j][i] * q_3[k-4][j][i]; _t_62_ -= 0.0035 * _t_66_; flux_1kc0jc0ic0 -= _t_62_ * dxinv2; double _t_69_ = cons_2[k+1][j][i] * q_3[k+1][j][i]; _t_69_ -= cons_2[k-1][j][i] * q_3[k-1][j][i]; double _t_68_ = 0.8 * _t_69_; double _t_70_ = cons_2[k+2][j][i] * q_3[k+2][j][i]; _t_70_ -= cons_2[k-2][j][i] * q_3[k-2][j][i]; _t_68_ -= 0.2 * _t_70_; double _t_71_ = cons_2[k+3][j][i] * q_3[k+3][j][i]; _t_71_ -= cons_2[k-3][j][i] * q_3[k-3][j][i]; _t_68_ += 0.038 * _t_71_; double _t_72_ = cons_2[k+4][j][i] * q_3[k+4][j][i]; _t_72_ -= cons_2[k-4][j][i] * q_3[k-4][j][i]; _t_68_ -= 0.0035 * _t_72_; flux_2kc0jc0ic0 -= _t_68_ * dxinv2; double _t_75_ = cons_3[k+1][j][i] * q_3[k+1][j][i]; _t_75_ -= cons_3[k-1][j][i] * q_3[k-1][j][i]; _t_75_ += q_4[k+1][j][i]; _t_75_ -= q_4[k-1][j][i]; double _t_74_ = 0.8 * _t_75_; double _t_76_ = cons_3[k+2][j][i] * q_3[k+2][j][i]; _t_76_ -= cons_3[k-2][j][i] * q_3[k-2][j][i]; _t_76_ += q_4[k+2][j][i]; _t_76_ -= q_4[k-2][j][i]; _t_74_ -= 0.2 * _t_76_; double _t_77_ = cons_3[k+3][j][i] * q_3[k+3][j][i]; _t_77_ -= cons_3[k-3][j][i] * q_3[k-3][j][i]; _t_77_ += 
q_4[k+3][j][i]; _t_77_ -= q_4[k-3][j][i]; _t_74_ += 0.038 * _t_77_; double _t_78_ = cons_3[k+4][j][i] * q_3[k+4][j][i]; _t_78_ -= cons_3[k-4][j][i] * q_3[k-4][j][i]; _t_78_ += q_4[k+4][j][i]; _t_78_ -= q_4[k-4][j][i]; _t_74_ -= 0.0035 * _t_78_; flux_3kc0jc0ic0 -= _t_74_ * dxinv2; double _t_81_ = q_4[k+1][j][i] * q_3[k+1][j][i]; double _v_104_ = cons_4[k+1][j][i] * q_3[k+1][j][i]; _t_81_ += _v_104_; _t_81_ -= cons_4[k-1][j][i] * q_3[k-1][j][i]; double _v_107_ = q_4[k-1][j][i] * q_3[k-1][j][i]; _t_81_ -= _v_107_; double _t_80_ = 0.8 * _t_81_; double _t_82_ = q_4[k+2][j][i] * q_3[k+2][j][i]; double _v_108_ = cons_4[k+2][j][i] * q_3[k+2][j][i]; _t_82_ += _v_108_; _t_82_ -= cons_4[k-2][j][i] * q_3[k-2][j][i]; double _v_111_ = q_4[k-2][j][i] * q_3[k-2][j][i]; _t_82_ -= _v_111_; _t_80_ -= 0.2 * _t_82_; double _t_83_ = q_4[k+3][j][i] * q_3[k+3][j][i]; double _v_112_ = cons_4[k+3][j][i] * q_3[k+3][j][i]; _t_83_ += _v_112_; _t_83_ -= cons_4[k-3][j][i] * q_3[k-3][j][i]; double _v_115_ = q_4[k-3][j][i] * q_3[k-3][j][i]; _t_83_ -= _v_115_; _t_80_ += 0.038 * _t_83_; double _t_84_ = q_4[k+4][j][i] * q_3[k+4][j][i]; double _v_116_ = cons_4[k+4][j][i] * q_3[k+4][j][i]; _t_84_ += _v_116_; _t_84_ -= cons_4[k-4][j][i] * q_3[k-4][j][i]; double _v_119_ = q_4[k-4][j][i] * q_3[k-4][j][i]; _t_84_ -= _v_119_; _t_80_ -= 0.0035 * _t_84_; flux_4kc0jc0ic0 -= _t_80_ * dxinv2; flux_0[k][j][i] = flux_0kc0jc0ic0; flux_1[k][j][i] = flux_1kc0jc0ic0; flux_2[k][j][i] = flux_2kc0jc0ic0; flux_3[k][j][i] = flux_3kc0jc0ic0; flux_4[k][j][i] = flux_4kc0jc0ic0; } } extern "C" void host_code (double *h_flux_0, double *h_flux_1, double *h_flux_2, double *h_flux_3, double *h_flux_4, double *h_cons_1, double *h_cons_2, double *h_cons_3, double *h_cons_4, double *h_q_1, double *h_q_2, double *h_q_3, double *h_q_4, double dxinv0, double dxinv1, double dxinv2, int L, int M, int N) { double *flux_0; hipMalloc (&flux_0, sizeof(double)*L*M*N); check_error ("Failed to allocate device memory for flux_0\n"); hipMemcpy (flux_0, h_flux_0, sizeof(double)*L*M*N, hipMemcpyHostToDevice); double *flux_1; hipMalloc (&flux_1, sizeof(double)*L*M*N); check_error ("Failed to allocate device memory for flux_1\n"); hipMemcpy (flux_1, h_flux_1, sizeof(double)*L*M*N, hipMemcpyHostToDevice); double *flux_2; hipMalloc (&flux_2, sizeof(double)*L*M*N); check_error ("Failed to allocate device memory for flux_2\n"); hipMemcpy (flux_2, h_flux_2, sizeof(double)*L*M*N, hipMemcpyHostToDevice); double *flux_3; hipMalloc (&flux_3, sizeof(double)*L*M*N); check_error ("Failed to allocate device memory for flux_3\n"); hipMemcpy (flux_3, h_flux_3, sizeof(double)*L*M*N, hipMemcpyHostToDevice); double *flux_4; hipMalloc (&flux_4, sizeof(double)*L*M*N); check_error ("Failed to allocate device memory for flux_4\n"); hipMemcpy (flux_4, h_flux_4, sizeof(double)*L*M*N, hipMemcpyHostToDevice); double *cons_1; hipMalloc (&cons_1, sizeof(double)*L*M*N); check_error ("Failed to allocate device memory for cons_1\n"); hipMemcpy (cons_1, h_cons_1, sizeof(double)*L*M*N, hipMemcpyHostToDevice); double *cons_2; hipMalloc (&cons_2, sizeof(double)*L*M*N); check_error ("Failed to allocate device memory for cons_2\n"); hipMemcpy (cons_2, h_cons_2, sizeof(double)*L*M*N, hipMemcpyHostToDevice); double *cons_3; hipMalloc (&cons_3, sizeof(double)*L*M*N); check_error ("Failed to allocate device memory for cons_3\n"); hipMemcpy (cons_3, h_cons_3, sizeof(double)*L*M*N, hipMemcpyHostToDevice); double *cons_4; hipMalloc (&cons_4, sizeof(double)*L*M*N); check_error ("Failed to allocate device memory for 
cons_4\n"); hipMemcpy (cons_4, h_cons_4, sizeof(double)*L*M*N, hipMemcpyHostToDevice); double *q_1; hipMalloc (&q_1, sizeof(double)*L*M*N); check_error ("Failed to allocate device memory for q_1\n"); hipMemcpy (q_1, h_q_1, sizeof(double)*L*M*N, hipMemcpyHostToDevice); double *q_2; hipMalloc (&q_2, sizeof(double)*L*M*N); check_error ("Failed to allocate device memory for q_2\n"); hipMemcpy (q_2, h_q_2, sizeof(double)*L*M*N, hipMemcpyHostToDevice); double *q_3; hipMalloc (&q_3, sizeof(double)*L*M*N); check_error ("Failed to allocate device memory for q_3\n"); hipMemcpy (q_3, h_q_3, sizeof(double)*L*M*N, hipMemcpyHostToDevice); double *q_4; hipMalloc (&q_4, sizeof(double)*L*M*N); check_error ("Failed to allocate device memory for q_4\n"); hipMemcpy (q_4, h_q_4, sizeof(double)*L*M*N, hipMemcpyHostToDevice); dim3 blockconfig (16, 4, 4); dim3 gridconfig (ceil(N, blockconfig.x), ceil(M, blockconfig.y), ceil(L, blockconfig.z)); hipLaunchKernelGGL(( hypterm) , dim3(gridconfig), dim3(blockconfig), 0, 0, flux_0, flux_1, flux_2, flux_3, flux_4, cons_1, cons_2, cons_3, cons_4, q_1, q_2, q_3, q_4, -dxinv0, dxinv1, dxinv2, L, M, N); hipMemcpy (h_flux_0, flux_0, sizeof(double)*L*M*N, hipMemcpyDeviceToHost); hipMemcpy (h_flux_1, flux_1, sizeof(double)*L*M*N, hipMemcpyDeviceToHost); hipMemcpy (h_flux_3, flux_3, sizeof(double)*L*M*N, hipMemcpyDeviceToHost); hipMemcpy (h_flux_4, flux_4, sizeof(double)*L*M*N, hipMemcpyDeviceToHost); hipMemcpy (h_flux_2, flux_2, sizeof(double)*L*M*N, hipMemcpyDeviceToHost); }
4541ec1c2f942a2457637b6fdd45aaefce019fed.cu
#include <stdio.h> #include "cuda.h" #define max(x,y) ((x) > (y)? (x) : (y)) #define min(x,y) ((x) < (y)? (x) : (y)) #define ceil(a,b) ((a) % (b) == 0 ? (a) / (b) : ((a) / (b)) + 1) void check_error (const char* message) { cudaError_t error = cudaGetLastError (); if (error != cudaSuccess) { printf ("CUDA error : %s, %s\n", message, cudaGetErrorString (error)); exit(-1); } } __global__ void hypterm (double * __restrict__ flux_in_0, double * __restrict__ flux_in_1, double * __restrict__ flux_in_2, double * __restrict__ flux_in_3, double * __restrict__ flux_in_4, double * __restrict__ cons_in_1, double * __restrict__ cons_in_2, double * __restrict__ cons_in_3, double * __restrict__ cons_in_4, double * __restrict__ q_in_1, double * __restrict__ q_in_2, double * __restrict__ q_in_3, double * __restrict__ q_in_4, double dxinv0, double dxinv1, double dxinv2, int L, int M, int N) { //Determing the block's indices int blockdim_i= (int)(blockDim.x); int i0 = (int)(blockIdx.x)*(blockdim_i); int i = max (i0, 0) + (int)(threadIdx.x); int blockdim_j= (int)(blockDim.y); int j0 = (int)(blockIdx.y)*(blockdim_j); int j = max (j0, 0) + (int)(threadIdx.y); int blockdim_k= (int)(blockDim.z); int k0 = (int)(blockIdx.z)*(blockdim_k); int k = max (k0, 0) + (int)(threadIdx.z); double (*flux_0)[308][308] = (double (*)[308][308])flux_in_0; double (*flux_1)[308][308] = (double (*)[308][308])flux_in_1; double (*flux_2)[308][308] = (double (*)[308][308])flux_in_2; double (*flux_3)[308][308] = (double (*)[308][308])flux_in_3; double (*flux_4)[308][308] = (double (*)[308][308])flux_in_4; double (*q_1)[308][308] = (double (*)[308][308])q_in_1; double (*q_2)[308][308] = (double (*)[308][308])q_in_2; double (*q_3)[308][308] = (double (*)[308][308])q_in_3; double (*q_4)[308][308] = (double (*)[308][308])q_in_4; double (*cons_1)[308][308] = (double (*)[308][308])cons_in_1; double (*cons_2)[308][308] = (double (*)[308][308])cons_in_2; double (*cons_3)[308][308] = (double (*)[308][308])cons_in_3; double (*cons_4)[308][308] = (double (*)[308][308])cons_in_4; if (i>=4 & j>=4 & k>=4 & i<=N-5 & j<=N-5 & k<=N-5) { double _t_1_ = cons_1[k][j][i+1]; _t_1_ -= cons_1[k][j][i-1]; double _t_0_ = 0.8 * _t_1_; double _t_2_ = cons_1[k][j][i+2]; _t_2_ -= cons_1[k][j][i-2]; _t_0_ -= 0.2 * _t_2_; double _t_3_ = cons_1[k][j][i+3]; _t_3_ -= cons_1[k][j][i-3]; _t_0_ += 0.038 * _t_3_; double _t_4_ = cons_1[k][j][i+4]; _t_4_ -= cons_1[k][j][i-4]; _t_0_ -= 0.0035 * _t_4_; double flux_0kc0jc0ic0 = _t_0_ * dxinv0; double _t_6_ = cons_1[k][j][i+1] * q_1[k][j][i+1]; _t_6_ -= cons_1[k][j][i-1] * q_1[k][j][i-1]; _t_6_ += q_4[k][j][i+1]; _t_6_ -= q_4[k][j][i-1]; double _t_5_ = 0.8 * _t_6_; double _t_7_ = cons_1[k][j][i+2] * q_1[k][j][i+2]; _t_7_ -= cons_1[k][j][i-2] * q_1[k][j][i-2]; _t_7_ += q_4[k][j][i+2]; _t_7_ -= q_4[k][j][i-2]; _t_5_ -= 0.2 * _t_7_; double _t_8_ = cons_1[k][j][i+3] * q_1[k][j][i+3]; _t_8_ -= cons_1[k][j][i-3] * q_1[k][j][i-3]; _t_8_ += q_4[k][j][i+3]; _t_8_ -= q_4[k][j][i-3]; _t_5_ += 0.038 * _t_8_; double _t_9_ = cons_1[k][j][i+4] * q_1[k][j][i+4]; _t_9_ -= cons_1[k][j][i-4] * q_1[k][j][i-4]; _t_9_ += q_4[k][j][i+4]; _t_9_ -= q_4[k][j][i-4]; _t_5_ -= 0.0035 * _t_9_; double flux_1kc0jc0ic0 = _t_5_ * dxinv0; double _t_11_ = cons_2[k][j][i+1] * q_1[k][j][i+1]; _t_11_ -= cons_2[k][j][i-1] * q_1[k][j][i-1]; double _t_10_ = 0.8 * _t_11_; double _t_12_ = cons_2[k][j][i+2] * q_1[k][j][i+2]; _t_12_ -= cons_2[k][j][i-2] * q_1[k][j][i-2]; _t_10_ -= 0.2 * _t_12_; double _t_13_ = cons_2[k][j][i+3] * q_1[k][j][i+3]; _t_13_ -= cons_2[k][j][i-3] * 
q_1[k][j][i-3]; _t_10_ += 0.038 * _t_13_; double _t_14_ = cons_2[k][j][i+4] * q_1[k][j][i+4]; _t_14_ -= cons_2[k][j][i-4] * q_1[k][j][i-4]; _t_10_ -= 0.0035 * _t_14_; double flux_2kc0jc0ic0 = _t_10_ * dxinv0; double _t_16_ = cons_3[k][j][i+1] * q_1[k][j][i+1]; _t_16_ -= cons_3[k][j][i-1] * q_1[k][j][i-1]; double _t_15_ = 0.8 * _t_16_; double _t_17_ = cons_3[k][j][i+2] * q_1[k][j][i+2]; _t_17_ -= cons_3[k][j][i-2] * q_1[k][j][i-2]; _t_15_ -= 0.2 * _t_17_; double _t_18_ = cons_3[k][j][i+3] * q_1[k][j][i+3]; _t_18_ -= cons_3[k][j][i-3] * q_1[k][j][i-3]; _t_15_ += 0.038 * _t_18_; double _t_19_ = cons_3[k][j][i+4] * q_1[k][j][i+4]; _t_19_ -= cons_3[k][j][i-4] * q_1[k][j][i-4]; _t_15_ -= 0.0035 * _t_19_; double flux_3kc0jc0ic0 = _t_15_ * dxinv0; double _t_21_ = q_4[k][j][i+1] * q_1[k][j][i+1]; double _v_24_ = cons_4[k][j][i+1] * q_1[k][j][i+1]; _t_21_ += _v_24_; _t_21_ -= cons_4[k][j][i-1] * q_1[k][j][i-1]; double _v_27_ = q_4[k][j][i-1] * q_1[k][j][i-1]; _t_21_ -= _v_27_; double _t_20_ = 0.8 * _t_21_; double _t_22_ = q_4[k][j][i+2] * q_1[k][j][i+2]; double _v_28_ = cons_4[k][j][i+2] * q_1[k][j][i+2]; _t_22_ += _v_28_; _t_22_ -= cons_4[k][j][i-2] * q_1[k][j][i-2]; double _v_31_ = q_4[k][j][i-2] * q_1[k][j][i-2]; _t_22_ -= _v_31_; _t_20_ -= 0.2 * _t_22_; double _t_23_ = q_4[k][j][i+3] * q_1[k][j][i+3]; double _v_32_ = cons_4[k][j][i+3] * q_1[k][j][i+3]; _t_23_ += _v_32_; _t_23_ -= cons_4[k][j][i-3] * q_1[k][j][i-3]; double _v_35_ = q_4[k][j][i-3] * q_1[k][j][i-3]; _t_23_ -= _v_35_; _t_20_ += 0.038 * _t_23_; double _t_24_ = q_4[k][j][i+4] * q_1[k][j][i+4]; double _v_36_ = cons_4[k][j][i+4] * q_1[k][j][i+4]; _t_24_ += _v_36_; _t_24_ -= cons_4[k][j][i-4] * q_1[k][j][i-4]; double _v_39_ = q_4[k][j][i-4] * q_1[k][j][i-4]; _t_24_ -= _v_39_; _t_20_ -= 0.0035 * _t_24_; double flux_4kc0jc0ic0 = _t_20_ * dxinv0; double _t_27_ = cons_2[k][j+1][i]; _t_27_ -= cons_2[k][j-1][i]; double _t_26_ = 0.8 * _t_27_; double _t_28_ = cons_2[k][j+2][i]; _t_28_ -= cons_2[k][j-2][i]; _t_26_ -= 0.2 * _t_28_; double _t_29_ = cons_2[k][j+3][i]; _t_29_ -= cons_2[k][j-3][i]; _t_26_ += 0.038 * _t_29_; double _t_30_ = cons_2[k][j+4][i]; _t_30_ -= cons_2[k][j-4][i]; _t_26_ -= 0.0035 * _t_30_; flux_0kc0jc0ic0 -= _t_26_ * dxinv1; double _t_33_ = cons_1[k][j+1][i] * q_2[k][j+1][i]; _t_33_ -= cons_1[k][j-1][i] * q_2[k][j-1][i]; double _t_32_ = 0.8 * _t_33_; double _t_34_ = cons_1[k][j+2][i] * q_2[k][j+2][i]; _t_34_ -= cons_1[k][j-2][i] * q_2[k][j-2][i]; _t_32_ -= 0.2 * _t_34_; double _t_35_ = cons_1[k][j+3][i] * q_2[k][j+3][i]; _t_35_ -= cons_1[k][j-3][i] * q_2[k][j-3][i]; _t_32_ += 0.038 * _t_35_; double _t_36_ = cons_1[k][j+4][i] * q_2[k][j+4][i]; _t_36_ -= cons_1[k][j-4][i] * q_2[k][j-4][i]; _t_32_ -= 0.0035 * _t_36_; flux_1kc0jc0ic0 -= _t_32_ * dxinv1; double _t_39_ = cons_2[k][j+1][i] * q_2[k][j+1][i]; _t_39_ -= cons_2[k][j-1][i] * q_2[k][j-1][i]; _t_39_ += q_4[k][j+1][i]; _t_39_ -= q_4[k][j-1][i]; double _t_38_ = 0.8 * _t_39_; double _t_40_ = cons_2[k][j+2][i] * q_2[k][j+2][i]; _t_40_ -= cons_2[k][j-2][i] * q_2[k][j-2][i]; _t_40_ += q_4[k][j+2][i]; _t_40_ -= q_4[k][j-2][i]; _t_38_ -= 0.2 * _t_40_; double _t_41_ = cons_2[k][j+3][i] * q_2[k][j+3][i]; _t_41_ -= cons_2[k][j-3][i] * q_2[k][j-3][i]; _t_41_ += q_4[k][j+3][i]; _t_41_ -= q_4[k][j-3][i]; _t_38_ += 0.038 * _t_41_; double _t_42_ = cons_2[k][j+4][i] * q_2[k][j+4][i]; _t_42_ -= cons_2[k][j-4][i] * q_2[k][j-4][i]; _t_42_ += q_4[k][j+4][i]; _t_42_ -= q_4[k][j-4][i]; _t_38_ -= 0.0035 * _t_42_; flux_2kc0jc0ic0 -= _t_38_ * dxinv1; double _t_45_ = cons_3[k][j+1][i] * 
q_2[k][j+1][i]; _t_45_ -= cons_3[k][j-1][i] * q_2[k][j-1][i]; double _t_44_ = 0.8 * _t_45_; double _t_46_ = cons_3[k][j+2][i] * q_2[k][j+2][i]; _t_46_ -= cons_3[k][j-2][i] * q_2[k][j-2][i]; _t_44_ -= 0.2 * _t_46_; double _t_47_ = cons_3[k][j+3][i] * q_2[k][j+3][i]; _t_47_ -= cons_3[k][j-3][i] * q_2[k][j-3][i]; _t_44_ += 0.038 * _t_47_; double _t_48_ = cons_3[k][j+4][i] * q_2[k][j+4][i]; _t_48_ -= cons_3[k][j-4][i] * q_2[k][j-4][i]; _t_44_ -= 0.0035 * _t_48_; flux_3kc0jc0ic0 -= _t_44_ * dxinv1; double _t_51_ = q_4[k][j+1][i] * q_2[k][j+1][i]; double _v_64_ = cons_4[k][j+1][i] * q_2[k][j+1][i]; _t_51_ += _v_64_; _t_51_ -= cons_4[k][j-1][i] * q_2[k][j-1][i]; double _v_67_ = q_4[k][j-1][i] * q_2[k][j-1][i]; _t_51_ -= _v_67_; double _t_50_ = 0.8 * _t_51_; double _t_52_ = q_4[k][j+2][i] * q_2[k][j+2][i]; double _v_68_ = cons_4[k][j+2][i] * q_2[k][j+2][i]; _t_52_ += _v_68_; _t_52_ -= cons_4[k][j-2][i] * q_2[k][j-2][i]; double _v_71_ = q_4[k][j-2][i] * q_2[k][j-2][i]; _t_52_ -= _v_71_; _t_50_ -= 0.2 * _t_52_; double _t_53_ = q_4[k][j+3][i] * q_2[k][j+3][i]; double _v_72_ = cons_4[k][j+3][i] * q_2[k][j+3][i]; _t_53_ += _v_72_; _t_53_ -= cons_4[k][j-3][i] * q_2[k][j-3][i]; double _v_75_ = q_4[k][j-3][i] * q_2[k][j-3][i]; _t_53_ -= _v_75_; _t_50_ += 0.038 * _t_53_; double _t_54_ = q_4[k][j+4][i] * q_2[k][j+4][i]; double _v_76_ = cons_4[k][j+4][i] * q_2[k][j+4][i]; _t_54_ += _v_76_; _t_54_ -= cons_4[k][j-4][i] * q_2[k][j-4][i]; double _v_79_ = q_4[k][j-4][i] * q_2[k][j-4][i]; _t_54_ -= _v_79_; _t_50_ -= 0.0035 * _t_54_; flux_4kc0jc0ic0 -= _t_50_ * dxinv1; double _t_57_ = cons_3[k+1][j][i]; _t_57_ -= cons_3[k-1][j][i]; double _t_56_ = 0.8 * _t_57_; double _t_58_ = cons_3[k+2][j][i]; _t_58_ -= cons_3[k-2][j][i]; _t_56_ -= 0.2 * _t_58_; double _t_59_ = cons_3[k+3][j][i]; _t_59_ -= cons_3[k-3][j][i]; _t_56_ += 0.038 * _t_59_; double _t_60_ = cons_3[k+4][j][i]; _t_60_ -= cons_3[k-4][j][i]; _t_56_ -= 0.0035 * _t_60_; flux_0kc0jc0ic0 -= _t_56_ * dxinv2; double _t_63_ = cons_1[k+1][j][i] * q_3[k+1][j][i]; _t_63_ -= cons_1[k-1][j][i] * q_3[k-1][j][i]; double _t_62_ = 0.8 * _t_63_; double _t_64_ = cons_1[k+2][j][i] * q_3[k+2][j][i]; _t_64_ -= cons_1[k-2][j][i] * q_3[k-2][j][i]; _t_62_ -= 0.2 * _t_64_; double _t_65_ = cons_1[k+3][j][i] * q_3[k+3][j][i]; _t_65_ -= cons_1[k-3][j][i] * q_3[k-3][j][i]; _t_62_ += 0.038 * _t_65_; double _t_66_ = cons_1[k+4][j][i] * q_3[k+4][j][i]; _t_66_ -= cons_1[k-4][j][i] * q_3[k-4][j][i]; _t_62_ -= 0.0035 * _t_66_; flux_1kc0jc0ic0 -= _t_62_ * dxinv2; double _t_69_ = cons_2[k+1][j][i] * q_3[k+1][j][i]; _t_69_ -= cons_2[k-1][j][i] * q_3[k-1][j][i]; double _t_68_ = 0.8 * _t_69_; double _t_70_ = cons_2[k+2][j][i] * q_3[k+2][j][i]; _t_70_ -= cons_2[k-2][j][i] * q_3[k-2][j][i]; _t_68_ -= 0.2 * _t_70_; double _t_71_ = cons_2[k+3][j][i] * q_3[k+3][j][i]; _t_71_ -= cons_2[k-3][j][i] * q_3[k-3][j][i]; _t_68_ += 0.038 * _t_71_; double _t_72_ = cons_2[k+4][j][i] * q_3[k+4][j][i]; _t_72_ -= cons_2[k-4][j][i] * q_3[k-4][j][i]; _t_68_ -= 0.0035 * _t_72_; flux_2kc0jc0ic0 -= _t_68_ * dxinv2; double _t_75_ = cons_3[k+1][j][i] * q_3[k+1][j][i]; _t_75_ -= cons_3[k-1][j][i] * q_3[k-1][j][i]; _t_75_ += q_4[k+1][j][i]; _t_75_ -= q_4[k-1][j][i]; double _t_74_ = 0.8 * _t_75_; double _t_76_ = cons_3[k+2][j][i] * q_3[k+2][j][i]; _t_76_ -= cons_3[k-2][j][i] * q_3[k-2][j][i]; _t_76_ += q_4[k+2][j][i]; _t_76_ -= q_4[k-2][j][i]; _t_74_ -= 0.2 * _t_76_; double _t_77_ = cons_3[k+3][j][i] * q_3[k+3][j][i]; _t_77_ -= cons_3[k-3][j][i] * q_3[k-3][j][i]; _t_77_ += q_4[k+3][j][i]; _t_77_ -= q_4[k-3][j][i]; _t_74_ += 
0.038 * _t_77_; double _t_78_ = cons_3[k+4][j][i] * q_3[k+4][j][i]; _t_78_ -= cons_3[k-4][j][i] * q_3[k-4][j][i]; _t_78_ += q_4[k+4][j][i]; _t_78_ -= q_4[k-4][j][i]; _t_74_ -= 0.0035 * _t_78_; flux_3kc0jc0ic0 -= _t_74_ * dxinv2; double _t_81_ = q_4[k+1][j][i] * q_3[k+1][j][i]; double _v_104_ = cons_4[k+1][j][i] * q_3[k+1][j][i]; _t_81_ += _v_104_; _t_81_ -= cons_4[k-1][j][i] * q_3[k-1][j][i]; double _v_107_ = q_4[k-1][j][i] * q_3[k-1][j][i]; _t_81_ -= _v_107_; double _t_80_ = 0.8 * _t_81_; double _t_82_ = q_4[k+2][j][i] * q_3[k+2][j][i]; double _v_108_ = cons_4[k+2][j][i] * q_3[k+2][j][i]; _t_82_ += _v_108_; _t_82_ -= cons_4[k-2][j][i] * q_3[k-2][j][i]; double _v_111_ = q_4[k-2][j][i] * q_3[k-2][j][i]; _t_82_ -= _v_111_; _t_80_ -= 0.2 * _t_82_; double _t_83_ = q_4[k+3][j][i] * q_3[k+3][j][i]; double _v_112_ = cons_4[k+3][j][i] * q_3[k+3][j][i]; _t_83_ += _v_112_; _t_83_ -= cons_4[k-3][j][i] * q_3[k-3][j][i]; double _v_115_ = q_4[k-3][j][i] * q_3[k-3][j][i]; _t_83_ -= _v_115_; _t_80_ += 0.038 * _t_83_; double _t_84_ = q_4[k+4][j][i] * q_3[k+4][j][i]; double _v_116_ = cons_4[k+4][j][i] * q_3[k+4][j][i]; _t_84_ += _v_116_; _t_84_ -= cons_4[k-4][j][i] * q_3[k-4][j][i]; double _v_119_ = q_4[k-4][j][i] * q_3[k-4][j][i]; _t_84_ -= _v_119_; _t_80_ -= 0.0035 * _t_84_; flux_4kc0jc0ic0 -= _t_80_ * dxinv2; flux_0[k][j][i] = flux_0kc0jc0ic0; flux_1[k][j][i] = flux_1kc0jc0ic0; flux_2[k][j][i] = flux_2kc0jc0ic0; flux_3[k][j][i] = flux_3kc0jc0ic0; flux_4[k][j][i] = flux_4kc0jc0ic0; } } extern "C" void host_code (double *h_flux_0, double *h_flux_1, double *h_flux_2, double *h_flux_3, double *h_flux_4, double *h_cons_1, double *h_cons_2, double *h_cons_3, double *h_cons_4, double *h_q_1, double *h_q_2, double *h_q_3, double *h_q_4, double dxinv0, double dxinv1, double dxinv2, int L, int M, int N) { double *flux_0; cudaMalloc (&flux_0, sizeof(double)*L*M*N); check_error ("Failed to allocate device memory for flux_0\n"); cudaMemcpy (flux_0, h_flux_0, sizeof(double)*L*M*N, cudaMemcpyHostToDevice); double *flux_1; cudaMalloc (&flux_1, sizeof(double)*L*M*N); check_error ("Failed to allocate device memory for flux_1\n"); cudaMemcpy (flux_1, h_flux_1, sizeof(double)*L*M*N, cudaMemcpyHostToDevice); double *flux_2; cudaMalloc (&flux_2, sizeof(double)*L*M*N); check_error ("Failed to allocate device memory for flux_2\n"); cudaMemcpy (flux_2, h_flux_2, sizeof(double)*L*M*N, cudaMemcpyHostToDevice); double *flux_3; cudaMalloc (&flux_3, sizeof(double)*L*M*N); check_error ("Failed to allocate device memory for flux_3\n"); cudaMemcpy (flux_3, h_flux_3, sizeof(double)*L*M*N, cudaMemcpyHostToDevice); double *flux_4; cudaMalloc (&flux_4, sizeof(double)*L*M*N); check_error ("Failed to allocate device memory for flux_4\n"); cudaMemcpy (flux_4, h_flux_4, sizeof(double)*L*M*N, cudaMemcpyHostToDevice); double *cons_1; cudaMalloc (&cons_1, sizeof(double)*L*M*N); check_error ("Failed to allocate device memory for cons_1\n"); cudaMemcpy (cons_1, h_cons_1, sizeof(double)*L*M*N, cudaMemcpyHostToDevice); double *cons_2; cudaMalloc (&cons_2, sizeof(double)*L*M*N); check_error ("Failed to allocate device memory for cons_2\n"); cudaMemcpy (cons_2, h_cons_2, sizeof(double)*L*M*N, cudaMemcpyHostToDevice); double *cons_3; cudaMalloc (&cons_3, sizeof(double)*L*M*N); check_error ("Failed to allocate device memory for cons_3\n"); cudaMemcpy (cons_3, h_cons_3, sizeof(double)*L*M*N, cudaMemcpyHostToDevice); double *cons_4; cudaMalloc (&cons_4, sizeof(double)*L*M*N); check_error ("Failed to allocate device memory for cons_4\n"); cudaMemcpy (cons_4, 
h_cons_4, sizeof(double)*L*M*N, cudaMemcpyHostToDevice); double *q_1; cudaMalloc (&q_1, sizeof(double)*L*M*N); check_error ("Failed to allocate device memory for q_1\n"); cudaMemcpy (q_1, h_q_1, sizeof(double)*L*M*N, cudaMemcpyHostToDevice); double *q_2; cudaMalloc (&q_2, sizeof(double)*L*M*N); check_error ("Failed to allocate device memory for q_2\n"); cudaMemcpy (q_2, h_q_2, sizeof(double)*L*M*N, cudaMemcpyHostToDevice); double *q_3; cudaMalloc (&q_3, sizeof(double)*L*M*N); check_error ("Failed to allocate device memory for q_3\n"); cudaMemcpy (q_3, h_q_3, sizeof(double)*L*M*N, cudaMemcpyHostToDevice); double *q_4; cudaMalloc (&q_4, sizeof(double)*L*M*N); check_error ("Failed to allocate device memory for q_4\n"); cudaMemcpy (q_4, h_q_4, sizeof(double)*L*M*N, cudaMemcpyHostToDevice); dim3 blockconfig (16, 4, 4); dim3 gridconfig (ceil(N, blockconfig.x), ceil(M, blockconfig.y), ceil(L, blockconfig.z)); hypterm <<<gridconfig, blockconfig>>> (flux_0, flux_1, flux_2, flux_3, flux_4, cons_1, cons_2, cons_3, cons_4, q_1, q_2, q_3, q_4, -dxinv0, dxinv1, dxinv2, L, M, N); check_error ("Kernel launch failed\n"); cudaMemcpy (h_flux_0, flux_0, sizeof(double)*L*M*N, cudaMemcpyDeviceToHost); cudaMemcpy (h_flux_1, flux_1, sizeof(double)*L*M*N, cudaMemcpyDeviceToHost); cudaMemcpy (h_flux_3, flux_3, sizeof(double)*L*M*N, cudaMemcpyDeviceToHost); cudaMemcpy (h_flux_4, flux_4, sizeof(double)*L*M*N, cudaMemcpyDeviceToHost); cudaMemcpy (h_flux_2, flux_2, sizeof(double)*L*M*N, cudaMemcpyDeviceToHost); /* Release device buffers */ cudaFree (flux_0); cudaFree (flux_1); cudaFree (flux_2); cudaFree (flux_3); cudaFree (flux_4); cudaFree (cons_1); cudaFree (cons_2); cudaFree (cons_3); cudaFree (cons_4); cudaFree (q_1); cudaFree (q_2); cudaFree (q_3); cudaFree (q_4); }
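/* Editor's note (commentary added during review, not from the original source): the kernel hard-codes the array shape by casting every flat pointer to double (*)[308][308], and its bounds test compares all of i, j and k against N. host_code therefore only behaves as intended for cubic grids with L = M = N = 308. A hedged usage sketch under exactly that assumption, with dx/dy/dz standing in for the grid spacings: host_code(h_flux_0, h_flux_1, h_flux_2, h_flux_3, h_flux_4, h_cons_1, h_cons_2, h_cons_3, h_cons_4, h_q_1, h_q_2, h_q_3, h_q_4, 1.0/dx, 1.0/dy, 1.0/dz, 308, 308, 308); */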
9c0e07626d34db5b20f12bc6848489b5aed4d9f8.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /********************************************************************************* * SmithWaterman algorithm * Purpose: Local alignment of nucleotide or protein sequences * Authors: Daniel Holanda, Hanoch Griner, Taynara Pinheiro * Compilation: nvcc -std=c++11 -O3 -DNDEBUG=1 cuda_unified_smithW.cu -o cuda_um_smithW * nvcc -std=c++11 -O0 -DDEBUG -g -G cuda_unified_smithW.cu -o dbg_cuda_smithW * Execution: ./cuda_smithW <number_of_col> <number_of_rows> *********************************************************************************/ #include <stdio.h> #include <stdlib.h> #include <math.h> //~ #include <time.h> //~ #include <omp.h> #include <cassert> #include <chrono> #include <iostream> #ifndef NDEBUG static constexpr bool DEBUG_MODE = true; #else static constexpr bool DEBUG_MODE = false; #endif /* NDEBUG */ /*-------------------------------------------------------------------- * Text Tweaks */ #define RESET "\033[0m" #define BOLDRED "\033[1m\033[31m" /* Bold Red */ /* End of text tweaks */ /*-------------------------------------------------------------------- * Constants */ #define PATH -1 #define NONE 0 /* End of constants */ /*-------------------------------------------------------------------- * Helpers */ #define min(x, y) (((x) < (y)) ? (x) : (y)) #define max(a,b) ((a) > (b) ? a : b) // #define DEBUG /* End of Helpers */ // my types // \note changed type to unsigned to make it collaborate with CUDA atomicCAS // \todo maybe rename it to index_t and change all long longs to index_t typedef unsigned long long maxpos_t; /// defines type for indices into arrays and matrices /// (needs to be a signed type) typedef long long int index_t; /// defines data type for scoring typedef int score_t; /// defines data type for linking paths enum link_t { UNDEF = -1, NOLINK = 0, UP = 1, LEFT = 2, DIAGONAL = 3 }; /*-------------------------------------------------------------------- * Functions Prototypes */ int backtrack(link_t* P, maxpos_t maxPos); void printMatrix(score_t* matrix); void printPredecessorMatrix(link_t* matrix); void generate(void); long long int nElement(long long int i); // \pp modified to pass i (a induction variable) by value void calcFirstDiagElement(long long int i, long long int *si, long long int *sj); /* End of prototypes */ /*-------------------------------------------------------------------- * Global Variables */ // Defines size of strings to be compared index_t m = 8; // Columns - Size of string a index_t n = 9; // Rows - Size of string b // Defines scores static const score_t MATCH_SCORE = 3; // 5 in omp_smithW_orig static const score_t MISSMATCH_SCORE = -3; // -3 static const score_t GAP_SCORE = -2; // -4 // GPU THREADS PER BLOCK static constexpr int THREADS_PER_BLOCK = 1024; // Strings over the Alphabet Sigma char *a, *b; /* End of global variables */ /*-------------------------------------------------------------------- * Function: matchMissmatchScore * Purpose: Similarity function on the alphabet for match/missmatch */ __device__ score_t matchMissmatchScore_cuda(index_t i, index_t j, const char* seqa, const char* seqb) { if (seqa[j - 1] == seqb[i - 1]) return MATCH_SCORE; return MISSMATCH_SCORE; } /* End of matchMissmatchScore_cuda */ /*-------------------------------------------------------------------- * Function: SimilarityScore * Purpose: Calculate the maximum Similarity-Score H(i,j) */ __global__ void similarityScore_kernel( index_t si, index_t sj, index_t j_upper_bound, 
score_t* H, link_t* P, maxpos_t* maxPos, const char* seqa, const char* seqb, index_t cols ) { // compute the second loop index j const index_t loopj = blockIdx.x * blockDim.x + threadIdx.x; if (loopj >= j_upper_bound) return; // compute original i and j index_t i = si - loopj; index_t j = sj + loopj; // bounds test for matchMissmatchScore_cuda assert(i > 0); // was: assert(i > 0 && i <= n); -- n currently not passed in assert(j > 0 && j <= cols); // Stores index of element maxpos_t index = cols * i + j; assert(index >= cols); // Get element above score_t up = H[index - cols] + GAP_SCORE; assert(index > 0); // Get element on the left score_t left = H[index - 1] + GAP_SCORE; assert(index > cols); // Get element on the diagonal score_t diag = H[index - cols - 1] + matchMissmatchScore_cuda(i, j, seqa, seqb); // Calculates the maximum score_t max = NONE; link_t pred = NOLINK; /* === Matrix === * a[0] ... a[n] * b[0] * ... * b[n] * * generate 'a' from 'b', if '←' insert e '↑' remove * a=GAATTCA * b=GACTT-A * * generate 'b' from 'a', if '←' insert e '↑' remove * b=GACTT-A * a=GAATTCA */ // same letter ↖ if (diag > max) { max = diag; pred = DIAGONAL; } // remove letter ↑ if (up > max) { max = up; pred = UP; } //insert letter ← if (left > max) { max = left; pred = LEFT; } //Inserts the value in the similarity and predecessor matrixes H[index] = max; P[index] = pred; // Updates maximum score to be used as seed on backtrack { // \note \pp // locks seem to be a NOGO in CUDA warps, // thus the update to set the maximum is made nonblocking. maxpos_t current = *maxPos; maxpos_t assumed = current+1; while (assumed != current && max > H[current]) { assumed = current; // \note consider atomicCAS_system for multi GPU systems current = atomicCAS(maxPos, assumed, index); } } } /* End of similarityScore_kernel */ static inline void check_cuda_success(hipError_t err) { if (err == hipSuccess) return; std::cerr << "CUDA error: " << hipGetErrorString(err) << std::endl; exit(0); } /// malloc replacement template<class T> static T* unified_alloc(size_t numelems) { void* ptr /* = NULL*/; hipError_t err = hipMallocManaged(&ptr, numelems * sizeof(T), hipMemAttachGlobal); check_cuda_success(err); //~ err = hipMemAdvise(ptr, numelems * sizeof(T), hipMemAdviseSetPreferredLocation, 0); //~ check_cuda_success(err); return reinterpret_cast<T*>(ptr); } /// calloc replacement // \note depending on the OS, the memset may be superfluous.
template<class T> static T* unified_alloc_zero(size_t numelems) { T* ptr = unified_alloc<T>(numelems); hipError_t err = hipMemsetAsync(ptr, 0, numelems*sizeof(T), 0); check_cuda_success(err); return ptr; } static void unified_free(void* ptr) { hipError_t err = hipFree(ptr); check_cuda_success(err); } // Start position for backtrack // \note // 1) moved out from main function so it can be set in managed space // 2) made unsigned to fit with CUDA atomicCAS prototype static __managed__ maxpos_t maxPos = 0; /*-------------------------------------------------------------------- * Function: main */ int main(int argc, char* argv[]) { typedef std::chrono::time_point<std::chrono::system_clock> time_point; bool useBuiltInData = true; if (argc==3) { m = strtoll(argv[1], NULL, 10); n = strtoll(argv[2], NULL, 10); useBuiltInData = false; } //#ifdef DEBUG if (useBuiltInData) printf ("Using built-in data for testing ..\n"); // Allocates a and b //~ a = malloc(m * sizeof(char)); //~ b = malloc(n * sizeof(char)); a = unified_alloc<char>(m+1); b = unified_alloc<char>(n+1); std::cerr << "a,b allocated: " << m << "/" << n << std::endl; // Because now we have zeros m++; // \note \pp really needed??? n++; // \note \pp really needed??? if (useBuiltInData) { //Uncomment this to test the sequence available at //http://vlab.amrita.edu/?sub=3&brch=274&sim=1433&cnt=1 // assert(m=11 && n=7); // a[0] = 'C'; // a[1] = 'G'; // a[2] = 'T'; // a[3] = 'G'; // a[4] = 'A'; // a[5] = 'A'; // a[6] = 'T'; // a[7] = 'T'; // a[8] = 'C'; // a[9] = 'A'; // a[10] = 'T'; // b[0] = 'G'; // b[1] = 'A'; // b[2] = 'C'; // b[3] = 'T'; // b[4] = 'T'; // b[5] = 'A'; // b[6] = 'C'; // https://en.wikipedia.org/wiki/Smith%E2%80%93Waterman_algorithm#Example // Using the wiki example to verify the results assert(m>=8 && n>=9); b[0] = 'G'; b[1] = 'G'; b[2] = 'T'; b[3] = 'T'; b[4] = 'G'; b[5] = 'A'; b[6] = 'C'; b[7] = 'T'; b[8] = 'A'; a[0] = 'T'; a[1] = 'G'; a[2] = 'T'; a[3] = 'T'; a[4] = 'A'; a[5] = 'C'; a[6] = 'G'; a[7] = 'G'; } else { //Gen random arrays a and b generate(); } time_point starttime = std::chrono::system_clock::now(); // setting hipMemAdviseSetReadMostly is mostly ineffective // (no runtime performance difference on pascal) //~ hipMemAdvise(a, sizeof(char)*m, hipMemAdviseSetReadMostly, 0); //~ hipMemAdvise(b, sizeof(char)*n, hipMemAdviseSetReadMostly, 0); // Allocates similarity matrix H //~ int* H = calloc(m * n, sizeof(int)); score_t* H = unified_alloc_zero<score_t>(m * n); // Allocates predecessor matrix P //~ int* P = calloc(m * n, sizeof(int)); //~ int* P = unified_alloc_zero<int>(m * n); link_t* P = unified_alloc<link_t>(m * n); // Because now we have zeros ((m-1) + (n-1) - 1) long long int nDiag = m + n - 3; for (int i = 1; i <= nDiag; ++i) { long long nEle = nElement(i); long long si /* uninitialized */; long long sj /* uninitialized */; calcFirstDiagElement(i, &si, &sj); { // CUDA, here we go // \note // * MAKE SURE THAT a,b,H,P are ACCESSIBLE from GPU. // This prototype allocates a,b,H,P in unified memory space, thus // we just copy the pointers. If the allocation policy changes, // memory referenced by a,b,H,P has to be transferred to the GPU, // and memory referenced by H and P has to be transferred back. // * a and b do not change, thus they only need to be transferred // initially. // * transfers of H and P could probably be optimized to only // include data along the wavefront. 
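// * editor's sketch (added during review, not original code): as long as the // unified-memory policy above is kept, the matrices can at least be // prefetched wholesale before the diagonal loop, e.g. //~ hipMemPrefetchAsync(H, (size_t)m * n * sizeof(score_t), 0 /*device*/, 0 /*stream*/); //~ hipMemPrefetchAsync(P, (size_t)m * n * sizeof(link_t), 0 /*device*/, 0 /*stream*/); // narrowing the range to the active wavefront is the open optimization // mentioned above.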
// \todo // * study amount of data transfer for H and P //~ const long long ITER_SPACE = ceil(nEle/THREADS_PER_BLOCK); const long long ITER_SPACE = (nEle+THREADS_PER_BLOCK-1)/THREADS_PER_BLOCK; // comp. of ai and aj moved into CUDA kernel hipLaunchKernelGGL(( similarityScore_kernel) , dim3(ITER_SPACE), dim3(THREADS_PER_BLOCK), 0, 0, si, sj, nEle, H, P, &maxPos, a, b, m); // \todo sync needed? // - not needed when control is not returned to host // - may not be needed at all depending on device capability } } check_cuda_success( hipStreamSynchronize(0) ); time_point endtime = std::chrono::system_clock::now(); int len = backtrack(P, maxPos); // time_point endtime = std::chrono::system_clock::now(); if (DEBUG_MODE) { printf("\nSimilarity Matrix:\n"); printMatrix(H); printf("\nPredecessor Matrix:\n"); printPredecessorMatrix(P); } if (useBuiltInData) { const bool correct = H[maxPos] == 13; std::cerr << "Max(builtin data): " << H[maxPos] << " == 13? " << correct << " " << maxPos << std::endl; if (!correct) throw std::logic_error("Invalid result"); } int elapsed = std::chrono::duration_cast<std::chrono::milliseconds>(endtime-starttime).count(); std::cout << "\nElapsed time: " << elapsed << " ms" << "\nPath length: " << len << "\nScore: " << H[maxPos] << std::endl; // Frees similarity matrixes unified_free(H); unified_free(P); //Frees input arrays unified_free(a); unified_free(b); return 0; } /* End of main */ /*-------------------------------------------------------------------- * Function: nElement * Purpose: Calculate the number of i-diagonal elements */ long long int nElement(long long int i) { if (i < m && i < n) { // Number of elements in the diagonal is increasing return i; } else if (i < max(m, n)) { //Number of elements in the diagonal is stable long int min_mn = min(m, n); return min_mn - 1; } else { //Number of elements in the diagonal is decreasing long int min_mn = min(m, n); return 2 * min_mn - i + abs(m - n) - 2; } } /*-------------------------------------------------------------------- * Function: calcElement * Purpose: Calculate the position of (si, sj)-element */ void calcFirstDiagElement(long long int i, long long int *si, long long int *sj) { // Calculate the first element of diagonal if (i < n) { *si = i; *sj = 1; } else { *si = n - 1; *sj = i - n + 2; } } /*-------------------------------------------------------------------- * Function: backtrack * Purpose: Modify matrix to print, path change from value to PATH */ int backtrack(link_t* P, maxpos_t maxPos) { //hold maxPos value long long int predPos = 0; int len = 0; #ifdef DEBUG std::cerr << "maxpos = " << maxPos << std::endl; #endif //backtrack from maxPos to startPos = 0 do { #ifdef DEBUG std::cerr << "P[" << maxPos << "] = " << std::flush << P[maxPos] << std::endl; #endif switch (P[maxPos]) { case DIAGONAL: predPos = maxPos - m - 1; break; case UP: predPos = maxPos - m; break; case LEFT: predPos = maxPos - 1; break; default: assert(false); } #ifdef DEBUG P[maxPos] *= PATH; #endif maxPos = predPos; ++len; } while (P[maxPos] != NONE); return len; } /* End of backtrack */ /*-------------------------------------------------------------------- * Function: printMatrix * Purpose: Print Matrix */ void printMatrix(int* matrix) { long long int i, j; printf("-\t-\t"); for (j = 0; j < m-1; j++) { printf("%c\t", a[j]); } printf("\n-\t"); for (i = 0; i < n; i++) { //Lines for (j = 0; j < m; j++) { if (j==0 && i>0) printf("%c\t", b[i-1]); printf("%d\t", matrix[m * i + j]); } printf("\n"); } } /* End of printMatrix */ 
/*-------------------------------------------------------------------- * Function: printPredecessorMatrix * Purpose: Print predecessor matrix */ void printPredecessorMatrix(link_t* matrix) { long long int i, j, index; printf("    "); for (j = 0; j < m-1; j++) { printf("%c ", a[j]); } printf("\n  "); for (i = 0; i < n; i++) { //Lines for (j = 0; j < m; j++) { if (j==0 && i>0) printf("%c ", b[i-1]); index = m * i + j; if (matrix[index] < 0) { printf(BOLDRED); if (matrix[index] == -UP) printf("↑ "); else if (matrix[index] == -LEFT) printf("← "); else if (matrix[index] == -DIAGONAL) printf("↖ "); else printf("- "); printf(RESET); } else { if (matrix[index] == UP) printf("↑ "); else if (matrix[index] == LEFT) printf("← "); else if (matrix[index] == DIAGONAL) printf("↖ "); else printf("- "); } } printf("\n"); } } /* End of printPredecessorMatrix */ /*-------------------------------------------------------------------- * Function: generate * Purpose: Generate arrays a and b */ void generate() { //Random seed srand(time(NULL)); //Generates the values of a long long int i; for (i = 0; i < m; i++) { int aux = rand() % 4; if (aux == 0) a[i] = 'A'; else if (aux == 2) a[i] = 'C'; else if (aux == 3) a[i] = 'G'; else a[i] = 'T'; } //Generates the values of b for (i = 0; i < n; i++) { int aux = rand() % 4; if (aux == 0) b[i] = 'A'; else if (aux == 2) b[i] = 'C'; else if (aux == 3) b[i] = 'G'; else b[i] = 'T'; } } /* End of generate */ /*-------------------------------------------------------------------- * External References: * http://vlab.amrita.edu/?sub=3&brch=274&sim=1433&cnt=1 * http://pt.slideshare.net/avrilcoghlan/the-smith-waterman-algorithm * http://baba.sourceforge.net/ */
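/* Editor's note (hedged sketch, not part of the original file): the nonblocking CAS loop in similarityScore_kernel re-reads H[current] written by other threads without a fence, so under heavy contention it could settle on a stale maximum. A common alternative, assuming nonnegative scores and indices below 2^32, packs (score, index) into one 64-bit word and reduces with a single atomic: unsigned long long packed = (((unsigned long long)(unsigned)max) << 32) | (unsigned)index; atomicMax(&maxPosPacked, packed); where maxPosPacked would be a new __managed__ unsigned long long - an illustrative name, not existing code - and score and index are recovered afterwards as (packed >> 32) and (packed & 0xffffffffu). */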
9c0e07626d34db5b20f12bc6848489b5aed4d9f8.cu
/********************************************************************************* * Smith–Waterman algorithm * Purpose: Local alignment of nucleotide or protein sequences * Authors: Daniel Holanda, Hanoch Griner, Taynara Pinheiro * Compilation: nvcc -std=c++11 -O3 -DNDEBUG=1 cuda_unified_smithW.cu -o cuda_um_smithW * nvcc -std=c++11 -O0 -DDEBUG -g -G cuda_unified_smithW.cu -o dbg_cuda_smithW * Execution: ./cuda_smithW <number_of_col> <number_of_rows> *********************************************************************************/ #include <stdio.h> #include <stdlib.h> #include <math.h> //~ #include <time.h> //~ #include <omp.h> #include <cassert> #include <chrono> #include <iostream> #ifndef NDEBUG static constexpr bool DEBUG_MODE = true; #else static constexpr bool DEBUG_MODE = false; #endif /* NDEBUG */ /*-------------------------------------------------------------------- * Text Tweaks */ #define RESET "\033[0m" #define BOLDRED "\033[1m\033[31m" /* Bold Red */ /* End of text tweaks */ /*-------------------------------------------------------------------- * Constants */ #define PATH -1 #define NONE 0 /* End of constants */ /*-------------------------------------------------------------------- * Helpers */ #define min(x, y) (((x) < (y)) ? (x) : (y)) #define max(a,b) ((a) > (b) ? a : b) // #define DEBUG /* End of Helpers */ // my types // \note changed type to unsigned to make it collaborate with CUDA atomicCAS // \todo maybe rename it to index_t and change all long longs to index_t typedef unsigned long long maxpos_t; /// defines type for indices into arrays and matrices /// (needs to be a signed type) typedef long long int index_t; /// defines data type for scoring typedef int score_t; /// defines data type for linking paths enum link_t { UNDEF = -1, NOLINK = 0, UP = 1, LEFT = 2, DIAGONAL = 3 }; /*-------------------------------------------------------------------- * Functions Prototypes */ int backtrack(link_t* P, maxpos_t maxPos); void printMatrix(score_t* matrix); void printPredecessorMatrix(link_t* matrix); void generate(void); long long int nElement(long long int i); // \pp modified to pass i (a induction variable) by value void calcFirstDiagElement(long long int i, long long int *si, long long int *sj); /* End of prototypes */ /*-------------------------------------------------------------------- * Global Variables */ // Defines size of strings to be compared index_t m = 8; // Columns - Size of string a index_t n = 9; // Rows - Size of string b // Defines scores static const score_t MATCH_SCORE = 3; // 5 in omp_smithW_orig static const score_t MISSMATCH_SCORE = -3; // -3 static const score_t GAP_SCORE = -2; // -4 // GPU THREADS PER BLOCK static constexpr int THREADS_PER_BLOCK = 1024; // Strings over the Alphabet Sigma char *a, *b; /* End of global variables */ /*-------------------------------------------------------------------- * Function: matchMissmatchScore * Purpose: Similarity function on the alphabet for match/missmatch */ __device__ score_t matchMissmatchScore_cuda(index_t i, index_t j, const char* seqa, const char* seqb) { if (seqa[j - 1] == seqb[i - 1]) return MATCH_SCORE; return MISSMATCH_SCORE; } /* End of matchMissmatchScore_cuda */ /*-------------------------------------------------------------------- * Function: SimilarityScore * Purpose: Calculate the maximum Similarity-Score H(i,j) */ __global__ void similarityScore_kernel( index_t si, index_t sj, index_t j_upper_bound, score_t* H, link_t* P, maxpos_t* maxPos, const char* seqa, const char* seqb, index_t cols ) { 
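/* Editor's note (commentary added during review, not from the original source): the host fills the DP matrix wavefront by wavefront; each launch of this kernel covers one anti-diagonal. Cell (i, j) depends only on (i-1, j), (i, j-1) and (i-1, j-1), all of which lie on the two previously completed diagonals, so the j_upper_bound cells of the current diagonal are independent and get one thread each. */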
// compute the second loop index j const index_t loopj = blockIdx.x * blockDim.x + threadIdx.x; if (loopj >= j_upper_bound) return; // compute original i and j index_t i = si - loopj; index_t j = sj + loopj; // bounds test for matchMissmatchScore_cuda assert(i > 0); // was: assert(i > 0 && i <= n); -- n currently not passed in assert(j > 0 && j <= cols); // Stores index of element maxpos_t index = cols * i + j; assert(index >= cols); // Get element above score_t up = H[index - cols] + GAP_SCORE; assert(index > 0); // Get element on the left score_t left = H[index - 1] + GAP_SCORE; assert(index > cols); // Get element on the diagonal score_t diag = H[index - cols - 1] + matchMissmatchScore_cuda(i, j, seqa, seqb); // Calculates the maximum score_t max = NONE; link_t pred = NOLINK; /* === Matrix === * a[0] ... a[n] * b[0] * ... * b[n] * * generate 'a' from 'b', if '←' insert e '↑' remove * a=GAATTCA * b=GACTT-A * * generate 'b' from 'a', if '←' insert e '↑' remove * b=GACTT-A * a=GAATTCA */ // same letter ↖ if (diag > max) { max = diag; pred = DIAGONAL; } // remove letter ↑ if (up > max) { max = up; pred = UP; } //insert letter ← if (left > max) { max = left; pred = LEFT; } //Inserts the value in the similarity and predecessor matrixes H[index] = max; P[index] = pred; // Updates maximum score to be used as seed on backtrack { // \note \pp // locks seem to be a NOGO in CUDA warps, // thus the update to set the maximum is made nonblocking. maxpos_t current = *maxPos; maxpos_t assumed = current+1; while (assumed != current && max > H[current]) { assumed = current; // \note consider atomicCAS_system for multi GPU systems current = atomicCAS(maxPos, assumed, index); } } } /* End of similarityScore_kernel */ static inline void check_cuda_success(cudaError_t err) { if (err == cudaSuccess) return; std::cerr << "CUDA error: " << cudaGetErrorString(err) << std::endl; exit(0); } /// malloc replacement template<class T> static T* unified_alloc(size_t numelems) { void* ptr /* = NULL*/; cudaError_t err = cudaMallocManaged(&ptr, numelems * sizeof(T), cudaMemAttachGlobal); check_cuda_success(err); //~ err = cudaMemAdvise(ptr, numelems * sizeof(T), cudaMemAdviseSetPreferredLocation, 0); //~ check_cuda_success(err); return reinterpret_cast<T*>(ptr); } /// calloc replacement // \note depending on the OS, the memset may be superfluous. 
template<class T> static T* unified_alloc_zero(size_t numelems) { T* ptr = unified_alloc<T>(numelems); cudaError_t err = cudaMemsetAsync(ptr, 0, numelems*sizeof(T), 0); check_cuda_success(err); return ptr; } static void unified_free(void* ptr) { cudaError_t err = cudaFree(ptr); check_cuda_success(err); } // Start position for backtrack // \note // 1) moved out from main function so it can be set in managed space // 2) made unsigned to fit with CUDA atomicCAS prototype static __managed__ maxpos_t maxPos = 0; /*-------------------------------------------------------------------- * Function: main */ int main(int argc, char* argv[]) { typedef std::chrono::time_point<std::chrono::system_clock> time_point; bool useBuiltInData = true; if (argc==3) { m = strtoll(argv[1], NULL, 10); n = strtoll(argv[2], NULL, 10); useBuiltInData = false; } //#ifdef DEBUG if (useBuiltInData) printf ("Using built-in data for testing ..\n"); // Allocates a and b //~ a = malloc(m * sizeof(char)); //~ b = malloc(n * sizeof(char)); a = unified_alloc<char>(m+1); b = unified_alloc<char>(n+1); std::cerr << "a,b allocated: " << m << "/" << n << std::endl; // Because now we have zeros m++; // \note \pp really needed??? n++; // \note \pp really needed??? if (useBuiltInData) { //Uncomment this to test the sequence available at //http://vlab.amrita.edu/?sub=3&brch=274&sim=1433&cnt=1 // assert(m=11 && n=7); // a[0] = 'C'; // a[1] = 'G'; // a[2] = 'T'; // a[3] = 'G'; // a[4] = 'A'; // a[5] = 'A'; // a[6] = 'T'; // a[7] = 'T'; // a[8] = 'C'; // a[9] = 'A'; // a[10] = 'T'; // b[0] = 'G'; // b[1] = 'A'; // b[2] = 'C'; // b[3] = 'T'; // b[4] = 'T'; // b[5] = 'A'; // b[6] = 'C'; // https://en.wikipedia.org/wiki/Smith%E2%80%93Waterman_algorithm#Example // Using the wiki example to verify the results assert(m>=8 && n>=9); b[0] = 'G'; b[1] = 'G'; b[2] = 'T'; b[3] = 'T'; b[4] = 'G'; b[5] = 'A'; b[6] = 'C'; b[7] = 'T'; b[8] = 'A'; a[0] = 'T'; a[1] = 'G'; a[2] = 'T'; a[3] = 'T'; a[4] = 'A'; a[5] = 'C'; a[6] = 'G'; a[7] = 'G'; } else { //Gen random arrays a and b generate(); } time_point starttime = std::chrono::system_clock::now(); // setting cudaMemAdviseSetReadMostly is mostly ineffective // (no runtime performance difference on pascal) //~ cudaMemAdvise(a, sizeof(char)*m, cudaMemAdviseSetReadMostly, 0); //~ cudaMemAdvise(b, sizeof(char)*n, cudaMemAdviseSetReadMostly, 0); // Allocates similarity matrix H //~ int* H = calloc(m * n, sizeof(int)); score_t* H = unified_alloc_zero<score_t>(m * n); // Allocates predecessor matrix P //~ int* P = calloc(m * n, sizeof(int)); //~ int* P = unified_alloc_zero<int>(m * n); link_t* P = unified_alloc<link_t>(m * n); // Because now we have zeros ((m-1) + (n-1) - 1) long long int nDiag = m + n - 3; for (int i = 1; i <= nDiag; ++i) { long long nEle = nElement(i); long long si /* uninitialized */; long long sj /* uninitialized */; calcFirstDiagElement(i, &si, &sj); { // CUDA, here we go // \note // * MAKE SURE THAT a,b,H,P are ACCESSIBLE from GPU. // This prototype allocates a,b,H,P in unified memory space, thus // we just copy the pointers. If the allocation policy changes, // memory referenced by a,b,H,P has to be transferred to the GPU, // and memory referenced by H and P has to be transferred back. // * a and b do not change, thus they only need to be transferred // initially. // * transfers of H and P could probably be optimized to only // include data along the wavefront. 
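// * editor's sketch (added during review, not original code): as long as the // unified-memory policy above is kept, the matrices can at least be // prefetched wholesale before the diagonal loop, e.g. //~ cudaMemPrefetchAsync(H, (size_t)m * n * sizeof(score_t), 0 /*device*/, 0 /*stream*/); //~ cudaMemPrefetchAsync(P, (size_t)m * n * sizeof(link_t), 0 /*device*/, 0 /*stream*/); // narrowing the range to the active wavefront is the open optimization // mentioned above.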
// \todo // * study amount of data transfer for H and P //~ const long long ITER_SPACE = ceil(nEle/THREADS_PER_BLOCK); const long long ITER_SPACE = (nEle+THREADS_PER_BLOCK-1)/THREADS_PER_BLOCK; // comp. of ai and aj moved into CUDA kernel similarityScore_kernel <<<ITER_SPACE, THREADS_PER_BLOCK, 0, 0>>> (si, sj, nEle, H, P, &maxPos, a, b, m); // \todo sync needed? // - not needed when control is not returned to host // - may not be needed at all depending on device capability } } check_cuda_success( cudaStreamSynchronize(0) ); time_point endtime = std::chrono::system_clock::now(); int len = backtrack(P, maxPos); // time_point endtime = std::chrono::system_clock::now(); if (DEBUG_MODE) { printf("\nSimilarity Matrix:\n"); printMatrix(H); printf("\nPredecessor Matrix:\n"); printPredecessorMatrix(P); } if (useBuiltInData) { const bool correct = H[maxPos] == 13; std::cerr << "Max(builtin data): " << H[maxPos] << " == 13? " << correct << " " << maxPos << std::endl; if (!correct) throw std::logic_error("Invalid result"); } int elapsed = std::chrono::duration_cast<std::chrono::milliseconds>(endtime-starttime).count(); std::cout << "\nElapsed time: " << elapsed << " ms" << "\nPath length: " << len << "\nScore: " << H[maxPos] << std::endl; // Frees similarity matrixes unified_free(H); unified_free(P); //Frees input arrays unified_free(a); unified_free(b); return 0; } /* End of main */ /*-------------------------------------------------------------------- * Function: nElement * Purpose: Calculate the number of i-diagonal elements */ long long int nElement(long long int i) { if (i < m && i < n) { // Number of elements in the diagonal is increasing return i; } else if (i < max(m, n)) { //Number of elements in the diagonal is stable long int min_mn = min(m, n); return min_mn - 1; } else { //Number of elements in the diagonal is decreasing long int min_mn = min(m, n); return 2 * min_mn - i + abs(m - n) - 2; } } /*-------------------------------------------------------------------- * Function: calcElement * Purpose: Calculate the position of (si, sj)-element */ void calcFirstDiagElement(long long int i, long long int *si, long long int *sj) { // Calculate the first element of diagonal if (i < n) { *si = i; *sj = 1; } else { *si = n - 1; *sj = i - n + 2; } } /*-------------------------------------------------------------------- * Function: backtrack * Purpose: Modify matrix to print, path change from value to PATH */ int backtrack(link_t* P, maxpos_t maxPos) { //hold maxPos value long long int predPos = 0; int len = 0; #ifdef DEBUG std::cerr << "maxpos = " << maxPos << std::endl; #endif //backtrack from maxPos to startPos = 0 do { #ifdef DEBUG std::cerr << "P[" << maxPos << "] = " << std::flush << P[maxPos] << std::endl; #endif switch (P[maxPos]) { case DIAGONAL: predPos = maxPos - m - 1; break; case UP: predPos = maxPos - m; break; case LEFT: predPos = maxPos - 1; break; default: assert(false); } #ifdef DEBUG P[maxPos] *= PATH; #endif maxPos = predPos; ++len; } while (P[maxPos] != NONE); return len; } /* End of backtrack */ /*-------------------------------------------------------------------- * Function: printMatrix * Purpose: Print Matrix */ void printMatrix(int* matrix) { long long int i, j; printf("-\t-\t"); for (j = 0; j < m-1; j++) { printf("%c\t", a[j]); } printf("\n-\t"); for (i = 0; i < n; i++) { //Lines for (j = 0; j < m; j++) { if (j==0 && i>0) printf("%c\t", b[i-1]); printf("%d\t", matrix[m * i + j]); } printf("\n"); } } /* End of printMatrix */ 
/*-------------------------------------------------------------------- * Function: printPredecessorMatrix * Purpose: Print predecessor matrix */ void printPredecessorMatrix(link_t* matrix) { long long int i, j, index; printf(" "); for (j = 0; j < m-1; j++) { printf("%c ", a[j]); } printf("\n "); for (i = 0; i < n; i++) { //Lines for (j = 0; j < m; j++) { if (j==0 && i>0) printf("%c ", b[i-1]); index = m * i + j; if (matrix[index] < 0) { printf(BOLDRED); if (matrix[index] == -UP) printf("↑ "); else if (matrix[index] == -LEFT) printf("← "); else if (matrix[index] == -DIAGONAL) printf("↖ "); else printf("- "); printf(RESET); } else { if (matrix[index] == UP) printf("↑ "); else if (matrix[index] == LEFT) printf("← "); else if (matrix[index] == DIAGONAL) printf("↖ "); else printf("- "); } } printf("\n"); } } /* End of printPredecessorMatrix */ /*-------------------------------------------------------------------- * Function: generate * Purpose: Generate arrays a and b */ void generate() { //Random seed srand(time(NULL)); //Generates the values of a long long int i; for (i = 0; i < m; i++) { int aux = rand() % 4; if (aux == 0) a[i] = 'A'; else if (aux == 2) a[i] = 'C'; else if (aux == 3) a[i] = 'G'; else a[i] = 'T'; } //Generates the values of b for (i = 0; i < n; i++) { int aux = rand() % 4; if (aux == 0) b[i] = 'A'; else if (aux == 2) b[i] = 'C'; else if (aux == 3) b[i] = 'G'; else b[i] = 'T'; } } /* End of generate */ /*-------------------------------------------------------------------- * External References: * http://vlab.amrita.edu/?sub=3&brch=274&sim=1433&cnt=1 * http://pt.slideshare.net/avrilcoghlan/the-smith-waterman-algorithm * http://baba.sourceforge.net/ */
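/* Editor's note (commentary added during review): with the built-in data, i.e. the Wikipedia Smith-Waterman example comparing TGTTACGG against GGTTGACTA, the run is self-checking - it asserts H[maxPos] == 13, the optimal local alignment score for that pair. */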
e50a863fdaf40becb3271a2c7e43e881bab8c0ea.hip
// !!! This is a file automatically generated by hipify!!! /* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * \file ndarray_function.cu * \brief GPU Implementation of ndarray function. */ // this will be invoked by nvcc and compile GPU version #include <hipcub/hipcub.hpp> #include <dmlc/logging.h> #include "../operator/tensor/elemwise_binary_op-inl.h" #include "../operator/tensor/elemwise_sum.h" #include "../operator/tensor/indexing_op.h" #include "../operator/tensor/init_op.h" #include "../operator/tensor/util/tensor_util-inl.h" #include "../operator/tensor/util/tensor_util-inl.cuh" #include "../common/cuda_utils.h" #include "./ndarray_function.h" #include "./ndarray_function-inl.h" #include "./ndarray_function-inl.cuh" namespace mxnet { namespace ndarray { template<> void Copy<cpu, gpu>(const TBlob &from, TBlob *to, Context from_ctx, Context to_ctx, RunContext ctx) { CHECK_EQ(to->type_flag_, from.type_flag_) << "Source and target must have the same data type when copying across devices."; MSHADOW_TYPE_SWITCH_WITH_BOOL(to->type_flag_, DType, { mshadow::Copy(to->FlatTo1D<gpu, DType>(), from.FlatTo1D<cpu, DType>(), ctx.get_stream<gpu>()); }); } template<> void Copy<gpu, cpu>(const TBlob &from, TBlob *to, Context from_ctx, Context to_ctx, RunContext ctx) { CHECK_EQ(to->type_flag_, from.type_flag_) << "Source and target must have the same data type when copying across devices."; MSHADOW_TYPE_SWITCH_WITH_BOOL(to->type_flag_, DType, { mshadow::Copy(to->FlatTo1D<cpu, DType>(), from.FlatTo1D<gpu, DType>(), ctx.get_stream<gpu>()); }); } template<> void Copy<gpu, gpu>(const TBlob &from, TBlob *to, Context from_ctx, Context to_ctx, RunContext ctx) { if (from_ctx.dev_id == to_ctx.dev_id) { mshadow::Stream<gpu>* s = ctx.get_stream<gpu>(); MSHADOW_TYPE_SWITCH_WITH_BOOL(to->type_flag_, DType, { if (to->type_flag_ == from.type_flag_) { mshadow::Copy(to->FlatTo1D<gpu, DType>(s), from.FlatTo1D<gpu, DType>(s), s); } else { MSHADOW_TYPE_SWITCH_WITH_BOOL(from.type_flag_, SrcDType, { to->FlatTo1D<gpu, DType>(s) = mshadow::expr::tcast<DType>(from.FlatTo1D<gpu, SrcDType>(s)); }) } }) } else { CHECK(from.CheckContiguous() && to->CheckContiguous()) << "copy across only supports contiguous memory"; CHECK_EQ(to->type_flag_, from.type_flag_) << "Source and target must have the same data type when copying across devices."; mshadow::Stream<gpu> *s = ctx.get_stream<gpu>(); CHECK(s != NULL) << "need stream in GPU context"; hipMemcpyPeerAsync(to->dptr_, to_ctx.dev_id, from.dptr_, from_ctx.dev_id, from.shape_.Size() * mshadow::mshadow_sizeof(to->type_flag_), s->stream_); } } /*! * \brief GPU impl of elemwise sum for rowsparse tensors.
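 * Editor's note: the implementation below first marks non-empty rows of all operands in a row_flg array, converts it in place into cumulative row positions with an inclusive prefix sum (hipcub::DeviceScan), and then uses that mapping both to fill the compacted row-index array and to scatter-add every operand's values into the output.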
*/ void ElementwiseSumRspImpl(mshadow::Stream<gpu>* s, const Resource& rsc, const std::vector<NDArray>& nds, NDArray* out) { using namespace mxnet::op; using namespace rowsparse; using nnvm::dim_t; CHECK_EQ(out->storage_type(), kRowSparseStorage) << "Expected rowsparse storage_type (" << out->storage_type() << " given)"; int init = 0; for (const auto& nd : nds) { if (nd.storage_initialized()) { init++; break; } } if (init == 0) { FillZerosRspImpl(s, *out); return; } const dim_t num_rows = out->shape()[0]; const dim_t row_length = out->shape().ProdShape(1, out->shape().ndim()); MSHADOW_TYPE_SWITCH(out->dtype(), DType, { // data type MSHADOW_IDX_TYPE_SWITCH(out->aux_type(kIdx), IType, { // row_idx type // Allocate temporary storage for row_flg array and cub's prefix sum operation IType* row_flg = NULL; void* d_temp_storage = NULL; size_t temp_storage_bytes = 0; hipStream_t stream = mshadow::Stream<gpu>::GetStream(s); hipcub::DeviceScan::InclusiveSum(d_temp_storage, temp_storage_bytes, row_flg, row_flg, num_rows, stream); mshadow::Tensor<gpu, 1, char> workspace = rsc .get_space_typed<gpu, 1, char>(mshadow::Shape1(num_rows * sizeof(IType) + temp_storage_bytes), s); row_flg = reinterpret_cast<IType*>(workspace.dptr_); d_temp_storage = workspace.dptr_ + num_rows*sizeof(IType); // Mark row_flg array with 0 for zero rows and 1 for non-zero rows dim_t num_threads = num_rows; mxnet_op::Kernel<mxnet_op::set_zero, gpu>::Launch(s, num_threads, row_flg); for (const auto& nd : nds) { if (nd.storage_initialized()) { const IType* nd_row_idx = nd.aux_data(kIdx).dptr<IType>(); const dim_t nd_nnr = nd.storage_shape()[0]; num_threads = nd_nnr; mxnet_op::Kernel<MarkRspRowFlgKernel, gpu>::Launch(s, num_threads, row_flg, nd_row_idx, nd_nnr); } } // Compute inclusive prefix sum over row_flg hipcub::DeviceScan::InclusiveSum(d_temp_storage, temp_storage_bytes, row_flg, row_flg, num_rows, stream); // Get total number of output non-zero rows from GPU and allocate out data and row_idx dim_t nnr_out = 0; CUDA_CALL(hipMemcpyAsync(&nnr_out, &row_flg[num_rows-1], sizeof(dim_t), hipMemcpyDeviceToHost, stream)); CUDA_CALL(hipStreamSynchronize(stream)); out->CheckAndAlloc({mshadow::Shape1(nnr_out)}); IType* out_row_idx = out->aux_data(kIdx).dptr<IType>(); DType* out_data = out->data().dptr<DType>(); // Fill row_idx array of output using row_flg num_threads = num_rows; mxnet_op::Kernel<FillRspRowIdxKernel, gpu>::Launch(s, num_threads, out_row_idx, row_flg, num_rows); // Perform elementwise addition, writing to output data num_threads = nnr_out * row_length; mxnet_op::Kernel<mxnet_op::set_zero, gpu>::Launch(s, num_threads, out_data); for (const auto& nd : nds) { if (nd.storage_initialized()) { const IType* nd_row_idx = nd.aux_data(kIdx).dptr<IType>(); const DType* nd_data = nd.data().dptr<DType>(); const dim_t nd_nnr = nd.storage_shape()[0]; num_threads = nd_nnr * row_length; mxnet_op::Kernel<ElementWiseRspAdditionKernel, gpu>::Launch(s, num_threads, out_data, row_flg, nd_row_idx, nd_data, nd_nnr, row_length); } } }); }); } void ElementwiseSumDnsCsrDnsImpl(mshadow::Stream<gpu>* s, const Resource& rsc, const std::vector<NDArray>& nds, NDArray* out) { using namespace mxnet::op; using namespace mxnet::op::mxnet_op; const TBlob& out_data = out->data(); MSHADOW_TYPE_SWITCH(out->dtype(), DType, { // data type Kernel<Sum, gpu>::Launch( s, out_data.Size(), out_data.dptr<DType>(), kWriteTo, nds[0].data().dptr<DType>(), nds[2].data().dptr<DType>()); const TBlob& csr_data = nds[1].data(); const TBlob& csr_indices = 
nds[1].aux_data(csr::kIdx); const TBlob& csr_indptr = nds[1].aux_data(csr::kIndPtr); const nnvm::dim_t num_rows = nds[1].shape()[0]; const nnvm::dim_t num_cols = nds[1].shape()[1]; MSHADOW_IDX_TYPE_SWITCH(csr_indices.type_flag_, IType, { // indices type MSHADOW_IDX_TYPE_SWITCH(csr_indptr.type_flag_, CType, { // indptr type if (nds[1].storage_initialized()) { Kernel<ElemwiseDnsCsrDnsWarpKernel<kWriteTo, mshadow_op::plus>, gpu>::Launch( s, kWarpSize * num_rows, out_data.dptr<DType>(), out_data.dptr<DType>(), csr_data.dptr<DType>(), csr_indices.dptr<IType>(), csr_indptr.dptr<CType>(), num_rows, num_cols); } }); }); }); } void ElementwiseSumContainsDnsImpl(mshadow::Stream<gpu>* s, const Resource& rsc, const std::vector<NDArray>& nds, NDArray* out) { using namespace mxnet::op; using namespace mxnet::op::mxnet_op; const TBlob& out_data = out->data(); MSHADOW_TYPE_SWITCH(out->dtype(), DType, { // data type for (size_t i = 0; i < nds.size(); ++i) { const NDArray& nd = nds[i]; const nnvm::dim_t num_rows = nd.shape()[0]; const nnvm::dim_t num_cols = nd.shape()[1]; const TBlob& nd_data = nd.data(); if (i == 0) { if (nd.storage_type() == kDefaultStorage) { Kernel<op_with_req<mshadow_op::identity, kWriteTo>, gpu>::Launch( s, out_data.Size(), out_data.dptr<DType>(), nd_data.dptr<DType>()); continue; } else { Kernel<set_zero, gpu>::Launch(s, out_data.Size(), out_data.dptr<DType>()); } } switch (nd.storage_type()) { case kDefaultStorage: { Kernel<op_with_req<mshadow_op::plus, kWriteTo>, gpu>::Launch( s, out_data.Size(), out_data.dptr<DType>(), out_data.dptr<DType>(), nd_data.dptr<DType>()); break; } case kCSRStorage: { const TBlob& nd_indices = nd.aux_data(csr::kIdx); const TBlob& nd_indptr = nd.aux_data(csr::kIndPtr); MSHADOW_IDX_TYPE_SWITCH(nd_indices.type_flag_, IType, { // indices type MSHADOW_IDX_TYPE_SWITCH(nd_indptr.type_flag_, CType, { // indptr type if (nd.storage_initialized()) { Kernel<ElemwiseDnsCsrDnsWarpKernel<kWriteTo, mshadow_op::plus>, gpu>::Launch( s, kWarpSize * num_rows, out_data.dptr<DType>(), out_data.dptr<DType>(), nd_data.dptr<DType>(), nd_indices.dptr<IType>(), nd_indptr.dptr<CType>(), num_rows, num_cols); } }); }); break; } case kRowSparseStorage: { const TBlob& nd_indices = nd.aux_data(rowsparse::kIdx); MSHADOW_IDX_TYPE_SWITCH(nd_indices.type_flag_, IType, { // indices type if (nd.storage_initialized()) { const nnvm::dim_t nz_rows = nd_indices.Size(); Kernel<ElemwiseDnsRspDnsKernel<kWriteTo, mshadow_op::plus>, gpu>::Launch( s, nz_rows * num_cols, out_data.dptr<DType>(), out_data.dptr<DType>(), nd_data.dptr<DType>(), nd_indices.dptr<IType>(), num_rows, nz_rows, num_cols); } }); break; } default: LOG(FATAL) << "unknown storage type " << nd.storage_type() << "encountered..."; } } }); } /*! * \brief Parallel gpu impl of elemwise sum for sparse tensors. * Currently only support row sparse sum. 
*/ template<> void ElementwiseSum<gpu>(mshadow::Stream<gpu>* s, const Resource& rsc, const std::vector<NDArray>& nds, NDArray* out) { if (nds.empty()) return; if (common::ContainsOnlyStorage(nds, kRowSparseStorage)) { ElementwiseSumRspImpl(s, rsc, nds, out); } else if (nds.size() == 3U && nds[0].storage_type() == kDefaultStorage && nds[1].storage_type() == kCSRStorage && nds[2].storage_type() == kDefaultStorage && out->storage_type() == kDefaultStorage) { ElementwiseSumDnsCsrDnsImpl(s, rsc, nds, out); } else if (nds.size() > 4U && common::ContainsStorageType(nds, kDefaultStorage) && out->storage_type() == kDefaultStorage) { ElementwiseSumContainsDnsImpl(s, rsc, nds, out); } else { LOG(FATAL) << "ElementwiseSum<gpu> has not been implemented for storage_type = " << nds[0].storage_type(); } } template<> void Eval<gpu>(mshadow::Stream<gpu> *s, const real_t val, const NDArray& dst) { NDArray temp = dst; const NDArrayStorageType stype = temp.storage_type(); if (stype == kRowSparseStorage) { SetValueRspImpl(s, val, &temp); } else { LOG(FATAL) << "Not implemented for storage type " << stype; } } } // namespace ndarray } // namespace mxnet
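The cross-device branch of Copy<gpu, gpu> above bottoms out in hipMemcpyPeerAsync, whose argument order (dst pointer, dst device, src pointer, src device, byte count, stream) is easy to transpose. Below is a minimal standalone sketch of that call, written in CUDA spelling since hipify maps it one-for-one to the HIP name; the device ids and the 256-element size are illustrative, not taken from the file.

#include <cuda_runtime.h>
#include <cstdio>

int main() {
  int n = 0;
  cudaGetDeviceCount(&n);
  if (n < 2) { std::printf("needs two GPUs\n"); return 0; }
  float *d0 = nullptr, *d1 = nullptr;
  cudaSetDevice(0); cudaMalloc(&d0, 256 * sizeof(float));
  cudaSetDevice(1); cudaMalloc(&d1, 256 * sizeof(float));
  cudaStream_t s;
  cudaStreamCreate(&s);
  // dst ptr, dst device, src ptr, src device, bytes, stream:
  // the same order Copy<gpu, gpu> uses above.
  cudaMemcpyPeerAsync(d1, 1, d0, 0, 256 * sizeof(float), s);
  cudaStreamSynchronize(s);
  cudaStreamDestroy(s);
  cudaFree(d1);
  cudaSetDevice(0);
  cudaFree(d0);
  return 0;
}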
e50a863fdaf40becb3271a2c7e43e881bab8c0ea.cu
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * \file ndarray_function.cu * \brief GPU Implementation of ndarray function. */ // this will be invoked by nvcc and compile GPU version #include <cub/cub.cuh> #include <dmlc/logging.h> #include "../operator/tensor/elemwise_binary_op-inl.h" #include "../operator/tensor/elemwise_sum.h" #include "../operator/tensor/indexing_op.h" #include "../operator/tensor/init_op.h" #include "../operator/tensor/util/tensor_util-inl.h" #include "../operator/tensor/util/tensor_util-inl.cuh" #include "../common/cuda_utils.h" #include "./ndarray_function.h" #include "./ndarray_function-inl.h" #include "./ndarray_function-inl.cuh" namespace mxnet { namespace ndarray { template<> void Copy<cpu, gpu>(const TBlob &from, TBlob *to, Context from_ctx, Context to_ctx, RunContext ctx) { CHECK_EQ(to->type_flag_, from.type_flag_) << "Source and target must have the same data type when copying across devices."; MSHADOW_TYPE_SWITCH_WITH_BOOL(to->type_flag_, DType, { mshadow::Copy(to->FlatTo1D<gpu, DType>(), from.FlatTo1D<cpu, DType>(), ctx.get_stream<gpu>()); }); } template<> void Copy<gpu, cpu>(const TBlob &from, TBlob *to, Context from_ctx, Context to_ctx, RunContext ctx) { CHECK_EQ(to->type_flag_, from.type_flag_) << "Source and target must have the same data type when copying across devices."; MSHADOW_TYPE_SWITCH_WITH_BOOL(to->type_flag_, DType, { mshadow::Copy(to->FlatTo1D<cpu, DType>(), from.FlatTo1D<gpu, DType>(), ctx.get_stream<gpu>()); }); } template<> void Copy<gpu, gpu>(const TBlob &from, TBlob *to, Context from_ctx, Context to_ctx, RunContext ctx) { if (from_ctx.dev_id == to_ctx.dev_id) { mshadow::Stream<gpu>* s = ctx.get_stream<gpu>(); MSHADOW_TYPE_SWITCH_WITH_BOOL(to->type_flag_, DType, { if (to->type_flag_ == from.type_flag_) { mshadow::Copy(to->FlatTo1D<gpu, DType>(s), from.FlatTo1D<gpu, DType>(s), s); } else { MSHADOW_TYPE_SWITCH_WITH_BOOL(from.type_flag_, SrcDType, { to->FlatTo1D<gpu, DType>(s) = mshadow::expr::tcast<DType>(from.FlatTo1D<gpu, SrcDType>(s)); }) } }) } else { CHECK(from.CheckContiguous() && to->CheckContiguous()) << "copy across only support continugous memory"; CHECK_EQ(to->type_flag_, from.type_flag_) << "Source and target must have the same data type when copying across devices."; mshadow::Stream<gpu> *s = ctx.get_stream<gpu>(); CHECK(s != NULL) << "need stream in GPU context"; cudaMemcpyPeerAsync(to->dptr_, to_ctx.dev_id, from.dptr_, from_ctx.dev_id, from.shape_.Size() * mshadow::mshadow_sizeof(to->type_flag_), s->stream_); } } /*! * \brief GPU impl of elemwise sum for rowsparse tensors. 
*/ void ElementwiseSumRspImpl(mshadow::Stream<gpu>* s, const Resource& rsc, const std::vector<NDArray>& nds, NDArray* out) { using namespace mxnet::op; using namespace rowsparse; using nnvm::dim_t; CHECK_EQ(out->storage_type(), kRowSparseStorage) << "Expected rowsparse storage_type (" << out->storage_type() << " given)"; int init = 0; for (const auto& nd : nds) { if (nd.storage_initialized()) { init++; break; } } if (init == 0) { FillZerosRspImpl(s, *out); return; } const dim_t num_rows = out->shape()[0]; const dim_t row_length = out->shape().ProdShape(1, out->shape().ndim()); MSHADOW_TYPE_SWITCH(out->dtype(), DType, { // data type MSHADOW_IDX_TYPE_SWITCH(out->aux_type(kIdx), IType, { // row_idx type // Allocate temporary storage for row_flg array and cub's prefix sum operation IType* row_flg = NULL; void* d_temp_storage = NULL; size_t temp_storage_bytes = 0; cudaStream_t stream = mshadow::Stream<gpu>::GetStream(s); cub::DeviceScan::InclusiveSum(d_temp_storage, temp_storage_bytes, row_flg, row_flg, num_rows, stream); mshadow::Tensor<gpu, 1, char> workspace = rsc .get_space_typed<gpu, 1, char>(mshadow::Shape1(num_rows * sizeof(IType) + temp_storage_bytes), s); row_flg = reinterpret_cast<IType*>(workspace.dptr_); d_temp_storage = workspace.dptr_ + num_rows*sizeof(IType); // Mark row_flg array with 0 for zero rows and 1 for non-zero rows dim_t num_threads = num_rows; mxnet_op::Kernel<mxnet_op::set_zero, gpu>::Launch(s, num_threads, row_flg); for (const auto& nd : nds) { if (nd.storage_initialized()) { const IType* nd_row_idx = nd.aux_data(kIdx).dptr<IType>(); const dim_t nd_nnr = nd.storage_shape()[0]; num_threads = nd_nnr; mxnet_op::Kernel<MarkRspRowFlgKernel, gpu>::Launch(s, num_threads, row_flg, nd_row_idx, nd_nnr); } } // Compute inclusive prefix sum over row_flg cub::DeviceScan::InclusiveSum(d_temp_storage, temp_storage_bytes, row_flg, row_flg, num_rows, stream); // Get total number of output non-zero rows from GPU and allocate out data and row_idx dim_t nnr_out = 0; CUDA_CALL(cudaMemcpyAsync(&nnr_out, &row_flg[num_rows-1], sizeof(dim_t), cudaMemcpyDeviceToHost, stream)); CUDA_CALL(cudaStreamSynchronize(stream)); out->CheckAndAlloc({mshadow::Shape1(nnr_out)}); IType* out_row_idx = out->aux_data(kIdx).dptr<IType>(); DType* out_data = out->data().dptr<DType>(); // Fill row_idx array of output using row_flg num_threads = num_rows; mxnet_op::Kernel<FillRspRowIdxKernel, gpu>::Launch(s, num_threads, out_row_idx, row_flg, num_rows); // Perform elementwise addition, writing to output data num_threads = nnr_out * row_length; mxnet_op::Kernel<mxnet_op::set_zero, gpu>::Launch(s, num_threads, out_data); for (const auto& nd : nds) { if (nd.storage_initialized()) { const IType* nd_row_idx = nd.aux_data(kIdx).dptr<IType>(); const DType* nd_data = nd.data().dptr<DType>(); const dim_t nd_nnr = nd.storage_shape()[0]; num_threads = nd_nnr * row_length; mxnet_op::Kernel<ElementWiseRspAdditionKernel, gpu>::Launch(s, num_threads, out_data, row_flg, nd_row_idx, nd_data, nd_nnr, row_length); } } }); }); } void ElementwiseSumDnsCsrDnsImpl(mshadow::Stream<gpu>* s, const Resource& rsc, const std::vector<NDArray>& nds, NDArray* out) { using namespace mxnet::op; using namespace mxnet::op::mxnet_op; const TBlob& out_data = out->data(); MSHADOW_TYPE_SWITCH(out->dtype(), DType, { // data type Kernel<Sum, gpu>::Launch( s, out_data.Size(), out_data.dptr<DType>(), kWriteTo, nds[0].data().dptr<DType>(), nds[2].data().dptr<DType>()); const TBlob& csr_data = nds[1].data(); const TBlob& csr_indices = 
nds[1].aux_data(csr::kIdx); const TBlob& csr_indptr = nds[1].aux_data(csr::kIndPtr); const nnvm::dim_t num_rows = nds[1].shape()[0]; const nnvm::dim_t num_cols = nds[1].shape()[1]; MSHADOW_IDX_TYPE_SWITCH(csr_indices.type_flag_, IType, { // indices type MSHADOW_IDX_TYPE_SWITCH(csr_indptr.type_flag_, CType, { // indptr type if (nds[1].storage_initialized()) { Kernel<ElemwiseDnsCsrDnsWarpKernel<kWriteTo, mshadow_op::plus>, gpu>::Launch( s, kWarpSize * num_rows, out_data.dptr<DType>(), out_data.dptr<DType>(), csr_data.dptr<DType>(), csr_indices.dptr<IType>(), csr_indptr.dptr<CType>(), num_rows, num_cols); } }); }); }); } void ElementwiseSumContainsDnsImpl(mshadow::Stream<gpu>* s, const Resource& rsc, const std::vector<NDArray>& nds, NDArray* out) { using namespace mxnet::op; using namespace mxnet::op::mxnet_op; const TBlob& out_data = out->data(); MSHADOW_TYPE_SWITCH(out->dtype(), DType, { // data type for (size_t i = 0; i < nds.size(); ++i) { const NDArray& nd = nds[i]; const nnvm::dim_t num_rows = nd.shape()[0]; const nnvm::dim_t num_cols = nd.shape()[1]; const TBlob& nd_data = nd.data(); if (i == 0) { if (nd.storage_type() == kDefaultStorage) { Kernel<op_with_req<mshadow_op::identity, kWriteTo>, gpu>::Launch( s, out_data.Size(), out_data.dptr<DType>(), nd_data.dptr<DType>()); continue; } else { Kernel<set_zero, gpu>::Launch(s, out_data.Size(), out_data.dptr<DType>()); } } switch (nd.storage_type()) { case kDefaultStorage: { Kernel<op_with_req<mshadow_op::plus, kWriteTo>, gpu>::Launch( s, out_data.Size(), out_data.dptr<DType>(), out_data.dptr<DType>(), nd_data.dptr<DType>()); break; } case kCSRStorage: { const TBlob& nd_indices = nd.aux_data(csr::kIdx); const TBlob& nd_indptr = nd.aux_data(csr::kIndPtr); MSHADOW_IDX_TYPE_SWITCH(nd_indices.type_flag_, IType, { // indices type MSHADOW_IDX_TYPE_SWITCH(nd_indptr.type_flag_, CType, { // indptr type if (nd.storage_initialized()) { Kernel<ElemwiseDnsCsrDnsWarpKernel<kWriteTo, mshadow_op::plus>, gpu>::Launch( s, kWarpSize * num_rows, out_data.dptr<DType>(), out_data.dptr<DType>(), nd_data.dptr<DType>(), nd_indices.dptr<IType>(), nd_indptr.dptr<CType>(), num_rows, num_cols); } }); }); break; } case kRowSparseStorage: { const TBlob& nd_indices = nd.aux_data(rowsparse::kIdx); MSHADOW_IDX_TYPE_SWITCH(nd_indices.type_flag_, IType, { // indices type if (nd.storage_initialized()) { const nnvm::dim_t nz_rows = nd_indices.Size(); Kernel<ElemwiseDnsRspDnsKernel<kWriteTo, mshadow_op::plus>, gpu>::Launch( s, nz_rows * num_cols, out_data.dptr<DType>(), out_data.dptr<DType>(), nd_data.dptr<DType>(), nd_indices.dptr<IType>(), num_rows, nz_rows, num_cols); } }); break; } default: LOG(FATAL) << "unknown storage type " << nd.storage_type() << "encountered..."; } } }); } /*! * \brief Parallel gpu impl of elemwise sum for sparse tensors. * Currently only support row sparse sum. 
*/ template<> void ElementwiseSum<gpu>(mshadow::Stream<gpu>* s, const Resource& rsc, const std::vector<NDArray>& nds, NDArray* out) { if (nds.empty()) return; if (common::ContainsOnlyStorage(nds, kRowSparseStorage)) { ElementwiseSumRspImpl(s, rsc, nds, out); } else if (nds.size() == 3U && nds[0].storage_type() == kDefaultStorage && nds[1].storage_type() == kCSRStorage && nds[2].storage_type() == kDefaultStorage && out->storage_type() == kDefaultStorage) { ElementwiseSumDnsCsrDnsImpl(s, rsc, nds, out); } else if (nds.size() > 4U && common::ContainsStorageType(nds, kDefaultStorage) && out->storage_type() == kDefaultStorage) { ElementwiseSumContainsDnsImpl(s, rsc, nds, out); } else { LOG(FATAL) << "ElementwiseSum<gpu> has not been implemented for storage_type = " << nds[0].storage_type(); } } template<> void Eval<gpu>(mshadow::Stream<gpu> *s, const real_t val, const NDArray& dst) { NDArray temp = dst; const NDArrayStorageType stype = temp.storage_type(); if (stype == kRowSparseStorage) { SetValueRspImpl(s, val, &temp); } else { LOG(FATAL) << "Not implemented for storage type " << stype; } } } // namespace ndarray } // namespace mxnet
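ElementwiseSumRspImpl above depends on CUB's two-call temp-storage protocol: the first DeviceScan::InclusiveSum call is made with a null d_temp_storage and only fills in temp_storage_bytes, and the second call actually runs the scan. A minimal sketch of that protocol in isolation follows (the 1024-element size is illustrative; the scan runs in place, as in the kernel above).

#include <cub/cub.cuh>
#include <cuda_runtime.h>

int main() {
  const int n = 1024;
  int *d_data = nullptr;
  cudaMalloc(&d_data, n * sizeof(int));
  cudaMemset(d_data, 0, n * sizeof(int));  // stand-in for real input
  void *d_temp = nullptr;
  size_t temp_bytes = 0;
  // Call 1: d_temp is null, so CUB only reports the workspace size.
  cub::DeviceScan::InclusiveSum(d_temp, temp_bytes, d_data, d_data, n);
  cudaMalloc(&d_temp, temp_bytes);
  // Call 2: same arguments, now performs the in-place inclusive prefix sum.
  cub::DeviceScan::InclusiveSum(d_temp, temp_bytes, d_data, d_data, n);
  cudaDeviceSynchronize();
  cudaFree(d_temp);
  cudaFree(d_data);
  return 0;
}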
8112b0420c2b3a5e629cd5c947d8c344625daea7.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <iostream> #include <SDL.h> #include <GL/glew.h> #include <exception> #include <chrono> #include <tuple> #include <complex> #include <iomanip> #include <cassert> #include <cuda_gl_interop.h> #include <surface_functions.h> #include "geom.h" #include "world.h" using namespace std::chrono_literals; void printVal(GLenum tp, std::string name) { const GLubyte* sv = glGetString(tp); if (sv == nullptr) { std::cerr << "can't get " << name << ": " << glewGetErrorString(glGetError()) << std::endl; } else { std::cerr << name << ": " << reinterpret_cast<const char*>(sv) << std::endl; } } class SDLError : public std::runtime_error { using std::runtime_error::runtime_error; }; struct SDLOpenGLContext { SDL_Window *win = nullptr; SDL_GLContext ctx = nullptr; SDLOpenGLContext(const std::string& title, int x, int y, int w, int h, uint32_t flags) { if (SDL_Init(SDL_INIT_VIDEO) != 0) { throw SDLError(std::string("SDL_Init: ") + SDL_GetError()); } assert(0 == SDL_GL_SetAttribute(SDL_GL_CONTEXT_PROFILE_MASK, SDL_GL_CONTEXT_PROFILE_CORE)); assert(0 == SDL_GL_SetAttribute(SDL_GL_CONTEXT_MAJOR_VERSION, 4)); assert(0 == SDL_GL_SetAttribute(SDL_GL_CONTEXT_MINOR_VERSION, 6)); win = SDL_CreateWindow(title.c_str(), x, y, w, h, flags); if (!win) { SDL_Quit(); throw SDLError(std::string("SDL_CreateWindow: ") + SDL_GetError()); } ctx = SDL_GL_CreateContext(win); if (ctx == nullptr) { SDL_DestroyWindow(win); SDL_Quit(); throw SDLError(std::string("SDL_GL_CreateContext: ") + SDL_GetError()); } // ////... glewExperimental GLenum glewError = glewInit(); if (glewError != GLEW_OK) { SDL_GL_DeleteContext(ctx); SDL_DestroyWindow(win); SDL_Quit(); throw std::runtime_error( std::string("glewInit: ") + reinterpret_cast<const char *>(glewGetErrorString(glewError)) ); } // maybe vsync } ~SDLOpenGLContext() { SDL_GL_DeleteContext(ctx); SDL_DestroyWindow(win); SDL_Quit(); } }; struct Axis { bool neg = false, pos = false; float delta() { return (neg * -1.0f + pos * 1.0f); } }; template< typename T > std::string int_to_hex( T i ) { std::stringstream stream; stream << "0x" << std::setfill ('0') << std::setw(sizeof(T)*2) << std::hex << i; return stream.str(); } void checkErr(int line_num, std::string line) { GLenum err = glGetError(); if (err != GL_NO_ERROR) { std::cerr << line_num << ": " << line << "\ngl error: " << glewGetErrorString(err) << " (" << int_to_hex(err) << ")" << std::endl; throw std::runtime_error(std::to_string(err)); } } #define glGuard(expr) \ do { \ glGetError(); \ expr; \ checkErr(__LINE__, #expr); \ } while (false) #define cudaCheck(expr) _cudaCheck((expr), #expr, __FILE__, __LINE__) inline void _cudaCheck(hipError_t code, const char* expr, const char *file, int line) { if (code == hipSuccess) return; std::cerr << "Cuda call failed at " << file << ":" << line << ": " << expr << ":\n" << hipGetErrorString(code); exit(1); } __global__ void render(int w, int h, hipSurfaceObject_t surf, World world, float t) { float k = ::min(w, h) / 2.0f; int idx_x = blockIdx.x * blockDim.x + threadIdx.x; int stride_x = blockDim.x * gridDim.x; int idx_y = blockIdx.y * blockDim.y + threadIdx.y; int stride_y = blockDim.y * gridDim.y; for (int y = idx_y; y < h; y += stride_y) { for (int x = idx_x; x < w; x += stride_x) { float rel_x = (x + 0.5f - w / 2.0f) / k; float rel_y = (y + 0.5f - h / 2.0f) / k; auto col = world.viewAt(rel_x, rel_y, t); surf2Dwrite(make_uchar4( std::get<0>(col), std::get<1>(col), std::get<2>(col), 0 ), surf, x * 4, y); } } 
} void dow() { const int W = 1900; const int H = 1000; auto ctx = SDLOpenGLContext("Hello!", 100, 100, W, H, SDL_WINDOW_SHOWN | SDL_WINDOW_OPENGL); printVal(GL_RENDERER, "GL_VENDOR"); printVal(GL_VENDOR, "GL_RENDERER"); printVal(GL_VERSION, "GL_VERSION"); // https://stackoverflow.com/questions/31482816/opengl-is-there-an-easier-way-to-fill-window-with-a-texture-instead-using-vbo GLuint fb = 0; glGuard(glGenFramebuffers(1, &fb)); glGuard(glBindFramebuffer(GL_READ_FRAMEBUFFER, fb)); GLuint tex = 0; glGuard(glGenTextures(1, &tex)); glGuard(glBindTexture(GL_TEXTURE_2D, tex)); glGuard(glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, W, H, 0, GL_RGBA, GL_UNSIGNED_BYTE, nullptr)); glGuard(glBindTexture(GL_TEXTURE_2D, 0)); glGuard(glFramebufferTexture2D(GL_READ_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, tex, 0)); glBindFramebuffer(GL_DRAW_FRAMEBUFFER, 0); // https://forums.developer.nvidia.com/t/reading-and-writing-opengl-textures-with-cuda/31746/6 cudaGraphicsResource *resource; cudaCheck(hipGraphicsGLRegisterImage(&resource, tex, GL_TEXTURE_2D, hipGraphicsRegisterFlagsSurfaceLoadStore)); cudaCheck(hipGraphicsMapResources(1, &resource)); hipArray_t writeArray; cudaCheck(hipGraphicsSubResourceGetMappedArray(&writeArray, resource, 0, 0)); hipResourceDesc descr = {}; descr.resType = hipResourceTypeArray; descr.res.array.array = writeArray; hipSurfaceObject_t surf; cudaCheck(hipCreateSurfaceObject(&surf, &descr)); Axis dx, dy, dphi, dz; World world; std::cout << std::endl; auto prev_frame = std::chrono::steady_clock::now(); auto start = prev_frame; while (true) { auto now = std::chrono::steady_clock::now(); float dt = std::chrono::duration_cast<std::chrono::duration<float, std::chrono::seconds::period>>(now - prev_frame).count(); prev_frame = now; std::cout << world.curr_pos.x << " " << world.curr_pos.y << " " << world.curr_dir.x << " " << world.curr_dir.y << " " << dt - 1/60.0f << " \r"; SDL_Event evt; bool quit = false; while (SDL_PollEvent(&evt)) { if (evt.type == SDL_QUIT) { quit = true; } if (evt.type == SDL_KEYDOWN) { switch (evt.key.keysym.sym) { case (SDLK_d): dx.pos = true; break; case (SDLK_a): dx.neg = true; break; case (SDLK_w): dy.pos = true; break; case (SDLK_s): dy.neg = true; break; case (SDLK_q): dphi.pos = true; break; case (SDLK_e): dphi.neg = true; break; case (SDLK_z): dz.pos = true; break; case (SDLK_x): dz.neg = true; break; } } if (evt.type == SDL_KEYUP) { switch (evt.key.keysym.sym) { case (SDLK_d): dx.pos = false; break; case (SDLK_a): dx.neg = false; break; case (SDLK_w): dy.pos = false; break; case (SDLK_s): dy.neg = false; break; case (SDLK_q): dphi.pos = false; break; case (SDLK_e): dphi.neg = false; break; case (SDLK_z): dz.pos = false; break; case (SDLK_x): dz.neg = false; break; } } } if (quit) break; float t = std::chrono::duration_cast<std::chrono::duration<float, std::chrono::seconds::period>>(prev_frame - start).count(); world.rotate(dphi.delta() * dt); Vec relWalkDir = Vec( dx.delta(), dy.delta() ); world.walk(relWalkDir, dt, t); world.zoom(dz.delta() * dt); hipLaunchKernelGGL(( render), dim3(10), dim3(256), 0, 0, W, H, surf, world, t); hipDeviceSynchronize(); // TODO: clear? 
glGuard(glBlitFramebuffer(0, 0, W, H, 0, 0, W, H, GL_COLOR_BUFFER_BIT, GL_NEAREST)); SDL_GL_SwapWindow(ctx.win); auto left = std::chrono::steady_clock::now() - prev_frame; SDL_Delay(::max(0.0f, 1000.0f / 60 - std::chrono::duration_cast<std::chrono::milliseconds>(left).count())); } cudaCheck(hipDestroySurfaceObject(surf)); cudaCheck(hipGraphicsUnmapResources(1, &resource)); cudaCheck(hipGraphicsUnregisterResource(resource)); } int main(int, char**) { try { dow(); } catch (const std::exception &e) { std::cerr << e.what() << std::endl; return 1; } return 0; }
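The render kernel above is a grid-stride loop: each thread starts at its global index and advances by the total thread count, so the fixed launch shape used later (10 blocks of 256 threads) covers any W x H image. The same pattern in one dimension, as a sketch with illustrative names (CUDA launch syntax; the hipified build would route the launch through hipLaunchKernelGGL):

__global__ void scale(float *buf, int n, float k) {
  int idx = blockIdx.x * blockDim.x + threadIdx.x;
  int stride = blockDim.x * gridDim.x;   // total threads in the grid
  for (int i = idx; i < n; i += stride)  // each thread strides across buf
    buf[i] *= k;
}
// Any launch shape is valid, e.g. scale<<<10, 256>>>(d_buf, n, 2.0f);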
8112b0420c2b3a5e629cd5c947d8c344625daea7.cu
#include <iostream> #include <SDL.h> #include <GL/glew.h> #include <exception> #include <chrono> #include <tuple> #include <complex> #include <iomanip> #include <cassert> #include <cuda_gl_interop.h> #include <surface_functions.h> #include "geom.h" #include "world.h" using namespace std::chrono_literals; void printVal(GLenum tp, std::string name) { const GLubyte* sv = glGetString(tp); if (sv == nullptr) { std::cerr << "can't get " << name << ": " << glewGetErrorString(glGetError()) << std::endl; } else { std::cerr << name << ": " << reinterpret_cast<const char*>(sv) << std::endl; } } class SDLError : public std::runtime_error { using std::runtime_error::runtime_error; }; struct SDLOpenGLContext { SDL_Window *win = nullptr; SDL_GLContext ctx = nullptr; SDLOpenGLContext(const std::string& title, int x, int y, int w, int h, uint32_t flags) { if (SDL_Init(SDL_INIT_VIDEO) != 0) { throw SDLError(std::string("SDL_Init: ") + SDL_GetError()); } assert(0 == SDL_GL_SetAttribute(SDL_GL_CONTEXT_PROFILE_MASK, SDL_GL_CONTEXT_PROFILE_CORE)); assert(0 == SDL_GL_SetAttribute(SDL_GL_CONTEXT_MAJOR_VERSION, 4)); assert(0 == SDL_GL_SetAttribute(SDL_GL_CONTEXT_MINOR_VERSION, 6)); win = SDL_CreateWindow(title.c_str(), x, y, w, h, flags); if (!win) { SDL_Quit(); throw SDLError(std::string("SDL_CreateWindow: ") + SDL_GetError()); } ctx = SDL_GL_CreateContext(win); if (ctx == nullptr) { SDL_DestroyWindow(win); SDL_Quit(); throw SDLError(std::string("SDL_GL_CreateContext: ") + SDL_GetError()); } // ////... glewExperimental GLenum glewError = glewInit(); if (glewError != GLEW_OK) { SDL_GL_DeleteContext(ctx); SDL_DestroyWindow(win); SDL_Quit(); throw std::runtime_error( std::string("glewInit: ") + reinterpret_cast<const char *>(glewGetErrorString(glewError)) ); } // maybe vsync } ~SDLOpenGLContext() { SDL_GL_DeleteContext(ctx); SDL_DestroyWindow(win); SDL_Quit(); } }; struct Axis { bool neg = false, pos = false; float delta() { return (neg * -1.0f + pos * 1.0f); } }; template< typename T > std::string int_to_hex( T i ) { std::stringstream stream; stream << "0x" << std::setfill ('0') << std::setw(sizeof(T)*2) << std::hex << i; return stream.str(); } void checkErr(int line_num, std::string line) { GLenum err = glGetError(); if (err != GL_NO_ERROR) { std::cerr << line_num << ": " << line << "\ngl error: " << glewGetErrorString(err) << " (" << int_to_hex(err) << ")" << std::endl; throw std::runtime_error(std::to_string(err)); } } #define glGuard(expr) \ do { \ glGetError(); \ expr; \ checkErr(__LINE__, #expr); \ } while (false) #define cudaCheck(expr) _cudaCheck((expr), #expr, __FILE__, __LINE__) inline void _cudaCheck(cudaError_t code, const char* expr, const char *file, int line) { if (code == cudaSuccess) return; std::cerr << "Cuda call failed at " << file << ":" << line << ": " << expr << ":\n" << cudaGetErrorString(code); exit(1); } __global__ void render(int w, int h, cudaSurfaceObject_t surf, World world, float t) { float k = std::min(w, h) / 2.0f; int idx_x = blockIdx.x * blockDim.x + threadIdx.x; int stride_x = blockDim.x * gridDim.x; int idx_y = blockIdx.y * blockDim.y + threadIdx.y; int stride_y = blockDim.y * gridDim.y; for (int y = idx_y; y < h; y += stride_y) { for (int x = idx_x; x < w; x += stride_x) { float rel_x = (x + 0.5f - w / 2.0f) / k; float rel_y = (y + 0.5f - h / 2.0f) / k; auto col = world.viewAt(rel_x, rel_y, t); surf2Dwrite(make_uchar4( std::get<0>(col), std::get<1>(col), std::get<2>(col), 0 ), surf, x * 4, y); } } } void dow() { const int W = 1900; const int H = 1000; auto ctx = 
SDLOpenGLContext("Hello!", 100, 100, W, H, SDL_WINDOW_SHOWN | SDL_WINDOW_OPENGL); printVal(GL_RENDERER, "GL_VENDOR"); printVal(GL_VENDOR, "GL_RENDERER"); printVal(GL_VERSION, "GL_VERSION"); // https://stackoverflow.com/questions/31482816/opengl-is-there-an-easier-way-to-fill-window-with-a-texture-instead-using-vbo GLuint fb = 0; glGuard(glGenFramebuffers(1, &fb)); glGuard(glBindFramebuffer(GL_READ_FRAMEBUFFER, fb)); GLuint tex = 0; glGuard(glGenTextures(1, &tex)); glGuard(glBindTexture(GL_TEXTURE_2D, tex)); glGuard(glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, W, H, 0, GL_RGBA, GL_UNSIGNED_BYTE, nullptr)); glGuard(glBindTexture(GL_TEXTURE_2D, 0)); glGuard(glFramebufferTexture2D(GL_READ_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, tex, 0)); glBindFramebuffer(GL_DRAW_FRAMEBUFFER, 0); // https://forums.developer.nvidia.com/t/reading-and-writing-opengl-textures-with-cuda/31746/6 cudaGraphicsResource *resource; cudaCheck(cudaGraphicsGLRegisterImage(&resource, tex, GL_TEXTURE_2D, cudaGraphicsRegisterFlagsSurfaceLoadStore)); cudaCheck(cudaGraphicsMapResources(1, &resource)); cudaArray_t writeArray; cudaCheck(cudaGraphicsSubResourceGetMappedArray(&writeArray, resource, 0, 0)); cudaResourceDesc descr = {}; descr.resType = cudaResourceTypeArray; descr.res.array.array = writeArray; cudaSurfaceObject_t surf; cudaCheck(cudaCreateSurfaceObject(&surf, &descr)); Axis dx, dy, dphi, dz; World world; std::cout << std::endl; auto prev_frame = std::chrono::steady_clock::now(); auto start = prev_frame; while (true) { auto now = std::chrono::steady_clock::now(); float dt = std::chrono::duration_cast<std::chrono::duration<float, std::chrono::seconds::period>>(now - prev_frame).count(); prev_frame = now; std::cout << world.curr_pos.x << " " << world.curr_pos.y << " " << world.curr_dir.x << " " << world.curr_dir.y << " " << dt - 1/60.0f << " \r"; SDL_Event evt; bool quit = false; while (SDL_PollEvent(&evt)) { if (evt.type == SDL_QUIT) { quit = true; } if (evt.type == SDL_KEYDOWN) { switch (evt.key.keysym.sym) { case (SDLK_d): dx.pos = true; break; case (SDLK_a): dx.neg = true; break; case (SDLK_w): dy.pos = true; break; case (SDLK_s): dy.neg = true; break; case (SDLK_q): dphi.pos = true; break; case (SDLK_e): dphi.neg = true; break; case (SDLK_z): dz.pos = true; break; case (SDLK_x): dz.neg = true; break; } } if (evt.type == SDL_KEYUP) { switch (evt.key.keysym.sym) { case (SDLK_d): dx.pos = false; break; case (SDLK_a): dx.neg = false; break; case (SDLK_w): dy.pos = false; break; case (SDLK_s): dy.neg = false; break; case (SDLK_q): dphi.pos = false; break; case (SDLK_e): dphi.neg = false; break; case (SDLK_z): dz.pos = false; break; case (SDLK_x): dz.neg = false; break; } } } if (quit) break; float t = std::chrono::duration_cast<std::chrono::duration<float, std::chrono::seconds::period>>(prev_frame - start).count(); world.rotate(dphi.delta() * dt); Vec relWalkDir = Vec( dx.delta(), dy.delta() ); world.walk(relWalkDir, dt, t); world.zoom(dz.delta() * dt); render<<<10, 256>>>(W, H, surf, world, t); cudaDeviceSynchronize(); // TODO: clear? 
glGuard(glBlitFramebuffer(0, 0, W, H, 0, 0, W, H, GL_COLOR_BUFFER_BIT, GL_NEAREST)); SDL_GL_SwapWindow(ctx.win); auto left = std::chrono::steady_clock::now() - prev_frame; SDL_Delay(std::max(0.0f, 1000.0f / 60 - std::chrono::duration_cast<std::chrono::milliseconds>(left).count())); } cudaCheck(cudaDestroySurfaceObject(surf)); cudaCheck(cudaGraphicsUnmapResources(1, &resource)); cudaCheck(cudaGraphicsUnregisterResource(resource)); } int main(int, char**) { try { dow(); } catch (const std::exception &e) { std::cerr << e.what() << std::endl; return 1; } return 0; }
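One subtlety in the render kernel above: surf2Dwrite takes its x coordinate in bytes, not elements, which is why the code passes x * 4 for a uchar4 (RGBA8) surface. A hedged sketch of a clear kernel that spells the byte offset out; it assumes a surface object created the way dow() creates surf, and the kernel name is illustrative:

__global__ void clearSurface(cudaSurfaceObject_t surf, int w, int h, uchar4 c) {
  int x = blockIdx.x * blockDim.x + threadIdx.x;
  int y = blockIdx.y * blockDim.y + threadIdx.y;
  if (x < w && y < h)
    surf2Dwrite(c, surf, x * (int)sizeof(uchar4), y);  // x is a byte offset
}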
edc50f866fb334f2a1acf37fdedb3ced95fd87bf.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <iostream> #include <vector> #include <memory> #include <utility> #include <tuple> #include <cudf/table/table.hpp> #include <cudf/join.hpp> #include <cudf/sorting.hpp> #include <cudf/types.hpp> #include "../src/communicator.h" #include "../src/error.cuh" #include "../src/generate_table.cuh" #include "../src/distribute_table.cuh" #include "../src/distributed_join.cuh" #define KEY_T int #define PAYLOAD_T int static constexpr cudf::size_type BUILD_TABLE_SIZE = 1'000'000; static constexpr cudf::size_type PROBE_TABLE_SIZE = 5'000'000; static constexpr double SELECTIVITY = 0.3; static constexpr KEY_T RAND_MAX_VAL = 2'000'000; static constexpr bool IS_BUILD_TABLE_KEY_UNIQUE = true; static constexpr int OVER_DECOMPOSITION_FACTOR = 10; using cudf::experimental::table; template<typename data_type> __global__ void verify_correctness(const data_type *data1, const data_type *data2, cudf::size_type size) { const cudf::size_type start_idx = threadIdx.x + blockDim.x * blockIdx.x; const cudf::size_type stride = blockDim.x * gridDim.x; for (cudf::size_type idx = start_idx; idx < size; idx += stride) { assert(data1[idx] == data2[idx]); } } int main(int argc, char *argv[]) { /* Initialize communication */ UCXBufferCommunicator communicator; communicator.initialize(argc, argv); int mpi_rank {communicator.mpi_rank}; int mpi_size {communicator.mpi_size}; communicator.setup_cache(2 * 3 * 2 * 2 * mpi_size, 1'000'000LL); communicator.warmup_cache(); /* Generate build table and probe table and compute reference solution */ std::unique_ptr<table> build; std::unique_ptr<table> probe; std::unique_ptr<table> reference; cudf::table_view build_view; cudf::table_view probe_view; if (mpi_rank == 0) { std::tie(build, probe) = generate_build_probe_tables<KEY_T, PAYLOAD_T>( BUILD_TABLE_SIZE, PROBE_TABLE_SIZE, SELECTIVITY, RAND_MAX_VAL, IS_BUILD_TABLE_KEY_UNIQUE ); build_view = build->view(); probe_view = probe->view(); reference = cudf::experimental::inner_join( build->view(), probe->view(), {0}, {0}, {std::pair<cudf::size_type, cudf::size_type>(0, 0)} ); } std::unique_ptr<table> local_build = distribute_table(build_view, &communicator); std::unique_ptr<table> local_probe = distribute_table(probe_view, &communicator); /* Distributed join */ std::unique_ptr<table> join_result_all_ranks = distributed_inner_join( local_build->view(), local_probe->view(), {0}, {0}, {std::pair<cudf::size_type, cudf::size_type>(0, 0)}, &communicator, OVER_DECOMPOSITION_FACTOR ); /* Send join result from all ranks to the root rank */ std::unique_ptr<table> join_result = collect_tables( join_result_all_ranks->view(), &communicator ); /* Verify correctness */ if (mpi_rank == 0) { // Although join_result and reference should contain the same table, rows may be reordered. 
// Therefore, we first sort both tables and then compare cudf::size_type nrows = reference->num_rows(); assert(join_result->num_rows() == nrows); std::unique_ptr<table> join_sorted = cudf::experimental::sort(join_result->view()); std::unique_ptr<table> reference_sorted = cudf::experimental::sort(reference->view()); // Get the number of thread blocks based on thread block size const int block_size = 128; int nblocks {-1}; CUDA_RT_CALL( hipOccupancyMaxActiveBlocksPerMultiprocessor( &nblocks, verify_correctness<KEY_T>, block_size, 0 ) ); // There should be three columns in the result table. The first column is the joined key // column. The second and third column comes from the payload column from the left and // the right input table, respectively. // Verify the first column (key column) is correct. hipLaunchKernelGGL(( verify_correctness<KEY_T>), dim3(nblocks), dim3(block_size), 0, 0, join_sorted->view().column(0).head<KEY_T>(), reference_sorted->view().column(0).head<KEY_T>(), nrows ); // Verify the remaining two payload columns are correct. for (cudf::size_type icol = 1; icol <= 2; icol++) { hipLaunchKernelGGL(( verify_correctness<PAYLOAD_T>), dim3(nblocks), dim3(block_size), 0, 0, join_sorted->view().column(icol).head<PAYLOAD_T>(), reference_sorted->view().column(icol).head<PAYLOAD_T>(), nrows ); } } /* Cleanup */ communicator.finalize(); if (mpi_rank == 0) { std::cerr << "Test case \"compare_against_shared\" passes successfully.\n"; } return 0; }
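The verification above sizes its grid with hipOccupancyMaxActiveBlocksPerMultiprocessor, and the returned count is per multiprocessor; using it directly as the grid size works here only because verify_correctness is a grid-stride kernel. A sketch of the more common full-grid sizing, in CUDA spelling and with illustrative names:

#include <cassert>
#include <cuda_runtime.h>

__global__ void check(const int *a, const int *b, int n) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  int stride = blockDim.x * gridDim.x;
  for (; i < n; i += stride) assert(a[i] == b[i]);
}

// One full "wave" of resident blocks for the given block size.
int fullWaveGrid(int block_size) {
  int per_sm = 0;
  cudaOccupancyMaxActiveBlocksPerMultiprocessor(&per_sm, check, block_size, 0);
  cudaDeviceProp prop;
  cudaGetDeviceProperties(&prop, 0);
  return per_sm * prop.multiProcessorCount;
}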
edc50f866fb334f2a1acf37fdedb3ced95fd87bf.cu
/* * Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <iostream> #include <vector> #include <memory> #include <utility> #include <tuple> #include <cudf/table/table.hpp> #include <cudf/join.hpp> #include <cudf/sorting.hpp> #include <cudf/types.hpp> #include "../src/communicator.h" #include "../src/error.cuh" #include "../src/generate_table.cuh" #include "../src/distribute_table.cuh" #include "../src/distributed_join.cuh" #define KEY_T int #define PAYLOAD_T int static constexpr cudf::size_type BUILD_TABLE_SIZE = 1'000'000; static constexpr cudf::size_type PROBE_TABLE_SIZE = 5'000'000; static constexpr double SELECTIVITY = 0.3; static constexpr KEY_T RAND_MAX_VAL = 2'000'000; static constexpr bool IS_BUILD_TABLE_KEY_UNIQUE = true; static constexpr int OVER_DECOMPOSITION_FACTOR = 10; using cudf::experimental::table; template<typename data_type> __global__ void verify_correctness(const data_type *data1, const data_type *data2, cudf::size_type size) { const cudf::size_type start_idx = threadIdx.x + blockDim.x * blockIdx.x; const cudf::size_type stride = blockDim.x * gridDim.x; for (cudf::size_type idx = start_idx; idx < size; idx += stride) { assert(data1[idx] == data2[idx]); } } int main(int argc, char *argv[]) { /* Initialize communication */ UCXBufferCommunicator communicator; communicator.initialize(argc, argv); int mpi_rank {communicator.mpi_rank}; int mpi_size {communicator.mpi_size}; communicator.setup_cache(2 * 3 * 2 * 2 * mpi_size, 1'000'000LL); communicator.warmup_cache(); /* Generate build table and probe table and compute reference solution */ std::unique_ptr<table> build; std::unique_ptr<table> probe; std::unique_ptr<table> reference; cudf::table_view build_view; cudf::table_view probe_view; if (mpi_rank == 0) { std::tie(build, probe) = generate_build_probe_tables<KEY_T, PAYLOAD_T>( BUILD_TABLE_SIZE, PROBE_TABLE_SIZE, SELECTIVITY, RAND_MAX_VAL, IS_BUILD_TABLE_KEY_UNIQUE ); build_view = build->view(); probe_view = probe->view(); reference = cudf::experimental::inner_join( build->view(), probe->view(), {0}, {0}, {std::pair<cudf::size_type, cudf::size_type>(0, 0)} ); } std::unique_ptr<table> local_build = distribute_table(build_view, &communicator); std::unique_ptr<table> local_probe = distribute_table(probe_view, &communicator); /* Distributed join */ std::unique_ptr<table> join_result_all_ranks = distributed_inner_join( local_build->view(), local_probe->view(), {0}, {0}, {std::pair<cudf::size_type, cudf::size_type>(0, 0)}, &communicator, OVER_DECOMPOSITION_FACTOR ); /* Send join result from all ranks to the root rank */ std::unique_ptr<table> join_result = collect_tables( join_result_all_ranks->view(), &communicator ); /* Verify correctness */ if (mpi_rank == 0) { // Although join_result and reference should contain the same table, rows may be reordered. 
// Therefore, we first sort both tables and then compare cudf::size_type nrows = reference->num_rows(); assert(join_result->num_rows() == nrows); std::unique_ptr<table> join_sorted = cudf::experimental::sort(join_result->view()); std::unique_ptr<table> reference_sorted = cudf::experimental::sort(reference->view()); // Get the number of thread blocks based on thread block size const int block_size = 128; int nblocks {-1}; CUDA_RT_CALL( cudaOccupancyMaxActiveBlocksPerMultiprocessor( &nblocks, verify_correctness<KEY_T>, block_size, 0 ) ); // There should be three columns in the result table. The first column is the joined key // column. The second and third column comes from the payload column from the left and // the right input table, respectively. // Verify the first column (key column) is correct. verify_correctness<KEY_T><<<nblocks, block_size>>>( join_sorted->view().column(0).head<KEY_T>(), reference_sorted->view().column(0).head<KEY_T>(), nrows ); // Verify the remaining two payload columns are correct. for (cudf::size_type icol = 1; icol <= 2; icol++) { verify_correctness<PAYLOAD_T><<<nblocks, block_size>>>( join_sorted->view().column(icol).head<PAYLOAD_T>(), reference_sorted->view().column(icol).head<PAYLOAD_T>(), nrows ); } } /* Cleanup */ communicator.finalize(); if (mpi_rank == 0) { std::cerr << "Test case \"compare_against_shared\" passes successfully.\n"; } return 0; }
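verify_correctness above fails by firing a device-side assert, which traps the kernel, leaves the CUDA context unusable, and compiles out entirely when the build defines NDEBUG. A hedged alternative sketch that instead records mismatches in a flag the host reads back afterwards; the kernel and flag names are illustrative, not part of the test:

__global__ void compareArrays(const int *a, const int *b, int n, int *mismatch) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  int stride = blockDim.x * gridDim.x;
  for (; i < n; i += stride)
    if (a[i] != b[i]) atomicExch(mismatch, 1);  // host copies the flag back
}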
b894b25e26b3b5af00c0c225c65e934a080db163.hip
// !!! This is a file automatically generated by hipify!!! /************************************************************************** * * COMP 193 * GPU programming * Exercise 1 template * **************************************************************************/ #include <hip/hip_runtime.h> #include <hiprand/hiprand.h> // includes random num stuff #include <hiprand/hiprand_kernel.h> // more rand stuff #include <hip/hip_texture_types.h> #include "book.h" #include <stdio.h> #include "gpu_main.h" /*************************************************************************/ void addGPU(int *a, int *b, int *c, unsigned long vecSize){ printf("you can remove this print statement\n"); // arrays to pass to gpu int *dev_a, *dev_b, *dev_c; HANDLE_ERROR( hipMalloc( (void **) &dev_a, vecSize * sizeof(int) ) ); HANDLE_ERROR( hipMalloc( (void **) &dev_b, vecSize * sizeof(int) ) ); HANDLE_ERROR( hipMalloc( (void **) &dev_c, vecSize * sizeof(int) ) ); // copy into these arrays HANDLE_ERROR( hipMemcpy( dev_a, a, vecSize * sizeof(int), hipMemcpyHostToDevice)); HANDLE_ERROR( hipMemcpy( dev_b, b, vecSize * sizeof(int), hipMemcpyHostToDevice)); hipLaunchKernelGGL(( add), dim3(vecSize), dim3(1), 0, 0, dev_a, dev_b, dev_c, vecSize); // copy back to host to fill c with results HANDLE_ERROR( hipMemcpy( c, dev_c, vecSize * sizeof(int), hipMemcpyDeviceToHost) ); // memory clean up HANDLE_ERROR( hipFree( dev_a ) ); HANDLE_ERROR( hipFree( dev_b ) ); HANDLE_ERROR( hipFree( dev_c ) ); } /*************************************************************************/ /* * kernel function to add arrays in a and b */ __global__ void add(int *a, int *b, int *c, unsigned long vecSize) { int tid = blockIdx.x; if (tid < vecSize) { c[tid] = a[tid] + b[tid]; } }
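The launch above runs one thread per block (<<<vecSize, 1>>> in the CUDA original), which is correct but leaves 31 of every 32 warp lanes idle. A hedged sketch of the conventional layout, 256-thread blocks plus a computed global index, shown in CUDA launch syntax (the HIP build would go through hipLaunchKernelGGL); add2 is a hypothetical name, not part of the exercise:

__global__ void add2(const int *a, const int *b, int *c, unsigned long n) {
  unsigned long i = blockIdx.x * (unsigned long)blockDim.x + threadIdx.x;
  if (i < n) c[i] = a[i] + b[i];  // guard: the last block may be partial
}
// Host side: round the grid up so every element is covered.
// add2<<<(vecSize + 255) / 256, 256>>>(dev_a, dev_b, dev_c, vecSize);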
b894b25e26b3b5af00c0c225c65e934a080db163.cu
/************************************************************************** * * COMP 193 * GPU programming * Exercise 1 template * **************************************************************************/ #include <cuda.h> #include <curand.h> // includes random num stuff #include <curand_kernel.h> // more rand stuff #include <cuda_texture_types.h> #include "book.h" #include <stdio.h> #include "gpu_main.h" /*************************************************************************/ void addGPU(int *a, int *b, int *c, unsigned long vecSize){ printf("you can remove this print statement\n"); // arrays to pass to gpu int *dev_a, *dev_b, *dev_c; HANDLE_ERROR( cudaMalloc( (void **) &dev_a, vecSize * sizeof(int) ) ); HANDLE_ERROR( cudaMalloc( (void **) &dev_b, vecSize * sizeof(int) ) ); HANDLE_ERROR( cudaMalloc( (void **) &dev_c, vecSize * sizeof(int) ) ); // copy into these arrays HANDLE_ERROR( cudaMemcpy( dev_a, a, vecSize * sizeof(int), cudaMemcpyHostToDevice)); HANDLE_ERROR( cudaMemcpy( dev_b, b, vecSize * sizeof(int), cudaMemcpyHostToDevice)); add<<<vecSize, 1>>>( dev_a, dev_b, dev_c, vecSize); // copy back to host to fill c with results HANDLE_ERROR( cudaMemcpy( c, dev_c, vecSize * sizeof(int), cudaMemcpyDeviceToHost) ); // memory clean up HANDLE_ERROR( cudaFree( dev_a ) ); HANDLE_ERROR( cudaFree( dev_b ) ); HANDLE_ERROR( cudaFree( dev_c ) ); } /*************************************************************************/ /* * kernel function to add arrays in a and b */ __global__ void add(int *a, int *b, int *c, unsigned long vecSize) { int tid = blockIdx.x; if (tid < vecSize) { c[tid] = a[tid] + b[tid]; } }
e9809f3f9d47cbfd5ec3bee33786d17b3fc7a179.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "gpu_matrix_mult.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; float *a = NULL; hipMalloc(&a, XSIZE*YSIZE); float *b = NULL; hipMalloc(&b, XSIZE*YSIZE); float *c = NULL; hipMalloc(&c, XSIZE*YSIZE); int m = 2; int n = XSIZE*YSIZE; int k = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( gpu_matrix_mult), dim3(gridBlock),dim3(threadBlock), 0, 0, a,b,c,m,n,k); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( gpu_matrix_mult), dim3(gridBlock),dim3(threadBlock), 0, 0, a,b,c,m,n,k); } hipDeviceSynchronize(); /* drain the warmup launches before starting the clock */ auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( gpu_matrix_mult), dim3(gridBlock),dim3(threadBlock), 0, 0, a,b,c,m,n,k); } hipDeviceSynchronize(); /* launches are asynchronous; wait for them before reading the clock */ auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
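std::chrono in the harness above measures host wall-clock time, which is why the synchronize before end matters: kernel launches return immediately. CUDA events are the more conventional tool for this kind of measurement; a hedged sketch that reuses the harness's own variables, written in CUDA spelling:

cudaEvent_t t0, t1;
cudaEventCreate(&t0);
cudaEventCreate(&t1);
cudaEventRecord(t0);
for (int i = 0; i < 1000; ++i)
  gpu_matrix_mult<<<gridBlock, threadBlock>>>(a, b, c, m, n, k);
cudaEventRecord(t1);
cudaEventSynchronize(t1);           // wait for the last launch to finish
float ms = 0.0f;
cudaEventElapsedTime(&ms, t0, t1);  // GPU time for all 1000 launches, in ms
cudaEventDestroy(t0);
cudaEventDestroy(t1);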
e9809f3f9d47cbfd5ec3bee33786d17b3fc7a179.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "gpu_matrix_mult.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; float *a = NULL; cudaMalloc(&a, XSIZE*YSIZE); float *b = NULL; cudaMalloc(&b, XSIZE*YSIZE); float *c = NULL; cudaMalloc(&c, XSIZE*YSIZE); int m = 2; int n = XSIZE*YSIZE; int k = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); gpu_matrix_mult<<<gridBlock,threadBlock>>>(a,b,c,m,n,k); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { gpu_matrix_mult<<<gridBlock,threadBlock>>>(a,b,c,m,n,k); } cudaDeviceSynchronize(); /* drain the warmup launches before starting the clock */ auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { gpu_matrix_mult<<<gridBlock,threadBlock>>>(a,b,c,m,n,k); } cudaDeviceSynchronize(); /* launches are asynchronous; wait for them before reading the clock */ auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
c17d003ec95c0ca785cd2fe2ca07710527f6ebc8.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" //scan.cu //#include "kernel.hip" #include "comm.h" #include "wtime.h" #include "iostream" #define max_thd 256 #define max_block 256 #define thread_limit 256 graph * mygraph; __global__ void block_binary_kernel ( vertex_t* head, vertex_t* adj, vertex_t* adj_list, index_t* begin, index_t Ns, index_t Ne, index_t* count ) { //phase 1, partition index_t tid = Ns + (threadIdx.x + blockIdx.x * blockDim.x)/ max_thd; int i = threadIdx.x% max_thd; index_t mycount=0; // __shared__ vertex_t cache[256]; __shared__ index_t local[max_thd]; while(tid<Ne){ vertex_t A = head[tid]; vertex_t B = adj[tid]; index_t m = begin[A+1]-begin[A];//degree[A]; index_t n = begin[B+1]-begin[B];//degree[B]; index_t temp; if(m<n){ temp = A; A = B; B = temp; temp = m; m = n; n = temp; } vertex_t* a = &(adj_list[begin[A]]); vertex_t* b = &(adj_list[begin[B]]); //initial cache local[i]=a[i*m/max_thd]; __syncthreads(); //search int j=i; while(j<n){ vertex_t X = b[j]; vertex_t Y; //phase 1: cache int bot = 0; int top = max_thd; int r; while(top>bot+1){ r = (top+bot)/2; Y = local[r]; if(X==Y){ mycount++; bot = top + max_thd; } if(X<Y){ top = r; } if(X>Y){ bot = r; } } //phase 2 bot = bot*m/max_thd; top = top*m/max_thd -1; while(top>=bot){ r = (top+bot)/2; Y = a[r]; if(X==Y){ mycount++; } if(X<=Y){ top = r-1; } if(X>=Y){ bot = r+1; } } j += max_thd; } tid += gridDim.x*blockDim.x/ max_thd; __syncthreads(); } //reduce __syncthreads(); local[threadIdx.x] = mycount; __syncthreads(); if(threadIdx.x==0){ index_t val=0; for(int i=0; i<blockDim.x; i++){ val+= local[i]; } // count[blockIdx.x]+=val; count[blockIdx.x]=val; } } __global__ void warp_binary_kernel ( vertex_t* head, vertex_t* adj, vertex_t* adj_list, index_t* begin, index_t Ns, index_t Ne, index_t* count ) { //phase 1, partition index_t tid = (threadIdx.x + blockIdx.x * blockDim.x)/32 + Ns; index_t mycount=0; __shared__ index_t local[max_thd]; int i = threadIdx.x%32; int p = threadIdx.x/32; while(tid<Ne){ vertex_t A = head[tid]; vertex_t B = adj[tid]; index_t m = begin[A+1]-begin[A];//degree[A]; index_t n = begin[B+1]-begin[B];//degree[B]; index_t temp; if(m<n){ temp = A; A = B; B = temp; temp = m; m = n; n = temp; } vertex_t* a = &(adj_list[begin[A]]); vertex_t* b = &(adj_list[begin[B]]); //initial cache local[p*32+i]=a[i*m/32]; __syncthreads(); //search int j=i; while(j<n){ vertex_t X = b[j]; vertex_t Y; //phase 1: cache int bot = 0; int top = 32; int r; while(top>bot+1){ r = (top+bot)/2; Y = local[p*32+r]; if(X==Y){ mycount++; bot = top + 32; } if(X<Y){ top = r; } if(X>Y){ bot = r; } } //phase 2 bot = bot*m/32; top = top*m/32 -1; while(top>=bot){ r = (top+bot)/2; Y = a[r]; if(X==Y){ mycount++; } if(X<=Y){ top = r-1; } if(X>=Y){ bot = r+1; } } j += 32; } tid += blockDim.x*gridDim.x/32; __syncthreads(); } __syncthreads(); //reduce local[threadIdx.x] = mycount; __syncthreads(); if(threadIdx.x==0){ index_t val=0; for(int i=0; i<blockDim.x; i++){ val+= local[i]; } count[blockIdx.x]=val; } __syncthreads(); } //---------------------------------------------------------------------------------------- __global__ void classify_kernel //step 1: classify the edge list into different arrays ( vertex_t* adj_list, vertex_t* head_list, index_t* begin, index_t N, //inputs index_t* small_num, index_t* mid_num, index_t* large_num //outputs: small/large head, adjacent, and number by thread ) { int tid = threadIdx.x +blockIdx.x*blockDim.x; index_t bin_size = (N-1)/(blockDim.x*gridDim.x)+1; index_t thd_base = 
tid*bin_size; //start point of threads space index_t small_offset=0; index_t mid_offset=0; // index_t large_offset=0; //temp variables vertex_t head; vertex_t adj; index_t m; index_t n; for(index_t i=0; i<bin_size; i++){ index_t id = thd_base + i; if(id<N){ head = head_list[id]; adj = adj_list[id]; m = begin[head+1]-begin[head];//degree[head]; n = begin[adj+1]-begin[adj];//degree[adj]; if(m<n){ n=m; } if(n<thread_limit){ small_offset++; } else{ //could be more then 2 catigories mid_offset++; } // else { //could be more then 2 catigories // large_offset++; // } } } small_num[tid] = small_offset; mid_num[tid] = mid_offset; // large_num[tid] = large_offset; } __global__ void prefix_kernel_1 //this prefix scan function could be easier for data size is always 256*256 ( index_t* data, index_t* block_offset ) { //step 1: each block do prefix sum inside int tid = threadIdx.x +blockIdx.x*blockDim.x; __shared__ index_t temp_in[256]; temp_in[threadIdx.x] = data[tid]; __syncthreads(); index_t val=0; for(int i=0; i<=threadIdx.x; i++){ val += temp_in[i]; } __syncthreads(); if(threadIdx.x==255){ block_offset[blockIdx.x] = val; } data[tid] = val; __syncthreads(); } __global__ void prefix_kernel_2 ( index_t* block_offset ) { //step 2: collect each block's offset and do prefix for this set __shared__ index_t temp_in[256]; temp_in[threadIdx.x] = block_offset[threadIdx.x]; __syncthreads(); index_t val=0; for(int i=0; i<threadIdx.x; i++){ val += temp_in[i]; } // val = temp_in[threadIdx.x]; block_offset[threadIdx.x] = val; __syncthreads(); } __global__ void prefix_kernel_3 ( index_t* data, index_t* block_offset ) { //step 3: update by adding block offset int tid = threadIdx.x + blockIdx.x*blockDim.x; index_t val = data[tid]; index_t offset = block_offset[blockIdx.x]; val += offset; data[tid] = val; __syncthreads(); } __global__ void collect_kernel ( vertex_t* adj_list, vertex_t* head_list, index_t* begin, index_t N, index_t* small_num, index_t* mid_num, index_t* large_num, index_t N1, index_t N2, vertex_t* dest_head, vertex_t* dest_adj ) { int tid = threadIdx.x +blockIdx.x*blockDim.x; index_t bin_size = (N-1)/(blockDim.x*gridDim.x)+1; index_t thd_base = tid*bin_size; //start point of threads space index_t thd_base_small = 0; index_t thd_base_mid = N1; // index_t thd_base_large = N1+N2; if(tid!=0){ thd_base_small = small_num[tid-1]; thd_base_mid = N1 + mid_num[tid-1]; // thd_base_large = N1 + N2 + large_num[tid-1]; } //temp variables vertex_t head; vertex_t adj; index_t m; index_t n; index_t small_offset = thd_base_small; index_t mid_offset = thd_base_mid; // index_t large_offset = thd_base_large; for(index_t i=0; i<bin_size; i++){ index_t id = thd_base + i; if(id<N){ head = head_list[id]; adj = adj_list[id]; m = begin[head+1]-begin[head];//degree[head]; n = begin[adj+1]-begin[adj];//degree[adj]; if(m<n){ n=m; } if(n<thread_limit){ dest_head[small_offset] = head; dest_adj [small_offset] = adj; small_offset++; } else{ //could be more then 2 catigories dest_head[mid_offset] = head; dest_adj [mid_offset] = adj; mid_offset++; } // else { //could be more then 2 catigories // dest_head[large_offset] = head; // dest_adj [large_offset] = adj; // large_offset++; // } } } } __global__ void reduce_kernel2(index_t* count) { index_t val = 0; for(int i=0; i<max_block; i++){ val += count[i]; } count[0] = val; } //---------------------------------------- cpu function-------------------- //------------------------------------------------------------------ void* part_scan(void * data){ index_t thd_count=0; int GPU_id = 
*(int*)data; int i = GPU_id; // cout<<"GPU id = "<<GPU_id<<"\n"; hipSetDevice(GPU_id); H_ERR(hipDeviceSynchronize() ); vertex_t vert_count = mygraph->vert_count; vertex_t* dev_adj; index_t* dev_begin; index_t* dev_count; index_t* block_offset; H_ERR(hipMalloc(&dev_begin, (vert_count+1)*sizeof(index_t)) ); H_ERR(hipMalloc(&dev_count, max_block*sizeof(index_t)) ); H_ERR(hipMalloc(&block_offset, max_block*sizeof(index_t)) ); index_t partEdgeCount = mygraph->partEdgeCount[i]; vertex_t* partAdj = mygraph->partAdj[i]; index_t* partBegin = mygraph->partBegin[i]; index_t* count = mygraph->count; H_ERR(hipMalloc(&dev_adj, partEdgeCount*sizeof(vertex_t)) ); H_ERR(hipMemcpy(dev_adj, partAdj, partEdgeCount*sizeof(vertex_t), hipMemcpyHostToDevice) ); H_ERR(hipMemcpy(dev_begin, partBegin, (vert_count+1)*sizeof(index_t), hipMemcpyHostToDevice) ); //---------------- go into loop------------------- double time2=wtime(); for(int j=0; j<PART_NUM; j++){ //---------------- check ------------------------- if(mygraph->ds_status[i * GPU_NUM + j]>0){ break; } mygraph->ds_status[i * GPU_NUM + j]=1; mygraph->ds_complete[i]++; //---------------- run --------------------------- index_t totalEdgeCount = mygraph->partEdgeCount[j]; vertex_t* head = mygraph->partHead[j]; vertex_t* adj = mygraph->partAdj[j]; vertex_t* src_head; vertex_t* src_adj; H_ERR(hipMalloc(&src_head, totalEdgeCount*sizeof(vertex_t)) ); H_ERR(hipMalloc(&src_adj, totalEdgeCount*sizeof(vertex_t)) ); H_ERR(hipMemcpy(src_adj, adj, totalEdgeCount*sizeof(vertex_t), hipMemcpyHostToDevice) ); H_ERR(hipMemcpy(src_head, head, totalEdgeCount*sizeof(vertex_t), hipMemcpyHostToDevice) ); // double time1=wtime(); H_ERR(hipDeviceSynchronize() ); hipLaunchKernelGGL(( block_binary_kernel), dim3(max_block),dim3(max_thd), 0, 0, src_head, src_adj, dev_adj, dev_begin, 0, totalEdgeCount, dev_count ); H_ERR(hipDeviceSynchronize() ); hipLaunchKernelGGL(( reduce_kernel2) , dim3(1),dim3(1), 0, 0, dev_count); H_ERR(hipDeviceSynchronize() ); H_ERR(hipMemcpy(&count[i], dev_count, sizeof(index_t), hipMemcpyDeviceToHost)); thd_count += count[i]; H_ERR(hipFree(src_head) ); H_ERR(hipFree(src_adj) ); // cout<<"GPU "<<i<<" part "<<j<<"\n"; //---------------- write result --------------------------- mygraph->ds_count[i * GPU_NUM + j] = count[i]; } //---------------- work ending --------------------------- //---------------- work stealing --------------------------- int check = 0; for(int k=0; k<GPU_NUM; k++){ check += mygraph->ds_complete[k]; } while(check<GPU_NUM*GPU_NUM){ //step 1: looking for the GPU with most remaining work int min=0; for(int k=GPU_NUM-1; k>=0; k--){ if(mygraph->ds_complete[k]<mygraph->ds_complete[min]){ min = k; } if(mygraph->ds_complete[k] == mygraph->ds_complete[min]){ if(mygraph->ds_help[k] < mygraph->ds_help[min]){ min = k; } } } mygraph->ds_help[min]++; if(mygraph->ds_complete[min]==GPU_NUM){ for(int k=0; k<GPU_NUM; k++){ check += mygraph->ds_complete[k]; } continue; } //step 2: check ds_complete array and start moving data H_ERR(hipFree(dev_adj) ); index_t partEdgeCount = mygraph->partEdgeCount[min]; partAdj = mygraph->partAdj[min]; partBegin = mygraph->partBegin[min]; H_ERR(hipMalloc(&dev_adj, partEdgeCount*sizeof(vertex_t)) ); H_ERR(hipMemcpy(dev_adj, partAdj, partEdgeCount*sizeof(vertex_t), hipMemcpyHostToDevice) ); H_ERR(hipMemcpy(dev_begin, partBegin, (vert_count+1)*sizeof(index_t), hipMemcpyHostToDevice) ); //step 3: set flags and work int j = GPU_NUM - 1; while(j>=0){ if(mygraph->ds_status[min * GPU_NUM + j]>0){ j--; continue; } mygraph->ds_status[min * 
GPU_NUM + j] = 1; mygraph->ds_complete[min]++; //work index_t totalEdgeCount = mygraph->partEdgeCount[j]; vertex_t* head = mygraph->partHead[j]; vertex_t* adj = mygraph->partAdj[j]; vertex_t* src_head; vertex_t* src_adj; H_ERR(hipMalloc(&src_head, totalEdgeCount*sizeof(vertex_t)) ); H_ERR(hipMalloc(&src_adj, totalEdgeCount*sizeof(vertex_t)) ); H_ERR(hipMemcpy(src_adj, adj, totalEdgeCount*sizeof(vertex_t), hipMemcpyHostToDevice) ); H_ERR(hipMemcpy(src_head, head, totalEdgeCount*sizeof(vertex_t), hipMemcpyHostToDevice) ); // double time1=wtime(); H_ERR(hipDeviceSynchronize() ); hipLaunchKernelGGL(( block_binary_kernel), dim3(max_block),dim3(max_thd), 0, 0, src_head, src_adj, dev_adj, dev_begin, 0, totalEdgeCount, dev_count ); H_ERR(hipDeviceSynchronize() ); hipLaunchKernelGGL(( reduce_kernel2) , dim3(1),dim3(1), 0, 0, dev_count); H_ERR(hipDeviceSynchronize() ); H_ERR(hipMemcpy(&count[min], dev_count, sizeof(index_t), hipMemcpyDeviceToHost)); //---------------- write result --------------------------- mygraph->ds_count[min * GPU_NUM + j] = count[min]; thd_count += count[min]; H_ERR(hipFree(src_head) ); H_ERR(hipFree(src_adj) ); // cout<<"steal GPU "<<i<<" GPU "<<min<<" part "<<j<<"\n"; } //step 4: refresh the completion check for(int k=0; k<GPU_NUM; k++){ check += mygraph->ds_complete[k]; } } //---------------- work stealing end ----------------------- double time4 = wtime(); count[i] = thd_count; // cout<<"gpu "<<i<<" binary count="<<count[i]<<"\n"; // cout<<"time = "<<time4-time2<<" seconds"<<endl; H_ERR(hipFree(dev_adj) ); H_ERR(hipFree(dev_begin) ); H_ERR(hipFree(block_offset) ); H_ERR(hipFree(dev_count) ); return NULL; }
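// ---------------------------------------------------------------------------
// part_scan above steals work by repeatedly picking the GPU with the fewest
// completed partitions, breaking ties toward the GPU that has received the
// least help. A minimal host-side sketch of that victim selection
// (hypothetical names, not from the original sources):

static int pick_victim(const int* ds_complete, const int* ds_help, int gpu_num) {
    int min = 0; // candidate victim GPU
    for (int k = gpu_num - 1; k >= 0; k--) {
        if (ds_complete[k] < ds_complete[min]) {
            min = k; // strictly more work remaining
        } else if (ds_complete[k] == ds_complete[min] && ds_help[k] < ds_help[min]) {
            min = k; // equally far behind, but fewer helpers so far
        }
    }
    return min;
}

// Note that ds_status/ds_complete are shared by several host threads without
// locking, so two helpers racing on the same partition can both claim it and
// count it twice; the code above tolerates that race.
// ---------------------------------------------------------------------------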
c17d003ec95c0ca785cd2fe2ca07710527f6ebc8.cu
//scan.cu //#include "kernel.cu" #include "comm.h" #include "wtime.h" #include "iostream" #define max_thd 256 #define max_block 256 #define thread_limit 256 graph * mygraph; __global__ void block_binary_kernel ( vertex_t* head, vertex_t* adj, vertex_t* adj_list, index_t* begin, index_t Ns, index_t Ne, index_t* count ) { //phase 1, partition index_t tid = Ns + (threadIdx.x + blockIdx.x * blockDim.x)/ max_thd; int i = threadIdx.x% max_thd; index_t mycount=0; // __shared__ vertex_t cache[256]; __shared__ index_t local[max_thd]; while(tid<Ne){ vertex_t A = head[tid]; vertex_t B = adj[tid]; index_t m = begin[A+1]-begin[A];//degree[A]; index_t n = begin[B+1]-begin[B];//degree[B]; index_t temp; if(m<n){ temp = A; A = B; B = temp; temp = m; m = n; n = temp; } vertex_t* a = &(adj_list[begin[A]]); vertex_t* b = &(adj_list[begin[B]]); //initial cache local[i]=a[i*m/max_thd]; __syncthreads(); //search int j=i; while(j<n){ vertex_t X = b[j]; vertex_t Y; //phase 1: cache int bot = 0; int top = max_thd; int r; while(top>bot+1){ r = (top+bot)/2; Y = local[r]; if(X==Y){ mycount++; bot = top + max_thd; } if(X<Y){ top = r; } if(X>Y){ bot = r; } } //phase 2 bot = bot*m/max_thd; top = top*m/max_thd -1; while(top>=bot){ r = (top+bot)/2; Y = a[r]; if(X==Y){ mycount++; } if(X<=Y){ top = r-1; } if(X>=Y){ bot = r+1; } } j += max_thd; } tid += gridDim.x*blockDim.x/ max_thd; __syncthreads(); } //reduce __syncthreads(); local[threadIdx.x] = mycount; __syncthreads(); if(threadIdx.x==0){ index_t val=0; for(int i=0; i<blockDim.x; i++){ val+= local[i]; } // count[blockIdx.x]+=val; count[blockIdx.x]=val; } } __global__ void warp_binary_kernel ( vertex_t* head, vertex_t* adj, vertex_t* adj_list, index_t* begin, index_t Ns, index_t Ne, index_t* count ) { //phase 1, partition index_t tid = (threadIdx.x + blockIdx.x * blockDim.x)/32 + Ns; index_t mycount=0; __shared__ index_t local[max_thd]; int i = threadIdx.x%32; int p = threadIdx.x/32; while(tid<Ne){ vertex_t A = head[tid]; vertex_t B = adj[tid]; index_t m = begin[A+1]-begin[A];//degree[A]; index_t n = begin[B+1]-begin[B];//degree[B]; index_t temp; if(m<n){ temp = A; A = B; B = temp; temp = m; m = n; n = temp; } vertex_t* a = &(adj_list[begin[A]]); vertex_t* b = &(adj_list[begin[B]]); //initial cache local[p*32+i]=a[i*m/32]; __syncthreads(); //search int j=i; while(j<n){ vertex_t X = b[j]; vertex_t Y; //phase 1: cache int bot = 0; int top = 32; int r; while(top>bot+1){ r = (top+bot)/2; Y = local[p*32+r]; if(X==Y){ mycount++; bot = top + 32; } if(X<Y){ top = r; } if(X>Y){ bot = r; } } //phase 2 bot = bot*m/32; top = top*m/32 -1; while(top>=bot){ r = (top+bot)/2; Y = a[r]; if(X==Y){ mycount++; } if(X<=Y){ top = r-1; } if(X>=Y){ bot = r+1; } } j += 32; } tid += blockDim.x*gridDim.x/32; __syncthreads(); } __syncthreads(); //reduce local[threadIdx.x] = mycount; __syncthreads(); if(threadIdx.x==0){ index_t val=0; for(int i=0; i<blockDim.x; i++){ val+= local[i]; } count[blockIdx.x]=val; } __syncthreads(); } //---------------------------------------------------------------------------------------- __global__ void classify_kernel //step 1: classify the edge list into different arrays ( vertex_t* adj_list, vertex_t* head_list, index_t* begin, index_t N, //inputs index_t* small_num, index_t* mid_num, index_t* large_num //outputs: small/large head, adjacent, and number by thread ) { int tid = threadIdx.x +blockIdx.x*blockDim.x; index_t bin_size = (N-1)/(blockDim.x*gridDim.x)+1; index_t thd_base = tid*bin_size; //start point of threads space index_t small_offset=0; index_t mid_offset=0; 
// index_t large_offset=0; //temp variables vertex_t head; vertex_t adj; index_t m; index_t n; for(index_t i=0; i<bin_size; i++){ index_t id = thd_base + i; if(id<N){ head = head_list[id]; adj = adj_list[id]; m = begin[head+1]-begin[head];//degree[head]; n = begin[adj+1]-begin[adj];//degree[adj]; if(m<n){ n=m; } if(n<thread_limit){ small_offset++; } else{ //could be more than 2 categories mid_offset++; } // else { //could be more than 2 categories // large_offset++; // } } } small_num[tid] = small_offset; mid_num[tid] = mid_offset; // large_num[tid] = large_offset; } __global__ void prefix_kernel_1 //this prefix scan can stay simple because the data size is always 256*256 ( index_t* data, index_t* block_offset ) { //step 1: each block does a prefix sum internally int tid = threadIdx.x +blockIdx.x*blockDim.x; __shared__ index_t temp_in[256]; temp_in[threadIdx.x] = data[tid]; __syncthreads(); index_t val=0; for(int i=0; i<=threadIdx.x; i++){ val += temp_in[i]; } __syncthreads(); if(threadIdx.x==255){ block_offset[blockIdx.x] = val; } data[tid] = val; __syncthreads(); } __global__ void prefix_kernel_2 ( index_t* block_offset ) { //step 2: collect each block's offset and do a prefix sum over this set __shared__ index_t temp_in[256]; temp_in[threadIdx.x] = block_offset[threadIdx.x]; __syncthreads(); index_t val=0; for(int i=0; i<threadIdx.x; i++){ val += temp_in[i]; } // val = temp_in[threadIdx.x]; block_offset[threadIdx.x] = val; __syncthreads(); } __global__ void prefix_kernel_3 ( index_t* data, index_t* block_offset ) { //step 3: update by adding block offset int tid = threadIdx.x + blockIdx.x*blockDim.x; index_t val = data[tid]; index_t offset = block_offset[blockIdx.x]; val += offset; data[tid] = val; __syncthreads(); } __global__ void collect_kernel ( vertex_t* adj_list, vertex_t* head_list, index_t* begin, index_t N, index_t* small_num, index_t* mid_num, index_t* large_num, index_t N1, index_t N2, vertex_t* dest_head, vertex_t* dest_adj ) { int tid = threadIdx.x +blockIdx.x*blockDim.x; index_t bin_size = (N-1)/(blockDim.x*gridDim.x)+1; index_t thd_base = tid*bin_size; //start point of threads space index_t thd_base_small = 0; index_t thd_base_mid = N1; // index_t thd_base_large = N1+N2; if(tid!=0){ thd_base_small = small_num[tid-1]; thd_base_mid = N1 + mid_num[tid-1]; // thd_base_large = N1 + N2 + large_num[tid-1]; } //temp variables vertex_t head; vertex_t adj; index_t m; index_t n; index_t small_offset = thd_base_small; index_t mid_offset = thd_base_mid; // index_t large_offset = thd_base_large; for(index_t i=0; i<bin_size; i++){ index_t id = thd_base + i; if(id<N){ head = head_list[id]; adj = adj_list[id]; m = begin[head+1]-begin[head];//degree[head]; n = begin[adj+1]-begin[adj];//degree[adj]; if(m<n){ n=m; } if(n<thread_limit){ dest_head[small_offset] = head; dest_adj [small_offset] = adj; small_offset++; } else{ //could be more than 2 categories dest_head[mid_offset] = head; dest_adj [mid_offset] = adj; mid_offset++; } // else { //could be more than 2 categories // dest_head[large_offset] = head; // dest_adj [large_offset] = adj; // large_offset++; // } } } } __global__ void reduce_kernel2(index_t* count) { index_t val = 0; for(int i=0; i<max_block; i++){ val += count[i]; } count[0] = val; } //---------------------------------------- cpu function-------------------- //------------------------------------------------------------------ void* part_scan(void * data){ index_t thd_count=0; int GPU_id = *(int*)data; int i = GPU_id; // cout<<"GPU id = "<<GPU_id<<"\n"; cudaSetDevice(GPU_id);
H_ERR(cudaDeviceSynchronize() ); vertex_t vert_count = mygraph->vert_count; vertex_t* dev_adj; index_t* dev_begin; index_t* dev_count; index_t* block_offset; H_ERR(cudaMalloc(&dev_begin, (vert_count+1)*sizeof(index_t)) ); H_ERR(cudaMalloc(&dev_count, max_block*sizeof(index_t)) ); H_ERR(cudaMalloc(&block_offset, max_block*sizeof(index_t)) ); index_t partEdgeCount = mygraph->partEdgeCount[i]; vertex_t* partAdj = mygraph->partAdj[i]; index_t* partBegin = mygraph->partBegin[i]; index_t* count = mygraph->count; H_ERR(cudaMalloc(&dev_adj, partEdgeCount*sizeof(vertex_t)) ); H_ERR(cudaMemcpy(dev_adj, partAdj, partEdgeCount*sizeof(vertex_t), cudaMemcpyHostToDevice) ); H_ERR(cudaMemcpy(dev_begin, partBegin, (vert_count+1)*sizeof(index_t), cudaMemcpyHostToDevice) ); //---------------- go into loop------------------- double time2=wtime(); for(int j=0; j<PART_NUM; j++){ //---------------- check ------------------------- if(mygraph->ds_status[i * GPU_NUM + j]>0){ break; } mygraph->ds_status[i * GPU_NUM + j]=1; mygraph->ds_complete[i]++; //---------------- run --------------------------- index_t totalEdgeCount = mygraph->partEdgeCount[j]; vertex_t* head = mygraph->partHead[j]; vertex_t* adj = mygraph->partAdj[j]; vertex_t* src_head; vertex_t* src_adj; H_ERR(cudaMalloc(&src_head, totalEdgeCount*sizeof(vertex_t)) ); H_ERR(cudaMalloc(&src_adj, totalEdgeCount*sizeof(vertex_t)) ); H_ERR(cudaMemcpy(src_adj, adj, totalEdgeCount*sizeof(vertex_t), cudaMemcpyHostToDevice) ); H_ERR(cudaMemcpy(src_head, head, totalEdgeCount*sizeof(vertex_t), cudaMemcpyHostToDevice) ); // double time1=wtime(); H_ERR(cudaDeviceSynchronize() ); block_binary_kernel<<<max_block,max_thd>>> ( src_head, src_adj, dev_adj, dev_begin, 0, totalEdgeCount, dev_count ); H_ERR(cudaDeviceSynchronize() ); reduce_kernel2 <<<1,1>>>(dev_count); H_ERR(cudaDeviceSynchronize() ); H_ERR(cudaMemcpy(&count[i], dev_count, sizeof(index_t), cudaMemcpyDeviceToHost)); thd_count += count[i]; H_ERR(cudaFree(src_head) ); H_ERR(cudaFree(src_adj) ); // cout<<"GPU "<<i<<" part "<<j<<"\n"; //---------------- write result --------------------------- mygraph->ds_count[i * GPU_NUM + j] = count[i]; } //---------------- work ending --------------------------- //---------------- work stealing --------------------------- int check = 0; for(int k=0; k<GPU_NUM; k++){ check += mygraph->ds_complete[k]; } while(check<GPU_NUM*GPU_NUM){ //step 1: looking for the GPU with most remaining work int min=0; for(int k=GPU_NUM-1; k>=0; k--){ if(mygraph->ds_complete[k]<mygraph->ds_complete[min]){ min = k; } if(mygraph->ds_complete[k] == mygraph->ds_complete[min]){ if(mygraph->ds_help[k] < mygraph->ds_help[min]){ min = k; } } } mygraph->ds_help[min]++; if(mygraph->ds_complete[min]==GPU_NUM){ for(int k=0; k<GPU_NUM; k++){ check += mygraph->ds_complete[k]; } continue; } //step 2: check ds_complete array and start moving data H_ERR(cudaFree(dev_adj) ); index_t partEdgeCount = mygraph->partEdgeCount[min]; partAdj = mygraph->partAdj[min]; partBegin = mygraph->partBegin[min]; H_ERR(cudaMalloc(&dev_adj, partEdgeCount*sizeof(vertex_t)) ); H_ERR(cudaMemcpy(dev_adj, partAdj, partEdgeCount*sizeof(vertex_t), cudaMemcpyHostToDevice) ); H_ERR(cudaMemcpy(dev_begin, partBegin, (vert_count+1)*sizeof(index_t), cudaMemcpyHostToDevice) ); //step 3: set flags and work int j = GPU_NUM - 1; while(j>=0){ if(mygraph->ds_status[min * GPU_NUM + j]>0){ j--; continue; } mygraph->ds_status[min * GPU_NUM + j] = 1; mygraph->ds_complete[min]++; //work index_t totalEdgeCount = mygraph->partEdgeCount[j]; vertex_t* head = 
mygraph->partHead[j]; vertex_t* adj = mygraph->partAdj[j]; vertex_t* src_head; vertex_t* src_adj; H_ERR(cudaMalloc(&src_head, totalEdgeCount*sizeof(vertex_t)) ); H_ERR(cudaMalloc(&src_adj, totalEdgeCount*sizeof(vertex_t)) ); H_ERR(cudaMemcpy(src_adj, adj, totalEdgeCount*sizeof(vertex_t), cudaMemcpyHostToDevice) ); H_ERR(cudaMemcpy(src_head, head, totalEdgeCount*sizeof(vertex_t), cudaMemcpyHostToDevice) ); // double time1=wtime(); H_ERR(cudaDeviceSynchronize() ); block_binary_kernel<<<max_block,max_thd>>> ( src_head, src_adj, dev_adj, dev_begin, 0, totalEdgeCount, dev_count ); H_ERR(cudaDeviceSynchronize() ); reduce_kernel2 <<<1,1>>>(dev_count); H_ERR(cudaDeviceSynchronize() ); H_ERR(cudaMemcpy(&count[min], dev_count, sizeof(index_t), cudaMemcpyDeviceToHost)); //---------------- write result --------------------------- mygraph->ds_count[min * GPU_NUM + j] = count[min]; thd_count += count[min]; H_ERR(cudaFree(src_head) ); H_ERR(cudaFree(src_adj) ); // cout<<"steal GPU "<<i<<" GPU "<<min<<" part "<<j<<"\n"; } //step 4: refresh the completion check for(int k=0; k<GPU_NUM; k++){ check += mygraph->ds_complete[k]; } } //---------------- work stealing end ----------------------- double time4 = wtime(); count[i] = thd_count; // cout<<"gpu "<<i<<" binary count="<<count[i]<<"\n"; // cout<<"time = "<<time4-time2<<" seconds"<<endl; H_ERR(cudaFree(dev_adj) ); H_ERR(cudaFree(dev_begin) ); H_ERR(cudaFree(block_offset) ); H_ERR(cudaFree(dev_count) ); return NULL; }
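// ---------------------------------------------------------------------------
// block_binary_kernel above intersects two sorted adjacency lists with a
// two-phase binary search: phase 1 narrows each lookup to one segment using
// max_thd evenly spaced samples of the longer list cached in shared memory,
// and phase 2 binary-searches only that segment in global memory. A
// sequential sketch of the same idea (hypothetical names; assumes both lists
// are sorted ascending, with cache_size playing the role of max_thd):

static int count_common_two_phase(const int* a, int m, const int* b, int n,
                                  int cache_size) {
    int hits = 0;
    for (int j = 0; j < n; j++) {
        int x = b[j];
        int bot = 0, top = cache_size;
        while (top > bot + 1) {                    // phase 1: coarse, on samples
            int r = (top + bot) / 2;
            int y = a[(long long)r * m / cache_size];
            if (x == y) { hits++; bot = top + cache_size; } // hit on a sample
            else if (x < y) top = r;
            else bot = r;
        }
        if (bot > top) continue;                   // already counted in phase 1
        int lo = bot * m / cache_size;             // phase 2: fine, one segment
        int hi = top * m / cache_size - 1;
        while (hi >= lo) {
            int r = (lo + hi) / 2;
            if (x == a[r]) { hits++; break; }
            if (x < a[r]) hi = r - 1; else lo = r + 1;
        }
    }
    return hits;
}
// ---------------------------------------------------------------------------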
80d8c71b8d514abda63e4d93f1af67563bb7a512.hip
// !!! This is a file automatically generated by hipify!!! // // by Jan Eric Kyprianidis <www.kyprianidis.com> // Copyright (C) 2010-2012 Computer Graphics Systems Group at the // Hasso-Plattner-Institut, Potsdam, Germany <www.hpi3d.de> // // This program is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. // // This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // #include <oz/laplace_eq.h> #include <oz/color.h> #include <oz/gauss.h> #include <oz/generate.h> #include <oz/gpu_sampler2.h> #include <oz/gpu_plm2.h> #include <oz/tex2d_util.h> #include <oz/norm.h> namespace oz { struct LEqJacobiStep : public generator<float4> { gpu_sampler<float4,0> src_; leq_stencil_t stencil_; LEqJacobiStep( const gpu_image& src, leq_stencil_t stencil) : src_(src), stencil_(stencil) {} inline __device__ float4 operator()( int ix, int iy )const { float4 o = src_(ix, iy); if (o.w < 1) { switch (stencil_) { case LEQ_STENCIL_4: o = make_float4( 0.25f * ( make_float3(src_(ix, iy+1)) + make_float3(src_(ix-1, iy )) + make_float3(src_(ix+1, iy )) + make_float3(src_(ix , iy-1))), 0); break; case LEQ_STENCIL_8: o = make_float4( 0.125f * ( make_float3(src_(ix+1, iy+1)) + make_float3(src_(ix, iy+1)) + make_float3(src_(ix-1, iy+1)) + make_float3(src_(ix-1, iy )) + make_float3(src_(ix+1, iy )) + make_float3(src_(ix+1, iy-1)) + make_float3(src_(ix , iy-1)) + make_float3(src_(ix-1, iy-1))), 0); break; case LEQ_STENCIL_12: o = make_float4( 1.0f / 12.0f * ( 1 * make_float3(src_(ix+1, iy+1)) + 2 * make_float3(src_(ix, iy+1)) + 1 * make_float3(src_(ix-1, iy+1)) + 2 * make_float3(src_(ix-1, iy )) + 2 * make_float3(src_(ix+1, iy )) + 1 * make_float3(src_(ix+1, iy-1)) + 2 * make_float3(src_(ix , iy-1)) + 1 * make_float3(src_(ix-1, iy-1))), 0); break; case LEQ_STENCIL_20: o = make_float4( 1.0f / 20.0f * ( 1 * make_float3(src_(ix+1, iy+1)) + 4 * make_float3(src_(ix, iy+1)) + 1 * make_float3(src_(ix-1, iy+1)) + 4 * make_float3(src_(ix-1, iy )) + 4 * make_float3(src_(ix+1, iy )) + 1 * make_float3(src_(ix+1, iy-1)) + 4 * make_float3(src_(ix , iy-1)) + 1 * make_float3(src_(ix-1, iy-1))), 0); break; } } return o; } }; gpu_image leq_jacobi_step( const gpu_image& src, leq_stencil_t stencil ) { return generate(src.size(), LEqJacobiStep(src, stencil)); } struct LEqCorrectDown : public generator<float4> { gpu_sampler<float4,0> src_; LEqCorrectDown( const gpu_image& src ) : src_(src) {} inline __device__ float4 operator()( int ix, int iy ) const { int i = 2*ix; int j = 2*iy; float4 sum = make_float4(0); float4 c; c = src_(i, j ); if (c.w > 0) { sum += c; } c = src_(i+1, j ); if (c.w > 0) { sum += c; } c = src_(i , j+1); if (c.w > 0) { sum += c; } c = src_(i+1, j+1); if (c.w > 0) { sum += c; } if (sum.w > 0) sum /= sum.w; return sum; } }; gpu_image leq_correct_down( const gpu_image& src) { gpu_image r = generate((src.w()+1)/2, (src.h()+1)/2, LEqCorrectDown(src)); return r; } struct LEqCorrectUp : public generator<float4> { gpu_plm2<float4> src0_; gpu_sampler<float4,0> src1_; int upfilt_; LEqCorrectUp( const gpu_image& src0, const gpu_image& src1, leq_upfilt_t upfilt ) : src0_(src0), src1_(src1, ((upfilt==LEQ_UPFILT_FAST_BILINEAR) || (upfilt==LEQ_UPFILT_FAST_BICUBIC))? 
hipFilterModeLinear : hipFilterModePoint), upfilt_(upfilt) {} inline __device__ float4 operator()( int ix, int iy )const { float4 c = src0_(ix, iy); if (c.w < 1) { float2 uv = make_float2(0.5f * (ix + 0.5f), 0.5f * (iy + 0.5f)); switch (upfilt_) { case LEQ_UPFILT_NEAREST: c = make_float4(make_float3(src1_(ix/2, iy/2)), 0); break; case LEQ_UPFILT_FAST_BILINEAR: c = make_float4(make_float3(src1_(uv.x, uv.y)), 0); break; case LEQ_UPFILT_BILINEAR: c = make_float4(make_float3(tex2DBilinear(src1_.texref(), uv.x, uv.y)), 0); break; case LEQ_UPFILT_FAST_BICUBIC: c = make_float4(make_float3(tex2DFastBicubic(src1_.texref(), uv.x, uv.y)), 0); break; case LEQ_UPFILT_BICUBIC: c = make_float4(make_float3(tex2DBicubic(src1_.texref(), uv.x, uv.y)), 0); break; } } return c; } }; gpu_image leq_correct_up( const gpu_image& src0, const gpu_image& src1, leq_upfilt_t upfilt ) { gpu_image r = generate(src0.size(), LEqCorrectUp(src0, src1, upfilt)); return r; } struct LEqResidual : public oz::generator<float3> { gpu_sampler<float4,0> src_; leq_stencil_t stencil_; LEqResidual( const gpu_image& src, leq_stencil_t stencil ) : src_(src), stencil_(stencil) {} inline __device__ float3 operator()( int ix, int iy ) const { float4 c = src_(ix, iy); if (c.w < 1) { switch (stencil_) { case LEQ_STENCIL_4: return (make_float3(src_(ix , iy+1)) + make_float3(src_(ix+1, iy )) + make_float3(src_(ix-1, iy )) + make_float3(src_(ix , iy-1)) - 4 * make_float3(c)) / 4; case LEQ_STENCIL_8: return (make_float3(src_(ix+1, iy+1)) + make_float3(src_(ix , iy+1)) + make_float3(src_(ix-1, iy+1)) + make_float3(src_(ix+1, iy )) + make_float3(src_(ix-1, iy )) + make_float3(src_(ix+1, iy-1)) + make_float3(src_(ix , iy-1)) + make_float3(src_(ix-1, iy-1)) - 8 * make_float3(c)) / 8; case LEQ_STENCIL_12: return (1*make_float3(src_(ix+1, iy+1)) + 2*make_float3(src_(ix , iy+1)) + 1*make_float3(src_(ix-1, iy+1)) + 2*make_float3(src_(ix+1, iy )) + 2*make_float3(src_(ix-1, iy )) + 1*make_float3(src_(ix+1, iy-1)) + 2*make_float3(src_(ix , iy-1)) + 1*make_float3(src_(ix-1, iy-1)) - 12 * make_float3(c)) / 12; case LEQ_STENCIL_20: return (1*make_float3(src_(ix+1, iy+1)) + 4*make_float3(src_(ix , iy+1)) + 1*make_float3(src_(ix-1, iy+1)) + 4*make_float3(src_(ix+1, iy )) + 4*make_float3(src_(ix-1, iy )) + 1*make_float3(src_(ix+1, iy-1)) + 4*make_float3(src_(ix , iy-1)) + 1*make_float3(src_(ix-1, iy-1)) - 20 * make_float3(c)) / 20; } } return make_float3(0); } }; gpu_image leq_residual( const gpu_image& src, leq_stencil_t stencil ) { return generate(src.size(), LEqResidual(src, stencil)); } float leq_error( const gpu_image& src, leq_stencil_t stencil ) { return sqrtf(sum(abs2(leq_residual(src, stencil)))); } gpu_image leq_vcycle( const gpu_image& b, int v2, leq_stencil_t stencil, leq_upfilt_t upfilt ) { if ((b.w() <= 2) || (b.h() <= 2)) return b; gpu_image tmp = b; tmp = leq_correct_down(tmp); tmp = leq_vcycle(tmp, v2, stencil, upfilt); tmp = leq_correct_up(b, tmp, upfilt); for (int k = 0; k < v2; ++k) tmp = leq_jacobi_step(tmp, stencil); return tmp; } }
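// ---------------------------------------------------------------------------
// In the solver above, the alpha channel acts as a mask: texels with w >= 1
// carry fixed boundary values, while texels with w < 1 are unknowns that each
// Jacobi step overwrites with a stencil-weighted average of their neighbours.
// A 1-D toy analogue of that masked relaxation (hypothetical names, not taken
// from the library):

static void masked_jacobi_step_1d(const float* src, const unsigned char* fixed,
                                  float* dst, int n) {
    for (int i = 0; i < n; i++) {
        if (fixed[i]) {
            dst[i] = src[i];                      // Dirichlet texel: keep as-is
        } else {
            float l = src[i > 0 ? i - 1 : i];     // clamp at the borders
            float r = src[i < n - 1 ? i + 1 : i];
            dst[i] = 0.5f * (l + r);              // 1-D analogue of LEQ_STENCIL_4
        }
    }
}
// ---------------------------------------------------------------------------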
80d8c71b8d514abda63e4d93f1af67563bb7a512.cu
// // by Jan Eric Kyprianidis <www.kyprianidis.com> // Copyright (C) 2010-2012 Computer Graphics Systems Group at the // Hasso-Plattner-Institut, Potsdam, Germany <www.hpi3d.de> // // This program is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. // // This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // #include <oz/laplace_eq.h> #include <oz/color.h> #include <oz/gauss.h> #include <oz/generate.h> #include <oz/gpu_sampler2.h> #include <oz/gpu_plm2.h> #include <oz/tex2d_util.h> #include <oz/norm.h> namespace oz { struct LEqJacobiStep : public generator<float4> { gpu_sampler<float4,0> src_; leq_stencil_t stencil_; LEqJacobiStep( const gpu_image& src, leq_stencil_t stencil) : src_(src), stencil_(stencil) {} inline __device__ float4 operator()( int ix, int iy )const { float4 o = src_(ix, iy); if (o.w < 1) { switch (stencil_) { case LEQ_STENCIL_4: o = make_float4( 0.25f * ( make_float3(src_(ix, iy+1)) + make_float3(src_(ix-1, iy )) + make_float3(src_(ix+1, iy )) + make_float3(src_(ix , iy-1))), 0); break; case LEQ_STENCIL_8: o = make_float4( 0.125f * ( make_float3(src_(ix+1, iy+1)) + make_float3(src_(ix, iy+1)) + make_float3(src_(ix-1, iy+1)) + make_float3(src_(ix-1, iy )) + make_float3(src_(ix+1, iy )) + make_float3(src_(ix+1, iy-1)) + make_float3(src_(ix , iy-1)) + make_float3(src_(ix-1, iy-1))), 0); break; case LEQ_STENCIL_12: o = make_float4( 1.0f / 12.0f * ( 1 * make_float3(src_(ix+1, iy+1)) + 2 * make_float3(src_(ix, iy+1)) + 1 * make_float3(src_(ix-1, iy+1)) + 2 * make_float3(src_(ix-1, iy )) + 2 * make_float3(src_(ix+1, iy )) + 1 * make_float3(src_(ix+1, iy-1)) + 2 * make_float3(src_(ix , iy-1)) + 1 * make_float3(src_(ix-1, iy-1))), 0); break; case LEQ_STENCIL_20: o = make_float4( 1.0f / 20.0f * ( 1 * make_float3(src_(ix+1, iy+1)) + 4 * make_float3(src_(ix, iy+1)) + 1 * make_float3(src_(ix-1, iy+1)) + 4 * make_float3(src_(ix-1, iy )) + 4 * make_float3(src_(ix+1, iy )) + 1 * make_float3(src_(ix+1, iy-1)) + 4 * make_float3(src_(ix , iy-1)) + 1 * make_float3(src_(ix-1, iy-1))), 0); break; } } return o; } }; gpu_image leq_jacobi_step( const gpu_image& src, leq_stencil_t stencil ) { return generate(src.size(), LEqJacobiStep(src, stencil)); } struct LEqCorrectDown : public generator<float4> { gpu_sampler<float4,0> src_; LEqCorrectDown( const gpu_image& src ) : src_(src) {} inline __device__ float4 operator()( int ix, int iy ) const { int i = 2*ix; int j = 2*iy; float4 sum = make_float4(0); float4 c; c = src_(i, j ); if (c.w > 0) { sum += c; } c = src_(i+1, j ); if (c.w > 0) { sum += c; } c = src_(i , j+1); if (c.w > 0) { sum += c; } c = src_(i+1, j+1); if (c.w > 0) { sum += c; } if (sum.w > 0) sum /= sum.w; return sum; } }; gpu_image leq_correct_down( const gpu_image& src) { gpu_image r = generate((src.w()+1)/2, (src.h()+1)/2, LEqCorrectDown(src)); return r; } struct LEqCorrectUp : public generator<float4> { gpu_plm2<float4> src0_; gpu_sampler<float4,0> src1_; int upfilt_; LEqCorrectUp( const gpu_image& src0, const gpu_image& src1, leq_upfilt_t upfilt ) : src0_(src0), src1_(src1, ((upfilt==LEQ_UPFILT_FAST_BILINEAR) || (upfilt==LEQ_UPFILT_FAST_BICUBIC))? 
cudaFilterModeLinear : cudaFilterModePoint), upfilt_(upfilt) {} inline __device__ float4 operator()( int ix, int iy )const { float4 c = src0_(ix, iy); if (c.w < 1) { float2 uv = make_float2(0.5f * (ix + 0.5f), 0.5f * (iy + 0.5f)); switch (upfilt_) { case LEQ_UPFILT_NEAREST: c = make_float4(make_float3(src1_(ix/2, iy/2)), 0); break; case LEQ_UPFILT_FAST_BILINEAR: c = make_float4(make_float3(src1_(uv.x, uv.y)), 0); break; case LEQ_UPFILT_BILINEAR: c = make_float4(make_float3(tex2DBilinear(src1_.texref(), uv.x, uv.y)), 0); break; case LEQ_UPFILT_FAST_BICUBIC: c = make_float4(make_float3(tex2DFastBicubic(src1_.texref(), uv.x, uv.y)), 0); break; case LEQ_UPFILT_BICUBIC: c = make_float4(make_float3(tex2DBicubic(src1_.texref(), uv.x, uv.y)), 0); break; } } return c; } }; gpu_image leq_correct_up( const gpu_image& src0, const gpu_image& src1, leq_upfilt_t upfilt ) { gpu_image r = generate(src0.size(), LEqCorrectUp(src0, src1, upfilt)); return r; } struct LEqResidual : public oz::generator<float3> { gpu_sampler<float4,0> src_; leq_stencil_t stencil_; LEqResidual( const gpu_image& src, leq_stencil_t stencil ) : src_(src), stencil_(stencil) {} inline __device__ float3 operator()( int ix, int iy ) const { float4 c = src_(ix, iy); if (c.w < 1) { switch (stencil_) { case LEQ_STENCIL_4: return (make_float3(src_(ix , iy+1)) + make_float3(src_(ix+1, iy )) + make_float3(src_(ix-1, iy )) + make_float3(src_(ix , iy-1)) - 4 * make_float3(c)) / 4; case LEQ_STENCIL_8: return (make_float3(src_(ix+1, iy+1)) + make_float3(src_(ix , iy+1)) + make_float3(src_(ix-1, iy+1)) + make_float3(src_(ix+1, iy )) + make_float3(src_(ix-1, iy )) + make_float3(src_(ix+1, iy-1)) + make_float3(src_(ix , iy-1)) + make_float3(src_(ix-1, iy-1)) - 8 * make_float3(c)) / 8; case LEQ_STENCIL_12: return (1*make_float3(src_(ix+1, iy+1)) + 2*make_float3(src_(ix , iy+1)) + 1*make_float3(src_(ix-1, iy+1)) + 2*make_float3(src_(ix+1, iy )) + 2*make_float3(src_(ix-1, iy )) + 1*make_float3(src_(ix+1, iy-1)) + 2*make_float3(src_(ix , iy-1)) + 1*make_float3(src_(ix-1, iy-1)) - 12 * make_float3(c)) / 12; case LEQ_STENCIL_20: return (1*make_float3(src_(ix+1, iy+1)) + 4*make_float3(src_(ix , iy+1)) + 1*make_float3(src_(ix-1, iy+1)) + 4*make_float3(src_(ix+1, iy )) + 4*make_float3(src_(ix-1, iy )) + 1*make_float3(src_(ix+1, iy-1)) + 4*make_float3(src_(ix , iy-1)) + 1*make_float3(src_(ix-1, iy-1)) - 20 * make_float3(c)) / 20; } } return make_float3(0); } }; gpu_image leq_residual( const gpu_image& src, leq_stencil_t stencil ) { return generate(src.size(), LEqResidual(src, stencil)); } float leq_error( const gpu_image& src, leq_stencil_t stencil ) { return sqrtf(sum(abs2(leq_residual(src, stencil)))); } gpu_image leq_vcycle( const gpu_image& b, int v2, leq_stencil_t stencil, leq_upfilt_t upfilt ) { if ((b.w() <= 2) || (b.h() <= 2)) return b; gpu_image tmp = b; tmp = leq_correct_down(tmp); tmp = leq_vcycle(tmp, v2, stencil, upfilt); tmp = leq_correct_up(b, tmp, upfilt); for (int k = 0; k < v2; ++k) tmp = leq_jacobi_step(tmp, stencil); return tmp; } }
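// ---------------------------------------------------------------------------
// leq_vcycle above recurses while both image sides exceed 2, each
// leq_correct_down level measures ((w+1)/2) x ((h+1)/2), and v2 Jacobi
// smoothing steps run per level on the way back up. A small host sketch
// (hypothetical helper) that prints the coarsening schedule the recursion
// follows:

#include <stdio.h>

static void print_vcycle_levels(int w, int h) {
    int level = 0;
    printf("level %d: %dx%d\n", level, w, h);
    while (w > 2 && h > 2) {
        w = (w + 1) / 2;                  // same rounding as leq_correct_down
        h = (h + 1) / 2;
        printf("level %d: %dx%d\n", ++level, w, h);
    }
}

// For example, print_vcycle_levels(512, 384) walks 512x384, 256x192, 128x96,
// 64x48, 32x24, 16x12, 8x6, 4x3, 2x2 -- nine levels in total.
// ---------------------------------------------------------------------------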
c4771a9d7503804927fbc8c25401b45ed81b90bf.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /************************************************************************* GPU Version: Tsinghua University, Aug. 2012. Written by Yun Fei in collaboration with W. Wang and B. Wang Original: Optimization Technology Center. Argonne National Laboratory and Northwestern University. Written by Ciyou Zhu in collaboration with R.H. Byrd, P. Lu-Chen and J. Nocedal. Contributors: * Sergey Bochkanov (ALGLIB project). Translation from FORTRAN to pseudocode. This software is freely available, but we expect that all publications describing work using this software, or all commercial products using it, quote at least one of the references given below: * R. H. Byrd, P. Lu and J. Nocedal. A Limited Memory Algorithm for Bound Constrained Optimization, (1995), SIAM Journal on Scientific and Statistical Computing , 16, 5, pp. 1190-1208. * C. Zhu, R.H. Byrd and J. Nocedal. L-BFGS-B: Algorithm 778: L-BFGS-B, FORTRAN routines for large scale bound constrained optimization (1997), ACM Transactions on Mathematical Software, Vol 23, Num. 4, pp. 550 - 560. *************************************************************************/ #include "lbfgsbcuda.h" namespace lbfgsbcuda { namespace cauchy { template<int bx> __global__ void kernel0( int n, const real* g, const int* nbd, real* t, const real* x, const real* u, const real* l, int* iwhere ) { const int i = blockIdx.x * blockDim.x + threadIdx.x; const int tid = threadIdx.x; volatile __shared__ real sdata[bx]; real mySum; if(i < n) { int iwi = iwhere[i]; if( iwi != 3 && iwi != -1 ) { real neggi = -g[i]; int nbdi = nbd[i]; real tl = 0; real tu = 0; if( nbdi <= 2 ) { tl = x[i] - l[i]; } if( nbdi >= 2 ) { tu = u[i] - x[i]; } if(nbdi <= 2 && tl<=0 && neggi <= 0) { iwi = 1; }else if(nbdi >= 2 && tu <= 0 && neggi >= 0) { iwi = 2; } else if( neggi == 0) { iwi = -3; } else { iwi = 0; } iwhere[i] = iwi; if((iwi != 0 && iwi != -1) || neggi == 0) { mySum = machinemaximum; } else { if(nbdi <= 2 && nbdi != 0 && neggi < 0) { mySum = tl / (-neggi); } else if(nbdi >= 2 && neggi > 0) { mySum = tu / neggi; } else { mySum = machinemaximum; } } } else { mySum = machinemaximum; } } else { mySum = machinemaximum; } sdata[tid] = mySum; __syncthreads(); if(bx > 512) {if (tid < 512) { sdata[tid] = mySum = minr(mySum, sdata[tid + 512]); } __syncthreads();} if(bx > 256) {if (tid < 256) { sdata[tid] = mySum = minr(mySum, sdata[tid + 256]); } __syncthreads();} if(bx > 128) {if (tid < 128) { sdata[tid] = mySum = minr(mySum, sdata[tid + 128]); } __syncthreads();} if(bx > 64) {if (tid < 64) { sdata[tid] = mySum = minr(mySum, sdata[tid + 64]); } __syncthreads();} if (tid < __min(bx / 2, 32)) { // now that we are using warp-synchronous programming (below) // we need to declare our shared memory volatile so that the compiler // doesn't reorder stores to it and induce incorrect behavior. 
volatile real* smem = sdata + tid; if(bx > 32) {*smem = mySum = minr(mySum, smem[32]);} if(bx > 16) {*smem = mySum = minr(mySum, smem[16]);} if(bx > 8) {*smem = mySum = minr(mySum, smem[8]);} if(bx > 4) {*smem = mySum = minr(mySum, smem[4]);} if(bx > 2) {*smem = mySum = minr(mySum, smem[2]);} if(bx > 1) {*smem = mySum = minr(mySum, smem[1]);} } if (tid == 0) t[blockIdx.x] = mySum; } template<int bx> __global__ void kernel01( const int n, const real* buf_in, real* buf_out) { const int i = blockIdx.x * blockDim.x + threadIdx.x; const int tid = threadIdx.x; volatile __shared__ real sdata[bx]; real mySum; if(i < n) mySum = buf_in[i]; else mySum = machinemaximum; sdata[tid] = mySum; __syncthreads(); if(bx > 512) {if (tid < 512) { sdata[tid] = mySum = minr(mySum, sdata[tid + 512]); } __syncthreads();} if(bx > 256) {if (tid < 256) { sdata[tid] = mySum = minr(mySum, sdata[tid + 256]); } __syncthreads();} if(bx > 128) {if (tid < 128) { sdata[tid] = mySum = minr(mySum, sdata[tid + 128]); } __syncthreads();} if(bx > 64) {if (tid < 64) { sdata[tid] = mySum = minr(mySum, sdata[tid + 64]); } __syncthreads();} if (tid < __min(bx / 2, 32)) { // now that we are using warp-synchronous programming (below) // we need to declare our shared memory volatile so that the compiler // doesn't reorder stores to it and induce incorrect behavior. volatile real* smem = sdata + tid; if(bx > 32) {*smem = mySum = minr(mySum, smem[32]);} if(bx > 16) {*smem = mySum = minr(mySum, smem[16]);} if(bx > 8) {*smem = mySum = minr(mySum, smem[8]);} if(bx > 4) {*smem = mySum = minr(mySum, smem[4]);} if(bx > 2) {*smem = mySum = minr(mySum, smem[2]);} if(bx > 1) {*smem = mySum = minr(mySum, smem[1]);} } if(tid == 0) { buf_out[blockIdx.x] = mySum; } } template<int bx> __global__ void kernel1( const int n, const real* g, real* buf_s_r, const int* iwhere ) { const int i = blockIdx.x * blockDim.x + threadIdx.x; const int tid = threadIdx.x; volatile __shared__ real sdata[bx]; real mySum; if(i >= n) { mySum = 0; } else { int iwi = iwhere[i]; if(iwi != 0 && iwi != -1) { mySum = 0; } else { real neggi = g[i]; mySum = -neggi * neggi; } } sdata[tid] = mySum; __syncthreads(); if(bx > 512) {if (tid < 512) { sdata[tid] = mySum = (mySum + sdata[tid + 512]); } __syncthreads();} if(bx > 256) {if (tid < 256) { sdata[tid] = mySum = (mySum + sdata[tid + 256]); } __syncthreads();} if(bx > 128) {if (tid < 128) { sdata[tid] = mySum = (mySum + sdata[tid + 128]); } __syncthreads();} if(bx > 64) {if (tid < 64) { sdata[tid] = mySum = (mySum + sdata[tid + 64]); } __syncthreads();} if (tid < __min(bx / 2, 32)) { // now that we are using warp-synchronous programming (below) // we need to declare our shared memory volatile so that the compiler // doesn't reorder stores to it and induce incorrect behavior. 
volatile real* smem = sdata + tid; if(bx > 32) {*smem = mySum = mySum + smem[32];} if(bx > 16) {*smem = mySum = mySum + smem[16];} if(bx > 8) {*smem = mySum = mySum + smem[8];} if(bx > 4) {*smem = mySum = mySum + smem[4];} if(bx > 2) {*smem = mySum = mySum + smem[2];} if(bx > 1) {*smem = mySum = mySum + smem[1];} } if (tid == 0) buf_s_r[blockIdx.x] = mySum; } template<int bx> __global__ void kernel20( const int n, const int head, const int m, const int col, const int iPitch, const int oPitch, const real* g, real* buf_array_p, const real* wy, const real* ws, const int* iwhere ) { const int i = blockIdx.x * blockDim.x + threadIdx.x; const int j = blockIdx.y; const int tid = threadIdx.x; volatile __shared__ real sdata[bx]; real mySum; if(i < n) { int iwi = iwhere[i]; if(iwi != 0 && iwi != -1) { mySum = 0; } else { real neggi = -g[i]; real p0; if(j < col) { int pointr = Modular((head + j), m); p0 = wy[i * iPitch + pointr]; } else { int pointr = Modular((head + j - col), m); p0 = ws[i * iPitch + pointr]; } mySum = p0 * neggi; } } else { mySum = 0; } sdata[tid] = mySum; __syncthreads(); if(bx > 512) {if (tid < 512) { sdata[tid] = mySum = (mySum + sdata[tid + 512]); } __syncthreads();} if(bx > 256) {if (tid < 256) { sdata[tid] = mySum = (mySum + sdata[tid + 256]); } __syncthreads();} if(bx > 128) {if (tid < 128) { sdata[tid] = mySum = (mySum + sdata[tid + 128]); } __syncthreads();} if(bx > 64) {if (tid < 64) { sdata[tid] = mySum = (mySum + sdata[tid + 64]); } __syncthreads();} if (tid < __min(bx / 2, 32)) { // now that we are using warp-synchronous programming (below) // we need to declare our shared memory volatile so that the compiler // doesn't reorder stores to it and induce incorrect behavior. volatile real* smem = sdata + tid; if(bx > 32) {*smem = mySum = mySum + smem[32];} if(bx > 16) {*smem = mySum = mySum + smem[16];} if(bx > 8) {*smem = mySum = mySum + smem[8];} if(bx > 4) {*smem = mySum = mySum + smem[4];} if(bx > 2) {*smem = mySum = mySum + smem[2];} if(bx > 1) {*smem = mySum = mySum + smem[1];} } if (tid == 0) buf_array_p[j * oPitch + blockIdx.x] = mySum; } template<int bx> __global__ void kernel21( const int n, const int iPitch, const int oPitch, const real* buf_in, real* buf_out) { const int i = blockIdx.x * blockDim.x + threadIdx.x; const int j = blockIdx.y; const int tid = threadIdx.x; volatile __shared__ real sdata[bx]; real mySum; if(i < n) mySum = buf_in[j * iPitch + i]; else mySum = 0; sdata[tid] = mySum; __syncthreads(); if(bx > 512) {if (tid < 512) { sdata[tid] = mySum = (mySum + sdata[tid + 512]); } __syncthreads();} if(bx > 256) {if (tid < 256) { sdata[tid] = mySum = (mySum + sdata[tid + 256]); } __syncthreads();} if(bx > 128) {if (tid < 128) { sdata[tid] = mySum = (mySum + sdata[tid + 128]); } __syncthreads();} if(bx > 64) {if (tid < 64) { sdata[tid] = mySum = (mySum + sdata[tid + 64]); } __syncthreads();} if (tid < __min(bx / 2, 32)) { // now that we are using warp-synchronous programming (below) // we need to declare our shared memory volatile so that the compiler // doesn't reorder stores to it and induce incorrect behavior. 
volatile real* smem = sdata + tid; if(bx > 32) {*smem = mySum = mySum + smem[32];} if(bx > 16) {*smem = mySum = mySum + smem[16];} if(bx > 8) {*smem = mySum = mySum + smem[8];} if(bx > 4) {*smem = mySum = mySum + smem[4];} if(bx > 2) {*smem = mySum = mySum + smem[2];} if(bx > 1) {*smem = mySum = mySum + smem[1];} } if(tid == 0) { buf_out[j * oPitch + blockIdx.x] = mySum; } } __global__ void kernel22( const int n, real* p, const real theta ) { const int i = threadIdx.x; if(i >= n) return; p[i] *= theta; } __global__ void kernel4( const int col2, const real* p, real* c, const real dtm ) { const int i = threadIdx.x; if(i >= col2) return; c[i] = p[i] * dtm; } __global__ void kernel3( const int n, const real* x, const real* g, real* xcp, real* xcpb, const real dtm, const int* iwhere ) { const int i = blockIdx.x * blockDim.x + threadIdx.x; if(i >= n) return; real inc; int iwi = iwhere[i]; if(iwi != 0 && iwi != -1) { inc = 0; } else { real neggi = -g[i]; inc = neggi * dtm; } real res = x[i] + inc; xcp[i] = res; xcpb[i] = res; } void prog0 (const int& n, const real* x, const real* l, const real* u, const int* nbd, const real* g, real* t, real* xcp, real* xcpb, const int& m, const real* wy, const real* ws, const real* sy, const int iPitch, real* wt, const real& theta, const int& col, const int& head, real* p, real* c, real* v, int& nint, const real& sbgnrm, real* buf_s_r, real* buf_array_p, int* iwhere, const int& iPitch_normal, const hipStream_t* streamPool ) { CheckBuffer(x, n, n); CheckBuffer(l, n, n); CheckBuffer(u, n, n); if(sbgnrm <= 0) { hipMemcpyAsync(xcp, x, n * sizeof(real), hipMemcpyDeviceToDevice); return; } if(col > 0) hipMemsetAsync(p, 0, col * 2 * sizeof(real)); real* vec_h; real* vec_d; cutilSafeCall(hipHostMalloc(&vec_h, 3 * sizeof(real), hipHostMallocMapped)); cutilSafeCall(hipHostGetDevicePointer(&vec_d, vec_h, 0)); real* bkmin_d = vec_d; real* f1_d = vec_d + 1; real* bkmin_h = vec_h; real* f1_h = vec_h + 1; real* fd_h = vec_h + 2; int nblock0 = n; int mi = log2Up(nblock0); int nblock1 = iDivUp2(nblock0, mi); real* output0 = (nblock1 == 1) ? bkmin_d : t; real* output1 = (nblock1 == 1) ? f1_d : buf_s_r; real* output2 = (nblock1 == 1) ? p : buf_array_p; dynamicCall(kernel0, mi, nblock1, 1, streamPool[0], (nblock0, g, nbd, output0, x, u, l, iwhere)); dynamicCall(kernel1, mi, nblock1, 1, streamPool[0], (nblock0, g, output1, iwhere)); int op20 = (nblock1 == 1) ? 1 : iPitch_normal; if(col > 0) { dynamicCall(kernel20, mi, nblock1, col * 2, streamPool[0], (nblock0, head, m, col, iPitch, op20, g, output2, wy, ws, iwhere)); } nblock0 = nblock1; while(nblock0 > 1) { nblock1 = iDivUp2(nblock0, mi); real* input0 = output0; real* input1 = output1; real* input2 = output2; output0 = (nblock1 == 1) ? bkmin_d : (output0 + nblock0); output1 = (nblock1 == 1) ? f1_d : (output1 + nblock0); output2 = (nblock1 == 1) ? p : (output2 + nblock0); dynamicCall(kernel01, mi, nblock1, 1, streamPool[0], (nblock0, input0, output0)); dynamicCall(kernel21, mi, nblock1, 1, streamPool[1], (nblock0, 1, 1, input1, output1)); int op20 = (nblock1 == 1) ? 
1 : iPitch_normal; if(col > 0) { dynamicCall(kernel21, mi, nblock1, col * 2, streamPool[2], (nblock0, iPitch_normal, op20, input2, output2)); } nblock0 = nblock1; } if( col > 0 && theta != 1 ) { CheckBuffer(p, col * 2, col * 2); hipLaunchKernelGGL(( kernel22), dim3(dim3(1)), dim3(dim3(col)), 0, streamPool[2], col, p + col, theta); CheckBuffer(p, col * 2, col * 2); } *fd_h = 0; if(col > 0) { bmv::prog0(sy, col, iPitch, p, v, streamPool[2]); CheckBuffer(v, col * 2, col * 2); CheckBuffer(p, col * 2, col * 2); bmv::prog1(wt, col, iPitch, p, v, streamPool[2]); CheckBuffer(v, col * 2, col * 2); CheckBuffer(p, col * 2, col * 2); bmv::prog2(sy, wt, col, iPitch, p, v, streamPool[2]); CheckBuffer(v, col * 2, col * 2); CheckBuffer(p, col * 2, col * 2); hipblasSetStream(cublasHd, streamPool[2]); cublasRdot(cublasHd, col * 2, v, 1, p, 1, fd_h); hipblasSetStream(cublasHd, NULL); } cutilSafeCall(hipDeviceSynchronize()); real f2 = -theta * *f1_h - *fd_h; real dt = -*f1_h / f2; real dtm = __min(*bkmin_h, dt); dtm = __max(0, dtm); hipLaunchKernelGGL(( kernel3), dim3(dim3(iDivUp(n, 512))), dim3(dim3(512)), 0, streamPool[0], n, x, g, xcp, xcpb, dtm, iwhere); if(col > 0) { hipLaunchKernelGGL(( kernel4), dim3(dim3(1)), dim3(dim3(col * 2)), 0, streamPool[1], col * 2, p, c, dtm); } } }; };
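// ---------------------------------------------------------------------------
// Once the tree reductions finish, prog0 above combines three scalars on the
// host: f1 = -||g_free||^2 (the sum produced by kernel1), fd = the dot
// product of v and p from the bmv products, and bkmin, the nearest breakpoint
// found by kernel0/kernel01. The generalized Cauchy step length is then
// dt = -f1/f2 with f2 = -theta*f1 - fd, clamped into [0, bkmin]. The same
// arithmetic in isolation (hypothetical names, mirroring the end of prog0):

static double cauchy_step_length(double f1, double fd, double theta,
                                 double bkmin) {
    double f2 = -theta * f1 - fd;   // curvature along the projected gradient
    double dt = -f1 / f2;           // unconstrained minimizer of the 1-D model
    double dtm = dt < bkmin ? dt : bkmin;
    return dtm > 0.0 ? dtm : 0.0;   // never step past the first breakpoint
}
// ---------------------------------------------------------------------------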
c4771a9d7503804927fbc8c25401b45ed81b90bf.cu
/************************************************************************* GPU Version: Tsinghua University, Aug. 2012. Written by Yun Fei in collaboration with W. Wang and B. Wang Original: Optimization Technology Center. Argonne National Laboratory and Northwestern University. Written by Ciyou Zhu in collaboration with R.H. Byrd, P. Lu-Chen and J. Nocedal. Contributors: * Sergey Bochkanov (ALGLIB project). Translation from FORTRAN to pseudocode. This software is freely available, but we expect that all publications describing work using this software, or all commercial products using it, quote at least one of the references given below: * R. H. Byrd, P. Lu and J. Nocedal. A Limited Memory Algorithm for Bound Constrained Optimization, (1995), SIAM Journal on Scientific and Statistical Computing , 16, 5, pp. 1190-1208. * C. Zhu, R.H. Byrd and J. Nocedal. L-BFGS-B: Algorithm 778: L-BFGS-B, FORTRAN routines for large scale bound constrained optimization (1997), ACM Transactions on Mathematical Software, Vol 23, Num. 4, pp. 550 - 560. *************************************************************************/ #include "lbfgsbcuda.h" namespace lbfgsbcuda { namespace cauchy { template<int bx> __global__ void kernel0( int n, const real* g, const int* nbd, real* t, const real* x, const real* u, const real* l, int* iwhere ) { const int i = blockIdx.x * blockDim.x + threadIdx.x; const int tid = threadIdx.x; volatile __shared__ real sdata[bx]; real mySum; if(i < n) { int iwi = iwhere[i]; if( iwi != 3 && iwi != -1 ) { real neggi = -g[i]; int nbdi = nbd[i]; real tl = 0; real tu = 0; if( nbdi <= 2 ) { tl = x[i] - l[i]; } if( nbdi >= 2 ) { tu = u[i] - x[i]; } if(nbdi <= 2 && tl<=0 && neggi <= 0) { iwi = 1; }else if(nbdi >= 2 && tu <= 0 && neggi >= 0) { iwi = 2; } else if( neggi == 0) { iwi = -3; } else { iwi = 0; } iwhere[i] = iwi; if((iwi != 0 && iwi != -1) || neggi == 0) { mySum = machinemaximum; } else { if(nbdi <= 2 && nbdi != 0 && neggi < 0) { mySum = tl / (-neggi); } else if(nbdi >= 2 && neggi > 0) { mySum = tu / neggi; } else { mySum = machinemaximum; } } } else { mySum = machinemaximum; } } else { mySum = machinemaximum; } sdata[tid] = mySum; __syncthreads(); if(bx > 512) {if (tid < 512) { sdata[tid] = mySum = minr(mySum, sdata[tid + 512]); } __syncthreads();} if(bx > 256) {if (tid < 256) { sdata[tid] = mySum = minr(mySum, sdata[tid + 256]); } __syncthreads();} if(bx > 128) {if (tid < 128) { sdata[tid] = mySum = minr(mySum, sdata[tid + 128]); } __syncthreads();} if(bx > 64) {if (tid < 64) { sdata[tid] = mySum = minr(mySum, sdata[tid + 64]); } __syncthreads();} if (tid < __min(bx / 2, 32)) { // now that we are using warp-synchronous programming (below) // we need to declare our shared memory volatile so that the compiler // doesn't reorder stores to it and induce incorrect behavior. 
volatile real* smem = sdata + tid; if(bx > 32) {*smem = mySum = minr(mySum, smem[32]);} if(bx > 16) {*smem = mySum = minr(mySum, smem[16]);} if(bx > 8) {*smem = mySum = minr(mySum, smem[8]);} if(bx > 4) {*smem = mySum = minr(mySum, smem[4]);} if(bx > 2) {*smem = mySum = minr(mySum, smem[2]);} if(bx > 1) {*smem = mySum = minr(mySum, smem[1]);} } if (tid == 0) t[blockIdx.x] = mySum; } template<int bx> __global__ void kernel01( const int n, const real* buf_in, real* buf_out) { const int i = blockIdx.x * blockDim.x + threadIdx.x; const int tid = threadIdx.x; volatile __shared__ real sdata[bx]; real mySum; if(i < n) mySum = buf_in[i]; else mySum = machinemaximum; sdata[tid] = mySum; __syncthreads(); if(bx > 512) {if (tid < 512) { sdata[tid] = mySum = minr(mySum, sdata[tid + 512]); } __syncthreads();} if(bx > 256) {if (tid < 256) { sdata[tid] = mySum = minr(mySum, sdata[tid + 256]); } __syncthreads();} if(bx > 128) {if (tid < 128) { sdata[tid] = mySum = minr(mySum, sdata[tid + 128]); } __syncthreads();} if(bx > 64) {if (tid < 64) { sdata[tid] = mySum = minr(mySum, sdata[tid + 64]); } __syncthreads();} if (tid < __min(bx / 2, 32)) { // now that we are using warp-synchronous programming (below) // we need to declare our shared memory volatile so that the compiler // doesn't reorder stores to it and induce incorrect behavior. volatile real* smem = sdata + tid; if(bx > 32) {*smem = mySum = minr(mySum, smem[32]);} if(bx > 16) {*smem = mySum = minr(mySum, smem[16]);} if(bx > 8) {*smem = mySum = minr(mySum, smem[8]);} if(bx > 4) {*smem = mySum = minr(mySum, smem[4]);} if(bx > 2) {*smem = mySum = minr(mySum, smem[2]);} if(bx > 1) {*smem = mySum = minr(mySum, smem[1]);} } if(tid == 0) { buf_out[blockIdx.x] = mySum; } } template<int bx> __global__ void kernel1( const int n, const real* g, real* buf_s_r, const int* iwhere ) { const int i = blockIdx.x * blockDim.x + threadIdx.x; const int tid = threadIdx.x; volatile __shared__ real sdata[bx]; real mySum; if(i >= n) { mySum = 0; } else { int iwi = iwhere[i]; if(iwi != 0 && iwi != -1) { mySum = 0; } else { real neggi = g[i]; mySum = -neggi * neggi; } } sdata[tid] = mySum; __syncthreads(); if(bx > 512) {if (tid < 512) { sdata[tid] = mySum = (mySum + sdata[tid + 512]); } __syncthreads();} if(bx > 256) {if (tid < 256) { sdata[tid] = mySum = (mySum + sdata[tid + 256]); } __syncthreads();} if(bx > 128) {if (tid < 128) { sdata[tid] = mySum = (mySum + sdata[tid + 128]); } __syncthreads();} if(bx > 64) {if (tid < 64) { sdata[tid] = mySum = (mySum + sdata[tid + 64]); } __syncthreads();} if (tid < __min(bx / 2, 32)) { // now that we are using warp-synchronous programming (below) // we need to declare our shared memory volatile so that the compiler // doesn't reorder stores to it and induce incorrect behavior. 
volatile real* smem = sdata + tid; if(bx > 32) {*smem = mySum = mySum + smem[32];} if(bx > 16) {*smem = mySum = mySum + smem[16];} if(bx > 8) {*smem = mySum = mySum + smem[8];} if(bx > 4) {*smem = mySum = mySum + smem[4];} if(bx > 2) {*smem = mySum = mySum + smem[2];} if(bx > 1) {*smem = mySum = mySum + smem[1];} } if (tid == 0) buf_s_r[blockIdx.x] = mySum; } template<int bx> __global__ void kernel20( const int n, const int head, const int m, const int col, const int iPitch, const int oPitch, const real* g, real* buf_array_p, const real* wy, const real* ws, const int* iwhere ) { const int i = blockIdx.x * blockDim.x + threadIdx.x; const int j = blockIdx.y; const int tid = threadIdx.x; volatile __shared__ real sdata[bx]; real mySum; if(i < n) { int iwi = iwhere[i]; if(iwi != 0 && iwi != -1) { mySum = 0; } else { real neggi = -g[i]; real p0; if(j < col) { int pointr = Modular((head + j), m); p0 = wy[i * iPitch + pointr]; } else { int pointr = Modular((head + j - col), m); p0 = ws[i * iPitch + pointr]; } mySum = p0 * neggi; } } else { mySum = 0; } sdata[tid] = mySum; __syncthreads(); if(bx > 512) {if (tid < 512) { sdata[tid] = mySum = (mySum + sdata[tid + 512]); } __syncthreads();} if(bx > 256) {if (tid < 256) { sdata[tid] = mySum = (mySum + sdata[tid + 256]); } __syncthreads();} if(bx > 128) {if (tid < 128) { sdata[tid] = mySum = (mySum + sdata[tid + 128]); } __syncthreads();} if(bx > 64) {if (tid < 64) { sdata[tid] = mySum = (mySum + sdata[tid + 64]); } __syncthreads();} if (tid < __min(bx / 2, 32)) { // now that we are using warp-synchronous programming (below) // we need to declare our shared memory volatile so that the compiler // doesn't reorder stores to it and induce incorrect behavior. volatile real* smem = sdata + tid; if(bx > 32) {*smem = mySum = mySum + smem[32];} if(bx > 16) {*smem = mySum = mySum + smem[16];} if(bx > 8) {*smem = mySum = mySum + smem[8];} if(bx > 4) {*smem = mySum = mySum + smem[4];} if(bx > 2) {*smem = mySum = mySum + smem[2];} if(bx > 1) {*smem = mySum = mySum + smem[1];} } if (tid == 0) buf_array_p[j * oPitch + blockIdx.x] = mySum; } template<int bx> __global__ void kernel21( const int n, const int iPitch, const int oPitch, const real* buf_in, real* buf_out) { const int i = blockIdx.x * blockDim.x + threadIdx.x; const int j = blockIdx.y; const int tid = threadIdx.x; volatile __shared__ real sdata[bx]; real mySum; if(i < n) mySum = buf_in[j * iPitch + i]; else mySum = 0; sdata[tid] = mySum; __syncthreads(); if(bx > 512) {if (tid < 512) { sdata[tid] = mySum = (mySum + sdata[tid + 512]); } __syncthreads();} if(bx > 256) {if (tid < 256) { sdata[tid] = mySum = (mySum + sdata[tid + 256]); } __syncthreads();} if(bx > 128) {if (tid < 128) { sdata[tid] = mySum = (mySum + sdata[tid + 128]); } __syncthreads();} if(bx > 64) {if (tid < 64) { sdata[tid] = mySum = (mySum + sdata[tid + 64]); } __syncthreads();} if (tid < __min(bx / 2, 32)) { // now that we are using warp-synchronous programming (below) // we need to declare our shared memory volatile so that the compiler // doesn't reorder stores to it and induce incorrect behavior. 
volatile real* smem = sdata + tid; if(bx > 32) {*smem = mySum = mySum + smem[32];} if(bx > 16) {*smem = mySum = mySum + smem[16];} if(bx > 8) {*smem = mySum = mySum + smem[8];} if(bx > 4) {*smem = mySum = mySum + smem[4];} if(bx > 2) {*smem = mySum = mySum + smem[2];} if(bx > 1) {*smem = mySum = mySum + smem[1];} } if(tid == 0) { buf_out[j * oPitch + blockIdx.x] = mySum; } } __global__ void kernel22( const int n, real* p, const real theta ) { const int i = threadIdx.x; if(i >= n) return; p[i] *= theta; } __global__ void kernel4( const int col2, const real* p, real* c, const real dtm ) { const int i = threadIdx.x; if(i >= col2) return; c[i] = p[i] * dtm; } __global__ void kernel3( const int n, const real* x, const real* g, real* xcp, real* xcpb, const real dtm, const int* iwhere ) { const int i = blockIdx.x * blockDim.x + threadIdx.x; if(i >= n) return; real inc; int iwi = iwhere[i]; if(iwi != 0 && iwi != -1) { inc = 0; } else { real neggi = -g[i]; inc = neggi * dtm; } real res = x[i] + inc; xcp[i] = res; xcpb[i] = res; } void prog0 (const int& n, const real* x, const real* l, const real* u, const int* nbd, const real* g, real* t, real* xcp, real* xcpb, const int& m, const real* wy, const real* ws, const real* sy, const int iPitch, real* wt, const real& theta, const int& col, const int& head, real* p, real* c, real* v, int& nint, const real& sbgnrm, real* buf_s_r, real* buf_array_p, int* iwhere, const int& iPitch_normal, const cudaStream_t* streamPool ) { CheckBuffer(x, n, n); CheckBuffer(l, n, n); CheckBuffer(u, n, n); if(sbgnrm <= 0) { cudaMemcpyAsync(xcp, x, n * sizeof(real), cudaMemcpyDeviceToDevice); return; } if(col > 0) cudaMemsetAsync(p, 0, col * 2 * sizeof(real)); real* vec_h; real* vec_d; cutilSafeCall(cudaHostAlloc(&vec_h, 3 * sizeof(real), cudaHostAllocMapped)); cutilSafeCall(cudaHostGetDevicePointer(&vec_d, vec_h, 0)); real* bkmin_d = vec_d; real* f1_d = vec_d + 1; real* bkmin_h = vec_h; real* f1_h = vec_h + 1; real* fd_h = vec_h + 2; int nblock0 = n; int mi = log2Up(nblock0); int nblock1 = iDivUp2(nblock0, mi); real* output0 = (nblock1 == 1) ? bkmin_d : t; real* output1 = (nblock1 == 1) ? f1_d : buf_s_r; real* output2 = (nblock1 == 1) ? p : buf_array_p; dynamicCall(kernel0, mi, nblock1, 1, streamPool[0], (nblock0, g, nbd, output0, x, u, l, iwhere)); dynamicCall(kernel1, mi, nblock1, 1, streamPool[0], (nblock0, g, output1, iwhere)); int op20 = (nblock1 == 1) ? 1 : iPitch_normal; if(col > 0) { dynamicCall(kernel20, mi, nblock1, col * 2, streamPool[0], (nblock0, head, m, col, iPitch, op20, g, output2, wy, ws, iwhere)); } nblock0 = nblock1; while(nblock0 > 1) { nblock1 = iDivUp2(nblock0, mi); real* input0 = output0; real* input1 = output1; real* input2 = output2; output0 = (nblock1 == 1) ? bkmin_d : (output0 + nblock0); output1 = (nblock1 == 1) ? f1_d : (output1 + nblock0); output2 = (nblock1 == 1) ? p : (output2 + nblock0); dynamicCall(kernel01, mi, nblock1, 1, streamPool[0], (nblock0, input0, output0)); dynamicCall(kernel21, mi, nblock1, 1, streamPool[1], (nblock0, 1, 1, input1, output1)); int op20 = (nblock1 == 1) ? 
1 : iPitch_normal; if(col > 0) { dynamicCall(kernel21, mi, nblock1, col * 2, streamPool[2], (nblock0, iPitch_normal, op20, input2, output2)); } nblock0 = nblock1; } if( col > 0 && theta != 1 ) { CheckBuffer(p, col * 2, col * 2); kernel22<<<dim3(1), dim3(col), 0, streamPool[2]>>> (col, p + col, theta); CheckBuffer(p, col * 2, col * 2); } *fd_h = 0; if(col > 0) { bmv::prog0(sy, col, iPitch, p, v, streamPool[2]); CheckBuffer(v, col * 2, col * 2); CheckBuffer(p, col * 2, col * 2); bmv::prog1(wt, col, iPitch, p, v, streamPool[2]); CheckBuffer(v, col * 2, col * 2); CheckBuffer(p, col * 2, col * 2); bmv::prog2(sy, wt, col, iPitch, p, v, streamPool[2]); CheckBuffer(v, col * 2, col * 2); CheckBuffer(p, col * 2, col * 2); cublasSetStream(cublasHd, streamPool[2]); cublasRdot(cublasHd, col * 2, v, 1, p, 1, fd_h); cublasSetStream(cublasHd, NULL); } cutilSafeCall(cudaDeviceSynchronize()); real f2 = -theta * *f1_h - *fd_h; real dt = -*f1_h / f2; real dtm = __min(*bkmin_h, dt); dtm = __max(0, dtm); kernel3<<<dim3(iDivUp(n, 512)), dim3(512), 0, streamPool[0]>>> (n, x, g, xcp, xcpb, dtm, iwhere); if(col > 0) { kernel4<<<dim3(1), dim3(col * 2), 0, streamPool[1]>>> (col * 2, p, c, dtm); } } }; };
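// ---------------------------------------------------------------------------
// The reduction kernels above finish with the classic volatile shared-memory
// tail, which assumes the last 32 threads execute in lockstep. Implicit warp
// synchrony is no longer guaranteed on recent NVIDIA architectures; since
// CUDA 9 the same final-warp minimum can be written with explicit warp
// shuffles -- a sketch, not a drop-in replacement for the code above:

__device__ float warp_min(float v) {
    // tree reduction across one full warp; afterwards lane 0 holds the
    // minimum of all 32 lanes
    for (int offset = 16; offset > 0; offset >>= 1)
        v = fminf(v, __shfl_down_sync(0xffffffffu, v, offset));
    return v;
}
// ---------------------------------------------------------------------------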
52e359681062e878cd549578c474c2f5c59ed08b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // ------------------------------------------------------------------ // Fast R-CNN // Copyright (c) 2015 Microsoft // Licensed under The MIT License [see fast-rcnn/LICENSE for details] // Written by Ross Girshick // ------------------------------------------------------------------ #include "caffe/fast_rcnn_layers.hpp" namespace caffe { template <typename Dtype> __global__ void SmoothL1Forward(const int n, const Dtype* in, Dtype* out, Dtype sigma2) { // f(x) = 0.5 * (sigma * x)^2 if |x| < 1 / sigma / sigma // |x| - 0.5 / sigma / sigma otherwise CUDA_KERNEL_LOOP(index, n) { Dtype val = in[index]; Dtype abs_val = abs(val); if (abs_val < 1.0 / sigma2) { out[index] = 0.5 * val * val * sigma2; } else { out[index] = abs_val - 0.5 / sigma2; } } } template <typename Dtype> void SmoothL1LossLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { int count = bottom[0]->count(); caffe_gpu_sub( count, bottom[0]->gpu_data(), bottom[1]->gpu_data(), diff_.mutable_gpu_data()); // d := b0 - b1 if (has_weights_) { // apply "inside" weights caffe_gpu_mul( count, bottom[2]->gpu_data(), diff_.gpu_data(), diff_.mutable_gpu_data()); // d := w_in * (b0 - b1) } hipLaunchKernelGGL(( SmoothL1Forward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS),0,Caffe::cuda_stream(), count, diff_.gpu_data(), errors_.mutable_gpu_data(), sigma2_); CUDA_POST_KERNEL_CHECK; if (has_weights_) { // apply "outside" weights caffe_gpu_mul( count, bottom[3]->gpu_data(), errors_.gpu_data(), errors_.mutable_gpu_data()); // d := w_out * SmoothL1(w_in * (b0 - b1)) } Dtype loss; caffe_gpu_dot(count, ones_.gpu_data(), errors_.gpu_data(), &loss); top[0]->mutable_cpu_data()[0] = loss / bottom[0]->num(); } template <typename Dtype> __global__ void SmoothL1Backward(const int n, const Dtype* in, Dtype* out, Dtype sigma2) { // f'(x) = sigma * sigma * x if |x| < 1 / sigma / sigma // = sign(x) otherwise CUDA_KERNEL_LOOP(index, n) { Dtype val = in[index]; Dtype abs_val = abs(val); if (abs_val < 1.0 / sigma2) { out[index] = sigma2 * val; } else { out[index] = (Dtype(0) < val) - (val < Dtype(0)); } } } template <typename Dtype> void SmoothL1LossLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { // after forwards, diff_ holds w_in * (b0 - b1) int count = diff_.count(); hipLaunchKernelGGL(( SmoothL1Backward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS),0,Caffe::cuda_stream(), count, diff_.gpu_data(), diff_.mutable_gpu_data(), sigma2_); CUDA_POST_KERNEL_CHECK; for (int i = 0; i < 2; ++i) { if (propagate_down[i]) { const Dtype sign = (i == 0) ? 1 : -1; const Dtype alpha = sign * top[0]->cpu_diff()[0] / bottom[i]->num(); caffe_gpu_axpby( count, // count alpha, // alpha diff_.gpu_data(), // x Dtype(0), // beta bottom[i]->mutable_gpu_diff()); // y if (has_weights_) { // Scale by "inside" weight caffe_gpu_mul( count, bottom[2]->gpu_data(), bottom[i]->gpu_diff(), bottom[i]->mutable_gpu_diff()); // Scale by "outside" weight caffe_gpu_mul( count, bottom[3]->gpu_data(), bottom[i]->gpu_diff(), bottom[i]->mutable_gpu_diff()); } } } } INSTANTIATE_LAYER_GPU_FUNCS(SmoothL1LossLayer); } // namespace caffe
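// ---------------------------------------------------------------------------
// The forward/backward kernels above implement the scaled smooth-L1 (Huber)
// loss written out in their comments, with sigma2 = sigma * sigma. A plain
// scalar reference of both branches (a sketch for readers, not part of
// Caffe):

#include <math.h>

static double smooth_l1(double x, double sigma2) {
    double ax = fabs(x);
    if (ax < 1.0 / sigma2)
        return 0.5 * x * x * sigma2;  // quadratic near zero
    return ax - 0.5 / sigma2;         // linear tail
}

static double smooth_l1_grad(double x, double sigma2) {
    double ax = fabs(x);
    if (ax < 1.0 / sigma2)
        return sigma2 * x;                           // derivative of the quadratic
    return x > 0.0 ? 1.0 : (x < 0.0 ? -1.0 : 0.0);   // sign(x)
}

// The two branches meet at |x| = 1/sigma2 with equal value (0.5/sigma2) and
// equal slope (+/-1), so the loss is C^1 -- which is why the backward kernel
// can switch between sigma2*x and sign(x) without a special case.
// ---------------------------------------------------------------------------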
52e359681062e878cd549578c474c2f5c59ed08b.cu
// ------------------------------------------------------------------ // Fast R-CNN // Copyright (c) 2015 Microsoft // Licensed under The MIT License [see fast-rcnn/LICENSE for details] // Written by Ross Girshick // ------------------------------------------------------------------ #include "caffe/fast_rcnn_layers.hpp" namespace caffe { template <typename Dtype> __global__ void SmoothL1Forward(const int n, const Dtype* in, Dtype* out, Dtype sigma2) { // f(x) = 0.5 * (sigma * x)^2 if |x| < 1 / sigma / sigma // |x| - 0.5 / sigma / sigma otherwise CUDA_KERNEL_LOOP(index, n) { Dtype val = in[index]; Dtype abs_val = abs(val); if (abs_val < 1.0 / sigma2) { out[index] = 0.5 * val * val * sigma2; } else { out[index] = abs_val - 0.5 / sigma2; } } } template <typename Dtype> void SmoothL1LossLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { int count = bottom[0]->count(); caffe_gpu_sub( count, bottom[0]->gpu_data(), bottom[1]->gpu_data(), diff_.mutable_gpu_data()); // d := b0 - b1 if (has_weights_) { // apply "inside" weights caffe_gpu_mul( count, bottom[2]->gpu_data(), diff_.gpu_data(), diff_.mutable_gpu_data()); // d := w_in * (b0 - b1) } SmoothL1Forward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS,0,Caffe::cuda_stream()>>>( count, diff_.gpu_data(), errors_.mutable_gpu_data(), sigma2_); CUDA_POST_KERNEL_CHECK; if (has_weights_) { // apply "outside" weights caffe_gpu_mul( count, bottom[3]->gpu_data(), errors_.gpu_data(), errors_.mutable_gpu_data()); // d := w_out * SmoothL1(w_in * (b0 - b1)) } Dtype loss; caffe_gpu_dot(count, ones_.gpu_data(), errors_.gpu_data(), &loss); top[0]->mutable_cpu_data()[0] = loss / bottom[0]->num(); } template <typename Dtype> __global__ void SmoothL1Backward(const int n, const Dtype* in, Dtype* out, Dtype sigma2) { // f'(x) = sigma * sigma * x if |x| < 1 / sigma / sigma // = sign(x) otherwise CUDA_KERNEL_LOOP(index, n) { Dtype val = in[index]; Dtype abs_val = abs(val); if (abs_val < 1.0 / sigma2) { out[index] = sigma2 * val; } else { out[index] = (Dtype(0) < val) - (val < Dtype(0)); } } } template <typename Dtype> void SmoothL1LossLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { // after forwards, diff_ holds w_in * (b0 - b1) int count = diff_.count(); SmoothL1Backward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS,0,Caffe::cuda_stream()>>>( count, diff_.gpu_data(), diff_.mutable_gpu_data(), sigma2_); CUDA_POST_KERNEL_CHECK; for (int i = 0; i < 2; ++i) { if (propagate_down[i]) { const Dtype sign = (i == 0) ? 1 : -1; const Dtype alpha = sign * top[0]->cpu_diff()[0] / bottom[i]->num(); caffe_gpu_axpby( count, // count alpha, // alpha diff_.gpu_data(), // x Dtype(0), // beta bottom[i]->mutable_gpu_diff()); // y if (has_weights_) { // Scale by "inside" weight caffe_gpu_mul( count, bottom[2]->gpu_data(), bottom[i]->gpu_diff(), bottom[i]->mutable_gpu_diff()); // Scale by "outside" weight caffe_gpu_mul( count, bottom[3]->gpu_data(), bottom[i]->gpu_diff(), bottom[i]->mutable_gpu_diff()); } } } } INSTANTIATE_LAYER_GPU_FUNCS(SmoothL1LossLayer); } // namespace caffe
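// ---------------------------------------------------------------------------
// CUDA_KERNEL_LOOP above is Caffe's grid-stride loop macro. Expanded by hand,
// the forward kernel is equivalent to the following (standard CUDA; the float
// specialization is chosen here for brevity):

__global__ void smooth_l1_forward_expanded(const int n, const float* in,
                                           float* out, const float sigma2) {
    for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < n;
         index += blockDim.x * gridDim.x) {       // stride by the whole grid
        float val = in[index];
        float abs_val = fabsf(val);
        if (abs_val < 1.0f / sigma2)
            out[index] = 0.5f * val * val * sigma2;  // quadratic zone
        else
            out[index] = abs_val - 0.5f / sigma2;    // linear tail
    }
}
// ---------------------------------------------------------------------------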
650832569a5dbb7aa6cdab4afcf69500459ddad9.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "engine.h" //using cuda to initiate canvas array __global__ void gp_array_list_init(int *list, int val){ list[blockIdx.x] = val; //printf("list[%d]:%d\n",blockIdx.x,list[blockIdx.x]); } __global__ void gp_array_set(int *d_r,int *d_g,int *d_b, int *d_x, int *d_y, int r, int g, int b){//int *d_r, int *d_g, int *d_b, int *x, int *y){ int index = blockIdx.x * blockDim.x + threadIdx.x; int x_lim = X_MAX - X_MIN; printf("d_r%d\n",d_r[index]); printf("d_y%d\n",d_y[index]); //int buffer = (d_y[index] > X_) ? d_y[index] : ok; //printf("buffer:%d\n",buffer); //printf("compare:%d\n",buffer > Y if(d_y[index] > Y_MAX || d_y[index] <= Y_MIN || d_x[index] >= X_MAX || d_x[index] < X_MIN){ printf("Err: in arr, coordinate has exceded dimension\n"); printf("Err: x->%d y->%d\n",d_x[index],d_y[index]); printf("Note: actual X_MAX is %d and Y_MIN is %d\n",X_MAX - 1, Y_MIN + 1); }else{ //printf("index:%d\n",index); d_y[index] = 0 - d_y[index]; d_r[(d_y[index] + Y_MAX) * x_lim + d_x[index] + X_MAX] = r; d_g[(d_y[index] + Y_MAX) * x_lim + d_x[index] + X_MAX] = g; d_b[(d_y[index] + Y_MAX) * x_lim + d_x[index] + X_MAX] = b; //printf("arr:%d\n",(y[index] + Y_MAX) * x_lim + x[index] + X_MAX); //printf("hello!\n"); } } struct Array array_init(struct Array arr){ int block = (X_MAX - X_MIN) * (Y_MAX - Y_MIN); int size = block * sizeof(int); //int * gp_arr_r; //int * gp_arr_g; //int * gp_arr_b; //arr.d_r = (int*)malloc(size); //arr.d_g = (int*)malloc(size); //arr.d_b = (int*)malloc(size); hipMalloc((void **)&(arr.d_r),size); hipMalloc((void **)&(arr.d_g),size); hipMalloc((void **)&(arr.d_b),size); hipLaunchKernelGGL(( gp_array_list_init), dim3(block),dim3(1), 0, 0, arr.d_r,ARR_R); hipLaunchKernelGGL(( gp_array_list_init), dim3(block),dim3(1), 0, 0, arr.d_g,ARR_G); hipLaunchKernelGGL(( gp_array_list_init), dim3(block),dim3(1), 0, 0, arr.d_b,ARR_B); // hipMemcpy(arr.r, gp_arr_r, size, hipMemcpyDeviceToHost); // hipMemcpy(arr.g, gp_arr_g, size, hipMemcpyDeviceToHost); // hipMemcpy(arr.b, gp_arr_b, size, hipMemcpyDeviceToHost); // hipFree(gp_arr_r); // hipFree(gp_arr_g); // hipFree(gp_arr_b); return arr; } /* struct Array arr_set(struct Array arr,int x, int y, int color[3]){ int x_lim = X_MAX - X_MIN; if(y > Y_MAX || y <= Y_MIN || x >= X_MAX || x < X_MIN){ printf("Err: in arr, coordinate has exceded dimension\n"); printf("Err: x->%d y->%d\n",x,y); printf("Note: actual X_MAX is %d and Y_MIN is %d\n",X_MAX - 1, Y_MIN + 1); }else{ y = 0 - y; arr.r[(y + Y_MAX) * x_lim + x + X_MAX] = color[0]; arr.g[(y + Y_MAX) * x_lim + x + X_MAX] = color[1]; arr.b[(y + Y_MAX) * x_lim + x + X_MAX] = color[2]; } return arr; } /* void array_print(struct Array arr){ int size = (X_MAX - X_MIN) * (Y_MAX - Y_MIN); int i; for(i = 0;i < size; i++){ printf("r[%d] = %d\n",i,arr.r[i]); printf("g[%d] = %d\n",i,arr.g[i]); printf("b[%d] = %d\n",i,arr.b[i]); } } */ void array_push(struct Array arr, char * filename){ int y_lim = Y_MAX - Y_MIN; int x_lim = X_MAX - X_MIN; char line[20]; int * h_r; int * h_g; int * h_b; int block = (X_MAX - X_MIN) * (Y_MAX - Y_MIN); int size = block * sizeof(int); h_r = (int*)malloc(size); h_g = (int*)malloc(size); h_b = (int*)malloc(size); hipMemcpy(h_r, arr.d_r, size, hipMemcpyDeviceToHost); hipMemcpy(h_g, arr.d_g, size, hipMemcpyDeviceToHost); hipMemcpy(h_b, arr.d_b, size, hipMemcpyDeviceToHost); int i,j,file; file = open(filename, O_CREAT | O_TRUNC | O_WRONLY, 0644); snprintf(line, sizeof(line), "P3 %d %d 255\n",x_lim,y_lim); write 
(file, line, strlen(line)); for(i = 0; i < y_lim; i++){ for(j = 0; j < x_lim; j++){ snprintf(line, sizeof(line), "%d %d %d\n", h_r[i * y_lim + j], h_g[i * y_lim + j], h_b[i * y_lim + j]); write(file, line, strlen(line)); } } }
650832569a5dbb7aa6cdab4afcf69500459ddad9.cu
#include "engine.h" //using cuda to initiate canvas array __global__ void gp_array_list_init(int *list, int val){ list[blockIdx.x] = val; //printf("list[%d]:%d\n",blockIdx.x,list[blockIdx.x]); } __global__ void gp_array_set(int *d_r,int *d_g,int *d_b, int *d_x, int *d_y, int r, int g, int b){//int *d_r, int *d_g, int *d_b, int *x, int *y){ int index = blockIdx.x * blockDim.x + threadIdx.x; int x_lim = X_MAX - X_MIN; printf("d_r%d\n",d_r[index]); printf("d_y%d\n",d_y[index]); //int buffer = (d_y[index] > X_) ? d_y[index] : ok; //printf("buffer:%d\n",buffer); //printf("compare:%d\n",buffer > Y if(d_y[index] > Y_MAX || d_y[index] <= Y_MIN || d_x[index] >= X_MAX || d_x[index] < X_MIN){ printf("Err: in arr, coordinate has exceded dimension\n"); printf("Err: x->%d y->%d\n",d_x[index],d_y[index]); printf("Note: actual X_MAX is %d and Y_MIN is %d\n",X_MAX - 1, Y_MIN + 1); }else{ //printf("index:%d\n",index); d_y[index] = 0 - d_y[index]; d_r[(d_y[index] + Y_MAX) * x_lim + d_x[index] + X_MAX] = r; d_g[(d_y[index] + Y_MAX) * x_lim + d_x[index] + X_MAX] = g; d_b[(d_y[index] + Y_MAX) * x_lim + d_x[index] + X_MAX] = b; //printf("arr:%d\n",(y[index] + Y_MAX) * x_lim + x[index] + X_MAX); //printf("hello!\n"); } } struct Array array_init(struct Array arr){ int block = (X_MAX - X_MIN) * (Y_MAX - Y_MIN); int size = block * sizeof(int); //int * gp_arr_r; //int * gp_arr_g; //int * gp_arr_b; //arr.d_r = (int*)malloc(size); //arr.d_g = (int*)malloc(size); //arr.d_b = (int*)malloc(size); cudaMalloc((void **)&(arr.d_r),size); cudaMalloc((void **)&(arr.d_g),size); cudaMalloc((void **)&(arr.d_b),size); gp_array_list_init<<<block,1>>>(arr.d_r,ARR_R); gp_array_list_init<<<block,1>>>(arr.d_g,ARR_G); gp_array_list_init<<<block,1>>>(arr.d_b,ARR_B); // cudaMemcpy(arr.r, gp_arr_r, size, cudaMemcpyDeviceToHost); // cudaMemcpy(arr.g, gp_arr_g, size, cudaMemcpyDeviceToHost); // cudaMemcpy(arr.b, gp_arr_b, size, cudaMemcpyDeviceToHost); // cudaFree(gp_arr_r); // cudaFree(gp_arr_g); // cudaFree(gp_arr_b); return arr; } /* struct Array arr_set(struct Array arr,int x, int y, int color[3]){ int x_lim = X_MAX - X_MIN; if(y > Y_MAX || y <= Y_MIN || x >= X_MAX || x < X_MIN){ printf("Err: in arr, coordinate has exceded dimension\n"); printf("Err: x->%d y->%d\n",x,y); printf("Note: actual X_MAX is %d and Y_MIN is %d\n",X_MAX - 1, Y_MIN + 1); }else{ y = 0 - y; arr.r[(y + Y_MAX) * x_lim + x + X_MAX] = color[0]; arr.g[(y + Y_MAX) * x_lim + x + X_MAX] = color[1]; arr.b[(y + Y_MAX) * x_lim + x + X_MAX] = color[2]; } return arr; } /* void array_print(struct Array arr){ int size = (X_MAX - X_MIN) * (Y_MAX - Y_MIN); int i; for(i = 0;i < size; i++){ printf("r[%d] = %d\n",i,arr.r[i]); printf("g[%d] = %d\n",i,arr.g[i]); printf("b[%d] = %d\n",i,arr.b[i]); } } */ void array_push(struct Array arr, char * filename){ int y_lim = Y_MAX - Y_MIN; int x_lim = X_MAX - X_MIN; char line[20]; int * h_r; int * h_g; int * h_b; int block = (X_MAX - X_MIN) * (Y_MAX - Y_MIN); int size = block * sizeof(int); h_r = (int*)malloc(size); h_g = (int*)malloc(size); h_b = (int*)malloc(size); cudaMemcpy(h_r, arr.d_r, size, cudaMemcpyDeviceToHost); cudaMemcpy(h_g, arr.d_g, size, cudaMemcpyDeviceToHost); cudaMemcpy(h_b, arr.d_b, size, cudaMemcpyDeviceToHost); int i,j,file; file = open(filename, O_CREAT | O_TRUNC | O_WRONLY, 0644); snprintf(line, sizeof(line), "P3 %d %d 255\n",x_lim,y_lim); write (file, line, strlen(line)); for(i = 0; i < y_lim; i++){ for(j = 0; j < x_lim; j++){ snprintf(line, sizeof(line), "%d %d %d\n", h_r[i * y_lim + j], h_g[i * y_lim + j], h_b[i * y_lim + 
j]); write(file, line, strlen(line)); } } }
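In both versions of array_push the read stride is y_lim (h_r[i * y_lim + j]) while the kernels write the canvas with stride x_lim (row * x_lim + col); the two only coincide when the canvas is square. A host-side sketch of a row-major ASCII PPM writer using the kernel's stride; the write_ppm name and the stdio calls are illustrative, not from the original:

#include <stdio.h>

/* Writes an RGB canvas laid out as row * width + col, the same layout
 * gp_array_set uses, as an ASCII (P3) PPM file. */
static void write_ppm(const char *path, const int *r, const int *g,
                      const int *b, int width, int height) {
  FILE *f = fopen(path, "w");
  if (!f) return;
  fprintf(f, "P3 %d %d 255\n", width, height);
  for (int i = 0; i < height; i++)      /* rows */
    for (int j = 0; j < width; j++)     /* columns, stride = width */
      fprintf(f, "%d %d %d\n",
              r[i * width + j], g[i * width + j], b[i * width + j]);
  fclose(f);
}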
527c24d08584650bd9e40218248b5483ef5a3355.hip
// !!! This is a file automatically generated by hipify!!! #include <cstdio> #include <cassert> #include <hip/hip_runtime.h> #include <cusolverDn.h> #include <cusolverSp.h> #include <hipsparse.h> #include <rocblas.h> #include <vector> #include "mesh.h" #include "utils.h" #include "gpu_utils.h" ////////////// Converts dense matrix to CSR and solves linear system /////////////////// // function takes input of L, b and dim (order) // Returns soln overwritten on b // uses cuSolverSp and cuSparse void dnsspr_solve( float *L, float *b, int order, hipEvent_t start, hipEvent_t finish, float &tau) { hipsparseHandle_t handle = NULL; cusolverSpHandle_t handleS = NULL; hipStream_t stream = NULL; hipsparseMatDescr_t desc = NULL; hipsparseStatus_t status = HIPSPARSE_STATUS_SUCCESS; cusolverStatus_t status2 = CUSOLVER_STATUS_SUCCESS; hipError_t cudaStat1 = hipSuccess; const hipsparseDirection_t dir = HIPSPARSE_DIRECTION_ROW; // row major int* csrRowPtrL = NULL; int* csrColIndL = NULL; float* csrValL = NULL; int* nnzLrow; // number of non zeros per row // int nnzL; const float err = EPS; int reorder = 0; int singularity; // Setting up streams // cudaStat1 = hipStreamCreateWithFlags(&stream, hipStreamNonBlocking); assert(hipSuccess == cudaStat1); // creating cuSparse handle // status = hipsparseCreate(&handle); assert(HIPSPARSE_STATUS_SUCCESS == status); // creating cuSolver handle // status2 = cusolverSpCreate(&handleS); assert(HIPSPARSE_STATUS_SUCCESS == status2); // setting stream to cuSparse // status = hipsparseSetStream(handle, stream); assert(HIPSPARSE_STATUS_SUCCESS == status); // creating matrixx description // status = hipsparseCreateMatDescr(&desc); assert(HIPSPARSE_STATUS_SUCCESS == status); /* setting matrix description: 0-base ordering, lower fill for Cholesky, general format */ hipsparseSetMatIndexBase(desc, HIPSPARSE_INDEX_BASE_ZERO); hipsparseSetMatType(desc, HIPSPARSE_MATRIX_TYPE_GENERAL); hipsparseSetMatFillMode(desc, HIPSPARSE_FILL_MODE_LOWER); hipEventRecord(start,0); // allocating memory for CSR arrays // cudaStat1 = hipMalloc( (void**)&csrRowPtrL, sizeof(int)*(order+1)); assert(hipSuccess == cudaStat1); cudaStat1 = hipMalloc( (void**)&nnzLrow, sizeof(int)*(order)); assert(hipSuccess == cudaStat1); // count number of no n-zeros // status = hipsparseSnnz(handle, dir, order, order, desc, L, order, nnzLrow, &nnzL); assert(HIPSPARSE_STATUS_SUCCESS == status); // malloc remaining CSR array // cudaStat1 = hipMalloc( (void**)&csrValL, nnzL*sizeof(float)); assert(hipSuccess == cudaStat1); cudaStat1 = hipMalloc( (void**)&csrColIndL, nnzL*sizeof(float)); assert(hipSuccess == cudaStat1); // convert from dense to sparse // hipsparseSdense2csr(handle,order,order,desc,L,order,nnzLrow,csrValL,csrRowPtrL,csrColIndL); assert(HIPSPARSE_STATUS_SUCCESS == status); hipEventRecord(finish, 0); hipEventSynchronize(finish); hipEventElapsedTime(&tau, start, finish); // set stream to cuSolver // status2 = cusolverSpSetStream(handleS, stream); assert(CUSOLVER_STATUS_SUCCESS == status2); // solver using Cholesky factorisation // status2 = cusolverSpScsrlsvchol(handleS, order, nnzL, desc, csrValL, csrRowPtrL, csrColIndL, b, err, reorder, b, &singularity); assert(CUSOLVER_STATUS_SUCCESS == status2); // destroy handles, desc & stream // hipsparseDestroy(handle); cusolverSpDestroy(handleS); hipStreamDestroy(stream); hipsparseDestroyMatDescr(desc); } /////// ///////////////////// Solves linear system in CSR format ////////////////////////// // NOTE: see comments from fn above // void sparse_solve( float *valsL, int 
*rowPtrL, int *colPtrL, float *b, int order, int nnz) { cusolverSpHandle_t handle = NULL; hipStream_t stream = NULL; hipsparseMatDescr_t desc = NULL; hipsparseStatus_t status = HIPSPARSE_STATUS_SUCCESS; cusolverStatus_t status2 = CUSOLVER_STATUS_SUCCESS; hipError_t cudaStat1 = hipSuccess; const float err = EPS; int reorder = 0; int singularity; cudaStat1 = hipStreamCreateWithFlags(&stream, hipStreamNonBlocking); assert(hipSuccess == cudaStat1); status2 = cusolverSpCreate(&handle); assert(CUSOLVER_STATUS_SUCCESS == status2); status = hipsparseCreateMatDescr(&desc); assert(HIPSPARSE_STATUS_SUCCESS == status); hipsparseSetMatIndexBase(desc, HIPSPARSE_INDEX_BASE_ZERO); hipsparseSetMatType(desc, HIPSPARSE_MATRIX_TYPE_GENERAL); hipsparseSetMatFillMode(desc, HIPSPARSE_FILL_MODE_LOWER); status2 = cusolverSpSetStream(handle, stream); assert(CUSOLVER_STATUS_SUCCESS == status2); status2 = cusolverSpScsrlsvchol(handle, order, nnz, desc, valsL, rowPtrL, colPtrL, b, err, reorder, b, &singularity); assert(CUSOLVER_STATUS_SUCCESS == status2); cusolverSpDestroy(handle); hipStreamDestroy(stream); hipsparseDestroyMatDescr(desc); } //////// //////////////////// Solves linear system stored in dense format //////////////////// // function takes input of L, b and dim (order) // Returns soln overwritten on b // uses cuSolverDn void dense_solve(float *L, float *b, int order){ hipsolverDnHandle_t handle = NULL; hipStream_t stream = NULL; cusolverStatus_t status = CUSOLVER_STATUS_SUCCESS; hipError_t cudaStat1 = hipSuccess; const hipblasFillMode_t uplo = HIPBLAS_FILL_MODE_LOWER; const int nrhs = 1; float *buffer = NULL; int bufferSize = 0; int *info = NULL; int h_info = 0; // setting cuSolver (Dense) handle // status = hipsolverDnCreate(&handle); assert(CUSOLVER_STATUS_SUCCESS == status); cudaStat1 = hipStreamCreateWithFlags(&stream, hipStreamNonBlocking); assert(hipSuccess == cudaStat1); status = hipsolverDnSetStream(handle, stream); assert(CUSOLVER_STATUS_SUCCESS == status); // calculating buffer size needed for factorisation fn // status = hipsolverDnSpotrf_bufferSize(handle, uplo, order, L, order, &bufferSize); assert(CUSOLVER_STATUS_SUCCESS == status); // allocating space for buffer on GPU // cudaStat1 = hipMalloc( (void**)&info, sizeof(int)); assert(hipSuccess == cudaStat1); cudaStat1 = hipMalloc( (void**)&buffer, bufferSize*sizeof(float)); assert(hipSuccess == cudaStat1); hipMemset(info, 0, sizeof(int)); // applying Cholesky factorisation to matrix // status = hipsolverDnSpotrf(handle, uplo, order, L, order, buffer, bufferSize, info); cudaStat1 = hipDeviceSynchronize(); // sync needed since non-blocking streams assert(CUSOLVER_STATUS_SUCCESS == status); assert(hipSuccess == cudaStat1); hipMemcpy(&h_info, info, sizeof(int), hipMemcpyDeviceToHost); // solving linear system - overwrites existing b // status = hipsolverDnSpotrs(handle, uplo, order, nrhs, L, order, b, order, info); cudaStat1 = hipDeviceSynchronize(); assert(CUSOLVER_STATUS_SUCCESS == status); assert(hipSuccess == cudaStat1); hipsolverDnDestroy(handle); hipStreamDestroy(stream); } /////// /////////////// Function gets error using 2-norm ///////////////// // Calculated using cuBLAS void error_dot_prod(float *a, float *b, int n, float &x){ hipblasHandle_t handle; hipblasStatus_t status = HIPBLAS_STATUS_SUCCESS; const float alpha = -1.0; // creating cuBLAS handle // status = hipblasCreate(&handle); assert(status == HIPBLAS_STATUS_SUCCESS); // y = ax + y // // function sets b = b - a // status = hipblasSaxpy(handle, n, &alpha, a, 1, b, 1); assert(status 
== HIPBLAS_STATUS_SUCCESS); // gets <b,b> // status = hipblasSnrm2(handle, n, b, 1, &x); assert(status == HIPBLAS_STATUS_SUCCESS); // destroys handle // status = hipblasDestroy(handle); assert(status == HIPBLAS_STATUS_SUCCESS); } /////// /////////////// Function gets max value of array ///////////////// // Calculated using cuBLAS void array_max(double *a, int n, int &max){ hipblasHandle_t handle; hipblasStatus_t status = HIPBLAS_STATUS_SUCCESS; // creating cuBLAS handle // status = hipblasCreate(&handle); assert(status == HIPBLAS_STATUS_SUCCESS); // getting the (1-based) index of the max |value|; unit stride // status = hipblasIdamax(handle, n, a, 1, &max); assert(status == HIPBLAS_STATUS_SUCCESS); // destroys handle // status = hipblasDestroy(handle); assert(status == HIPBLAS_STATUS_SUCCESS); } //////// ////////////////////// Dummy kernel /////////////////////////// // To run to reduce the effect of the initial // kernel running slowly __global__ void dummy_kernel(int n){ int idx = blockIdx.x*blockDim.x + threadIdx.x; int idy = blockIdx.y*blockDim.y + threadIdx.y; int count = 0; if(idx < n && idy < n){ for(int i=0; i<n; i++) count++; } } ////// ////////////////////// Dummy kernel /////////////////////////// // To run to reduce the effect of the initial // kernel running slowly extern void dummy(float *dat, int n){ float *a, *b, *c, *d; hipError_t stat = hipSuccess; stat = hipSetDevice(k); assert(stat == hipSuccess); stat = hipMalloc( (void**)&a, n*sizeof(float)); assert(stat == hipSuccess); stat = hipMalloc( (void**)&b, n*sizeof(float)); assert(stat == hipSuccess); stat = hipMalloc( (void**)&c, n*sizeof(float)); assert(stat == hipSuccess); stat = hipMalloc( (void**)&d, n*sizeof(float)); assert(stat == hipSuccess); stat = hipMemcpy(a, dat, n*sizeof(float), hipMemcpyHostToDevice); assert(stat == hipSuccess); stat = hipMemcpy(b, dat, n*sizeof(float), hipMemcpyHostToDevice); assert(stat == hipSuccess); stat = hipMemcpy(c, dat, n*sizeof(float), hipMemcpyHostToDevice); assert(stat == hipSuccess); stat = hipMemcpy(d, dat, n*sizeof(float), hipMemcpyHostToDevice); assert(stat == hipSuccess); dim3 dimBlock(50, 10); dim3 dimGrid((n/dimBlock.x) + (!(n%dimBlock.x)?0:1), (n/dimBlock.y) + (!(n%dimBlock.y)?0:1)); hipLaunchKernelGGL(( dummy_kernel), dim3(dimGrid), dim3(dimBlock), 0, 0, n); hipFree(a); hipFree(b); hipFree(c); hipFree(d); } /////
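dense_solve takes device pointers and overwrites b with the solution of L x = b, so allocation and transfers are the caller's job. A minimal driver under those assumptions; the 2x2 SPD system is illustrative, and the sketch presumes this file (and its mesh/utils headers) links into the build:

#include <cstdio>
#include <hip/hip_runtime.h>

void dense_solve(float *L, float *b, int order);  // defined in the file above

int main() {
  // SPD system [[4,1],[1,3]] x = [1,2]; expected x = [1/11, 7/11].
  float hL[4] = {4.f, 1.f, 1.f, 3.f};  // symmetric, so storage order is moot
  float hb[2] = {1.f, 2.f};
  float *dL = nullptr, *db = nullptr;
  hipMalloc(&dL, sizeof(hL));
  hipMalloc(&db, sizeof(hb));
  hipMemcpy(dL, hL, sizeof(hL), hipMemcpyHostToDevice);
  hipMemcpy(db, hb, sizeof(hb), hipMemcpyHostToDevice);
  dense_solve(dL, db, 2);               // overwrites db with the solution
  hipMemcpy(hb, db, sizeof(hb), hipMemcpyDeviceToHost);
  std::printf("x = [%f, %f]\n", hb[0], hb[1]);  // ~[0.0909, 0.6364]
  hipFree(dL);
  hipFree(db);
  return 0;
}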
527c24d08584650bd9e40218248b5483ef5a3355.cu
#include <cstdio> #include <cassert> #include <cuda_runtime.h> #include <cusolverDn.h> #include <cusolverSp.h> #include <cusparse.h> #include <cublas_v2.h> #include <vector> #include "mesh.h" #include "utils.h" #include "gpu_utils.h" ////////////// Converts dense matrix to CSR and solves linear system /////////////////// // function takes input of L, b and dim (order) // Returns soln overwritten on b // uses cuSolverSp and cuSparse void dnsspr_solve( float *L, float *b, int order, cudaEvent_t start, cudaEvent_t finish, float &tau) { cusparseHandle_t handle = NULL; cusolverSpHandle_t handleS = NULL; cudaStream_t stream = NULL; cusparseMatDescr_t desc = NULL; cusparseStatus_t status = CUSPARSE_STATUS_SUCCESS; cusolverStatus_t status2 = CUSOLVER_STATUS_SUCCESS; cudaError_t cudaStat1 = cudaSuccess; const cusparseDirection_t dir = CUSPARSE_DIRECTION_ROW; // row major int* csrRowPtrL = NULL; int* csrColIndL = NULL; float* csrValL = NULL; int* nnzLrow; // number of non zeros per row // int nnzL; const float err = EPS; int reorder = 0; int singularity; // Setting up streams // cudaStat1 = cudaStreamCreateWithFlags(&stream, cudaStreamNonBlocking); assert(cudaSuccess == cudaStat1); // creating cuSparse handle // status = cusparseCreate(&handle); assert(CUSPARSE_STATUS_SUCCESS == status); // creating cuSolver handle // status2 = cusolverSpCreate(&handleS); assert(CUSPARSE_STATUS_SUCCESS == status2); // setting stream to cuSparse // status = cusparseSetStream(handle, stream); assert(CUSPARSE_STATUS_SUCCESS == status); // creating matrixx description // status = cusparseCreateMatDescr(&desc); assert(CUSPARSE_STATUS_SUCCESS == status); /* setting matrix description: 0-base ordering, lower fill for Cholesky, general format */ cusparseSetMatIndexBase(desc, CUSPARSE_INDEX_BASE_ZERO); cusparseSetMatType(desc, CUSPARSE_MATRIX_TYPE_GENERAL); cusparseSetMatFillMode(desc, CUSPARSE_FILL_MODE_LOWER); cudaEventRecord(start,0); // allocating memory for CSR arrays // cudaStat1 = cudaMalloc( (void**)&csrRowPtrL, sizeof(int)*(order+1)); assert(cudaSuccess == cudaStat1); cudaStat1 = cudaMalloc( (void**)&nnzLrow, sizeof(int)*(order)); assert(cudaSuccess == cudaStat1); // count number of no n-zeros // status = cusparseSnnz(handle, dir, order, order, desc, L, order, nnzLrow, &nnzL); assert(CUSPARSE_STATUS_SUCCESS == status); // malloc remaining CSR array // cudaStat1 = cudaMalloc( (void**)&csrValL, nnzL*sizeof(float)); assert(cudaSuccess == cudaStat1); cudaStat1 = cudaMalloc( (void**)&csrColIndL, nnzL*sizeof(float)); assert(cudaSuccess == cudaStat1); // convert from dense to sparse // cusparseSdense2csr(handle,order,order,desc,L,order,nnzLrow,csrValL,csrRowPtrL,csrColIndL); assert(CUSPARSE_STATUS_SUCCESS == status); cudaEventRecord(finish, 0); cudaEventSynchronize(finish); cudaEventElapsedTime(&tau, start, finish); // set stream to cuSolver // status2 = cusolverSpSetStream(handleS, stream); assert(CUSOLVER_STATUS_SUCCESS == status2); // solver using Cholesky factorisation // status2 = cusolverSpScsrlsvchol(handleS, order, nnzL, desc, csrValL, csrRowPtrL, csrColIndL, b, err, reorder, b, &singularity); assert(CUSOLVER_STATUS_SUCCESS == status2); // destroy handles, desc & stream // cusparseDestroy(handle); cusolverSpDestroy(handleS); cudaStreamDestroy(stream); cusparseDestroyMatDescr(desc); } /////// ///////////////////// Solves linear system in CSR format ////////////////////////// // NOTE: see comments from fn above // void sparse_solve( float *valsL, int *rowPtrL, int *colPtrL, float *b, int order, int nnz) { 
cusolverSpHandle_t handle = NULL; cudaStream_t stream = NULL; cusparseMatDescr_t desc = NULL; cusparseStatus_t status = CUSPARSE_STATUS_SUCCESS; cusolverStatus_t status2 = CUSOLVER_STATUS_SUCCESS; cudaError_t cudaStat1 = cudaSuccess; const float err = EPS; int reorder = 0; int singularity; cudaStat1 = cudaStreamCreateWithFlags(&stream, cudaStreamNonBlocking); assert(cudaSuccess == cudaStat1); status2 = cusolverSpCreate(&handle); assert(CUSOLVER_STATUS_SUCCESS == status2); status = cusparseCreateMatDescr(&desc); assert(CUSPARSE_STATUS_SUCCESS == status); cusparseSetMatIndexBase(desc, CUSPARSE_INDEX_BASE_ZERO); cusparseSetMatType(desc, CUSPARSE_MATRIX_TYPE_GENERAL); cusparseSetMatFillMode(desc, CUSPARSE_FILL_MODE_LOWER); status2 = cusolverSpSetStream(handle, stream); assert(CUSOLVER_STATUS_SUCCESS == status2); status2 = cusolverSpScsrlsvchol(handle, order, nnz, desc, valsL, rowPtrL, colPtrL, b, err, reorder, b, &singularity); assert(CUSOLVER_STATUS_SUCCESS == status2); cusolverSpDestroy(handle); cudaStreamDestroy(stream); cusparseDestroyMatDescr(desc); } //////// //////////////////// Solves linear system stored in dense format //////////////////// // function takes input of L, b and dim (order) // Returns soln overwritten on b // uses cuSolverDn void dense_solve(float *L, float *b, int order){ cusolverDnHandle_t handle = NULL; cudaStream_t stream = NULL; cusolverStatus_t status = CUSOLVER_STATUS_SUCCESS; cudaError_t cudaStat1 = cudaSuccess; const cublasFillMode_t uplo = CUBLAS_FILL_MODE_LOWER; const int nrhs = 1; float *buffer = NULL; int bufferSize = 0; int *info = NULL; int h_info = 0; // setting cuSolver (Dense) handle // status = cusolverDnCreate(&handle); assert(CUSOLVER_STATUS_SUCCESS == status); cudaStat1 = cudaStreamCreateWithFlags(&stream, cudaStreamNonBlocking); assert(cudaSuccess == cudaStat1); status = cusolverDnSetStream(handle, stream); assert(CUSOLVER_STATUS_SUCCESS == status); // calculating buffer size needed for factorisation fn // status = cusolverDnSpotrf_bufferSize(handle, uplo, order, L, order, &bufferSize); assert(CUSOLVER_STATUS_SUCCESS == status); // allocating space for buffer on GPU // cudaStat1 = cudaMalloc( (void**)&info, sizeof(int)); assert(cudaSuccess == cudaStat1); cudaStat1 = cudaMalloc( (void**)&buffer, bufferSize*sizeof(float)); assert(cudaSuccess == cudaStat1); cudaMemset(info, 0, sizeof(int)); // applying Cholesky factorisation to matrix // status = cusolverDnSpotrf(handle, uplo, order, L, order, buffer, bufferSize, info); cudaStat1 = cudaDeviceSynchronize(); // sync needed since non-blocking streams assert(CUSOLVER_STATUS_SUCCESS == status); assert(cudaSuccess == cudaStat1); cudaMemcpy(&h_info, info, sizeof(int), cudaMemcpyDeviceToHost); // solving linear system - overwrites existing b // status = cusolverDnSpotrs(handle, uplo, order, nrhs, L, order, b, order, info); cudaStat1 = cudaDeviceSynchronize(); assert(CUSOLVER_STATUS_SUCCESS == status); assert(cudaSuccess == cudaStat1); cusolverDnDestroy(handle); cudaStreamDestroy(stream); } /////// /////////////// Function gets error using 2-norm ///////////////// // Calculated using cuBLAS void error_dot_prod(float *a, float *b, int n, float &x){ cublasHandle_t handle; cublasStatus_t status = CUBLAS_STATUS_SUCCESS; const float alpha = -1.0; // creating cuBLAS handle // status = cublasCreate(&handle); assert(status == CUBLAS_STATUS_SUCCESS); // y = ax + y // // function sets b = b - a // status = cublasSaxpy(handle, n, &alpha, a, 1, b, 1); assert(status == CUBLAS_STATUS_SUCCESS); // gets <b,b> // status = 
cublasSnrm2(handle, n, b, 1, &x); assert(status == CUBLAS_STATUS_SUCCESS); // destroys handle // status = cublasDestroy(handle); assert(status == CUBLAS_STATUS_SUCCESS); } /////// /////////////// Function gets max value of array ///////////////// // Calculated using cuBLAS void array_max(double *a, int n, int &max){ cublasHandle_t handle; cublasStatus_t status = CUBLAS_STATUS_SUCCESS; // creating cuBLAS handle // status = cublasCreate(&handle); assert(status == CUBLAS_STATUS_SUCCESS); // getting the (1-based) index of the max |value|; unit stride // status = cublasIdamax(handle, n, a, 1, &max); assert(status == CUBLAS_STATUS_SUCCESS); // destroys handle // status = cublasDestroy(handle); assert(status == CUBLAS_STATUS_SUCCESS); } //////// ////////////////////// Dummy kernel /////////////////////////// // To run to reduce the effect of the initial // kernel running slowly __global__ void dummy_kernel(int n){ int idx = blockIdx.x*blockDim.x + threadIdx.x; int idy = blockIdx.y*blockDim.y + threadIdx.y; int count = 0; if(idx < n && idy < n){ for(int i=0; i<n; i++) count++; } } ////// ////////////////////// Dummy kernel /////////////////////////// // To run to reduce the effect of the initial // kernel running slowly extern void dummy(float *dat, int n){ float *a, *b, *c, *d; cudaError_t stat = cudaSuccess; stat = cudaSetDevice(k); assert(stat == cudaSuccess); stat = cudaMalloc( (void**)&a, n*sizeof(float)); assert(stat == cudaSuccess); stat = cudaMalloc( (void**)&b, n*sizeof(float)); assert(stat == cudaSuccess); stat = cudaMalloc( (void**)&c, n*sizeof(float)); assert(stat == cudaSuccess); stat = cudaMalloc( (void**)&d, n*sizeof(float)); assert(stat == cudaSuccess); stat = cudaMemcpy(a, dat, n*sizeof(float), cudaMemcpyHostToDevice); assert(stat == cudaSuccess); stat = cudaMemcpy(b, dat, n*sizeof(float), cudaMemcpyHostToDevice); assert(stat == cudaSuccess); stat = cudaMemcpy(c, dat, n*sizeof(float), cudaMemcpyHostToDevice); assert(stat == cudaSuccess); stat = cudaMemcpy(d, dat, n*sizeof(float), cudaMemcpyHostToDevice); assert(stat == cudaSuccess); dim3 dimBlock(50, 10); dim3 dimGrid((n/dimBlock.x) + (!(n%dimBlock.x)?0:1), (n/dimBlock.y) + (!(n%dimBlock.y)?0:1)); dummy_kernel<<<dimGrid, dimBlock>>>(n); cudaFree(a); cudaFree(b); cudaFree(c); cudaFree(d); } /////
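error_dot_prod forms b := b - a with an AXPY and then takes the 2-norm, so it returns ||b - a||_2 and destroys b as a side effect. The same quantity in plain C++ as a checking aid, a sketch that leaves both inputs intact:

#include <cmath>
#include <cstddef>

// Reference for error_dot_prod: ||a - b||_2, without modifying either input.
float l2_error(const float *a, const float *b, std::size_t n) {
  double acc = 0.0;  // accumulate in double for a little extra stability
  for (std::size_t i = 0; i < n; ++i) {
    double d = static_cast<double>(b[i]) - static_cast<double>(a[i]);
    acc += d * d;
  }
  return static_cast<float>(std::sqrt(acc));
}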
5a3b8fac8f6f39b150b7b9761110f7f1cdc77d1a.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "caffe2/operators/utility_ops.h" #include <thrust/device_vector.h> #include <thrust/sequence.h> #include <thrust/sort.h> #include <thrust/system/hip/execution_policy.h> #include <thrust/unique.h> #include "caffe2/core/context_gpu.h" #include "caffe2/operators/flatten_op.h" #include "caffe2/utils/math.h" namespace caffe2 { template <> bool WeightedSumOp<CUDAContext>::RunOnDevice() { if (Input(0).IsType<float>()) { return DoRunWithType<float>(); } else if (Input(0).IsType<at::Half>()) { return DoRunWithType<at::Half>(); } else { CAFFE_THROW("Unsupported inputs"); } return false; } template <> bool SumOp<CUDAContext>::RunOnDevice() { return DispatchHelper<TensorTypes<float, at::Half, int32_t, int64_t>>::call( this, Input(0)); } REGISTER_CUDA_OPERATOR(Print, PrintOp<CUDAContext>); REGISTER_CUDA_OPERATOR(Flatten, FlattenOp<CUDAContext>); REGISTER_CUDA_OPERATOR(FlattenToVec, FlattenToVecOp<CUDAContext>); REGISTER_CUDA_OPERATOR(Alias, AliasOp<CUDAContext>); REGISTER_CUDA_OPERATOR(ResizeLike, ResizeLikeOp<CUDAContext>); REGISTER_CUDA_OPERATOR(Sum, SumOp<CUDAContext>); REGISTER_CUDA_OPERATOR(WeightedSum, WeightedSumOp<CUDAContext>); REGISTER_CUDA_OPERATOR(EnsureDense, EnsureDenseOp<CUDAContext>); __global__ void NanCheckKernel(int N, const float* X, bool* result) { bool has_nan = false; CUDA_1D_KERNEL_LOOP(i, N) { // Note: we have no need to do early return, since only if this fails // will we not need to inspect all elements. No need to optimize the // case that will fail. has_nan = has_nan || isnan(X[i]) || isinf(X[i]); } __syncthreads(); if (has_nan) { result[0] = true; } } template <> bool NanCheckOp<CUDAContext>::RunOnDevice() { auto& X = Input(0); auto* Y = Output(0); const size_t N = X.numel(); const float* data_ptr = X.data<float>(); ReinitializeTensor(&scratch_, {1}, at::dtype<bool>().device(CUDA)); math::Set<bool, CUDAContext>( 1, false, scratch_.mutable_data<bool>(), &context_); hipLaunchKernelGGL(( NanCheckKernel), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), N, X.data<float>(), scratch_.mutable_data<bool>()); C10_HIP_KERNEL_LAUNCH_CHECK(); bool result = false; { std::lock_guard<std::mutex> lock(CUDAContext::mutex()); CUDA_ENFORCE(hipMemcpyAsync( &result, scratch_.raw_data(), 1, hipMemcpyDefault, context_.cuda_stream())); } // Note: we must synchronize here so we can inspect the result context_.FinishDeviceComputation(); // Print out diagnostic info if we have a NaN or inf if (result) { std::cerr << "Tensor contained NaN or inf: " << this->debug_def().input(0) << std::endl; for (int j = 0; j < InputSize(); j++) { Tensor cpu_X(CPU); cpu_X.ResizeLike(Input(j)); // Hack to cause allocation happen here, so it won't happen // when we do CopyFrom. We need the mutex then because host->gpu // copies seem to possibly lock with NCCL. cpu_X.mutable_data<float>(); { std::lock_guard<std::mutex> lock(CUDAContext::mutex()); cpu_X.CopyFrom(Input(j)); // sync copy } std::cerr << "Input tensor: " << j << ": [" << this->debug_def().input(j) << "]" << std::endl; tensorPrinter_.Print<float>(cpu_X); if (j == 0) { std::cerr << "NaN idxs:" << std::endl; auto* cpu_X_data = cpu_X.data<float>(); for (size_t i = 0; i < cpu_X.numel(); ++i) { if (std::isnan(cpu_X_data[i]) || std::isinf(cpu_X_data[i])) { std::cerr << i << " "; } } } std::cerr << std::endl; } return false; } // This op should act as an identity matrix if we don't find any NaNs/infs. 
// Copy over the data if we are not doing this in-place. if (&X != Y) { Y->CopyFrom(X, true /*async*/); } return true; } REGISTER_CUDA_OPERATOR(NanCheck, NanCheckOp<CUDAContext>); /** * @brief Update slices of Y in-place with a batch of weighted X's. * Y[idx] = alpha[b] * X[b][i] + Y[idx] * i=0,...,N-1 * b=0,...,B-1 * idx=Indices[i] */ template <typename T_INDEX> __global__ void AxpySliceKernel( const float* weight0, const int64_t N, const int64_t B, const int64_t slice_size, const float** alpha, const float** X, const T_INDEX* Indices, float* Y, const int64_t M) { // This implementation requires that the first weight is 1.0 CUDA_KERNEL_ASSERT(weight0[0] == 1.0); for (int i = blockIdx.x; i < N; i += gridDim.x) { T_INDEX idx = Indices[i]; float* y_offset = Y + (idx * slice_size); for (int b = 0; b < B; b++) { float a = *alpha[b]; const float* x_offset = X[b] + (i * slice_size); for (int j = threadIdx.x; j < slice_size; j += blockDim.x) { atomicAdd(&y_offset[j], a * x_offset[j]); } } } } // this kernel is a custom version of AxpySliceKernel // to be used when there is only one weighted X to update // slice of Y. template <typename T_INDEX> __global__ void AxpySliceKernel2( const float* weight0, const int64_t N, const int64_t slice_size, const float* alpha, const float* X, const T_INDEX* Indices, float* Y, const int64_t M) { // This implementation requires that the first weight is 1.0 CUDA_KERNEL_ASSERT(weight0[0] == 1.0); for (int i = blockIdx.x; i < N; i += gridDim.x) { T_INDEX idx = Indices[i]; float* y_offset = Y + (idx * slice_size); for (int j = threadIdx.x; j < slice_size; j += blockDim.x) { atomicAdd(&y_offset[j], alpha[0] * X[(i * slice_size) + j]); } } } template <> bool ScatterWeightedSumOp<float, CUDAContext>::RunOnDevice() { return DispatchHelper<TensorTypes<int32_t, int64_t>>::call(this, Input(2)); } template <> template <typename Index> bool ScatterWeightedSumOp<float, CUDAContext>::DoRunWithType() { CAFFE_ENFORCE_EQ(InputSize() % 2, 1); auto& X0 = Input(0); auto& weight0 = Input(1); auto& indices = Input(2); auto* output = Output(0); CAFFE_ENFORCE_EQ(&X0, output, "In place operation is required"); CAFFE_ENFORCE_GT(X0.numel(), 0); CAFFE_ENFORCE_GT(X0.dim(), 0, "X0 has to be at least the vector"); CAFFE_ENFORCE_EQ(weight0.numel(), 1); int64_t M = X0.numel(); int64_t N = X0.dim(0); int64_t K = indices.numel(); int64_t block_size = M / N; float* data = output->template mutable_data<float>(); const int64_t B = (InputSize() - 3) / 2; if (B > 1) { // In order to have all device pointers of x_i (and weight_i similarly) // consecutively in device memory, copy pointers to a host vector and then // copy back into a device array. 
ReinitializeTensor(&x_data_host_, {B}, at::dtype<float*>().device(CPU)); ReinitializeTensor(&weights_host_, {B}, at::dtype<float*>().device(CPU)); ReinitializeTensor(&x_data_device_, {B}, at::dtype<float*>().device(CUDA)); ReinitializeTensor(&weights_device_, {B}, at::dtype<float*>().device(CUDA)); float** x_data_host = x_data_host_.mutable_data<float*>(); float** weights_host = weights_host_.mutable_data<float*>(); float** x_data_device = x_data_device_.mutable_data<float*>(); float** weights_device = weights_device_.mutable_data<float*>(); for (int inp = 3; inp < InputSize(); inp += 2) { int idx = (inp - 3) / 2; x_data_host[idx] = static_cast<float*>(Input(inp).raw_data()); weights_host[idx] = static_cast<float*>(Input(inp + 1).raw_data()); } context_.Copy<float*, CPUContext, CUDAContext>( B, x_data_host, x_data_device); context_.Copy<float*, CPUContext, CUDAContext>( B, weights_host, weights_device); hipLaunchKernelGGL(( AxpySliceKernel), dim3(std::min<int64_t>(K, CAFFE_MAXIMUM_NUM_BLOCKS)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), weight0.template data<float>(), K, B, block_size, const_cast<const float**>(weights_device), const_cast<const float**>(x_data_device), indices.template data<Index>(), data, M); C10_HIP_KERNEL_LAUNCH_CHECK(); } else { // when only one input exists to update data buffer, // avoid copying pointers to device array to prevent // copy overhead auto& X1 = Input(3); auto& weight1 = Input(4); hipLaunchKernelGGL(( AxpySliceKernel2), dim3(std::min<int64_t>(K, CAFFE_MAXIMUM_NUM_BLOCKS)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), weight0.template data<float>(), K, block_size, weight1.template data<float>(), X1.template data<float>(), indices.template data<Index>(), data, M); C10_HIP_KERNEL_LAUNCH_CHECK(); } return true; } REGISTER_CUDA_OPERATOR( ScatterWeightedSum, ScatterWeightedSumOp<float, CUDAContext>); namespace { template <typename Index, typename T> __global__ void scatter_assign_kernel( T* data, const Index* idxs, const T* slicesData, int64_t N, int64_t K, int64_t block_size) { for (int64_t i = blockIdx.x; i < K; i += gridDim.x) { Index idx = idxs[i]; CUDA_KERNEL_ASSERT(0 <= idx && idx < N); const T* src = slicesData + block_size * i; T* dest = data + block_size * idx; for (int64_t j = threadIdx.x; j < block_size; j += blockDim.x) { dest[j] = src[j]; } } } } // namespace template <> template <typename Index, typename T> void ScatterAssignOp<CUDAContext>::DoScatterAssign( T* data, const Index* idxs, const T* slicesData, int64_t N, int64_t K, int64_t block_size) { hipLaunchKernelGGL(( scatter_assign_kernel), dim3(::min(K, static_cast<int64_t>(CAFFE_MAXIMUM_NUM_BLOCKS))), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), data, idxs, slicesData, N, K, block_size); C10_HIP_KERNEL_LAUNCH_CHECK(); } REGISTER_CUDA_OPERATOR(ScatterAssign, ScatterAssignOp<CUDAContext>); REGISTER_CUDA_OPERATOR(Size, SizeOp<CUDAContext>); template <typename T> __global__ void RangeKernel(const int n, T* Y, T offset, T step) { CUDA_1D_KERNEL_LOOP(index, n) { Y[index] = index * step + offset; } } template <> template <typename T> bool RangeOp<CUDAContext>::DoRunOnDevice( const T& start, const T& step, Tensor* output) { int N = output->numel(); hipLaunchKernelGGL(( RangeKernel), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), N, output->template mutable_data<T>(), start, step); C10_HIP_KERNEL_LAUNCH_CHECK(); return true; } REGISTER_CUDA_OPERATOR(Range, RangeOp<CUDAContext>); } // namespace caffe2
5a3b8fac8f6f39b150b7b9761110f7f1cdc77d1a.cu
#include "caffe2/operators/utility_ops.h" #include <thrust/device_vector.h> #include <thrust/sequence.h> #include <thrust/sort.h> #include <thrust/system/cuda/execution_policy.h> #include <thrust/unique.h> #include "caffe2/core/context_gpu.h" #include "caffe2/operators/flatten_op.h" #include "caffe2/utils/math.h" namespace caffe2 { template <> bool WeightedSumOp<CUDAContext>::RunOnDevice() { if (Input(0).IsType<float>()) { return DoRunWithType<float>(); } else if (Input(0).IsType<at::Half>()) { return DoRunWithType<at::Half>(); } else { CAFFE_THROW("Unsupported inputs"); } return false; } template <> bool SumOp<CUDAContext>::RunOnDevice() { return DispatchHelper<TensorTypes<float, at::Half, int32_t, int64_t>>::call( this, Input(0)); } REGISTER_CUDA_OPERATOR(Print, PrintOp<CUDAContext>); REGISTER_CUDA_OPERATOR(Flatten, FlattenOp<CUDAContext>); REGISTER_CUDA_OPERATOR(FlattenToVec, FlattenToVecOp<CUDAContext>); REGISTER_CUDA_OPERATOR(Alias, AliasOp<CUDAContext>); REGISTER_CUDA_OPERATOR(ResizeLike, ResizeLikeOp<CUDAContext>); REGISTER_CUDA_OPERATOR(Sum, SumOp<CUDAContext>); REGISTER_CUDA_OPERATOR(WeightedSum, WeightedSumOp<CUDAContext>); REGISTER_CUDA_OPERATOR(EnsureDense, EnsureDenseOp<CUDAContext>); __global__ void NanCheckKernel(int N, const float* X, bool* result) { bool has_nan = false; CUDA_1D_KERNEL_LOOP(i, N) { // Note: we have no need to do early return, since only if this fails // will we not need to inspect all elements. No need to optimize the // case that will fail. has_nan = has_nan || isnan(X[i]) || isinf(X[i]); } __syncthreads(); if (has_nan) { result[0] = true; } } template <> bool NanCheckOp<CUDAContext>::RunOnDevice() { auto& X = Input(0); auto* Y = Output(0); const size_t N = X.numel(); const float* data_ptr = X.data<float>(); ReinitializeTensor(&scratch_, {1}, at::dtype<bool>().device(CUDA)); math::Set<bool, CUDAContext>( 1, false, scratch_.mutable_data<bool>(), &context_); NanCheckKernel<<< CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( N, X.data<float>(), scratch_.mutable_data<bool>()); C10_CUDA_KERNEL_LAUNCH_CHECK(); bool result = false; { std::lock_guard<std::mutex> lock(CUDAContext::mutex()); CUDA_ENFORCE(cudaMemcpyAsync( &result, scratch_.raw_data(), 1, cudaMemcpyDefault, context_.cuda_stream())); } // Note: we must synchronize here so we can inspect the result context_.FinishDeviceComputation(); // Print out diagnostic info if we have a NaN or inf if (result) { std::cerr << "Tensor contained NaN or inf: " << this->debug_def().input(0) << std::endl; for (int j = 0; j < InputSize(); j++) { Tensor cpu_X(CPU); cpu_X.ResizeLike(Input(j)); // Hack to cause allocation happen here, so it won't happen // when we do CopyFrom. We need the mutex then because host->gpu // copies seem to possibly lock with NCCL. cpu_X.mutable_data<float>(); { std::lock_guard<std::mutex> lock(CUDAContext::mutex()); cpu_X.CopyFrom(Input(j)); // sync copy } std::cerr << "Input tensor: " << j << ": [" << this->debug_def().input(j) << "]" << std::endl; tensorPrinter_.Print<float>(cpu_X); if (j == 0) { std::cerr << "NaN idxs:" << std::endl; auto* cpu_X_data = cpu_X.data<float>(); for (size_t i = 0; i < cpu_X.numel(); ++i) { if (std::isnan(cpu_X_data[i]) || std::isinf(cpu_X_data[i])) { std::cerr << i << " "; } } } std::cerr << std::endl; } return false; } // This op should act as an identity matrix if we don't find any NaNs/infs. // Copy over the data if we are not doing this in-place. 
if (&X != Y) { Y->CopyFrom(X, true /*async*/); } return true; } REGISTER_CUDA_OPERATOR(NanCheck, NanCheckOp<CUDAContext>); /** * @brief Update slices of Y in-place with a batch of weighted X's. * Y[idx] = alpha[b] * X[b][i] + Y[idx] * i=0,...,N-1 * b=0,...,B-1 * idx=Indices[i] */ template <typename T_INDEX> __global__ void AxpySliceKernel( const float* weight0, const int64_t N, const int64_t B, const int64_t slice_size, const float** alpha, const float** X, const T_INDEX* Indices, float* Y, const int64_t M) { // This implementation requires that the first weight is 1.0 CUDA_KERNEL_ASSERT(weight0[0] == 1.0); for (int i = blockIdx.x; i < N; i += gridDim.x) { T_INDEX idx = Indices[i]; float* y_offset = Y + (idx * slice_size); for (int b = 0; b < B; b++) { float a = *alpha[b]; const float* x_offset = X[b] + (i * slice_size); for (int j = threadIdx.x; j < slice_size; j += blockDim.x) { atomicAdd(&y_offset[j], a * x_offset[j]); } } } } // this kernel is a custom version of AxpySliceKernel // to be used when there is only one weighted X to update // slice of Y. template <typename T_INDEX> __global__ void AxpySliceKernel2( const float* weight0, const int64_t N, const int64_t slice_size, const float* alpha, const float* X, const T_INDEX* Indices, float* Y, const int64_t M) { // This implementation requires that the first weight is 1.0 CUDA_KERNEL_ASSERT(weight0[0] == 1.0); for (int i = blockIdx.x; i < N; i += gridDim.x) { T_INDEX idx = Indices[i]; float* y_offset = Y + (idx * slice_size); for (int j = threadIdx.x; j < slice_size; j += blockDim.x) { atomicAdd(&y_offset[j], alpha[0] * X[(i * slice_size) + j]); } } } template <> bool ScatterWeightedSumOp<float, CUDAContext>::RunOnDevice() { return DispatchHelper<TensorTypes<int32_t, int64_t>>::call(this, Input(2)); } template <> template <typename Index> bool ScatterWeightedSumOp<float, CUDAContext>::DoRunWithType() { CAFFE_ENFORCE_EQ(InputSize() % 2, 1); auto& X0 = Input(0); auto& weight0 = Input(1); auto& indices = Input(2); auto* output = Output(0); CAFFE_ENFORCE_EQ(&X0, output, "In place operation is required"); CAFFE_ENFORCE_GT(X0.numel(), 0); CAFFE_ENFORCE_GT(X0.dim(), 0, "X0 has to be at least the vector"); CAFFE_ENFORCE_EQ(weight0.numel(), 1); int64_t M = X0.numel(); int64_t N = X0.dim(0); int64_t K = indices.numel(); int64_t block_size = M / N; float* data = output->template mutable_data<float>(); const int64_t B = (InputSize() - 3) / 2; if (B > 1) { // In order to have all device pointers of x_i (and weight_i similarly) // consecutively in device memory, copy pointers to a host vector and then // copy back into a device array. 
ReinitializeTensor(&x_data_host_, {B}, at::dtype<float*>().device(CPU)); ReinitializeTensor(&weights_host_, {B}, at::dtype<float*>().device(CPU)); ReinitializeTensor(&x_data_device_, {B}, at::dtype<float*>().device(CUDA)); ReinitializeTensor(&weights_device_, {B}, at::dtype<float*>().device(CUDA)); float** x_data_host = x_data_host_.mutable_data<float*>(); float** weights_host = weights_host_.mutable_data<float*>(); float** x_data_device = x_data_device_.mutable_data<float*>(); float** weights_device = weights_device_.mutable_data<float*>(); for (int inp = 3; inp < InputSize(); inp += 2) { int idx = (inp - 3) / 2; x_data_host[idx] = static_cast<float*>(Input(inp).raw_data()); weights_host[idx] = static_cast<float*>(Input(inp + 1).raw_data()); } context_.Copy<float*, CPUContext, CUDAContext>( B, x_data_host, x_data_device); context_.Copy<float*, CPUContext, CUDAContext>( B, weights_host, weights_device); AxpySliceKernel<<< std::min<int64_t>(K, CAFFE_MAXIMUM_NUM_BLOCKS), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( weight0.template data<float>(), K, B, block_size, const_cast<const float**>(weights_device), const_cast<const float**>(x_data_device), indices.template data<Index>(), data, M); C10_CUDA_KERNEL_LAUNCH_CHECK(); } else { // when only one input exists to update data buffer, // avoid copying pointers to device array to prevent // copy overhead auto& X1 = Input(3); auto& weight1 = Input(4); AxpySliceKernel2<<< std::min<int64_t>(K, CAFFE_MAXIMUM_NUM_BLOCKS), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( weight0.template data<float>(), K, block_size, weight1.template data<float>(), X1.template data<float>(), indices.template data<Index>(), data, M); C10_CUDA_KERNEL_LAUNCH_CHECK(); } return true; } REGISTER_CUDA_OPERATOR( ScatterWeightedSum, ScatterWeightedSumOp<float, CUDAContext>); namespace { template <typename Index, typename T> __global__ void scatter_assign_kernel( T* data, const Index* idxs, const T* slicesData, int64_t N, int64_t K, int64_t block_size) { for (int64_t i = blockIdx.x; i < K; i += gridDim.x) { Index idx = idxs[i]; CUDA_KERNEL_ASSERT(0 <= idx && idx < N); const T* src = slicesData + block_size * i; T* dest = data + block_size * idx; for (int64_t j = threadIdx.x; j < block_size; j += blockDim.x) { dest[j] = src[j]; } } } } // namespace template <> template <typename Index, typename T> void ScatterAssignOp<CUDAContext>::DoScatterAssign( T* data, const Index* idxs, const T* slicesData, int64_t N, int64_t K, int64_t block_size) { scatter_assign_kernel<<< std::min(K, static_cast<int64_t>(CAFFE_MAXIMUM_NUM_BLOCKS)), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>(data, idxs, slicesData, N, K, block_size); C10_CUDA_KERNEL_LAUNCH_CHECK(); } REGISTER_CUDA_OPERATOR(ScatterAssign, ScatterAssignOp<CUDAContext>); REGISTER_CUDA_OPERATOR(Size, SizeOp<CUDAContext>); template <typename T> __global__ void RangeKernel(const int n, T* Y, T offset, T step) { CUDA_1D_KERNEL_LOOP(index, n) { Y[index] = index * step + offset; } } template <> template <typename T> bool RangeOp<CUDAContext>::DoRunOnDevice( const T& start, const T& step, Tensor* output) { int N = output->numel(); RangeKernel<<< CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( N, output->template mutable_data<T>(), start, step); C10_CUDA_KERNEL_LAUNCH_CHECK(); return true; } REGISTER_CUDA_OPERATOR(Range, RangeOp<CUDAContext>); } // namespace caffe2
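The AxpySliceKernel docstring fixes the semantics: slice Indices[i] of Y accumulates alpha[b] * X[b][i] for every weighted input, with atomicAdd absorbing duplicate indices. A CPU reference of that update, a sketch assuming weight0 is 1.0 (as the kernels assert) and that Y is sized to cover the largest index:

#include <cstddef>
#include <cstdint>
#include <vector>

// Y[Indices[i]] += alpha[b] * X[b][i] over slices of slice_size floats.
// Duplicate indices accumulate, matching the kernels' atomicAdd.
void scatter_weighted_sum_ref(std::vector<float> &Y,
                              const std::vector<int64_t> &indices,
                              const std::vector<const float *> &X,
                              const std::vector<float> &alpha,
                              int64_t slice_size) {
  for (std::size_t i = 0; i < indices.size(); ++i) {
    float *y = Y.data() + indices[i] * slice_size;
    for (std::size_t b = 0; b < X.size(); ++b) {
      const float *x = X[b] + i * slice_size;
      for (int64_t j = 0; j < slice_size; ++j) y[j] += alpha[b] * x[j];
    }
  }
}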
9d85aba513973488595588cd9a80eb34de1c50bd.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Based heavily on https://developer.nvidia.com/blog/cuda-refresher-cuda-programming-model/ #include <stdio.h> const int N = 1024; const int blocksize = 16; __global__ void add_matrix( float *a, float *b, float *c, int N) { int i = blockIdx.x * blockDim.x + threadIdx.x; // blockIdx, blockDim and threadIdx are predefined int j = blockIdx.y * blockDim.y + threadIdx.y; // variables - initialised from meta-arguments int index = i + j*N; if ( i < N && j < N ) // Keep indices in range c[index] = a[index] + b[index]; } int main(void){ const int size = N*N*sizeof(float); float *a ; float *b; float *c ; float maxError = 0.0f; hipMallocManaged( (void**)&a, size ); hipMallocManaged( (void**)&b, size ); hipMallocManaged( (void**)&c, size ); for ( int i = 0; i < N*N; ++i ) { a[i] = 1.0f; b[i] = 3.5f; } dim3 dimBlock( blocksize, blocksize ); // dim3 structure to deal with 1D, 2D or 3D thread collections. dim3 dimGrid( N/dimBlock.x, N/dimBlock.y); // dimBlock.x - first dimension, dimBlock.y - second dimension // dimBlock.z for third dimension (not used) hipLaunchKernelGGL(( add_matrix), dim3(dimGrid), dim3(dimBlock), 0, 0, a, b, c, N); // Note meta arguments that pass information on // Number of thread groups (Grid) and number of // threads in each group (Block). // Wait for GPU to finish before accessing on host - major source of errors hipDeviceSynchronize(); for (int j = 0; j < N; j++){ for (int i = 0; i < N;i++) { maxError = fmax(maxError, fabs(c[i+j*N]-4.5f)); } } printf("Max error: %.16f\n", maxError ); hipFree( a ); hipFree( b ); hipFree( c ); // CLEAN UP, RETURN return 0; }
9d85aba513973488595588cd9a80eb34de1c50bd.cu
// Based heavily on https://developer.nvidia.com/blog/cuda-refresher-cuda-programming-model/ #include <stdio.h> const int N = 1024; const int blocksize = 16; __global__ void add_matrix( float *a, float *b, float *c, int N) { int i = blockIdx.x * blockDim.x + threadIdx.x; // blockIdx, blockDim and threadIdx are predefined int j = blockIdx.y * blockDim.y + threadIdx.y; // variables - initialised from meta-arguments int index = i + j*N; if ( i < N && j < N ) // Keep indices in range c[index] = a[index] + b[index]; } int main(void){ const int size = N*N*sizeof(float); float *a ; float *b; float *c ; float maxError = 0.0f; cudaMallocManaged( (void**)&a, size ); cudaMallocManaged( (void**)&b, size ); cudaMallocManaged( (void**)&c, size ); for ( int i = 0; i < N*N; ++i ) { a[i] = 1.0f; b[i] = 3.5f; } dim3 dimBlock( blocksize, blocksize ); // dim3 structure to deal with 1D, 2D or 3D thread collections. dim3 dimGrid( N/dimBlock.x, N/dimBlock.y); // dimBlock.x - first dimension, dimBlock.y - second dimension // dimBlock.z for third dimension (not used) add_matrix<<<dimGrid, dimBlock>>>( a, b, c, N); // Note meta arguments that pass information on // Number of thread groups (Grid) and number of // threads in each group (Block). // Wait for GPU to finish before accessing on host - major source of errors cudaDeviceSynchronize(); for (int j = 0; j < N; j++){ for (int i = 0; i < N;i++) { maxError = fmax(maxError, fabs(c[i+j*N]-4.5f)); } } printf("Max error: %.16f\n", maxError ); cudaFree( a ); cudaFree( b ); cudaFree( c ); // CLEAN UP, RETURN return 0; }
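The grid dim3 dimGrid(N/dimBlock.x, N/dimBlock.y) covers the whole matrix only because 1024 divides evenly by 16; for arbitrary N the usual pattern is ceiling division, with the kernel's existing bounds check absorbing the partial blocks. A sketch (div_up is an illustrative helper, not part of the original):

// Ceiling division: enough blocks that a partial block covers the tail.
inline unsigned int div_up(unsigned int n, unsigned int block) {
  return (n + block - 1) / block;
}

// dim3 dimGrid(div_up(N, dimBlock.x), div_up(N, dimBlock.y));
// The `if ( i < N && j < N )` guard in add_matrix then keeps the extra
// threads in the last row/column of blocks from writing out of range.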
7fab93b3e378f28d005cd60462344daa0cc8b073.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 1.7.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date September 2015 @precisions normal z -> c d s */ #include "common_magma.h" #define BLOCK_SIZE 512 __global__ void zmgeelltmv_kernel( int num_rows, int num_cols, int num_vecs, int num_cols_per_row, magmaDoubleComplex alpha, magmaDoubleComplex * dval, magma_index_t * dcolind, magmaDoubleComplex * dx, magmaDoubleComplex beta, magmaDoubleComplex * dy) { extern __shared__ magmaDoubleComplex dot[]; int row = blockDim.x * blockIdx.x + threadIdx.x; if(row < num_rows ) { for( int i=0; i<num_vecs; i++ ) dot[ threadIdx.x+ i*blockDim.x ] = MAGMA_Z_MAKE(0.0, 0.0); for ( int n = 0; n < num_cols_per_row; n++ ) { int col = dcolind [ num_rows * n + row ]; magmaDoubleComplex val = dval [ num_rows * n + row ]; if( val != 0) { for( int i=0; i<num_vecs; i++ ) dot[ threadIdx.x + i*blockDim.x ] += val * dx[col + i * num_cols ]; } } for( int i=0; i<num_vecs; i++ ) { dy[ row + i*num_cols ] = dot[ threadIdx.x + i*blockDim.x ] * alpha + beta * dy [ row + i*num_cols ]; } } } /** Purpose ------- This routine computes Y = alpha * A * X + beta * Y for X and Y sets of num_vec vectors on the GPU. Input format is ELL. Arguments --------- @param[in] transA magma_trans_t transposition parameter for A @param[in] m magma_int_t number of rows in A @param[in] n magma_int_t number of columns in A @param[in] num_vecs mama_int_t number of vectors @param[in] nnz_per_row magma_int_t number of elements in the longest row @param[in] alpha magmaDoubleComplex scalar multiplier @param[in] dval magmaDoubleComplex_ptr array containing values of A in ELL @param[in] dcolind magmaIndex_ptr columnindices of A in ELL @param[in] dx magmaDoubleComplex_ptr input vector x @param[in] beta magmaDoubleComplex scalar multiplier @param[out] dy magmaDoubleComplex_ptr input/output vector y @param[in] queue magma_queue_t Queue to execute in. @ingroup magmasparse_zblas ********************************************************************/ extern "C" magma_int_t magma_zmgeelltmv( magma_trans_t transA, magma_int_t m, magma_int_t n, magma_int_t num_vecs, magma_int_t nnz_per_row, magmaDoubleComplex alpha, magmaDoubleComplex_ptr dval, magmaIndex_ptr dcolind, magmaDoubleComplex_ptr dx, magmaDoubleComplex beta, magmaDoubleComplex_ptr dy, magma_queue_t queue ) { dim3 grid( magma_ceildiv( m, BLOCK_SIZE ) ); magma_int_t threads = BLOCK_SIZE; unsigned int MEM_SIZE = num_vecs* BLOCK_SIZE * sizeof( magmaDoubleComplex ); // num_vecs vectors hipLaunchKernelGGL(( zmgeelltmv_kernel), dim3(grid), dim3(threads), MEM_SIZE, queue , m, n, num_vecs, nnz_per_row, alpha, dval, dcolind, dx, beta, dy ); return MAGMA_SUCCESS; }
7fab93b3e378f28d005cd60462344daa0cc8b073.cu
/* -- MAGMA (version 1.7.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date September 2015 @precisions normal z -> c d s */ #include "common_magma.h" #define BLOCK_SIZE 512 __global__ void zmgeelltmv_kernel( int num_rows, int num_cols, int num_vecs, int num_cols_per_row, magmaDoubleComplex alpha, magmaDoubleComplex * dval, magma_index_t * dcolind, magmaDoubleComplex * dx, magmaDoubleComplex beta, magmaDoubleComplex * dy) { extern __shared__ magmaDoubleComplex dot[]; int row = blockDim.x * blockIdx.x + threadIdx.x; if(row < num_rows ) { for( int i=0; i<num_vecs; i++ ) dot[ threadIdx.x+ i*blockDim.x ] = MAGMA_Z_MAKE(0.0, 0.0); for ( int n = 0; n < num_cols_per_row; n++ ) { int col = dcolind [ num_rows * n + row ]; magmaDoubleComplex val = dval [ num_rows * n + row ]; if( val != 0) { for( int i=0; i<num_vecs; i++ ) dot[ threadIdx.x + i*blockDim.x ] += val * dx[col + i * num_cols ]; } } for( int i=0; i<num_vecs; i++ ) { dy[ row + i*num_cols ] = dot[ threadIdx.x + i*blockDim.x ] * alpha + beta * dy [ row + i*num_cols ]; } } } /** Purpose ------- This routine computes Y = alpha * A * X + beta * Y for X and Y sets of num_vec vectors on the GPU. Input format is ELL. Arguments --------- @param[in] transA magma_trans_t transposition parameter for A @param[in] m magma_int_t number of rows in A @param[in] n magma_int_t number of columns in A @param[in] num_vecs mama_int_t number of vectors @param[in] nnz_per_row magma_int_t number of elements in the longest row @param[in] alpha magmaDoubleComplex scalar multiplier @param[in] dval magmaDoubleComplex_ptr array containing values of A in ELL @param[in] dcolind magmaIndex_ptr columnindices of A in ELL @param[in] dx magmaDoubleComplex_ptr input vector x @param[in] beta magmaDoubleComplex scalar multiplier @param[out] dy magmaDoubleComplex_ptr input/output vector y @param[in] queue magma_queue_t Queue to execute in. @ingroup magmasparse_zblas ********************************************************************/ extern "C" magma_int_t magma_zmgeelltmv( magma_trans_t transA, magma_int_t m, magma_int_t n, magma_int_t num_vecs, magma_int_t nnz_per_row, magmaDoubleComplex alpha, magmaDoubleComplex_ptr dval, magmaIndex_ptr dcolind, magmaDoubleComplex_ptr dx, magmaDoubleComplex beta, magmaDoubleComplex_ptr dy, magma_queue_t queue ) { dim3 grid( magma_ceildiv( m, BLOCK_SIZE ) ); magma_int_t threads = BLOCK_SIZE; unsigned int MEM_SIZE = num_vecs* BLOCK_SIZE * sizeof( magmaDoubleComplex ); // num_vecs vectors zmgeelltmv_kernel<<< grid, threads, MEM_SIZE, queue >>> ( m, n, num_vecs, nnz_per_row, alpha, dval, dcolind, dx, beta, dy ); return MAGMA_SUCCESS; }
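zmgeelltmv_kernel reads slot n of row `row` at offset num_rows * n + row, i.e. the ELL value and column-index arrays are stored slot-major so consecutive rows access consecutive memory. A single-vector host reference over the same layout, a sketch that uses real doubles in place of magmaDoubleComplex:

#include <vector>

// y = alpha * A * x + beta * y with A in ELL format, slots stored
// slot-major: val[num_rows * n + row], colind[num_rows * n + row].
void ell_spmv_ref(int num_rows, int num_cols_per_row,
                  const std::vector<double> &val,
                  const std::vector<int> &colind,
                  double alpha, const std::vector<double> &x,
                  double beta, std::vector<double> &y) {
  for (int row = 0; row < num_rows; ++row) {
    double dot = 0.0;
    for (int n = 0; n < num_cols_per_row; ++n) {
      double v = val[num_rows * n + row];
      if (v != 0.0) dot += v * x[colind[num_rows * n + row]];
    }
    y[row] = alpha * dot + beta * y[row];
  }
}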
e7a1394de04f8ded9e1547bebd895d607da4c8b4.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" extern "C" { #ifndef REAL #define REAL float #endif #ifndef CAST #define CAST(fun) fun ## f #endif #ifndef REAL2o3 #define REAL2o3 (REAL)0.6666666666666667 #endif #ifndef REAL3o2 #define REAL3o2 (REAL)1.5 #endif } __global__ void vector_cdf_norm_inv (const int n, const REAL* x, const int offset_x, const int stride_x, REAL* y, const int offset_y, const int stride_y) { const int gid = blockIdx.x * blockDim.x + threadIdx.x; if (gid < n) { y[offset_y + gid * stride_y] = CAST(normcdfinv)(x[offset_x + gid * stride_x]); } }
e7a1394de04f8ded9e1547bebd895d607da4c8b4.cu
#include "includes.h" extern "C" { #ifndef REAL #define REAL float #endif #ifndef CAST #define CAST(fun) fun ## f #endif #ifndef REAL2o3 #define REAL2o3 (REAL)0.6666666666666667 #endif #ifndef REAL3o2 #define REAL3o2 (REAL)1.5 #endif } __global__ void vector_cdf_norm_inv (const int n, const REAL* x, const int offset_x, const int stride_x, REAL* y, const int offset_y, const int stride_y) { const int gid = blockIdx.x * blockDim.x + threadIdx.x; if (gid < n) { y[offset_y + gid * stride_y] = CAST(normcdfinv)(x[offset_x + gid * stride_x]); } }
084454b65fdc075ea2bec013f326f12ba9b76c71.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" __global__ void vector_add_cu(float *out, float *a, float *b, int n){ for(int i = 0; i < n; i++){ out[i] = a[i] + b[i]; } }
084454b65fdc075ea2bec013f326f12ba9b76c71.cu
#include "includes.h" __global__ void vector_add_cu(float *out, float *a, float *b, int n){ for(int i = 0; i < n; i++){ out[i] = a[i] + b[i]; } }
e74da90794de8163b9e8bbf2cd5af2e8b49b1d19.hip
// !!! This is a file automatically generated by hipify!!! #include <stdlib.h> #include <stdio.h> #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #define BLOCKSIZE 128 /*******************/ /* iDivUp FUNCTION */ /*******************/ int iDivUp(int a, int b){ return ((a % b) != 0) ? (a / b + 1) : (a / b); } /********************/ /* CUDA ERROR CHECK */ /********************/ // --- Credit to http://stackoverflow.com/questions/14038589/what-is-the-canonical-way-to-check-for-errors-using-the-cuda-runtime-api void gpuAssert(hipError_t code, const char *file, int line, bool abort = true) { if (code != hipSuccess) { fprintf(stderr, "GPUassert: %s %s %d\n", hipGetErrorString(code), file, line); if (abort) { exit(code); } } } void gpuErrchk(hipError_t ans) { gpuAssert((ans), __FILE__, __LINE__); } /*******************/ /* addCPU FUNCTION */ /*******************/ void addCPU(int *h_a, int *h_b, int *h_c, int N) { int h_a_tmp, h_b_tmp; for (int k = 0; k < N; k++) { if ((k % 2) == 0) { h_a_tmp = h_a[k + 1]; h_b_tmp = h_b[k + 1]; } else { h_a_tmp = h_a[k - 1]; h_b_tmp = h_b[k - 1]; } h_c[k] = h_a_tmp + h_b_tmp; } } /***************************************************/ /* addGPU FUNCTION WITH NON-CONSECUTIVE LOAD/STORE */ /***************************************************/ __global__ void addGPUNonConsecutive(int *d_a, int *d_b, int *d_c, int N) { int tid = threadIdx.x + blockIdx.x * blockDim.x; if (tid >= N) return; int d_a_tmp, d_b_tmp; if ((tid % 2) == 0) { d_a_tmp = d_a[tid + 1]; d_b_tmp = d_b[tid + 1]; } else { d_a_tmp = d_a[tid - 1]; d_b_tmp = d_b[tid - 1]; } d_c[tid] = d_a_tmp + d_b_tmp; } /********/ /* MAIN */ /********/ int main() { const int N = 256; // --- Allocating host memory for data and results int *h_a = (int *)malloc(N * sizeof(int)); int *h_b = (int *)malloc(N * sizeof(int)); int *h_c = (int *)malloc(N * sizeof(int)); int *h_c_device = (int *)malloc(N * sizeof(int)); // --- Allocating device memory for data and results int *d_a, *d_b, *d_c; gpuErrchk(hipMalloc(&d_a, N * sizeof(int))); gpuErrchk(hipMalloc(&d_b, N * sizeof(int))); gpuErrchk(hipMalloc(&d_c, N * sizeof(int))); // --- Filling the input vectors on host memory for (int k = 0; k < N; k++) { h_a[k] = k; h_b[k] = 2 * k; } // --- Moving data from host to device gpuErrchk(hipMemcpy(d_a, h_a, N * sizeof(int), hipMemcpyHostToDevice)); gpuErrchk(hipMemcpy(d_b, h_b, N * sizeof(int), hipMemcpyHostToDevice)); addCPU(h_a, h_b, h_c, N); //addGPU << <iDivUp(N, BLOCKSIZE), BLOCKSIZE >> >(d_a, d_b, d_c, N); addGPUNonConsecutive << <iDivUp(N, BLOCKSIZE), BLOCKSIZE >> >(d_a, d_b, d_c, N); gpuErrchk(hipDeviceSynchronize()); gpuErrchk(hipPeekAtLastError()); gpuErrchk(hipMemcpy(h_c_device, d_c, N * sizeof(int), hipMemcpyDeviceToHost)); for (int k = 0; k < N; k++) if (h_c_device[k] != h_c[k]) { printf("Host and device results do not match for k = %d: h_c[%d] = %d; h_c_device[%d] = %d\n", k, k, h_c[k], k, h_c_device[k]); } printf("No errors found.\n"); return 0; }
e74da90794de8163b9e8bbf2cd5af2e8b49b1d19.cu
#include <stdlib.h> #include <stdio.h> #include <cuda.h> #include <cuda_runtime.h> #define BLOCKSIZE 128 /*******************/ /* iDivUp FUNCTION */ /*******************/ int iDivUp(int a, int b){ return ((a % b) != 0) ? (a / b + 1) : (a / b); } /********************/ /* CUDA ERROR CHECK */ /********************/ // --- Credit to http://stackoverflow.com/questions/14038589/what-is-the-canonical-way-to-check-for-errors-using-the-cuda-runtime-api void gpuAssert(cudaError_t code, const char *file, int line, bool abort = true) { if (code != cudaSuccess) { fprintf(stderr, "GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line); if (abort) { exit(code); } } } void gpuErrchk(cudaError_t ans) { gpuAssert((ans), __FILE__, __LINE__); } /*******************/ /* addCPU FUNCTION */ /*******************/ void addCPU(int *h_a, int *h_b, int *h_c, int N) { int h_a_tmp, h_b_tmp; for (int k = 0; k < N; k++) { if ((k % 2) == 0) { h_a_tmp = h_a[k + 1]; h_b_tmp = h_b[k + 1]; } else { h_a_tmp = h_a[k - 1]; h_b_tmp = h_b[k - 1]; } h_c[k] = h_a_tmp + h_b_tmp; } } /***************************************************/ /* addGPU FUNCTION WITH NON-CONSECUTIVE LOAD/STORE */ /***************************************************/ __global__ void addGPUNonConsecutive(int *d_a, int *d_b, int *d_c, int N) { int tid = threadIdx.x + blockIdx.x * blockDim.x; if (tid >= N) return; int d_a_tmp, d_b_tmp; if ((tid % 2) == 0) { d_a_tmp = d_a[tid + 1]; d_b_tmp = d_b[tid + 1]; } else { d_a_tmp = d_a[tid - 1]; d_b_tmp = d_b[tid - 1]; } d_c[tid] = d_a_tmp + d_b_tmp; } /********/ /* MAIN */ /********/ int main() { const int N = 256; // --- Allocating host memory for data and results int *h_a = (int *)malloc(N * sizeof(int)); int *h_b = (int *)malloc(N * sizeof(int)); int *h_c = (int *)malloc(N * sizeof(int)); int *h_c_device = (int *)malloc(N * sizeof(int)); // --- Allocating device memory for data and results int *d_a, *d_b, *d_c; gpuErrchk(cudaMalloc(&d_a, N * sizeof(int))); gpuErrchk(cudaMalloc(&d_b, N * sizeof(int))); gpuErrchk(cudaMalloc(&d_c, N * sizeof(int))); // --- Filling the input vectors on host memory for (int k = 0; k < N; k++) { h_a[k] = k; h_b[k] = 2 * k; } // --- Moving data from host to device gpuErrchk(cudaMemcpy(d_a, h_a, N * sizeof(int), cudaMemcpyHostToDevice)); gpuErrchk(cudaMemcpy(d_b, h_b, N * sizeof(int), cudaMemcpyHostToDevice)); addCPU(h_a, h_b, h_c, N); //addGPU << <iDivUp(N, BLOCKSIZE), BLOCKSIZE >> >(d_a, d_b, d_c, N); addGPUNonConsecutive << <iDivUp(N, BLOCKSIZE), BLOCKSIZE >> >(d_a, d_b, d_c, N); gpuErrchk(cudaDeviceSynchronize()); gpuErrchk(cudaPeekAtLastError()); gpuErrchk(cudaMemcpy(h_c_device, d_c, N * sizeof(int), cudaMemcpyDeviceToHost)); // --- Count mismatches so the success message is printed only when there are none int errors = 0; for (int k = 0; k < N; k++) if (h_c_device[k] != h_c[k]) { printf("Host and device results do not match for k = %d: h_c[%d] = %d; h_c_device[%d] = %d\n", k, k, h_c[k], k, h_c_device[k]); errors++; } if (errors == 0) printf("No errors found.\n"); return 0; }
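Both versions above comment out a call to an addGPU kernel that is defined nowhere in the file; presumably it was the consecutive-access baseline that addGPUNonConsecutive is measured against. A hedged reconstruction of what it likely looked like:

/***********************************************/
/* addGPU FUNCTION WITH CONSECUTIVE LOAD/STORE */
/***********************************************/
// Hypothetical baseline: each thread reads and writes its own index, so
// adjacent threads touch adjacent addresses (fully coalesced accesses).
__global__ void addGPU(int *d_a, int *d_b, int *d_c, int N)
{
    int tid = threadIdx.x + blockIdx.x * blockDim.x;
    if (tid >= N) return;
    d_c[tid] = d_a[tid] + d_b[tid];
}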
20422d70381b34498392db6ef6ebc3628fc51038.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <stdlib.h> #include <math.h> #include <string.h> #include <opencv2/opencv.hpp> #include "convolution.h" #include "helpers.h" using namespace std; using namespace cv; void testConvolution() { cv::Mat img = getRawImage("./Lena.pgm"); img.convertTo(img, CV_32FC1); convolutionWrapper((float*)&img.data[0], img.cols, img.rows, 2, 2, "global_only", true, "results/kernel2x2_size2x2_global_only.png"); convolutionWrapper((float*)&img.data[0], img.cols, img.rows, 3, 2, "global_only", true, "results/kernel2x2_size3x3_global_only.png"); convolutionWrapper((float*)&img.data[0], img.cols, img.rows, 4, 2, "global_only", true, "results/kernel2x2_size4x4_global_only.png"); convolutionWrapper((float*)&img.data[0], img.cols, img.rows, 5, 2, "global_only", true, "results/kernel2x2_size5x5_global_only.png"); convolutionWrapper((float*)&img.data[0], img.cols, img.rows, 6, 2, "global_only", true, "results/kernel2x2_size6x6_global_only.png"); convolutionWrapper((float*)&img.data[0], img.cols, img.rows, 7, 2, "global_only", true, "results/kernel2x2_size7x7_global_only.png"); convolutionWrapper((float*)&img.data[0], img.cols, img.rows, 8, 2, "global_only", true, "results/kernel2x2_size8x8_global_only.png"); convolutionWrapper((float*)&img.data[0], img.cols, img.rows, 3, 3, "global_only", true, "results/kernel3x3_size3x3_global_only.png"); convolutionWrapper((float*)&img.data[0], img.cols, img.rows, 4, 3, "global_only", true, "results/kernel3x3_size4x4_global_only.png"); convolutionWrapper((float*)&img.data[0], img.cols, img.rows, 5, 3, "global_only", true, "results/kernel3x3_size5x5_global_only.png"); convolutionWrapper((float*)&img.data[0], img.cols, img.rows, 6, 3, "global_only", true, "results/kernel3x3_size6x6_global_only.png"); convolutionWrapper((float*)&img.data[0], img.cols, img.rows, 7, 3, "global_only", true, "results/kernel3x3_size7x7_global_only.png"); convolutionWrapper((float*)&img.data[0], img.cols, img.rows, 8, 3, "global_only", true, "results/kernel3x3_size8x8_global_only.png"); convolutionWrapper((float*)&img.data[0], img.cols, img.rows, 9, 3, "global_only", true, "results/kernel3x3_size9x9_global_only.png"); convolutionWrapper((float*)&img.data[0], img.cols, img.rows, 4, 4, "global_only", true, "results/kernel4x4_size4x4_global_only.png"); convolutionWrapper((float*)&img.data[0], img.cols, img.rows, 5, 4, "global_only", true, "results/kernel4x4_size5x5_global_only.png"); convolutionWrapper((float*)&img.data[0], img.cols, img.rows, 6, 4, "global_only", true, "results/kernel4x4_size6x6_global_only.png"); convolutionWrapper((float*)&img.data[0], img.cols, img.rows, 7, 4, "global_only", true, "results/kernel4x4_size7x7_global_only.png"); convolutionWrapper((float*)&img.data[0], img.cols, img.rows, 8, 4, "global_only", true, "results/kernel4x4_size8x8_global_only.png"); convolutionWrapper((float*)&img.data[0], img.cols, img.rows, 9, 4, "global_only", true, "results/kernel4x4_size9x9_global_only.png"); convolutionWrapper((float*)&img.data[0], img.cols, img.rows, 10, 4, "global_only", true, "results/kernel4x4_size10x10_global_only.png"); convolutionWrapper((float*)&img.data[0], img.cols, img.rows, 5, 5, "global_only", true, "results/kernel5x5_size5x5_global_only.png"); convolutionWrapper((float*)&img.data[0], img.cols, img.rows, 6, 5, "global_only", true, "results/kernel5x5_size6x6_global_only.png"); convolutionWrapper((float*)&img.data[0], img.cols, img.rows, 7, 5, "global_only", true, 
"results/kernel5x5_size7x7_global_only.png"); convolutionWrapper((float*)&img.data[0], img.cols, img.rows, 8, 5, "global_only", true, "results/kernel5x5_size8x8_global_only.png"); convolutionWrapper((float*)&img.data[0], img.cols, img.rows, 9, 5, "global_only", true, "results/kernel5x5_size9x9_global_only.png"); convolutionWrapper((float*)&img.data[0], img.cols, img.rows, 10, 5, "global_only", true, "results/kernel5x5_size10x10_global_only.png"); convolutionWrapper((float*)&img.data[0], img.cols, img.rows, 11, 5, "global_only", true, "results/kernel5x5_size11x11_global_only.png"); convolutionWrapper((float*)&img.data[0], img.cols, img.rows, 6, 6, "global_only", true, "results/kernel6x6_size6x6_global_only.png"); convolutionWrapper((float*)&img.data[0], img.cols, img.rows, 7, 6, "global_only", true, "results/kernel6x6_size7x7_global_only.png"); convolutionWrapper((float*)&img.data[0], img.cols, img.rows, 8, 6, "global_only", true, "results/kernel6x6_size8x8_global_only.png"); convolutionWrapper((float*)&img.data[0], img.cols, img.rows, 9, 6, "global_only", true, "results/kernel6x6_size9x9_global_only.png"); convolutionWrapper((float*)&img.data[0], img.cols, img.rows, 10, 6, "global_only", true, "results/kernel6x6_size10x10_global_only.png"); convolutionWrapper((float*)&img.data[0], img.cols, img.rows, 11, 6, "global_only", true, "results/kernel6x6_size11x11_global_only.png"); convolutionWrapper((float*)&img.data[0], img.cols, img.rows, 12, 6, "global_only", true, "results/kernel6x6_size12x12_global_only.png"); convolutionWrapper((float*)&img.data[0], img.cols, img.rows, 7, 7, "global_only", true, "results/kernel7x7_size7x7_global_only.png"); convolutionWrapper((float*)&img.data[0], img.cols, img.rows, 8, 7, "global_only", true, "results/kernel7x7_size8x8_global_only.png"); convolutionWrapper((float*)&img.data[0], img.cols, img.rows, 9, 7, "global_only", true, "results/kernel7x7_size9x9_global_only.png"); convolutionWrapper((float*)&img.data[0], img.cols, img.rows, 10, 7, "global_only", true, "results/kernel7x7_size10x10_global_only.png"); convolutionWrapper((float*)&img.data[0], img.cols, img.rows, 11, 7, "global_only", true, "results/kernel7x7_size11x11_global_only.png"); convolutionWrapper((float*)&img.data[0], img.cols, img.rows, 12, 7, "global_only", true, "results/kernel7x7_size12x12_global_only.png"); convolutionWrapper((float*)&img.data[0], img.cols, img.rows, 13, 7, "global_only", true, "results/kernel7x7_size13x13_global_only.png"); convolutionWrapper((float*)&img.data[0], img.cols, img.rows, 2, 2, "global_register", true, "results/kernel2x2_size2x2_global_register.png"); convolutionWrapper((float*)&img.data[0], img.cols, img.rows, 3, 2, "global_register", true, "results/kernel2x2_size3x3_global_register.png"); convolutionWrapper((float*)&img.data[0], img.cols, img.rows, 4, 2, "global_register", true, "results/kernel2x2_size4x4_global_register.png"); convolutionWrapper((float*)&img.data[0], img.cols, img.rows, 5, 2, "global_register", true, "results/kernel2x2_size5x5_global_register.png"); convolutionWrapper((float*)&img.data[0], img.cols, img.rows, 6, 2, "global_register", true, "results/kernel2x2_size6x6_global_register.png"); convolutionWrapper((float*)&img.data[0], img.cols, img.rows, 7, 2, "global_register", true, "results/kernel2x2_size7x7_global_register.png"); convolutionWrapper((float*)&img.data[0], img.cols, img.rows, 3, 3, "global_register", true, "results/kernel3x3_size3x3_global_register.png"); convolutionWrapper((float*)&img.data[0], img.cols, img.rows, 4, 3, 
"global_register", true, "results/kernel3x3_size4x4_global_register.png"); convolutionWrapper((float*)&img.data[0], img.cols, img.rows, 5, 3, "global_register", true, "results/kernel3x3_size5x5_global_register.png"); convolutionWrapper((float*)&img.data[0], img.cols, img.rows, 6, 3, "global_register", true, "results/kernel3x3_size6x6_global_register.png"); convolutionWrapper((float*)&img.data[0], img.cols, img.rows, 7, 3, "global_register", true, "results/kernel3x3_size7x7_global_register.png"); convolutionWrapper((float*)&img.data[0], img.cols, img.rows, 4, 4, "global_register", true, "results/kernel4x4_size4x4_global_register.png"); convolutionWrapper((float*)&img.data[0], img.cols, img.rows, 5, 4, "global_register", true, "results/kernel4x4_size5x5_global_register.png"); convolutionWrapper((float*)&img.data[0], img.cols, img.rows, 6, 4, "global_register", true, "results/kernel4x4_size6x6_global_register.png"); convolutionWrapper((float*)&img.data[0], img.cols, img.rows, 7, 4, "global_register", true, "results/kernel4x4_size7x7_global_register.png"); convolutionWrapper((float*)&img.data[0], img.cols, img.rows, 5, 5, "global_register", true, "results/kernel5x5_size5x5_global_register.png"); convolutionWrapper((float*)&img.data[0], img.cols, img.rows, 6, 5, "global_register", true, "results/kernel5x5_size6x6_global_register.png"); convolutionWrapper((float*)&img.data[0], img.cols, img.rows, 7, 5, "global_register", true, "results/kernel5x5_size7x7_global_register.png"); convolutionWrapper((float*)&img.data[0], img.cols, img.rows, 6, 6, "global_register", true, "results/kernel6x6_size6x6_global_register.png"); convolutionWrapper((float*)&img.data[0], img.cols, img.rows, 7, 6, "global_register", true, "results/kernel6x6_size7x7_global_register.png"); convolutionWrapper((float*)&img.data[0], img.cols, img.rows, 7, 7, "global_register", true, "results/kernel7x7_size7x7_global_register.png"); convolutionWrapper_texCache((float*)&img.data[0], img.cols, img.rows, 2, 2, "texCache_only", true, "results/kernel2x2_size2x2_texCache_only.png"); convolutionWrapper_texCache((float*)&img.data[0], img.cols, img.rows, 3, 3, "texCache_only", true, "results/kernel3x3_size3x3_texCache_only.png"); convolutionWrapper_texCache((float*)&img.data[0], img.cols, img.rows, 4, 4, "texCache_only", true, "results/kernel4x4_size4x4_texCache_only.png"); convolutionWrapper_texCache((float*)&img.data[0], img.cols, img.rows, 5, 5, "texCache_only", true, "results/kernel5x5_size5x5_texCache_only.png"); convolutionWrapper_texCache((float*)&img.data[0], img.cols, img.rows, 6, 6, "texCache_only", true, "results/kernel6x6_size6x6_texCache_only.png"); convolutionWrapper_texCache((float*)&img.data[0], img.cols, img.rows, 7, 7, "texCache_only", true, "results/kernel7x7_size7x7_texCache_only.png"); convolutionWrapper_texCache((float*)&img.data[0], img.cols, img.rows, 2, 2, "texCache_register", true, "results/kernel2x2_size2x2_texCache_register.png"); convolutionWrapper_texCache((float*)&img.data[0], img.cols, img.rows, 3, 2, "texCache_register", true, "results/kernel2x2_size3x3_texCache_register.png"); convolutionWrapper_texCache((float*)&img.data[0], img.cols, img.rows, 4, 2, "texCache_register", true, "results/kernel2x2_size4x4_texCache_register.png"); convolutionWrapper_texCache((float*)&img.data[0], img.cols, img.rows, 5, 2, "texCache_register", true, "results/kernel2x2_size5x5_texCache_register.png"); convolutionWrapper_texCache((float*)&img.data[0], img.cols, img.rows, 6, 2, "texCache_register", true, 
"results/kernel2x2_size6x6_texCache_register.png"); convolutionWrapper_texCache((float*)&img.data[0], img.cols, img.rows, 7, 2, "texCache_register", true, "results/kernel2x2_size7x7_texCache_register.png"); convolutionWrapper_texCache((float*)&img.data[0], img.cols, img.rows, 3, 3, "texCache_register", true, "results/kernel3x3_size3x3_texCache_register.png"); convolutionWrapper_texCache((float*)&img.data[0], img.cols, img.rows, 4, 3, "texCache_register", true, "results/kernel3x3_size4x4_texCache_register.png"); convolutionWrapper_texCache((float*)&img.data[0], img.cols, img.rows, 5, 3, "texCache_register", true, "results/kernel3x3_size5x5_texCache_register.png"); convolutionWrapper_texCache((float*)&img.data[0], img.cols, img.rows, 6, 3, "texCache_register", true, "results/kernel3x3_size6x6_texCache_register.png"); convolutionWrapper_texCache((float*)&img.data[0], img.cols, img.rows, 7, 3, "texCache_register", true, "results/kernel3x3_size7x7_texCache_register.png"); convolutionWrapper_texCache((float*)&img.data[0], img.cols, img.rows, 4, 4, "texCache_register", true, "results/kernel4x4_size4x4_texCache_register.png"); convolutionWrapper_texCache((float*)&img.data[0], img.cols, img.rows, 5, 4, "texCache_register", true, "results/kernel4x4_size5x5_texCache_register.png"); convolutionWrapper_texCache((float*)&img.data[0], img.cols, img.rows, 6, 4, "texCache_register", true, "results/kernel4x4_size6x6_texCache_register.png"); convolutionWrapper_texCache((float*)&img.data[0], img.cols, img.rows, 7, 4, "texCache_register", true, "results/kernel4x4_size7x7_texCache_register.png"); convolutionWrapper_texCache((float*)&img.data[0], img.cols, img.rows, 5, 5, "texCache_register", true, "results/kernel5x5_size5x5_texCache_register.png"); convolutionWrapper_texCache((float*)&img.data[0], img.cols, img.rows, 6, 5, "texCache_register", true, "results/kernel5x5_size6x6_texCache_register.png"); convolutionWrapper_texCache((float*)&img.data[0], img.cols, img.rows, 7, 5, "texCache_register", true, "results/kernel5x5_size7x7_texCache_register.png"); convolutionWrapper_texCache((float*)&img.data[0], img.cols, img.rows, 6, 6, "texCache_register", true, "results/kernel6x6_size6x6_texCache_register.png"); convolutionWrapper_texCache((float*)&img.data[0], img.cols, img.rows, 7, 6, "texCache_register", true, "results/kernel6x6_size7x7_texCache_register.png"); convolutionWrapper_texCache((float*)&img.data[0], img.cols, img.rows, 7, 7, "texCache_register", true, "results/kernel7x7_size7x7_texCache_register.png"); } void testConvolution_withDummyImg(int height, int width) { float* img = getDummyImg(height, width); FILE * pFile = fopen("perf.txt", "w"); fprintf(pFile, "kernelSize amountToLoad memoryScheme responseTime\n"); int nRuns = 10; float responseTime = 0; responseTime = convolutionWrapper(img, width, height, 3, 3, "global_register", false); //warmup printf("memoryScheme = %s \n", "global_only"); for(int k=1; k<=4; k++) { int kernelSize=2*k+1; for(int sqrtConvsPerThread=1; sqrtConvsPerThread<8; sqrtConvsPerThread++) { int amountToLoad = sqrtConvsPerThread+kernelSize-1; //actually, prefetching nothing in this version responseTime = 0; for(int i=0; i<nRuns; i++) { float tmpTime = convolutionWrapper(img, width, height, amountToLoad, kernelSize, "global_only", false); responseTime += tmpTime; } responseTime = responseTime/nRuns; fprintf(pFile, "%d, %d, %s, %f \n", kernelSize, amountToLoad, "global_only", responseTime); printf("kernelSize = %d. amountToLoad = %d. 
time per Convolution = %f ms \n", kernelSize, amountToLoad, responseTime*1000.0); hipDeviceSynchronize(); } printf("\n"); } printf("memoryScheme = %s \n", "global_register"); for(int k=1; k<=4; k++) { int kernelSize=2*k+1; for(int amountToLoad=kernelSize; amountToLoad<8; amountToLoad++) { responseTime = 0; for(int i=0; i<nRuns; i++) { float tmpTime = convolutionWrapper(img, width, height, amountToLoad, kernelSize, "global_register", false); responseTime += tmpTime; } responseTime = responseTime/nRuns; fprintf(pFile, "%d, %d, %s, %f \n", kernelSize, amountToLoad, "global_register", responseTime); printf("kernelSize = %d. amountToLoad = %d. time per Convolution = %f ms \n", kernelSize, amountToLoad, responseTime*1000.0); hipDeviceSynchronize(); } printf("\n"); } printf("memoryScheme = %s \n", "texCache_only"); for(int k=1; k<=4; k++) { int kernelSize=2*k+1; int amountToLoad = kernelSize; responseTime = 0; for(int i=0; i<nRuns; i++) { float tmpTime = convolutionWrapper_texCache(img, width, height, amountToLoad, kernelSize, "texCache_only", false); responseTime += tmpTime; } responseTime = responseTime/nRuns; fprintf(pFile, "%d, %d, %s, %f \n", kernelSize, amountToLoad, "texCache_only", responseTime); printf("kernelSize = %d. amountToLoad = %d. time per Convolution = %f ms \n", kernelSize, amountToLoad, responseTime*1000.0); hipDeviceSynchronize(); printf("\n"); } printf("memoryScheme = %s \n", "texCache_register"); for(int k=1; k<=4; k++) { int kernelSize=2*k+1; for(int amountToLoad=kernelSize; amountToLoad<8; amountToLoad++) { responseTime = 0; for(int i=0; i<nRuns; i++) { float tmpTime = convolutionWrapper_texCache(img, width, height, amountToLoad, kernelSize, "texCache_register", false); responseTime += tmpTime; } responseTime = responseTime/nRuns; fprintf(pFile, "%d, %d, %s, %f \n", kernelSize, amountToLoad, "texCache_register", responseTime); printf("kernelSize = %d. amountToLoad = %d. time per Convolution = %f ms \n", kernelSize, amountToLoad, responseTime*1000.0); hipDeviceSynchronize(); } printf("\n"); } fclose(pFile); }
20422d70381b34498392db6ef6ebc3628fc51038.cu
#include "cuda.h" #include <stdio.h> #include <stdlib.h> #include <math.h> #include <string.h> #include <opencv2/opencv.hpp> #include "convolution.h" #include "helpers.h" using namespace std; using namespace cv; void testConvolution() { cv::Mat img = getRawImage("./Lena.pgm"); img.convertTo(img, CV_32FC1); convolutionWrapper((float*)&img.data[0], img.cols, img.rows, 2, 2, "global_only", true, "results/kernel2x2_size2x2_global_only.png"); convolutionWrapper((float*)&img.data[0], img.cols, img.rows, 3, 2, "global_only", true, "results/kernel2x2_size3x3_global_only.png"); convolutionWrapper((float*)&img.data[0], img.cols, img.rows, 4, 2, "global_only", true, "results/kernel2x2_size4x4_global_only.png"); convolutionWrapper((float*)&img.data[0], img.cols, img.rows, 5, 2, "global_only", true, "results/kernel2x2_size5x5_global_only.png"); convolutionWrapper((float*)&img.data[0], img.cols, img.rows, 6, 2, "global_only", true, "results/kernel2x2_size6x6_global_only.png"); convolutionWrapper((float*)&img.data[0], img.cols, img.rows, 7, 2, "global_only", true, "results/kernel2x2_size7x7_global_only.png"); convolutionWrapper((float*)&img.data[0], img.cols, img.rows, 8, 2, "global_only", true, "results/kernel2x2_size8x8_global_only.png"); convolutionWrapper((float*)&img.data[0], img.cols, img.rows, 3, 3, "global_only", true, "results/kernel3x3_size3x3_global_only.png"); convolutionWrapper((float*)&img.data[0], img.cols, img.rows, 4, 3, "global_only", true, "results/kernel3x3_size4x4_global_only.png"); convolutionWrapper((float*)&img.data[0], img.cols, img.rows, 5, 3, "global_only", true, "results/kernel3x3_size5x5_global_only.png"); convolutionWrapper((float*)&img.data[0], img.cols, img.rows, 6, 3, "global_only", true, "results/kernel3x3_size6x6_global_only.png"); convolutionWrapper((float*)&img.data[0], img.cols, img.rows, 7, 3, "global_only", true, "results/kernel3x3_size7x7_global_only.png"); convolutionWrapper((float*)&img.data[0], img.cols, img.rows, 8, 3, "global_only", true, "results/kernel3x3_size8x8_global_only.png"); convolutionWrapper((float*)&img.data[0], img.cols, img.rows, 9, 3, "global_only", true, "results/kernel3x3_size9x9_global_only.png"); convolutionWrapper((float*)&img.data[0], img.cols, img.rows, 4, 4, "global_only", true, "results/kernel4x4_size4x4_global_only.png"); convolutionWrapper((float*)&img.data[0], img.cols, img.rows, 5, 4, "global_only", true, "results/kernel4x4_size5x5_global_only.png"); convolutionWrapper((float*)&img.data[0], img.cols, img.rows, 6, 4, "global_only", true, "results/kernel4x4_size6x6_global_only.png"); convolutionWrapper((float*)&img.data[0], img.cols, img.rows, 7, 4, "global_only", true, "results/kernel4x4_size7x7_global_only.png"); convolutionWrapper((float*)&img.data[0], img.cols, img.rows, 8, 4, "global_only", true, "results/kernel4x4_size8x8_global_only.png"); convolutionWrapper((float*)&img.data[0], img.cols, img.rows, 9, 4, "global_only", true, "results/kernel4x4_size9x9_global_only.png"); convolutionWrapper((float*)&img.data[0], img.cols, img.rows, 10, 4, "global_only", true, "results/kernel4x4_size10x10_global_only.png"); convolutionWrapper((float*)&img.data[0], img.cols, img.rows, 5, 5, "global_only", true, "results/kernel5x5_size5x5_global_only.png"); convolutionWrapper((float*)&img.data[0], img.cols, img.rows, 6, 5, "global_only", true, "results/kernel5x5_size6x6_global_only.png"); convolutionWrapper((float*)&img.data[0], img.cols, img.rows, 7, 5, "global_only", true, "results/kernel5x5_size7x7_global_only.png"); 
convolutionWrapper((float*)&img.data[0], img.cols, img.rows, 8, 5, "global_only", true, "results/kernel5x5_size8x8_global_only.png"); convolutionWrapper((float*)&img.data[0], img.cols, img.rows, 9, 5, "global_only", true, "results/kernel5x5_size9x9_global_only.png"); convolutionWrapper((float*)&img.data[0], img.cols, img.rows, 10, 5, "global_only", true, "results/kernel5x5_size10x10_global_only.png"); convolutionWrapper((float*)&img.data[0], img.cols, img.rows, 11, 5, "global_only", true, "results/kernel5x5_size11x11_global_only.png"); convolutionWrapper((float*)&img.data[0], img.cols, img.rows, 6, 6, "global_only", true, "results/kernel6x6_size6x6_global_only.png"); convolutionWrapper((float*)&img.data[0], img.cols, img.rows, 7, 6, "global_only", true, "results/kernel6x6_size7x7_global_only.png"); convolutionWrapper((float*)&img.data[0], img.cols, img.rows, 8, 6, "global_only", true, "results/kernel6x6_size8x8_global_only.png"); convolutionWrapper((float*)&img.data[0], img.cols, img.rows, 9, 6, "global_only", true, "results/kernel6x6_size9x9_global_only.png"); convolutionWrapper((float*)&img.data[0], img.cols, img.rows, 10, 6, "global_only", true, "results/kernel6x6_size10x10_global_only.png"); convolutionWrapper((float*)&img.data[0], img.cols, img.rows, 11, 6, "global_only", true, "results/kernel6x6_size11x11_global_only.png"); convolutionWrapper((float*)&img.data[0], img.cols, img.rows, 12, 6, "global_only", true, "results/kernel6x6_size12x12_global_only.png"); convolutionWrapper((float*)&img.data[0], img.cols, img.rows, 7, 7, "global_only", true, "results/kernel7x7_size7x7_global_only.png"); convolutionWrapper((float*)&img.data[0], img.cols, img.rows, 8, 7, "global_only", true, "results/kernel7x7_size8x8_global_only.png"); convolutionWrapper((float*)&img.data[0], img.cols, img.rows, 9, 7, "global_only", true, "results/kernel7x7_size9x9_global_only.png"); convolutionWrapper((float*)&img.data[0], img.cols, img.rows, 10, 7, "global_only", true, "results/kernel7x7_size10x10_global_only.png"); convolutionWrapper((float*)&img.data[0], img.cols, img.rows, 11, 7, "global_only", true, "results/kernel7x7_size11x11_global_only.png"); convolutionWrapper((float*)&img.data[0], img.cols, img.rows, 12, 7, "global_only", true, "results/kernel7x7_size12x12_global_only.png"); convolutionWrapper((float*)&img.data[0], img.cols, img.rows, 13, 7, "global_only", true, "results/kernel7x7_size13x13_global_only.png"); convolutionWrapper((float*)&img.data[0], img.cols, img.rows, 2, 2, "global_register", true, "results/kernel2x2_size2x2_global_register.png"); convolutionWrapper((float*)&img.data[0], img.cols, img.rows, 3, 2, "global_register", true, "results/kernel2x2_size3x3_global_register.png"); convolutionWrapper((float*)&img.data[0], img.cols, img.rows, 4, 2, "global_register", true, "results/kernel2x2_size4x4_global_register.png"); convolutionWrapper((float*)&img.data[0], img.cols, img.rows, 5, 2, "global_register", true, "results/kernel2x2_size5x5_global_register.png"); convolutionWrapper((float*)&img.data[0], img.cols, img.rows, 6, 2, "global_register", true, "results/kernel2x2_size6x6_global_register.png"); convolutionWrapper((float*)&img.data[0], img.cols, img.rows, 7, 2, "global_register", true, "results/kernel2x2_size7x7_global_register.png"); convolutionWrapper((float*)&img.data[0], img.cols, img.rows, 3, 3, "global_register", true, "results/kernel3x3_size3x3_global_register.png"); convolutionWrapper((float*)&img.data[0], img.cols, img.rows, 4, 3, "global_register", true, 
"results/kernel3x3_size4x4_global_register.png"); convolutionWrapper((float*)&img.data[0], img.cols, img.rows, 5, 3, "global_register", true, "results/kernel3x3_size5x5_global_register.png"); convolutionWrapper((float*)&img.data[0], img.cols, img.rows, 6, 3, "global_register", true, "results/kernel3x3_size6x6_global_register.png"); convolutionWrapper((float*)&img.data[0], img.cols, img.rows, 7, 3, "global_register", true, "results/kernel3x3_size7x7_global_register.png"); convolutionWrapper((float*)&img.data[0], img.cols, img.rows, 4, 4, "global_register", true, "results/kernel4x4_size4x4_global_register.png"); convolutionWrapper((float*)&img.data[0], img.cols, img.rows, 5, 4, "global_register", true, "results/kernel4x4_size5x5_global_register.png"); convolutionWrapper((float*)&img.data[0], img.cols, img.rows, 6, 4, "global_register", true, "results/kernel4x4_size6x6_global_register.png"); convolutionWrapper((float*)&img.data[0], img.cols, img.rows, 7, 4, "global_register", true, "results/kernel4x4_size7x7_global_register.png"); convolutionWrapper((float*)&img.data[0], img.cols, img.rows, 5, 5, "global_register", true, "results/kernel5x5_size5x5_global_register.png"); convolutionWrapper((float*)&img.data[0], img.cols, img.rows, 6, 5, "global_register", true, "results/kernel5x5_size6x6_global_register.png"); convolutionWrapper((float*)&img.data[0], img.cols, img.rows, 7, 5, "global_register", true, "results/kernel5x5_size7x7_global_register.png"); convolutionWrapper((float*)&img.data[0], img.cols, img.rows, 6, 6, "global_register", true, "results/kernel6x6_size6x6_global_register.png"); convolutionWrapper((float*)&img.data[0], img.cols, img.rows, 7, 6, "global_register", true, "results/kernel6x6_size7x7_global_register.png"); convolutionWrapper((float*)&img.data[0], img.cols, img.rows, 7, 7, "global_register", true, "results/kernel7x7_size7x7_global_register.png"); convolutionWrapper_texCache((float*)&img.data[0], img.cols, img.rows, 2, 2, "texCache_only", true, "results/kernel2x2_size2x2_texCache_only.png"); convolutionWrapper_texCache((float*)&img.data[0], img.cols, img.rows, 3, 3, "texCache_only", true, "results/kernel3x3_size3x3_texCache_only.png"); convolutionWrapper_texCache((float*)&img.data[0], img.cols, img.rows, 4, 4, "texCache_only", true, "results/kernel4x4_size4x4_texCache_only.png"); convolutionWrapper_texCache((float*)&img.data[0], img.cols, img.rows, 5, 5, "texCache_only", true, "results/kernel5x5_size5x5_texCache_only.png"); convolutionWrapper_texCache((float*)&img.data[0], img.cols, img.rows, 6, 6, "texCache_only", true, "results/kernel6x6_size6x6_texCache_only.png"); convolutionWrapper_texCache((float*)&img.data[0], img.cols, img.rows, 7, 7, "texCache_only", true, "results/kernel7x7_size7x7_texCache_only.png"); convolutionWrapper_texCache((float*)&img.data[0], img.cols, img.rows, 2, 2, "texCache_register", true, "results/kernel2x2_size2x2_texCache_register.png"); convolutionWrapper_texCache((float*)&img.data[0], img.cols, img.rows, 3, 2, "texCache_register", true, "results/kernel2x2_size3x3_texCache_register.png"); convolutionWrapper_texCache((float*)&img.data[0], img.cols, img.rows, 4, 2, "texCache_register", true, "results/kernel2x2_size4x4_texCache_register.png"); convolutionWrapper_texCache((float*)&img.data[0], img.cols, img.rows, 5, 2, "texCache_register", true, "results/kernel2x2_size5x5_texCache_register.png"); convolutionWrapper_texCache((float*)&img.data[0], img.cols, img.rows, 6, 2, "texCache_register", true, "results/kernel2x2_size6x6_texCache_register.png"); 
convolutionWrapper_texCache((float*)&img.data[0], img.cols, img.rows, 7, 2, "texCache_register", true, "results/kernel2x2_size7x7_texCache_register.png"); convolutionWrapper_texCache((float*)&img.data[0], img.cols, img.rows, 3, 3, "texCache_register", true, "results/kernel3x3_size3x3_texCache_register.png"); convolutionWrapper_texCache((float*)&img.data[0], img.cols, img.rows, 4, 3, "texCache_register", true, "results/kernel3x3_size4x4_texCache_register.png"); convolutionWrapper_texCache((float*)&img.data[0], img.cols, img.rows, 5, 3, "texCache_register", true, "results/kernel3x3_size5x5_texCache_register.png"); convolutionWrapper_texCache((float*)&img.data[0], img.cols, img.rows, 6, 3, "texCache_register", true, "results/kernel3x3_size6x6_texCache_register.png"); convolutionWrapper_texCache((float*)&img.data[0], img.cols, img.rows, 7, 3, "texCache_register", true, "results/kernel3x3_size7x7_texCache_register.png"); convolutionWrapper_texCache((float*)&img.data[0], img.cols, img.rows, 4, 4, "texCache_register", true, "results/kernel4x4_size4x4_texCache_register.png"); convolutionWrapper_texCache((float*)&img.data[0], img.cols, img.rows, 5, 4, "texCache_register", true, "results/kernel4x4_size5x5_texCache_register.png"); convolutionWrapper_texCache((float*)&img.data[0], img.cols, img.rows, 6, 4, "texCache_register", true, "results/kernel4x4_size6x6_texCache_register.png"); convolutionWrapper_texCache((float*)&img.data[0], img.cols, img.rows, 7, 4, "texCache_register", true, "results/kernel4x4_size7x7_texCache_register.png"); convolutionWrapper_texCache((float*)&img.data[0], img.cols, img.rows, 5, 5, "texCache_register", true, "results/kernel5x5_size5x5_texCache_register.png"); convolutionWrapper_texCache((float*)&img.data[0], img.cols, img.rows, 6, 5, "texCache_register", true, "results/kernel5x5_size6x6_texCache_register.png"); convolutionWrapper_texCache((float*)&img.data[0], img.cols, img.rows, 7, 5, "texCache_register", true, "results/kernel5x5_size7x7_texCache_register.png"); convolutionWrapper_texCache((float*)&img.data[0], img.cols, img.rows, 6, 6, "texCache_register", true, "results/kernel6x6_size6x6_texCache_register.png"); convolutionWrapper_texCache((float*)&img.data[0], img.cols, img.rows, 7, 6, "texCache_register", true, "results/kernel6x6_size7x7_texCache_register.png"); convolutionWrapper_texCache((float*)&img.data[0], img.cols, img.rows, 7, 7, "texCache_register", true, "results/kernel7x7_size7x7_texCache_register.png"); } void testConvolution_withDummyImg(int height, int width) { float* img = getDummyImg(height, width); FILE * pFile = fopen("perf.txt", "w"); fprintf(pFile, "kernelSize amountToLoad memoryScheme responseTime\n"); int nRuns = 10; float responseTime = 0; responseTime = convolutionWrapper(img, width, height, 3, 3, "global_register", false); //warmup printf("memoryScheme = %s \n", "global_only"); for(int k=1; k<=4; k++) { int kernelSize=2*k+1; for(int sqrtConvsPerThread=1; sqrtConvsPerThread<8; sqrtConvsPerThread++) { int amountToLoad = sqrtConvsPerThread+kernelSize-1; //actually, prefetching nothing in this version responseTime = 0; for(int i=0; i<nRuns; i++) { float tmpTime = convolutionWrapper(img, width, height, amountToLoad, kernelSize, "global_only", false); responseTime += tmpTime; } responseTime = responseTime/nRuns; fprintf(pFile, "%d, %d, %s, %f \n", kernelSize, amountToLoad, "global_only", responseTime); printf("kernelSize = %d. amountToLoad = %d. 
time per Convolution = %f ms \n", kernelSize, amountToLoad, responseTime*1000.0); cudaDeviceSynchronize(); } printf("\n"); } printf("memoryScheme = %s \n", "global_register"); for(int k=1; k<=4; k++) { int kernelSize=2*k+1; for(int amountToLoad=kernelSize; amountToLoad<8; amountToLoad++) { responseTime = 0; for(int i=0; i<nRuns; i++) { float tmpTime = convolutionWrapper(img, width, height, amountToLoad, kernelSize, "global_register", false); responseTime += tmpTime; } responseTime = responseTime/nRuns; fprintf(pFile, "%d, %d, %s, %f \n", kernelSize, amountToLoad, "global_register", responseTime); printf("kernelSize = %d. amountToLoad = %d. time per Convolution = %f ms \n", kernelSize, amountToLoad, responseTime*1000.0); cudaDeviceSynchronize(); } printf("\n"); } printf("memoryScheme = %s \n", "texCache_only"); for(int k=1; k<=4; k++) { int kernelSize=2*k+1; int amountToLoad = kernelSize; responseTime = 0; for(int i=0; i<nRuns; i++) { float tmpTime = convolutionWrapper_texCache(img, width, height, amountToLoad, kernelSize, "texCache_only", false); responseTime += tmpTime; } responseTime = responseTime/nRuns; fprintf(pFile, "%d, %d, %s, %f \n", kernelSize, amountToLoad, "texCache_only", responseTime); printf("kernelSize = %d. amountToLoad = %d. time per Convolution = %f ms \n", kernelSize, amountToLoad, responseTime*1000.0); cudaDeviceSynchronize(); printf("\n"); } printf("memoryScheme = %s \n", "texCache_register"); for(int k=1; k<=4; k++) { int kernelSize=2*k+1; for(int amountToLoad=kernelSize; amountToLoad<8; amountToLoad++) { responseTime = 0; for(int i=0; i<nRuns; i++) { float tmpTime = convolutionWrapper_texCache(img, width, height, amountToLoad, kernelSize, "texCache_register", false); responseTime += tmpTime; } responseTime = responseTime/nRuns; fprintf(pFile, "%d, %d, %s, %f \n", kernelSize, amountToLoad, "texCache_register", responseTime); printf("kernelSize = %d. amountToLoad = %d. time per Convolution = %f ms \n", kernelSize, amountToLoad, responseTime*1000.0); cudaDeviceSynchronize(); } printf("\n"); } fclose(pFile); }
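The long runs of near-identical calls in testConvolution follow a fixed pattern: for each kernel size k from 2 to 7, the "global_only" block sweeps the per-thread tile size s from k to k+6 and encodes both in the output path. A loop-driven sketch of that block, assuming convolutionWrapper accepts a C string for the output path as the literals above suggest:

#include <cstdio>

// Generates the "global_only" sweep above; relies on the same convolution.h /
// OpenCV declarations as the file itself (a sketch, not a verified drop-in).
void testConvolutionGlobalOnly(cv::Mat& img) {
    for (int k = 2; k <= 7; k++) {           // kernel sizes 2x2 .. 7x7
        for (int s = k; s <= k + 6; s++) {   // amountToLoad values exercised above
            char path[128];
            snprintf(path, sizeof(path), "results/kernel%dx%d_size%dx%d_global_only.png", k, k, s, s);
            convolutionWrapper((float*)&img.data[0], img.cols, img.rows, s, k, "global_only", true, path);
        }
    }
}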
b0005aa642d37c6505c29e87ae88ff93b38b7f23.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" extern "C" __global__ void hgemm_tn_128x64( unsigned short* param_C, const unsigned short* param_A, const unsigned short* param_B, float param_alpha, float param_beta, int param_flags, int param_lda, int param_ldb, int param_ldc, int param_m, int param_n, int param_k, int param_ldaz, int param_ldbz, int param_ldcz, int param_batch_loops ) { __shared__ float share[128*8*2 + 64*8*2 + 4]; *param_C = share[0]; }
b0005aa642d37c6505c29e87ae88ff93b38b7f23.cu
extern "C" __global__ void hgemm_tn_128x64( unsigned short* param_C, const unsigned short* param_A, const unsigned short* param_B, float param_alpha, float param_beta, int param_flags, int param_lda, int param_ldb, int param_ldc, int param_m, int param_n, int param_k, int param_ldaz, int param_ldbz, int param_ldcz, int param_batch_loops ) { __shared__ float share[128*8*2 + 64*8*2 + 4]; *param_C = share[0]; }
715801997bfca530a1a389c0e105669d03ebd724.hip
// !!! This is a file automatically generated by hipify!!! #include "THHUNN.h" #include "im2col.h" void THNN_CudaSpatialConvolutionMM_updateOutput(THCState *state, THCudaTensor *input, THCudaTensor *output, THCudaTensor *weight, THCudaTensor *bias, THCudaTensor *columns, THCudaTensor *ones, int kW, int kH, int dW, int dH, int padW, int padH) { THAssert(THCudaTensor_checkGPU(state, 6, input, output, weight, bias, columns, ones)); THArgCheck(input->nDimension == 3 || input->nDimension == 4, 2, "3D or 4D (batch mode) tensor is expected"); THArgCheck(weight->nDimension == 2, 4, "weight tensor must be 2D (nOutputPlane,nInputPlane*kH*kW)"); THArgCheck(weight->size[0] == bias->size[0], 4, "nOutputPlane mismatch in weight and bias"); // Params: int nInputPlane = weight->size[1]/(kH*kW); int nOutputPlane = weight->size[0]; int batch = 1; if (input->nDimension == 3) { THArgCheck(input->size[0] == nInputPlane, 2, "input channels and nInputPlane dont match"); // Force batch batch = 0; THCudaTensor_resize4d(state, input, 1, input->size[0], input->size[1], input->size[2]); } else { THArgCheck(input->size[1] == nInputPlane, 2, "input channels and nInputPlane dont match"); } long inputWidth = input->size[3]; long inputHeight = input->size[2]; long outputWidth = (inputWidth + 2*padW - kW) / dW + 1; long outputHeight = (inputHeight + 2*padH - kH) / dH + 1; if (outputWidth < 1 || outputHeight < 1) THError("Given input size: (%dx%dx%d). Calculated output size: (%dx%dx%d). Output size is too small", nInputPlane,inputHeight,inputWidth,nOutputPlane,outputHeight,outputWidth); // Batch size + input planes long batchSize = input->size[0]; // Resize output THCudaTensor_resize4d(state, output, batchSize, nOutputPlane, outputHeight, outputWidth); // Resize temporary columns THCudaTensor_resize2d(state, columns, nInputPlane*kW*kH, outputHeight*outputWidth); // Define a buffer of ones, for bias accumulation // Note: this buffer can be shared with other modules, it only ever gets increased, // and always contains ones. if (ones->nDimension != 2 || ones->size[0]*ones->size[1] < outputHeight*outputWidth) { // Resize plane and fill with ones... 
THCudaTensor_resize2d(state, ones, outputHeight, outputWidth); THCudaTensor_fill(state, ones, 1); } // Helpers THCudaTensor *input_n = THCudaTensor_new(state); THCudaTensor *output_n = THCudaTensor_new(state); // For each elt in batch, do: for (int elt = 0; elt < batchSize; elt ++) { // Matrix mulitply per output: THCudaTensor_select(state, input_n, input, 0, elt); THCudaTensor_select(state, output_n, output, 0, elt); // Do Bias first: // M,N,K are dims of matrix A and B // (see http://docs.nvidia.com/cuda/cublas/#cublas-lt-t-gt-gemm) long m_ = nOutputPlane; long n_ = outputHeight * outputWidth; long k_ = 1; // Do GEMM (note: this is a bit confusing because gemm assumes column-major matrices) THCudaBlas_gemm( state, 't', 'n', n_, m_, k_, 1, THCudaTensor_data(state, ones), k_, THCudaTensor_data(state, bias), k_, 0, THCudaTensor_data(state, output_n), n_ ); // Extract columns: im2col( THCState_getCurrentStream(state), THCudaTensor_data(state, input_n), nInputPlane, inputHeight, inputWidth, kH, kW, padH, padW, dH, dW, THCudaTensor_data(state, columns) ); // M,N,K are dims of matrix A and B // (see http://docs.nvidia.com/cuda/cublas/#cublas-lt-t-gt-gemm) long m = nOutputPlane; long n = columns->size[1]; long k = nInputPlane*kH*kW; // Do GEMM (note: this is a bit confusing because gemm assumes column-major matrices) THCudaBlas_gemm( state, 'n', 'n', n, m, k, 1, THCudaTensor_data(state, columns), n, THCudaTensor_data(state, weight), k, 1, THCudaTensor_data(state, output_n), n ); } // Free THCudaTensor_free(state, input_n); THCudaTensor_free(state, output_n); // Resize output if (batch == 0) { THCudaTensor_resize3d(state, output, nOutputPlane, outputHeight, outputWidth); THCudaTensor_resize3d(state, input, nInputPlane, inputHeight, inputWidth); } } void THNN_CudaSpatialConvolutionMM_updateGradInput(THCState *state, THCudaTensor *input, THCudaTensor *gradOutput, THCudaTensor *gradInput, THCudaTensor *weight, THCudaTensor *bias, THCudaTensor *gradColumns, THCudaTensor *ones, int kW, int kH, int dW, int dH, int padW, int padH) { THAssert(THCudaTensor_checkGPU(state, 5, input, gradOutput, weight, gradColumns, gradInput)); THArgCheck(input->nDimension == 3 || input->nDimension == 4, 2, "3D or 4D (batch mode) tensor is expected"); THArgCheck(weight->nDimension == 2, 4, "weight tensor must be 2D (nOutputPlane,nInputPlane*kH*kW)"); THArgCheck(weight->size[0] == bias->size[0], 4, "nOutputPlane mismatch in weight and bias"); // Params int nInputPlane = weight->size[1]/(kW*kH); int nOutputPlane = weight->size[0]; int batch = 1; if (input->nDimension == 3) { // Force batch batch = 0; THCudaTensor_resize4d(state, input, 1, input->size[0], input->size[1], input->size[2]); THCudaTensor_resize4d(state, gradOutput, 1, gradOutput->size[0], gradOutput->size[1], gradOutput->size[2]); } long inputWidth = input->size[3]; long inputHeight = input->size[2]; long outputWidth = (inputWidth + 2*padW - kW) / dW + 1; long outputHeight = (inputHeight + 2*padH - kH) / dH + 1; // Batch size + input planes long batchSize = input->size[0]; // Resize output THCudaTensor_resize4d(state, gradInput, batchSize, nInputPlane, inputHeight, inputWidth); // Resize temporary columns THCudaTensor_resize2d(state, gradColumns, nInputPlane*kW*kH, outputHeight*outputWidth); // Helpers THCudaTensor *gradInput_n = THCudaTensor_new(state); THCudaTensor *gradOutput_n = THCudaTensor_new(state); // For each elt in batch, do: for (int elt = 0; elt < batchSize; elt ++) { // Matrix mulitply per sample: THCudaTensor_select(state, gradInput_n, gradInput, 
0, elt); THCudaTensor_select(state, gradOutput_n, gradOutput, 0, elt); // M,N,K are dims of matrix A and B // (see http://docs.nvidia.com/cuda/cublas/#cublas-lt-t-gt-gemm) long m = nInputPlane*kW*kH; long n = gradColumns->size[1]; long k = nOutputPlane; // Do GEMM (note: this is a bit confusing because gemm assumes column-major matrices) THCudaBlas_gemm( state, 'n', 't', n, m, k, 1, THCudaTensor_data(state, gradOutput_n), n, THCudaTensor_data(state, weight), m, 0, THCudaTensor_data(state, gradColumns), n ); // Unpack columns back into input: col2im( THCState_getCurrentStream(state), THCudaTensor_data(state, gradColumns), nInputPlane, inputHeight, inputWidth, kH, kW, padH, padW, dH, dW, THCudaTensor_data(state, gradInput_n) ); } // Free THCudaTensor_free(state, gradInput_n); THCudaTensor_free(state, gradOutput_n); // Resize output if (batch == 0) { THCudaTensor_resize3d(state, gradOutput, nOutputPlane, outputHeight, outputWidth); THCudaTensor_resize3d(state, input, nInputPlane, inputHeight, inputWidth); THCudaTensor_resize3d(state, gradInput, nInputPlane, inputHeight, inputWidth); } } void THNN_CudaSpatialConvolutionMM_accGradParameters(THCState *state, THCudaTensor *input, THCudaTensor *gradOutput, THCudaTensor *gradWeight, THCudaTensor *gradBias, THCudaTensor *columns, THCudaTensor *ones, int kW, int kH, int dW, int dH, int padW, int padH, float scale) { THAssert(THCudaTensor_checkGPU(state, 6, input, gradOutput, gradWeight, gradBias, columns, ones)); THArgCheck(input->nDimension == 3 || input->nDimension == 4, 2, "3D or 4D (batch mode) tensor is expected"); THArgCheck(gradWeight->nDimension == 2, 4, "gradWeight tensor must be 2D (nOutputPlane,nInputPlane*kH*kW)"); THArgCheck(gradWeight->size[0] == gradBias->size[0], 4, "nOutputPlane mismatch in gradWeight and gradBias"); // Params int nInputPlane = gradWeight->size[1]/(kW*kH); int nOutputPlane = gradWeight->size[0]; int batch = 1; if (input->nDimension == 3) { // Force batch batch = 0; THCudaTensor_resize4d(state, input, 1, input->size[0], input->size[1], input->size[2]); THCudaTensor_resize4d(state, gradOutput, 1, gradOutput->size[0], gradOutput->size[1], gradOutput->size[2]); } long inputWidth = input->size[3]; long inputHeight = input->size[2]; long outputWidth = (inputWidth + 2*padW - kW) / dW + 1; long outputHeight = (inputHeight + 2*padH - kH) / dH + 1; // Batch size + input planes long batchSize = input->size[0]; // Define a buffer of ones, for bias accumulation if (ones->nDimension != 2 || ones->size[0]*ones->size[1] < outputHeight*outputWidth) { // Resize plane and fill with ones... 
THCudaTensor_resize2d(state, ones, outputHeight, outputWidth); THCudaTensor_fill(state, ones, 1); } // Resize temporary columns THCudaTensor_resize2d(state, columns, nInputPlane*kW*kH, outputHeight*outputWidth); // Helpers THCudaTensor *input_n = THCudaTensor_new(state); THCudaTensor *gradOutput_n = THCudaTensor_new(state); // For each elt in batch, do: for (int elt = 0; elt < batchSize; elt ++) { // Matrix mulitply per output: THCudaTensor_select(state, input_n, input, 0, elt); THCudaTensor_select(state, gradOutput_n, gradOutput, 0, elt); // Extract columns: im2col( THCState_getCurrentStream(state), THCudaTensor_data(state, input_n), nInputPlane, inputHeight, inputWidth, kH, kW, padH, padW, dH, dW, THCudaTensor_data(state, columns) ); // M,N,K are dims of matrix A and B // (see http://docs.nvidia.com/cuda/cublas/#cublas-lt-t-gt-gemm) long m = nOutputPlane; long n = nInputPlane*kW*kH; long k = columns->size[1]; // Do GEMM (note: this is a bit confusing because gemm assumes column-major matrices) THCudaBlas_gemm( state, 't', 'n', n, m, k, scale, THCudaTensor_data(state, columns), k, THCudaTensor_data(state, gradOutput_n), k, 1, THCudaTensor_data(state, gradWeight), n ); // Do Bias: // M,N,K are dims of matrix A and B // (see http://docs.nvidia.com/cuda/cublas/#cublas-lt-t-gt-gemm) long m_ = nOutputPlane; long k_ = outputHeight * outputWidth; // Do GEMV (note: this is a bit confusing because gemv assumes column-major matrices) THCudaBlas_gemv( state, 't', k_, m_, scale, THCudaTensor_data(state, gradOutput_n), k_, THCudaTensor_data(state, ones), 1, 1, THCudaTensor_data(state, gradBias), 1 ); } // Free THCudaTensor_free(state, input_n); THCudaTensor_free(state, gradOutput_n); // Resize if (batch == 0) { THCudaTensor_resize3d(state, gradOutput, nOutputPlane, outputHeight, outputWidth); THCudaTensor_resize3d(state, input, nInputPlane, inputHeight, inputWidth); } }
715801997bfca530a1a389c0e105669d03ebd724.cu
#include "THCUNN.h" #include "im2col.h" void THNN_CudaSpatialConvolutionMM_updateOutput(THCState *state, THCudaTensor *input, THCudaTensor *output, THCudaTensor *weight, THCudaTensor *bias, THCudaTensor *columns, THCudaTensor *ones, int kW, int kH, int dW, int dH, int padW, int padH) { THAssert(THCudaTensor_checkGPU(state, 6, input, output, weight, bias, columns, ones)); THArgCheck(input->nDimension == 3 || input->nDimension == 4, 2, "3D or 4D (batch mode) tensor is expected"); THArgCheck(weight->nDimension == 2, 4, "weight tensor must be 2D (nOutputPlane,nInputPlane*kH*kW)"); THArgCheck(weight->size[0] == bias->size[0], 4, "nOutputPlane mismatch in weight and bias"); // Params: int nInputPlane = weight->size[1]/(kH*kW); int nOutputPlane = weight->size[0]; int batch = 1; if (input->nDimension == 3) { THArgCheck(input->size[0] == nInputPlane, 2, "input channels and nInputPlane dont match"); // Force batch batch = 0; THCudaTensor_resize4d(state, input, 1, input->size[0], input->size[1], input->size[2]); } else { THArgCheck(input->size[1] == nInputPlane, 2, "input channels and nInputPlane dont match"); } long inputWidth = input->size[3]; long inputHeight = input->size[2]; long outputWidth = (inputWidth + 2*padW - kW) / dW + 1; long outputHeight = (inputHeight + 2*padH - kH) / dH + 1; if (outputWidth < 1 || outputHeight < 1) THError("Given input size: (%dx%dx%d). Calculated output size: (%dx%dx%d). Output size is too small", nInputPlane,inputHeight,inputWidth,nOutputPlane,outputHeight,outputWidth); // Batch size + input planes long batchSize = input->size[0]; // Resize output THCudaTensor_resize4d(state, output, batchSize, nOutputPlane, outputHeight, outputWidth); // Resize temporary columns THCudaTensor_resize2d(state, columns, nInputPlane*kW*kH, outputHeight*outputWidth); // Define a buffer of ones, for bias accumulation // Note: this buffer can be shared with other modules, it only ever gets increased, // and always contains ones. if (ones->nDimension != 2 || ones->size[0]*ones->size[1] < outputHeight*outputWidth) { // Resize plane and fill with ones... 
THCudaTensor_resize2d(state, ones, outputHeight, outputWidth); THCudaTensor_fill(state, ones, 1); } // Helpers THCudaTensor *input_n = THCudaTensor_new(state); THCudaTensor *output_n = THCudaTensor_new(state); // For each elt in batch, do: for (int elt = 0; elt < batchSize; elt ++) { // Matrix mulitply per output: THCudaTensor_select(state, input_n, input, 0, elt); THCudaTensor_select(state, output_n, output, 0, elt); // Do Bias first: // M,N,K are dims of matrix A and B // (see http://docs.nvidia.com/cuda/cublas/#cublas-lt-t-gt-gemm) long m_ = nOutputPlane; long n_ = outputHeight * outputWidth; long k_ = 1; // Do GEMM (note: this is a bit confusing because gemm assumes column-major matrices) THCudaBlas_gemm( state, 't', 'n', n_, m_, k_, 1, THCudaTensor_data(state, ones), k_, THCudaTensor_data(state, bias), k_, 0, THCudaTensor_data(state, output_n), n_ ); // Extract columns: im2col( THCState_getCurrentStream(state), THCudaTensor_data(state, input_n), nInputPlane, inputHeight, inputWidth, kH, kW, padH, padW, dH, dW, THCudaTensor_data(state, columns) ); // M,N,K are dims of matrix A and B // (see http://docs.nvidia.com/cuda/cublas/#cublas-lt-t-gt-gemm) long m = nOutputPlane; long n = columns->size[1]; long k = nInputPlane*kH*kW; // Do GEMM (note: this is a bit confusing because gemm assumes column-major matrices) THCudaBlas_gemm( state, 'n', 'n', n, m, k, 1, THCudaTensor_data(state, columns), n, THCudaTensor_data(state, weight), k, 1, THCudaTensor_data(state, output_n), n ); } // Free THCudaTensor_free(state, input_n); THCudaTensor_free(state, output_n); // Resize output if (batch == 0) { THCudaTensor_resize3d(state, output, nOutputPlane, outputHeight, outputWidth); THCudaTensor_resize3d(state, input, nInputPlane, inputHeight, inputWidth); } } void THNN_CudaSpatialConvolutionMM_updateGradInput(THCState *state, THCudaTensor *input, THCudaTensor *gradOutput, THCudaTensor *gradInput, THCudaTensor *weight, THCudaTensor *bias, THCudaTensor *gradColumns, THCudaTensor *ones, int kW, int kH, int dW, int dH, int padW, int padH) { THAssert(THCudaTensor_checkGPU(state, 5, input, gradOutput, weight, gradColumns, gradInput)); THArgCheck(input->nDimension == 3 || input->nDimension == 4, 2, "3D or 4D (batch mode) tensor is expected"); THArgCheck(weight->nDimension == 2, 4, "weight tensor must be 2D (nOutputPlane,nInputPlane*kH*kW)"); THArgCheck(weight->size[0] == bias->size[0], 4, "nOutputPlane mismatch in weight and bias"); // Params int nInputPlane = weight->size[1]/(kW*kH); int nOutputPlane = weight->size[0]; int batch = 1; if (input->nDimension == 3) { // Force batch batch = 0; THCudaTensor_resize4d(state, input, 1, input->size[0], input->size[1], input->size[2]); THCudaTensor_resize4d(state, gradOutput, 1, gradOutput->size[0], gradOutput->size[1], gradOutput->size[2]); } long inputWidth = input->size[3]; long inputHeight = input->size[2]; long outputWidth = (inputWidth + 2*padW - kW) / dW + 1; long outputHeight = (inputHeight + 2*padH - kH) / dH + 1; // Batch size + input planes long batchSize = input->size[0]; // Resize output THCudaTensor_resize4d(state, gradInput, batchSize, nInputPlane, inputHeight, inputWidth); // Resize temporary columns THCudaTensor_resize2d(state, gradColumns, nInputPlane*kW*kH, outputHeight*outputWidth); // Helpers THCudaTensor *gradInput_n = THCudaTensor_new(state); THCudaTensor *gradOutput_n = THCudaTensor_new(state); // For each elt in batch, do: for (int elt = 0; elt < batchSize; elt ++) { // Matrix mulitply per sample: THCudaTensor_select(state, gradInput_n, gradInput, 
0, elt); THCudaTensor_select(state, gradOutput_n, gradOutput, 0, elt); // M,N,K are dims of matrix A and B // (see http://docs.nvidia.com/cuda/cublas/#cublas-lt-t-gt-gemm) long m = nInputPlane*kW*kH; long n = gradColumns->size[1]; long k = nOutputPlane; // Do GEMM (note: this is a bit confusing because gemm assumes column-major matrices) THCudaBlas_gemm( state, 'n', 't', n, m, k, 1, THCudaTensor_data(state, gradOutput_n), n, THCudaTensor_data(state, weight), m, 0, THCudaTensor_data(state, gradColumns), n ); // Unpack columns back into input: col2im( THCState_getCurrentStream(state), THCudaTensor_data(state, gradColumns), nInputPlane, inputHeight, inputWidth, kH, kW, padH, padW, dH, dW, THCudaTensor_data(state, gradInput_n) ); } // Free THCudaTensor_free(state, gradInput_n); THCudaTensor_free(state, gradOutput_n); // Resize output if (batch == 0) { THCudaTensor_resize3d(state, gradOutput, nOutputPlane, outputHeight, outputWidth); THCudaTensor_resize3d(state, input, nInputPlane, inputHeight, inputWidth); THCudaTensor_resize3d(state, gradInput, nInputPlane, inputHeight, inputWidth); } } void THNN_CudaSpatialConvolutionMM_accGradParameters(THCState *state, THCudaTensor *input, THCudaTensor *gradOutput, THCudaTensor *gradWeight, THCudaTensor *gradBias, THCudaTensor *columns, THCudaTensor *ones, int kW, int kH, int dW, int dH, int padW, int padH, float scale) { THAssert(THCudaTensor_checkGPU(state, 6, input, gradOutput, gradWeight, gradBias, columns, ones)); THArgCheck(input->nDimension == 3 || input->nDimension == 4, 2, "3D or 4D (batch mode) tensor is expected"); THArgCheck(gradWeight->nDimension == 2, 4, "gradWeight tensor must be 2D (nOutputPlane,nInputPlane*kH*kW)"); THArgCheck(gradWeight->size[0] == gradBias->size[0], 4, "nOutputPlane mismatch in gradWeight and gradBias"); // Params int nInputPlane = gradWeight->size[1]/(kW*kH); int nOutputPlane = gradWeight->size[0]; int batch = 1; if (input->nDimension == 3) { // Force batch batch = 0; THCudaTensor_resize4d(state, input, 1, input->size[0], input->size[1], input->size[2]); THCudaTensor_resize4d(state, gradOutput, 1, gradOutput->size[0], gradOutput->size[1], gradOutput->size[2]); } long inputWidth = input->size[3]; long inputHeight = input->size[2]; long outputWidth = (inputWidth + 2*padW - kW) / dW + 1; long outputHeight = (inputHeight + 2*padH - kH) / dH + 1; // Batch size + input planes long batchSize = input->size[0]; // Define a buffer of ones, for bias accumulation if (ones->nDimension != 2 || ones->size[0]*ones->size[1] < outputHeight*outputWidth) { // Resize plane and fill with ones... 
THCudaTensor_resize2d(state, ones, outputHeight, outputWidth); THCudaTensor_fill(state, ones, 1); } // Resize temporary columns THCudaTensor_resize2d(state, columns, nInputPlane*kW*kH, outputHeight*outputWidth); // Helpers THCudaTensor *input_n = THCudaTensor_new(state); THCudaTensor *gradOutput_n = THCudaTensor_new(state); // For each elt in batch, do: for (int elt = 0; elt < batchSize; elt ++) { // Matrix mulitply per output: THCudaTensor_select(state, input_n, input, 0, elt); THCudaTensor_select(state, gradOutput_n, gradOutput, 0, elt); // Extract columns: im2col( THCState_getCurrentStream(state), THCudaTensor_data(state, input_n), nInputPlane, inputHeight, inputWidth, kH, kW, padH, padW, dH, dW, THCudaTensor_data(state, columns) ); // M,N,K are dims of matrix A and B // (see http://docs.nvidia.com/cuda/cublas/#cublas-lt-t-gt-gemm) long m = nOutputPlane; long n = nInputPlane*kW*kH; long k = columns->size[1]; // Do GEMM (note: this is a bit confusing because gemm assumes column-major matrices) THCudaBlas_gemm( state, 't', 'n', n, m, k, scale, THCudaTensor_data(state, columns), k, THCudaTensor_data(state, gradOutput_n), k, 1, THCudaTensor_data(state, gradWeight), n ); // Do Bias: // M,N,K are dims of matrix A and B // (see http://docs.nvidia.com/cuda/cublas/#cublas-lt-t-gt-gemm) long m_ = nOutputPlane; long k_ = outputHeight * outputWidth; // Do GEMV (note: this is a bit confusing because gemv assumes column-major matrices) THCudaBlas_gemv( state, 't', k_, m_, scale, THCudaTensor_data(state, gradOutput_n), k_, THCudaTensor_data(state, ones), 1, 1, THCudaTensor_data(state, gradBias), 1 ); } // Free THCudaTensor_free(state, input_n); THCudaTensor_free(state, gradOutput_n); // Resize if (batch == 0) { THCudaTensor_resize3d(state, gradOutput, nOutputPlane, outputHeight, outputWidth); THCudaTensor_resize3d(state, input, nInputPlane, inputHeight, inputWidth); } }
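Every path in the pair above derives the output extent from the same expression, outputWidth = (inputWidth + 2*padW - kW) / dW + 1 (and likewise for height), and sizes the im2col buffer as nInputPlane*kW*kH by outputHeight*outputWidth. A small worked check of that formula; the concrete shapes are illustrative, not from the dataset:

#include <cstdio>

// Output extent of a convolution along one dimension, as computed in the file above.
static long convOutDim(long in, long pad, long k, long stride) {
    return (in + 2 * pad - k) / stride + 1;
}

int main() {
    printf("%ld\n", convOutDim(224, 1, 3, 1));  // 3x3 kernel, stride 1, pad 1: 224 -> 224 ("same")
    printf("%ld\n", convOutDim(224, 3, 7, 2));  // 7x7 kernel, stride 2, pad 3: 224 -> 112
    return 0;
}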
1c0037cc93e2bab9b32120435f1cdc36f4d9d02d.hip
// !!! This is a file automatically generated by hipify!!!
/*****************************************************
 *
 * Gaussian elimination
 *
 * GPU version (ported from the sequential original)
 *
 *****************************************************/
// Compile and then...
// Example run 1: gauseq.exe -P 1 -I fast -n 16
// Example run 2: gauseq.exe -P 0 -I rand -n 2048

#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define MAX_SIZE 512

// If NUM_THREADS is lower than BLOCK_SIZE, the program still runs with BLOCK_SIZE threads
#define NUM_THREADS 512
#define BLOCK_SIZE 128

int N;                                       /* matrix size */
int maxnum;                                  /* max number of element */
const char *Init;                            /* matrix init type */
int PRINT;                                   /* print switch */
double* A = new double[MAX_SIZE * MAX_SIZE]; /* matrix A */
double* y = new double[MAX_SIZE];            /* vector y */

/* forward declarations */
void work_gpu(void);
void Init_Matrix(void);
void Print_Matrix(void);
void Init_Default(void);
int Read_Options(int, char **);
int matrixAt(int x, int y);

__device__ int d_matrixAt(int x, int y)
{
    return x * MAX_SIZE + y;
}

__global__ void GausElimination(double A[MAX_SIZE * MAX_SIZE], double y[MAX_SIZE], int k, int threads)
{
    // Shared memory for the k:th row
    __shared__ double k_row[MAX_SIZE];
    int thread = threadIdx.x;
    // The whole block cooperatively loads the k:th row into shared memory
    while (thread < MAX_SIZE)
    {
        k_row[thread] = A[d_matrixAt(thread, k)];
        thread += BLOCK_SIZE;
    }
    // Wait for all threads to finish loading shared memory
    __syncthreads();

    int idx = threadIdx.x + blockDim.x * blockIdx.x;
    int j = idx + k + 1;
    double p = k_row[k];
    // As k increases, fewer threads are needed.
    while (j < MAX_SIZE)
    {
        // elimination factor for this row
        double f = A[d_matrixAt(k, j)] / p;
        for (int i = k; i < MAX_SIZE; i++)
        {
            A[d_matrixAt(i, j)] = A[d_matrixAt(i, j)] - k_row[i] * f;
        }
        y[j] = y[j] - y[k] * f;
        j += threads;
    }
}

__global__ void GausDivide(double A[MAX_SIZE * MAX_SIZE], double y[MAX_SIZE], int k, int threads)
{
    int idx = threadIdx.x + blockDim.x * blockIdx.x;
    int i = idx + k;
    double p = A[d_matrixAt(k, k)];
    // Let one thread divide the y value
    if (idx == 0)
    {
        y[k] = y[k] / p;
    }
    // thread coarsening: each thread handles several elements
    while (i < MAX_SIZE)
    {
        // Divide the k:th row by its pivot
        A[d_matrixAt(i, k)] = A[d_matrixAt(i, k)] / p;
        i += threads;
    }
}

void GausBack()
{
    // Back-substitute to recover y
    for (int k = MAX_SIZE - 1; k >= 0; k--)
    {
        for (int j = k - 1; j >= 0; j--)
        {
            y[j] = y[j] - y[k] * A[matrixAt(k, j)];
            A[matrixAt(k, j)] = 0.0;
        }
    }
}

int matrixAt(int x, int y)
{
    return x * MAX_SIZE + y;
}

void work_cpu()
{
    for (int k = 0; k < MAX_SIZE; k++)
    {
        // Save the pivot value (double, not float: the matrix data is double precision)
        double p = A[matrixAt(k, k)];
        // (i, j) is (x, y)
        for (int j = k + 1; j < MAX_SIZE; j++)
        {
            // elimination factor for this row
            double f = A[matrixAt(k, j)] / p;
            for (int i = k; i < MAX_SIZE; i++)
            {
                A[matrixAt(i, j)] = A[matrixAt(i, j)] - A[matrixAt(i, k)] * f;
            }
            y[j] = y[j] - y[k] * f;
        }
        // Divide the k:th row by its pivot
        for (int i = k; i < MAX_SIZE; i++)
        {
            A[matrixAt(i, k)] = A[matrixAt(i, k)] / p;
        }
        y[k] = y[k] / p;
    }
}

void work_gpu(void)
{
    double* d_A;
    double* d_y;
    int sizeY = sizeof(double) * MAX_SIZE;
    int sizeA = sizeY * MAX_SIZE;
    hipMalloc((void**)&d_A, sizeA);
    hipMalloc((void**)&d_y, sizeY);
    hipMemcpy(d_A, A, sizeA, hipMemcpyHostToDevice);
    hipMemcpy(d_y, y, sizeY, hipMemcpyHostToDevice);

    int blocks = (NUM_THREADS + BLOCK_SIZE - 1) / BLOCK_SIZE;
    int threads = blocks * BLOCK_SIZE;

    // Gaussian elimination algorithm.
    // Note: the kernels always sweep the full MAX_SIZE matrix; -n only
    // affects initialization and printing.
    for (int k = 0; k < MAX_SIZE; k++) // Outer loop
    {
        // Max number of blocks is 65535, so this launch configuration
        // cannot handle N > 65536.
        GausElimination<<<blocks, BLOCK_SIZE>>>(d_A, d_y, k, threads);
        GausDivide<<<blocks, BLOCK_SIZE>>>(d_A, d_y, k, threads);
    }

    hipMemcpy(A, d_A, sizeA, hipMemcpyDeviceToHost);
    hipMemcpy(y, d_y, sizeY, hipMemcpyDeviceToHost);
    hipFree(d_A);
    hipFree(d_y);
}

int main(int argc, char **argv)
{
    hipDeviceProp_t prop;
    hipGetDeviceProperties(&prop, 0);

    Init_Default();           /* Init default values */
    Read_Options(argc, argv); /* Read arguments */
    Init_Matrix();            /* Init the matrix */
    //work_cpu();
    work_gpu();
    // For fun, not really necessary, but we get the identity matrix
    //GausBack();
    if (PRINT == 1)
        Print_Matrix();
    return 0;
}

void Init_Matrix()
{
    int i, j;
    if (strcmp(Init, "rand") == 0)
    {
        for (i = 0; i < N; i++)
        {
            for (j = 0; j < N; j++)
            {
                if (i == j) /* diagonal dominance */
                    A[matrixAt(j, i)] = (double)(rand() % maxnum) + 5.0;
                else
                    A[matrixAt(j, i)] = (double)(rand() % maxnum) + 1.0;
            }
        }
    }
    if (strcmp(Init, "fast") == 0)
    {
        for (i = 0; i < N; i++)
        {
            for (j = 0; j < N; j++)
            {
                if (i == j) /* diagonal dominance */
                    A[matrixAt(j, i)] = 5.0;
                else
                    A[matrixAt(j, i)] = 2.0;
            }
        }
    }
    /* Initialize vector y */
    for (i = 0; i < N; i++)
    {
        y[i] = 2.0;
    }
    if (PRINT == 1)
        Print_Matrix();
}

void Print_Matrix()
{
    int i, j;
    bool printA = false;
    if (printA)
    {
        printf("Matrix A:\n");
        for (i = 0; i < N; i++)
        {
            printf("[");
            for (j = 0; j < N; j++)
                printf(" %5.2f,", A[matrixAt(j, i)]);
            printf("]\n");
        }
    }
    printf("Vector y:\n[");
    for (j = 0; j < N; j++)
        printf(" %5.2f,", y[j]);
    printf("]\n");
    printf("\n\n");
}

void Init_Default()
{
    N = MAX_SIZE;
    Init = "rand";
    maxnum = 15;
    PRINT = 1;
}

int Read_Options(int argc, char **argv)
{
    char *prog = *argv;
    while (++argv, --argc > 0)
        if (**argv == '-')
            switch (*++*argv)
            {
            case 'n':
                --argc;
                N = atoi(*++argv);
                break;
            case 'h':
                printf("\nHELP: try gauseq -u \n\n");
                exit(0);
                break;
            case 'u':
                printf("\nUsage: gauseq [-n problemsize]\n");
                printf("              [-D] show default values \n");
                printf("              [-h] help \n");
                printf("              [-I init_type] fast/rand \n");
                printf("              [-m maxnum] max random no \n");
                printf("              [-P print_switch] 0/1 \n");
                exit(0);
                break;
            case 'D':
                printf("\nDefault:  n      = %d ", N);
                printf("\n          Init   = rand");
                printf("\n          maxnum = %d ", maxnum);
                printf("\n          P      = %d \n\n", PRINT);
                exit(0);
                break;
            case 'I':
                --argc;
                Init = *++argv;
                break;
            case 'm':
                --argc;
                maxnum = atoi(*++argv);
                break;
            case 'P':
                --argc;
                PRINT = atoi(*++argv);
                break;
            default:
                printf("%s: ignored option: -%s\n", prog, *argv);
                printf("HELP: try %s -u \n\n", prog);
                break;
            }
    return 0;
}
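// Editor's note (addition, not part of the original file): as written, work_gpu()
// and both kernels always sweep the full MAX_SIZE x MAX_SIZE buffer, so -n only
// affects initialization and printing. A minimal sketch of how GausDivide could
// honor the runtime size instead -- the extra `n` parameter and the kernel name
// GausDivideN are the editor's, not part of the original program:
__global__ void GausDivideN(double *A, double *y, int k, int threads, int n)
{
    int idx = threadIdx.x + blockDim.x * blockIdx.x;
    double p = A[d_matrixAt(k, k)];
    if (idx == 0) y[k] = y[k] / p;                      // one thread normalizes the y entry
    for (int i = idx + k; i < n; i += threads)
        A[d_matrixAt(i, k)] = A[d_matrixAt(i, k)] / p;  // bound by n, not MAX_SIZE
}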
1c0037cc93e2bab9b32120435f1cdc36f4d9d02d.cu
/*****************************************************
 *
 * Gaussian elimination
 *
 * GPU version (ported from the sequential original)
 *
 *****************************************************/
// Compile and then...
// Example run 1: gauseq.exe -P 1 -I fast -n 16
// Example run 2: gauseq.exe -P 0 -I rand -n 2048

#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define MAX_SIZE 512

// If NUM_THREADS is lower than BLOCK_SIZE, the program still runs with BLOCK_SIZE threads
#define NUM_THREADS 512
#define BLOCK_SIZE 128

int N;                                       /* matrix size */
int maxnum;                                  /* max number of element */
const char *Init;                            /* matrix init type */
int PRINT;                                   /* print switch */
double* A = new double[MAX_SIZE * MAX_SIZE]; /* matrix A */
double* y = new double[MAX_SIZE];            /* vector y */

/* forward declarations */
void work_gpu(void);
void Init_Matrix(void);
void Print_Matrix(void);
void Init_Default(void);
int Read_Options(int, char **);
int matrixAt(int x, int y);

__device__ int d_matrixAt(int x, int y)
{
    return x * MAX_SIZE + y;
}

__global__ void GausElimination(double A[MAX_SIZE * MAX_SIZE], double y[MAX_SIZE], int k, int threads)
{
    // Shared memory for the k:th row
    __shared__ double k_row[MAX_SIZE];
    int thread = threadIdx.x;
    // The whole block cooperatively loads the k:th row into shared memory
    while (thread < MAX_SIZE)
    {
        k_row[thread] = A[d_matrixAt(thread, k)];
        thread += BLOCK_SIZE;
    }
    // Wait for all threads to finish loading shared memory
    __syncthreads();

    int idx = threadIdx.x + blockDim.x * blockIdx.x;
    int j = idx + k + 1;
    double p = k_row[k];
    // As k increases, fewer threads are needed.
    while (j < MAX_SIZE)
    {
        // elimination factor for this row
        double f = A[d_matrixAt(k, j)] / p;
        for (int i = k; i < MAX_SIZE; i++)
        {
            A[d_matrixAt(i, j)] = A[d_matrixAt(i, j)] - k_row[i] * f;
        }
        y[j] = y[j] - y[k] * f;
        j += threads;
    }
}

__global__ void GausDivide(double A[MAX_SIZE * MAX_SIZE], double y[MAX_SIZE], int k, int threads)
{
    int idx = threadIdx.x + blockDim.x * blockIdx.x;
    int i = idx + k;
    double p = A[d_matrixAt(k, k)];
    // Let one thread divide the y value
    if (idx == 0)
    {
        y[k] = y[k] / p;
    }
    // thread coarsening: each thread handles several elements
    while (i < MAX_SIZE)
    {
        // Divide the k:th row by its pivot
        A[d_matrixAt(i, k)] = A[d_matrixAt(i, k)] / p;
        i += threads;
    }
}

void GausBack()
{
    // Back-substitute to recover y
    for (int k = MAX_SIZE - 1; k >= 0; k--)
    {
        for (int j = k - 1; j >= 0; j--)
        {
            y[j] = y[j] - y[k] * A[matrixAt(k, j)];
            A[matrixAt(k, j)] = 0.0;
        }
    }
}

int matrixAt(int x, int y)
{
    return x * MAX_SIZE + y;
}

void work_cpu()
{
    for (int k = 0; k < MAX_SIZE; k++)
    {
        // Save the pivot value (double, not float: the matrix data is double precision)
        double p = A[matrixAt(k, k)];
        // (i, j) is (x, y)
        for (int j = k + 1; j < MAX_SIZE; j++)
        {
            // elimination factor for this row
            double f = A[matrixAt(k, j)] / p;
            for (int i = k; i < MAX_SIZE; i++)
            {
                A[matrixAt(i, j)] = A[matrixAt(i, j)] - A[matrixAt(i, k)] * f;
            }
            y[j] = y[j] - y[k] * f;
        }
        // Divide the k:th row by its pivot
        for (int i = k; i < MAX_SIZE; i++)
        {
            A[matrixAt(i, k)] = A[matrixAt(i, k)] / p;
        }
        y[k] = y[k] / p;
    }
}

void work_gpu(void)
{
    double* d_A;
    double* d_y;
    int sizeY = sizeof(double) * MAX_SIZE;
    int sizeA = sizeY * MAX_SIZE;
    cudaMalloc((void**)&d_A, sizeA);
    cudaMalloc((void**)&d_y, sizeY);
    cudaMemcpy(d_A, A, sizeA, cudaMemcpyHostToDevice);
    cudaMemcpy(d_y, y, sizeY, cudaMemcpyHostToDevice);

    int blocks = (NUM_THREADS + BLOCK_SIZE - 1) / BLOCK_SIZE;
    int threads = blocks * BLOCK_SIZE;

    // Gaussian elimination algorithm.
    // Note: the kernels always sweep the full MAX_SIZE matrix; -n only
    // affects initialization and printing.
    for (int k = 0; k < MAX_SIZE; k++) // Outer loop
    {
        // Max number of blocks is 65535, so this launch configuration
        // cannot handle N > 65536.
        GausElimination<<<blocks, BLOCK_SIZE>>>(d_A, d_y, k, threads);
        GausDivide<<<blocks, BLOCK_SIZE>>>(d_A, d_y, k, threads);
    }

    cudaMemcpy(A, d_A, sizeA, cudaMemcpyDeviceToHost);
    cudaMemcpy(y, d_y, sizeY, cudaMemcpyDeviceToHost);
    cudaFree(d_A);
    cudaFree(d_y);
}

int main(int argc, char **argv)
{
    cudaDeviceProp prop;
    cudaGetDeviceProperties(&prop, 0);

    Init_Default();           /* Init default values */
    Read_Options(argc, argv); /* Read arguments */
    Init_Matrix();            /* Init the matrix */
    //work_cpu();
    work_gpu();
    // For fun, not really necessary, but we get the identity matrix
    //GausBack();
    if (PRINT == 1)
        Print_Matrix();
    return 0;
}

void Init_Matrix()
{
    int i, j;
    if (strcmp(Init, "rand") == 0)
    {
        for (i = 0; i < N; i++)
        {
            for (j = 0; j < N; j++)
            {
                if (i == j) /* diagonal dominance */
                    A[matrixAt(j, i)] = (double)(rand() % maxnum) + 5.0;
                else
                    A[matrixAt(j, i)] = (double)(rand() % maxnum) + 1.0;
            }
        }
    }
    if (strcmp(Init, "fast") == 0)
    {
        for (i = 0; i < N; i++)
        {
            for (j = 0; j < N; j++)
            {
                if (i == j) /* diagonal dominance */
                    A[matrixAt(j, i)] = 5.0;
                else
                    A[matrixAt(j, i)] = 2.0;
            }
        }
    }
    /* Initialize vector y */
    for (i = 0; i < N; i++)
    {
        y[i] = 2.0;
    }
    if (PRINT == 1)
        Print_Matrix();
}

void Print_Matrix()
{
    int i, j;
    bool printA = false;
    if (printA)
    {
        printf("Matrix A:\n");
        for (i = 0; i < N; i++)
        {
            printf("[");
            for (j = 0; j < N; j++)
                printf(" %5.2f,", A[matrixAt(j, i)]);
            printf("]\n");
        }
    }
    printf("Vector y:\n[");
    for (j = 0; j < N; j++)
        printf(" %5.2f,", y[j]);
    printf("]\n");
    printf("\n\n");
}

void Init_Default()
{
    N = MAX_SIZE;
    Init = "rand";
    maxnum = 15;
    PRINT = 1;
}

int Read_Options(int argc, char **argv)
{
    char *prog = *argv;
    while (++argv, --argc > 0)
        if (**argv == '-')
            switch (*++*argv)
            {
            case 'n':
                --argc;
                N = atoi(*++argv);
                break;
            case 'h':
                printf("\nHELP: try gauseq -u \n\n");
                exit(0);
                break;
            case 'u':
                printf("\nUsage: gauseq [-n problemsize]\n");
                printf("              [-D] show default values \n");
                printf("              [-h] help \n");
                printf("              [-I init_type] fast/rand \n");
                printf("              [-m maxnum] max random no \n");
                printf("              [-P print_switch] 0/1 \n");
                exit(0);
                break;
            case 'D':
                printf("\nDefault:  n      = %d ", N);
                printf("\n          Init   = rand");
                printf("\n          maxnum = %d ", maxnum);
                printf("\n          P      = %d \n\n", PRINT);
                exit(0);
                break;
            case 'I':
                --argc;
                Init = *++argv;
                break;
            case 'm':
                --argc;
                maxnum = atoi(*++argv);
                break;
            case 'P':
                --argc;
                PRINT = atoi(*++argv);
                break;
            default:
                printf("%s: ignored option: -%s\n", prog, *argv);
                printf("HELP: try %s -u \n\n", prog);
                break;
            }
    return 0;
}
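// Editor's note (addition): a minimal host-side sanity check for the program
// above, under the assumption that work_gpu() leaves unit pivots on the
// diagonal. The helper name CheckPivots is the editor's, not part of the
// original program.
#include <cmath>
static bool CheckPivots(const double *A, const double *y, int n, int stride)
{
    for (int k = 0; k < n; k++) {
        if (!std::isfinite(y[k]))
            return false;                           // elimination diverged
        if (std::fabs(A[k * stride + k] - 1.0) > 1e-9)
            return false;                           // pivot was not normalized to 1
    }
    return true;
}
// Usage sketch: after work_gpu(), call CheckPivots(A, y, N, MAX_SIZE).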
3122e537bc29a4731b3f6499c65e131776d7812c.hip
// !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "updateTau.cu"
#include <chrono>
#include <iostream>

using namespace std;
using namespace std::chrono;

int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};

int main(int argc, char **argv) {
    hipSetDevice(0);
    if (argc < 2) {
        fprintf(stderr, "usage: %s <matrix_len>\n", argv[0]);
        return 1;
    }
    char *p;
    int matrix_len = strtol(argv[1], &p, 10);
    for (int matrix_looper = 0; matrix_looper < matrix_len; matrix_looper++) {
        for (int block_looper = 0; block_looper < 20; block_looper++) {
            int XSIZE = matrices_[matrix_looper][0], YSIZE = matrices_[matrix_looper][1];
            int BLOCKX = blocks_[block_looper][0], BLOCKY = blocks_[block_looper][1];
            const int nbrOfGrids = 1;
            // was XSIZE*YSIZE bytes: too small by sizeof(double) for double buffers
            size_t bytes = (size_t)XSIZE * YSIZE * sizeof(double);
            double *d_u1 = NULL;   hipMalloc(&d_u1, bytes);
            double *d_u2 = NULL;   hipMalloc(&d_u2, bytes);
            double *d_u3 = NULL;   hipMalloc(&d_u3, bytes);
            double *d_gama = NULL; hipMalloc(&d_gama, bytes);
            double *d_cMax = NULL; hipMalloc(&d_cMax, bytes);
            double *d_h = NULL;    hipMalloc(&d_h, bytes);
            double *d_cfl = NULL;  hipMalloc(&d_cfl, bytes);
            double *d_tau = NULL;  hipMalloc(&d_tau, bytes);
            // Round the problem size up to a multiple of the block size
            int iXSIZE = XSIZE;
            int iYSIZE = YSIZE;
            while (iXSIZE % BLOCKX != 0) iXSIZE++;
            while (iYSIZE % BLOCKY != 0) iYSIZE++;
            dim3 gridBlock(iXSIZE / BLOCKX, iYSIZE / BLOCKY);
            dim3 threadBlock(BLOCKX, BLOCKY);
            hipFree(0); // force context creation before timing
            hipLaunchKernelGGL(updateTau, gridBlock, threadBlock, 0, 0, nbrOfGrids, d_u1, d_u2, d_u3, d_gama, d_cMax, d_h, d_cfl, d_tau);
            hipDeviceSynchronize();
            // warm-up runs
            for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
                hipLaunchKernelGGL(updateTau, gridBlock, threadBlock, 0, 0, nbrOfGrids, d_u1, d_u2, d_u3, d_gama, d_cMax, d_h, d_cfl, d_tau);
            }
            auto start = steady_clock::now();
            for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
                hipLaunchKernelGGL(updateTau, gridBlock, threadBlock, 0, 0, nbrOfGrids, d_u1, d_u2, d_u3, d_gama, d_cMax, d_h, d_cfl, d_tau);
            }
            // make sure all 1000 launches finished before stopping the clock
            hipDeviceSynchronize();
            auto end = steady_clock::now();
            auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
            cout << '[' << usecs.count() << ',' << '(' << BLOCKX << ',' << BLOCKY << ')' << ',' << '(' << XSIZE << ',' << YSIZE << ')' << ']' << endl;
            hipFree(d_u1); hipFree(d_u2); hipFree(d_u3); hipFree(d_gama);
            hipFree(d_cMax); hipFree(d_h); hipFree(d_cfl); hipFree(d_tau);
        }
    }
}
3122e537bc29a4731b3f6499c65e131776d7812c.cu
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "updateTau.cu"
#include <chrono>
#include <iostream>

using namespace std;
using namespace std::chrono;

int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};

int main(int argc, char **argv) {
    cudaSetDevice(0);
    if (argc < 2) {
        fprintf(stderr, "usage: %s <matrix_len>\n", argv[0]);
        return 1;
    }
    char *p;
    int matrix_len = strtol(argv[1], &p, 10);
    for (int matrix_looper = 0; matrix_looper < matrix_len; matrix_looper++) {
        for (int block_looper = 0; block_looper < 20; block_looper++) {
            int XSIZE = matrices_[matrix_looper][0], YSIZE = matrices_[matrix_looper][1];
            int BLOCKX = blocks_[block_looper][0], BLOCKY = blocks_[block_looper][1];
            const int nbrOfGrids = 1;
            // was XSIZE*YSIZE bytes: too small by sizeof(double) for double buffers
            size_t bytes = (size_t)XSIZE * YSIZE * sizeof(double);
            double *d_u1 = NULL;   cudaMalloc(&d_u1, bytes);
            double *d_u2 = NULL;   cudaMalloc(&d_u2, bytes);
            double *d_u3 = NULL;   cudaMalloc(&d_u3, bytes);
            double *d_gama = NULL; cudaMalloc(&d_gama, bytes);
            double *d_cMax = NULL; cudaMalloc(&d_cMax, bytes);
            double *d_h = NULL;    cudaMalloc(&d_h, bytes);
            double *d_cfl = NULL;  cudaMalloc(&d_cfl, bytes);
            double *d_tau = NULL;  cudaMalloc(&d_tau, bytes);
            // Round the problem size up to a multiple of the block size
            int iXSIZE = XSIZE;
            int iYSIZE = YSIZE;
            while (iXSIZE % BLOCKX != 0) iXSIZE++;
            while (iYSIZE % BLOCKY != 0) iYSIZE++;
            dim3 gridBlock(iXSIZE / BLOCKX, iYSIZE / BLOCKY);
            dim3 threadBlock(BLOCKX, BLOCKY);
            cudaFree(0); // force context creation before timing
            updateTau<<<gridBlock, threadBlock>>>(nbrOfGrids, d_u1, d_u2, d_u3, d_gama, d_cMax, d_h, d_cfl, d_tau);
            cudaDeviceSynchronize();
            // warm-up runs
            for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
                updateTau<<<gridBlock, threadBlock>>>(nbrOfGrids, d_u1, d_u2, d_u3, d_gama, d_cMax, d_h, d_cfl, d_tau);
            }
            auto start = steady_clock::now();
            for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
                updateTau<<<gridBlock, threadBlock>>>(nbrOfGrids, d_u1, d_u2, d_u3, d_gama, d_cMax, d_h, d_cfl, d_tau);
            }
            // make sure all 1000 launches finished before stopping the clock
            cudaDeviceSynchronize();
            auto end = steady_clock::now();
            auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
            cout << '[' << usecs.count() << ',' << '(' << BLOCKX << ',' << BLOCKY << ')' << ',' << '(' << XSIZE << ',' << YSIZE << ')' << ']' << endl;
            cudaFree(d_u1); cudaFree(d_u2); cudaFree(d_u3); cudaFree(d_gama);
            cudaFree(d_cMax); cudaFree(d_h); cudaFree(d_cfl); cudaFree(d_tau);
        }
    }
}
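// Editor's note (addition): a hedged alternative to the std::chrono timing in
// the benchmark above. CUDA events are recorded into the same stream as the
// kernels, so only the stop event needs synchronizing. This sketch reuses the
// gridBlock/threadBlock/d_* names from the loop body above.
cudaEvent_t t0, t1;
cudaEventCreate(&t0);
cudaEventCreate(&t1);
cudaEventRecord(t0);
for (int i = 0; i < 1000; i++)
    updateTau<<<gridBlock, threadBlock>>>(nbrOfGrids, d_u1, d_u2, d_u3, d_gama, d_cMax, d_h, d_cfl, d_tau);
cudaEventRecord(t1);
cudaEventSynchronize(t1);          // wait for the stop event, not the whole device
float ms = 0.0f;
cudaEventElapsedTime(&ms, t0, t1); // elapsed time between the events, in milliseconds
cudaEventDestroy(t0);
cudaEventDestroy(t1);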
650efa1e89546de534ef8d522f331d09fdc99b2b.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
 * Copyright 1993-2015 NVIDIA Corporation.  All rights reserved.
 *
 * Please refer to the NVIDIA end user license agreement (EULA) associated
 * with this source code for terms and conditions that govern your use of
 * this software. Any use, reproduction, disclosure, or distribution of
 * this software and related documentation outside the terms of the EULA
 * is strictly prohibited.
 */

#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <helper_cuda.h>
#include "convolutionFFT2D_common.h"
#include "convolutionFFT2D.cuh"

////////////////////////////////////////////////////////////////////////////////
/// Position convolution kernel center at (0, 0) in the image
////////////////////////////////////////////////////////////////////////////////
extern "C" void padKernel(float *d_Dst, float *d_Src, int fftH, int fftW,
                          int kernelH, int kernelW, int kernelY, int kernelX)
{
    assert(d_Src != d_Dst);
    dim3 threads(32, 8);
    dim3 grid(iDivUp(kernelW, threads.x), iDivUp(kernelH, threads.y));

    SET_FLOAT_BASE;
    hipLaunchKernelGGL(padKernel_kernel, grid, threads, 0, 0,
                       d_Dst, d_Src, fftH, fftW, kernelH, kernelW, kernelY, kernelX);
    getLastCudaError("padKernel_kernel<<<>>> execution failed\n");
}

////////////////////////////////////////////////////////////////////////////////
// Prepare data for "pad to border" addressing mode
////////////////////////////////////////////////////////////////////////////////
extern "C" void padDataClampToBorder(float *d_Dst, float *d_Src, int fftH, int fftW,
                                     int dataH, int dataW, int kernelW, int kernelH,
                                     int kernelY, int kernelX)
{
    assert(d_Src != d_Dst);
    dim3 threads(32, 8);
    dim3 grid(iDivUp(fftW, threads.x), iDivUp(fftH, threads.y));

    SET_FLOAT_BASE;
    hipLaunchKernelGGL(padDataClampToBorder_kernel, grid, threads, 0, 0,
                       d_Dst, d_Src, fftH, fftW, dataH, dataW, kernelH, kernelW, kernelY, kernelX);
    getLastCudaError("padDataClampToBorder_kernel<<<>>> execution failed\n");
}

////////////////////////////////////////////////////////////////////////////////
// Modulate Fourier image of padded data by Fourier image of padded kernel
// and normalize by FFT size
////////////////////////////////////////////////////////////////////////////////
extern "C" void modulateAndNormalize(fComplex *d_Dst, fComplex *d_Src,
                                     int fftH, int fftW, int padding)
{
    assert(fftW % 2 == 0);
    const int dataSize = fftH * (fftW / 2 + padding);

    hipLaunchKernelGGL(modulateAndNormalize_kernel, dim3(iDivUp(dataSize, 256)), dim3(256), 0, 0,
                       d_Dst, d_Src, dataSize, 1.0f / (float)(fftW * fftH));
    getLastCudaError("modulateAndNormalize() execution failed\n");
}

////////////////////////////////////////////////////////////////////////////////
// 2D R2C / C2R post/preprocessing kernels
////////////////////////////////////////////////////////////////////////////////
static const double PI = 3.1415926535897932384626433832795;

//static const uint BLOCKDIM = 1024;
static const uint BLOCKDIM = 512;
//static const uint BLOCKDIM = 256;
//static const uint BLOCKDIM = 128;
//static const uint BLOCKDIM = 64;
//static const uint BLOCKDIM = 32;
//static const uint BLOCKDIM = 16;

extern "C" void spPostprocess2D(void *d_Dst, void *d_Src, uint DY, uint DX,
                                uint padding, int dir)
{
    assert(d_Src != d_Dst);
    assert(DX % 2 == 0);

#if(POWER_OF_TWO)
    uint log2DX, log2DY;
    uint factorizationRemX = factorRadix2(log2DX, DX);
    uint factorizationRemY = factorRadix2(log2DY, DY);
    assert(factorizationRemX == 1 && factorizationRemY == 1);
#endif

    const uint threadCount = DY * (DX / 2);
    const double phaseBase = dir * PI / (double)DX;

    SET_FCOMPLEX_BASE;
    hipLaunchKernelGGL(spPostprocess2D_kernel, dim3(iDivUp(threadCount, BLOCKDIM)), dim3(BLOCKDIM), 0, 0,
                       (fComplex *)d_Dst, (fComplex *)d_Src, DY, DX, threadCount, padding, (float)phaseBase);
    getLastCudaError("spPostprocess2D_kernel<<<>>> execution failed\n");
}

extern "C" void spPreprocess2D(void *d_Dst, void *d_Src, uint DY, uint DX,
                               uint padding, int dir)
{
    assert(d_Src != d_Dst);
    assert(DX % 2 == 0);

#if(POWER_OF_TWO)
    uint log2DX, log2DY;
    uint factorizationRemX = factorRadix2(log2DX, DX);
    uint factorizationRemY = factorRadix2(log2DY, DY);
    assert(factorizationRemX == 1 && factorizationRemY == 1);
#endif

    const uint threadCount = DY * (DX / 2);
    const double phaseBase = -dir * PI / (double)DX;

    SET_FCOMPLEX_BASE;
    hipLaunchKernelGGL(spPreprocess2D_kernel, dim3(iDivUp(threadCount, BLOCKDIM)), dim3(BLOCKDIM), 0, 0,
                       (fComplex *)d_Dst, (fComplex *)d_Src, DY, DX, threadCount, padding, (float)phaseBase);
    getLastCudaError("spPreprocess2D_kernel<<<>>> execution failed\n");
}

////////////////////////////////////////////////////////////////////////////////
// Combined spPostprocess2D + modulateAndNormalize + spPreprocess2D
////////////////////////////////////////////////////////////////////////////////
extern "C" void spProcess2D(void *d_Dst, void *d_SrcA, void *d_SrcB,
                            uint DY, uint DX, int dir)
{
    assert(DY % 2 == 0);

#if(POWER_OF_TWO)
    uint log2DX, log2DY;
    uint factorizationRemX = factorRadix2(log2DX, DX);
    uint factorizationRemY = factorRadix2(log2DY, DY);
    assert(factorizationRemX == 1 && factorizationRemY == 1);
#endif

    const uint threadCount = (DY / 2) * DX;
    const double phaseBase = dir * PI / (double)DX;

    SET_FCOMPLEX_BASE_A;
    SET_FCOMPLEX_BASE_B;
    hipLaunchKernelGGL(spProcess2D_kernel, dim3(iDivUp(threadCount, BLOCKDIM)), dim3(BLOCKDIM), 0, 0,
                       (fComplex *)d_Dst, (fComplex *)d_SrcA, (fComplex *)d_SrcB,
                       DY, DX, threadCount, (float)phaseBase, 0.5f / (float)(DY * DX));
    getLastCudaError("spProcess2D_kernel<<<>>> execution failed\n");
}
650efa1e89546de534ef8d522f331d09fdc99b2b.cu
/*
 * Copyright 1993-2015 NVIDIA Corporation.  All rights reserved.
 *
 * Please refer to the NVIDIA end user license agreement (EULA) associated
 * with this source code for terms and conditions that govern your use of
 * this software. Any use, reproduction, disclosure, or distribution of
 * this software and related documentation outside the terms of the EULA
 * is strictly prohibited.
 */

#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <helper_cuda.h>
#include "convolutionFFT2D_common.h"
#include "convolutionFFT2D.cuh"

////////////////////////////////////////////////////////////////////////////////
/// Position convolution kernel center at (0, 0) in the image
////////////////////////////////////////////////////////////////////////////////
extern "C" void padKernel(float *d_Dst, float *d_Src, int fftH, int fftW,
                          int kernelH, int kernelW, int kernelY, int kernelX)
{
    assert(d_Src != d_Dst);
    dim3 threads(32, 8);
    dim3 grid(iDivUp(kernelW, threads.x), iDivUp(kernelH, threads.y));

    SET_FLOAT_BASE;
    padKernel_kernel<<<grid, threads>>>(
        d_Dst, d_Src, fftH, fftW, kernelH, kernelW, kernelY, kernelX);
    getLastCudaError("padKernel_kernel<<<>>> execution failed\n");
}

////////////////////////////////////////////////////////////////////////////////
// Prepare data for "pad to border" addressing mode
////////////////////////////////////////////////////////////////////////////////
extern "C" void padDataClampToBorder(float *d_Dst, float *d_Src, int fftH, int fftW,
                                     int dataH, int dataW, int kernelW, int kernelH,
                                     int kernelY, int kernelX)
{
    assert(d_Src != d_Dst);
    dim3 threads(32, 8);
    dim3 grid(iDivUp(fftW, threads.x), iDivUp(fftH, threads.y));

    SET_FLOAT_BASE;
    padDataClampToBorder_kernel<<<grid, threads>>>(
        d_Dst, d_Src, fftH, fftW, dataH, dataW, kernelH, kernelW, kernelY, kernelX);
    getLastCudaError("padDataClampToBorder_kernel<<<>>> execution failed\n");
}

////////////////////////////////////////////////////////////////////////////////
// Modulate Fourier image of padded data by Fourier image of padded kernel
// and normalize by FFT size
////////////////////////////////////////////////////////////////////////////////
extern "C" void modulateAndNormalize(fComplex *d_Dst, fComplex *d_Src,
                                     int fftH, int fftW, int padding)
{
    assert(fftW % 2 == 0);
    const int dataSize = fftH * (fftW / 2 + padding);

    modulateAndNormalize_kernel<<<iDivUp(dataSize, 256), 256>>>(
        d_Dst, d_Src, dataSize, 1.0f / (float)(fftW * fftH));
    getLastCudaError("modulateAndNormalize() execution failed\n");
}

////////////////////////////////////////////////////////////////////////////////
// 2D R2C / C2R post/preprocessing kernels
////////////////////////////////////////////////////////////////////////////////
static const double PI = 3.1415926535897932384626433832795;

//static const uint BLOCKDIM = 1024;
static const uint BLOCKDIM = 512;
//static const uint BLOCKDIM = 256;
//static const uint BLOCKDIM = 128;
//static const uint BLOCKDIM = 64;
//static const uint BLOCKDIM = 32;
//static const uint BLOCKDIM = 16;

extern "C" void spPostprocess2D(void *d_Dst, void *d_Src, uint DY, uint DX,
                                uint padding, int dir)
{
    assert(d_Src != d_Dst);
    assert(DX % 2 == 0);

#if(POWER_OF_TWO)
    uint log2DX, log2DY;
    uint factorizationRemX = factorRadix2(log2DX, DX);
    uint factorizationRemY = factorRadix2(log2DY, DY);
    assert(factorizationRemX == 1 && factorizationRemY == 1);
#endif

    const uint threadCount = DY * (DX / 2);
    const double phaseBase = dir * PI / (double)DX;

    SET_FCOMPLEX_BASE;
    spPostprocess2D_kernel<<<iDivUp(threadCount, BLOCKDIM), BLOCKDIM>>>(
        (fComplex *)d_Dst, (fComplex *)d_Src, DY, DX, threadCount, padding, (float)phaseBase);
    getLastCudaError("spPostprocess2D_kernel<<<>>> execution failed\n");
}

extern "C" void spPreprocess2D(void *d_Dst, void *d_Src, uint DY, uint DX,
                               uint padding, int dir)
{
    assert(d_Src != d_Dst);
    assert(DX % 2 == 0);

#if(POWER_OF_TWO)
    uint log2DX, log2DY;
    uint factorizationRemX = factorRadix2(log2DX, DX);
    uint factorizationRemY = factorRadix2(log2DY, DY);
    assert(factorizationRemX == 1 && factorizationRemY == 1);
#endif

    const uint threadCount = DY * (DX / 2);
    const double phaseBase = -dir * PI / (double)DX;

    SET_FCOMPLEX_BASE;
    spPreprocess2D_kernel<<<iDivUp(threadCount, BLOCKDIM), BLOCKDIM>>>(
        (fComplex *)d_Dst, (fComplex *)d_Src, DY, DX, threadCount, padding, (float)phaseBase);
    getLastCudaError("spPreprocess2D_kernel<<<>>> execution failed\n");
}

////////////////////////////////////////////////////////////////////////////////
// Combined spPostprocess2D + modulateAndNormalize + spPreprocess2D
////////////////////////////////////////////////////////////////////////////////
extern "C" void spProcess2D(void *d_Dst, void *d_SrcA, void *d_SrcB,
                            uint DY, uint DX, int dir)
{
    assert(DY % 2 == 0);

#if(POWER_OF_TWO)
    uint log2DX, log2DY;
    uint factorizationRemX = factorRadix2(log2DX, DX);
    uint factorizationRemY = factorRadix2(log2DY, DY);
    assert(factorizationRemX == 1 && factorizationRemY == 1);
#endif

    const uint threadCount = (DY / 2) * DX;
    const double phaseBase = dir * PI / (double)DX;

    SET_FCOMPLEX_BASE_A;
    SET_FCOMPLEX_BASE_B;
    spProcess2D_kernel<<<iDivUp(threadCount, BLOCKDIM), BLOCKDIM>>>(
        (fComplex *)d_Dst, (fComplex *)d_SrcA, (fComplex *)d_SrcB,
        DY, DX, threadCount, (float)phaseBase, 0.5f / (float)(DY * DX));
    getLastCudaError("spProcess2D_kernel<<<>>> execution failed\n");
}
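// Editor's note (addition): iDivUp comes from convolutionFFT2D_common.h, which
// is not included in this row. Its conventional definition is the ceiling
// division used above to size launch grids so that every element is covered:
inline int iDivUp(int a, int b) { return (a + b - 1) / b; }
// e.g. iDivUp(1000, 256) == 4 blocks of 256 threads to cover 1000 elements.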
3924d00b2b95cb988b29e496fa834f9e0a40fccc.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"

__global__ void sum(float *A, float *B, float *C, const int N)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < N)
        C[i] = A[i] + B[i];
}
3924d00b2b95cb988b29e496fa834f9e0a40fccc.cu
#include "includes.h" __global__ void sum(float *A, float *B, float *C, const int N) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < N) C[i] = A[i] + B[i]; }
ee47e75f6317d397a058cf6a9a81eb6cb19aa7a9.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"

__global__ void OpenBoundaryKernel(double *Vrad, double *Dens, double *Energy, int nsec, double SigmaMed)
{
    int j = threadIdx.x + blockDim.x * blockIdx.x;
    int i = 1;

    if (j < nsec) {
        Dens[(i-1)*nsec + j] = Dens[i*nsec + j];     // copy first ring into ghost ring
        Energy[(i-1)*nsec + j] = Energy[i*nsec + j];
        if (Vrad[(i+1)*nsec + j] > 0.0 || (Dens[i*nsec + j] < SigmaMed))
            Vrad[i*nsec + j] = 0.0;                  // we just allow outflow [inwards]
        else
            Vrad[i*nsec + j] = Vrad[(i+1)*nsec + j];
    }
}
ee47e75f6317d397a058cf6a9a81eb6cb19aa7a9.cu
#include "includes.h" __global__ void OpenBoundaryKernel (double *Vrad, double *Dens, double *Energy, int nsec, double SigmaMed) { int j = threadIdx.x + blockDim.x*blockIdx.x; int i = 1; if(j < nsec){ Dens[(i-1)*nsec + j] = Dens[i*nsec + j]; // copy first ring into ghost ring Energy[(i-1)*nsec + j] = Energy[i*nsec + j]; if (Vrad[(i+1)*nsec + j] > 0.0 || (Dens[i*nsec + j] < SigmaMed)) Vrad[i*nsec + j] = 0.0; // we just allow outflow [inwards] else Vrad[i*nsec +j] = Vrad[(i+1)*nsec + j]; } }