Dataset columns (all string-valued; lengths given as min - max over the rows):

  hip_filename    5 - 84
  hip_content     79 - 9.69M
  cuda_filename   4 - 83
  cuda_content    19 - 9.69M

Each row pairs an original CUDA source file (cuda_filename / cuda_content) with the HIP translation automatically generated from it by hipify (hip_filename / hip_content); the two filenames in a row share the same hash stem and differ only in their .cu / .hip extension.
80e0b9a0f75f0b78e78b1ec304404ec8fcf596a8.hip
// !!! This is a file automatically generated by hipify!!! // includes, system #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> #include <hip/hip_runtime.h> #include <sys/time.h> // includes, kernels #include "backprop_cuda_kernel.cu" #include "backprop.h" //////////////////////////////////////////////////////////////////////////////// extern "C" void bpnn222_layerforward(float *l1, float *l2, float **conn, int n1, int n2); extern "C" void bpnn_output_error(float *delta, float *target, float *output, int nj, float *err); extern "C" void bpnn_hidden_error(float *delta_h, int nh, float *delta_o, int no, float **who, float *hidden, float *err); extern "C" void bpnn222_adjust_weights(float *delta, int ndelta, float *ly, int nly, float **w, float **oldw); extern "C" int setup(int argc, char** argv); extern "C" float **alloc_2d_dbl(int m, int n); extern "C" float squash(float x); double gettime() { struct timeval t; gettimeofday(&t,NULL); return t.tv_sec+t.tv_usec*1e-6; } unsigned int num_threads = 0; unsigned int num_blocks = 0; //////////////////////////////////////////////////////////////////////////////// // Program main //////////////////////////////////////////////////////////////////////////////// int main( int argc, char** argv) { setup(argc, argv); } extern "C" void bpnn_train_cuda(BPNN *net, float *eo, float *eh) { int in, hid, out; float out_err, hid_err; in = net->input_n; hid = net->hidden_n; out = net->output_n; #ifdef GPU int m = 0; float *input_hidden_cuda; float *input_cuda; float *output_hidden_cuda; float *partial_sum; float *hidden_partial_sum; float *hidden_delta_cuda; float *input_prev_weights_cuda; float sum; float *input_weights_one_dim; float *input_weights_prev_one_dim; num_blocks = in / 16; dim3 grid( 1 , num_blocks); dim3 threads(16 , 16); input_weights_one_dim = (float *) malloc((in + 1)* (hid + 1) * sizeof(float)); input_weights_prev_one_dim = (float *) malloc((in + 1)* (hid + 1) * sizeof(float)); partial_sum = (float *) malloc(num_blocks * WIDTH * sizeof(float)); // this preprocessing stage is added to correct the bugs of wrong memcopy using two-dimensional net->inputweights for (int k = 0; k <= in; k++) { for (int j = 0; j <= hid; j++) { input_weights_one_dim[m] = net->input_weights[k][j]; input_weights_prev_one_dim[m] = net-> input_prev_weights[k][j]; m++; } } hipMalloc((void**) &input_cuda, (in + 1) * sizeof(float)); hipMalloc((void**) &output_hidden_cuda, (hid + 1) * sizeof(float)); hipMalloc((void**) &input_hidden_cuda, (in + 1) * (hid + 1) * sizeof(float)); hipMalloc((void**) &hidden_partial_sum, num_blocks * WIDTH * sizeof(float)); #endif #ifdef CPU printf("Performing CPU computation\n"); bpnn222_layerforward(net->input_units, net->hidden_units,net->input_weights, in, hid); #endif #ifdef GPU printf("Performing GPU computation\n"); //printf("in= %d, hid = %d, numblocks = %d\n", in, hid, num_blocks); hipMemcpy(input_cuda, net->input_units, (in + 1) * sizeof(float), hipMemcpyHostToDevice); hipMemcpy(input_hidden_cuda, input_weights_one_dim, (in + 1) * (hid + 1) * sizeof(float), hipMemcpyHostToDevice); hipLaunchKernelGGL(( bpnn222_layerforward_CUDA), dim3(grid), dim3(threads) , 0, 0, input_cuda, output_hidden_cuda, input_hidden_cuda, hidden_partial_sum, in, hid); hipDeviceSynchronize(); hipError_t error = hipGetLastError(); if (error != hipSuccess) { printf("bpnn kernel error: %s\n", hipGetErrorString(error)); exit(EXIT_FAILURE); } hipMemcpy(partial_sum, hidden_partial_sum, num_blocks * WIDTH * sizeof(float), hipMemcpyDeviceToHost); 
for (int j = 1; j <= hid; j++) { sum = 0.0; for (int k = 0; k < num_blocks; k++) { sum += partial_sum[k * hid + j-1] ; } sum += net->input_weights[0][j]; net-> hidden_units[j] = float(1.0 / (1.0 + exp(-sum))); } #endif bpnn222_layerforward(net->hidden_units, net->output_units, net->hidden_weights, hid, out); bpnn_output_error(net->output_delta, net->target, net->output_units, out, &out_err); bpnn_hidden_error(net->hidden_delta, hid, net->output_delta, out, net->hidden_weights, net->hidden_units, &hid_err); bpnn222_adjust_weights(net->output_delta, out, net->hidden_units, hid, net->hidden_weights, net->hidden_prev_weights); #ifdef CPU bpnn222_adjust_weights(net->hidden_delta, hid, net->input_units, in, net->input_weights, net->input_prev_weights); #endif #ifdef GPU hipMalloc((void**) &hidden_delta_cuda, (hid + 1) * sizeof(float)); hipMalloc((void**) &input_prev_weights_cuda, (in + 1) * (hid + 1) * sizeof(float)); hipMemcpy(hidden_delta_cuda, net->hidden_delta, (hid + 1) * sizeof(float), hipMemcpyHostToDevice); hipMemcpy(input_prev_weights_cuda, input_weights_prev_one_dim, (in + 1) * (hid + 1) * sizeof(float), hipMemcpyHostToDevice); hipMemcpy(input_hidden_cuda, input_weights_one_dim, (in + 1) * (hid + 1) * sizeof(float), hipMemcpyHostToDevice); hipLaunchKernelGGL(( bpnn222_adjust_weights_cuda), dim3(grid), dim3(threads) , 0, 0, hidden_delta_cuda, hid, input_cuda, in, input_hidden_cuda, input_prev_weights_cuda ); hipMemcpy(net->input_units, input_cuda, (in + 1) * sizeof(float), hipMemcpyDeviceToHost); hipMemcpy(input_weights_one_dim, input_hidden_cuda, (in + 1) * (hid + 1) * sizeof(float), hipMemcpyDeviceToHost); hipFree(input_cuda); hipFree(output_hidden_cuda); hipFree(input_hidden_cuda); hipFree(hidden_partial_sum); hipFree(input_prev_weights_cuda); hipFree(hidden_delta_cuda); free(partial_sum); free(input_weights_one_dim); free(input_weights_prev_one_dim); #endif }
80e0b9a0f75f0b78e78b1ec304404ec8fcf596a8.cu
// includes, system #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> #include <cuda.h> #include <sys/time.h> // includes, kernels #include "backprop_cuda_kernel.cu" #include "backprop.h" //////////////////////////////////////////////////////////////////////////////// extern "C" void bpnn222_layerforward(float *l1, float *l2, float **conn, int n1, int n2); extern "C" void bpnn_output_error(float *delta, float *target, float *output, int nj, float *err); extern "C" void bpnn_hidden_error(float *delta_h, int nh, float *delta_o, int no, float **who, float *hidden, float *err); extern "C" void bpnn222_adjust_weights(float *delta, int ndelta, float *ly, int nly, float **w, float **oldw); extern "C" int setup(int argc, char** argv); extern "C" float **alloc_2d_dbl(int m, int n); extern "C" float squash(float x); double gettime() { struct timeval t; gettimeofday(&t,NULL); return t.tv_sec+t.tv_usec*1e-6; } unsigned int num_threads = 0; unsigned int num_blocks = 0; //////////////////////////////////////////////////////////////////////////////// // Program main //////////////////////////////////////////////////////////////////////////////// int main( int argc, char** argv) { setup(argc, argv); } extern "C" void bpnn_train_cuda(BPNN *net, float *eo, float *eh) { int in, hid, out; float out_err, hid_err; in = net->input_n; hid = net->hidden_n; out = net->output_n; #ifdef GPU int m = 0; float *input_hidden_cuda; float *input_cuda; float *output_hidden_cuda; float *partial_sum; float *hidden_partial_sum; float *hidden_delta_cuda; float *input_prev_weights_cuda; float sum; float *input_weights_one_dim; float *input_weights_prev_one_dim; num_blocks = in / 16; dim3 grid( 1 , num_blocks); dim3 threads(16 , 16); input_weights_one_dim = (float *) malloc((in + 1)* (hid + 1) * sizeof(float)); input_weights_prev_one_dim = (float *) malloc((in + 1)* (hid + 1) * sizeof(float)); partial_sum = (float *) malloc(num_blocks * WIDTH * sizeof(float)); // this preprocessing stage is added to correct the bugs of wrong memcopy using two-dimensional net->inputweights for (int k = 0; k <= in; k++) { for (int j = 0; j <= hid; j++) { input_weights_one_dim[m] = net->input_weights[k][j]; input_weights_prev_one_dim[m] = net-> input_prev_weights[k][j]; m++; } } cudaMalloc((void**) &input_cuda, (in + 1) * sizeof(float)); cudaMalloc((void**) &output_hidden_cuda, (hid + 1) * sizeof(float)); cudaMalloc((void**) &input_hidden_cuda, (in + 1) * (hid + 1) * sizeof(float)); cudaMalloc((void**) &hidden_partial_sum, num_blocks * WIDTH * sizeof(float)); #endif #ifdef CPU printf("Performing CPU computation\n"); bpnn222_layerforward(net->input_units, net->hidden_units,net->input_weights, in, hid); #endif #ifdef GPU printf("Performing GPU computation\n"); //printf("in= %d, hid = %d, numblocks = %d\n", in, hid, num_blocks); cudaMemcpy(input_cuda, net->input_units, (in + 1) * sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(input_hidden_cuda, input_weights_one_dim, (in + 1) * (hid + 1) * sizeof(float), cudaMemcpyHostToDevice); bpnn222_layerforward_CUDA<<< grid, threads >>>(input_cuda, output_hidden_cuda, input_hidden_cuda, hidden_partial_sum, in, hid); cudaThreadSynchronize(); cudaError_t error = cudaGetLastError(); if (error != cudaSuccess) { printf("bpnn kernel error: %s\n", cudaGetErrorString(error)); exit(EXIT_FAILURE); } cudaMemcpy(partial_sum, hidden_partial_sum, num_blocks * WIDTH * sizeof(float), cudaMemcpyDeviceToHost); for (int j = 1; j <= hid; j++) { sum = 0.0; for (int k = 0; k < num_blocks; k++) { sum += 
partial_sum[k * hid + j-1] ; } sum += net->input_weights[0][j]; net-> hidden_units[j] = float(1.0 / (1.0 + exp(-sum))); } #endif bpnn222_layerforward(net->hidden_units, net->output_units, net->hidden_weights, hid, out); bpnn_output_error(net->output_delta, net->target, net->output_units, out, &out_err); bpnn_hidden_error(net->hidden_delta, hid, net->output_delta, out, net->hidden_weights, net->hidden_units, &hid_err); bpnn222_adjust_weights(net->output_delta, out, net->hidden_units, hid, net->hidden_weights, net->hidden_prev_weights); #ifdef CPU bpnn222_adjust_weights(net->hidden_delta, hid, net->input_units, in, net->input_weights, net->input_prev_weights); #endif #ifdef GPU cudaMalloc((void**) &hidden_delta_cuda, (hid + 1) * sizeof(float)); cudaMalloc((void**) &input_prev_weights_cuda, (in + 1) * (hid + 1) * sizeof(float)); cudaMemcpy(hidden_delta_cuda, net->hidden_delta, (hid + 1) * sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(input_prev_weights_cuda, input_weights_prev_one_dim, (in + 1) * (hid + 1) * sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(input_hidden_cuda, input_weights_one_dim, (in + 1) * (hid + 1) * sizeof(float), cudaMemcpyHostToDevice); bpnn222_adjust_weights_cuda<<< grid, threads >>>(hidden_delta_cuda, hid, input_cuda, in, input_hidden_cuda, input_prev_weights_cuda ); cudaMemcpy(net->input_units, input_cuda, (in + 1) * sizeof(float), cudaMemcpyDeviceToHost); cudaMemcpy(input_weights_one_dim, input_hidden_cuda, (in + 1) * (hid + 1) * sizeof(float), cudaMemcpyDeviceToHost); cudaFree(input_cuda); cudaFree(output_hidden_cuda); cudaFree(input_hidden_cuda); cudaFree(hidden_partial_sum); cudaFree(input_prev_weights_cuda); cudaFree(hidden_delta_cuda); free(partial_sum); free(input_weights_one_dim); free(input_weights_prev_one_dim); #endif }
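The pair above shows the purely mechanical substitutions hipify applies to the Rodinia backprop host code: the cuda.h include becomes hip/hip_runtime.h, cudaMalloc / cudaMemcpy / cudaFree become their hip* counterparts, the triple-chevron launch syntax is rewritten as hipLaunchKernelGGL, and the deprecated cudaThreadSynchronize call becomes hipDeviceSynchronize. The sketch below is not a dataset row; it is a minimal, self-contained HIP program (the scale kernel, the sizes, and the hipMemset call are invented for illustration) that condenses the same translation pattern, with the pre-hipify CUDA form noted in comments.

// Minimal sketch of the hipify rewrites visible in the backprop pair above
// (kernel, sizes, and names are illustrative only, not taken from the dataset).
#include <hip/hip_runtime.h>   // hipify replaces #include <cuda.h>
#include <cstdio>

__global__ void scale(float *x, int n)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) x[i] *= 2.0f;
}

int main()
{
    const int n = 256;
    float *d_x;
    hipMalloc((void **) &d_x, n * sizeof(float));               // was cudaMalloc
    hipMemset(d_x, 0, n * sizeof(float));                       // was cudaMemset
    // CUDA launch syntax:  scale<<< dim3(1), dim3(n) >>>(d_x, n);
    hipLaunchKernelGGL(scale, dim3(1), dim3(n), 0, 0, d_x, n);
    hipDeviceSynchronize();                                     // was cudaThreadSynchronize
    hipError_t error = hipGetLastError();                       // was cudaGetLastError
    if (error != hipSuccess) printf("kernel error: %s\n", hipGetErrorString(error));
    hipFree(d_x);                                               // was cudaFree
    return 0;
}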
ab72bd451a195bee5b73c9f02135262bf050c2f5.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2020, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <algorithms.hpp> #include <graph.hpp> #include <utilities/cuda_utils.cuh> #include <rmm/thrust_rmm_allocator.h> #include <utilities/error_utils.h> namespace { template <typename vertex_t, typename edge_t, typename weight_t, bool has_weight> std::unique_ptr<cugraph::experimental::GraphCOO<vertex_t, edge_t, weight_t>> extract_subgraph_by_vertices( cugraph::experimental::GraphCOOView<vertex_t, edge_t, weight_t> const &graph, vertex_t const *vertices, vertex_t num_vertices, hipStream_t stream) { edge_t graph_num_verts = graph.number_of_vertices; rmm::device_vector<int64_t> error_count_v{1, 0}; rmm::device_vector<vertex_t> vertex_used_v{graph_num_verts, num_vertices}; vertex_t *d_vertex_used = vertex_used_v.data().get(); int64_t *d_error_count = error_count_v.data().get(); thrust::for_each( rmm::exec_policy(stream)->on(stream), thrust::make_counting_iterator<vertex_t>(0), thrust::make_counting_iterator<vertex_t>(num_vertices), [vertices, d_vertex_used, d_error_count, graph_num_verts] __device__(vertex_t idx) { vertex_t v = vertices[idx]; if ((v >= 0) && (v < graph_num_verts)) { d_vertex_used[v] = idx; } else { cugraph::atomicAdd(d_error_count, int64_t{1}); } }); CUGRAPH_EXPECTS(error_count_v[0] == 0, "Input error... vertices specifies vertex id out of range"); vertex_t *graph_src = graph.src_indices; vertex_t *graph_dst = graph.dst_indices; weight_t *graph_weight = graph.edge_data; // iterate over the edges and count how many make it into the output int64_t count = thrust::count_if( rmm::exec_policy(stream)->on(stream), thrust::make_counting_iterator<edge_t>(0), thrust::make_counting_iterator<edge_t>(graph.number_of_edges), [graph_src, graph_dst, d_vertex_used, num_vertices] __device__(edge_t e) { vertex_t s = graph_src[e]; vertex_t d = graph_dst[e]; return ((d_vertex_used[s] < num_vertices) && (d_vertex_used[d] < num_vertices)); }); if (count > 0) { auto result = std::make_unique<cugraph::experimental::GraphCOO<vertex_t, edge_t, weight_t>>( num_vertices, count, has_weight); vertex_t *d_new_src = result->src_indices(); vertex_t *d_new_dst = result->dst_indices(); weight_t *d_new_weight = result->edge_data(); // reusing error_count as a vertex counter... thrust::for_each(rmm::exec_policy(stream)->on(stream), thrust::make_counting_iterator<edge_t>(0), thrust::make_counting_iterator<edge_t>(graph.number_of_edges), [graph_src, graph_dst, graph_weight, d_vertex_used, num_vertices, d_error_count, d_new_src, d_new_dst, d_new_weight] __device__(edge_t e) { vertex_t s = graph_src[e]; vertex_t d = graph_dst[e]; if ((d_vertex_used[s] < num_vertices) && (d_vertex_used[d] < num_vertices)) { // NOTE: Could avoid atomic here by doing a inclusive sum, but that would // require 2*|E| temporary memory. If this becomes important perhaps // we make 2 implementations and pick one based on the number of // vertices in the subgraph set. 
auto pos = cugraph::atomicAdd(d_error_count, 1); d_new_src[pos] = d_vertex_used[s]; d_new_dst[pos] = d_vertex_used[d]; if (has_weight) d_new_weight[pos] = graph_weight[e]; } }); return result; } else { return std::make_unique<cugraph::experimental::GraphCOO<vertex_t, edge_t, weight_t>>( 0, 0, has_weight); } } } // namespace namespace cugraph { namespace nvgraph { template <typename VT, typename ET, typename WT> std::unique_ptr<experimental::GraphCOO<VT, ET, WT>> extract_subgraph_vertex( experimental::GraphCOOView<VT, ET, WT> const &graph, VT const *vertices, VT num_vertices) { CUGRAPH_EXPECTS(vertices != nullptr, "API error, vertices must be non null"); hipStream_t stream{0}; if (graph.edge_data == nullptr) { return extract_subgraph_by_vertices<VT, ET, WT, false>(graph, vertices, num_vertices, stream); } else { return extract_subgraph_by_vertices<VT, ET, WT, true>(graph, vertices, num_vertices, stream); } } template std::unique_ptr<experimental::GraphCOO<int32_t, int32_t, float>> extract_subgraph_vertex<int32_t, int32_t, float>( experimental::GraphCOOView<int32_t, int32_t, float> const &, int32_t const *, int32_t); template std::unique_ptr<experimental::GraphCOO<int32_t, int32_t, double>> extract_subgraph_vertex<int32_t, int32_t, double>( experimental::GraphCOOView<int32_t, int32_t, double> const &, int32_t const *, int32_t); } // namespace nvgraph } // namespace cugraph
ab72bd451a195bee5b73c9f02135262bf050c2f5.cu
/* * Copyright (c) 2020, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <algorithms.hpp> #include <graph.hpp> #include <utilities/cuda_utils.cuh> #include <rmm/thrust_rmm_allocator.h> #include <utilities/error_utils.h> namespace { template <typename vertex_t, typename edge_t, typename weight_t, bool has_weight> std::unique_ptr<cugraph::experimental::GraphCOO<vertex_t, edge_t, weight_t>> extract_subgraph_by_vertices( cugraph::experimental::GraphCOOView<vertex_t, edge_t, weight_t> const &graph, vertex_t const *vertices, vertex_t num_vertices, cudaStream_t stream) { edge_t graph_num_verts = graph.number_of_vertices; rmm::device_vector<int64_t> error_count_v{1, 0}; rmm::device_vector<vertex_t> vertex_used_v{graph_num_verts, num_vertices}; vertex_t *d_vertex_used = vertex_used_v.data().get(); int64_t *d_error_count = error_count_v.data().get(); thrust::for_each( rmm::exec_policy(stream)->on(stream), thrust::make_counting_iterator<vertex_t>(0), thrust::make_counting_iterator<vertex_t>(num_vertices), [vertices, d_vertex_used, d_error_count, graph_num_verts] __device__(vertex_t idx) { vertex_t v = vertices[idx]; if ((v >= 0) && (v < graph_num_verts)) { d_vertex_used[v] = idx; } else { cugraph::atomicAdd(d_error_count, int64_t{1}); } }); CUGRAPH_EXPECTS(error_count_v[0] == 0, "Input error... vertices specifies vertex id out of range"); vertex_t *graph_src = graph.src_indices; vertex_t *graph_dst = graph.dst_indices; weight_t *graph_weight = graph.edge_data; // iterate over the edges and count how many make it into the output int64_t count = thrust::count_if( rmm::exec_policy(stream)->on(stream), thrust::make_counting_iterator<edge_t>(0), thrust::make_counting_iterator<edge_t>(graph.number_of_edges), [graph_src, graph_dst, d_vertex_used, num_vertices] __device__(edge_t e) { vertex_t s = graph_src[e]; vertex_t d = graph_dst[e]; return ((d_vertex_used[s] < num_vertices) && (d_vertex_used[d] < num_vertices)); }); if (count > 0) { auto result = std::make_unique<cugraph::experimental::GraphCOO<vertex_t, edge_t, weight_t>>( num_vertices, count, has_weight); vertex_t *d_new_src = result->src_indices(); vertex_t *d_new_dst = result->dst_indices(); weight_t *d_new_weight = result->edge_data(); // reusing error_count as a vertex counter... thrust::for_each(rmm::exec_policy(stream)->on(stream), thrust::make_counting_iterator<edge_t>(0), thrust::make_counting_iterator<edge_t>(graph.number_of_edges), [graph_src, graph_dst, graph_weight, d_vertex_used, num_vertices, d_error_count, d_new_src, d_new_dst, d_new_weight] __device__(edge_t e) { vertex_t s = graph_src[e]; vertex_t d = graph_dst[e]; if ((d_vertex_used[s] < num_vertices) && (d_vertex_used[d] < num_vertices)) { // NOTE: Could avoid atomic here by doing a inclusive sum, but that would // require 2*|E| temporary memory. If this becomes important perhaps // we make 2 implementations and pick one based on the number of // vertices in the subgraph set. 
auto pos = cugraph::atomicAdd(d_error_count, 1); d_new_src[pos] = d_vertex_used[s]; d_new_dst[pos] = d_vertex_used[d]; if (has_weight) d_new_weight[pos] = graph_weight[e]; } }); return result; } else { return std::make_unique<cugraph::experimental::GraphCOO<vertex_t, edge_t, weight_t>>( 0, 0, has_weight); } } } // namespace namespace cugraph { namespace nvgraph { template <typename VT, typename ET, typename WT> std::unique_ptr<experimental::GraphCOO<VT, ET, WT>> extract_subgraph_vertex( experimental::GraphCOOView<VT, ET, WT> const &graph, VT const *vertices, VT num_vertices) { CUGRAPH_EXPECTS(vertices != nullptr, "API error, vertices must be non null"); cudaStream_t stream{0}; if (graph.edge_data == nullptr) { return extract_subgraph_by_vertices<VT, ET, WT, false>(graph, vertices, num_vertices, stream); } else { return extract_subgraph_by_vertices<VT, ET, WT, true>(graph, vertices, num_vertices, stream); } } template std::unique_ptr<experimental::GraphCOO<int32_t, int32_t, float>> extract_subgraph_vertex<int32_t, int32_t, float>( experimental::GraphCOOView<int32_t, int32_t, float> const &, int32_t const *, int32_t); template std::unique_ptr<experimental::GraphCOO<int32_t, int32_t, double>> extract_subgraph_vertex<int32_t, int32_t, double>( experimental::GraphCOOView<int32_t, int32_t, double> const &, int32_t const *, int32_t); } // namespace nvgraph } // namespace cugraph
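In the cuGraph pair above, hipify only renames the host-side stream type (cudaStream_t becomes hipStream_t); the Thrust iterators, the __device__ lambdas, and the RMM-based allocation code are source-compatible and left untouched. The sketch below illustrates that pattern under stated assumptions: it assumes rocThrust/Thrust headers are available and a compiler with extended device-lambda support (the same requirement the original lambdas have); the fill_squares function and its sizes are invented for illustration, and it uses the plain thrust::device policy instead of cuGraph's rmm::exec_policy(stream).

// Minimal sketch: only the stream type changes under hipify, the Thrust code does not.
#include <hip/hip_runtime.h>
#include <thrust/execution_policy.h>
#include <thrust/for_each.h>
#include <thrust/iterator/counting_iterator.h>
#include <cstdio>

void fill_squares(int *d_out, int n, hipStream_t stream)   // was cudaStream_t in the .cu file
{
    (void) stream;  // kept only to show the renamed type; the sketch runs on the default stream
    // This Thrust call is identical in the .cu and .hip versions of the row above.
    thrust::for_each(thrust::device,
                     thrust::make_counting_iterator<int>(0),
                     thrust::make_counting_iterator<int>(n),
                     [d_out] __device__(int i) { d_out[i] = i * i; });
}

int main()
{
    const int n = 8;
    int *d_out;
    int h_out[n];
    hipMalloc((void **) &d_out, n * sizeof(int));
    hipStream_t stream{0};                // mirrors cudaStream_t stream{0} in the .cu file
    fill_squares(d_out, n, stream);
    hipDeviceSynchronize();
    hipMemcpy(h_out, d_out, n * sizeof(int), hipMemcpyDeviceToHost);
    printf("last value: %d\n", h_out[n - 1]);
    hipFree(d_out);
    return 0;
}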
db74ad4c4af645c1dd7b0ac701101bed6088e375.hip
// !!! This is a file automatically generated by hipify!!! /* Copyright (c) 2011-2017, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of NVIDIA CORPORATION nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include <util.h> #include <amgx_timer.h> #include <amg.h> #include <basic_types.h> #include <types.h> #include <norm.h> #include <logger.h> #include <iostream> #include <iomanip> #include <blas.h> #include <multiply.h> #include <algorithm> #include <amg_level.h> #include <amgx_c.h> #include <profile.h> #include <distributed/glue.h> #include <misc.h> #include <string> #include <cassert> #include <csr_multiply.h> #include <memory_info.h> #include <thrust/sort.h> #include <thrust/remove.h> #include <thrust/unique.h> #include <thrust/binary_search.h> #include <thrust/iterator/constant_iterator.h> namespace amgx { template<AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> AMG<t_vecPrec, t_matPrec, t_indPrec> ::AMG(AMG_Config &cfg, const std::string &cfg_scope) : fine_h(0), fine_d(0), m_cfg(&cfg), m_cfg_scope(cfg_scope), ref_count(1), csr_workspace(NULL), d2_workspace(NULL) { cycle_iters = cfg.getParameter<int>("cycle_iters", cfg_scope); norm = cfg.getParameter<NormType>("norm", cfg_scope); max_levels = cfg.getParameter<int>( "max_levels", cfg_scope ); coarsen_threshold = cfg.getParameter<double>("coarsen_threshold", cfg_scope); min_fine_rows = cfg.getParameter<int>( "min_fine_rows", cfg_scope ); min_coarse_rows = cfg.getParameter<int>( "min_coarse_rows", cfg_scope); m_amg_consolidation_flag = cfg.getParameter<int>("amg_consolidation_flag", cfg_scope); m_consolidation_lower_threshold = cfg.getParameter<int>("matrix_consolidation_lower_threshold", cfg_scope); m_consolidation_upper_threshold = cfg.getParameter<int>("matrix_consolidation_upper_threshold", cfg_scope); m_sum_stopping_criteria = cfg.getParameter<int>("use_sum_stopping_criteria", cfg_scope); m_structure_reuse_levels = cfg.getParameter<int>("structure_reuse_levels", cfg_scope); m_amg_host_levels_rows = cfg.getParameter<int>("amg_host_levels_rows", cfg_scope); if (m_consolidation_upper_threshold <= m_consolidation_lower_threshold) { 
FatalError("Error, matrix_consolidation_lower_threshold must be smaller than matrix_consolidation_upper_threshold", AMGX_ERR_CONFIGURATION); } std::string solverName, new_scope, tmp_scope; cfg.getParameter<std::string>( "coarse_solver", solverName, cfg_scope, new_scope ); if (solverName.compare("NOSOLVER") == 0) { coarse_solver_d = NULL; coarse_solver_h = NULL; } else { coarse_solver_d = SolverFactory<TConfig_d>::allocate(cfg, cfg_scope, "coarse_solver"); coarse_solver_h = SolverFactory<TConfig_h>::allocate(cfg, cfg_scope, "coarse_solver"); } //NOTE: //if dense_lu_num_rows=0 then either you are not using dense solver (it was not selected) or the matrix size for it to be used was set to zero //if dense_lu_max_rows=0 then either you are not using dense solver or you don't want to cap the maximum matrix size m_dense_lu_num_rows = 0; m_dense_lu_max_rows = 0; if ( solverName == "DENSE_LU_SOLVER" ) { m_dense_lu_num_rows = cfg.getParameter<int>( "dense_lu_num_rows", cfg_scope ); m_dense_lu_max_rows = cfg.getParameter<int>( "dense_lu_max_rows", cfg_scope ); } } template<AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void AMG<t_vecPrec, t_matPrec, t_indPrec>::allocate_fine_level() { fine_d = AMG_LevelFactory<TConfig_d>::allocate(this, tmng); fine_h = AMG_LevelFactory<TConfig_h>::allocate(this, tmng); } // Print the settings used by amg solver template<AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void AMG<t_vecPrec, t_matPrec, t_indPrec>::printSettings() const { std::cout << std::endl; std::cout << "AMG solver settings:" << std::endl; std::cout << "cycle_iters = " << cycle_iters << std::endl; std::cout << "norm = " << getString(norm) << std::endl; std::cout << "presweeps = " << getNumPresweeps() << std::endl; std::cout << "postsweeps = " << getNumPostsweeps() << std::endl; std::cout << "max_levels = " << max_levels << std::endl; std::cout << "coarsen_threshold = " << coarsen_threshold << std::endl; std::cout << "min_fine_rows = " << min_fine_rows << std::endl; std::cout << "min_coarse_rows = " << min_coarse_rows << std::endl; std::cout << "coarse_solver_d: " << this->coarse_solver_d->getName() << " with scope name " << this->coarse_solver_d->getScope() << std::endl; std::cout << "coarse_solver_h: " << this->coarse_solver_h->getName() << " with scope name " << this->coarse_solver_h->getScope() << std::endl; } template<AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> AMG<t_vecPrec, t_matPrec, t_indPrec>::~AMG() { if (fine_d) { delete fine_d; } if (fine_h) { delete fine_h; } // Don't delete both since the hierarchies meet at some point !!! 
delete coarse_solver_d; delete coarse_solver_h; if ( d2_workspace != NULL && d2_workspace != csr_workspace ) { typedef TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> TConfig_d; CSR_Multiply<TConfig_d>::csr_workspace_delete( d2_workspace ); csr_workspace = NULL; } if ( csr_workspace != NULL ) { typedef TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> TConfig_d; CSR_Multiply<TConfig_d>::csr_workspace_delete( csr_workspace ); csr_workspace = NULL; } } template<class T_Config> void logDeviceType() { AMGXLOG("Devicetype", T_Config::MemSpaceInfo::getName()) } /********************************************************** * Setups the AMG system *********************************************************/ void analyze_coloring(device_vector_alloc<int> aggregates_d, device_vector_alloc<int> colors_d); template< AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec > class AMG_Setup { public: template< typename TConfig_hd > static AMG_Level<TConfig_hd> *setup( AMG<t_vecPrec, t_matPrec, t_indPrec> *amg, AMG_Level<TConfig_hd> *&level, int min_rows, bool hybrid ) { typedef typename TConfig_hd::MemSpace MemorySpace; typedef TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> TConfig_h; typedef TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> TConfig_d; typedef typename Matrix<TConfig_h>::IVector IVector_h; typedef typename Matrix<TConfig_d>::IVector IVector_d; typedef typename Matrix<TConfig_h>::VVector VVector_h; typedef typename Matrix<TConfig_d>::VVector VVector_d; typedef typename Matrix<TConfig_h>::MVector MVector_h; typedef typename Matrix<TConfig_d>::MVector MVector_d; typedef typename Matrix<TConfig_hd>::IVector IVector_hd; typedef typename Matrix<TConfig_hd>::VVector VVector_hd; typedef typename Matrix<TConfig_hd>::MVector MVector_hd; typedef typename MatPrecisionMap<t_matPrec>::Type ValueTypeA; typedef typename VecPrecisionMap<t_vecPrec>::Type ValueTypeB; static const AMGX_MemorySpace other_memspace = MemorySpaceMap<opposite_memspace<TConfig_hd::memSpace>::memspace>::id; typedef TemplateConfig<other_memspace, t_vecPrec, t_matPrec, t_indPrec> TConfig1; typedef TConfig1 T_Config1; MemorySpace memorySpaceTag; // The previous level. AMG_Level<TConfig_hd> *prev_level = 0L; typedef TemplateConfig<AMGX_host, AMGX_vecInt, t_matPrec, t_indPrec> hvector_type; typedef Vector<hvector_type> HVector; std::vector<HVector> partition_rows(0); HVector num_rows(1); int64_t num_rows_global; num_rows[0] = num_rows_global = level->getNumRows( ); int min_partition_rows = num_rows[0]; if (level->getA().is_matrix_distributed()) { level->getA( ).manager->getComms()->global_reduce(partition_rows, num_rows, level->getA( ), level->tag * 100 + 7); num_rows_global = 0; for (int i = 0; i < partition_rows.size(); i++) { min_partition_rows = ::min(partition_rows[i][0], min_partition_rows); num_rows_global += partition_rows[i][0]; } } Solver<TConfig_hd> *coarseSolver = amg->getCoarseSolver( MemorySpace() ); bool coarseSolverExists = coarseSolver != NULL; // Build the remaining / all the levels on the CPU. Note: level_h is NULL if all the setup happened on the GPU. 
while (true) { nvtxRange test("setup_level"); //Check if you reached the coarsest level (min_partition_rows is the number of rows in this partition/rank) //NOTE: min_rows = min_coarse_rows if async framework is disabled (min_fine_rows =< min_coarse_rows) if (amg->num_levels >= amg->max_levels || min_partition_rows <= min_rows) { //Check if the user wishes to use DENSE_LU_SOLVER capping the matrix the size, and the matrix size exceeds the maximum allowed //NOTE: if dense_lu_max_rows=0 then either you are not using dense solver or you don't want to cap the maximum matrix size if ((amg->m_dense_lu_max_rows != 0) && (min_partition_rows > amg->m_dense_lu_max_rows)) { amg->setCoarseSolver(NULL, MemorySpace()); delete coarseSolver; coarseSolver = NULL; coarseSolverExists = false; } //Check if there is no coarse solver, then setup the smoother to solve the coarsest level if (!coarseSolverExists) { level->setup_smoother(); } return level; } // Allocate next level or use existing one int reuse_next_level; AMG_Level<TConfig_hd> *nextLevel; if (!level->getNextLevel(MemorySpace()) || (amg->m_structure_reuse_levels <= amg->num_levels && amg->m_structure_reuse_levels != -1)) { if (level->getNextLevel(MemorySpace())) { delete level->getNextLevel(MemorySpace()); } reuse_next_level = 0; level->setReuseLevel(false); nextLevel = AMG_LevelFactory<TConfig_hd>::allocate(amg, level->getSmoother()->get_thread_manager()); level->setNextLevel( nextLevel ); } else { // reuse existing next level reuse_next_level = 1; level->setReuseLevel(true); nextLevel = level->getNextLevel(MemorySpace()); /* WARNING: we do not recompute prolongation (P) and restriction (R) when we are reusing the level structure (structure_reuse_levels > 0), but we do need to modify an existing coarse matrix Ac=R*A*P. Instead of calling Ac.set_initialized(0) in every path afterwards, we wil call it here. Notice that in the if part of this statement above when the new level is allocated it creates a new matrix which is not initialized by default (see the matrix constructor): AMG_Level_Factory::allocate -> Classical_AMG_LevelFactory::create -> new Classical_AMG_Level -> new AMG_Level -> new Matrix We are just matching this Ac.set_initialized(0) setting here. 
*/ Matrix<TConfig_hd> &Ac = nextLevel->getA(); Ac.set_initialized(0); } nextLevel->setLevelIndex( amg->num_levels ); level->getA().template setParameter<int>("level", amg->num_levels); //profileLevelDown( ); { // only compute aggregates if we can't reuse existing ones if (!reuse_next_level) { level->createCoarseVertices( ); } } //set the amg_level_index for this matrix nextLevel->getA().amg_level_index = amg->num_levels; int64_t N = num_rows_global * level->getA().get_block_dimy(); num_rows[0] = num_rows_global = level->getNumCoarseVertices(); if (level->getA().is_matrix_distributed()) { level->getA().manager->getComms()->global_reduce( partition_rows, num_rows, level->getA(), level->tag * 100 + 8 ); num_rows_global = 0; for (int i = 0; i < partition_rows.size(); i++) { num_rows_global += partition_rows[i][0]; } } // num_rows[0] contains the total number of rows across all partitions int64_t nextN = num_rows_global * level->getA().get_block_dimy(); if (!level->getA().is_matrix_distributed()) { min_partition_rows = num_rows[0]; } else { int num_parts = level->getA().manager->getComms()->get_num_partitions(); float avg_size = num_rows_global / num_parts; if (avg_size < amg->m_consolidation_lower_threshold) { if (level->isClassicalAMGLevel()) { FatalError("Consolidation with classical path not supported)", AMGX_ERR_NOT_IMPLEMENTED); } int new_num_parts; bool want_neighbors = false; level->getA().manager->computeDestinationPartitions(amg->m_consolidation_upper_threshold, avg_size, num_parts, new_num_parts, want_neighbors); if (new_num_parts != num_parts) { level->setIsConsolidationLevel(true); // Modify partition_rows so that non-consolidated partitions have 0 rows // Root partitions have total number of rows to consolidate IVector_h row_count_part(num_parts, 0); for (int i = 0; i < num_parts; i++) { row_count_part[level->getA().manager->getDestinationPartitions()[i]] += partition_rows[i][0]; } for (int i = 0; i < num_parts; i++) { partition_rows[i][0] = row_count_part[i]; } } } if (!amg->m_sum_stopping_criteria) { min_partition_rows = INT_MAX; for (int i = 0; i < partition_rows.size(); i++) { // If aggregation AMG, ignore partitions with 0 rows, since those are caused by consolidation // If classical AMG, include all partitions if ( level->isClassicalAMGLevel() || (!(level->isClassicalAMGLevel()) && partition_rows[i][0] != 0)) { min_partition_rows = ::min(partition_rows[i][0], min_partition_rows); } } } else { // use sum instead of min min_partition_rows = 0; for (int i = 0; i < partition_rows.size(); i++) { // If aggregation AMG, ignore partitions with 0 rows, since those are caused by consolidation // If classical AMG, include all partitions if ( level->isClassicalAMGLevel() || (!(level->isClassicalAMGLevel()) && partition_rows[i][0] != 0)) { min_partition_rows += partition_rows[i][0]; } } } } // stop here if next level size is < min_rows if ( nextN <= amg->coarsen_threshold * N && nextN != N && min_partition_rows >= min_rows ) { level->createCoarseMatrices(); // Resize coarse vectors. 
int nextSize = level->getNextLevelSize(); level->getxc( ).resize( nextSize ); level->getxc().set_block_dimy(level->getA( ).get_block_dimy()); level->getxc().set_block_dimx(1); level->getxc().tag = nextLevel->tag * 100 + 1; level->getbc( ).resize( nextSize ); level->getbc().set_block_dimy(level->getA( ).get_block_dimy()); level->getbc().set_block_dimx(1); level->getbc().tag = nextLevel->tag * 100 + 0; int size, offset; level->getA().getOffsetAndSizeForView(FULL, &offset, &size); level->getr().resize( size * level->getA( ).get_block_dimy() ); level->getr().set_block_dimy(level->getA( ).get_block_dimy()); level->getr().set_block_dimx(1); level->getr().tag = nextLevel->tag * 100 + 2; } else { // delete next level that we just created level->deleteNextLevel( memorySpaceTag ); } if (!level->isCoarsest() || !coarseSolverExists) { level->setup_smoother(); } if (level->isCoarsest()) { break; } // If consolidation level and not root partition, break; if (!level->getA().is_matrix_singleGPU() && level->isConsolidationLevel() && !level->getA().manager->isRootPartition()) { amg->setCoarseSolver(NULL, MemorySpace()); delete coarseSolver; coarseSolver = NULL; coarseSolverExists = false; break; } nextLevel->setup(); // Move to the next level. prev_level = level; level = nextLevel; // Increment the level counter. amg->num_levels++; } //end of while(true) return prev_level; } template< typename TConfig_hd > static AMG_Level<TConfig_hd> *setup_v2( AMG<t_vecPrec, t_matPrec, t_indPrec> *amg, AMG_Level<TConfig_hd> *&level, int min_rows, bool hybrid ) { typedef typename TConfig_hd::MemSpace MemorySpace; typedef TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> TConfig_h; typedef TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> TConfig_d; typedef typename Matrix<TConfig_h>::IVector IVector_h; typedef typename Matrix<TConfig_d>::IVector IVector_d; typedef typename Matrix<TConfig_h>::VVector VVector_h; typedef typename Matrix<TConfig_d>::VVector VVector_d; typedef typename Matrix<TConfig_h>::MVector MVector_h; typedef typename Matrix<TConfig_d>::MVector MVector_d; typedef typename Matrix<TConfig_hd>::IVector IVector_hd; typedef typename Matrix<TConfig_hd>::VVector VVector_hd; typedef typename Matrix<TConfig_hd>::MVector MVector_hd; typedef typename MatPrecisionMap<t_matPrec>::Type ValueTypeA; typedef typename VecPrecisionMap<t_vecPrec>::Type ValueTypeB; MemorySpace memorySpaceTag; // The previous level. 
AMG_Level<TConfig_hd> *prev_level = 0L; typedef TemplateConfig<AMGX_host, AMGX_vecInt, t_matPrec, t_indPrec> hvector_type; typedef Vector<hvector_type> HVector; std::vector<HVector> partition_rows(0); HVector num_rows(1); int64_t num_rows_global; num_rows[0] = num_rows_global = level->getNumRows( ); int min_partition_rows = INT_MAX, offset = 0, n = 0, num_parts = 1, num_active_parts = 0; float avg_size; if (level->getA().is_matrix_distributed()) { num_parts = level->getA().manager->getComms()->get_num_partitions(); level->getA( ).manager->getComms()->global_reduce(partition_rows, num_rows, level->getA( ), level->tag * 100 + 7); num_rows_global = 0; for (int i = 0; i < partition_rows.size(); i++) { if (partition_rows[i][0] != 0) { min_partition_rows = ::min(partition_rows[i][0], min_partition_rows); num_active_parts++; } num_rows_global += partition_rows[i][0]; } if (min_partition_rows == INT_MAX) { min_partition_rows = 0; } } IVector_h row_count_part(num_parts, 0); Solver<TConfig_hd> *coarseSolver = amg->getCoarseSolver( MemorySpace() ); bool coarseSolverExists = coarseSolver != NULL; // Build the remaining / all the levels on the CPU. Note: level_h is NULL if all the setup happened on the GPU. while (true) { // Glue matrices of the current level avg_size = num_rows_global / num_parts; // Allow to glue other levels tha 0 if COARSE_CLA_CONSO is true #if COARSE_CLA_CONSO if (level->getA().is_matrix_distributed() && avg_size < amg->m_consolidation_lower_threshold) { #else if (level->getA().is_matrix_distributed() && avg_size < amg->m_consolidation_lower_threshold && level->getLevelIndex() == 0) { #endif // Just remove level->getLevelIndex() == 0 in the previous test to allow coarse level consolidation #ifdef AMGX_WITH_MPI level->getA().manager->setIsGlued(false); int new_num_parts = glue_level(amg, level, num_active_parts); if (new_num_parts && new_num_parts != num_active_parts) { if (level->getA().manager->global_id() == 0) { std::cout << "Level " << level->getLevelIndex() << " has been consolidated : " << num_active_parts << " --> " << new_num_parts << std::endl; } // this is for coarse level consolidation if (level->getLevelIndex() > 0) { level->setIsConsolidationLevel(true); } level->setup(); num_active_parts = new_num_parts; // Modify partition_rows so that non-consolidated partitions have 0 rows // Root partitions have total number of rows to consolidate num_rows[0] = level->getNumRows(); level->getA().manager->getComms()->global_reduce( partition_rows, num_rows, level->getA(), level->tag * 100 + 33 ); // Update some local arrays and variables num_rows_global = 0; for (int i = 0; i < partition_rows.size(); i++) { num_rows_global += partition_rows[i][0]; } for (int i = 0; i < num_parts; i++) { row_count_part[level->getA().manager->getDestinationPartitions()[i]] += partition_rows[i][0]; } for (int i = 0; i < num_parts; i++) { partition_rows[i][0] = row_count_part[i]; } } else { level->getA().manager->setIsGlued(false); } #endif } level->getA().getOffsetAndSizeForView(OWNED, &offset, &n); if (!n) { // no coarse solver for empty matrices? 
// maybe we can deal with this in classical amg cycle amg->setCoarseSolver(NULL, MemorySpace()); delete coarseSolver; coarseSolver = NULL; coarseSolverExists = false; } //Check if you reached the coarsest level (min_partition_rows is the number of rows in this partition/rank) //NOTE: min_rows = min_coarse_rows if async framework is disabled (min_fine_rows =< min_coarse_rows) if (amg->num_levels >= amg->max_levels || min_partition_rows <= min_rows) { #if 0 //AMGX_ASYNCCPU_PROOF_OF_CONCEPT asyncmanager::singleton()->waitall(); #endif //Check if the user wishes to use DENSE_LU_SOLVER capping the matrix the size, and the matrix size exceeds the maximum allowed //NOTE: if dense_lu_max_rows=0 then either you are not using dense solver or you don't want to cap the maximum matrix size if ((amg->m_dense_lu_max_rows != 0) && (min_partition_rows > amg->m_dense_lu_max_rows)) { amg->setCoarseSolver(NULL, MemorySpace()); delete coarseSolver; coarseSolver = NULL; coarseSolverExists = false; } //Check if there is no coarse solver, then setup the smoother to solve the coarsest level // If n is 0 then the matrix is consolidated so we don't setup the smoother // We always setup the smoother on finest level if (!coarseSolverExists) { level->setup_smoother(); } return level; } // Allocate next level or use existing one int reuse_next_level; AMG_Level<TConfig_hd> *nextLevel; if (!level->getNextLevel(MemorySpace()) || (amg->m_structure_reuse_levels <= amg->num_levels && amg->m_structure_reuse_levels != -1)) { if (level->getNextLevel(MemorySpace())) { delete level->getNextLevel(MemorySpace()); } reuse_next_level = 0; level->setReuseLevel(false); nextLevel = AMG_LevelFactory<TConfig_hd>::allocate(amg, level->getSmoother()->get_thread_manager()); level->setNextLevel( nextLevel ); } else { // reuse existing next level reuse_next_level = 1; level->setReuseLevel(true); nextLevel = level->getNextLevel(MemorySpace()); /* WARNING: we do not recompute prolongation (P) and restriction (R) when we are reusing the level structure (structure_reuse_levels > 0), but we do need to modify an existing coarse matrix Ac=R*A*P. Instead of calling Ac.set_initialized(0) in every path afterwards, we wil call it here. Notice that in the if part of this statement above when the new level is allocated it creates a new matrix which is not initialized by default (see the matrix constructor): AMG_Level_Factory::allocate -> Classical_AMG_LevelFactory::create -> new Classical_AMG_Level -> new AMG_Level -> new Matrix We are just matching this Ac.set_initialized(0) setting here. 
*/ Matrix<TConfig_hd> &Ac = nextLevel->getA(); Ac.set_initialized(0); } nextLevel->setLevelIndex( amg->num_levels ); level->getA().template setParameter<int>("level", amg->num_levels); #if 0 //AMGX_ASYNCCPU_PROOF_OF_CONCEPT if (async_global::singleton()->using_async_coloring) { struct task_setupsmoother : public task { AMG_Level<TConfig_hd> *level; bool coarseSolverExists; int profiler_color() {return 0x00ffff;} std::string name() { return "setup_smoother"; } void run() { // Setup smoother unless coarseSolver exists and reached coarsest level if ( !( level->isCoarsest() && coarseSolverExists ) ) { level->setup_smoother(); } } }; task_setupsmoother *task_setupsmoother_ = new task_setupsmoother; task_setupsmoother_->level = level; task_setupsmoother_->coarseSolverExists = coarseSolverExists; // create the aggregates (aggregation) or coarse points (classical) level->createCoarseVertices( ); enqueue_async(asyncmanager::singleton()->main_thread_queue(0), task_setupsmoother_); } else #endif { // only compute aggregates if we can't reuse existing ones if (!reuse_next_level) { level->createCoarseVertices( ); } } //set the amg_level_index for this matrix nextLevel->getA().amg_level_index = amg->num_levels; int64_t N = num_rows_global * level->getA().get_block_dimy(); num_rows[0] = num_rows_global = level->getNumCoarseVertices(); // Do reduction across all partitions if (level->getA().is_matrix_distributed()) { level->getA().manager->getComms()->global_reduce( partition_rows, num_rows, level->getA(), level->tag * 100 + 8 ); num_rows_global = 0; for (int i = 0; i < partition_rows.size(); i++) { num_rows_global += partition_rows[i][0]; } } // num_rows[0] contains the total number of rows across all partitions int64_t nextN = num_rows_global * level->getA().get_block_dimy(); if (!level->getA().is_matrix_distributed()) { min_partition_rows = num_rows[0]; } else { // level->setIsConsolidationLevel(true); // coaese root partions exited some time in classical if (!amg->m_sum_stopping_criteria) { min_partition_rows = INT_MAX; for (int i = 0; i < partition_rows.size(); i++) { // Before we did // If aggregation AMG, ignore partitions with 0 rows, since those are caused by consolidation // If classical AMG, include all partitions if (partition_rows[i][0] != 0) { min_partition_rows = ::min(partition_rows[i][0], min_partition_rows); } } // if we exit the previous loop with min_partition_rows == INT_MAX it means all next size are 0 if (min_partition_rows == INT_MAX) { min_partition_rows = 0; } } else { // use sum instead of min min_partition_rows = 0; for (int i = 0; i < partition_rows.size(); i++) { // If aggregation AMG, ignore partitions with 0 rows, since those are caused by consolidation // If classical AMG, include all partitions if (partition_rows[i][0] != 0) { min_partition_rows += partition_rows[i][0]; } } } } // stop here if next level size is < min_rows if ( nextN <= amg->coarsen_threshold * N && nextN != N && min_partition_rows >= min_rows ) { level->createCoarseMatrices(); // Resize coarse vectors. 
int nextSize = level->getNextLevelSize(); level->getxc( ).resize( nextSize ); level->getxc().set_block_dimy(level->getA( ).get_block_dimy()); level->getxc().set_block_dimx(1); level->getxc().tag = nextLevel->tag * 100 + 1; level->getbc( ).resize( nextSize ); level->getbc().set_block_dimy(level->getA( ).get_block_dimy()); level->getbc().set_block_dimx(1); level->getbc().tag = nextLevel->tag * 100 + 0; int size, offset; level->getA().getOffsetAndSizeForView(FULL, &offset, &size); level->getr().resize( size * level->getA( ).get_block_dimy() ); level->getr().set_block_dimy(level->getA( ).get_block_dimy()); level->getr().set_block_dimx(1); level->getr().tag = nextLevel->tag * 100 + 2; } else { // delete next level that we just created level->deleteNextLevel( memorySpaceTag ); } #if 0 //AMGX_ASYNCCPU_PROOF_OF_CONCEPT if (async_global::singleton()->using_async_coloring) { //cancel the CPU coloring task if the GPU is idle hipStreamSynchronize(thrust::global_thread_handle::get_stream()); enqueue_async(asyncmanager::singleton()->global_parallel_queue, async_global::singleton()->cancel_cpu_coloring_task); //wait for every spawning task asyncmanager::singleton()->waitall(); } else #endif // If n is 0 then the matrix is consolidated so we don't setup the smoother if (!level->isCoarsest() || (!coarseSolverExists)) { level->setup_smoother(); } if (level->isCoarsest()) { break; } // Barrier (might be removed) // ****************************************** if (level->getA().is_matrix_distributed()) { level->getA().manager->getComms()->barrier(); } // ****************************************** nextLevel->setup(); nextLevel->getA().setResources(level->getA().getResources()); #if 0 //AMGX_ASYNCCPU_PROOF_OF_CONCEPT // color the matrix ASAP if (!nextmin_fine_rowsmin_fine_rowsmin_fine_rowsLevel->getA().is_matrix_setup()) { nextLevel->getA().setupMatrix(nextLevel->getSmoother(), *amg->m_cfg, false); } #endif // Move to the next level. prev_level = level; level = nextLevel; // Increment the level counter. 
amg->num_levels++; } //end of while(true) #if 0 //AMGX_ASYNCCPU_PROOF_OF_CONCEPT hipStreamSynchronize(thrust::global_thread_handle::threadStream[getCurrentThreadId()]); thrust::global_thread_handle::threadStream[getCurrentThreadId()] = 0; #endif return prev_level; } template< typename TConfig_hd > static int glue_level(AMG<t_vecPrec, t_matPrec, t_indPrec> *amg, AMG_Level<TConfig_hd> *&level, int num_active_parts) { #ifdef AMGX_WITH_MPI if (level->getA().manager->getComms() != NULL) { MPI_Comm A_com, temp_com; int new_num_parts, n_global, num_parts, avg; bool wantneighbors = true; A_com = level->getA().manager->getComms()->get_mpi_comm(); if (level->getA().manager->part_offsets_h.size() == 0) // create part_offsets_h & part_offsets { create_part_offsets(A_com, level->getA()); } n_global = level->getA().manager->part_offsets_h.back(); num_parts = level->getA().manager->getComms()->get_num_partitions(); avg = n_global / num_parts; level->getA().manager->computeDestinationPartitions(amg->m_consolidation_upper_threshold, avg, num_parts, new_num_parts, wantneighbors); if (new_num_parts != num_active_parts) { // Compute consolidation info compute_glue_info(level->getA()); // Compute a temporary splited communicator to glue matrices temp_com = compute_glue_matrices_communicator(level->getA()); // glue_matrices does the following : unpack --> glue --> upload --> repack glue_matrices(level->getA(), A_com, temp_com); return new_num_parts; } else { return num_active_parts; } } else { return 0; } #else return 0; #endif } template< typename TConfig0, AMGX_MemorySpace MemSpace0, AMGX_MemorySpace MemSpace1 > static void setup( AMG<t_vecPrec, t_matPrec, t_indPrec> *amg, Matrix<TConfig0> &A ) { typedef typename TConfig0::template setMemSpace<MemSpace1>::Type TConfig1; typedef typename MemorySpaceMap<MemSpace0>::Type MemorySpace0; typedef typename MemorySpaceMap<MemSpace1>::Type MemorySpace1; MemorySpace0 memorySpaceTag0; MemorySpace1 memorySpaceTag1; // delete zero level from other memoryspace if (amg->getFinestLevel(memorySpaceTag1) != NULL) { delete amg->getFinestLevel(memorySpaceTag1); AMG_Level<TConfig1> *level_0_1 = NULL; amg->setFinestLevel(level_0_1); } int min_fine_rows = amg->min_fine_rows; int min_coarse_rows = amg->min_coarse_rows; // Make sure the number of fine rows is never smaller than min_coarse_rows. min_fine_rows = ::max( min_fine_rows, min_coarse_rows ); // Reset AMG hierarchy. amg->num_levels = 1; // Build levels on the first device. AMG_Level<TConfig0> *level_0 = amg->getFinestLevel(memorySpaceTag0), *prev_level_0 = 0L; // if resetup if (level_0->isSetup() && amg->m_structure_reuse_levels == 0) { delete level_0; level_0 = AMG_LevelFactory<TConfig0>::allocate(amg); amg->setFinestLevel( level_0 ); } level_0->setA(A); level_0->setLevelIndex( 0 ); level_0->setup(); if (level_0->isClassicalAMGLevel() && amg->m_amg_consolidation_flag == 1 && level_0->getA().is_matrix_distributed()) { #ifdef AMGX_WITH_MPI if (amg->m_consolidation_lower_threshold == 0 ) // m_consolidation_lower_threshold is unset { int root = 0; int max = 0, min = 0; MPI_Comm comm = level_0->getA().manager->getComms()->get_mpi_comm(); if (level_0->getA().manager->global_id() == 0 ) { size_t avail, total; hipMemGetInfo (&avail, &total); size_t used = level_0->bytes(); // Memory used by the finest level. 
size_t hierarchy = 6 * used; // Estimation of the size of the hierarchy size_t overhead = 1000000000; // 1GB of storage for other AMGX stuff // The Strength factor represents how many time a matrix like the one we localy have can fit into this GPU // This is based on the one we have on the finest level on rank 0 and considering the total hierarchy can be 6x larger double strength = (static_cast<double>(total - overhead)) / hierarchy; // The sum of memory required by coarse levels should be (approximately) smaller or equal than 6x the memory requiered by the finest level. // This assume a good load balencing // We should check when we glue matrices that we are not going out of memory. if (strength > 1.0) { int rows = level_0->getNumRows(); max = (strength * rows) / 6; // We divide by 6 because we increase the size of the following coarse levels by increasing the size of the current matrix if (max > 0) { min = max - 1; } else { max = 1; min = 0; } } else { max = 1; min = 0; } } MPI_Bcast( &max, 1, MPI_INT, root, comm ); MPI_Bcast( &min, 1, MPI_INT, root, comm ); amg->m_consolidation_lower_threshold = min; amg->m_consolidation_upper_threshold = max; } if (amg->m_consolidation_lower_threshold > 0) { prev_level_0 = setup_v2<TConfig0>( amg, level_0, min_fine_rows, min_fine_rows > min_coarse_rows ); // entering in gluing path } else #endif { prev_level_0 = setup<TConfig0>( amg, level_0, min_fine_rows, min_fine_rows > min_coarse_rows ); // no glue because the matrix is too big } } else { prev_level_0 = setup<TConfig0>( amg, level_0, min_fine_rows, min_fine_rows > min_coarse_rows ); // usual path / aggregation consolidation path } // Move to the other memory space if needed. if ( min_fine_rows == min_coarse_rows ) { Solver<TConfig0> *coarseSolver = amg->getCoarseSolver( memorySpaceTag0 ); if ( coarseSolver ) { coarseSolver->setup( level_0->getA(), false ); } } else { AMG_Level<TConfig1> *level_1 = AMG_LevelFactory<TConfig1>::allocate(amg); amg->setFinestLevel( level_1 ); level_1->getA( ).copy( level_0->getA( ) ); level_1->setLevelIndex( level_0->getLevelIndex( ) ); level_1->setup(); // Make that level the next one in the hierarchy. if ( prev_level_0 ) { prev_level_0->setNextLevel( level_1 ); assert( prev_level_0->getNextLevel( memorySpaceTag0 ) == level_0 ); prev_level_0->deleteNextLevel( memorySpaceTag0 ); } // Build the hierarchy. setup<TConfig1>( amg, level_1, min_coarse_rows, false ); // Build the coarse solver. Solver<TConfig1> *coarseSolver = amg->getCoarseSolver( memorySpaceTag1 ); if ( coarseSolver ) { coarseSolver->setup( level_1->getA(), false ); } } // Used only for device modes without hybrid mode. After reaching level where numrows <= amg_host_levels_rows // it creates copy of the hierarchy starting with this level. // This is experimental feauture intended to measure scaling of the solve part when coarse levels are on the host. 
if (amg->m_amg_host_levels_rows > 0) { AMG_Level<TConfig0> *d_cur_lvl = amg->getFinestLevel(memorySpaceTag0); AMG_Level<TConfig1> *h_cur_lvl = NULL, *h_prev_lvl = NULL; AMG_Level<TConfig0> *last_dev_lvl = NULL; AMG_Level<TConfig1> *first_host_lvl = NULL; while (d_cur_lvl != NULL) { if (d_cur_lvl->getNumRows() <= amg->m_amg_host_levels_rows) { break; } last_dev_lvl = d_cur_lvl; d_cur_lvl = d_cur_lvl->getNextLevel( memorySpaceTag0 ); } if (d_cur_lvl != NULL) { while (d_cur_lvl != NULL) { h_cur_lvl = AMG_LevelFactory<TConfig1>::allocate(amg, amg->tmng); h_cur_lvl->transfer_from(d_cur_lvl); h_cur_lvl->setup(); if (amg->getCoarseSolver(memorySpaceTag0) != NULL) { //remove coarse solver on the device delete amg->getCoarseSolver(memorySpaceTag0); amg->setCoarseSolver(NULL, memorySpaceTag0); // it should exist for the host, but check nevertheless Solver<TConfig1> *coarseSolver = amg->getCoarseSolver( memorySpaceTag1 ); bool coarseSolverExists = coarseSolver != NULL; if (!coarseSolverExists) { FatalError("Need to recrreate coarse solver got the host", AMGX_ERR_NOT_IMPLEMENTED); } } else { h_cur_lvl->setup_smoother(); } if (first_host_lvl == NULL) { first_host_lvl = h_cur_lvl; } if (h_prev_lvl != NULL) { h_prev_lvl->setNextLevel(h_cur_lvl); } h_prev_lvl = h_cur_lvl; h_cur_lvl = NULL; d_cur_lvl = d_cur_lvl->getNextLevel(memorySpaceTag0); } // cleanup unnecessary device hierarchy part delete last_dev_lvl->getNextLevel(memorySpaceTag0); // link last device level to the first host level last_dev_lvl->setNextLevel(first_host_lvl); last_dev_lvl->resetNextLevel(memorySpaceTag0); // tell amg that there are host levels amg->setFinestLevel( first_host_lvl ); } } MemoryInfo::updateMaxMemoryUsage(); logDeviceType<TConfig0>( ); logDeviceType<TConfig1>( ); } }; /********************************************************** * Solves the AMG system *********************************************************/ template< class T_Config > class AMG_Solve { typedef T_Config TConfig; static const AMGX_VecPrecision vecPrec = TConfig::vecPrec; static const AMGX_MatPrecision matPrec = TConfig::matPrec; static const AMGX_IndPrecision indPrec = TConfig::indPrec; typedef typename TConfig::MemSpace MemorySpace; typedef Matrix<TConfig> Matrix_hd; typedef Vector<TConfig> Vector_hd; typedef Vector<TemplateConfig<AMGX_host, vecPrec, matPrec, indPrec> > Vector_h; typedef T_Config TConfig_hd; typedef AMG<vecPrec, matPrec, indPrec> AMG_Class; public: static void solve_iteration( AMG_Class *amg, Vector_hd &b, Vector_hd &x) { hipStreamSynchronize(0); nvtxRange amg_si("amg_solve_iteration"); MemorySpace memorySpaceTag; AMG_Level<TConfig_hd> *fine = amg->getFinestLevel( memorySpaceTag ); assert(fine != NULL); CycleFactory<TConfig>::generate( amg, fine, b, x ); fine->unsetInitCycle(); // Note: this sometimes takes too much time on host making GPU idle. // Solve is not that important for memory - main mem usage comes from setup. // Disabling this call for now //MemoryInfo::updateMaxMemoryUsage(); hipStreamSynchronize(0); } }; // Setup the hierarchy to solve on host/device. 
template<AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void AMG<t_vecPrec, t_matPrec, t_indPrec>::setup( Matrix_h &A ) { if ( m_dense_lu_num_rows > 0 ) { min_coarse_rows = m_dense_lu_num_rows / A.get_block_dimy(); } // read reuse structure levels option from config in case it has been changed // this allows fine control over the reuse of hierarchies if setup/solve is called multiple times m_structure_reuse_levels = m_cfg->getParameter<int>("structure_reuse_levels", m_cfg_scope); AMG_Setup<t_vecPrec, t_matPrec, t_indPrec>::template setup<TConfig_h, AMGX_host, AMGX_device>( this, A ); // Don't need the workspace anymore if ( d2_workspace != NULL && d2_workspace != csr_workspace ) { typedef TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> TConfig_d; CSR_Multiply<TConfig_d>::csr_workspace_delete( d2_workspace ); csr_workspace = NULL; } if ( csr_workspace != NULL ) { typedef TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> TConfig_d; CSR_Multiply<TConfig_d>::csr_workspace_delete( csr_workspace ); csr_workspace = NULL; } } template<AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void AMG<t_vecPrec, t_matPrec, t_indPrec>::setup( Matrix_d &A ) { if ( m_dense_lu_num_rows > 0 ) { min_coarse_rows = m_dense_lu_num_rows / A.get_block_dimy(); } // read reuse structure levels option from config in case it has been changed // this allows fine control over the reuse of hierarchies if setup/solve is called multiple times m_structure_reuse_levels = m_cfg->getParameter<int>("structure_reuse_levels", m_cfg_scope); AMG_Setup<t_vecPrec, t_matPrec, t_indPrec>::template setup<TConfig_d, AMGX_device, AMGX_host>( this, A ); // Don't need the workspace anymore if ( d2_workspace != NULL && d2_workspace != csr_workspace ) { typedef TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> TConfig_d; CSR_Multiply<TConfig_d>::csr_workspace_delete( d2_workspace ); csr_workspace = NULL; } if ( csr_workspace != NULL ) { typedef TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> TConfig_d; CSR_Multiply<TConfig_d>::csr_workspace_delete( csr_workspace ); csr_workspace = NULL; } } // Setup the hierarchy to solve on host. 
template<AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void AMG<t_vecPrec, t_matPrec, t_indPrec>::setup( AMG_Level<TConfig_h> *level ) { AMG_Setup<t_vecPrec, t_matPrec, t_indPrec>::template setup<TConfig_h>( this, level, 2, false ); } template<AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec > void AMG<t_vecPrec, t_matPrec, t_indPrec>::setup( AMG_Level<TConfig_d> *level ) { AMG_Setup<t_vecPrec, t_matPrec, t_indPrec>::template setup<TConfig_d>( this, level, 2, false ); } template< AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec > void AMG<t_vecPrec, t_matPrec, t_indPrec>::solve_init( Vector_d &b, Vector_d &x, bool xIsZero) { if (xIsZero) { fine_d->setInitCycle(); } } template< AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec > void AMG<t_vecPrec, t_matPrec, t_indPrec>::solve_init( Vector_h &b, Vector_h &x, bool xIsZero) { if (xIsZero) { fine_h->setInitCycle(); } } template< AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec > void AMG<t_vecPrec, t_matPrec, t_indPrec>::solve_iteration( Vector_d &b, Vector_d &x) { AMGX_CPU_PROFILER( "AMG::solve_iteration " ); AMG_Solve<TConfig_d>::solve_iteration( this, b, x); } template< AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec > void AMG<t_vecPrec, t_matPrec, t_indPrec>::solve_iteration( Vector_h &b, Vector_h &x) { AMGX_CPU_PROFILER( "AMG::solve_iteration " ); AMG_Solve<TConfig_h>::solve_iteration( this, b, x); } template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void AMG<t_vecPrec, t_matPrec, t_indPrec>::getGridStatisticsString(std::stringstream &ss) { AMG_Level<TConfig_d> *level_d = this->fine_d; AMG_Level<TConfig_h> *level_h = this->fine_h; int64_t total_rows = 0; int64_t total_nnz = 0; float total_size = 0; ss << "AMG Grid:\n"; ss << " Number of Levels: " << this->num_levels << endl; AMGXLOG("Number of Levels", this->num_levels) ss << std::setw(15) << "LVL" << std::setw(13) << "ROWS" << std::setw(18) << "NNZ" << std::setw(10) << "SPRSTY" << std::setw(15) << "Mem (GB)" << std::endl; ss << " --------------------------------------------------------------\n"; while (level_d != NULL) { int has_diag = level_d->getA( ).hasProps(DIAG) ? 
1 : 0; int64_t num_rows = (int)(level_d->getA( ).get_num_rows() * level_d->getA( ).get_block_dimy()); int64_t nnz = (int)((level_d->getA( ).get_num_nz() + has_diag * level_d->getA( ).get_num_rows()) * level_d->getA( ).get_block_dimy() * level_d->getA( ).get_block_dimx()); float size = level_d->bytes(true) / 1024.0 / 1024 / 1024; // If aggregation AMG, skip this if # of neighbors = 0, since we're consolidating // If classical AMG, we need to enter here since ranks are allowed to have 0 rows (or no neighbors) if ( !level_d->getA().is_matrix_singleGPU() || (level_d->isClassicalAMGLevel() && level_d->getA().is_matrix_distributed()) ) { level_d->getA().manager->global_reduce_sum(&num_rows); level_d->getA().manager->global_reduce_sum(&nnz); level_d->getA().manager->global_reduce_sum(&size); } total_rows += num_rows; total_nnz += nnz; total_size += size; double sparsity = nnz / (double) ( num_rows * num_rows); ss << std::setw(12) << level_d->getLevelIndex( ) << "(D)" << std::setw(13) << num_rows << std::setw(18) << nnz << std::setw(10) << std::setprecision(3) << sparsity << std::setw(15) << size << std::setprecision(6) << std::endl; level_d = level_d->getNextLevel( device_memory( ) ); } while (level_h != NULL) { int has_diag = level_h->getA( ).hasProps(DIAG) ? 1 : 0; int64_t num_rows = (int)(level_h->getA( ).get_num_rows() * level_h->getA( ).get_block_dimy()); int64_t nnz = (int)((level_h->getA( ).get_num_nz() + has_diag * level_h->getA( ).get_num_rows()) * level_h->getA( ).get_block_dimy() * level_h->getA( ).get_block_dimx()); float size = level_h->bytes(true) / 1024.0 / 1024 / 1024; // If aggregation AMG, skip this if # of neighbors = 0, since we're consolidating // If classical AMG, we need to enter here since ranks are allowed to have 0 rows (or no neighbors) if ( !level_h->getA().is_matrix_singleGPU() || (level_h->isClassicalAMGLevel() && level_h->getA().is_matrix_distributed()) ) { level_h->getA().manager->global_reduce_sum(&num_rows); level_h->getA().manager->global_reduce_sum(&nnz); level_h->getA().manager->global_reduce_sum(&size); } total_rows += num_rows; total_nnz += nnz; total_size += size; double sparsity = nnz / (double) ( num_rows * num_rows); ss << std::setw(12) << level_h->getLevelIndex( ) << "(H)" << std::setw(13) << num_rows << std::setw(18) << nnz << std::setw(10) << std::setprecision(3) << sparsity << std::setw(15) << size << std::setprecision(6) << std::endl; level_h = level_h->getNextLevel( host_memory( ) ); } int64_t fine_rows; int64_t fine_nnz; if (this->fine_h) { fine_rows = this->fine_h->getA( ).get_num_rows() * this->fine_h->getA( ).get_block_dimy(); fine_nnz = this->fine_h->getA( ).get_block_dimy() * this->fine_h->getA( ).get_block_dimx() * ( this->fine_h->getA( ).get_num_nz() + (this->fine_h->getA( ).hasProps(DIAG) ? this->fine_h->getA( ).get_num_rows() : 0) ) ; if (this->fine_h->getA().is_matrix_distributed()) { this->fine_h->getA().manager->global_reduce_sum(&fine_rows); this->fine_h->getA().manager->global_reduce_sum(&fine_nnz); } } else { fine_rows = this->fine_d->getA( ).get_num_rows() * this->fine_d->getA( ).get_block_dimy() ; fine_nnz = this->fine_d->getA( ).get_block_dimy() * this->fine_d->getA( ).get_block_dimx() * ( this->fine_d->getA( ).get_num_nz() + (this->fine_d->getA( ).hasProps(DIAG) ? 
this->fine_d->getA( ).get_num_rows() : 0) ); if (this->fine_d->getA().is_matrix_distributed()) { this->fine_d->getA().manager->global_reduce_sum(&fine_rows); this->fine_d->getA().manager->global_reduce_sum(&fine_nnz); } } ss << " --------------------------------------------------------------\n"; ss << " Grid Complexity: " << total_rows / (double) fine_rows << std::endl; ss << " Operator Complexity: " << total_nnz / (double) fine_nnz << std::endl; ss << " Total Memory Usage: " << total_size << " GB" << std::endl; ss << " --------------------------------------------------------------\n"; } template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void AMG<t_vecPrec, t_matPrec, t_indPrec>::printGridStatistics( ) { std::stringstream ss; this->getGridStatisticsString(ss); amgx_output(ss.str().c_str(), static_cast<int>(ss.str().length())); } template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void AMG<t_vecPrec, t_matPrec, t_indPrec>::getGridStatisticsString2(std::stringstream &ss) { AMG_Level<TConfig_d> *level_d = this->fine_d; AMG_Level<TConfig_h> *level_h = this->fine_h; int total_rows = 0; int total_nnz = 0; float total_size = 0; ss << " multigrid levels:\n"; while (level_d != NULL) { int has_diag = level_d->getA( ).hasProps(DIAG) ? 1 : 0; total_rows += (int)(level_d->getA( ).get_num_rows() * level_d->getA( ).get_block_dimy()); total_nnz += (int)((level_d->getA( ).get_num_nz() + has_diag * level_d->getA( ).get_num_rows()) * level_d->getA( ).get_block_dimy() * level_d->getA( ).get_block_dimx()); float size = level_d->bytes() / 1024.0 / 1024 / 1024; total_size += size; ss << std::setw(5) << level_d->getLevelIndex( ) << " " << std::setw(5) << level_d->getA( ).get_num_rows() << std::endl; level_d = level_d->getNextLevel( device_memory( ) ); } while (level_h != NULL) { int has_diag = level_h->getA( ).hasProps(DIAG) ? 
1 : 0; total_rows += (int)(level_h->getA( ).get_num_rows() * level_h->getA( ).get_block_dimy()); total_nnz += (int)((level_h->getA( ).get_num_nz() + has_diag * level_h->getA( ).get_num_rows()) * level_h->getA( ).get_block_dimy() * level_h->getA( ).get_block_dimx()); float size = level_h->bytes() / 1024.0 / 1024 / 1024; total_size += size; ss << std::setw(5) << level_h->getLevelIndex( ) << " " << std::setw(5) << level_h->getA( ).get_num_rows() << std::endl; level_h = level_h->getNextLevel( host_memory( ) ); } } template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void AMG<t_vecPrec, t_matPrec, t_indPrec>::printGridStatistics2( ) { std::stringstream ss; this->getGridStatisticsString2(ss); amgx_output(ss.str().c_str(), static_cast<int>(ss.str().length())); } using std::scientific; using std::fixed; // print a line of length l, starting at character s void printLine(const int l, const int s) { std::stringstream ss; ss << setw(s) << " "; for (int i = 0; i < l; i++) { ss << "-"; } ss << endl; amgx_output(ss.str().c_str(), static_cast<int>(ss.str().length())); } template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void AMG<t_vecPrec, t_matPrec, t_indPrec>::printCoarsePoints() { #ifdef DEBUG typedef std::vector<int> iVec; typedef std::vector<int>::iterator iVecIter; ofstream coarsePoints("coarse_points.dat"); iVec originalRows; AMG_Level<TConfig_d> *level_d = fine_d; while ( level_d != NULL ) { originalRows = level_d->getOriginalRows(); level_d = level_d->next_d; if (level_d == NULL) { break; } coarsePoints << level_d->level_id << " " << level_d->getNumRows() << endl; for (iVecIter it = originalRows.begin(); it != originalRows.end(); ++it) { coarsePoints << *it << endl; } } AMG_Level<TConfig_h> *level_h = fine_h; while ( level_h != NULL ) { originalRows = level_h->getOriginalRows(); level_h = level_h->next_h; if (level_h == NULL) { break; } coarsePoints << level_h->level_id << " " << level_h->getNumRows() << endl; for (iVecIter it = originalRows.begin(); it != originalRows.end(); ++it) { coarsePoints << *it << endl; } } coarsePoints.close(); #endif } template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void AMG<t_vecPrec, t_matPrec, t_indPrec>::printConnections() { #ifdef DEBUG ofstream connFile("connections.dat"); AMG_Level<TConfig_d> *level_d = fine_d; Matrix_d ATemp_d; while (level_d != NULL) { connFile << level_d->level_id << " " << level_d->getNumRows() << endl; ATemp_d = level_d->getA(); for (int i = 0; i < ATemp_d.get_num_rows(); i++) { // get the row offset & num rows int offset = ATemp_d.row_offsets[i]; int numEntries = ATemp_d.row_offsets[i + 1] - offset; // # of connections is numEntries - 1 (ignoring diagonal) // this->numConnections.push_back(numEntries-1); connFile << numEntries - 1 << " "; // loop over non-zeros and add non-diagonal terms for (int j = offset; j < offset + numEntries; j++) { int columnIndex = ATemp_d.column_indices[j]; if (i != columnIndex) { // this->connections.push_back(columnIndex); connFile << columnIndex << " "; } } connFile << endl; } level_d = level_d->next_d; } AMG_Level<TConfig_h> *level_h = fine_h; Matrix_h ATemp_h; while (level_h != NULL) { connFile << level_h->level_id << " " << level_h->getNumRows() << endl; ATemp_d = level_h->getA(); for (int i = 0; i < ATemp_h.get_num_rows(); i++) { // get the row offset & num rows int offset = ATemp_h.row_offsets[i]; int numEntries = ATemp_h.row_offsets[i + 1] - offset; // # of 
connections is numEntries - 1 (ignoring diagonal) // this->numConnections.push_back(numEntries-1); connFile << numEntries - 1 << " "; // loop over non-zeros and add non-diagonal terms for (int j = offset; j < offset + numEntries; j++) { int columnIndex = ATemp_h.column_indices[j]; if (i != columnIndex) { // this->connections.push_back(columnIndex); connFile << columnIndex << " "; } } connFile << endl; } level_h = level_h->next_h; } #endif } /**************************************** * Explicit instantiations ***************************************/ // real valued case template class AMG<AMGX_vecDouble, AMGX_matDouble, AMGX_indInt>; template class AMG<AMGX_vecFloat, AMGX_matFloat, AMGX_indInt>; template class AMG<AMGX_vecDouble, AMGX_matFloat, AMGX_indInt>; // complex valued case template class AMG<AMGX_vecComplex, AMGX_matComplex, AMGX_indInt>; template class AMG<AMGX_vecDoubleComplex, AMGX_matComplex, AMGX_indInt>; template class AMG<AMGX_vecDoubleComplex, AMGX_matDoubleComplex, AMGX_indInt>; } // namespace amgx
db74ad4c4af645c1dd7b0ac701101bed6088e375.cu
/* Copyright (c) 2011-2017, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of NVIDIA CORPORATION nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include <util.h> #include <amgx_timer.h> #include <amg.h> #include <basic_types.h> #include <types.h> #include <norm.h> #include <logger.h> #include <iostream> #include <iomanip> #include <blas.h> #include <multiply.h> #include <algorithm> #include <amg_level.h> #include <amgx_c.h> #include <profile.h> #include <distributed/glue.h> #include <misc.h> #include <string> #include <cassert> #include <csr_multiply.h> #include <memory_info.h> #include <thrust/sort.h> #include <thrust/remove.h> #include <thrust/unique.h> #include <thrust/binary_search.h> #include <thrust/iterator/constant_iterator.h> namespace amgx { template<AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> AMG<t_vecPrec, t_matPrec, t_indPrec> ::AMG(AMG_Config &cfg, const std::string &cfg_scope) : fine_h(0), fine_d(0), m_cfg(&cfg), m_cfg_scope(cfg_scope), ref_count(1), csr_workspace(NULL), d2_workspace(NULL) { cycle_iters = cfg.getParameter<int>("cycle_iters", cfg_scope); norm = cfg.getParameter<NormType>("norm", cfg_scope); max_levels = cfg.getParameter<int>( "max_levels", cfg_scope ); coarsen_threshold = cfg.getParameter<double>("coarsen_threshold", cfg_scope); min_fine_rows = cfg.getParameter<int>( "min_fine_rows", cfg_scope ); min_coarse_rows = cfg.getParameter<int>( "min_coarse_rows", cfg_scope); m_amg_consolidation_flag = cfg.getParameter<int>("amg_consolidation_flag", cfg_scope); m_consolidation_lower_threshold = cfg.getParameter<int>("matrix_consolidation_lower_threshold", cfg_scope); m_consolidation_upper_threshold = cfg.getParameter<int>("matrix_consolidation_upper_threshold", cfg_scope); m_sum_stopping_criteria = cfg.getParameter<int>("use_sum_stopping_criteria", cfg_scope); m_structure_reuse_levels = cfg.getParameter<int>("structure_reuse_levels", cfg_scope); m_amg_host_levels_rows = cfg.getParameter<int>("amg_host_levels_rows", cfg_scope); if (m_consolidation_upper_threshold <= m_consolidation_lower_threshold) { FatalError("Error, matrix_consolidation_lower_threshold must be smaller 
than matrix_consolidation_upper_threshold", AMGX_ERR_CONFIGURATION); } std::string solverName, new_scope, tmp_scope; cfg.getParameter<std::string>( "coarse_solver", solverName, cfg_scope, new_scope ); if (solverName.compare("NOSOLVER") == 0) { coarse_solver_d = NULL; coarse_solver_h = NULL; } else { coarse_solver_d = SolverFactory<TConfig_d>::allocate(cfg, cfg_scope, "coarse_solver"); coarse_solver_h = SolverFactory<TConfig_h>::allocate(cfg, cfg_scope, "coarse_solver"); } //NOTE: //if dense_lu_num_rows=0 then either you are not using dense solver (it was not selected) or the matrix size for it to be used was set to zero //if dense_lu_max_rows=0 then either you are not using dense solver or you don't want to cap the maximum matrix size m_dense_lu_num_rows = 0; m_dense_lu_max_rows = 0; if ( solverName == "DENSE_LU_SOLVER" ) { m_dense_lu_num_rows = cfg.getParameter<int>( "dense_lu_num_rows", cfg_scope ); m_dense_lu_max_rows = cfg.getParameter<int>( "dense_lu_max_rows", cfg_scope ); } } template<AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void AMG<t_vecPrec, t_matPrec, t_indPrec>::allocate_fine_level() { fine_d = AMG_LevelFactory<TConfig_d>::allocate(this, tmng); fine_h = AMG_LevelFactory<TConfig_h>::allocate(this, tmng); } // Print the settings used by amg solver template<AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void AMG<t_vecPrec, t_matPrec, t_indPrec>::printSettings() const { std::cout << std::endl; std::cout << "AMG solver settings:" << std::endl; std::cout << "cycle_iters = " << cycle_iters << std::endl; std::cout << "norm = " << getString(norm) << std::endl; std::cout << "presweeps = " << getNumPresweeps() << std::endl; std::cout << "postsweeps = " << getNumPostsweeps() << std::endl; std::cout << "max_levels = " << max_levels << std::endl; std::cout << "coarsen_threshold = " << coarsen_threshold << std::endl; std::cout << "min_fine_rows = " << min_fine_rows << std::endl; std::cout << "min_coarse_rows = " << min_coarse_rows << std::endl; std::cout << "coarse_solver_d: " << this->coarse_solver_d->getName() << " with scope name " << this->coarse_solver_d->getScope() << std::endl; std::cout << "coarse_solver_h: " << this->coarse_solver_h->getName() << " with scope name " << this->coarse_solver_h->getScope() << std::endl; } template<AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> AMG<t_vecPrec, t_matPrec, t_indPrec>::~AMG() { if (fine_d) { delete fine_d; } if (fine_h) { delete fine_h; } // Don't delete both since the hierarchies meet at some point !!! 
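// (coarse_solver_d / coarse_solver_h are NULL when "NOSOLVER" was selected
//  in the constructor; deleting a null pointer is a no-op, so the
//  unconditional deletes below are safe.)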
delete coarse_solver_d; delete coarse_solver_h; if ( d2_workspace != NULL && d2_workspace != csr_workspace ) { typedef TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> TConfig_d; CSR_Multiply<TConfig_d>::csr_workspace_delete( d2_workspace ); csr_workspace = NULL; } if ( csr_workspace != NULL ) { typedef TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> TConfig_d; CSR_Multiply<TConfig_d>::csr_workspace_delete( csr_workspace ); csr_workspace = NULL; } } template<class T_Config> void logDeviceType() { AMGXLOG("Devicetype", T_Config::MemSpaceInfo::getName()) } /********************************************************** * Setups the AMG system *********************************************************/ void analyze_coloring(device_vector_alloc<int> aggregates_d, device_vector_alloc<int> colors_d); template< AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec > class AMG_Setup { public: template< typename TConfig_hd > static AMG_Level<TConfig_hd> *setup( AMG<t_vecPrec, t_matPrec, t_indPrec> *amg, AMG_Level<TConfig_hd> *&level, int min_rows, bool hybrid ) { typedef typename TConfig_hd::MemSpace MemorySpace; typedef TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> TConfig_h; typedef TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> TConfig_d; typedef typename Matrix<TConfig_h>::IVector IVector_h; typedef typename Matrix<TConfig_d>::IVector IVector_d; typedef typename Matrix<TConfig_h>::VVector VVector_h; typedef typename Matrix<TConfig_d>::VVector VVector_d; typedef typename Matrix<TConfig_h>::MVector MVector_h; typedef typename Matrix<TConfig_d>::MVector MVector_d; typedef typename Matrix<TConfig_hd>::IVector IVector_hd; typedef typename Matrix<TConfig_hd>::VVector VVector_hd; typedef typename Matrix<TConfig_hd>::MVector MVector_hd; typedef typename MatPrecisionMap<t_matPrec>::Type ValueTypeA; typedef typename VecPrecisionMap<t_vecPrec>::Type ValueTypeB; static const AMGX_MemorySpace other_memspace = MemorySpaceMap<opposite_memspace<TConfig_hd::memSpace>::memspace>::id; typedef TemplateConfig<other_memspace, t_vecPrec, t_matPrec, t_indPrec> TConfig1; typedef TConfig1 T_Config1; MemorySpace memorySpaceTag; // The previous level. AMG_Level<TConfig_hd> *prev_level = 0L; typedef TemplateConfig<AMGX_host, AMGX_vecInt, t_matPrec, t_indPrec> hvector_type; typedef Vector<hvector_type> HVector; std::vector<HVector> partition_rows(0); HVector num_rows(1); int64_t num_rows_global; num_rows[0] = num_rows_global = level->getNumRows( ); int min_partition_rows = num_rows[0]; if (level->getA().is_matrix_distributed()) { level->getA( ).manager->getComms()->global_reduce(partition_rows, num_rows, level->getA( ), level->tag * 100 + 7); num_rows_global = 0; for (int i = 0; i < partition_rows.size(); i++) { min_partition_rows = std::min(partition_rows[i][0], min_partition_rows); num_rows_global += partition_rows[i][0]; } } Solver<TConfig_hd> *coarseSolver = amg->getCoarseSolver( MemorySpace() ); bool coarseSolverExists = coarseSolver != NULL; // Build the remaining / all the levels on the CPU. Note: level_h is NULL if all the setup happened on the GPU. 
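// Each pass of the loop below: check the stopping criteria (max_levels or
// min_partition_rows <= min_rows), allocate or reuse the next level,
// create the coarse vertices, reduce the coarse size across ranks to decide
// about consolidation, then either build the coarse matrices and resize the
// coarse vectors or discard the next level, set up the smoother, and move on
// to the level that was just created.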
while (true) { nvtxRange test("setup_level"); //Check if you reached the coarsest level (min_partition_rows is the number of rows in this partition/rank) //NOTE: min_rows = min_coarse_rows if async framework is disabled (min_fine_rows =< min_coarse_rows) if (amg->num_levels >= amg->max_levels || min_partition_rows <= min_rows) { //Check if the user wishes to use DENSE_LU_SOLVER capping the matrix the size, and the matrix size exceeds the maximum allowed //NOTE: if dense_lu_max_rows=0 then either you are not using dense solver or you don't want to cap the maximum matrix size if ((amg->m_dense_lu_max_rows != 0) && (min_partition_rows > amg->m_dense_lu_max_rows)) { amg->setCoarseSolver(NULL, MemorySpace()); delete coarseSolver; coarseSolver = NULL; coarseSolverExists = false; } //Check if there is no coarse solver, then setup the smoother to solve the coarsest level if (!coarseSolverExists) { level->setup_smoother(); } return level; } // Allocate next level or use existing one int reuse_next_level; AMG_Level<TConfig_hd> *nextLevel; if (!level->getNextLevel(MemorySpace()) || (amg->m_structure_reuse_levels <= amg->num_levels && amg->m_structure_reuse_levels != -1)) { if (level->getNextLevel(MemorySpace())) { delete level->getNextLevel(MemorySpace()); } reuse_next_level = 0; level->setReuseLevel(false); nextLevel = AMG_LevelFactory<TConfig_hd>::allocate(amg, level->getSmoother()->get_thread_manager()); level->setNextLevel( nextLevel ); } else { // reuse existing next level reuse_next_level = 1; level->setReuseLevel(true); nextLevel = level->getNextLevel(MemorySpace()); /* WARNING: we do not recompute prolongation (P) and restriction (R) when we are reusing the level structure (structure_reuse_levels > 0), but we do need to modify an existing coarse matrix Ac=R*A*P. Instead of calling Ac.set_initialized(0) in every path afterwards, we wil call it here. Notice that in the if part of this statement above when the new level is allocated it creates a new matrix which is not initialized by default (see the matrix constructor): AMG_Level_Factory::allocate -> Classical_AMG_LevelFactory::create -> new Classical_AMG_Level -> new AMG_Level -> new Matrix We are just matching this Ac.set_initialized(0) setting here. 
*/ Matrix<TConfig_hd> &Ac = nextLevel->getA(); Ac.set_initialized(0); } nextLevel->setLevelIndex( amg->num_levels ); level->getA().template setParameter<int>("level", amg->num_levels); //profileLevelDown( ); { // only compute aggregates if we can't reuse existing ones if (!reuse_next_level) { level->createCoarseVertices( ); } } //set the amg_level_index for this matrix nextLevel->getA().amg_level_index = amg->num_levels; int64_t N = num_rows_global * level->getA().get_block_dimy(); num_rows[0] = num_rows_global = level->getNumCoarseVertices(); if (level->getA().is_matrix_distributed()) { level->getA().manager->getComms()->global_reduce( partition_rows, num_rows, level->getA(), level->tag * 100 + 8 ); num_rows_global = 0; for (int i = 0; i < partition_rows.size(); i++) { num_rows_global += partition_rows[i][0]; } } // num_rows[0] contains the total number of rows across all partitions int64_t nextN = num_rows_global * level->getA().get_block_dimy(); if (!level->getA().is_matrix_distributed()) { min_partition_rows = num_rows[0]; } else { int num_parts = level->getA().manager->getComms()->get_num_partitions(); float avg_size = num_rows_global / num_parts; if (avg_size < amg->m_consolidation_lower_threshold) { if (level->isClassicalAMGLevel()) { FatalError("Consolidation with classical path not supported)", AMGX_ERR_NOT_IMPLEMENTED); } int new_num_parts; bool want_neighbors = false; level->getA().manager->computeDestinationPartitions(amg->m_consolidation_upper_threshold, avg_size, num_parts, new_num_parts, want_neighbors); if (new_num_parts != num_parts) { level->setIsConsolidationLevel(true); // Modify partition_rows so that non-consolidated partitions have 0 rows // Root partitions have total number of rows to consolidate IVector_h row_count_part(num_parts, 0); for (int i = 0; i < num_parts; i++) { row_count_part[level->getA().manager->getDestinationPartitions()[i]] += partition_rows[i][0]; } for (int i = 0; i < num_parts; i++) { partition_rows[i][0] = row_count_part[i]; } } } if (!amg->m_sum_stopping_criteria) { min_partition_rows = INT_MAX; for (int i = 0; i < partition_rows.size(); i++) { // If aggregation AMG, ignore partitions with 0 rows, since those are caused by consolidation // If classical AMG, include all partitions if ( level->isClassicalAMGLevel() || (!(level->isClassicalAMGLevel()) && partition_rows[i][0] != 0)) { min_partition_rows = std::min(partition_rows[i][0], min_partition_rows); } } } else { // use sum instead of min min_partition_rows = 0; for (int i = 0; i < partition_rows.size(); i++) { // If aggregation AMG, ignore partitions with 0 rows, since those are caused by consolidation // If classical AMG, include all partitions if ( level->isClassicalAMGLevel() || (!(level->isClassicalAMGLevel()) && partition_rows[i][0] != 0)) { min_partition_rows += partition_rows[i][0]; } } } } // stop here if next level size is < min_rows if ( nextN <= amg->coarsen_threshold * N && nextN != N && min_partition_rows >= min_rows ) { level->createCoarseMatrices(); // Resize coarse vectors. 
int nextSize = level->getNextLevelSize(); level->getxc( ).resize( nextSize ); level->getxc().set_block_dimy(level->getA( ).get_block_dimy()); level->getxc().set_block_dimx(1); level->getxc().tag = nextLevel->tag * 100 + 1; level->getbc( ).resize( nextSize ); level->getbc().set_block_dimy(level->getA( ).get_block_dimy()); level->getbc().set_block_dimx(1); level->getbc().tag = nextLevel->tag * 100 + 0; int size, offset; level->getA().getOffsetAndSizeForView(FULL, &offset, &size); level->getr().resize( size * level->getA( ).get_block_dimy() ); level->getr().set_block_dimy(level->getA( ).get_block_dimy()); level->getr().set_block_dimx(1); level->getr().tag = nextLevel->tag * 100 + 2; } else { // delete next level that we just created level->deleteNextLevel( memorySpaceTag ); } if (!level->isCoarsest() || !coarseSolverExists) { level->setup_smoother(); } if (level->isCoarsest()) { break; } // If consolidation level and not root partition, break; if (!level->getA().is_matrix_singleGPU() && level->isConsolidationLevel() && !level->getA().manager->isRootPartition()) { amg->setCoarseSolver(NULL, MemorySpace()); delete coarseSolver; coarseSolver = NULL; coarseSolverExists = false; break; } nextLevel->setup(); // Move to the next level. prev_level = level; level = nextLevel; // Increment the level counter. amg->num_levels++; } //end of while(true) return prev_level; } template< typename TConfig_hd > static AMG_Level<TConfig_hd> *setup_v2( AMG<t_vecPrec, t_matPrec, t_indPrec> *amg, AMG_Level<TConfig_hd> *&level, int min_rows, bool hybrid ) { typedef typename TConfig_hd::MemSpace MemorySpace; typedef TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> TConfig_h; typedef TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> TConfig_d; typedef typename Matrix<TConfig_h>::IVector IVector_h; typedef typename Matrix<TConfig_d>::IVector IVector_d; typedef typename Matrix<TConfig_h>::VVector VVector_h; typedef typename Matrix<TConfig_d>::VVector VVector_d; typedef typename Matrix<TConfig_h>::MVector MVector_h; typedef typename Matrix<TConfig_d>::MVector MVector_d; typedef typename Matrix<TConfig_hd>::IVector IVector_hd; typedef typename Matrix<TConfig_hd>::VVector VVector_hd; typedef typename Matrix<TConfig_hd>::MVector MVector_hd; typedef typename MatPrecisionMap<t_matPrec>::Type ValueTypeA; typedef typename VecPrecisionMap<t_vecPrec>::Type ValueTypeB; MemorySpace memorySpaceTag; // The previous level. 
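// setup_v2 runs the same coarsening loop as setup() above, with one
// difference: for distributed matrices whose average row count per rank
// falls below m_consolidation_lower_threshold, the per-rank matrices are
// glued onto fewer ranks (glue_level) before coarsening continues. Unless
// COARSE_CLA_CONSO is enabled, this gluing is only applied on level 0.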
AMG_Level<TConfig_hd> *prev_level = 0L; typedef TemplateConfig<AMGX_host, AMGX_vecInt, t_matPrec, t_indPrec> hvector_type; typedef Vector<hvector_type> HVector; std::vector<HVector> partition_rows(0); HVector num_rows(1); int64_t num_rows_global; num_rows[0] = num_rows_global = level->getNumRows( ); int min_partition_rows = INT_MAX, offset = 0, n = 0, num_parts = 1, num_active_parts = 0; float avg_size; if (level->getA().is_matrix_distributed()) { num_parts = level->getA().manager->getComms()->get_num_partitions(); level->getA( ).manager->getComms()->global_reduce(partition_rows, num_rows, level->getA( ), level->tag * 100 + 7); num_rows_global = 0; for (int i = 0; i < partition_rows.size(); i++) { if (partition_rows[i][0] != 0) { min_partition_rows = std::min(partition_rows[i][0], min_partition_rows); num_active_parts++; } num_rows_global += partition_rows[i][0]; } if (min_partition_rows == INT_MAX) { min_partition_rows = 0; } } IVector_h row_count_part(num_parts, 0); Solver<TConfig_hd> *coarseSolver = amg->getCoarseSolver( MemorySpace() ); bool coarseSolverExists = coarseSolver != NULL; // Build the remaining / all the levels on the CPU. Note: level_h is NULL if all the setup happened on the GPU. while (true) { // Glue matrices of the current level avg_size = num_rows_global / num_parts; // Allow to glue other levels tha 0 if COARSE_CLA_CONSO is true #if COARSE_CLA_CONSO if (level->getA().is_matrix_distributed() && avg_size < amg->m_consolidation_lower_threshold) { #else if (level->getA().is_matrix_distributed() && avg_size < amg->m_consolidation_lower_threshold && level->getLevelIndex() == 0) { #endif // Just remove level->getLevelIndex() == 0 in the previous test to allow coarse level consolidation #ifdef AMGX_WITH_MPI level->getA().manager->setIsGlued(false); int new_num_parts = glue_level(amg, level, num_active_parts); if (new_num_parts && new_num_parts != num_active_parts) { if (level->getA().manager->global_id() == 0) { std::cout << "Level " << level->getLevelIndex() << " has been consolidated : " << num_active_parts << " --> " << new_num_parts << std::endl; } // this is for coarse level consolidation if (level->getLevelIndex() > 0) { level->setIsConsolidationLevel(true); } level->setup(); num_active_parts = new_num_parts; // Modify partition_rows so that non-consolidated partitions have 0 rows // Root partitions have total number of rows to consolidate num_rows[0] = level->getNumRows(); level->getA().manager->getComms()->global_reduce( partition_rows, num_rows, level->getA(), level->tag * 100 + 33 ); // Update some local arrays and variables num_rows_global = 0; for (int i = 0; i < partition_rows.size(); i++) { num_rows_global += partition_rows[i][0]; } for (int i = 0; i < num_parts; i++) { row_count_part[level->getA().manager->getDestinationPartitions()[i]] += partition_rows[i][0]; } for (int i = 0; i < num_parts; i++) { partition_rows[i][0] = row_count_part[i]; } } else { level->getA().manager->setIsGlued(false); } #endif } level->getA().getOffsetAndSizeForView(OWNED, &offset, &n); if (!n) { // no coarse solver for empty matrices? 
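// (n is the number of OWNED rows on this rank; ranks whose matrix was glued
//  onto a consolidation root end up with n == 0 and no longer need their
//  own coarse solver.)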
// maybe we can deal with this in classical amg cycle amg->setCoarseSolver(NULL, MemorySpace()); delete coarseSolver; coarseSolver = NULL; coarseSolverExists = false; } //Check if you reached the coarsest level (min_partition_rows is the number of rows in this partition/rank) //NOTE: min_rows = min_coarse_rows if async framework is disabled (min_fine_rows =< min_coarse_rows) if (amg->num_levels >= amg->max_levels || min_partition_rows <= min_rows) { #if 0 //AMGX_ASYNCCPU_PROOF_OF_CONCEPT asyncmanager::singleton()->waitall(); #endif //Check if the user wishes to use DENSE_LU_SOLVER capping the matrix the size, and the matrix size exceeds the maximum allowed //NOTE: if dense_lu_max_rows=0 then either you are not using dense solver or you don't want to cap the maximum matrix size if ((amg->m_dense_lu_max_rows != 0) && (min_partition_rows > amg->m_dense_lu_max_rows)) { amg->setCoarseSolver(NULL, MemorySpace()); delete coarseSolver; coarseSolver = NULL; coarseSolverExists = false; } //Check if there is no coarse solver, then setup the smoother to solve the coarsest level // If n is 0 then the matrix is consolidated so we don't setup the smoother // We always setup the smoother on finest level if (!coarseSolverExists) { level->setup_smoother(); } return level; } // Allocate next level or use existing one int reuse_next_level; AMG_Level<TConfig_hd> *nextLevel; if (!level->getNextLevel(MemorySpace()) || (amg->m_structure_reuse_levels <= amg->num_levels && amg->m_structure_reuse_levels != -1)) { if (level->getNextLevel(MemorySpace())) { delete level->getNextLevel(MemorySpace()); } reuse_next_level = 0; level->setReuseLevel(false); nextLevel = AMG_LevelFactory<TConfig_hd>::allocate(amg, level->getSmoother()->get_thread_manager()); level->setNextLevel( nextLevel ); } else { // reuse existing next level reuse_next_level = 1; level->setReuseLevel(true); nextLevel = level->getNextLevel(MemorySpace()); /* WARNING: we do not recompute prolongation (P) and restriction (R) when we are reusing the level structure (structure_reuse_levels > 0), but we do need to modify an existing coarse matrix Ac=R*A*P. Instead of calling Ac.set_initialized(0) in every path afterwards, we wil call it here. Notice that in the if part of this statement above when the new level is allocated it creates a new matrix which is not initialized by default (see the matrix constructor): AMG_Level_Factory::allocate -> Classical_AMG_LevelFactory::create -> new Classical_AMG_Level -> new AMG_Level -> new Matrix We are just matching this Ac.set_initialized(0) setting here. 
*/ Matrix<TConfig_hd> &Ac = nextLevel->getA(); Ac.set_initialized(0); } nextLevel->setLevelIndex( amg->num_levels ); level->getA().template setParameter<int>("level", amg->num_levels); #if 0 //AMGX_ASYNCCPU_PROOF_OF_CONCEPT if (async_global::singleton()->using_async_coloring) { struct task_setupsmoother : public task { AMG_Level<TConfig_hd> *level; bool coarseSolverExists; int profiler_color() {return 0x00ffff;} std::string name() { return "setup_smoother"; } void run() { // Setup smoother unless coarseSolver exists and reached coarsest level if ( !( level->isCoarsest() && coarseSolverExists ) ) { level->setup_smoother(); } } }; task_setupsmoother *task_setupsmoother_ = new task_setupsmoother; task_setupsmoother_->level = level; task_setupsmoother_->coarseSolverExists = coarseSolverExists; // create the aggregates (aggregation) or coarse points (classical) level->createCoarseVertices( ); enqueue_async(asyncmanager::singleton()->main_thread_queue(0), task_setupsmoother_); } else #endif { // only compute aggregates if we can't reuse existing ones if (!reuse_next_level) { level->createCoarseVertices( ); } } //set the amg_level_index for this matrix nextLevel->getA().amg_level_index = amg->num_levels; int64_t N = num_rows_global * level->getA().get_block_dimy(); num_rows[0] = num_rows_global = level->getNumCoarseVertices(); // Do reduction across all partitions if (level->getA().is_matrix_distributed()) { level->getA().manager->getComms()->global_reduce( partition_rows, num_rows, level->getA(), level->tag * 100 + 8 ); num_rows_global = 0; for (int i = 0; i < partition_rows.size(); i++) { num_rows_global += partition_rows[i][0]; } } // num_rows[0] contains the total number of rows across all partitions int64_t nextN = num_rows_global * level->getA().get_block_dimy(); if (!level->getA().is_matrix_distributed()) { min_partition_rows = num_rows[0]; } else { // level->setIsConsolidationLevel(true); // coaese root partions exited some time in classical if (!amg->m_sum_stopping_criteria) { min_partition_rows = INT_MAX; for (int i = 0; i < partition_rows.size(); i++) { // Before we did // If aggregation AMG, ignore partitions with 0 rows, since those are caused by consolidation // If classical AMG, include all partitions if (partition_rows[i][0] != 0) { min_partition_rows = std::min(partition_rows[i][0], min_partition_rows); } } // if we exit the previous loop with min_partition_rows == INT_MAX it means all next size are 0 if (min_partition_rows == INT_MAX) { min_partition_rows = 0; } } else { // use sum instead of min min_partition_rows = 0; for (int i = 0; i < partition_rows.size(); i++) { // If aggregation AMG, ignore partitions with 0 rows, since those are caused by consolidation // If classical AMG, include all partitions if (partition_rows[i][0] != 0) { min_partition_rows += partition_rows[i][0]; } } } } // stop here if next level size is < min_rows if ( nextN <= amg->coarsen_threshold * N && nextN != N && min_partition_rows >= min_rows ) { level->createCoarseMatrices(); // Resize coarse vectors. 
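// (The coarse vectors below are tagged in the next level's tag range:
//  bc -> tag*100+0, xc -> tag*100+1, r -> tag*100+2, matching the
//  tag*100+k convention used for the reductions earlier in this routine.)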
int nextSize = level->getNextLevelSize(); level->getxc( ).resize( nextSize ); level->getxc().set_block_dimy(level->getA( ).get_block_dimy()); level->getxc().set_block_dimx(1); level->getxc().tag = nextLevel->tag * 100 + 1; level->getbc( ).resize( nextSize ); level->getbc().set_block_dimy(level->getA( ).get_block_dimy()); level->getbc().set_block_dimx(1); level->getbc().tag = nextLevel->tag * 100 + 0; int size, offset; level->getA().getOffsetAndSizeForView(FULL, &offset, &size); level->getr().resize( size * level->getA( ).get_block_dimy() ); level->getr().set_block_dimy(level->getA( ).get_block_dimy()); level->getr().set_block_dimx(1); level->getr().tag = nextLevel->tag * 100 + 2; } else { // delete next level that we just created level->deleteNextLevel( memorySpaceTag ); } #if 0 //AMGX_ASYNCCPU_PROOF_OF_CONCEPT if (async_global::singleton()->using_async_coloring) { //cancel the CPU coloring task if the GPU is idle cudaStreamSynchronize(thrust::global_thread_handle::get_stream()); enqueue_async(asyncmanager::singleton()->global_parallel_queue, async_global::singleton()->cancel_cpu_coloring_task); //wait for every spawning task asyncmanager::singleton()->waitall(); } else #endif // If n is 0 then the matrix is consolidated so we don't setup the smoother if (!level->isCoarsest() || (!coarseSolverExists)) { level->setup_smoother(); } if (level->isCoarsest()) { break; } // Barrier (might be removed) // ****************************************** if (level->getA().is_matrix_distributed()) { level->getA().manager->getComms()->barrier(); } // ****************************************** nextLevel->setup(); nextLevel->getA().setResources(level->getA().getResources()); #if 0 //AMGX_ASYNCCPU_PROOF_OF_CONCEPT // color the matrix ASAP if (!nextLevel->getA().is_matrix_setup()) { nextLevel->getA().setupMatrix(nextLevel->getSmoother(), *amg->m_cfg, false); } #endif // Move to the next level. prev_level = level; level = nextLevel; // Increment the level counter. 
amg->num_levels++; } //end of while(true) #if 0 //AMGX_ASYNCCPU_PROOF_OF_CONCEPT cudaStreamSynchronize(thrust::global_thread_handle::threadStream[getCurrentThreadId()]); thrust::global_thread_handle::threadStream[getCurrentThreadId()] = 0; #endif return prev_level; } template< typename TConfig_hd > static int glue_level(AMG<t_vecPrec, t_matPrec, t_indPrec> *amg, AMG_Level<TConfig_hd> *&level, int num_active_parts) { #ifdef AMGX_WITH_MPI if (level->getA().manager->getComms() != NULL) { MPI_Comm A_com, temp_com; int new_num_parts, n_global, num_parts, avg; bool wantneighbors = true; A_com = level->getA().manager->getComms()->get_mpi_comm(); if (level->getA().manager->part_offsets_h.size() == 0) // create part_offsets_h & part_offsets { create_part_offsets(A_com, level->getA()); } n_global = level->getA().manager->part_offsets_h.back(); num_parts = level->getA().manager->getComms()->get_num_partitions(); avg = n_global / num_parts; level->getA().manager->computeDestinationPartitions(amg->m_consolidation_upper_threshold, avg, num_parts, new_num_parts, wantneighbors); if (new_num_parts != num_active_parts) { // Compute consolidation info compute_glue_info(level->getA()); // Compute a temporary splited communicator to glue matrices temp_com = compute_glue_matrices_communicator(level->getA()); // glue_matrices does the following : unpack --> glue --> upload --> repack glue_matrices(level->getA(), A_com, temp_com); return new_num_parts; } else { return num_active_parts; } } else { return 0; } #else return 0; #endif } template< typename TConfig0, AMGX_MemorySpace MemSpace0, AMGX_MemorySpace MemSpace1 > static void setup( AMG<t_vecPrec, t_matPrec, t_indPrec> *amg, Matrix<TConfig0> &A ) { typedef typename TConfig0::template setMemSpace<MemSpace1>::Type TConfig1; typedef typename MemorySpaceMap<MemSpace0>::Type MemorySpace0; typedef typename MemorySpaceMap<MemSpace1>::Type MemorySpace1; MemorySpace0 memorySpaceTag0; MemorySpace1 memorySpaceTag1; // delete zero level from other memoryspace if (amg->getFinestLevel(memorySpaceTag1) != NULL) { delete amg->getFinestLevel(memorySpaceTag1); AMG_Level<TConfig1> *level_0_1 = NULL; amg->setFinestLevel(level_0_1); } int min_fine_rows = amg->min_fine_rows; int min_coarse_rows = amg->min_coarse_rows; // Make sure the number of fine rows is never smaller than min_coarse_rows. min_fine_rows = std::max( min_fine_rows, min_coarse_rows ); // Reset AMG hierarchy. amg->num_levels = 1; // Build levels on the first device. AMG_Level<TConfig0> *level_0 = amg->getFinestLevel(memorySpaceTag0), *prev_level_0 = 0L; // if resetup if (level_0->isSetup() && amg->m_structure_reuse_levels == 0) { delete level_0; level_0 = AMG_LevelFactory<TConfig0>::allocate(amg); amg->setFinestLevel( level_0 ); } level_0->setA(A); level_0->setLevelIndex( 0 ); level_0->setup(); if (level_0->isClassicalAMGLevel() && amg->m_amg_consolidation_flag == 1 && level_0->getA().is_matrix_distributed()) { #ifdef AMGX_WITH_MPI if (amg->m_consolidation_lower_threshold == 0 ) // m_consolidation_lower_threshold is unset { int root = 0; int max = 0, min = 0; MPI_Comm comm = level_0->getA().manager->getComms()->get_mpi_comm(); if (level_0->getA().manager->global_id() == 0 ) { size_t avail, total; cudaMemGetInfo (&avail, &total); size_t used = level_0->bytes(); // Memory used by the finest level. 
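// Worked example of the heuristic below, with illustrative numbers only:
//   total = 16 GB, overhead = 1 GB, used = 0.5 GB on rank 0
//   hierarchy = 6 * used = 3 GB
//   strength  = (16 - 1) / 3 = 5.0
//   max       = (5.0 * rows) / 6 ~= 0.83 * rows, min = max - 1
// so the upper consolidation threshold ends up at roughly (strength / 6)
// times the current local row count, leaving room for the coarse levels of
// the glued matrix.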
size_t hierarchy = 6 * used; // Estimation of the size of the hierarchy size_t overhead = 1000000000; // 1GB of storage for other AMGX stuff // The Strength factor represents how many times a matrix like the one we locally have can fit into this GPU // This is based on the one we have on the finest level on rank 0 and considering the total hierarchy can be 6x larger double strength = (static_cast<double>(total - overhead)) / hierarchy; // The sum of memory required by coarse levels should be (approximately) less than or equal to 6x the memory required by the finest level. // This assumes good load balancing // We should check when we glue matrices that we are not going out of memory. if (strength > 1.0) { int rows = level_0->getNumRows(); max = (strength * rows) / 6; // We divide by 6 because we increase the size of the following coarse levels by increasing the size of the current matrix if (max > 0) { min = max - 1; } else { max = 1; min = 0; } } else { max = 1; min = 0; } } MPI_Bcast( &max, 1, MPI_INT, root, comm ); MPI_Bcast( &min, 1, MPI_INT, root, comm ); amg->m_consolidation_lower_threshold = min; amg->m_consolidation_upper_threshold = max; } if (amg->m_consolidation_lower_threshold > 0) { prev_level_0 = setup_v2<TConfig0>( amg, level_0, min_fine_rows, min_fine_rows > min_coarse_rows ); // entering the gluing path } else #endif { prev_level_0 = setup<TConfig0>( amg, level_0, min_fine_rows, min_fine_rows > min_coarse_rows ); // no glue because the matrix is too big } } else { prev_level_0 = setup<TConfig0>( amg, level_0, min_fine_rows, min_fine_rows > min_coarse_rows ); // usual path / aggregation consolidation path } // Move to the other memory space if needed. if ( min_fine_rows == min_coarse_rows ) { Solver<TConfig0> *coarseSolver = amg->getCoarseSolver( memorySpaceTag0 ); if ( coarseSolver ) { coarseSolver->setup( level_0->getA(), false ); } } else { AMG_Level<TConfig1> *level_1 = AMG_LevelFactory<TConfig1>::allocate(amg); amg->setFinestLevel( level_1 ); level_1->getA( ).copy( level_0->getA( ) ); level_1->setLevelIndex( level_0->getLevelIndex( ) ); level_1->setup(); // Make that level the next one in the hierarchy. if ( prev_level_0 ) { prev_level_0->setNextLevel( level_1 ); assert( prev_level_0->getNextLevel( memorySpaceTag0 ) == level_0 ); prev_level_0->deleteNextLevel( memorySpaceTag0 ); } // Build the hierarchy. setup<TConfig1>( amg, level_1, min_coarse_rows, false ); // Build the coarse solver. Solver<TConfig1> *coarseSolver = amg->getCoarseSolver( memorySpaceTag1 ); if ( coarseSolver ) { coarseSolver->setup( level_1->getA(), false ); } } // Used only for device modes without hybrid mode. After reaching a level where numrows <= amg_host_levels_rows // it creates a copy of the hierarchy starting with this level. // This is an experimental feature intended to measure scaling of the solve part when coarse levels are on the host.
if (amg->m_amg_host_levels_rows > 0) { AMG_Level<TConfig0> *d_cur_lvl = amg->getFinestLevel(memorySpaceTag0); AMG_Level<TConfig1> *h_cur_lvl = NULL, *h_prev_lvl = NULL; AMG_Level<TConfig0> *last_dev_lvl = NULL; AMG_Level<TConfig1> *first_host_lvl = NULL; while (d_cur_lvl != NULL) { if (d_cur_lvl->getNumRows() <= amg->m_amg_host_levels_rows) { break; } last_dev_lvl = d_cur_lvl; d_cur_lvl = d_cur_lvl->getNextLevel( memorySpaceTag0 ); } if (d_cur_lvl != NULL) { while (d_cur_lvl != NULL) { h_cur_lvl = AMG_LevelFactory<TConfig1>::allocate(amg, amg->tmng); h_cur_lvl->transfer_from(d_cur_lvl); h_cur_lvl->setup(); if (amg->getCoarseSolver(memorySpaceTag0) != NULL) { //remove coarse solver on the device delete amg->getCoarseSolver(memorySpaceTag0); amg->setCoarseSolver(NULL, memorySpaceTag0); // it should exist for the host, but check nevertheless Solver<TConfig1> *coarseSolver = amg->getCoarseSolver( memorySpaceTag1 ); bool coarseSolverExists = coarseSolver != NULL; if (!coarseSolverExists) { FatalError("Need to recrreate coarse solver got the host", AMGX_ERR_NOT_IMPLEMENTED); } } else { h_cur_lvl->setup_smoother(); } if (first_host_lvl == NULL) { first_host_lvl = h_cur_lvl; } if (h_prev_lvl != NULL) { h_prev_lvl->setNextLevel(h_cur_lvl); } h_prev_lvl = h_cur_lvl; h_cur_lvl = NULL; d_cur_lvl = d_cur_lvl->getNextLevel(memorySpaceTag0); } // cleanup unnecessary device hierarchy part delete last_dev_lvl->getNextLevel(memorySpaceTag0); // link last device level to the first host level last_dev_lvl->setNextLevel(first_host_lvl); last_dev_lvl->resetNextLevel(memorySpaceTag0); // tell amg that there are host levels amg->setFinestLevel( first_host_lvl ); } } MemoryInfo::updateMaxMemoryUsage(); logDeviceType<TConfig0>( ); logDeviceType<TConfig1>( ); } }; /********************************************************** * Solves the AMG system *********************************************************/ template< class T_Config > class AMG_Solve { typedef T_Config TConfig; static const AMGX_VecPrecision vecPrec = TConfig::vecPrec; static const AMGX_MatPrecision matPrec = TConfig::matPrec; static const AMGX_IndPrecision indPrec = TConfig::indPrec; typedef typename TConfig::MemSpace MemorySpace; typedef Matrix<TConfig> Matrix_hd; typedef Vector<TConfig> Vector_hd; typedef Vector<TemplateConfig<AMGX_host, vecPrec, matPrec, indPrec> > Vector_h; typedef T_Config TConfig_hd; typedef AMG<vecPrec, matPrec, indPrec> AMG_Class; public: static void solve_iteration( AMG_Class *amg, Vector_hd &b, Vector_hd &x) { cudaStreamSynchronize(0); nvtxRange amg_si("amg_solve_iteration"); MemorySpace memorySpaceTag; AMG_Level<TConfig_hd> *fine = amg->getFinestLevel( memorySpaceTag ); assert(fine != NULL); CycleFactory<TConfig>::generate( amg, fine, b, x ); fine->unsetInitCycle(); // Note: this sometimes takes too much time on host making GPU idle. // Solve is not that important for memory - main mem usage comes from setup. // Disabling this call for now //MemoryInfo::updateMaxMemoryUsage(); cudaStreamSynchronize(0); } }; // Setup the hierarchy to solve on host/device. 
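// The two setup(Matrix) overloads below differ only in which memory space
// holds the fine levels: the Matrix_h overload builds the hierarchy host
// first (AMGX_host primary, AMGX_device secondary), the Matrix_d overload
// device first. Both release the temporary CSR multiply workspaces once the
// hierarchy has been built.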
template<AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void AMG<t_vecPrec, t_matPrec, t_indPrec>::setup( Matrix_h &A ) { if ( m_dense_lu_num_rows > 0 ) { min_coarse_rows = m_dense_lu_num_rows / A.get_block_dimy(); } // read reuse structure levels option from config in case it has been changed // this allows fine control over the reuse of hierarchies if setup/solve is called multiple times m_structure_reuse_levels = m_cfg->getParameter<int>("structure_reuse_levels", m_cfg_scope); AMG_Setup<t_vecPrec, t_matPrec, t_indPrec>::template setup<TConfig_h, AMGX_host, AMGX_device>( this, A ); // Don't need the workspace anymore if ( d2_workspace != NULL && d2_workspace != csr_workspace ) { typedef TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> TConfig_d; CSR_Multiply<TConfig_d>::csr_workspace_delete( d2_workspace ); csr_workspace = NULL; } if ( csr_workspace != NULL ) { typedef TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> TConfig_d; CSR_Multiply<TConfig_d>::csr_workspace_delete( csr_workspace ); csr_workspace = NULL; } } template<AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void AMG<t_vecPrec, t_matPrec, t_indPrec>::setup( Matrix_d &A ) { if ( m_dense_lu_num_rows > 0 ) { min_coarse_rows = m_dense_lu_num_rows / A.get_block_dimy(); } // read reuse structure levels option from config in case it has been changed // this allows fine control over the reuse of hierarchies if setup/solve is called multiple times m_structure_reuse_levels = m_cfg->getParameter<int>("structure_reuse_levels", m_cfg_scope); AMG_Setup<t_vecPrec, t_matPrec, t_indPrec>::template setup<TConfig_d, AMGX_device, AMGX_host>( this, A ); // Don't need the workspace anymore if ( d2_workspace != NULL && d2_workspace != csr_workspace ) { typedef TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> TConfig_d; CSR_Multiply<TConfig_d>::csr_workspace_delete( d2_workspace ); csr_workspace = NULL; } if ( csr_workspace != NULL ) { typedef TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> TConfig_d; CSR_Multiply<TConfig_d>::csr_workspace_delete( csr_workspace ); csr_workspace = NULL; } } // Setup the hierarchy to solve on host. 
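// The AMG_Level overloads below resume coarsening from an existing level
// (min_rows = 2, hybrid = false); solve_init() only flags the initial cycle
// when x is known to start from zero, and each solve_iteration() call runs
// one cycle through AMG_Solve::solve_iteration() defined above.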
template<AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void AMG<t_vecPrec, t_matPrec, t_indPrec>::setup( AMG_Level<TConfig_h> *level ) { AMG_Setup<t_vecPrec, t_matPrec, t_indPrec>::template setup<TConfig_h>( this, level, 2, false ); } template<AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec > void AMG<t_vecPrec, t_matPrec, t_indPrec>::setup( AMG_Level<TConfig_d> *level ) { AMG_Setup<t_vecPrec, t_matPrec, t_indPrec>::template setup<TConfig_d>( this, level, 2, false ); } template< AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec > void AMG<t_vecPrec, t_matPrec, t_indPrec>::solve_init( Vector_d &b, Vector_d &x, bool xIsZero) { if (xIsZero) { fine_d->setInitCycle(); } } template< AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec > void AMG<t_vecPrec, t_matPrec, t_indPrec>::solve_init( Vector_h &b, Vector_h &x, bool xIsZero) { if (xIsZero) { fine_h->setInitCycle(); } } template< AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec > void AMG<t_vecPrec, t_matPrec, t_indPrec>::solve_iteration( Vector_d &b, Vector_d &x) { AMGX_CPU_PROFILER( "AMG::solve_iteration " ); AMG_Solve<TConfig_d>::solve_iteration( this, b, x); } template< AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec > void AMG<t_vecPrec, t_matPrec, t_indPrec>::solve_iteration( Vector_h &b, Vector_h &x) { AMGX_CPU_PROFILER( "AMG::solve_iteration " ); AMG_Solve<TConfig_h>::solve_iteration( this, b, x); } template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void AMG<t_vecPrec, t_matPrec, t_indPrec>::getGridStatisticsString(std::stringstream &ss) { AMG_Level<TConfig_d> *level_d = this->fine_d; AMG_Level<TConfig_h> *level_h = this->fine_h; int64_t total_rows = 0; int64_t total_nnz = 0; float total_size = 0; ss << "AMG Grid:\n"; ss << " Number of Levels: " << this->num_levels << endl; AMGXLOG("Number of Levels", this->num_levels) ss << std::setw(15) << "LVL" << std::setw(13) << "ROWS" << std::setw(18) << "NNZ" << std::setw(10) << "SPRSTY" << std::setw(15) << "Mem (GB)" << std::endl; ss << " --------------------------------------------------------------\n"; while (level_d != NULL) { int has_diag = level_d->getA( ).hasProps(DIAG) ? 
1 : 0; int64_t num_rows = (int)(level_d->getA( ).get_num_rows() * level_d->getA( ).get_block_dimy()); int64_t nnz = (int)((level_d->getA( ).get_num_nz() + has_diag * level_d->getA( ).get_num_rows()) * level_d->getA( ).get_block_dimy() * level_d->getA( ).get_block_dimx()); float size = level_d->bytes(true) / 1024.0 / 1024 / 1024; // If aggregation AMG, skip this if # of neighbors = 0, since we're consolidating // If classical AMG, we need to enter here since ranks are allowed to have 0 rows (or no neighbors) if ( !level_d->getA().is_matrix_singleGPU() || (level_d->isClassicalAMGLevel() && level_d->getA().is_matrix_distributed()) ) { level_d->getA().manager->global_reduce_sum(&num_rows); level_d->getA().manager->global_reduce_sum(&nnz); level_d->getA().manager->global_reduce_sum(&size); } total_rows += num_rows; total_nnz += nnz; total_size += size; double sparsity = nnz / (double) ( num_rows * num_rows); ss << std::setw(12) << level_d->getLevelIndex( ) << "(D)" << std::setw(13) << num_rows << std::setw(18) << nnz << std::setw(10) << std::setprecision(3) << sparsity << std::setw(15) << size << std::setprecision(6) << std::endl; level_d = level_d->getNextLevel( device_memory( ) ); } while (level_h != NULL) { int has_diag = level_h->getA( ).hasProps(DIAG) ? 1 : 0; int64_t num_rows = (int)(level_h->getA( ).get_num_rows() * level_h->getA( ).get_block_dimy()); int64_t nnz = (int)((level_h->getA( ).get_num_nz() + has_diag * level_h->getA( ).get_num_rows()) * level_h->getA( ).get_block_dimy() * level_h->getA( ).get_block_dimx()); float size = level_h->bytes(true) / 1024.0 / 1024 / 1024; // If aggregation AMG, skip this if # of neighbors = 0, since we're consolidating // If classical AMG, we need to enter here since ranks are allowed to have 0 rows (or no neighbors) if ( !level_h->getA().is_matrix_singleGPU() || (level_h->isClassicalAMGLevel() && level_h->getA().is_matrix_distributed()) ) { level_h->getA().manager->global_reduce_sum(&num_rows); level_h->getA().manager->global_reduce_sum(&nnz); level_h->getA().manager->global_reduce_sum(&size); } total_rows += num_rows; total_nnz += nnz; total_size += size; double sparsity = nnz / (double) ( num_rows * num_rows); ss << std::setw(12) << level_h->getLevelIndex( ) << "(H)" << std::setw(13) << num_rows << std::setw(18) << nnz << std::setw(10) << std::setprecision(3) << sparsity << std::setw(15) << size << std::setprecision(6) << std::endl; level_h = level_h->getNextLevel( host_memory( ) ); } int64_t fine_rows; int64_t fine_nnz; if (this->fine_h) { fine_rows = this->fine_h->getA( ).get_num_rows() * this->fine_h->getA( ).get_block_dimy(); fine_nnz = this->fine_h->getA( ).get_block_dimy() * this->fine_h->getA( ).get_block_dimx() * ( this->fine_h->getA( ).get_num_nz() + (this->fine_h->getA( ).hasProps(DIAG) ? this->fine_h->getA( ).get_num_rows() : 0) ) ; if (this->fine_h->getA().is_matrix_distributed()) { this->fine_h->getA().manager->global_reduce_sum(&fine_rows); this->fine_h->getA().manager->global_reduce_sum(&fine_nnz); } } else { fine_rows = this->fine_d->getA( ).get_num_rows() * this->fine_d->getA( ).get_block_dimy() ; fine_nnz = this->fine_d->getA( ).get_block_dimy() * this->fine_d->getA( ).get_block_dimx() * ( this->fine_d->getA( ).get_num_nz() + (this->fine_d->getA( ).hasProps(DIAG) ? 
this->fine_d->getA( ).get_num_rows() : 0) ); if (this->fine_d->getA().is_matrix_distributed()) { this->fine_d->getA().manager->global_reduce_sum(&fine_rows); this->fine_d->getA().manager->global_reduce_sum(&fine_nnz); } } ss << " --------------------------------------------------------------\n"; ss << " Grid Complexity: " << total_rows / (double) fine_rows << std::endl; ss << " Operator Complexity: " << total_nnz / (double) fine_nnz << std::endl; ss << " Total Memory Usage: " << total_size << " GB" << std::endl; ss << " --------------------------------------------------------------\n"; } template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void AMG<t_vecPrec, t_matPrec, t_indPrec>::printGridStatistics( ) { std::stringstream ss; this->getGridStatisticsString(ss); amgx_output(ss.str().c_str(), static_cast<int>(ss.str().length())); } template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void AMG<t_vecPrec, t_matPrec, t_indPrec>::getGridStatisticsString2(std::stringstream &ss) { AMG_Level<TConfig_d> *level_d = this->fine_d; AMG_Level<TConfig_h> *level_h = this->fine_h; int total_rows = 0; int total_nnz = 0; float total_size = 0; ss << " multigrid levels:\n"; while (level_d != NULL) { int has_diag = level_d->getA( ).hasProps(DIAG) ? 1 : 0; total_rows += (int)(level_d->getA( ).get_num_rows() * level_d->getA( ).get_block_dimy()); total_nnz += (int)((level_d->getA( ).get_num_nz() + has_diag * level_d->getA( ).get_num_rows()) * level_d->getA( ).get_block_dimy() * level_d->getA( ).get_block_dimx()); float size = level_d->bytes() / 1024.0 / 1024 / 1024; total_size += size; ss << std::setw(5) << level_d->getLevelIndex( ) << " " << std::setw(5) << level_d->getA( ).get_num_rows() << std::endl; level_d = level_d->getNextLevel( device_memory( ) ); } while (level_h != NULL) { int has_diag = level_h->getA( ).hasProps(DIAG) ? 
1 : 0; total_rows += (int)(level_h->getA( ).get_num_rows() * level_h->getA( ).get_block_dimy()); total_nnz += (int)((level_h->getA( ).get_num_nz() + has_diag * level_h->getA( ).get_num_rows()) * level_h->getA( ).get_block_dimy() * level_h->getA( ).get_block_dimx()); float size = level_h->bytes() / 1024.0 / 1024 / 1024; total_size += size; ss << std::setw(5) << level_h->getLevelIndex( ) << " " << std::setw(5) << level_h->getA( ).get_num_rows() << std::endl; level_h = level_h->getNextLevel( host_memory( ) ); } } template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void AMG<t_vecPrec, t_matPrec, t_indPrec>::printGridStatistics2( ) { std::stringstream ss; this->getGridStatisticsString2(ss); amgx_output(ss.str().c_str(), static_cast<int>(ss.str().length())); } using std::scientific; using std::fixed; // print a line of length l, starting at character s void printLine(const int l, const int s) { std::stringstream ss; ss << setw(s) << " "; for (int i = 0; i < l; i++) { ss << "-"; } ss << endl; amgx_output(ss.str().c_str(), static_cast<int>(ss.str().length())); } template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void AMG<t_vecPrec, t_matPrec, t_indPrec>::printCoarsePoints() { #ifdef DEBUG typedef std::vector<int> iVec; typedef std::vector<int>::iterator iVecIter; ofstream coarsePoints("coarse_points.dat"); iVec originalRows; AMG_Level<TConfig_d> *level_d = fine_d; while ( level_d != NULL ) { originalRows = level_d->getOriginalRows(); level_d = level_d->next_d; if (level_d == NULL) { break; } coarsePoints << level_d->level_id << " " << level_d->getNumRows() << endl; for (iVecIter it = originalRows.begin(); it != originalRows.end(); ++it) { coarsePoints << *it << endl; } } AMG_Level<TConfig_h> *level_h = fine_h; while ( level_h != NULL ) { originalRows = level_h->getOriginalRows(); level_h = level_h->next_h; if (level_h == NULL) { break; } coarsePoints << level_h->level_id << " " << level_h->getNumRows() << endl; for (iVecIter it = originalRows.begin(); it != originalRows.end(); ++it) { coarsePoints << *it << endl; } } coarsePoints.close(); #endif } template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void AMG<t_vecPrec, t_matPrec, t_indPrec>::printConnections() { #ifdef DEBUG ofstream connFile("connections.dat"); AMG_Level<TConfig_d> *level_d = fine_d; Matrix_d ATemp_d; while (level_d != NULL) { connFile << level_d->level_id << " " << level_d->getNumRows() << endl; ATemp_d = level_d->getA(); for (int i = 0; i < ATemp_d.get_num_rows(); i++) { // get the row offset & num rows int offset = ATemp_d.row_offsets[i]; int numEntries = ATemp_d.row_offsets[i + 1] - offset; // # of connections is numEntries - 1 (ignoring diagonal) // this->numConnections.push_back(numEntries-1); connFile << numEntries - 1 << " "; // loop over non-zeros and add non-diagonal terms for (int j = offset; j < offset + numEntries; j++) { int columnIndex = ATemp_d.column_indices[j]; if (i != columnIndex) { // this->connections.push_back(columnIndex); connFile << columnIndex << " "; } } connFile << endl; } level_d = level_d->next_d; } AMG_Level<TConfig_h> *level_h = fine_h; Matrix_h ATemp_h; while (level_h != NULL) { connFile << level_h->level_id << " " << level_h->getNumRows() << endl; ATemp_d = level_h->getA(); for (int i = 0; i < ATemp_h.get_num_rows(); i++) { // get the row offset & num rows int offset = ATemp_h.row_offsets[i]; int numEntries = ATemp_h.row_offsets[i + 1] - offset; // # of 
connections is numEntries - 1 (ignoring diagonal) // this->numConnections.push_back(numEntries-1); connFile << numEntries - 1 << " "; // loop over non-zeros and add non-diagonal terms for (int j = offset; j < offset + numEntries; j++) { int columnIndex = ATemp_h.column_indices[j]; if (i != columnIndex) { // this->connections.push_back(columnIndex); connFile << columnIndex << " "; } } connFile << endl; } level_h = level_h->next_h; } #endif } /**************************************** * Explict instantiations ***************************************/ // real valued case template class AMG<AMGX_vecDouble, AMGX_matDouble, AMGX_indInt>; template class AMG<AMGX_vecFloat, AMGX_matFloat, AMGX_indInt>; template class AMG<AMGX_vecDouble, AMGX_matFloat, AMGX_indInt>; // complex valued case template class AMG<AMGX_vecComplex, AMGX_matComplex, AMGX_indInt>; template class AMG<AMGX_vecDoubleComplex, AMGX_matComplex, AMGX_indInt>; template class AMG<AMGX_vecDoubleComplex, AMGX_matDoubleComplex, AMGX_indInt>; } // namespace amgx
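The statistics assembled in getGridStatisticsString above boil down to three ratios: per-level sparsity (nnz divided by the square of the row count) and, for the whole hierarchy, grid and operator complexity measured against the finest level. A minimal host-only sketch of those formulas with made-up level sizes (the real values come from level->getA() and the distributed reductions shown above):

#include <cstdint>
#include <iostream>
#include <vector>

// Hypothetical per-level sizes; in the code above these come from level->getA().
struct LevelSize { int64_t rows; int64_t nnz; };

int main() {
    std::vector<LevelSize> levels = {{100000, 700000}, {25000, 300000}, {6000, 90000}};

    int64_t total_rows = 0, total_nnz = 0;
    for (const auto &l : levels) {
        // Same sparsity formula as the statistics loop: nnz / rows^2.
        double sparsity = static_cast<double>(l.nnz) / (static_cast<double>(l.rows) * l.rows);
        total_rows += l.rows;
        total_nnz  += l.nnz;
        std::cout << "rows=" << l.rows << " nnz=" << l.nnz << " sparsity=" << sparsity << "\n";
    }

    // Complexities are measured against the finest level (levels[0] here).
    std::cout << "Grid complexity:     " << total_rows / static_cast<double>(levels[0].rows) << "\n";
    std::cout << "Operator complexity: " << total_nnz  / static_cast<double>(levels[0].nnz)  << "\n";
    return 0;
}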
5901591314c43f7aa50676f162d432dd96694401.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <cmath> #include <iostream> #include "gpu-new-forward.h" #include <cstdio> #include <cassert> #define wbCheck(stmt) \ do { \ hipError_t err = stmt; \ if (err != hipSuccess) { \ std::cerr << "CUDA error: " << hipGetErrorString(err) << std::endl; \ std::cerr << "Failed to run stmt " << #stmt << std::endl; \ exit(-1); \ } \ } while (0) template <size_t tile_width=16> __global__ void conv_forward_kernel(float *y, const float *x, const float *k, const int B, const int M, const int C, const int H, const int W, const int K) { /* Modify this function to implement the forward pass described in Chapter 16. We have added an additional dimension to the tensors to support an entire mini-batch The goal here is to be correct AND fast. Function paramter definitions: y - output x - input k - kernel B - batch_size (number of images in x) M - number of output feature maps C - number of input feature maps H - input height dimension W - input width dimension K - kernel height and width (K x K) */ const int H_out = H - K + 1; const int W_out = W - K + 1; // We have some nice #defs for you below to simplify indexing. // Feel free to use them, or create your own. // An example use of these macros: // float a = y4d(0,0,0,0) // y4d(0,0,0,0) = a #define y4d(i3, i2, i1, i0) y[ \ (i3) * (M * H_out * W_out) + \ (i2) * (H_out * W_out) + \ (i1) * (W_out) + \ i0] #define x4d(i3, i2, i1, i0) x[ \ (i3) * (C * H * W) + \ (i2) * (H * W) + \ (i1) * (W) + \ i0] #define k4d(i3, i2, i1, i0) k[ \ (i3) * (C * K * K) + \ (i2) * (K * K) + \ (i1) * (K) + \ i0] auto yidx = [M, H_out, W_out](size_t i3, size_t i2, size_t i1, size_t i0) -> size_t { return i3*M*H_out*W_out + i2*H_out+W_out + i1*W_out + i2;}; auto xidx = [C, H, W](size_t i3, size_t i2, size_t i1, size_t i0) -> size_t { return i3*C*H*W + i2*H*W + i1*W + i0; }; auto kidx = [C, K](size_t i3, size_t i2, size_t i1, size_t i0) -> size_t { return i3*C*K*K + i2*K*K* + i1*K + i0; }; // Insert your GPU convolution kernel code here // Each thread computes a single output tile // Each block computes 16x16 output tiles const int W_grid = (W_out + (tile_width-1)) / tile_width; const int n = blockIdx.x; const int m = blockIdx.y; const int h = (blockIdx.z / W_grid) * tile_width + threadIdx.y; const int w = (blockIdx.z % W_grid) * tile_width + threadIdx.x; if (h < H_out && w < W_out) { float acc = 0.0f; for (int c = 0; c < C; ++c) { for (int p = 0; p < K; ++p) { for (int q = 0; q < K; ++q) { float xval = x4d(n, c, h+p, w+q); float kval = k4d(m, c, p, q); acc += xval * kval; // acc += x4d(n, c, h + p, w + q) * k4d(m, c, p, q); }} } y4d(n, m, h, w) = acc; } #undef y4d #undef x4d #undef k4d } __host__ void GPUInterface::conv_forward_gpu(float* host_y, const float* host_x, const float* host_k, const int B, const int M, const int C, const int H, const int W, const int K) { // Function paramter definitions: // y - output // x - input // k - kernel // B - batch_size (number of images in x) // M - number of output feature maps // C - number of input feature maps // H - input height dimension // W - input width dimension // K - kernel height and width (K x K) // Declare relevant device pointers float* dev_y = nullptr; float* dev_x = nullptr; float* dev_k = nullptr; // Allocate memory and copy over the relevant data structures to the GPU const int H_out = H - K + 1; const int W_out = W - K + 1; const size_t size_x = B * C * H * W * sizeof(*dev_x); const size_t size_y = B * M * H_out * W_out * 
sizeof(*dev_y); const size_t size_k = M * C * K * K * sizeof(*dev_k); wbCheck(hipMalloc(&dev_y, size_y)); wbCheck(hipMalloc(&dev_x, size_x)); wbCheck(hipMalloc(&dev_k, size_k)); wbCheck(hipMemcpy(dev_x, host_x, size_x, hipMemcpyHostToDevice)); wbCheck(hipMemcpy(dev_k, host_k, size_k, hipMemcpyHostToDevice)); // Set the kernel dimensions and call the kernel // kernel dimensions: static constexpr size_t tile_width = 16; // Block Dims --- 16 x 16. Each thread computes a single output tile dim3 BlockDim(tile_width, tile_width); // Grid Dims: (X, Y, Z) --- (batch, output feature map, tile) size_t Zy = (H_out + (tile_width-1)) / tile_width; size_t Zx = (W_out + (tile_width-1)) / tile_width; dim3 GridDim(B, M, Zy * Zx); hipLaunchKernelGGL(( conv_forward_kernel<tile_width>) , dim3(GridDim), dim3(BlockDim), 0, 0, dev_y, dev_x, dev_k, B, M, C, H, W, K ); // Copy the output back to host wbCheck(hipMemcpy(host_y, dev_y, size_y, hipMemcpyDeviceToHost)); // Free device memory wbCheck(hipFree(dev_y)); wbCheck(hipFree(dev_k)); wbCheck(hipFree(dev_x)); // Useful snippet for error checking // hipError_t error = hipGetLastError(); // if(error != hipSuccess) // { // std::cout<<"CUDA error: "<<hipGetErrorString(error)<<std::endl; // exit(-1); // } } __host__ void GPUInterface::get_device_properties() { int deviceCount; hipGetDeviceCount(&deviceCount); for(int dev = 0; dev < deviceCount; dev++) { hipDeviceProp_t deviceProp; hipGetDeviceProperties(&deviceProp, dev); std::cout<<"Device "<<dev<<" name: "<<deviceProp.name<<std::endl; std::cout<<"Computational capabilities: "<<deviceProp.major<<"."<<deviceProp.minor<<std::endl; std::cout<<"Max Global memory size: "<<deviceProp.totalGlobalMem<<std::endl; std::cout<<"Max Constant memory size: "<<deviceProp.totalConstMem<<std::endl; std::cout<<"Max Shared memory size per block: "<<deviceProp.sharedMemPerBlock<<std::endl; std::cout<<"Max threads per block: "<<deviceProp.maxThreadsPerBlock<<std::endl; std::cout<<"Max block dimensions: "<<deviceProp.maxThreadsDim[0]<<" x, "<<deviceProp.maxThreadsDim[1]<<" y, "<<deviceProp.maxThreadsDim[2]<<" z"<<std::endl; std::cout<<"Max grid dimensions: "<<deviceProp.maxGridSize[0]<<" x, "<<deviceProp.maxGridSize[1]<<" y, "<<deviceProp.maxGridSize[2]<<" z"<<std::endl; std::cout<<"Warp Size: "<<deviceProp.warpSize<<std::endl; } }
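The y4d/x4d/k4d macros in conv_forward_kernel spell out a row-major (batch, map, row, column) layout. The unused yidx and kidx lambdas next to them contain typos (a dropped multiplication in yidx, a stray one in kidx, and i2 where i0 is intended); this does not affect the kernel, since only the macros are used. A corrected sketch of the same offset arithmetic as standalone helpers, for illustration only:

#include <cstddef>

// Row-major offset into y with shape (B, M, H_out, W_out); mirrors the y4d macro.
__host__ __device__ inline size_t yidx(size_t M, size_t H_out, size_t W_out,
                                       size_t b, size_t m, size_t h, size_t w) {
    return b * (M * H_out * W_out) + m * (H_out * W_out) + h * W_out + w;
}

// Row-major offset into x with shape (B, C, H, W); mirrors the x4d macro.
__host__ __device__ inline size_t xidx(size_t C, size_t H, size_t W,
                                       size_t b, size_t c, size_t h, size_t w) {
    return b * (C * H * W) + c * (H * W) + h * W + w;
}

// Row-major offset into k with shape (M, C, K, K); mirrors the k4d macro.
__host__ __device__ inline size_t kidx(size_t C, size_t K,
                                       size_t m, size_t c, size_t p, size_t q) {
    return m * (C * K * K) + c * (K * K) + p * K + q;
}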
5901591314c43f7aa50676f162d432dd96694401.cu
#include <cmath> #include <iostream> #include "gpu-new-forward.h" #include <cstdio> #include <cassert> #define wbCheck(stmt) \ do { \ cudaError_t err = stmt; \ if (err != cudaSuccess) { \ std::cerr << "CUDA error: " << cudaGetErrorString(err) << std::endl; \ std::cerr << "Failed to run stmt " << #stmt << std::endl; \ exit(-1); \ } \ } while (0) template <size_t tile_width=16> __global__ void conv_forward_kernel(float *y, const float *x, const float *k, const int B, const int M, const int C, const int H, const int W, const int K) { /* Modify this function to implement the forward pass described in Chapter 16. We have added an additional dimension to the tensors to support an entire mini-batch The goal here is to be correct AND fast. Function paramter definitions: y - output x - input k - kernel B - batch_size (number of images in x) M - number of output feature maps C - number of input feature maps H - input height dimension W - input width dimension K - kernel height and width (K x K) */ const int H_out = H - K + 1; const int W_out = W - K + 1; // We have some nice #defs for you below to simplify indexing. // Feel free to use them, or create your own. // An example use of these macros: // float a = y4d(0,0,0,0) // y4d(0,0,0,0) = a #define y4d(i3, i2, i1, i0) y[ \ (i3) * (M * H_out * W_out) + \ (i2) * (H_out * W_out) + \ (i1) * (W_out) + \ i0] #define x4d(i3, i2, i1, i0) x[ \ (i3) * (C * H * W) + \ (i2) * (H * W) + \ (i1) * (W) + \ i0] #define k4d(i3, i2, i1, i0) k[ \ (i3) * (C * K * K) + \ (i2) * (K * K) + \ (i1) * (K) + \ i0] auto yidx = [M, H_out, W_out](size_t i3, size_t i2, size_t i1, size_t i0) -> size_t { return i3*M*H_out*W_out + i2*H_out+W_out + i1*W_out + i2;}; auto xidx = [C, H, W](size_t i3, size_t i2, size_t i1, size_t i0) -> size_t { return i3*C*H*W + i2*H*W + i1*W + i0; }; auto kidx = [C, K](size_t i3, size_t i2, size_t i1, size_t i0) -> size_t { return i3*C*K*K + i2*K*K* + i1*K + i0; }; // Insert your GPU convolution kernel code here // Each thread computes a single output tile // Each block computes 16x16 output tiles const int W_grid = (W_out + (tile_width-1)) / tile_width; const int n = blockIdx.x; const int m = blockIdx.y; const int h = (blockIdx.z / W_grid) * tile_width + threadIdx.y; const int w = (blockIdx.z % W_grid) * tile_width + threadIdx.x; if (h < H_out && w < W_out) { float acc = 0.0f; for (int c = 0; c < C; ++c) { for (int p = 0; p < K; ++p) { for (int q = 0; q < K; ++q) { float xval = x4d(n, c, h+p, w+q); float kval = k4d(m, c, p, q); acc += xval * kval; // acc += x4d(n, c, h + p, w + q) * k4d(m, c, p, q); }} } y4d(n, m, h, w) = acc; } #undef y4d #undef x4d #undef k4d } __host__ void GPUInterface::conv_forward_gpu(float* host_y, const float* host_x, const float* host_k, const int B, const int M, const int C, const int H, const int W, const int K) { // Function paramter definitions: // y - output // x - input // k - kernel // B - batch_size (number of images in x) // M - number of output feature maps // C - number of input feature maps // H - input height dimension // W - input width dimension // K - kernel height and width (K x K) // Declare relevant device pointers float* dev_y = nullptr; float* dev_x = nullptr; float* dev_k = nullptr; // Allocate memory and copy over the relevant data structures to the GPU const int H_out = H - K + 1; const int W_out = W - K + 1; const size_t size_x = B * C * H * W * sizeof(*dev_x); const size_t size_y = B * M * H_out * W_out * sizeof(*dev_y); const size_t size_k = M * C * K * K * sizeof(*dev_k); wbCheck(cudaMalloc(&dev_y, 
size_y)); wbCheck(cudaMalloc(&dev_x, size_x)); wbCheck(cudaMalloc(&dev_k, size_k)); wbCheck(cudaMemcpy(dev_x, host_x, size_x, cudaMemcpyHostToDevice)); wbCheck(cudaMemcpy(dev_k, host_k, size_k, cudaMemcpyHostToDevice)); // Set the kernel dimensions and call the kernel // kernel dimensions: static constexpr size_t tile_width = 16; // Block Dims --- 16 x 16. Each thread computes a single output tile dim3 BlockDim(tile_width, tile_width); // Grid Dims: (X, Y, Z) --- (batch, output feature map, tile) size_t Zy = (H_out + (tile_width-1)) / tile_width; size_t Zx = (W_out + (tile_width-1)) / tile_width; dim3 GridDim(B, M, Zy * Zx); conv_forward_kernel<tile_width> <<<GridDim, BlockDim>>>( dev_y, dev_x, dev_k, B, M, C, H, W, K ); // Copy the output back to host wbCheck(cudaMemcpy(host_y, dev_y, size_y, cudaMemcpyDeviceToHost)); // Free device memory wbCheck(cudaFree(dev_y)); wbCheck(cudaFree(dev_k)); wbCheck(cudaFree(dev_x)); // Useful snippet for error checking // cudaError_t error = cudaGetLastError(); // if(error != cudaSuccess) // { // std::cout<<"CUDA error: "<<cudaGetErrorString(error)<<std::endl; // exit(-1); // } } __host__ void GPUInterface::get_device_properties() { int deviceCount; cudaGetDeviceCount(&deviceCount); for(int dev = 0; dev < deviceCount; dev++) { cudaDeviceProp deviceProp; cudaGetDeviceProperties(&deviceProp, dev); std::cout<<"Device "<<dev<<" name: "<<deviceProp.name<<std::endl; std::cout<<"Computational capabilities: "<<deviceProp.major<<"."<<deviceProp.minor<<std::endl; std::cout<<"Max Global memory size: "<<deviceProp.totalGlobalMem<<std::endl; std::cout<<"Max Constant memory size: "<<deviceProp.totalConstMem<<std::endl; std::cout<<"Max Shared memory size per block: "<<deviceProp.sharedMemPerBlock<<std::endl; std::cout<<"Max threads per block: "<<deviceProp.maxThreadsPerBlock<<std::endl; std::cout<<"Max block dimensions: "<<deviceProp.maxThreadsDim[0]<<" x, "<<deviceProp.maxThreadsDim[1]<<" y, "<<deviceProp.maxThreadsDim[2]<<" z"<<std::endl; std::cout<<"Max grid dimensions: "<<deviceProp.maxGridSize[0]<<" x, "<<deviceProp.maxGridSize[1]<<" y, "<<deviceProp.maxGridSize[2]<<" z"<<std::endl; std::cout<<"Warp Size: "<<deviceProp.warpSize<<std::endl; } }
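conv_forward_gpu above folds both spatial tile coordinates into gridDim.z (Zy * Zx blocks), and the kernel recovers them with division and modulo by W_grid. A small CPU-only replay of that decode, using hypothetical output sizes, which checks that every output pixel is written by exactly one thread:

#include <cstdio>
#include <vector>

int main() {
    // Hypothetical layer sizes; the real ones come from the network shapes.
    const int H_out = 70, W_out = 66, tile = 16;
    const int H_grid = (H_out + tile - 1) / tile;   // same ceil-division as Zy above
    const int W_grid = (W_out + tile - 1) / tile;   // same ceil-division as Zx above

    std::vector<int> hits(H_out * W_out, 0);
    // Replays the kernel's mapping: blockIdx.z selects the tile, threadIdx the pixel within it.
    for (int z = 0; z < H_grid * W_grid; ++z)
        for (int ty = 0; ty < tile; ++ty)
            for (int tx = 0; tx < tile; ++tx) {
                int h = (z / W_grid) * tile + ty;
                int w = (z % W_grid) * tile + tx;
                if (h < H_out && w < W_out) ++hits[h * W_out + w];
            }

    bool ok = true;
    for (int v : hits) ok = ok && (v == 1);
    std::printf("blocks in z = %d, every output pixel covered exactly once: %s\n",
                H_grid * W_grid, ok ? "yes" : "no");
    return 0;
}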
4f64f7aedf21bde25751c1a25e491a1f20235e19.hip
// !!! This is a file automatically generated by hipify!!! //Inner product (dot product) of two vectors in a parallel fashion #include <stdio.h> #include <iostream> #include <hip/hip_runtime.h> #include <time.h> #include <math.h> #define N (pow(2,14)) #include "kernelsGPU.cuh" #include "kernelsCPU.h" int main(void) { // ----- Variables to profile the execution time float CPU_profile, GPU_profile; // CPU clock_t startCPU, endCPU; // GPU hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); //---------------------------------------------- float *a, *b, *c;// host copies of a, b, c float *d_a, *d_b, *d_c;// device copies of a, b, c float size = N * sizeof(float); //int sizeInGPU; a = (float *)malloc(size); b = (float *)malloc(size); c = (float *)malloc(sizeof(float)); // Define QoS: p0 // supervisor(float *lambda_GPU) // sizeInGPU = lambda_GPU*N; for (int i = 0; i < N; i++) { a[i] = 1; b[i] = 1; } // In the CPU ------------------------------------------ startCPU = clock(); innerProdCPU(a, b, c); endCPU = clock(); // Elapsed time -- CPU CPU_profile = (((double) (endCPU - startCPU)) / CLOCKS_PER_SEC)*1000; // ----------------------------------------------------- // In the GPU ------------------------------------------ hipMalloc((void**)&d_a, size); hipMalloc((void**)&d_b, size); hipMalloc((void**)&d_c, sizeof(float)); hipMemcpy(d_a, a, size, hipMemcpyHostToDevice); hipMemcpy(d_b, b, size, hipMemcpyHostToDevice); // Call kernel hipEventRecord(start); hipLaunchKernelGGL(( innerProd), dim3(NUMBER_OF_BLOCKS), dim3(THREADS_PER_BLOCK), 0, 0, d_a, d_b, d_c); hipEventRecord(stop); hipMemcpy(c, d_c, sizeof(float), hipMemcpyDeviceToHost); hipEventSynchronize(stop); // Elapsed time -- GPU hipEventElapsedTime(&GPU_profile, start, stop); // ----------------------------------------------------- std::cout << "NUMBER_OF_BLOCKS = " << NUMBER_OF_BLOCKS << "\n"; std::cout << "c = " << *c << "\n"; std::cout << "Kernel execution time in CPU = " << CPU_profile << " milliseconds" << "\n"; std::cout << "Kernel execution time in GPU = " << GPU_profile << " milliseconds" << "\n"; // Remember: free and hipFree DO NOT ERASE MEMORY! They only // return memory to a pool to be re-allocated. That is why the shared // variable 'cc' is initialized inside the kernel. See this: // http://stackoverflow.com/questions/13100615/cudafree-is-not-freeing-memory free(a); free(b); free(c); hipFree(d_a); hipFree(d_b); hipFree(d_c); return 0; }
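In the host code above, N expands to pow(2,14), a double, and the byte count is kept in a float before being handed to malloc and hipMalloc; that is exact for 2^14 elements but would silently round for much larger N. A sketch of the same sizing done in integer arithmetic (illustrative only, names kept from the original):

#include <cstddef>
#include <cstdlib>

int main() {
    constexpr std::size_t N = std::size_t(1) << 14;   // 2^14, computed as an integer
    const std::size_t size = N * sizeof(float);       // exact byte count, no float rounding

    float *a = static_cast<float *>(std::malloc(size));
    float *b = static_cast<float *>(std::malloc(size));
    // ... fill, compute and copy to the device as in the original ...
    std::free(a);
    std::free(b);
    return 0;
}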
4f64f7aedf21bde25751c1a25e491a1f20235e19.cu
//Inner product (dot product) of two vectors in a parallel fashion #include <stdio.h> #include <iostream> #include <cuda.h> #include <time.h> #include <math.h> #define N (pow(2,14)) #include "kernelsGPU.cuh" #include "kernelsCPU.h" int main(void) { // ----- Variables to profile the execution time float CPU_profile, GPU_profile; // CPU clock_t startCPU, endCPU; // GPU cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); //---------------------------------------------- float *a, *b, *c;// host copies of a, b, c float *d_a, *d_b, *d_c;// device copies of a, b, c float size = N * sizeof(float); //int sizeInGPU; a = (float *)malloc(size); b = (float *)malloc(size); c = (float *)malloc(sizeof(float)); // Define QoS: p0 // supervisor(float *lambda_GPU) // sizeInGPU = lambda_GPU*N; for (int i = 0; i < N; i++) { a[i] = 1; b[i] = 1; } // In the CPU ------------------------------------------ startCPU = clock(); innerProdCPU(a, b, c); endCPU = clock(); // Elapsed time -- CPU CPU_profile = (((double) (endCPU - startCPU)) / CLOCKS_PER_SEC)*1000; // ----------------------------------------------------- // In the GPU ------------------------------------------ cudaMalloc((void**)&d_a, size); cudaMalloc((void**)&d_b, size); cudaMalloc((void**)&d_c, sizeof(float)); cudaMemcpy(d_a, a, size, cudaMemcpyHostToDevice); cudaMemcpy(d_b, b, size, cudaMemcpyHostToDevice); // Call kernel cudaEventRecord(start); innerProd<<<NUMBER_OF_BLOCKS, THREADS_PER_BLOCK>>>(d_a, d_b, d_c); cudaEventRecord(stop); cudaMemcpy(c, d_c, sizeof(float), cudaMemcpyDeviceToHost); cudaEventSynchronize(stop); // Elapsed time -- GPU cudaEventElapsedTime(&GPU_profile, start, stop); // ----------------------------------------------------- std::cout << "NUMBER_OF_BLOCKS = " << NUMBER_OF_BLOCKS << "\n"; std::cout << "c = " << *c << "\n"; std::cout << "Kernel execution time in CPU = " << CPU_profile << " milliseconds" << "\n"; std::cout << "Kernel execution time in GPU = " << GPU_profile << " milliseconds" << "\n"; // Remember: free and cudaFree DO NOT ERASE MEMORY! They only // return memory to a pool to be re-allocated. That is why the shared // variable 'cc' is initialized inside the kernel. See this: // http://stackoverflow.com/questions/13100615/cudafree-is-not-freeing-memory free(a); free(b); free(c); cudaFree(d_a); cudaFree(d_b); cudaFree(d_c); return 0; }
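kernelsGPU.cuh and kernelsCPU.h are not part of this collection, so innerProd, innerProdCPU, NUMBER_OF_BLOCKS and THREADS_PER_BLOCK are only visible from their call sites. A common shape for such a dot-product kernel, consistent with the closing comment about a shared accumulator initialized inside the kernel, is a per-block shared-memory reduction followed by one atomic add per block; the sketch below is an assumption about that header, not its actual contents:

#define THREADS_PER_BLOCK 256   // assumed; the real values live in kernelsGPU.cuh
#define NUMBER_OF_BLOCKS 64     // assumed

__global__ void innerProd(const float *a, const float *b, float *c)
{
    __shared__ float cc[THREADS_PER_BLOCK];   // per-block partial sums, (re)initialized here
    const int n = 1 << 14;                    // matches #define N in the host file
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    int stride = gridDim.x * blockDim.x;

    float sum = 0.0f;
    for (int i = tid; i < n; i += stride)
        sum += a[i] * b[i];
    cc[threadIdx.x] = sum;
    __syncthreads();

    // Tree reduction within the block (blockDim.x must be a power of two).
    for (int s = blockDim.x / 2; s > 0; s >>= 1) {
        if (threadIdx.x < s) cc[threadIdx.x] += cc[threadIdx.x + s];
        __syncthreads();
    }

    // One atomic per block; assumes *c was zeroed before the launch (e.g. with cudaMemset).
    if (threadIdx.x == 0) atomicAdd(c, cc[0]);
}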
5a4a3d2c91ce8106fa03dfa5c31ef93a69736b5c.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <chrono> #include <cstdint> #include <fstream> #include <iostream> #include <stdio.h> #include <vector> using std::cin; using std::cout; /******************************************************************************************************************************************************* * * DEVICE * *******************************************************************************************************************************************************/ namespace Device { // , . const int nThreads = 32; // . double *inImage = nullptr; // . double *outImage = nullptr; // ( . double *filters = nullptr; // double *featureMaps = nullptr; // ( , ). struct Size { std::size_t inImageSize; std::size_t outImageWidth; std::size_t outImageHeight; std::size_t outImageSize; std::size_t kernelWeightsSize; std::size_t featureMapsize; } size; // . static void freeMemory() { hipFree(inImage); hipFree(outImage); hipFree(filters); hipFree(featureMaps); } /* * @param width - . * @param height - . * @param stride - . * @param filterLineSize - . . * @param filtersCount - . * @note , . */ static int allocateMemory(std::size_t width, std::size_t height, std::size_t stride, std::size_t filterLineSize, std::size_t filtersCount) { // . auto hipError_t = hipSuccess; // . // , int float sizeof ( , ). auto kernelWeightsSize = filtersCount * filterLineSize * filterLineSize; hipError_t = hipMalloc(&filters, kernelWeightsSize * sizeof(*filters)); if (hipSuccess != hipError_t) { freeMemory(); return -1; } auto inImageSize = width * height * 3; hipError_t = hipMalloc(&inImage, inImageSize * sizeof(*inImage)); if (hipSuccess != hipError_t) { freeMemory(); return -1; } auto outImageWidth = ((width - filterLineSize) / stride + 1); auto outImageHeight = ((height - filterLineSize) / stride + 1); auto outImageSize = outImageWidth * outImageHeight * 3; hipError_t = hipMalloc(&outImage, outImageSize * sizeof(*outImage)); if (hipSuccess != hipError_t) { freeMemory(); return -1; } auto featureMapsize = filtersCount * outImageSize; hipError_t = hipMalloc(&featureMaps, featureMapsize * sizeof(*featureMaps)); if (hipSuccess != hipError_t) { freeMemory(); return -1; } // . hipError_t = hipMemset(filters, 0, kernelWeightsSize * sizeof(*filters)); if (hipSuccess != hipError_t) { freeMemory(); return -1; } hipError_t = hipMemset(inImage, 0, inImageSize * sizeof(*inImage)); if (hipSuccess != hipError_t) { freeMemory(); return -1; } hipError_t = hipMemset(outImage, 0, outImageSize * sizeof(*outImage)); if (hipSuccess != hipError_t) { freeMemory(); return -1; } hipError_t = hipMemset(featureMaps, 0, featureMapsize * sizeof(*featureMaps)); if (hipSuccess != hipError_t) { freeMemory(); return -1; } // , . size.kernelWeightsSize = kernelWeightsSize; size.inImageSize = inImageSize; size.outImageWidth = outImageWidth; size.outImageHeight = outImageHeight; size.outImageSize = outImageSize; size.featureMapsize = featureMapsize; return 0; } } /* , . * * * 1. inImage. [[[r, g, b], [r, g, b], ..., ], [[r, g, b], [r, g, b], ..., ], ... ] * [r, g, b] - ( ) * [[r, g, b], [r, g, b], ..., ] - * [[[r, g, b], [r, g, b], ..., ], [[r, g, b], [r, g, b], ..., ], ... ] - . * * . [ [ [3]]]. !!! * * , 3, * [[[r, g, b], [r, g, b], ..., ], [[r, g, b], [r, g, b], ..., ], ... ] r, g, b, r, g, b, r, g, b, ... * * * ( ) . - [r, g, b] ( , , ). * - ( 0 255) . , . * * 2. . [ [[1, 1, 1,], [1, 1, 1], [1, 1, 1]], ... ]. - 33 * * * * * * * - , , , . 
* , , , . * . ( , ) * , . featureMaps. . * , featureMaps [[[[r, g, b], [r, g, b], ..., ], [[r, g, b], [r, g, b], ..., ], ... ], ...] * , , , , - .. * * , , , , outImage. inImage ( ). * * * @param inImage - . * @param width - . * @param height - . * @param filters - . * @param filtersCount - . * @param stride - . * @param outImage - . * @param featureMaps - . * @param outWidth - . * @param outHeight - . */ /* __global__ void gpuCNN( const double *inImage, std::size_t width, std::size_t height, double *filters, std::size_t filtersCount, std::size_t filterLineSize, std::size_t stride, double *outImage, double *featureMaps, std::size_t outWidth, std::size_t outHeight) { auto halfLineSize = filterLineSize / 2; stride = (0 == stride) ? 1 : stride; auto outPixelX = threadIdx.x + blockIdx.x * blockDim.x; auto outPixelY = threadIdx.y + blockIdx.y * blockDim.y; if (outPixelX < outWidth && outPixelY < outHeight) { auto pixelX = outPixelX * stride + halfLineSize; auto pixelY = outPixelY * stride + halfLineSize; // GPU for(std::size_t colorIdx = 0; colorIdx < 3; ++colorIdx) { double outImageSum = 0; auto outColorPos = outPixelY*outWidth*3 + outPixelX*3 + colorIdx; for(std::size_t filterIdx = 0; filterIdx < filtersCount; ++filterIdx) { double currentFilterSum = 0; for(std::size_t i = 0; i < filterLineSize; ++i) { for(std::size_t j = 0; j < filterLineSize; ++j) { auto convPixelX = pixelX + j - halfLineSize; auto convPixelY = pixelY + i - halfLineSize; auto colorPos = convPixelY*width*3 + convPixelX*3 + colorIdx; auto weightPos = filterIdx*filterLineSize*filterLineSize + i*filterLineSize + j; currentFilterSum += inImage[colorPos] * filters[weightPos]; } } outImageSum += currentFilterSum; featureMaps[filterIdx*outWidth*outHeight*3 + outColorPos] = currentFilterSum; } outImage[outColorPos] = outImageSum / (float)filtersCount; } } } */ /******************************************************************************************************************************************************* * * DEVICE * *******************************************************************************************************************************************************/ void cpuCNN( const std::vector<double> &inImage, std::size_t width, std::size_t height, const std::vector<double> &filters, std::size_t filtersCount, std::size_t filterLineSize, std::size_t stride, std::vector<double> &outImage, std::vector<double> &featureMaps, std::size_t outWidth, std::size_t outHeight ) { static const auto halfLineSize = filterLineSize / 2; //stride = (0 == stride) ? 
1 : stride; for(std::size_t x = 0; x < outWidth; ++x) { for(std::size_t y = 0; y < outHeight; ++y) { auto pixelX = x * stride + halfLineSize; auto pixelY = y * stride + halfLineSize; // CPU for(std::size_t colorIdx = 0; colorIdx < 3; ++colorIdx) { double outImageSum = 0; auto outPixelX = (pixelX - halfLineSize) / stride; auto outPixelY = (pixelY - halfLineSize) / stride; auto outColorPos = outPixelY*outWidth*3 + outPixelX*3 + colorIdx; for(std::size_t filterIdx = 0; filterIdx < filtersCount; ++filterIdx) { double currentFilterSum = 0; for(std::size_t i = 0; i < filterLineSize; ++i) { for(std::size_t j = 0; j < filterLineSize; ++j) { auto convPixelX = pixelX + j - halfLineSize; auto convPixelY = pixelY + i - halfLineSize; auto colorPos = convPixelY*width*3 + convPixelX*3 + colorIdx; auto weightPos = filterIdx*filterLineSize*filterLineSize + i*filterLineSize + j; currentFilterSum += inImage[colorPos] * filters[weightPos]; } } outImageSum += currentFilterSum; featureMaps[filterIdx*outWidth*outHeight*3 + outColorPos] = currentFilterSum; } outImage[outColorPos] = outImageSum / (float)filtersCount; } } } } /******************************************************************************************************************************************************* * * MAIN * *******************************************************************************************************************************************************/ namespace CliArgs { // , ( 0, 1 ). static const int N_ARGS = 5; // . static const int IN_FILE_POS = 1; // - . static const int IMG_WIDTH = 2; // - . static const int IMG_HEIGHT = 3; // static const int STRIDE = 4; } /******************************************************************************************************************************************************* * * MAIN * *******************************************************************************************************************************************************/ // // , . int main(int argc, char *argv[]) { #if defined(WIN32) || defined(_WIN32) || defined(__WIN32) && !defined(__CYGWIN__) setlocale(0, "russian"); #endif // . if (CliArgs::N_ARGS != argc) { cout << " ." << std::endl; getchar(); return 1; } // . auto imageFilePath = argv[CliArgs::IN_FILE_POS]; // . auto imageWidth = atoi(argv[CliArgs::IMG_WIDTH]); auto imageHeight = atoi(argv[CliArgs::IMG_HEIGHT]); // . auto stride = atoi(argv[CliArgs::STRIDE]); auto imageSize = imageWidth * imageHeight * 3; // . std::ifstream ifs(imageFilePath, std::ios_base::in); if (!ifs.is_open()) { cout << " " << imageFilePath << std::endl; getchar(); return 1; } std::cout << " ..." << std::endl; std::vector<double> imageData(imageSize); for (std::size_t i = 0; i < imageSize; ++i) ifs >> imageData[i]; ifs.close(); std::cout << " " << std::endl; // . auto filterLineSize = 3; auto filtersCount = 4; std::vector<double> filters = { 0.5, -1, 0.5, 0.5, -1, 0.5, 0.5, -1, 0.5, 0, 0, 2, 0, 2, 0, 2, 0, 0, 2, 0, 0, 0, 2, 0, 0, 0, 2, -.5, -0.5, -0.5, 1, 1, 1, -0.5, -0.5, -0.5 }; // . if (0 != Device::allocateMemory(imageWidth, imageHeight, stride, filterLineSize, filtersCount)) { cout << " " << std::endl; getchar(); return 1; } cout << " " << std::endl; // HOST -> GPU. 
auto hipError_t = hipSuccess; hipError_t = hipMemcpy(Device::inImage, imageData.data(), imageSize * sizeof(imageData[0]), hipMemcpyHostToDevice); if (hipSuccess != hipError_t) { cout << " : " << hipError_t << std::endl; getchar(); Device::freeMemory(); return 1; } hipError_t = hipMemcpy(Device::filters, filters.data(), filters.size() * sizeof(filters[0]), hipMemcpyHostToDevice); if (hipSuccess != hipError_t) { cout << " : " << hipError_t << std::endl; getchar(); Device::freeMemory(); return 1; } cout << " " << std::endl; // CPU. std::vector<double> cpuOutImage(Device::size.outImageSize, 0.0); std::vector<double> cpuFeatureMaps(Device::size.featureMapsize, 0.0); // CPU. auto cpuBeginTime = std::chrono::steady_clock::now(); cpuCNN( imageData, imageWidth, imageHeight, filters, filtersCount, filterLineSize, stride, cpuOutImage, cpuFeatureMaps, Device::size.outImageWidth, Device::size.outImageHeight ); auto cpuTime = std::chrono::duration_cast<std::chrono::milliseconds>(std::chrono::steady_clock::now() - cpuBeginTime).count(); // . std::cout << " ..." << std::endl; std::ofstream cpuImgOfs("cpu_out_image.txt", std::ios_base::out | std::ios_base::trunc); if (cpuImgOfs.is_open()) for (std::size_t i = 0; i < Device::size.outImageSize; ++i) cpuImgOfs << static_cast<unsigned int>(cpuOutImage[i]) % 255 << " "; cpuImgOfs.close(); cout << " ..." << std::endl; cout << " ..." << std::endl; std::ofstream cpuFmOfs("cpu_out_features.txt", std::ios_base::out | std::ios_base::trunc); if (cpuFmOfs.is_open()) for (std::size_t i = 0; i < Device::size.featureMapsize; ++i) cpuFmOfs << static_cast<unsigned int>(cpuFeatureMaps[i]) % 255 << " "; cpuFmOfs.close(); cout << " ..." << std::endl; //delete[] cpuOutImage; //delete[] cpuFeatureMaps; // GPU. /* dim3 threads(Device::nThreads, Device::nThreads); auto nBlocksX = Device::size.outImageWidth / threads.x; nBlocksX += (0 == Device::size.outImageWidth % threads.x) ? 0 : 1; auto nBlocksY = Device::size.outImageHeight / threads.y; nBlocksY += (0 == Device::size.outImageHeight % threads.y) ? 0 : 1; dim3 blocks(nBlocksX, nBlocksY); . auto gpuBeginTime = std::chrono::steady_clock::now(); gpuCNN <<< blocks, threads >>> ( Device::inImage, imageWidth, imageHeight, Device::filters, filtersCount, filterLineSize, stride, Device::outImage, Device::featureMaps, Device::size.outImageWidth, Device::size.outImageHeight ); hipDeviceSynchronize(); auto gpuTime = std::chrono::duration_cast<std::chrono::milliseconds>(std::chrono::steady_clock::now() - gpuBeginTime).count(); cout << " GPU" << std::endl; GPU . auto outImage = new double[Device::size.outImageSize]; auto featureMaps = new double[Device::size.featureMapsize]; hipError_t = hipMemcpy(outImage, Device::outImage, Device::size.outImageSize * sizeof(*outImage), hipMemcpyDeviceToHost); if (hipSuccess != hipError_t) { cout << " : " << hipError_t << std::endl; getchar(); Device::freeMemory(); return 1; } hipError_t = hipMemcpy(featureMaps, Device::featureMaps, Device::size.featureMapsize * sizeof(*featureMaps), hipMemcpyDeviceToHost); if (hipSuccess != hipError_t) { cout << " : " << hipError_t << std::endl; getchar(); Device::freeMemory(); return 1; } Device::freeMemory(); cout << " " << std::endl; . std::cout << " ..." << std::endl; std::ofstream imgOfs("out_image.txt", std::ios_base::out | std::ios_base::trunc); if (imgOfs.is_open()) for (std::size_t i = 0; i < Device::size.outImageSize; ++i) imgOfs << static_cast<unsigned int>(outImage[i]) % 255 << " "; imgOfs.close(); cout << " ..." << std::endl; cout << " ..." 
<< std::endl; std::ofstream fmOfs("out_features.txt", std::ios_base::out | std::ios_base::trunc); if (fmOfs.is_open()) for (std::size_t i = 0; i < Device::size.featureMapsize; ++i) fmOfs << static_cast<unsigned int>(featureMaps[i]) % 255 << " "; fmOfs.close(); cout << " ..." << std::endl; delete[] outImage; delete[] featureMaps; */ cout << std::endl << std::endl; cout << " " << Device::size.outImageWidth << " X " << Device::size.outImageHeight << std::endl; cout << " " << Device::size.outImageWidth << " X " << Device::size.outImageHeight * filtersCount << std::endl << std::endl; cout << " CPU: " << cpuTime << " "<< std::endl; //cout << " GPU: " << gpuTime << " " << std::endl; cout << " Enter." << std::endl; getchar(); return 0; }
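Every buffer size in the allocateMemory and cpuCNN routines above comes from two rules: a valid-convolution output edge of (inputEdge - filterLineSize) / stride + 1, and a flattened RGB offset of row * width * 3 + col * 3 + color. A compact illustration of those helpers with hypothetical image and filter sizes:

#include <cstddef>
#include <iostream>

// Output edge length for a valid (no-padding) convolution, as in Device::allocateMemory.
std::size_t outEdge(std::size_t inEdge, std::size_t filterLineSize, std::size_t stride) {
    return (inEdge - filterLineSize) / stride + 1;
}

// Flattened offset of one colour channel of pixel (row, col) in an H x W x 3 image.
std::size_t colorPos(std::size_t width, std::size_t row, std::size_t col, std::size_t color) {
    return row * width * 3 + col * 3 + color;
}

int main() {
    // Hypothetical input: 640x480 RGB image, 3x3 filters, stride 1, 4 filters.
    std::size_t W = 640, H = 480, K = 3, stride = 1, filters = 4;
    std::size_t outW = outEdge(W, K, stride), outH = outEdge(H, K, stride);
    std::cout << "out image: " << outW << " x " << outH
              << ", doubles: " << outW * outH * 3 << "\n";
    std::cout << "feature maps, doubles: " << filters * outW * outH * 3 << "\n";
    std::cout << "offset of G at (10, 20): " << colorPos(outW, 10, 20, 1) << "\n";
    return 0;
}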
5a4a3d2c91ce8106fa03dfa5c31ef93a69736b5c.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include <chrono> #include <cstdint> #include <fstream> #include <iostream> #include <stdio.h> #include <vector> using std::cin; using std::cout; /******************************************************************************************************************************************************* * * DEVICE * *******************************************************************************************************************************************************/ namespace Device { // Количество потоков на измерение, которое мы будем использовать. const int nThreads = 32; // Исходное изображение. double *inImage = nullptr; // Выходное изображение. double *outImage = nullptr; // Веса фильтров (размерности фильтров должны быть одинаковыми. double *filters = nullptr; // Результат работы фильтров double *featureMaps = nullptr; // Вспомогательная структура со всеми размерами (в штуках, а не в байтах). struct Size { std::size_t inImageSize; std::size_t outImageWidth; std::size_t outImageHeight; std::size_t outImageSize; std::size_t kernelWeightsSize; std::size_t featureMapsize; } size; // Освобождает память. static void freeMemory() { cudaFree(inImage); cudaFree(outImage); cudaFree(filters); cudaFree(featureMaps); } /* * @param width - ширина изображения. * @param height - высота изображения. * @param stride - сдвиг окна фильтра. * @param filterLineSize - размер строки в матрице одного фильтра. Должен быть нечетным. * @param filtersCount - количество фильтров. * @note Подразумевается, что размеры всех фильтров одинаковы. */ static int allocateMemory(std::size_t width, std::size_t height, std::size_t stride, std::size_t filterLineSize, std::size_t filtersCount) { // Для контроля ошибок выделения памяти. auto cudaError = cudaSuccess; // Выделяем память. // Память выделяется в БАЙТАХ, поэтому даже для int или float нужно домножать на sizeof (то есть на количество байт, которое занимает переменная типа). auto kernelWeightsSize = filtersCount * filterLineSize * filterLineSize; cudaError = cudaMalloc(&filters, kernelWeightsSize * sizeof(*filters)); if (cudaSuccess != cudaError) { freeMemory(); return -1; } auto inImageSize = width * height * 3; cudaError = cudaMalloc(&inImage, inImageSize * sizeof(*inImage)); if (cudaSuccess != cudaError) { freeMemory(); return -1; } auto outImageWidth = ((width - filterLineSize) / stride + 1); auto outImageHeight = ((height - filterLineSize) / stride + 1); auto outImageSize = outImageWidth * outImageHeight * 3; cudaError = cudaMalloc(&outImage, outImageSize * sizeof(*outImage)); if (cudaSuccess != cudaError) { freeMemory(); return -1; } auto featureMapsize = filtersCount * outImageSize; cudaError = cudaMalloc(&featureMaps, featureMapsize * sizeof(*featureMaps)); if (cudaSuccess != cudaError) { freeMemory(); return -1; } // Заполняем выделенную память нулями. cudaError = cudaMemset(filters, 0, kernelWeightsSize * sizeof(*filters)); if (cudaSuccess != cudaError) { freeMemory(); return -1; } cudaError = cudaMemset(inImage, 0, inImageSize * sizeof(*inImage)); if (cudaSuccess != cudaError) { freeMemory(); return -1; } cudaError = cudaMemset(outImage, 0, outImageSize * sizeof(*outImage)); if (cudaSuccess != cudaError) { freeMemory(); return -1; } cudaError = cudaMemset(featureMaps, 0, featureMapsize * sizeof(*featureMaps)); if (cudaSuccess != cudaError) { freeMemory(); return -1; } // Заполняем структуру с размерами элементов, объявленную выше. 
size.kernelWeightsSize = kernelWeightsSize; size.inImageSize = inImageSize; size.outImageWidth = outImageWidth; size.outImageHeight = outImageHeight; size.outImageSize = outImageSize; size.featureMapsize = featureMapsize; return 0; } } /* Функция ядра, являющаяся имитацией сверточного слоя нейросети. * * Эта функция работает следующим образом * 1. на вход подается изображение inImage. Это изображение имеет структуру трехмерного массива [[[r, g, b], [r, g, b], ..., ], [[r, g, b], [r, g, b], ..., ], ... ] * здесь [r, g, b] - это пиксель (так как он состоит из трех цветов) * [[r, g, b], [r, g, b], ..., ] - это строчка пикселов * [[[r, g, b], [r, g, b], ..., ], [[r, g, b], [r, g, b], ..., ], ... ] - контейнер таких строчек. * * У изображения есть ширина и высота. Массив имеет размерность [высота [ширина [3]]]. Об этом важно помнить!!! * * Если развернуть это изображение в одномерный массив, то его размерность будет высота Х ширина Х 3, тогда * [[[r, g, b], [r, g, b], ..., ], [[r, g, b], [r, g, b], ..., ], ... ] превратится в r, g, b, r, g, b, r, g, b, ... * * ЗАМЕЧАНИЕ * Мы будем говорить о ПИКСЕЛЕ (или ТОЧКЕ) и о ЦВЕТЕ. Пиксель - это [r, g, b] (то есть иногда для удобства мы будем представлять, как будто мы работаем не с трехмерным массивом, а с двухмерным). * Цвет - это конкретное значение (от 0 до 255) конкретного цвета конкретного пикселя. Когда мы будем говорить о цвете, никакой абстракции уже не останется. * * 2. на вход подаются фильтры. Это массив вида [ [[1, 1, 1,], [1, 1, 1], [1, 1, 1]], ... ]. В данном случае передается массив матриц-фильтров 3Х3 * * ЗАМЕЧАНИЯ * фильтры обязательно должны быть квадратными * размерность обязательно нечетная * все фильтры имеют одинаковую размерность * * Фильтры - это матрицы, которые позволяют находить на изображении специальные особенности, подобно тому, как работает зрительная кора головного мозга. * Например, это могут быть прямые линии, линии под наклоном, а могут быть и сложные паттерны типа человеческого лица. * Каждый фильтр после применения к изображению формирует новое изображение меньшей размерности. На этом изображении (в зависимости от того, насколько правильно подобраны веса) * четко отображаются признаки, описанные выше. Все эти изображения формируют карту призаков или featureMaps. Эти изображения мы будем сохранять в одноименную переменную. * Таким образом, featureMaps будет по сути массивом изображений в виде [[[[r, g, b], [r, g, b], ..., ], [[r, g, b], [r, g, b], ..., ], ... ], ...] * Сколько фильтов, столько и изображений, причем идут они соответственно номерам, то есть первый фильтр формирует первое изображение в массиве, второй - второе и т.д. * * В тот же самый момент, мы возьмем все карты признаков, попиксельно сложим, разделим на количество фильтров, и результат сохраним в outImage. По способу хренения он идентичен inImage (однако меньшего размера). * * * @param inImage - входное изображение для обработки. * @param width - ширина изображения. * @param height - высота изображения. * @param filters - веса фильтров. * @param filtersCount - количество фильтров. * @param stride - смещение фильтра на следующем шаге. * @param outImage - выходное изображение. * @param featureMaps - карты признаков. * @param outWidth - ширина выходного изображения. * @param outHeight - высота выходного изображения. 
*/ /* __global__ void gpuCNN( const double *inImage, std::size_t width, std::size_t height, double *filters, std::size_t filtersCount, std::size_t filterLineSize, std::size_t stride, double *outImage, double *featureMaps, std::size_t outWidth, std::size_t outHeight) { auto halfLineSize = filterLineSize / 2; stride = (0 == stride) ? 1 : stride; auto outPixelX = threadIdx.x + blockIdx.x * blockDim.x; auto outPixelY = threadIdx.y + blockIdx.y * blockDim.y; if (outPixelX < outWidth && outPixelY < outHeight) { auto pixelX = outPixelX * stride + halfLineSize; auto pixelY = outPixelY * stride + halfLineSize; // Функция ядра на GPU for(std::size_t colorIdx = 0; colorIdx < 3; ++colorIdx) { double outImageSum = 0; auto outColorPos = outPixelY*outWidth*3 + outPixelX*3 + colorIdx; for(std::size_t filterIdx = 0; filterIdx < filtersCount; ++filterIdx) { double currentFilterSum = 0; for(std::size_t i = 0; i < filterLineSize; ++i) { for(std::size_t j = 0; j < filterLineSize; ++j) { auto convPixelX = pixelX + j - halfLineSize; auto convPixelY = pixelY + i - halfLineSize; auto colorPos = convPixelY*width*3 + convPixelX*3 + colorIdx; auto weightPos = filterIdx*filterLineSize*filterLineSize + i*filterLineSize + j; currentFilterSum += inImage[colorPos] * filters[weightPos]; } } outImageSum += currentFilterSum; featureMaps[filterIdx*outWidth*outHeight*3 + outColorPos] = currentFilterSum; } outImage[outColorPos] = outImageSum / (float)filtersCount; } } } */ /******************************************************************************************************************************************************* * * DEVICE * *******************************************************************************************************************************************************/ void cpuCNN( const std::vector<double> &inImage, std::size_t width, std::size_t height, const std::vector<double> &filters, std::size_t filtersCount, std::size_t filterLineSize, std::size_t stride, std::vector<double> &outImage, std::vector<double> &featureMaps, std::size_t outWidth, std::size_t outHeight ) { static const auto halfLineSize = filterLineSize / 2; //stride = (0 == stride) ? 
1 : stride; for(std::size_t x = 0; x < outWidth; ++x) { for(std::size_t y = 0; y < outHeight; ++y) { auto pixelX = x * stride + halfLineSize; auto pixelY = y * stride + halfLineSize; // Функция на CPU for(std::size_t colorIdx = 0; colorIdx < 3; ++colorIdx) { double outImageSum = 0; auto outPixelX = (pixelX - halfLineSize) / stride; auto outPixelY = (pixelY - halfLineSize) / stride; auto outColorPos = outPixelY*outWidth*3 + outPixelX*3 + colorIdx; for(std::size_t filterIdx = 0; filterIdx < filtersCount; ++filterIdx) { double currentFilterSum = 0; for(std::size_t i = 0; i < filterLineSize; ++i) { for(std::size_t j = 0; j < filterLineSize; ++j) { auto convPixelX = pixelX + j - halfLineSize; auto convPixelY = pixelY + i - halfLineSize; auto colorPos = convPixelY*width*3 + convPixelX*3 + colorIdx; auto weightPos = filterIdx*filterLineSize*filterLineSize + i*filterLineSize + j; currentFilterSum += inImage[colorPos] * filters[weightPos]; } } outImageSum += currentFilterSum; featureMaps[filterIdx*outWidth*outHeight*3 + outColorPos] = currentFilterSum; } outImage[outColorPos] = outImageSum / (float)filtersCount; } } } } /******************************************************************************************************************************************************* * * MAIN * *******************************************************************************************************************************************************/ namespace CliArgs { // Количество аргументов, которое ожидает программа (имя программы среди них с индексом 0, поэтому количество на 1 больше). static const int N_ARGS = 5; // Путь к входному изображению. static const int IN_FILE_POS = 1; // Размерность файла - ширина. static const int IMG_WIDTH = 2; // Размерность файла - высота. static const int IMG_HEIGHT = 3; // Страйд static const int STRIDE = 4; } /******************************************************************************************************************************************************* * * MAIN * *******************************************************************************************************************************************************/ // ВАЖНО // Дабы окончательно не усложнять программу, проверки вводимых данных делаются по минимуму. int main(int argc, char *argv[]) { #if defined(WIN32) || defined(_WIN32) || defined(__WIN32) && !defined(__CYGWIN__) setlocale(0, "russian"); #endif // Проверяем количество аргументов. if (CliArgs::N_ARGS != argc) { cout << "Неверное количество аргументов." << std::endl; getchar(); return 1; } // Извлекаем имена файлов. auto imageFilePath = argv[CliArgs::IN_FILE_POS]; // Извлекаем размерность картинки. auto imageWidth = atoi(argv[CliArgs::IMG_WIDTH]); auto imageHeight = atoi(argv[CliArgs::IMG_HEIGHT]); // Извлекаем страйд. auto stride = atoi(argv[CliArgs::STRIDE]); auto imageSize = imageWidth * imageHeight * 3; // Читаем данные из файла с изображением. std::ifstream ifs(imageFilePath, std::ios_base::in); if (!ifs.is_open()) { cout << "Невозможно открыть файл " << imageFilePath << std::endl; getchar(); return 1; } std::cout << "Начато чтение из файла..." << std::endl; std::vector<double> imageData(imageSize); for (std::size_t i = 0; i < imageSize; ++i) ifs >> imageData[i]; ifs.close(); std::cout << "Чтение закончено" << std::endl; // Заполняем фильтры. 
auto filterLineSize = 3; auto filtersCount = 4; std::vector<double> filters = { 0.5, -1, 0.5, 0.5, -1, 0.5, 0.5, -1, 0.5, 0, 0, 2, 0, 2, 0, 2, 0, 0, 2, 0, 0, 0, 2, 0, 0, 0, 2, -.5, -0.5, -0.5, 1, 1, 1, -0.5, -0.5, -0.5 }; // Выделяем память на устройстве. if (0 != Device::allocateMemory(imageWidth, imageHeight, stride, filterLineSize, filtersCount)) { cout << "Ошибка выделения памяти на графической карте" << std::endl; getchar(); return 1; } cout << "Закончено выделение памяти на устройстве" << std::endl; // Копируем данные HOST -> GPU. auto cudaError = cudaSuccess; cudaError = cudaMemcpy(Device::inImage, imageData.data(), imageSize * sizeof(imageData[0]), cudaMemcpyHostToDevice); if (cudaSuccess != cudaError) { cout << "Ошибка при копировании результата на устройство: " << cudaError << std::endl; getchar(); Device::freeMemory(); return 1; } cudaError = cudaMemcpy(Device::filters, filters.data(), filters.size() * sizeof(filters[0]), cudaMemcpyHostToDevice); if (cudaSuccess != cudaError) { cout << "Ошибка при копировании результата на устройство: " << cudaError << std::endl; getchar(); Device::freeMemory(); return 1; } cout << "Закончено копирование данных на устройство" << std::endl; // Расчет на CPU. std::vector<double> cpuOutImage(Device::size.outImageSize, 0.0); std::vector<double> cpuFeatureMaps(Device::size.featureMapsize, 0.0); // Запуск функции на CPU. auto cpuBeginTime = std::chrono::steady_clock::now(); cpuCNN( imageData, imageWidth, imageHeight, filters, filtersCount, filterLineSize, stride, cpuOutImage, cpuFeatureMaps, Device::size.outImageWidth, Device::size.outImageHeight ); auto cpuTime = std::chrono::duration_cast<std::chrono::milliseconds>(std::chrono::steady_clock::now() - cpuBeginTime).count(); // Пишем в файлы. std::cout << "Начата запись изображения в файл..." << std::endl; std::ofstream cpuImgOfs("cpu_out_image.txt", std::ios_base::out | std::ios_base::trunc); if (cpuImgOfs.is_open()) for (std::size_t i = 0; i < Device::size.outImageSize; ++i) cpuImgOfs << static_cast<unsigned int>(cpuOutImage[i]) % 255 << " "; cpuImgOfs.close(); cout << "Запись изображения в файл закончена..." << std::endl; cout << "Начата запись карты признаков в файл..." << std::endl; std::ofstream cpuFmOfs("cpu_out_features.txt", std::ios_base::out | std::ios_base::trunc); if (cpuFmOfs.is_open()) for (std::size_t i = 0; i < Device::size.featureMapsize; ++i) cpuFmOfs << static_cast<unsigned int>(cpuFeatureMaps[i]) % 255 << " "; cpuFmOfs.close(); cout << "Запись карты признаков в файл закончена..." << std::endl; //delete[] cpuOutImage; //delete[] cpuFeatureMaps; // Расчет на GPU. /* dim3 threads(Device::nThreads, Device::nThreads); auto nBlocksX = Device::size.outImageWidth / threads.x; nBlocksX += (0 == Device::size.outImageWidth % threads.x) ? 0 : 1; auto nBlocksY = Device::size.outImageHeight / threads.y; nBlocksY += (0 == Device::size.outImageHeight % threads.y) ? 0 : 1; dim3 blocks(nBlocksX, nBlocksY); Запуск функции ядра. auto gpuBeginTime = std::chrono::steady_clock::now(); gpuCNN <<< blocks, threads >>> ( Device::inImage, imageWidth, imageHeight, Device::filters, filtersCount, filterLineSize, stride, Device::outImage, Device::featureMaps, Device::size.outImageWidth, Device::size.outImageHeight ); cudaDeviceSynchronize(); auto gpuTime = std::chrono::duration_cast<std::chrono::milliseconds>(std::chrono::steady_clock::now() - gpuBeginTime).count(); cout << "Закончен расчет на GPU" << std::endl; Теперь тащим с GPU результат. 
auto outImage = new double[Device::size.outImageSize]; auto featureMaps = new double[Device::size.featureMapsize]; cudaError = cudaMemcpy(outImage, Device::outImage, Device::size.outImageSize * sizeof(*outImage), cudaMemcpyDeviceToHost); if (cudaSuccess != cudaError) { cout << "Ошибка при копировании изображения с устройства: " << cudaError << std::endl; getchar(); Device::freeMemory(); return 1; } cudaError = cudaMemcpy(featureMaps, Device::featureMaps, Device::size.featureMapsize * sizeof(*featureMaps), cudaMemcpyDeviceToHost); if (cudaSuccess != cudaError) { cout << "Ошибка при копировании карт признаков с устройства: " << cudaError << std::endl; getchar(); Device::freeMemory(); return 1; } Device::freeMemory(); cout << "Копирование результата с устройства закончено" << std::endl; Пишем в файлы. std::cout << "Начата запись изображения в файл..." << std::endl; std::ofstream imgOfs("out_image.txt", std::ios_base::out | std::ios_base::trunc); if (imgOfs.is_open()) for (std::size_t i = 0; i < Device::size.outImageSize; ++i) imgOfs << static_cast<unsigned int>(outImage[i]) % 255 << " "; imgOfs.close(); cout << "Запись изображения в файл закончена..." << std::endl; cout << "Начата запись карты признаков в файл..." << std::endl; std::ofstream fmOfs("out_features.txt", std::ios_base::out | std::ios_base::trunc); if (fmOfs.is_open()) for (std::size_t i = 0; i < Device::size.featureMapsize; ++i) fmOfs << static_cast<unsigned int>(featureMaps[i]) % 255 << " "; fmOfs.close(); cout << "Запись карты признаков в файл закончена..." << std::endl; delete[] outImage; delete[] featureMaps; */ cout << std::endl << std::endl; cout << "Полученное изображение имеет параметры " << Device::size.outImageWidth << " X " << Device::size.outImageHeight << std::endl; cout << "Карта признаков имеет параметры " << Device::size.outImageWidth << " X " << Device::size.outImageHeight * filtersCount << std::endl << std::endl; cout << "Время на CPU: " << cpuTime << " миллисекунд "<< std::endl; //cout << "Время на GPU: " << gpuTime << " миллисекунд" << std::endl; cout << "Для выхода нажмите Enter." << std::endl; getchar(); return 0; }
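The commented-out GPU path above sizes its grid with an integer division followed by a remainder check; the same rounding-up is commonly written as a single ceil-division. A host-only sketch of that launch-shape calculation with hypothetical output sizes (the kernel itself stays as in the file):

#include <cstdio>

// Round-up integer division, equivalent to the divide-then-"+1 if remainder" in the GPU path.
static unsigned ceilDiv(unsigned a, unsigned b) { return (a + b - 1) / b; }

int main() {
    const unsigned nThreads = 32;           // Device::nThreads in the file above
    const unsigned outW = 638, outH = 478;  // hypothetical output image size

    unsigned nBlocksX = ceilDiv(outW, nThreads);
    unsigned nBlocksY = ceilDiv(outH, nThreads);
    // The kernel would then be launched with dim3(nBlocksX, nBlocksY) blocks of
    // dim3(nThreads, nThreads) threads, each thread guarding against h/w overflow.
    std::printf("grid %u x %u, block %u x %u\n", nBlocksX, nBlocksY, nThreads, nThreads);
    return 0;
}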
c0690e971260fd86c5ee84f44ed5baff7cacb041.hip
// !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "gpu_latency_setup_kernel.cu"
#include <chrono>
#include <iostream>

using namespace std;
using namespace std::chrono;

int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};

int main(int argc, char **argv) {
    hipSetDevice(0);
    char *p;
    int matrix_len = strtol(argv[1], &p, 10);
    for (int matrix_looper = 0; matrix_looper < matrix_len; matrix_looper++) {
        for (int block_looper = 0; block_looper < 20; block_looper++) {
            int XSIZE = matrices_[matrix_looper][0], YSIZE = matrices_[matrix_looper][1],
                BLOCKX = blocks_[block_looper][0], BLOCKY = blocks_[block_looper][1];
            int *buffer = NULL;
            hipMalloc(&buffer, XSIZE * YSIZE);
            size_t delta = 1;
            size_t elements = 1;
            int iXSIZE = XSIZE;
            int iYSIZE = YSIZE;
            while (iXSIZE % BLOCKX != 0) { iXSIZE++; }
            while (iYSIZE % BLOCKY != 0) { iYSIZE++; }
            dim3 gridBlock(iXSIZE / BLOCKX, iYSIZE / BLOCKY);
            dim3 threadBlock(BLOCKX, BLOCKY);
            hipFree(0);
            hipLaunchKernelGGL((gpu_latency_setup_kernel), dim3(gridBlock), dim3(threadBlock), 0, 0, buffer, delta, elements);
            hipDeviceSynchronize();
            for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
                hipLaunchKernelGGL((gpu_latency_setup_kernel), dim3(gridBlock), dim3(threadBlock), 0, 0, buffer, delta, elements);
            }
            auto start = steady_clock::now();
            for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
                hipLaunchKernelGGL((gpu_latency_setup_kernel), dim3(gridBlock), dim3(threadBlock), 0, 0, buffer, delta, elements);
            }
            auto end = steady_clock::now();
            auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
            cout << '[' << usecs.count() << ',' << '(' << BLOCKX << ',' << BLOCKY << ')' << ',' << '(' << XSIZE << ',' << YSIZE << ')' << ']' << endl;
        }
    }
}
c0690e971260fd86c5ee84f44ed5baff7cacb041.cu
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "gpu_latency_setup_kernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
    cudaSetDevice(0);
    char* p;
    int matrix_len=strtol(argv[1], &p, 10);
    for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
        for(int block_looper=0;block_looper<20;block_looper++){
            int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
            int *buffer = NULL;
            cudaMalloc(&buffer, XSIZE*YSIZE);
            size_t delta = 1;
            size_t elements = 1;
            int iXSIZE= XSIZE;
            int iYSIZE= YSIZE;
            while(iXSIZE%BLOCKX!=0) { iXSIZE++; }
            while(iYSIZE%BLOCKY!=0) { iYSIZE++; }
            dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
            dim3 threadBlock(BLOCKX, BLOCKY);
            cudaFree(0);
            gpu_latency_setup_kernel<<<gridBlock,threadBlock>>>(buffer,delta,elements);
            cudaDeviceSynchronize();
            for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
                gpu_latency_setup_kernel<<<gridBlock,threadBlock>>>(buffer,delta,elements);
            }
            auto start = steady_clock::now();
            for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
                gpu_latency_setup_kernel<<<gridBlock,threadBlock>>>(buffer,delta,elements);
            }
            auto end = steady_clock::now();
            auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
            cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
        }
    }
}
ea681896308b307e52658f023ba5aaf7eaa17201.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "kernel.h" #include "reducedMath.h" #include <iostream> using nvinfer1::rt::reduced_divisor; template <unsigned nthdsPerCTA> __launch_bounds__(nthdsPerCTA) __global__ void gridAnchorKernel( const GridAnchorParameters param, const int numAspectRatios, reduced_divisor divObj, const float* widths, const float* heights, float* outputData ) { // output dims: (H, W, param.numMinSize, (1+haveMaxSize+numAR-1), 4) const int dim = param.H * param.W * numAspectRatios; /* * Parameters used to calculate the bounding box coordinates back to input image scale * Normally we calculate the anchorStride = image_input_size (in pixel) / feature_map_size * Here we do not use image_input_size for the moment * Instead we use 1.0 * The coordinates calculated are scaled by the input image size. * Most of the coordinates will be in a range of [0, 1], except for the bounding box coordinates going outside of the image * Every coordinate will go back to the pixel coordinates in the input image if being multiplied by image_input_size * Here we implicitly assumes the image input and feature map are square */ float anchorStride = (1.0 / param.H); float anchorOffset = 0.5 * anchorStride; int tid = blockIdx.x * blockDim.x + threadIdx.x; if (tid >= dim) { return; } int arId, currIndex; divObj.divmod(tid, currIndex, arId); const int w = currIndex % param.W; const int h = currIndex / param.W; // Center coordinates float yC = h * anchorStride + anchorOffset; float xC = w * anchorStride + anchorOffset; // x_min, y_min float xMin = xC - 0.5 * widths[arId]; float yMin = yC - 0.5 * heights[arId]; // x_max, y_max float xMax = xC + 0.5 * widths[arId]; float yMax = yC + 0.5 * heights[arId]; outputData[tid * 4] = xMin; outputData[tid * 4 + 1] = yMin; outputData[tid * 4 + 2] = xMax; outputData[tid * 4 + 3] = yMax; // Remember to move the output cursor float* output = outputData + dim * 4; // Simply copying the variance output[tid * 4] = param.variance[0]; output[tid * 4 + 1] = param.variance[1]; output[tid * 4 + 2] = param.variance[2]; output[tid * 4 + 3] = param.variance[3]; } using nvinfer1::rt::reduced_divisor; template <unsigned nthdsPerCTA> __launch_bounds__(nthdsPerCTA) __global__ void gridAnchorRectKernel( const GridAnchorParameters param, const int numAspectRatios, reduced_divisor divObj, const float* widths, const float* heights, float* outputData ) { // output dims: (H, W, param.numMinSize, (1+haveMaxSize+numAR-1), 4) const int dim = param.H * param.W * numAspectRatios; /* * Parameters used to calculate the bounding box coordinates back to input image scale * Normally we calculate the anchorStride = image_input_size (in pixel) / feature_map_size * Here we do not use image_input_size for the moment * Instead we use 1.0 * The coordinates calculated are scaled by the input image size. 
* Most of the coordinates will be in a range of [0, 1], except for the bounding box coordinates going outside of the image * Every coordinate will go back to the pixel coordinates in the input image if being multiplied by image_input_size * Here we implicitly assumes the image input and feature map are square */ float anchorStrideH = (1.0 / param.H); float anchorStrideW = (1.0 / param.W); float anchorOffsetH = 0.5 * anchorStrideH; float anchorOffsetW = 0.5 * anchorStrideW; int tid = blockIdx.x * blockDim.x + threadIdx.x; if (tid >= dim) { return; } int arId, currIndex; divObj.divmod(tid, currIndex, arId); const int w = currIndex % param.W; const int h = currIndex / param.W; // Center coordinates float yC = h * anchorStrideH + anchorOffsetH; float xC = w * anchorStrideW + anchorOffsetW; // x_min, y_min float xMin = xC - 0.5 * widths[arId]; float yMin = yC - 0.5 * heights[arId]; // x_max, y_max float xMax = xC + 0.5 * widths[arId]; float yMax = yC + 0.5 * heights[arId]; outputData[tid * 4] = xMin; outputData[tid * 4 + 1] = yMin; outputData[tid * 4 + 2] = xMax; outputData[tid * 4 + 3] = yMax; // Remember to move the output cursor float* output = outputData + dim * 4; // Simply copying the variance output[tid * 4] = param.variance[0]; output[tid * 4 + 1] = param.variance[1]; output[tid * 4 + 2] = param.variance[2]; output[tid * 4 + 3] = param.variance[3]; } pluginStatus_t anchorGridInference( hipStream_t stream, const GridAnchorParameters param, const int numAspectRatios, const void* widths, const void* heights, void* outputData ) { const int dim = param.H * param.W * numAspectRatios; reduced_divisor divObj(numAspectRatios); if (dim > 5120) { const int BS = 128; const int GS = (dim + BS - 1) / BS; hipLaunchKernelGGL(( gridAnchorKernel<BS>), dim3(GS), dim3(BS), 0, stream, param, numAspectRatios, divObj, (const float*) widths, (const float*) heights, (float*) outputData); } else { const int BS = 32; const int GS = (dim + BS - 1) / BS; hipLaunchKernelGGL(( gridAnchorKernel<BS>), dim3(GS), dim3(BS), 0, stream, param, numAspectRatios, divObj, (const float*) widths, (const float*) heights, (float*) outputData); } CSC(hipGetLastError(), STATUS_FAILURE); return STATUS_SUCCESS; }
ea681896308b307e52658f023ba5aaf7eaa17201.cu
/* * Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "kernel.h" #include "reducedMath.h" #include <iostream> using nvinfer1::rt::reduced_divisor; template <unsigned nthdsPerCTA> __launch_bounds__(nthdsPerCTA) __global__ void gridAnchorKernel( const GridAnchorParameters param, const int numAspectRatios, reduced_divisor divObj, const float* widths, const float* heights, float* outputData ) { // output dims: (H, W, param.numMinSize, (1+haveMaxSize+numAR-1), 4) const int dim = param.H * param.W * numAspectRatios; /* * Parameters used to calculate the bounding box coordinates back to input image scale * Normally we calculate the anchorStride = image_input_size (in pixel) / feature_map_size * Here we do not use image_input_size for the moment * Instead we use 1.0 * The coordinates calculated are scaled by the input image size. * Most of the coordinates will be in a range of [0, 1], except for the bounding box coordinates going outside of the image * Every coordinate will go back to the pixel coordinates in the input image if being multiplied by image_input_size * Here we implicitly assumes the image input and feature map are square */ float anchorStride = (1.0 / param.H); float anchorOffset = 0.5 * anchorStride; int tid = blockIdx.x * blockDim.x + threadIdx.x; if (tid >= dim) { return; } int arId, currIndex; divObj.divmod(tid, currIndex, arId); const int w = currIndex % param.W; const int h = currIndex / param.W; // Center coordinates float yC = h * anchorStride + anchorOffset; float xC = w * anchorStride + anchorOffset; // x_min, y_min float xMin = xC - 0.5 * widths[arId]; float yMin = yC - 0.5 * heights[arId]; // x_max, y_max float xMax = xC + 0.5 * widths[arId]; float yMax = yC + 0.5 * heights[arId]; outputData[tid * 4] = xMin; outputData[tid * 4 + 1] = yMin; outputData[tid * 4 + 2] = xMax; outputData[tid * 4 + 3] = yMax; // Remember to move the output cursor float* output = outputData + dim * 4; // Simply copying the variance output[tid * 4] = param.variance[0]; output[tid * 4 + 1] = param.variance[1]; output[tid * 4 + 2] = param.variance[2]; output[tid * 4 + 3] = param.variance[3]; } using nvinfer1::rt::reduced_divisor; template <unsigned nthdsPerCTA> __launch_bounds__(nthdsPerCTA) __global__ void gridAnchorRectKernel( const GridAnchorParameters param, const int numAspectRatios, reduced_divisor divObj, const float* widths, const float* heights, float* outputData ) { // output dims: (H, W, param.numMinSize, (1+haveMaxSize+numAR-1), 4) const int dim = param.H * param.W * numAspectRatios; /* * Parameters used to calculate the bounding box coordinates back to input image scale * Normally we calculate the anchorStride = image_input_size (in pixel) / feature_map_size * Here we do not use image_input_size for the moment * Instead we use 1.0 * The coordinates calculated are scaled by the input image size. 
* Most of the coordinates will be in a range of [0, 1], except for the bounding box coordinates going outside of the image * Every coordinate will go back to the pixel coordinates in the input image if being multiplied by image_input_size * Here we implicitly assumes the image input and feature map are square */ float anchorStrideH = (1.0 / param.H); float anchorStrideW = (1.0 / param.W); float anchorOffsetH = 0.5 * anchorStrideH; float anchorOffsetW = 0.5 * anchorStrideW; int tid = blockIdx.x * blockDim.x + threadIdx.x; if (tid >= dim) { return; } int arId, currIndex; divObj.divmod(tid, currIndex, arId); const int w = currIndex % param.W; const int h = currIndex / param.W; // Center coordinates float yC = h * anchorStrideH + anchorOffsetH; float xC = w * anchorStrideW + anchorOffsetW; // x_min, y_min float xMin = xC - 0.5 * widths[arId]; float yMin = yC - 0.5 * heights[arId]; // x_max, y_max float xMax = xC + 0.5 * widths[arId]; float yMax = yC + 0.5 * heights[arId]; outputData[tid * 4] = xMin; outputData[tid * 4 + 1] = yMin; outputData[tid * 4 + 2] = xMax; outputData[tid * 4 + 3] = yMax; // Remember to move the output cursor float* output = outputData + dim * 4; // Simply copying the variance output[tid * 4] = param.variance[0]; output[tid * 4 + 1] = param.variance[1]; output[tid * 4 + 2] = param.variance[2]; output[tid * 4 + 3] = param.variance[3]; } pluginStatus_t anchorGridInference( cudaStream_t stream, const GridAnchorParameters param, const int numAspectRatios, const void* widths, const void* heights, void* outputData ) { const int dim = param.H * param.W * numAspectRatios; reduced_divisor divObj(numAspectRatios); if (dim > 5120) { const int BS = 128; const int GS = (dim + BS - 1) / BS; gridAnchorKernel<BS><<<GS, BS, 0, stream>>>(param, numAspectRatios, divObj, (const float*) widths, (const float*) heights, (float*) outputData); } else { const int BS = 32; const int GS = (dim + BS - 1) / BS; gridAnchorKernel<BS><<<GS, BS, 0, stream>>>(param, numAspectRatios, divObj, (const float*) widths, (const float*) heights, (float*) outputData); } CSC(cudaGetLastError(), STATUS_FAILURE); return STATUS_SUCCESS; }
8624fe8c99d68f0fc5b3629b3cef182009e1e4d1.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<stdio.h>
#include<string.h>
#include<math_constants.h>
//#include<cuseful.h>
//#include <R.h>
#include "hcluster.h"

#define NUM_THREADS 32
#define NUM_BLOCKS 1024

#if 0
// Distance matrix
__device__ float * hcluster_dist_d;
// Number of elements in each cluster
__device__ float * hcluster_count_d;
// Arrays for finding the minimum of each row and column containing the minimum
__device__ float * hcluster_min_val_d;
__device__ size_t * hcluster_min_col_d;
// Arrays telling which cluster merged with which cluster
__device__ int * hcluster_sub_d;
__device__ int * hcluster_sup_d;
// Array of the values merged at
__device__ float * hcluster_merge_val_d;
#endif

__global__ void complete_kernel(float * dist, const size_t pitch_dist, const size_t n,
                                const int * sub, const int * sup, const float * count,
                                const float * val, const size_t iter, const size_t col_offset,
                                const float lambda, const float beta)
{
    const size_t col = col_offset + NUM_THREADS * blockIdx.x + threadIdx.x;
    // If it matters
    if(col < n) {
        int col_winner = sub[iter], row_winner = sup[iter];
        float top_val = dist[col_winner * pitch_dist + col], bot_val = dist[row_winner * pitch_dist + col];
        bot_val = fmaxf(bot_val, top_val);
        if((col == col_winner) || (col == row_winner))
            bot_val = CUDART_INF_F;
        top_val = CUDART_INF_F;
        // Write out
        dist[col_winner * pitch_dist + col] = top_val;
        dist[col * pitch_dist + col_winner] = top_val;
        dist[row_winner * pitch_dist + col] = bot_val;
        dist[col * pitch_dist + row_winner] = bot_val;
    }
}

__global__ void convert_kernel(float * dist, size_t pitch_dist, size_t n)
{
    for(size_t index = threadIdx.x; index < n; index += NUM_THREADS) {
        dist[index * pitch_dist + index] = CUDART_INF_F;
    }
}
8624fe8c99d68f0fc5b3629b3cef182009e1e4d1.cu
#include<stdio.h>
#include<string.h>
#include<math_constants.h>
//#include<cuseful.h>
//#include <R.h>
#include "hcluster.h"

#define NUM_THREADS 32
#define NUM_BLOCKS 1024

#if 0
// Distance matrix
__device__ float * hcluster_dist_d;
// Number of elements in each cluster
__device__ float * hcluster_count_d;
// Arrays for finding the minimum of each row and column containing the minimum
__device__ float * hcluster_min_val_d;
__device__ size_t * hcluster_min_col_d;
// Arrays telling which cluster merged with which cluster
__device__ int * hcluster_sub_d;
__device__ int * hcluster_sup_d;
// Array of the values merged at
__device__ float * hcluster_merge_val_d;
#endif

__global__ void complete_kernel(float * dist, const size_t pitch_dist, const size_t n,
                                const int * sub, const int * sup, const float * count,
                                const float * val, const size_t iter, const size_t col_offset,
                                const float lambda, const float beta)
{
    const size_t col = col_offset + NUM_THREADS * blockIdx.x + threadIdx.x;
    // If it matters
    if(col < n) {
        int col_winner = sub[iter], row_winner = sup[iter];
        float top_val = dist[col_winner * pitch_dist + col], bot_val = dist[row_winner * pitch_dist + col];
        bot_val = fmaxf(bot_val, top_val);
        if((col == col_winner) || (col == row_winner))
            bot_val = CUDART_INF_F;
        top_val = CUDART_INF_F;
        // Write out
        dist[col_winner * pitch_dist + col] = top_val;
        dist[col * pitch_dist + col_winner] = top_val;
        dist[row_winner * pitch_dist + col] = bot_val;
        dist[col * pitch_dist + row_winner] = bot_val;
    }
}

__global__ void convert_kernel(float * dist, size_t pitch_dist, size_t n)
{
    for(size_t index = threadIdx.x; index < n; index += NUM_THREADS) {
        dist[index * pitch_dist + index] = CUDART_INF_F;
    }
}
461a3cfa6fc557444f7a4d7b6605a940904c5b1b.hip
// !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "arrayToData.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
    hipSetDevice(0);
    char* p;
    int matrix_len=strtol(argv[1], &p, 10);
    for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
        for(int block_looper=0;block_looper<20;block_looper++){
            int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
            float *g_odata = NULL;
            hipMalloc(&g_odata, XSIZE*YSIZE);
            uint *keys = NULL;
            hipMalloc(&keys, XSIZE*YSIZE);
            int imgw = 1;
            int imgh = 1;
            int iXSIZE= XSIZE;
            int iYSIZE= YSIZE;
            while(iXSIZE%BLOCKX!=0) { iXSIZE++; }
            while(iYSIZE%BLOCKY!=0) { iYSIZE++; }
            dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
            dim3 threadBlock(BLOCKX, BLOCKY);
            hipFree(0);
            hipLaunchKernelGGL(( arrayToData), dim3(gridBlock),dim3(threadBlock), 0, 0, g_odata,keys,imgw,imgh);
            hipDeviceSynchronize();
            for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
                hipLaunchKernelGGL(( arrayToData), dim3(gridBlock),dim3(threadBlock), 0, 0, g_odata,keys,imgw,imgh);
            }
            auto start = steady_clock::now();
            for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
                hipLaunchKernelGGL(( arrayToData), dim3(gridBlock),dim3(threadBlock), 0, 0, g_odata,keys,imgw,imgh);
            }
            auto end = steady_clock::now();
            auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
            cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
        }
    }
}
461a3cfa6fc557444f7a4d7b6605a940904c5b1b.cu
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "arrayToData.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
    cudaSetDevice(0);
    char* p;
    int matrix_len=strtol(argv[1], &p, 10);
    for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
        for(int block_looper=0;block_looper<20;block_looper++){
            int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
            float *g_odata = NULL;
            cudaMalloc(&g_odata, XSIZE*YSIZE);
            uint *keys = NULL;
            cudaMalloc(&keys, XSIZE*YSIZE);
            int imgw = 1;
            int imgh = 1;
            int iXSIZE= XSIZE;
            int iYSIZE= YSIZE;
            while(iXSIZE%BLOCKX!=0) { iXSIZE++; }
            while(iYSIZE%BLOCKY!=0) { iYSIZE++; }
            dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
            dim3 threadBlock(BLOCKX, BLOCKY);
            cudaFree(0);
            arrayToData<<<gridBlock,threadBlock>>>(g_odata,keys,imgw,imgh);
            cudaDeviceSynchronize();
            for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
                arrayToData<<<gridBlock,threadBlock>>>(g_odata,keys,imgw,imgh);
            }
            auto start = steady_clock::now();
            for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
                arrayToData<<<gridBlock,threadBlock>>>(g_odata,keys,imgw,imgh);
            }
            auto end = steady_clock::now();
            auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
            cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
        }
    }
}
f859dbf5bc2f106f0ad602824ccafd2891a33f27.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <time.h> #include <hip/hip_runtime.h> #include <opencv2/highgui/highgui.hpp> #include <opencv2/core/core.hpp> #include <opencv2/imgproc/imgproc.hpp> // import no include errors #include <fstream> #include <string> #define RED 2 #define GREEN 1 #define BLUE 0 #define TILE_SIZE 32 #define MASK_WIDTH 3 __constant__ float CMask[MASK_WIDTH*MASK_WIDTH]; using namespace cv; using namespace std; __device__ unsigned char setNumber(int value){ if(value < 0) value = 0; else if(value > 255) value = 255; return (unsigned char)value; } __global__ void sobelFilterSM(unsigned char *imageInput, int width, int height, \ unsigned int maskWidth,unsigned char *imageOutput){ __shared__ float N_ds[TILE_SIZE + MASK_WIDTH - 1][TILE_SIZE+ MASK_WIDTH - 1]; int n = maskWidth/2, dest = threadIdx.y*TILE_SIZE+threadIdx.x, destY = dest / (TILE_SIZE+MASK_WIDTH-1), destX = dest % (TILE_SIZE+MASK_WIDTH-1), srcY = blockIdx.y * TILE_SIZE + destY - n, srcX = blockIdx.x * TILE_SIZE + destX - n, src = (srcY * width + srcX); if (srcY >= 0 && srcY < height && srcX >= 0 && srcX < width){N_ds[destY][destX] = imageInput[src];} else{N_ds[destY][destX] = 0;} dest = threadIdx.y * TILE_SIZE + threadIdx.x + TILE_SIZE * TILE_SIZE; destY = dest /(TILE_SIZE + MASK_WIDTH - 1), destX = dest % (TILE_SIZE + MASK_WIDTH - 1); srcY = blockIdx.y * TILE_SIZE + destY - n; srcX = blockIdx.x * TILE_SIZE + destX - n; src = (srcY * width + srcX); if (destY < TILE_SIZE + MASK_WIDTH - 1) { if (srcY >= 0 && srcY < height && srcX >= 0 && srcX < width) N_ds[destY][destX] = imageInput[src]; else N_ds[destY][destX] = 0; } __syncthreads(); int accum = 0, y, x; for (y = 0; y < maskWidth; y++) for (x = 0; x < maskWidth; x++) accum += N_ds[threadIdx.y + y][threadIdx.x + x] * CMask[y * maskWidth + x]; y = blockIdx.y * TILE_SIZE + threadIdx.y; x = blockIdx.x * TILE_SIZE + threadIdx.x; if (y < height && x < width) imageOutput[(y * width + x)] = setNumber(accum); __syncthreads(); } __global__ void img2gray(unsigned char *imageInput, int width, int height, unsigned char *imageOutput){ int row = blockIdx.y*blockDim.y+threadIdx.y; int col = blockIdx.x*blockDim.x+threadIdx.x; if((row < height) && (col < width)){ imageOutput[row*width+col] = imageInput[(row*width+col)*3+RED]*0.299 + imageInput[(row*width+col)*3+GREEN]*0.587 + imageInput[(row*width+col)*3+BLUE]*0.114; } } int main(int argc, char **argv){ hipSetDevice(0);//GTX980 hipError_t error = hipSuccess; clock_t start, end; double cpu_time_used; float h_CMask[] = {-1, 0, 1, -2, 0, 2, -1, 0, 1}; unsigned char *h_dataImage, *d_dataImage, *d_imageOutput, *h_imageOutput, *d_sobelOutput; hipEvent_t startGPU, stopGPU; hipEventCreate(&startGPU); hipEventCreate(&stopGPU); int times = 1; if(argc !=3 && argc != 4){ printf("Enter the image's name and to repeat (op w)\n"); return -1; } bool writeImage = false; if (argc == 4){ writeImage = true; } char* imageName = argv[1]; times = atoi(argv[2]); Mat image; image = imread(imageName, 1); if(!image.data){return -1;} Size s = image.size(); int width = s.width; int height = s.height; int size = sizeof(unsigned char)*width*height*image.channels(); int sizeGray = sizeof(unsigned char)*width*height; string text = string(imageName)+"SCMTimes"; for (int i = 0; i < times; i++){ error = hipMemcpyToSymbol(CMask,h_CMask,sizeof(float)*MASK_WIDTH*MASK_WIDTH); if(error != hipSuccess){printf("Error in Mask \n");exit(-1);} h_dataImage = (unsigned char*)malloc(size); error = hipMalloc((void**)&d_dataImage, size); 
if(error != hipSuccess){printf("Error-> memory allocation of d_dataImage\n");exit(-1);} h_imageOutput = (unsigned char *)malloc(sizeGray); error = hipMalloc((void**)&d_imageOutput, sizeGray); if(error != hipSuccess){printf("Error-> memory allocation of d_imageOutput\n");exit(-1);} error = hipMalloc((void**)&d_sobelOutput, sizeGray); if(error != hipSuccess){printf("Error-> memory allocation of d_sobelOutput\n");exit(-1);} h_dataImage = image.data; error = hipMemcpy(d_dataImage, h_dataImage, size, hipMemcpyHostToDevice); if(error != hipSuccess){printf("Error sending data from host to device in dataImage\n");exit(-1);} int blockSize = TILE_SIZE; dim3 dimBlock(blockSize, blockSize, 1); dim3 dimGrid(ceil(width/float(blockSize)), ceil(height/float(blockSize)), 1); hipLaunchKernelGGL(( img2gray), dim3(dimGrid), dim3(dimBlock), 0, 0, d_dataImage, width, height, d_imageOutput); hipDeviceSynchronize(); hipEventRecord(startGPU); hipLaunchKernelGGL(( sobelFilterSM), dim3(dimGrid), dim3(dimBlock), 0, 0, d_imageOutput, width, height, MASK_WIDTH, d_sobelOutput); hipDeviceSynchronize(); hipEventRecord(stopGPU); error = hipMemcpy(h_imageOutput, d_sobelOutput, sizeGray, hipMemcpyDeviceToHost); if(error != hipSuccess){printf("Error sending data from device to host in imageOutput\n");exit(-1);} hipEventSynchronize(stopGPU); float milliseconds = 0; hipEventElapsedTime(&milliseconds, startGPU, stopGPU); Mat image_sobel; image_sobel.create(height, width, CV_8UC1); image_sobel.data = h_imageOutput; start = clock(); Mat image_sobel_opencv, grad_x, abs_grad_x; cvtColor(image, image_sobel_opencv, CV_BGR2GRAY); Sobel(image_sobel_opencv, grad_x, CV_8UC1, 1, 0, 3, 1, 0, BORDER_DEFAULT); convertScaleAbs(grad_x, abs_grad_x); end = clock(); if (writeImage){ imwrite("./SMsobel.jpg", image_sobel); writeImage = false; } // namedWindow(imageName, WINDOW_NORMAL); namedWindow("Gray Image CUDA", WINDOW_NORMAL); //namedWindow("Sobel Image OpenCV", WINDOW_NORMAL); // imshow(imageName,image); imshow("Gray Image CUDA", image_sobel); // imshow("Sobel Image OpenCV",abs_grad_x); //waitKey(0); cpu_time_used = ((double) (end - start)) /CLOCKS_PER_SEC; printf("Time in CPU: %.10f, time in GPU: %.10f\n", cpu_time_used, milliseconds); ofstream outfile(text.c_str(),ios::binary | ios::app); outfile << cpu_time_used*1000 <<", "<< milliseconds << "\n"; outfile.close(); hipFree(d_dataImage); hipFree(d_imageOutput); hipFree(d_sobelOutput); } return 0; }
f859dbf5bc2f106f0ad602824ccafd2891a33f27.cu
#include <stdio.h>
#include <time.h>
#include <cuda.h>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/core/core.hpp>
#include <opencv2/imgproc/imgproc.hpp> // import no include errors
#include <fstream>
#include <string>

#define RED 2
#define GREEN 1
#define BLUE 0
#define TILE_SIZE 32
#define MASK_WIDTH 3

__constant__ float CMask[MASK_WIDTH*MASK_WIDTH];

using namespace cv;
using namespace std;

__device__ unsigned char setNumber(int value){
    if(value < 0) value = 0;
    else if(value > 255) value = 255;
    return (unsigned char)value;
}

__global__ void sobelFilterSM(unsigned char *imageInput, int width, int height, \
        unsigned int maskWidth,unsigned char *imageOutput){
    __shared__ float N_ds[TILE_SIZE + MASK_WIDTH - 1][TILE_SIZE+ MASK_WIDTH - 1];
    int n = maskWidth/2,
        dest = threadIdx.y*TILE_SIZE+threadIdx.x,
        destY = dest / (TILE_SIZE+MASK_WIDTH-1),
        destX = dest % (TILE_SIZE+MASK_WIDTH-1),
        srcY = blockIdx.y * TILE_SIZE + destY - n,
        srcX = blockIdx.x * TILE_SIZE + destX - n,
        src = (srcY * width + srcX);
    if (srcY >= 0 && srcY < height && srcX >= 0 && srcX < width){N_ds[destY][destX] = imageInput[src];}
    else{N_ds[destY][destX] = 0;}
    dest = threadIdx.y * TILE_SIZE + threadIdx.x + TILE_SIZE * TILE_SIZE;
    destY = dest /(TILE_SIZE + MASK_WIDTH - 1), destX = dest % (TILE_SIZE + MASK_WIDTH - 1);
    srcY = blockIdx.y * TILE_SIZE + destY - n;
    srcX = blockIdx.x * TILE_SIZE + destX - n;
    src = (srcY * width + srcX);
    if (destY < TILE_SIZE + MASK_WIDTH - 1) {
        if (srcY >= 0 && srcY < height && srcX >= 0 && srcX < width)
            N_ds[destY][destX] = imageInput[src];
        else
            N_ds[destY][destX] = 0;
    }
    __syncthreads();
    int accum = 0, y, x;
    for (y = 0; y < maskWidth; y++)
        for (x = 0; x < maskWidth; x++)
            accum += N_ds[threadIdx.y + y][threadIdx.x + x] * CMask[y * maskWidth + x];
    y = blockIdx.y * TILE_SIZE + threadIdx.y;
    x = blockIdx.x * TILE_SIZE + threadIdx.x;
    if (y < height && x < width)
        imageOutput[(y * width + x)] = setNumber(accum);
    __syncthreads();
}

__global__ void img2gray(unsigned char *imageInput, int width, int height, unsigned char *imageOutput){
    int row = blockIdx.y*blockDim.y+threadIdx.y;
    int col = blockIdx.x*blockDim.x+threadIdx.x;
    if((row < height) && (col < width)){
        imageOutput[row*width+col] = imageInput[(row*width+col)*3+RED]*0.299 + imageInput[(row*width+col)*3+GREEN]*0.587 + imageInput[(row*width+col)*3+BLUE]*0.114;
    }
}

int main(int argc, char **argv){
    cudaSetDevice(0);//GTX980
    cudaError_t error = cudaSuccess;
    clock_t start, end;
    double cpu_time_used;
    float h_CMask[] = {-1, 0, 1, -2, 0, 2, -1, 0, 1};
    unsigned char *h_dataImage, *d_dataImage, *d_imageOutput, *h_imageOutput, *d_sobelOutput;
    cudaEvent_t startGPU, stopGPU;
    cudaEventCreate(&startGPU);
    cudaEventCreate(&stopGPU);
    int times = 1;
    if(argc !=3 && argc != 4){
        printf("Enter the image's name and to repeat (op w)\n");
        return -1;
    }
    bool writeImage = false;
    if (argc == 4){
        writeImage = true;
    }
    char* imageName = argv[1];
    times = atoi(argv[2]);
    Mat image;
    image = imread(imageName, 1);
    if(!image.data){return -1;}
    Size s = image.size();
    int width = s.width;
    int height = s.height;
    int size = sizeof(unsigned char)*width*height*image.channels();
    int sizeGray = sizeof(unsigned char)*width*height;
    string text = string(imageName)+"SCMTimes";
    for (int i = 0; i < times; i++){
        error = cudaMemcpyToSymbol(CMask,h_CMask,sizeof(float)*MASK_WIDTH*MASK_WIDTH);
        if(error != cudaSuccess){printf("Error in Mask \n");exit(-1);}
        h_dataImage = (unsigned char*)malloc(size);
        error = cudaMalloc((void**)&d_dataImage, size);
        if(error != cudaSuccess){printf("Error-> memory allocation of d_dataImage\n");exit(-1);}
        h_imageOutput = (unsigned char *)malloc(sizeGray);
        error = cudaMalloc((void**)&d_imageOutput, sizeGray);
        if(error != cudaSuccess){printf("Error-> memory allocation of d_imageOutput\n");exit(-1);}
        error = cudaMalloc((void**)&d_sobelOutput, sizeGray);
        if(error != cudaSuccess){printf("Error-> memory allocation of d_sobelOutput\n");exit(-1);}
        h_dataImage = image.data;
        error = cudaMemcpy(d_dataImage, h_dataImage, size, cudaMemcpyHostToDevice);
        if(error != cudaSuccess){printf("Error sending data from host to device in dataImage\n");exit(-1);}
        int blockSize = TILE_SIZE;
        dim3 dimBlock(blockSize, blockSize, 1);
        dim3 dimGrid(ceil(width/float(blockSize)), ceil(height/float(blockSize)), 1);
        img2gray<<<dimGrid, dimBlock>>>(d_dataImage, width, height, d_imageOutput);
        cudaDeviceSynchronize();
        cudaEventRecord(startGPU);
        sobelFilterSM<<<dimGrid, dimBlock>>>(d_imageOutput, width, height, MASK_WIDTH, d_sobelOutput);
        cudaDeviceSynchronize();
        cudaEventRecord(stopGPU);
        error = cudaMemcpy(h_imageOutput, d_sobelOutput, sizeGray, cudaMemcpyDeviceToHost);
        if(error != cudaSuccess){printf("Error sending data from device to host in imageOutput\n");exit(-1);}
        cudaEventSynchronize(stopGPU);
        float milliseconds = 0;
        cudaEventElapsedTime(&milliseconds, startGPU, stopGPU);
        Mat image_sobel;
        image_sobel.create(height, width, CV_8UC1);
        image_sobel.data = h_imageOutput;
        start = clock();
        Mat image_sobel_opencv, grad_x, abs_grad_x;
        cvtColor(image, image_sobel_opencv, CV_BGR2GRAY);
        Sobel(image_sobel_opencv, grad_x, CV_8UC1, 1, 0, 3, 1, 0, BORDER_DEFAULT);
        convertScaleAbs(grad_x, abs_grad_x);
        end = clock();
        if (writeImage){
            imwrite("./SMsobel.jpg", image_sobel);
            writeImage = false;
        }
        // namedWindow(imageName, WINDOW_NORMAL);
        namedWindow("Gray Image CUDA", WINDOW_NORMAL);
        //namedWindow("Sobel Image OpenCV", WINDOW_NORMAL);
        // imshow(imageName,image);
        imshow("Gray Image CUDA", image_sobel);
        // imshow("Sobel Image OpenCV",abs_grad_x);
        //waitKey(0);
        cpu_time_used = ((double) (end - start)) /CLOCKS_PER_SEC;
        printf("Time in CPU: %.10f, time in GPU: %.10f\n", cpu_time_used, milliseconds);
        ofstream outfile(text.c_str(),ios::binary | ios::app);
        outfile << cpu_time_used*1000 <<", "<< milliseconds << "\n";
        outfile.close();
        cudaFree(d_dataImage);
        cudaFree(d_imageOutput);
        cudaFree(d_sobelOutput);
    }
    return 0;
}
5bd4138b381818f7e8b78c2a8ef43ea8cd94c759.hip
// !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "vdiv.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
    hipSetDevice(0);
    char* p;
    int matrix_len=strtol(argv[1], &p, 10);
    for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
        for(int block_looper=0;block_looper<20;block_looper++){
            int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
            const float *a = NULL;
            hipMalloc(&a, XSIZE*YSIZE);
            const float *b = NULL;
            hipMalloc(&b, XSIZE*YSIZE);
            float *c = NULL;
            hipMalloc(&c, XSIZE*YSIZE);
            int iXSIZE= XSIZE;
            int iYSIZE= YSIZE;
            while(iXSIZE%BLOCKX!=0) { iXSIZE++; }
            while(iYSIZE%BLOCKY!=0) { iYSIZE++; }
            dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
            dim3 threadBlock(BLOCKX, BLOCKY);
            hipFree(0);
            hipLaunchKernelGGL(( vdiv), dim3(gridBlock),dim3(threadBlock), 0, 0, a,b,c);
            hipDeviceSynchronize();
            for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
                hipLaunchKernelGGL(( vdiv), dim3(gridBlock),dim3(threadBlock), 0, 0, a,b,c);
            }
            auto start = steady_clock::now();
            for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
                hipLaunchKernelGGL(( vdiv), dim3(gridBlock),dim3(threadBlock), 0, 0, a,b,c);
            }
            auto end = steady_clock::now();
            auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
            cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
        }
    }
}
5bd4138b381818f7e8b78c2a8ef43ea8cd94c759.cu
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "vdiv.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
    cudaSetDevice(0);
    char* p;
    int matrix_len=strtol(argv[1], &p, 10);
    for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
        for(int block_looper=0;block_looper<20;block_looper++){
            int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
            const float *a = NULL;
            cudaMalloc(&a, XSIZE*YSIZE);
            const float *b = NULL;
            cudaMalloc(&b, XSIZE*YSIZE);
            float *c = NULL;
            cudaMalloc(&c, XSIZE*YSIZE);
            int iXSIZE= XSIZE;
            int iYSIZE= YSIZE;
            while(iXSIZE%BLOCKX!=0) { iXSIZE++; }
            while(iYSIZE%BLOCKY!=0) { iYSIZE++; }
            dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
            dim3 threadBlock(BLOCKX, BLOCKY);
            cudaFree(0);
            vdiv<<<gridBlock,threadBlock>>>(a,b,c);
            cudaDeviceSynchronize();
            for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
                vdiv<<<gridBlock,threadBlock>>>(a,b,c);
            }
            auto start = steady_clock::now();
            for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
                vdiv<<<gridBlock,threadBlock>>>(a,b,c);
            }
            auto end = steady_clock::now();
            auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
            cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
        }
    }
}
cf8744a7bb3eac34eb499f4744fa05cd7380c2eb.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime.h> #include <helper_cuda.h> #include <stdio.h> #include <numeric> #include <vector> #include <iostream> #include <iomanip> #define PI_F 3.141592654f #define DEBUG 1 //undef DEBUG void printArrayAsMatrix(const float* in, const size_t& width, const size_t& height) { #ifdef DEBUG std::cout <<"Printing "<<width<<","<<height<<" array"<< std::endl; for (size_t j = 0; j < height; ++j) { for (size_t i = 0; i < width; ++i) { std::cout <<std::fixed << std::setw(5) // space between numbers << std::setprecision(2) // nubmers after decimal point << in[width*j + i] << ' '; } std::cout << std::endl; } #endif } __global__ void rotateKernel (float * output, hipTextureObject_t texObj, int width, int height, float theta) { // Calculate normalized texture coordinates unsigned int x = blockIdx.x * blockDim.x + threadIdx.x; unsigned int y = blockIdx.y * blockDim.y + threadIdx.y; float u = x / (float)width; float v = y / (float)height; // And regular coordinates unsigned int idx= y * width + x; // Transform coordinates u -= 0.5f; v -= 0.5f; float tu = u * cosf(theta) - v * sinf(theta) + 0.5f; float tv = v * cosf(theta) + u * sinf(theta) + 0.5f; // Read from texture and write to global memory output[idx] = tex2D<float>(texObj, tu, tv); } int main () { // Inputs size_t width = 16; size_t height = 16; size_t size = width * height * sizeof(float); float angle = 90; // in degrees float theta = angle/180*PI_F; // in rad // Initialize host array float * h_data = (float*)malloc(size); for (int i =0; i<height*width; ++i) h_data[i] =(float)i/(height*width); memset(h_data, 0, size/4); // hipArray obj will have elements of 32bits, representing single-precision // floating point numbers hipChannelFormatDesc ch_desc = hipCreateChannelDesc(32,0,0,0, hipChannelFormatKindFloat); hipArray* cu_array; checkCudaErrors(hipMallocArray(&cu_array, &ch_desc, width, height)); checkCudaErrors(hipMemcpyToArray(cu_array, 0, 0, h_data, size, hipMemcpyHostToDevice)); // Specify texture // Texture is going to be bound to a 1D Array, with name cu_array struct hipResourceDesc res_desc; memset(&res_desc, 0, sizeof(res_desc)); res_desc.resType = hipResourceTypeArray; res_desc.res.array.array = cu_array; // Specify texture object parameters // - Clamp mode: if out of bounds clamp index to closest 0 or width | 0 or height // - Without interpoation // - No conversion/normalization of the value read // - Coordinates are normalized to -1,1: useful for trigonometry struct hipTextureDesc tex_desc; memset(&tex_desc, 0, sizeof(tex_desc)); tex_desc.addressMode[0] = hipAddressModeClamp; tex_desc.addressMode[1] = hipAddressModeClamp; tex_desc.filterMode = hipFilterModePoint; tex_desc.readMode = hipReadModeElementType; tex_desc.normalizedCoords = 1; // Copy host memory to hipArray checkCudaErrors(hipMemcpyToArray(cu_array, 0, 0, h_data, size, hipMemcpyHostToDevice)); // Create texture object hipTextureObject_t tex_obj = 0; hipCreateTextureObject(&tex_obj, &res_desc, &tex_desc, NULL); // Allocate result of transformation in device memory float* d_output; checkCudaErrors(hipMalloc(&d_output, size)); // Print host array printArrayAsMatrix(h_data, width, height); // Invoke kernel rotating it once dim3 dimBlock(16, 16); dim3 dimGrid( (width + dimBlock.x - 1) / dimBlock.x, (height + dimBlock.y - 1) / dimBlock.y); hipLaunchKernelGGL(( rotateKernel), dim3(dimGrid), dim3(dimBlock), 0, 0, d_output, tex_obj, width, height, theta); // Print result array checkCudaErrors(hipMemcpy(h_data, 
d_output, size, hipMemcpyDeviceToHost)); printArrayAsMatrix(h_data, width, height); // Copy old result to texture and Invoke kernel rotating it again checkCudaErrors(hipMemcpyToArray(cu_array, 0, 0, d_output, size, hipMemcpyDeviceToDevice)); hipLaunchKernelGGL(( rotateKernel), dim3(dimGrid), dim3(dimBlock), 0, 0, d_output, tex_obj, width, height, theta); // Print result array checkCudaErrors(hipMemcpy(h_data, d_output, size, hipMemcpyDeviceToHost)); printArrayAsMatrix(h_data, width, height); // Destroy texture object checkCudaErrors(hipDestroyTextureObject(tex_obj)); // Free device memory checkCudaErrors(hipFreeArray(cu_array)); checkCudaErrors(hipFree(d_output)); // Free host memory free(h_data); }
cf8744a7bb3eac34eb499f4744fa05cd7380c2eb.cu
#include <cuda_runtime.h> #include <helper_cuda.h> #include <stdio.h> #include <numeric> #include <vector> #include <iostream> #include <iomanip> #define PI_F 3.141592654f #define DEBUG 1 //undef DEBUG void printArrayAsMatrix(const float* in, const size_t& width, const size_t& height) { #ifdef DEBUG std::cout <<"Printing "<<width<<","<<height<<" array"<< std::endl; for (size_t j = 0; j < height; ++j) { for (size_t i = 0; i < width; ++i) { std::cout <<std::fixed << std::setw(5) // space between numbers << std::setprecision(2) // nubmers after decimal point << in[width*j + i] << ' '; } std::cout << std::endl; } #endif } __global__ void rotateKernel (float * output, cudaTextureObject_t texObj, int width, int height, float theta) { // Calculate normalized texture coordinates unsigned int x = blockIdx.x * blockDim.x + threadIdx.x; unsigned int y = blockIdx.y * blockDim.y + threadIdx.y; float u = x / (float)width; float v = y / (float)height; // And regular coordinates unsigned int idx= y * width + x; // Transform coordinates u -= 0.5f; v -= 0.5f; float tu = u * cosf(theta) - v * sinf(theta) + 0.5f; float tv = v * cosf(theta) + u * sinf(theta) + 0.5f; // Read from texture and write to global memory output[idx] = tex2D<float>(texObj, tu, tv); } int main () { // Inputs size_t width = 16; size_t height = 16; size_t size = width * height * sizeof(float); float angle = 90; // in degrees float theta = angle/180*PI_F; // in rad // Initialize host array float * h_data = (float*)malloc(size); for (int i =0; i<height*width; ++i) h_data[i] =(float)i/(height*width); memset(h_data, 0, size/4); // cudaArray obj will have elements of 32bits, representing single-precision // floating point numbers cudaChannelFormatDesc ch_desc = cudaCreateChannelDesc(32,0,0,0, cudaChannelFormatKindFloat); cudaArray* cu_array; checkCudaErrors(cudaMallocArray(&cu_array, &ch_desc, width, height)); checkCudaErrors(cudaMemcpyToArray(cu_array, 0, 0, h_data, size, cudaMemcpyHostToDevice)); // Specify texture // Texture is going to be bound to a 1D Array, with name cu_array struct cudaResourceDesc res_desc; memset(&res_desc, 0, sizeof(res_desc)); res_desc.resType = cudaResourceTypeArray; res_desc.res.array.array = cu_array; // Specify texture object parameters // - Clamp mode: if out of bounds clamp index to closest 0 or width | 0 or height // - Without interpoation // - No conversion/normalization of the value read // - Coordinates are normalized to -1,1: useful for trigonometry struct cudaTextureDesc tex_desc; memset(&tex_desc, 0, sizeof(tex_desc)); tex_desc.addressMode[0] = cudaAddressModeClamp; tex_desc.addressMode[1] = cudaAddressModeClamp; tex_desc.filterMode = cudaFilterModePoint; tex_desc.readMode = cudaReadModeElementType; tex_desc.normalizedCoords = 1; // Copy host memory to cudaArray checkCudaErrors(cudaMemcpyToArray(cu_array, 0, 0, h_data, size, cudaMemcpyHostToDevice)); // Create texture object cudaTextureObject_t tex_obj = 0; cudaCreateTextureObject(&tex_obj, &res_desc, &tex_desc, NULL); // Allocate result of transformation in device memory float* d_output; checkCudaErrors(cudaMalloc(&d_output, size)); // Print host array printArrayAsMatrix(h_data, width, height); // Invoke kernel rotating it once dim3 dimBlock(16, 16); dim3 dimGrid( (width + dimBlock.x - 1) / dimBlock.x, (height + dimBlock.y - 1) / dimBlock.y); rotateKernel<<<dimGrid, dimBlock>>>(d_output, tex_obj, width, height, theta); // Print result array checkCudaErrors(cudaMemcpy(h_data, d_output, size, cudaMemcpyDeviceToHost)); printArrayAsMatrix(h_data, width, 
height); // Copy old result to texture and Invoke kernel rotating it again checkCudaErrors(cudaMemcpyToArray(cu_array, 0, 0, d_output, size, cudaMemcpyDeviceToDevice)); rotateKernel<<<dimGrid, dimBlock>>>(d_output, tex_obj, width, height, theta); // Print result array checkCudaErrors(cudaMemcpy(h_data, d_output, size, cudaMemcpyDeviceToHost)); printArrayAsMatrix(h_data, width, height); // Destroy texture object checkCudaErrors(cudaDestroyTextureObject(tex_obj)); // Free device memory checkCudaErrors(cudaFreeArray(cu_array)); checkCudaErrors(cudaFree(d_output)); // Free host memory free(h_data); }
192e806ff5bd64abd34fa191b33f9dcec807b3c5.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
extern "C" __global__ void addForces(const float* __restrict__ forces, long long* __restrict__ forceBuffers, int* __restrict__ atomIndex, int numAtoms, int paddedNumAtoms) {
    for (int atom = blockIdx.x*blockDim.x+threadIdx.x; atom < numAtoms; atom += blockDim.x*gridDim.x) {
        int index = atomIndex[atom];
        forceBuffers[atom] += (long long) (forces[3*index]*0x100000000);
        forceBuffers[atom+paddedNumAtoms] += (long long) (forces[3*index+1]*0x100000000);
        forceBuffers[atom+2*paddedNumAtoms] += (long long) (forces[3*index+2]*0x100000000);
    }
}
192e806ff5bd64abd34fa191b33f9dcec807b3c5.cu
extern "C" __global__ void addForces(const float* __restrict__ forces, long long* __restrict__ forceBuffers, int* __restrict__ atomIndex, int numAtoms, int paddedNumAtoms) {
    for (int atom = blockIdx.x*blockDim.x+threadIdx.x; atom < numAtoms; atom += blockDim.x*gridDim.x) {
        int index = atomIndex[atom];
        forceBuffers[atom] += (long long) (forces[3*index]*0x100000000);
        forceBuffers[atom+paddedNumAtoms] += (long long) (forces[3*index+1]*0x100000000);
        forceBuffers[atom+2*paddedNumAtoms] += (long long) (forces[3*index+2]*0x100000000);
    }
}
e62f04c74e089912247c1ccd610bba8a334800ae.hip
// !!! This is a file automatically generated by hipify!!!
/*! Copyright 2019 by Contributors */
#include <gtest/gtest.h>
#include <xgboost/data.h>
#include <xgboost/json.h>
#include <thrust/device_vector.h>
#include "../../../src/common/device_helpers.cuh"

namespace xgboost {

TEST(MetaInfo, FromInterface) {
  hipSetDevice(0);
  constexpr size_t kRows = 16;
  thrust::device_vector<float> d_data(kRows);
  for (size_t i = 0; i < d_data.size(); ++i) {
    d_data[i] = i * 2.0;
  }
  Json column { Object() };
  std::vector<Json> j_shape {Json(Integer(static_cast<Integer::Int>(kRows)))};
  column["shape"] = Array(j_shape);
  column["strides"] = Array(std::vector<Json>{Json(Integer(static_cast<Integer::Int>(4)))});
  column["version"] = Integer(static_cast<Integer::Int>(1));
  column["typestr"] = String("<f4");
  auto p_d_data = dh::Raw(d_data);
  std::vector<Json> j_data {
    Json(Integer(reinterpret_cast<Integer::Int>(p_d_data))),
    Json(Boolean(false))};
  column["data"] = j_data;
  std::stringstream ss;
  Json::Dump(column, &ss);
  std::string str = ss.str();
  MetaInfo info;
  info.SetInfo("label", str.c_str());
  auto const& h_label = info.labels_.HostVector();
  for (size_t i = 0; i < d_data.size(); ++i) {
    ASSERT_EQ(h_label[i], d_data[i]);
  }
  info.SetInfo("weight", str.c_str());
  auto const& h_weight = info.weights_.HostVector();
  for (size_t i = 0; i < d_data.size(); ++i) {
    ASSERT_EQ(h_weight[i], d_data[i]);
  }
  info.SetInfo("base_margin", str.c_str());
  auto const& h_base_margin = info.base_margin_.HostVector();
  for (size_t i = 0; i < d_data.size(); ++i) {
    ASSERT_EQ(h_base_margin[i], d_data[i]);
  }
}
}  // namespace xgboost
e62f04c74e089912247c1ccd610bba8a334800ae.cu
/*! Copyright 2019 by Contributors */
#include <gtest/gtest.h>
#include <xgboost/data.h>
#include <xgboost/json.h>
#include <thrust/device_vector.h>
#include "../../../src/common/device_helpers.cuh"

namespace xgboost {

TEST(MetaInfo, FromInterface) {
  cudaSetDevice(0);
  constexpr size_t kRows = 16;
  thrust::device_vector<float> d_data(kRows);
  for (size_t i = 0; i < d_data.size(); ++i) {
    d_data[i] = i * 2.0;
  }
  Json column { Object() };
  std::vector<Json> j_shape {Json(Integer(static_cast<Integer::Int>(kRows)))};
  column["shape"] = Array(j_shape);
  column["strides"] = Array(std::vector<Json>{Json(Integer(static_cast<Integer::Int>(4)))});
  column["version"] = Integer(static_cast<Integer::Int>(1));
  column["typestr"] = String("<f4");
  auto p_d_data = dh::Raw(d_data);
  std::vector<Json> j_data {
    Json(Integer(reinterpret_cast<Integer::Int>(p_d_data))),
    Json(Boolean(false))};
  column["data"] = j_data;
  std::stringstream ss;
  Json::Dump(column, &ss);
  std::string str = ss.str();
  MetaInfo info;
  info.SetInfo("label", str.c_str());
  auto const& h_label = info.labels_.HostVector();
  for (size_t i = 0; i < d_data.size(); ++i) {
    ASSERT_EQ(h_label[i], d_data[i]);
  }
  info.SetInfo("weight", str.c_str());
  auto const& h_weight = info.weights_.HostVector();
  for (size_t i = 0; i < d_data.size(); ++i) {
    ASSERT_EQ(h_weight[i], d_data[i]);
  }
  info.SetInfo("base_margin", str.c_str());
  auto const& h_base_margin = info.base_margin_.HostVector();
  for (size_t i = 0; i < d_data.size(); ++i) {
    ASSERT_EQ(h_base_margin[i], d_data[i]);
  }
}
}  // namespace xgboost
1498ac8581169093221b58b277798d219486ae26.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "utils.h" #include <string> #include "loadSaveImage.h" #include <thrust/extrema.h> //chroma-LogLuminance Space static float *d_x__; static float *d_y__; static float *d_logY__; //memory for the cdf static unsigned int *d_cdf__; static const int numBins = 1024; size_t numRows__; size_t numCols__; /* Copied from Mike's IPython notebook with some minor modifications * Mainly double precision constants to floats and log10 -> log10f * Also removed Luminance (Y) channel since it is never used */ __global__ void rgb_to_xyY( float* d_r, float* d_g, float* d_b, float* d_x, float* d_y, float* d_log_Y, float delta, int num_pixels_y, int num_pixels_x ) { int ny = num_pixels_y; int nx = num_pixels_x; int2 image_index_2d = make_int2( ( blockIdx.x * blockDim.x ) + threadIdx.x, ( blockIdx.y * blockDim.y ) + threadIdx.y ); int image_index_1d = ( nx * image_index_2d.y ) + image_index_2d.x; if ( image_index_2d.x < nx && image_index_2d.y < ny ) { float r = d_r[ image_index_1d ]; float g = d_g[ image_index_1d ]; float b = d_b[ image_index_1d ]; float X = ( r * 0.4124f ) + ( g * 0.3576f ) + ( b * 0.1805f ); float Y = ( r * 0.2126f ) + ( g * 0.7152f ) + ( b * 0.0722f ); float Z = ( r * 0.0193f ) + ( g * 0.1192f ) + ( b * 0.9505f ); float L = X + Y + Z; float x = X / L; float y = Y / L; float log_Y = log10f( delta + Y ); d_x[ image_index_1d ] = x; d_y[ image_index_1d ] = y; d_log_Y[ image_index_1d ] = log_Y; } } /* Copied from Mike's IPython notebook * Modified just by having threads read the normalization constant directly from device memory instead of copying it back */ __global__ void normalize_cdf( unsigned int* d_input_cdf, float* d_output_cdf, int n ) { const float normalization_constant = 1.f / d_input_cdf[n - 1]; int global_index_1d = ( blockIdx.x * blockDim.x ) + threadIdx.x; if ( global_index_1d < n ) { unsigned int input_value = d_input_cdf[ global_index_1d ]; float output_value = input_value * normalization_constant; d_output_cdf[ global_index_1d ] = output_value; } } /* Copied from Mike's IPython notebook * Modified double constants -> float * Perform tone mapping based upon new * luminance scaling */ __global__ void tonemap( float* d_x, float* d_y, float* d_log_Y, float* d_cdf_norm, float* d_r_new, float* d_g_new, float* d_b_new, float min_log_Y, float max_log_Y, float log_Y_range, int num_bins, int num_pixels_y, int num_pixels_x ) { int ny = num_pixels_y; int nx = num_pixels_x; int2 image_index_2d = make_int2( ( blockIdx.x * blockDim.x ) + threadIdx.x, ( blockIdx.y * blockDim.y ) + threadIdx.y ); int image_index_1d = ( nx * image_index_2d.y ) + image_index_2d.x; if ( image_index_2d.x < nx && image_index_2d.y < ny ) { float x = d_x[ image_index_1d ]; float y = d_y[ image_index_1d ]; float log_Y = d_log_Y[ image_index_1d ]; int bin_index = min( num_bins - 1, int( (num_bins * ( log_Y - min_log_Y ) ) / log_Y_range ) ); float Y_new = d_cdf_norm[ bin_index ]; float X_new = x * ( Y_new / y ); float Z_new = ( 1 - x - y ) * ( Y_new / y ); float r_new = ( X_new * 3.2406f ) + ( Y_new * -1.5372f ) + ( Z_new * -0.4986f ); float g_new = ( X_new * -0.9689f ) + ( Y_new * 1.8758f ) + ( Z_new * 0.0415f ); float b_new = ( X_new * 0.0557f ) + ( Y_new * -0.2040f ) + ( Z_new * 1.0570f ); d_r_new[ image_index_1d ] = r_new; d_g_new[ image_index_1d ] = g_new; d_b_new[ image_index_1d ] = b_new; } } //return types are void since any internal error will be handled by quitting //no point in returning error codes... 
void preProcess(float** d_luminance, unsigned int** d_cdf, size_t *numRows, size_t *numCols, unsigned int *numberOfBins, const std::string &filename) { //make sure the context initializes ok checkCudaErrors(hipFree(0)); float *imgPtr; //we will become responsible for this pointer loadImageHDR(filename, &imgPtr, &numRows__, &numCols__); *numRows = numRows__; *numCols = numCols__; //first thing to do is split incoming BGR float data into separate channels size_t numPixels = numRows__ * numCols__; float *red = new float[numPixels]; float *green = new float[numPixels]; float *blue = new float[numPixels]; //Remeber image is loaded BGR for (size_t i = 0; i < numPixels; ++i) { blue[i] = imgPtr[3 * i + 0]; green[i] = imgPtr[3 * i + 1]; red[i] = imgPtr[3 * i + 2]; } delete[] imgPtr; //being good citizens are releasing resources //allocated in loadImageHDR float *d_red, *d_green, *d_blue; //RGB space size_t channelSize = sizeof(float) * numPixels; checkCudaErrors(hipMalloc(&d_red, channelSize)); checkCudaErrors(hipMalloc(&d_green, channelSize)); checkCudaErrors(hipMalloc(&d_blue, channelSize)); checkCudaErrors(hipMalloc(&d_x__, channelSize)); checkCudaErrors(hipMalloc(&d_y__, channelSize)); checkCudaErrors(hipMalloc(&d_logY__, channelSize)); checkCudaErrors(hipMemcpy(d_red, red, channelSize, hipMemcpyHostToDevice)); checkCudaErrors(hipMemcpy(d_green, green, channelSize, hipMemcpyHostToDevice)); checkCudaErrors(hipMemcpy(d_blue, blue, channelSize, hipMemcpyHostToDevice)); //convert from RGB space to chrominance/luminance space xyY const dim3 blockSize(32, 16, 1); const dim3 gridSize( (numCols__ + blockSize.x - 1) / blockSize.x, (numRows__ + blockSize.y - 1) / blockSize.y, 1); hipLaunchKernelGGL(( rgb_to_xyY), dim3(gridSize), dim3(blockSize), 0, 0, d_red, d_green, d_blue, d_x__, d_y__, d_logY__, .0001f, numRows__, numCols__); hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); *d_luminance = d_logY__; //allocate memory for the cdf of the histogram *numberOfBins = numBins; checkCudaErrors(hipMalloc(&d_cdf__, sizeof(unsigned int) * numBins)); checkCudaErrors(hipMemset(d_cdf__, 0, sizeof(unsigned int) * numBins)); *d_cdf = d_cdf__; checkCudaErrors(hipFree(d_red)); checkCudaErrors(hipFree(d_green)); checkCudaErrors(hipFree(d_blue)); delete[] red; delete[] green; delete[] blue; } void postProcess(const std::string& output_file, size_t numRows, size_t numCols, float min_log_Y, float max_log_Y) { const int numPixels = numRows__ * numCols__; const int numThreads = 192; float *d_cdf_normalized; checkCudaErrors(hipMalloc(&d_cdf_normalized, sizeof(float) * numBins)); //first normalize the cdf to a maximum value of 1 //this is how we compress the range of the luminance channel hipLaunchKernelGGL(( normalize_cdf), dim3((numBins + numThreads - 1) / numThreads), dim3(numThreads), 0, 0, d_cdf__, d_cdf_normalized, numBins); hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); //allocate memory for the output RGB channels float *h_red, *h_green, *h_blue; float *d_red, *d_green, *d_blue; h_red = new float[numPixels]; h_green = new float[numPixels]; h_blue = new float[numPixels]; checkCudaErrors(hipMalloc(&d_red, sizeof(float) * numPixels)); checkCudaErrors(hipMalloc(&d_green, sizeof(float) * numPixels)); checkCudaErrors(hipMalloc(&d_blue, sizeof(float) * numPixels)); float log_Y_range = max_log_Y - min_log_Y; const dim3 blockSize(32, 16, 1); const dim3 gridSize( (numCols + blockSize.x - 1) / blockSize.x, (numRows + blockSize.y - 1) / blockSize.y ); //next perform the actual tone-mapping //we map each 
luminance value to its new value //and then transform back to RGB space hipLaunchKernelGGL(( tonemap), dim3(gridSize), dim3(blockSize), 0, 0, d_x__, d_y__, d_logY__, d_cdf_normalized, d_red, d_green, d_blue, min_log_Y, max_log_Y, log_Y_range, numBins, numRows, numCols); hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); checkCudaErrors(hipMemcpy(h_red, d_red, sizeof(float) * numPixels, hipMemcpyDeviceToHost)); checkCudaErrors(hipMemcpy(h_green, d_green, sizeof(float) * numPixels, hipMemcpyDeviceToHost)); checkCudaErrors(hipMemcpy(h_blue, d_blue, sizeof(float) * numPixels, hipMemcpyDeviceToHost)); //recombine the image channels float *imageHDR = new float[numPixels * 3]; for (int i = 0; i < numPixels; ++i) { imageHDR[3 * i + 0] = h_blue[i]; imageHDR[3 * i + 1] = h_green[i]; imageHDR[3 * i + 2] = h_red[i]; } saveImageHDR(imageHDR, numRows, numCols, output_file); delete[] imageHDR; delete[] h_red; delete[] h_green; delete[] h_blue; //cleanup checkCudaErrors(hipFree(d_cdf_normalized)); } void cleanupGlobalMemory(void) { checkCudaErrors(hipFree(d_x__)); checkCudaErrors(hipFree(d_y__)); checkCudaErrors(hipFree(d_logY__)); checkCudaErrors(hipFree(d_cdf__)); }
1498ac8581169093221b58b277798d219486ae26.cu
#include "utils.h" #include <string> #include "loadSaveImage.h" #include <thrust/extrema.h> //chroma-LogLuminance Space static float *d_x__; static float *d_y__; static float *d_logY__; //memory for the cdf static unsigned int *d_cdf__; static const int numBins = 1024; size_t numRows__; size_t numCols__; /* Copied from Mike's IPython notebook with some minor modifications * Mainly double precision constants to floats and log10 -> log10f * Also removed Luminance (Y) channel since it is never used */ __global__ void rgb_to_xyY( float* d_r, float* d_g, float* d_b, float* d_x, float* d_y, float* d_log_Y, float delta, int num_pixels_y, int num_pixels_x ) { int ny = num_pixels_y; int nx = num_pixels_x; int2 image_index_2d = make_int2( ( blockIdx.x * blockDim.x ) + threadIdx.x, ( blockIdx.y * blockDim.y ) + threadIdx.y ); int image_index_1d = ( nx * image_index_2d.y ) + image_index_2d.x; if ( image_index_2d.x < nx && image_index_2d.y < ny ) { float r = d_r[ image_index_1d ]; float g = d_g[ image_index_1d ]; float b = d_b[ image_index_1d ]; float X = ( r * 0.4124f ) + ( g * 0.3576f ) + ( b * 0.1805f ); float Y = ( r * 0.2126f ) + ( g * 0.7152f ) + ( b * 0.0722f ); float Z = ( r * 0.0193f ) + ( g * 0.1192f ) + ( b * 0.9505f ); float L = X + Y + Z; float x = X / L; float y = Y / L; float log_Y = log10f( delta + Y ); d_x[ image_index_1d ] = x; d_y[ image_index_1d ] = y; d_log_Y[ image_index_1d ] = log_Y; } } /* Copied from Mike's IPython notebook * Modified just by having threads read the normalization constant directly from device memory instead of copying it back */ __global__ void normalize_cdf( unsigned int* d_input_cdf, float* d_output_cdf, int n ) { const float normalization_constant = 1.f / d_input_cdf[n - 1]; int global_index_1d = ( blockIdx.x * blockDim.x ) + threadIdx.x; if ( global_index_1d < n ) { unsigned int input_value = d_input_cdf[ global_index_1d ]; float output_value = input_value * normalization_constant; d_output_cdf[ global_index_1d ] = output_value; } } /* Copied from Mike's IPython notebook * Modified double constants -> float * Perform tone mapping based upon new * luminance scaling */ __global__ void tonemap( float* d_x, float* d_y, float* d_log_Y, float* d_cdf_norm, float* d_r_new, float* d_g_new, float* d_b_new, float min_log_Y, float max_log_Y, float log_Y_range, int num_bins, int num_pixels_y, int num_pixels_x ) { int ny = num_pixels_y; int nx = num_pixels_x; int2 image_index_2d = make_int2( ( blockIdx.x * blockDim.x ) + threadIdx.x, ( blockIdx.y * blockDim.y ) + threadIdx.y ); int image_index_1d = ( nx * image_index_2d.y ) + image_index_2d.x; if ( image_index_2d.x < nx && image_index_2d.y < ny ) { float x = d_x[ image_index_1d ]; float y = d_y[ image_index_1d ]; float log_Y = d_log_Y[ image_index_1d ]; int bin_index = min( num_bins - 1, int( (num_bins * ( log_Y - min_log_Y ) ) / log_Y_range ) ); float Y_new = d_cdf_norm[ bin_index ]; float X_new = x * ( Y_new / y ); float Z_new = ( 1 - x - y ) * ( Y_new / y ); float r_new = ( X_new * 3.2406f ) + ( Y_new * -1.5372f ) + ( Z_new * -0.4986f ); float g_new = ( X_new * -0.9689f ) + ( Y_new * 1.8758f ) + ( Z_new * 0.0415f ); float b_new = ( X_new * 0.0557f ) + ( Y_new * -0.2040f ) + ( Z_new * 1.0570f ); d_r_new[ image_index_1d ] = r_new; d_g_new[ image_index_1d ] = g_new; d_b_new[ image_index_1d ] = b_new; } } //return types are void since any internal error will be handled by quitting //no point in returning error codes... 
void preProcess(float** d_luminance, unsigned int** d_cdf, size_t *numRows, size_t *numCols, unsigned int *numberOfBins, const std::string &filename) { //make sure the context initializes ok checkCudaErrors(cudaFree(0)); float *imgPtr; //we will become responsible for this pointer loadImageHDR(filename, &imgPtr, &numRows__, &numCols__); *numRows = numRows__; *numCols = numCols__; //first thing to do is split incoming BGR float data into separate channels size_t numPixels = numRows__ * numCols__; float *red = new float[numPixels]; float *green = new float[numPixels]; float *blue = new float[numPixels]; //Remeber image is loaded BGR for (size_t i = 0; i < numPixels; ++i) { blue[i] = imgPtr[3 * i + 0]; green[i] = imgPtr[3 * i + 1]; red[i] = imgPtr[3 * i + 2]; } delete[] imgPtr; //being good citizens are releasing resources //allocated in loadImageHDR float *d_red, *d_green, *d_blue; //RGB space size_t channelSize = sizeof(float) * numPixels; checkCudaErrors(cudaMalloc(&d_red, channelSize)); checkCudaErrors(cudaMalloc(&d_green, channelSize)); checkCudaErrors(cudaMalloc(&d_blue, channelSize)); checkCudaErrors(cudaMalloc(&d_x__, channelSize)); checkCudaErrors(cudaMalloc(&d_y__, channelSize)); checkCudaErrors(cudaMalloc(&d_logY__, channelSize)); checkCudaErrors(cudaMemcpy(d_red, red, channelSize, cudaMemcpyHostToDevice)); checkCudaErrors(cudaMemcpy(d_green, green, channelSize, cudaMemcpyHostToDevice)); checkCudaErrors(cudaMemcpy(d_blue, blue, channelSize, cudaMemcpyHostToDevice)); //convert from RGB space to chrominance/luminance space xyY const dim3 blockSize(32, 16, 1); const dim3 gridSize( (numCols__ + blockSize.x - 1) / blockSize.x, (numRows__ + blockSize.y - 1) / blockSize.y, 1); rgb_to_xyY<<<gridSize, blockSize>>>(d_red, d_green, d_blue, d_x__, d_y__, d_logY__, .0001f, numRows__, numCols__); cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); *d_luminance = d_logY__; //allocate memory for the cdf of the histogram *numberOfBins = numBins; checkCudaErrors(cudaMalloc(&d_cdf__, sizeof(unsigned int) * numBins)); checkCudaErrors(cudaMemset(d_cdf__, 0, sizeof(unsigned int) * numBins)); *d_cdf = d_cdf__; checkCudaErrors(cudaFree(d_red)); checkCudaErrors(cudaFree(d_green)); checkCudaErrors(cudaFree(d_blue)); delete[] red; delete[] green; delete[] blue; } void postProcess(const std::string& output_file, size_t numRows, size_t numCols, float min_log_Y, float max_log_Y) { const int numPixels = numRows__ * numCols__; const int numThreads = 192; float *d_cdf_normalized; checkCudaErrors(cudaMalloc(&d_cdf_normalized, sizeof(float) * numBins)); //first normalize the cdf to a maximum value of 1 //this is how we compress the range of the luminance channel normalize_cdf<<< (numBins + numThreads - 1) / numThreads, numThreads>>>(d_cdf__, d_cdf_normalized, numBins); cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); //allocate memory for the output RGB channels float *h_red, *h_green, *h_blue; float *d_red, *d_green, *d_blue; h_red = new float[numPixels]; h_green = new float[numPixels]; h_blue = new float[numPixels]; checkCudaErrors(cudaMalloc(&d_red, sizeof(float) * numPixels)); checkCudaErrors(cudaMalloc(&d_green, sizeof(float) * numPixels)); checkCudaErrors(cudaMalloc(&d_blue, sizeof(float) * numPixels)); float log_Y_range = max_log_Y - min_log_Y; const dim3 blockSize(32, 16, 1); const dim3 gridSize( (numCols + blockSize.x - 1) / blockSize.x, (numRows + blockSize.y - 1) / blockSize.y ); //next perform the actual tone-mapping //we map each luminance value to its new value //and then 
transform back to RGB space tonemap<<<gridSize, blockSize>>>(d_x__, d_y__, d_logY__, d_cdf_normalized, d_red, d_green, d_blue, min_log_Y, max_log_Y, log_Y_range, numBins, numRows, numCols); cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); checkCudaErrors(cudaMemcpy(h_red, d_red, sizeof(float) * numPixels, cudaMemcpyDeviceToHost)); checkCudaErrors(cudaMemcpy(h_green, d_green, sizeof(float) * numPixels, cudaMemcpyDeviceToHost)); checkCudaErrors(cudaMemcpy(h_blue, d_blue, sizeof(float) * numPixels, cudaMemcpyDeviceToHost)); //recombine the image channels float *imageHDR = new float[numPixels * 3]; for (int i = 0; i < numPixels; ++i) { imageHDR[3 * i + 0] = h_blue[i]; imageHDR[3 * i + 1] = h_green[i]; imageHDR[3 * i + 2] = h_red[i]; } saveImageHDR(imageHDR, numRows, numCols, output_file); delete[] imageHDR; delete[] h_red; delete[] h_green; delete[] h_blue; //cleanup checkCudaErrors(cudaFree(d_cdf_normalized)); } void cleanupGlobalMemory(void) { checkCudaErrors(cudaFree(d_x__)); checkCudaErrors(cudaFree(d_y__)); checkCudaErrors(cudaFree(d_logY__)); checkCudaErrors(cudaFree(d_cdf__)); }
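// --- Added note (commentary, not part of the original corpus entry) ---------
// The tonemap kernel in the entry above performs histogram equalization on the
// log-luminance channel: each pixel's log_Y selects a bin, and the normalized
// CDF value at that bin becomes the new Y before converting xyY back to RGB.
// A minimal host-side sketch of that per-pixel remapping (illustration only;
// cdf_norm is assumed to hold the output of normalize_cdf copied to the host):
#include <algorithm>

static float remapLuminance(float log_Y, float min_log_Y, float log_Y_range,
                            const float* cdf_norm, int num_bins)
{
    // same bin computation as the tonemap kernel
    int bin = std::min(num_bins - 1,
                       int((num_bins * (log_Y - min_log_Y)) / log_Y_range));
    return cdf_norm[bin]; // new luminance in [0, 1]
}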
4687e85221343904d30f1f1a0928b9033983d2a6.hip
// !!! This is a file automatically generated by hipify!!! /* -- MAGMA (version 2.5.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date January 2019 @generated from sparse/blas/zmdot_shfl.cu, normal z -> d, Wed Jan 2 14:18:53 2019 @author Moritz Kreutzer */ #include "magmasparse_internal.h" #include "magmasparse_d.h" #define BLOCK_SIZE 512 #define PRECISION_d #include <hip/hip_runtime.h> // for TORCH_HIP_VERSION #if (TORCH_HIP_VERSION <= 6000) // CUDA 6.5 adds Double precision version; here's an implementation for CUDA 6.0 and earlier. // from https://devblogs.nvidia.com/parallelforall/faster-parallel-reductions-kepler/ __device__ inline real_Double_t __shfl_down(real_Double_t var, unsigned int srcLane, int width=32) { int2 a = *reinterpret_cast<int2*>(&var); a.x = __shfl_down(a.x, srcLane, width); a.y = __shfl_down(a.y, srcLane, width); return *reinterpret_cast<double*>(&a); } #endif template<typename T> __inline__ __device__ T warpReduceSum(T val) { #if __CUDA_ARCH__ >= 300 #if __CUDACC_VER_MAJOR__ < 9 val += __shfl_down(val, 16); val += __shfl_down(val, 8); val += __shfl_down(val, 4); val += __shfl_down(val, 2); val += __shfl_down(val, 1); #else val += __shfl_down_sync(0xffffffff,val, 16); val += __shfl_down_sync(0xffffffff,val, 8); val += __shfl_down_sync(0xffffffff,val, 4); val += __shfl_down_sync(0xffffffff,val, 2); val += __shfl_down_sync(0xffffffff,val, 1); #endif #endif return val; } #ifdef PRECISION_z template<> __inline__ __device__ double warpReduceSum<double>(double val) { #if __CUDA_ARCH__ >= 300 int4 a = *reinterpret_cast<int4*>(&val); #if __CUDACC_VER_MAJOR__ < 9 a.x += __shfl_down(a.x, 16); a.y += __shfl_down(a.y, 16); a.z += __shfl_down(a.z, 16); a.w += __shfl_down(a.w, 16); a.x += __shfl_down(a.x, 8); a.y += __shfl_down(a.y, 8); a.z += __shfl_down(a.z, 8); a.w += __shfl_down(a.w, 8); a.x += __shfl_down(a.x, 4); a.y += __shfl_down(a.y, 4); a.z += __shfl_down(a.z, 4); a.w += __shfl_down(a.w, 4); a.x += __shfl_down(a.x, 2); a.y += __shfl_down(a.y, 2); a.z += __shfl_down(a.z, 2); a.w += __shfl_down(a.w, 2); a.x += __shfl_down(a.x, 1); a.y += __shfl_down(a.y, 1); a.z += __shfl_down(a.z, 1); a.w += __shfl_down(a.w, 1); #else a.x += __shfl_down_sync(0xffffffff,a.x, 16); a.y += __shfl_down_sync(0xffffffff,a.y, 16); a.z += __shfl_down_sync(0xffffffff,a.z, 16); a.w += __shfl_down_sync(0xffffffff,a.w, 16); a.x += __shfl_down_sync(0xffffffff,a.x, 8); a.y += __shfl_down_sync(0xffffffff,a.y, 8); a.z += __shfl_down_sync(0xffffffff,a.z, 8); a.w += __shfl_down_sync(0xffffffff,a.w, 8); a.x += __shfl_down_sync(0xffffffff,a.x, 4); a.y += __shfl_down_sync(0xffffffff,a.y, 4); a.z += __shfl_down_sync(0xffffffff,a.z, 4); a.w += __shfl_down_sync(0xffffffff,a.w, 4); a.x += __shfl_down_sync(0xffffffff,a.x, 2); a.y += __shfl_down_sync(0xffffffff,a.y, 2); a.z += __shfl_down_sync(0xffffffff,a.z, 2); a.w += __shfl_down_sync(0xffffffff,a.w, 2); a.x += __shfl_down_sync(0xffffffff,a.x, 1); a.y += __shfl_down_sync(0xffffffff,a.y, 1); a.z += __shfl_down_sync(0xffffffff,a.z, 1); a.w += __shfl_down_sync(0xffffffff,a.w, 1); #endif #endif return val; } #endif // PRECISION_z #ifdef PRECISION_c template<> __inline__ __device__ magmaFloatComplex warpReduceSum<magmaFloatComplex>(magmaFloatComplex val) { #if __CUDA_ARCH__ >= 300 float2 a = *reinterpret_cast<float2*>(&val); #if __CUDACC_VER_MAJOR__ < 9 a.x += __shfl_down(a.x, 16); a.y += __shfl_down(a.y, 16); a.x += __shfl_down(a.x, 8); a.y += __shfl_down(a.y, 8); a.x += __shfl_down(a.x, 4); a.y += __shfl_down(a.y, 4); 
a.x += __shfl_down(a.x, 2); a.y += __shfl_down(a.y, 2); a.x += __shfl_down(a.x, 1); a.y += __shfl_down(a.y, 1); #else a.x += __shfl_down_sync(0xffffffff,a.x, 16); a.y += __shfl_down_sync(0xffffffff,a.y, 16); a.x += __shfl_down_sync(0xffffffff,a.x, 8); a.y += __shfl_down_sync(0xffffffff,a.y, 8); a.x += __shfl_down_sync(0xffffffff,a.x, 4); a.y += __shfl_down_sync(0xffffffff,a.y, 4); a.x += __shfl_down_sync(0xffffffff,a.x, 2); a.y += __shfl_down_sync(0xffffffff,a.y, 2); a.x += __shfl_down_sync(0xffffffff,a.x, 1); a.y += __shfl_down_sync(0xffffffff,a.y, 1); #endif #endif return val; } #endif // PRECISION_c template<typename T> __inline__ __device__ T blockReduceSum_1D(T val) { extern __shared__ T shared[]; // Shared mem for 32 partial sums int lane = threadIdx.x % warpSize; int wid = threadIdx.x / warpSize; val = warpReduceSum<T>(val); // Each warp performs partial reduction if (lane == 0) shared[wid]=val; // Write reduced value to shared memory __syncthreads(); // Wait for all partial reductions //read from shared memory only if that warp existed val = (threadIdx.x < blockDim.x / warpSize) ? shared[lane] : MAGMA_D_ZERO; if (wid == 0) val = warpReduceSum<T>(val); //Final reduce within first warp return val; } template<typename T> __inline__ __device__ T blockReduceSum(T val) { extern __shared__ T shared[]; // Shared mem for 32 partial sums int lane = threadIdx.x % warpSize; int wid = threadIdx.x / warpSize; val = warpReduceSum<T>(val); // Each warp performs partial reduction if (lane == 0) shared[threadIdx.y*32+wid]=val; // Write reduced value to shared memory __syncthreads(); // Wait for all partial reductions //read from shared memory only if that warp existed val = (threadIdx.x < blockDim.x / warpSize) ? shared[threadIdx.y*32+lane] : MAGMA_D_ZERO; if (wid == 0) val = warpReduceSum<T>(val); //Final reduce within first warp return val; } template<typename T> __global__ void deviceReduceKernel(const T * __restrict__ in, T * __restrict__ out, int N) { T sum = MAGMA_D_MAKE(0.0, 0.0); //reduce multiple elements per thread for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < N; i += blockDim.x * gridDim.x) { sum += in[i]; } sum = blockReduceSum<T>(sum); if (threadIdx.x == 0) out[blockIdx.x]=sum; } // dot product for multiple vectors using shuffle intrinsics and less shared memory __global__ void magma_dblockdot_kernel_shuffle( int n, int k, const double * __restrict__ v, const double * __restrict__ r, double * __restrict__ vtmp) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = threadIdx.y; double tmp; if (i < n) { tmp = v[i+j*n] * r[i]; } else { tmp = MAGMA_D_ZERO; } tmp = blockReduceSum(tmp); if (threadIdx.x == 0 ){ vtmp[ blockIdx.x+j*gridDim.x ] = tmp; } } // dot product for multiple vectors using shuffle intrinsics and less shared memory __global__ void magma_dblockdot_kernel_shuffle_1dblock( int n, int k, const double * __restrict__ v, const double * __restrict__ r, double * __restrict__ vtmp) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j; for (j=0; j < k; j++) { double tmp; if (i < n) { tmp = v[i+j*n] * r[i]; } else { tmp = MAGMA_D_ZERO; } tmp = blockReduceSum_1D(tmp); if (threadIdx.x == 0 ){ vtmp[ blockIdx.x+j*gridDim.x ] = tmp; } } } /** Purpose ------- Computes the scalar product of a set of vectors v_i such that skp = ( <v_0,r>, <v_1,r>, .. ) Returns the vector skp. Arguments --------- @param[in] n int length of v_i and r @param[in] k int # vectors v_i @param[in] v magmaDouble_ptr v = (v_0 .. v_i.. 
v_k) @param[in] r magmaDouble_ptr r @param[in] d1 magmaDouble_ptr workspace @param[in] d2 magmaDouble_ptr workspace @param[out] skp magmaDouble_ptr vector[k] of scalar products (<v_i,r>...) @param[in] queue magma_queue_t Queue to execute in. @ingroup magmasparse_dblas ********************************************************************/ extern "C" magma_int_t magma_dmdotc_shfl( magma_int_t n, magma_int_t k, magmaDouble_ptr v, magmaDouble_ptr r, magmaDouble_ptr d1, magmaDouble_ptr d2, magmaDouble_ptr skp, magma_queue_t queue ) { if ( magma_getdevice_arch() < 300 ) { return magma_dmdotc( n, k, v, r, d1, d2, skp, queue ); } else if (1) { // 1D block kernel seems to be always faster dim3 block( BLOCK_SIZE ); dim3 grid( magma_ceildiv( n, block.x ) ); hipLaunchKernelGGL(( magma_dblockdot_kernel_shuffle_1dblock), dim3(grid), dim3(block), 32*sizeof(double), queue->cuda_stream() , n, k, v, r, d1 ); int j; for (j=0; j < k; j++) { hipLaunchKernelGGL(( deviceReduceKernel<double>) , dim3(1), dim3(1024), 32*sizeof(double), queue->cuda_stream(), d1+grid.x*j, skp+j, grid.x); } } else { dim3 block( magma_roundup( magma_ceildiv(BLOCK_SIZE, k), 32 ), k ); while (block.x*block.y > 1024) { block.x -= 32; } dim3 grid( magma_ceildiv( n, block.x ) ); hipLaunchKernelGGL(( magma_dblockdot_kernel_shuffle), dim3(grid), dim3(block), 32*k*sizeof(double), queue->cuda_stream() , n, k, v, r, d1 ); int j; for (j=0; j < k; j++) { hipLaunchKernelGGL(( deviceReduceKernel<double>) , dim3(1), dim3(1024), 32*sizeof(double), queue->cuda_stream(), d1+grid.x*j, skp+j, grid.x); } } return MAGMA_SUCCESS; } /** Purpose ------- This is an extension of the merged dot product above by chunking the set of vectors v_i such that the data always fits into cache. It is equivalent to a matrix vecor product Vr where V contains few rows and many columns. The computation is the same: skp = ( <v_0,r>, <v_1,r>, .. ) Returns the vector skp. Arguments --------- @param[in] n int length of v_i and r @param[in] k int # vectors v_i @param[in] v magmaDouble_ptr v = (v_0 .. v_i.. v_k) @param[in] r magmaDouble_ptr r @param[in] d1 magmaDouble_ptr workspace @param[in] d2 magmaDouble_ptr workspace @param[out] skp magmaDouble_ptr vector[k] of scalar products (<v_i,r>...) @param[in] queue magma_queue_t Queue to execute in. @ingroup magmasparse_d ********************************************************************/ extern "C" magma_int_t magma_dgemvmdot_shfl( magma_int_t n, magma_int_t k, magmaDouble_ptr v, magmaDouble_ptr r, magmaDouble_ptr d1, magmaDouble_ptr d2, magmaDouble_ptr skp, magma_queue_t queue ) { if (k == 1) { // call CUBLAS dotc, we will never be faster double res = magma_ddot( n, v, 1, r, 1, queue ); magma_dsetvector( 1, &res, 1, skp, 1, queue ); } else if ( magma_getdevice_arch() < 300 ) { return magma_dgemvmdot( n, k, v, r, d1, d2, skp, queue ); } else { magma_dmdotc_shfl( n, k, v, r, d1, d2, skp, queue ); } return MAGMA_SUCCESS; }
4687e85221343904d30f1f1a0928b9033983d2a6.cu
/* -- MAGMA (version 2.5.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date January 2019 @generated from sparse/blas/zmdot_shfl.cu, normal z -> d, Wed Jan 2 14:18:53 2019 @author Moritz Kreutzer */ #include "magmasparse_internal.h" #include "magmasparse_d.h" #define BLOCK_SIZE 512 #define PRECISION_d #include <cuda.h> // for CUDA_VERSION #if (CUDA_VERSION <= 6000) // CUDA 6.5 adds Double precision version; here's an implementation for CUDA 6.0 and earlier. // from https://devblogs.nvidia.com/parallelforall/faster-parallel-reductions-kepler/ __device__ inline real_Double_t __shfl_down(real_Double_t var, unsigned int srcLane, int width=32) { int2 a = *reinterpret_cast<int2*>(&var); a.x = __shfl_down(a.x, srcLane, width); a.y = __shfl_down(a.y, srcLane, width); return *reinterpret_cast<double*>(&a); } #endif template<typename T> __inline__ __device__ T warpReduceSum(T val) { #if __CUDA_ARCH__ >= 300 #if __CUDACC_VER_MAJOR__ < 9 val += __shfl_down(val, 16); val += __shfl_down(val, 8); val += __shfl_down(val, 4); val += __shfl_down(val, 2); val += __shfl_down(val, 1); #else val += __shfl_down_sync(0xffffffff,val, 16); val += __shfl_down_sync(0xffffffff,val, 8); val += __shfl_down_sync(0xffffffff,val, 4); val += __shfl_down_sync(0xffffffff,val, 2); val += __shfl_down_sync(0xffffffff,val, 1); #endif #endif return val; } #ifdef PRECISION_z template<> __inline__ __device__ double warpReduceSum<double>(double val) { #if __CUDA_ARCH__ >= 300 int4 a = *reinterpret_cast<int4*>(&val); #if __CUDACC_VER_MAJOR__ < 9 a.x += __shfl_down(a.x, 16); a.y += __shfl_down(a.y, 16); a.z += __shfl_down(a.z, 16); a.w += __shfl_down(a.w, 16); a.x += __shfl_down(a.x, 8); a.y += __shfl_down(a.y, 8); a.z += __shfl_down(a.z, 8); a.w += __shfl_down(a.w, 8); a.x += __shfl_down(a.x, 4); a.y += __shfl_down(a.y, 4); a.z += __shfl_down(a.z, 4); a.w += __shfl_down(a.w, 4); a.x += __shfl_down(a.x, 2); a.y += __shfl_down(a.y, 2); a.z += __shfl_down(a.z, 2); a.w += __shfl_down(a.w, 2); a.x += __shfl_down(a.x, 1); a.y += __shfl_down(a.y, 1); a.z += __shfl_down(a.z, 1); a.w += __shfl_down(a.w, 1); #else a.x += __shfl_down_sync(0xffffffff,a.x, 16); a.y += __shfl_down_sync(0xffffffff,a.y, 16); a.z += __shfl_down_sync(0xffffffff,a.z, 16); a.w += __shfl_down_sync(0xffffffff,a.w, 16); a.x += __shfl_down_sync(0xffffffff,a.x, 8); a.y += __shfl_down_sync(0xffffffff,a.y, 8); a.z += __shfl_down_sync(0xffffffff,a.z, 8); a.w += __shfl_down_sync(0xffffffff,a.w, 8); a.x += __shfl_down_sync(0xffffffff,a.x, 4); a.y += __shfl_down_sync(0xffffffff,a.y, 4); a.z += __shfl_down_sync(0xffffffff,a.z, 4); a.w += __shfl_down_sync(0xffffffff,a.w, 4); a.x += __shfl_down_sync(0xffffffff,a.x, 2); a.y += __shfl_down_sync(0xffffffff,a.y, 2); a.z += __shfl_down_sync(0xffffffff,a.z, 2); a.w += __shfl_down_sync(0xffffffff,a.w, 2); a.x += __shfl_down_sync(0xffffffff,a.x, 1); a.y += __shfl_down_sync(0xffffffff,a.y, 1); a.z += __shfl_down_sync(0xffffffff,a.z, 1); a.w += __shfl_down_sync(0xffffffff,a.w, 1); #endif #endif return val; } #endif // PRECISION_z #ifdef PRECISION_c template<> __inline__ __device__ magmaFloatComplex warpReduceSum<magmaFloatComplex>(magmaFloatComplex val) { #if __CUDA_ARCH__ >= 300 float2 a = *reinterpret_cast<float2*>(&val); #if __CUDACC_VER_MAJOR__ < 9 a.x += __shfl_down(a.x, 16); a.y += __shfl_down(a.y, 16); a.x += __shfl_down(a.x, 8); a.y += __shfl_down(a.y, 8); a.x += __shfl_down(a.x, 4); a.y += __shfl_down(a.y, 4); a.x += __shfl_down(a.x, 2); a.y += __shfl_down(a.y, 2); a.x += __shfl_down(a.x, 
1); a.y += __shfl_down(a.y, 1); #else a.x += __shfl_down_sync(0xffffffff,a.x, 16); a.y += __shfl_down_sync(0xffffffff,a.y, 16); a.x += __shfl_down_sync(0xffffffff,a.x, 8); a.y += __shfl_down_sync(0xffffffff,a.y, 8); a.x += __shfl_down_sync(0xffffffff,a.x, 4); a.y += __shfl_down_sync(0xffffffff,a.y, 4); a.x += __shfl_down_sync(0xffffffff,a.x, 2); a.y += __shfl_down_sync(0xffffffff,a.y, 2); a.x += __shfl_down_sync(0xffffffff,a.x, 1); a.y += __shfl_down_sync(0xffffffff,a.y, 1); #endif #endif return val; } #endif // PRECISION_c template<typename T> __inline__ __device__ T blockReduceSum_1D(T val) { extern __shared__ T shared[]; // Shared mem for 32 partial sums int lane = threadIdx.x % warpSize; int wid = threadIdx.x / warpSize; val = warpReduceSum<T>(val); // Each warp performs partial reduction if (lane == 0) shared[wid]=val; // Write reduced value to shared memory __syncthreads(); // Wait for all partial reductions //read from shared memory only if that warp existed val = (threadIdx.x < blockDim.x / warpSize) ? shared[lane] : MAGMA_D_ZERO; if (wid == 0) val = warpReduceSum<T>(val); //Final reduce within first warp return val; } template<typename T> __inline__ __device__ T blockReduceSum(T val) { extern __shared__ T shared[]; // Shared mem for 32 partial sums int lane = threadIdx.x % warpSize; int wid = threadIdx.x / warpSize; val = warpReduceSum<T>(val); // Each warp performs partial reduction if (lane == 0) shared[threadIdx.y*32+wid]=val; // Write reduced value to shared memory __syncthreads(); // Wait for all partial reductions //read from shared memory only if that warp existed val = (threadIdx.x < blockDim.x / warpSize) ? shared[threadIdx.y*32+lane] : MAGMA_D_ZERO; if (wid == 0) val = warpReduceSum<T>(val); //Final reduce within first warp return val; } template<typename T> __global__ void deviceReduceKernel(const T * __restrict__ in, T * __restrict__ out, int N) { T sum = MAGMA_D_MAKE(0.0, 0.0); //reduce multiple elements per thread for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < N; i += blockDim.x * gridDim.x) { sum += in[i]; } sum = blockReduceSum<T>(sum); if (threadIdx.x == 0) out[blockIdx.x]=sum; } // dot product for multiple vectors using shuffle intrinsics and less shared memory __global__ void magma_dblockdot_kernel_shuffle( int n, int k, const double * __restrict__ v, const double * __restrict__ r, double * __restrict__ vtmp) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = threadIdx.y; double tmp; if (i < n) { tmp = v[i+j*n] * r[i]; } else { tmp = MAGMA_D_ZERO; } tmp = blockReduceSum(tmp); if (threadIdx.x == 0 ){ vtmp[ blockIdx.x+j*gridDim.x ] = tmp; } } // dot product for multiple vectors using shuffle intrinsics and less shared memory __global__ void magma_dblockdot_kernel_shuffle_1dblock( int n, int k, const double * __restrict__ v, const double * __restrict__ r, double * __restrict__ vtmp) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j; for (j=0; j < k; j++) { double tmp; if (i < n) { tmp = v[i+j*n] * r[i]; } else { tmp = MAGMA_D_ZERO; } tmp = blockReduceSum_1D(tmp); if (threadIdx.x == 0 ){ vtmp[ blockIdx.x+j*gridDim.x ] = tmp; } } } /** Purpose ------- Computes the scalar product of a set of vectors v_i such that skp = ( <v_0,r>, <v_1,r>, .. ) Returns the vector skp. Arguments --------- @param[in] n int length of v_i and r @param[in] k int # vectors v_i @param[in] v magmaDouble_ptr v = (v_0 .. v_i.. 
v_k) @param[in] r magmaDouble_ptr r @param[in] d1 magmaDouble_ptr workspace @param[in] d2 magmaDouble_ptr workspace @param[out] skp magmaDouble_ptr vector[k] of scalar products (<v_i,r>...) @param[in] queue magma_queue_t Queue to execute in. @ingroup magmasparse_dblas ********************************************************************/ extern "C" magma_int_t magma_dmdotc_shfl( magma_int_t n, magma_int_t k, magmaDouble_ptr v, magmaDouble_ptr r, magmaDouble_ptr d1, magmaDouble_ptr d2, magmaDouble_ptr skp, magma_queue_t queue ) { if ( magma_getdevice_arch() < 300 ) { return magma_dmdotc( n, k, v, r, d1, d2, skp, queue ); } else if (1) { // 1D block kernel seems to be always faster dim3 block( BLOCK_SIZE ); dim3 grid( magma_ceildiv( n, block.x ) ); magma_dblockdot_kernel_shuffle_1dblock<<< grid, block, 32*sizeof(double), queue->cuda_stream() >>>( n, k, v, r, d1 ); int j; for (j=0; j < k; j++) { deviceReduceKernel<double> <<<1, 1024, 32*sizeof(double), queue->cuda_stream()>>>(d1+grid.x*j, skp+j, grid.x); } } else { dim3 block( magma_roundup( magma_ceildiv(BLOCK_SIZE, k), 32 ), k ); while (block.x*block.y > 1024) { block.x -= 32; } dim3 grid( magma_ceildiv( n, block.x ) ); magma_dblockdot_kernel_shuffle<<< grid, block, 32*k*sizeof(double), queue->cuda_stream() >>>( n, k, v, r, d1 ); int j; for (j=0; j < k; j++) { deviceReduceKernel<double> <<<1, 1024, 32*sizeof(double), queue->cuda_stream()>>>(d1+grid.x*j, skp+j, grid.x); } } return MAGMA_SUCCESS; } /** Purpose ------- This is an extension of the merged dot product above by chunking the set of vectors v_i such that the data always fits into cache. It is equivalent to a matrix vecor product Vr where V contains few rows and many columns. The computation is the same: skp = ( <v_0,r>, <v_1,r>, .. ) Returns the vector skp. Arguments --------- @param[in] n int length of v_i and r @param[in] k int # vectors v_i @param[in] v magmaDouble_ptr v = (v_0 .. v_i.. v_k) @param[in] r magmaDouble_ptr r @param[in] d1 magmaDouble_ptr workspace @param[in] d2 magmaDouble_ptr workspace @param[out] skp magmaDouble_ptr vector[k] of scalar products (<v_i,r>...) @param[in] queue magma_queue_t Queue to execute in. @ingroup magmasparse_d ********************************************************************/ extern "C" magma_int_t magma_dgemvmdot_shfl( magma_int_t n, magma_int_t k, magmaDouble_ptr v, magmaDouble_ptr r, magmaDouble_ptr d1, magmaDouble_ptr d2, magmaDouble_ptr skp, magma_queue_t queue ) { if (k == 1) { // call CUBLAS dotc, we will never be faster double res = magma_ddot( n, v, 1, r, 1, queue ); magma_dsetvector( 1, &res, 1, skp, 1, queue ); } else if ( magma_getdevice_arch() < 300 ) { return magma_dgemvmdot( n, k, v, r, d1, d2, skp, queue ); } else { magma_dmdotc_shfl( n, k, v, r, d1, d2, skp, queue ); } return MAGMA_SUCCESS; }
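// --- Added note (commentary, not part of the original corpus entry) ---------
// The dot-product kernels above rely on warp-shuffle reductions: each warp
// folds its 32 partial products without touching shared memory, and only one
// value per warp is written out. A compact equivalent of the unrolled
// warpReduceSum, sketched with the CUDA 9+ *_sync intrinsics:
__device__ inline float warp_reduce_sum(float val)
{
    // halve the number of active lanes each step; lane 0 ends up with the sum
    for (int offset = 16; offset > 0; offset >>= 1)
        val += __shfl_down_sync(0xffffffff, val, offset);
    return val;
}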
905cae721e35e548d31ace71673e090d78a4af74.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include "paddle/phi/kernels/sync_batch_norm_kernel.h" #include "paddle/phi/backends/gpu/gpu_context.h" #include "paddle/phi/core/kernel_registry.h" #include "paddle/phi/kernels/gpu/sync_batch_norm_utils.h" namespace phi { template <typename T, typename Context> void SyncBatchNormKernel(const Context &ctx, const DenseTensor &x, const DenseTensor &scale, const DenseTensor &bias, const DenseTensor &mean, const DenseTensor &variance, float momentum, float epsilon_f, const std::string &data_layout_str, bool is_test, bool use_global_stats, bool trainable_statistics, bool fuse_with_relu, DenseTensor *y, DenseTensor *mean_out, DenseTensor *variance_out, DenseTensor *saved_mean, DenseTensor *saved_variance, DenseTensor *reserve_space) { PADDLE_ENFORCE_EQ(use_global_stats, false, phi::errors::InvalidArgument( "sync_batch_norm doesn't support " "to set use_global_stats True. Please use batch_norm " "in this case.")); double epsilon = epsilon_f; const bool trainable_stats = trainable_statistics; const DataLayout layout = paddle::framework::StringToDataLayout(data_layout_str); bool test_mode = is_test && (!trainable_statistics); const auto &x_dims = x.dims(); PADDLE_ENFORCE_GE(x_dims.size(), 2, phi::errors::InvalidArgument( "The Input dim size should be larger than 1.")); PADDLE_ENFORCE_LE(x_dims.size(), 5, phi::errors::InvalidArgument( "The Input dim size should be less than 6.")); int N, C, H, W, D; funcs::ExtractNCWHD(x_dims, layout, &N, &C, &H, &W, &D); int x_numel = x.numel(); const T *x_d = x.template data<T>(); const auto *s_d = scale.template data<BatchNormParamType<T>>(); const auto *b_d = bias.template data<BatchNormParamType<T>>(); T *y_d = ctx.template Alloc<T>(y); const BatchNormParamType<T> *mean_data = nullptr; const BatchNormParamType<T> *var_data = nullptr; auto stream = ctx.stream(); const int block = 512; int max_threads = ctx.GetMaxPhysicalThreadCount(); paddle::memory::AllocationPtr alloc_ptr{nullptr}; if (test_mode) { mean_data = mean.template data<BatchNormParamType<T>>(); var_data = variance.template data<BatchNormParamType<T>>(); } else { // x, x^2, 1, here 1 is used to calc device num // device num also can be got from platform::DeviceContextPool const int bytes = (C * 2 + 1) * sizeof(BatchNormParamType<T>); alloc_ptr = paddle::memory::Alloc(ctx, bytes); auto *stats = reinterpret_cast<BatchNormParamType<T> *>(alloc_ptr->ptr()); const int threads = 256; int grid = ::min(C, (max_threads + threads - 1) / threads); if (layout == paddle::framework::DataLayout::kNCHW) { hipLaunchKernelGGL(( KeLocalStats<T, threads, paddle::framework::DataLayout::kNCHW>) , dim3(grid), dim3(threads), 0, stream, x_d, N, H * W * D, C, stats); } else { hipLaunchKernelGGL(( KeLocalStats<T, threads, paddle::framework::DataLayout::kNHWC>) , dim3(grid), dim3(threads), 0, stream, x_d, N, H * W * D, C, stats); } #if 
defined(PADDLE_WITH_NCCL) || defined(PADDLE_WITH_RCCL) auto *comm = ctx.nccl_comm(); if (comm) { int dtype = paddle::platform::ToNCCLDataType( paddle::framework::TransToProtoVarType(mean_out->dtype())); // In-place operation PADDLE_ENFORCE_GPU_SUCCESS(paddle::platform::dynload::ncclAllReduce( stats, stats, 2 * C + 1, static_cast<ncclDataType_t>(dtype), ncclSum, comm, stream)); } #endif auto *est_mean_data = ctx.template Alloc<BatchNormParamType<T>>(mean_out); auto *est_var_data = ctx.template Alloc<BatchNormParamType<T>>(variance_out); auto *sv_mean_data = ctx.template Alloc<BatchNormParamType<T>>(saved_mean); auto *sv_inv_var_data = ctx.template Alloc<BatchNormParamType<T>>(saved_variance); // Note, Input('Mean')/Input('Variance') share variable with // Output('MeanOut')/Output('VarianceOut') hipLaunchKernelGGL(( KeSyncAndMovingStats<T>) , dim3((C + block - 1) / block), dim3(block), 0, stream, stats, stats + C, stats + 2 * C, C, momentum, epsilon, sv_mean_data, sv_inv_var_data, est_mean_data, est_var_data); mean_data = sv_mean_data; var_data = stats + C; } int grid2 = (::min(x_numel, max_threads) + block - 1) / block; if (layout == paddle::framework::DataLayout::kNCHW) { hipLaunchKernelGGL(( KeNormAffine<T, paddle::framework::DataLayout::kNCHW>) , dim3(grid2), dim3(block), 0, stream, x_d, s_d, b_d, mean_data, var_data, epsilon, C, H * W * D, x_numel, y_d); } else { hipLaunchKernelGGL(( KeNormAffine<T, paddle::framework::DataLayout::kNHWC>) , dim3(grid2), dim3(block), 0, stream, x_d, s_d, b_d, mean_data, var_data, epsilon, C, H * W * D, x_numel, y_d); } } } // namespace phi #ifdef PADDLE_WITH_HIP PD_REGISTER_KERNEL(sync_batch_norm, GPU, ALL_LAYOUT, phi::SyncBatchNormKernel, float, phi::dtype::float16) { if (kernel_key.dtype() == phi::DataType::FLOAT16) { kernel->InputAt(1).SetDataType(phi::DataType::FLOAT32); kernel->InputAt(2).SetDataType(phi::DataType::FLOAT32); kernel->InputAt(3).SetDataType(phi::DataType::FLOAT32); kernel->InputAt(4).SetDataType(phi::DataType::FLOAT32); kernel->OutputAt(1).SetDataType(phi::DataType::FLOAT32); kernel->OutputAt(2).SetDataType(phi::DataType::FLOAT32); kernel->OutputAt(3).SetDataType(phi::DataType::FLOAT32); kernel->OutputAt(4).SetDataType(phi::DataType::FLOAT32); } } #else PD_REGISTER_KERNEL(sync_batch_norm, GPU, ALL_LAYOUT, phi::SyncBatchNormKernel, float, double, phi::dtype::float16) { if (kernel_key.dtype() == phi::DataType::FLOAT16) { kernel->InputAt(1).SetDataType(phi::DataType::FLOAT32); kernel->InputAt(2).SetDataType(phi::DataType::FLOAT32); kernel->InputAt(3).SetDataType(phi::DataType::FLOAT32); kernel->InputAt(4).SetDataType(phi::DataType::FLOAT32); kernel->OutputAt(1).SetDataType(phi::DataType::FLOAT32); kernel->OutputAt(2).SetDataType(phi::DataType::FLOAT32); kernel->OutputAt(3).SetDataType(phi::DataType::FLOAT32); kernel->OutputAt(4).SetDataType(phi::DataType::FLOAT32); } } #endif
905cae721e35e548d31ace71673e090d78a4af74.cu
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include "paddle/phi/kernels/sync_batch_norm_kernel.h" #include "paddle/phi/backends/gpu/gpu_context.h" #include "paddle/phi/core/kernel_registry.h" #include "paddle/phi/kernels/gpu/sync_batch_norm_utils.h" namespace phi { template <typename T, typename Context> void SyncBatchNormKernel(const Context &ctx, const DenseTensor &x, const DenseTensor &scale, const DenseTensor &bias, const DenseTensor &mean, const DenseTensor &variance, float momentum, float epsilon_f, const std::string &data_layout_str, bool is_test, bool use_global_stats, bool trainable_statistics, bool fuse_with_relu, DenseTensor *y, DenseTensor *mean_out, DenseTensor *variance_out, DenseTensor *saved_mean, DenseTensor *saved_variance, DenseTensor *reserve_space) { PADDLE_ENFORCE_EQ(use_global_stats, false, phi::errors::InvalidArgument( "sync_batch_norm doesn't support " "to set use_global_stats True. Please use batch_norm " "in this case.")); double epsilon = epsilon_f; const bool trainable_stats = trainable_statistics; const DataLayout layout = paddle::framework::StringToDataLayout(data_layout_str); bool test_mode = is_test && (!trainable_statistics); const auto &x_dims = x.dims(); PADDLE_ENFORCE_GE(x_dims.size(), 2, phi::errors::InvalidArgument( "The Input dim size should be larger than 1.")); PADDLE_ENFORCE_LE(x_dims.size(), 5, phi::errors::InvalidArgument( "The Input dim size should be less than 6.")); int N, C, H, W, D; funcs::ExtractNCWHD(x_dims, layout, &N, &C, &H, &W, &D); int x_numel = x.numel(); const T *x_d = x.template data<T>(); const auto *s_d = scale.template data<BatchNormParamType<T>>(); const auto *b_d = bias.template data<BatchNormParamType<T>>(); T *y_d = ctx.template Alloc<T>(y); const BatchNormParamType<T> *mean_data = nullptr; const BatchNormParamType<T> *var_data = nullptr; auto stream = ctx.stream(); const int block = 512; int max_threads = ctx.GetMaxPhysicalThreadCount(); paddle::memory::AllocationPtr alloc_ptr{nullptr}; if (test_mode) { mean_data = mean.template data<BatchNormParamType<T>>(); var_data = variance.template data<BatchNormParamType<T>>(); } else { // x, x^2, 1, here 1 is used to calc device num // device num also can be got from platform::DeviceContextPool const int bytes = (C * 2 + 1) * sizeof(BatchNormParamType<T>); alloc_ptr = paddle::memory::Alloc(ctx, bytes); auto *stats = reinterpret_cast<BatchNormParamType<T> *>(alloc_ptr->ptr()); const int threads = 256; int grid = std::min(C, (max_threads + threads - 1) / threads); if (layout == paddle::framework::DataLayout::kNCHW) { KeLocalStats<T, threads, paddle::framework::DataLayout::kNCHW> <<<grid, threads, 0, stream>>>(x_d, N, H * W * D, C, stats); } else { KeLocalStats<T, threads, paddle::framework::DataLayout::kNHWC> <<<grid, threads, 0, stream>>>(x_d, N, H * W * D, C, stats); } #if defined(PADDLE_WITH_NCCL) || defined(PADDLE_WITH_RCCL) auto *comm = ctx.nccl_comm(); if (comm) { int dtype = paddle::platform::ToNCCLDataType( 
paddle::framework::TransToProtoVarType(mean_out->dtype())); // In-place operation PADDLE_ENFORCE_GPU_SUCCESS(paddle::platform::dynload::ncclAllReduce( stats, stats, 2 * C + 1, static_cast<ncclDataType_t>(dtype), ncclSum, comm, stream)); } #endif auto *est_mean_data = ctx.template Alloc<BatchNormParamType<T>>(mean_out); auto *est_var_data = ctx.template Alloc<BatchNormParamType<T>>(variance_out); auto *sv_mean_data = ctx.template Alloc<BatchNormParamType<T>>(saved_mean); auto *sv_inv_var_data = ctx.template Alloc<BatchNormParamType<T>>(saved_variance); // Note, Input('Mean')/Input('Variance') share variable with // Output('MeanOut')/Output('VarianceOut') KeSyncAndMovingStats<T> <<<(C + block - 1) / block, block, 0, stream>>>(stats, stats + C, stats + 2 * C, C, momentum, epsilon, sv_mean_data, sv_inv_var_data, est_mean_data, est_var_data); mean_data = sv_mean_data; var_data = stats + C; } int grid2 = (std::min(x_numel, max_threads) + block - 1) / block; if (layout == paddle::framework::DataLayout::kNCHW) { KeNormAffine<T, paddle::framework::DataLayout::kNCHW> <<<grid2, block, 0, stream>>>(x_d, s_d, b_d, mean_data, var_data, epsilon, C, H * W * D, x_numel, y_d); } else { KeNormAffine<T, paddle::framework::DataLayout::kNHWC> <<<grid2, block, 0, stream>>>(x_d, s_d, b_d, mean_data, var_data, epsilon, C, H * W * D, x_numel, y_d); } } } // namespace phi #ifdef PADDLE_WITH_HIP PD_REGISTER_KERNEL(sync_batch_norm, GPU, ALL_LAYOUT, phi::SyncBatchNormKernel, float, phi::dtype::float16) { if (kernel_key.dtype() == phi::DataType::FLOAT16) { kernel->InputAt(1).SetDataType(phi::DataType::FLOAT32); kernel->InputAt(2).SetDataType(phi::DataType::FLOAT32); kernel->InputAt(3).SetDataType(phi::DataType::FLOAT32); kernel->InputAt(4).SetDataType(phi::DataType::FLOAT32); kernel->OutputAt(1).SetDataType(phi::DataType::FLOAT32); kernel->OutputAt(2).SetDataType(phi::DataType::FLOAT32); kernel->OutputAt(3).SetDataType(phi::DataType::FLOAT32); kernel->OutputAt(4).SetDataType(phi::DataType::FLOAT32); } } #else PD_REGISTER_KERNEL(sync_batch_norm, GPU, ALL_LAYOUT, phi::SyncBatchNormKernel, float, double, phi::dtype::float16) { if (kernel_key.dtype() == phi::DataType::FLOAT16) { kernel->InputAt(1).SetDataType(phi::DataType::FLOAT32); kernel->InputAt(2).SetDataType(phi::DataType::FLOAT32); kernel->InputAt(3).SetDataType(phi::DataType::FLOAT32); kernel->InputAt(4).SetDataType(phi::DataType::FLOAT32); kernel->OutputAt(1).SetDataType(phi::DataType::FLOAT32); kernel->OutputAt(2).SetDataType(phi::DataType::FLOAT32); kernel->OutputAt(3).SetDataType(phi::DataType::FLOAT32); kernel->OutputAt(4).SetDataType(phi::DataType::FLOAT32); } } #endif
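// --- Added note (commentary, not part of the original corpus entry) ---------
// Rough reading of the statistics buffer used above (the actual combine is
// done by KeSyncAndMovingStats, defined in sync_batch_norm_utils.h and not
// shown here): the (2*C + 1)-float buffer carries per-channel x statistics,
// per-channel x^2 statistics, and a trailing 1 contributed by each device, so
// after ncclAllReduce with ncclSum the last slot equals the device count and
// the global moments follow the usual identity
//   mean = E[x],   var = E[x^2] - E[x]^2,   inv_std = 1 / sqrt(var + epsilon)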
cf230af9f58b2ba659cf81c3df46fbe9bb9fcd60.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"

__global__ void sinwave_vbo_kernel(float4 *pos, float3 *posDir, float3 *posDirInitial, float3 *posGravity, float *posLife, float *posFade, unsigned int width, unsigned int height, float velocity, float animTime)
{
    unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
    unsigned int y = blockIdx.y * blockDim.y + threadIdx.y;
    long index = y * width + x;

    pos[index].x += (posDir[index].x / (velocity * 1000))*4;
    pos[index].y += (posDir[index].y / (velocity * 1000))*2;
    pos[index].z += (posDir[index].z / (velocity * 1000))*1;
    pos[index].w = 1.0; //sin(4.0 * posDir[index].z * animTime) * cos(4.0 * posDir[index].y * animTime) * 0.5f;

    posDir[index].x += posGravity[index].x;
    posDir[index].y += posGravity[index].y;
    posDir[index].z += posGravity[index].z;

    // pos[index].x < bx && pos[index].y < by && pos[index].z < bz
    // compare against the bounding box and, once outside, end the particle's life
    posLife[index] -= posFade[index];

    /*if(pos[index].z < 0.0) { pos[index].z = 0.0; }*/

    if (posLife[index] < -30.0 /*pos[index].y <= -50.0*/)
    {
        posLife[index] = 2.0;
        pos[index] = make_float4(0.0, 0.0, 0.0, 1.0);
        posDir[index] = posDirInitial[index];
        /*int co1 = (int) (pos[index].x / 50.0); float rem1 = pos[index].x - (co1 * 50.0); int co2 = (int) (pos[index].y / 50.0); float rem2 = pos[index].y - (co2 * 50.0); int co3 = (int)(pos[index].z / 50.0); float rem3 = pos[index].z - (co3 * 50.0); posDir[index].x = (float)((rem1) - 26.0)*10.0; posDir[index].y = (float)((rem2) - 25.0)*10.0; posDir[index].z = (float)((rem3) - 25.0)*10.0;*/
    }

    //pos[y*width+x] = make_float4(u,w,v,1.0);
    return;
}

//Round a / b to nearest higher integer value
int cuda_iDivUp(int a, int b)
{
    return (a + (b - 1)) / b;
}

void launchCudaKernel(float4* pos, float3 *posDir, float3 *posDirInitial, float3 *posGravity, float *posLife, float *posFade, unsigned int width, unsigned int height, float velocity, float animTime)
{
    dim3 block(8, 8, 1);
    dim3 grid(cuda_iDivUp(width, block.x), cuda_iDivUp(height, block.y), 1.0);

    hipLaunchKernelGGL(( sinwave_vbo_kernel) , dim3(grid), dim3(block) , 0, 0, pos, posDir, posDirInitial, posGravity, posLife, posFade, width, height, velocity, animTime);
}
cf230af9f58b2ba659cf81c3df46fbe9bb9fcd60.cu
__global__ void sinwave_vbo_kernel(float4 *pos, float3 *posDir, float3 *posDirInitial, float3 *posGravity, float *posLife, float *posFade, unsigned int width, unsigned int height, float velocity, float animTime)
{
    unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
    unsigned int y = blockIdx.y * blockDim.y + threadIdx.y;
    long index = y * width + x;

    pos[index].x += (posDir[index].x / (velocity * 1000))*4;
    pos[index].y += (posDir[index].y / (velocity * 1000))*2;
    pos[index].z += (posDir[index].z / (velocity * 1000))*1;
    pos[index].w = 1.0; //sin(4.0 * posDir[index].z * animTime) * cos(4.0 * posDir[index].y * animTime) * 0.5f;

    posDir[index].x += posGravity[index].x;
    posDir[index].y += posGravity[index].y;
    posDir[index].z += posGravity[index].z;

    // pos[index].x < bx && pos[index].y < by && pos[index].z < bz
    // compare against the bounding box and, once outside, end the particle's life
    posLife[index] -= posFade[index];

    /*if(pos[index].z < 0.0) { pos[index].z = 0.0; }*/

    if (posLife[index] < -30.0 /*pos[index].y <= -50.0*/)
    {
        posLife[index] = 2.0;
        pos[index] = make_float4(0.0, 0.0, 0.0, 1.0);
        posDir[index] = posDirInitial[index];
        /*int co1 = (int) (pos[index].x / 50.0); float rem1 = pos[index].x - (co1 * 50.0); int co2 = (int) (pos[index].y / 50.0); float rem2 = pos[index].y - (co2 * 50.0); int co3 = (int)(pos[index].z / 50.0); float rem3 = pos[index].z - (co3 * 50.0); posDir[index].x = (float)((rem1) - 26.0)*10.0; posDir[index].y = (float)((rem2) - 25.0)*10.0; posDir[index].z = (float)((rem3) - 25.0)*10.0;*/
    }

    //pos[y*width+x] = make_float4(u,w,v,1.0);
    return;
}

//Round a / b to nearest higher integer value
int cuda_iDivUp(int a, int b)
{
    return (a + (b - 1)) / b;
}

void launchCudaKernel(float4* pos, float3 *posDir, float3 *posDirInitial, float3 *posGravity, float *posLife, float *posFade, unsigned int width, unsigned int height, float velocity, float animTime)
{
    dim3 block(8, 8, 1);
    dim3 grid(cuda_iDivUp(width, block.x), cuda_iDivUp(height, block.y), 1.0);

    sinwave_vbo_kernel <<< grid, block >>> (pos, posDir, posDirInitial, posGravity, posLife, posFade, width, height, velocity, animTime);
}
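// --- Added note (commentary, not part of the original corpus entry) ---------
// sinwave_vbo_kernel indexes pos[y * width + x] without a bounds check, so
// with the rounded-up grid from cuda_iDivUp it only stays in range when width
// and height are multiples of the 8x8 block. A defensive variant would start
// with a guard such as:
//   if (x >= width || y >= height) return;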
e2518ddbf6efa5b3be2cf5dec25b68386c787cb5.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "DenseLayer.cuh" DenseLayer::DenseLayer( std::string name_, LayerShape shape_, hipblasHandle_t hCublas_, float filterScale) : hCublas(hCublas_), ones(NULL) { this->name = name_; this->shape = shape_; //assert(shape.in_nrn_h == 1); //assert(shape.in_nrn_w == 1); //assert(shape.out_nrn_h == 1); //assert(shape.out_nrn_w == 1); x.init(shape.batch_size, shape.in_nrns, shape.in_nrn_h, shape.in_nrn_w); w.init(shape.out_nrns * shape.out_nrn_h * shape.out_nrn_w, shape.in_nrns * shape.in_nrn_h * shape.in_nrn_w, 1, 1); b.init(1, shape.out_nrns * shape.out_nrn_h * shape.out_nrn_w, 1, 1); dx = x; dw = w; db = b; if (filterScale == 0.0f) { w.normalDistribution(1.0f / sqrtf((float)(w.C * w.H * w.W))); } else { w.normalDistribution(filterScale); } dw.fill(0.0f); x.fill(0.0f); dx.fill(0.0f); b.fill(0.0f); db.fill(0.0f); initOnes(); } __global__ void FillOnes(float* vec, int size) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx >= size) return; vec[idx] = 1.0f; } void DenseLayer::initOnes() { int size = shape.batch_size; CHECK_CUDA(hipMallocManaged(&ones, size * sizeof(float))); dim3 block_size(128); dim3 num_of_blocks((size + block_size.x - 1) / block_size.x); hipLaunchKernelGGL(( FillOnes), dim3(num_of_blocks), dim3(block_size), 0, 0, ones, size); CHECK_CUDA(hipGetLastError()); } void DenseLayer::init() { //assert(y->shape.in_nrn_h == 1); //assert(y->shape.in_nrn_w == 1); //assert(y->shape.out_nrn_h == 1); //assert(y->shape.out_nrn_w == 1); assert(y->C * y->H * y->W == w.N); } void DenseLayer::forward() { // y = w^T * x const int M = shape.out_nrns * shape.out_nrn_h * shape.out_nrn_w; const int N = shape.batch_size; const int K = shape.in_nrns * shape.in_nrn_h * shape.in_nrn_w; CHECK_CUBLAS(hipblasSgemm(hCublas, HIPBLAS_OP_T, HIPBLAS_OP_N, M, N, K, alpha, w.data, K, x.data, K, beta, y->data, M)); // y = y + b * ones CHECK_CUBLAS(hipblasSgemm(hCublas, HIPBLAS_OP_N, HIPBLAS_OP_N, M, N, 1, alpha, b.data, M, ones, 1, alpha, y->data, M)); //CHECK_CUDA(hipDeviceSynchronize()); //y->show(name.c_str()); } void DenseLayer::backward(float learning_rate, bool last) { // dw = x * dy^T int M = shape.in_nrns * shape.in_nrn_h * shape.in_nrn_w; int N = shape.out_nrns * shape.out_nrn_h * shape.out_nrn_w; int K = shape.batch_size; CHECK_CUBLAS(hipblasSgemm(hCublas, HIPBLAS_OP_N, HIPBLAS_OP_T, M, N, K, alpha, x.data, M, dy->data, N, beta, dw.data, M)); // db = dy * ones M = shape.out_nrns * shape.out_nrn_h * shape.out_nrn_w; N = shape.batch_size; CHECK_CUBLAS(hipblasSgemv(hCublas, HIPBLAS_OP_N, M, N, alpha, dy->data, M, ones, 1, beta, db.data, 1)); if (!last) { // dx = w * dy M = shape.in_nrns * shape.in_nrn_h * shape.in_nrn_w; N = shape.batch_size; K = shape.out_nrns * shape.out_nrn_h * shape.out_nrn_w; CHECK_CUBLAS(hipblasSgemm(hCublas, HIPBLAS_OP_N, HIPBLAS_OP_N, M, N, K, alpha, w.data, M, dy->data, K, beta, dx.data, M)); } // Update weights and bias float learn_alpha = -learning_rate; // w = -lr * dw + w CHECK_CUBLAS(hipblasSaxpy(hCublas, dw.size(), &learn_alpha, dw.data, 1, w.data, 1)); // b = -lr * db + b CHECK_CUBLAS(hipblasSaxpy(hCublas, db.size(), &learn_alpha, db.data, 1, b.data, 1)); } DenseLayer::~DenseLayer() { if (ones) { CHECK_CUDA(hipFree(ones)); } }
e2518ddbf6efa5b3be2cf5dec25b68386c787cb5.cu
#include "DenseLayer.cuh" DenseLayer::DenseLayer( std::string name_, LayerShape shape_, cublasHandle_t hCublas_, float filterScale) : hCublas(hCublas_), ones(NULL) { this->name = name_; this->shape = shape_; //assert(shape.in_nrn_h == 1); //assert(shape.in_nrn_w == 1); //assert(shape.out_nrn_h == 1); //assert(shape.out_nrn_w == 1); x.init(shape.batch_size, shape.in_nrns, shape.in_nrn_h, shape.in_nrn_w); w.init(shape.out_nrns * shape.out_nrn_h * shape.out_nrn_w, shape.in_nrns * shape.in_nrn_h * shape.in_nrn_w, 1, 1); b.init(1, shape.out_nrns * shape.out_nrn_h * shape.out_nrn_w, 1, 1); dx = x; dw = w; db = b; if (filterScale == 0.0f) { w.normalDistribution(1.0f / sqrtf((float)(w.C * w.H * w.W))); } else { w.normalDistribution(filterScale); } dw.fill(0.0f); x.fill(0.0f); dx.fill(0.0f); b.fill(0.0f); db.fill(0.0f); initOnes(); } __global__ void FillOnes(float* vec, int size) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx >= size) return; vec[idx] = 1.0f; } void DenseLayer::initOnes() { int size = shape.batch_size; CHECK_CUDA(cudaMallocManaged(&ones, size * sizeof(float))); dim3 block_size(128); dim3 num_of_blocks((size + block_size.x - 1) / block_size.x); FillOnes<<<num_of_blocks, block_size>>>(ones, size); CHECK_CUDA(cudaGetLastError()); } void DenseLayer::init() { //assert(y->shape.in_nrn_h == 1); //assert(y->shape.in_nrn_w == 1); //assert(y->shape.out_nrn_h == 1); //assert(y->shape.out_nrn_w == 1); assert(y->C * y->H * y->W == w.N); } void DenseLayer::forward() { // y = w^T * x const int M = shape.out_nrns * shape.out_nrn_h * shape.out_nrn_w; const int N = shape.batch_size; const int K = shape.in_nrns * shape.in_nrn_h * shape.in_nrn_w; CHECK_CUBLAS(cublasSgemm_v2(hCublas, CUBLAS_OP_T, CUBLAS_OP_N, M, N, K, alpha, w.data, K, x.data, K, beta, y->data, M)); // y = y + b * ones CHECK_CUBLAS(cublasSgemm_v2(hCublas, CUBLAS_OP_N, CUBLAS_OP_N, M, N, 1, alpha, b.data, M, ones, 1, alpha, y->data, M)); //CHECK_CUDA(cudaDeviceSynchronize()); //y->show(name.c_str()); } void DenseLayer::backward(float learning_rate, bool last) { // dw = x * dy^T int M = shape.in_nrns * shape.in_nrn_h * shape.in_nrn_w; int N = shape.out_nrns * shape.out_nrn_h * shape.out_nrn_w; int K = shape.batch_size; CHECK_CUBLAS(cublasSgemm_v2(hCublas, CUBLAS_OP_N, CUBLAS_OP_T, M, N, K, alpha, x.data, M, dy->data, N, beta, dw.data, M)); // db = dy * ones M = shape.out_nrns * shape.out_nrn_h * shape.out_nrn_w; N = shape.batch_size; CHECK_CUBLAS(cublasSgemv_v2(hCublas, CUBLAS_OP_N, M, N, alpha, dy->data, M, ones, 1, beta, db.data, 1)); if (!last) { // dx = w * dy M = shape.in_nrns * shape.in_nrn_h * shape.in_nrn_w; N = shape.batch_size; K = shape.out_nrns * shape.out_nrn_h * shape.out_nrn_w; CHECK_CUBLAS(cublasSgemm_v2(hCublas, CUBLAS_OP_N, CUBLAS_OP_N, M, N, K, alpha, w.data, M, dy->data, K, beta, dx.data, M)); } // Update weights and bias float learn_alpha = -learning_rate; // w = -lr * dw + w CHECK_CUBLAS(cublasSaxpy_v2(hCublas, dw.size(), &learn_alpha, dw.data, 1, w.data, 1)); // b = -lr * db + b CHECK_CUBLAS(cublasSaxpy_v2(hCublas, db.size(), &learn_alpha, db.data, 1, b.data, 1)); } DenseLayer::~DenseLayer() { if (ones) { CHECK_CUDA(cudaFree(ones)); } }
3487174cf71db3736a286e50a0f2f29364165c30.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // // VecField3DCUDA.cu // // Copyright (C) 2013 by University of Stuttgart (VISUS). // All rights reserved. // #include "helper_cuda.h" #include "helper_functions.h" #include "helper_math.h" // Shut up eclipse syntax error highlighting #ifdef __CDT_PARSER__ #define __device__ #define __global__ #define __shared__ #define __constant__ #define __host__ #endif // Toggle performance measurement and respective messages #define USE_TIMER __constant__ __device__ uint3 dim; __constant__ __device__ float3 org; __constant__ __device__ float3 maxCoord; __constant__ __device__ float3 spacing; __constant__ __device__ float streamlinesStep; __constant__ __device__ uint streamlinesStepCnt; __constant__ __device__ uint nPos; __constant__ __device__ uint maxStackSize; /* isValidGridPos_D */ inline __device__ bool isValidGridPos_D(float3 pos) { return (pos.x < maxCoord.x)&& (pos.y < maxCoord.y)&& (pos.z < maxCoord.z)&& (pos.x >= org.x)&& (pos.y >= org.y)&& (pos.z >= org.z); } /* sampleVecFieldLin_D */ inline __device__ float3 sampleVecFieldLin_D(float3 v0, float3 v1, float alpha) { return v0+alpha*(v1-v0); } /* sampleVecFieldBilin_D */ inline __device__ float3 sampleVecFieldBilin_D(float3 v0, float3 v1, float3 v2, float3 v3, float alpha, float beta) { return sampleVecFieldLin_D(sampleVecFieldLin_D(v0, v1, alpha), sampleVecFieldLin_D(v2, v3, alpha), beta); } /* sampleVecFieldTrilin_D */ inline __device__ float3 sampleVecFieldTrilin_D(float3 v[8], float alpha, float beta, float gamma) { return sampleVecFieldLin_D( sampleVecFieldBilin_D(v[0], v[1], v[2], v[3], alpha, beta), sampleVecFieldBilin_D(v[4], v[5], v[6], v[7], alpha, beta), gamma); /* float3 a, b, c, d, e, f, g, h; a = v[0]; b = v[1] - v[0]; c = v[2] - v[0]; d = v[3] - v[1] - v[2] + v[0]; e = v[4] - v[0]; f = v[5] - v[1] - v[4] + v[0]; g = v[6] - v[2] - v[4] + v[0]; h = v[7] - v[3] - v[5] - v[6] + v[1] + v[2] + v[4] - v[0]; return a + b*alpha + c*beta + d*alpha*beta + e*gamma + f*alpha*gamma + g*beta*gamma + h*alpha*beta*gamma; */ } /* sampleVecFieldAtTrilinNorm_D */ inline __device__ float3 sampleVecFieldAtTrilinNorm_D(float3 pos, const float3 *vecField_D) { float3 f; uint3 c; // Get id of the cell containing the given position and interpolation // coefficients f.x = (pos.x-org.x)/spacing.x; f.y = (pos.y-org.y)/spacing.y; f.z = (pos.z-org.z)/spacing.z; c.x = (uint)(f.x); c.y = (uint)(f.y); c.z = (uint)(f.z); f.x = f.x-(float)c.x; // alpha f.y = f.y-(float)c.y; // beta f.z = f.z-(float)c.z; // gamma // Get vector field at corners of current cell float3 v[8]; v[0] = normalize(vecField_D[dim.x*(dim.y*(c.z+0) + (c.y+0))+c.x+0]); v[1] = normalize(vecField_D[dim.x*(dim.y*(c.z+0) + (c.y+0))+c.x+1]); v[2] = normalize(vecField_D[dim.x*(dim.y*(c.z+0) + (c.y+1))+c.x+0]); v[3] = normalize(vecField_D[dim.x*(dim.y*(c.z+0) + (c.y+1))+c.x+1]); v[4] = normalize(vecField_D[dim.x*(dim.y*(c.z+1) + (c.y+0))+c.x+0]); v[5] = normalize(vecField_D[dim.x*(dim.y*(c.z+1) + (c.y+0))+c.x+1]); v[6] = normalize(vecField_D[dim.x*(dim.y*(c.z+1) + (c.y+1))+c.x+0]); v[7] = normalize(vecField_D[dim.x*(dim.y*(c.z+1) + (c.y+1))+c.x+1]); return sampleVecFieldTrilin_D(v, f.x, f.y, f.z); } /* UpdatePositionRK4_D */ __global__ void UpdatePositionRK4_D(const float3 *vecField_D, float3 *pos_D) { // Get thread idx uint idx = blockIdx.x * blockDim.x + threadIdx.x; if(idx >= nPos) return; //float3 posAlt = pos_D[idx]; float3 x0, x1, x2, x3, v0, v1, v2, v3; v0 = make_float3(0.0, 0.0, 0.0); v1 = make_float3(0.0, 
0.0, 0.0); v2 = make_float3(0.0, 0.0, 0.0); v3 = make_float3(0.0, 0.0, 0.0); x0 = pos_D[idx]; v0 = normalize(sampleVecFieldAtTrilinNorm_D(x0, vecField_D)); v0 *= streamlinesStep; x1 = x0 + 0.5*v0; if(isValidGridPos_D(x1)) { v1 = normalize(sampleVecFieldAtTrilinNorm_D(x1, vecField_D)); v1 *= streamlinesStep; } x2 = x0 + 0.5f*v1; if(isValidGridPos_D(x2)) { v2 = normalize(sampleVecFieldAtTrilinNorm_D(x2, vecField_D)); v2 *= streamlinesStep; } x3 = x0 + v2; if(isValidGridPos_D(x3)) { v3 = normalize(sampleVecFieldAtTrilinNorm_D(x3, vecField_D)); v3 *= streamlinesStep; } x0 += (1.0f/6.0f)*(v0+2.0f*v1+2.0f*v2+v3); if(isValidGridPos_D(x0)) { pos_D[idx] = x0; } /*//pos_D[idx] = (1.0f/6.0f)*(v0+2.0f*v1+2.0f*v2+v3); // DEBUG //pos_D[idx] = v0; // DEBUG float3 f; uint3 c; f.x = (posAlt.x-org.x)/spacing.x; f.y = (posAlt.y-org.y)/spacing.y; f.z = (posAlt.z-org.z)/spacing.z; c.x = (uint)(f.x); c.y = (uint)(f.y); c.z = (uint)(f.z); f.x = f.x-(float)c.x; // alpha f.y = f.y-(float)c.y; // beta f.z = f.z-(float)c.z; // gamma // Get vector field at corners of current cell float3 v[8]; v[0] = vecField_D[dim.x*(dim.y*(c.z+0) + (c.y+0))+c.x+0]; v[1] = vecField_D[dim.x*(dim.y*(c.z+0) + (c.y+0))+c.x+1]; v[2] = vecField_D[dim.x*(dim.y*(c.z+0) + (c.y+1))+c.x+0]; v[3] = vecField_D[dim.x*(dim.y*(c.z+0) + (c.y+1))+c.x+1]; v[4] = vecField_D[dim.x*(dim.y*(c.z+1) + (c.y+0))+c.x+0]; v[5] = vecField_D[dim.x*(dim.y*(c.z+1) + (c.y+0))+c.x+1]; v[6] = vecField_D[dim.x*(dim.y*(c.z+1) + (c.y+1))+c.x+0]; v[7] = vecField_D[dim.x*(dim.y*(c.z+1) + (c.y+1))+c.x+1]; float3 v_test = sampleVecFieldAtTrilinNorm_D(posAlt, vecField_D); //pos_D[idx] = make_float3(f.x, f.y, f.z); // DEBUG //pos_D[idx] = make_float3((float)c.x, (float)c.y, (float)c.z); // DEBUG //pos_D[idx] = make_float3(org.x, org.y, org.z); // DEBUG //pos_D[idx] = make_float3(spacing.x, spacing.y, spacing.z); // DEBUG //pos_D[idx] = make_float3(v[7].x, v[7].y, v[7].z); // DEBUG //pos_D[idx] = make_float3(v_test.x, v_test.y, v_test.z); // DEBUG*/ } /* UpdatePositionRK4_D */ __global__ void UpdatePositionBackwardRK4_D(const float3 *vecField_D, float3 *pos_D) { // Get thread idx uint idx = blockIdx.x * blockDim.x + threadIdx.x; if(idx >= nPos) return; //float3 posAlt = pos_D[idx]; float3 x0, x1, x2, x3, v0, v1, v2, v3; v0 = make_float3(0.0, 0.0, 0.0); v1 = make_float3(0.0, 0.0, 0.0); v2 = make_float3(0.0, 0.0, 0.0); v3 = make_float3(0.0, 0.0, 0.0); x0 = pos_D[idx]; v0 = normalize(sampleVecFieldAtTrilinNorm_D(x0, vecField_D)); v0 *= streamlinesStep; x1 = x0 - 0.5*v0; if(isValidGridPos_D(x1)) { v1 = normalize(sampleVecFieldAtTrilinNorm_D(x1, vecField_D)); v1 *= streamlinesStep; } x2 = x0 - 0.5f*v1; if(isValidGridPos_D(x2)) { v2 = normalize(sampleVecFieldAtTrilinNorm_D(x2, vecField_D)); v2 *= streamlinesStep; } x3 = x0 - v2; if(isValidGridPos_D(x3)) { v3 = normalize(sampleVecFieldAtTrilinNorm_D(x3, vecField_D)); v3 *= streamlinesStep; } x0 -= (1.0f/6.0f)*(v0+2.0f*v1+2.0f*v2+v3); if(isValidGridPos_D(x0)) { pos_D[idx] = x0; } /*//pos_D[idx] = (1.0f/6.0f)*(v0+2.0f*v1+2.0f*v2+v3); // DEBUG //pos_D[idx] = v0; // DEBUG float3 f; uint3 c; f.x = (posAlt.x-org.x)/spacing.x; f.y = (posAlt.y-org.y)/spacing.y; f.z = (posAlt.z-org.z)/spacing.z; c.x = (uint)(f.x); c.y = (uint)(f.y); c.z = (uint)(f.z); f.x = f.x-(float)c.x; // alpha f.y = f.y-(float)c.y; // beta f.z = f.z-(float)c.z; // gamma // Get vector field at corners of current cell float3 v[8]; v[0] = vecField_D[dim.x*(dim.y*(c.z+0) + (c.y+0))+c.x+0]; v[1] = vecField_D[dim.x*(dim.y*(c.z+0) + (c.y+0))+c.x+1]; v[2] = 
vecField_D[dim.x*(dim.y*(c.z+0) + (c.y+1))+c.x+0]; v[3] = vecField_D[dim.x*(dim.y*(c.z+0) + (c.y+1))+c.x+1]; v[4] = vecField_D[dim.x*(dim.y*(c.z+1) + (c.y+0))+c.x+0]; v[5] = vecField_D[dim.x*(dim.y*(c.z+1) + (c.y+0))+c.x+1]; v[6] = vecField_D[dim.x*(dim.y*(c.z+1) + (c.y+1))+c.x+0]; v[7] = vecField_D[dim.x*(dim.y*(c.z+1) + (c.y+1))+c.x+1]; float3 v_test = sampleVecFieldAtTrilinNorm_D(posAlt, vecField_D); //pos_D[idx] = make_float3(f.x, f.y, f.z); // DEBUG //pos_D[idx] = make_float3((float)c.x, (float)c.y, (float)c.z); // DEBUG //pos_D[idx] = make_float3(org.x, org.y, org.z); // DEBUG //pos_D[idx] = make_float3(spacing.x, spacing.y, spacing.z); // DEBUG //pos_D[idx] = make_float3(v[7].x, v[7].y, v[7].z); // DEBUG //pos_D[idx] = make_float3(v_test.x, v_test.y, v_test.z); // DEBUG*/ } /* isFieldVanishingInCell */ inline __device__ bool isFieldVanishingInCell_D(float3 v[8]) { return (!(((v[0].x > 0)&&(v[1].x > 0)&&(v[2].x > 0)&& (v[3].x > 0)&&(v[4].x > 0)&&(v[5].x > 0)&& (v[6].x > 0)&&(v[7].x > 0))|| ((v[0].x < 0)&&(v[1].x < 0)&&(v[2].x < 0)&& (v[3].x < 0)&&(v[4].x < 0)&&(v[5].x < 0)&& (v[6].x < 0)&&(v[7].x < 0))|| ((v[0].y > 0)&&(v[1].y > 0)&&(v[2].y > 0)&& (v[3].y > 0)&&(v[4].y > 0)&&(v[5].y > 0)&& (v[6].y > 0)&&(v[7].y > 0))|| ((v[0].y < 0)&&(v[1].y < 0)&&(v[2].y < 0)&& (v[3].y < 0)&&(v[4].y < 0)&&(v[5].y < 0)&& (v[6].y < 0)&&(v[7].y < 0))|| ((v[0].z > 0)&&(v[1].z > 0)&&(v[2].z > 0)&& (v[3].z > 0)&&(v[4].z > 0)&&(v[5].z > 0)&& (v[6].z > 0)&&(v[7].z > 0))|| ((v[0].z < 0)&&(v[1].z < 0)&&(v[2].z < 0)&& (v[3].z < 0)&&(v[4].z < 0)&&(v[5].z < 0)&& (v[6].z < 0)&&(v[7].z < 0)))); } /* calcCellCoords_D */ __global__ void calcCellCoords_D(const float3 *vecField_D, // dim.x*dim.y*dim.z float3 *cellCoords_D) { // Get thread index uint nCells = (dim.x-1)*(dim.y-1)*(dim.z-1); uint idx = blockIdx.x * blockDim.x + threadIdx.x; if(idx >= nCells) return; // Get coordinates of the lower/left/back corner uint3 c; c.x = idx%(dim.x-1); c.y = (idx/(dim.x-1))%(dim.y-1); c.z = (idx/(dim.x-1))/(dim.y-1); // Init stack const uint maxStackSize = 6; uint currStackPos = 0; uint currSubCell[maxStackSize]; // 0 ... 
7 currSubCell[0] = 0; float cellSize = 1.0f; float3 stackCorners[maxStackSize*8]; stackCorners[0] = make_float3(0.0, 0.0, 0.0); stackCorners[1] = make_float3(1.0, 0.0, 0.0); stackCorners[2] = make_float3(0.0, 1.0, 0.0); stackCorners[3] = make_float3(1.0, 1.0, 0.0); stackCorners[4] = make_float3(0.0, 0.0, 1.0); stackCorners[5] = make_float3(1.0, 0.0, 1.0); stackCorners[6] = make_float3(0.0, 1.0, 1.0); stackCorners[7] = make_float3(1.0, 1.0, 1.0); float3 stackV[8*maxStackSize]; // Vector field at corners of current (sub-)cell stackV[0] = normalize(vecField_D[dim.x*(dim.y*(c.z+0) + (c.y+0))+c.x+0]); stackV[1] = normalize(vecField_D[dim.x*(dim.y*(c.z+0) + (c.y+0))+c.x+1]); stackV[2] = normalize(vecField_D[dim.x*(dim.y*(c.z+0) + (c.y+1))+c.x+0]); stackV[3] = normalize(vecField_D[dim.x*(dim.y*(c.z+0) + (c.y+1))+c.x+1]); stackV[4] = normalize(vecField_D[dim.x*(dim.y*(c.z+1) + (c.y+0))+c.x+0]); stackV[5] = normalize(vecField_D[dim.x*(dim.y*(c.z+1) + (c.y+0))+c.x+1]); stackV[6] = normalize(vecField_D[dim.x*(dim.y*(c.z+1) + (c.y+1))+c.x+0]); stackV[7] = normalize(vecField_D[dim.x*(dim.y*(c.z+1) + (c.y+1))+c.x+1]); float3 stackSubgrid[19*maxStackSize]; // Init cell coords cellCoords_D[idx].x = -1.0; cellCoords_D[idx].y = -1.0; cellCoords_D[idx].z = -1.0; bool done = false; while(!done) { if((isFieldVanishingInCell_D(&stackV[currStackPos*8]))&&(currSubCell[currStackPos] < 8)) { if(currStackPos < maxStackSize-1) { if(currSubCell[currStackPos] == 0) { // Compute subgrid values and put them on the stack // Edges stackSubgrid[19*currStackPos+0] = sampleVecFieldLin_D(stackV[(currStackPos)*8+0], stackV[(currStackPos)*8+1], 0.5); stackSubgrid[19*currStackPos+1] = sampleVecFieldLin_D(stackV[(currStackPos)*8+0], stackV[(currStackPos)*8+2], 0.5); stackSubgrid[19*currStackPos+2] = sampleVecFieldLin_D(stackV[(currStackPos)*8+1], stackV[(currStackPos)*8+3], 0.5); stackSubgrid[19*currStackPos+3] = sampleVecFieldLin_D(stackV[(currStackPos)*8+2], stackV[(currStackPos)*8+3], 0.5); stackSubgrid[19*currStackPos+4] = sampleVecFieldLin_D(stackV[(currStackPos)*8+0], stackV[(currStackPos)*8+4], 0.5); stackSubgrid[19*currStackPos+5] = sampleVecFieldLin_D(stackV[(currStackPos)*8+1], stackV[(currStackPos)*8+5], 0.5); stackSubgrid[19*currStackPos+6] = sampleVecFieldLin_D(stackV[(currStackPos)*8+2], stackV[(currStackPos)*8+6], 0.5); stackSubgrid[19*currStackPos+7] = sampleVecFieldLin_D(stackV[(currStackPos)*8+3], stackV[(currStackPos)*8+7], 0.5); stackSubgrid[19*currStackPos+8] = sampleVecFieldLin_D(stackV[(currStackPos)*8+4], stackV[(currStackPos)*8+5], 0.5); stackSubgrid[19*currStackPos+9] = sampleVecFieldLin_D(stackV[(currStackPos)*8+4], stackV[(currStackPos)*8+6], 0.5); stackSubgrid[19*currStackPos+10] = sampleVecFieldLin_D(stackV[(currStackPos)*8+5], stackV[(currStackPos)*8+7], 0.5); stackSubgrid[19*currStackPos+11] = sampleVecFieldLin_D(stackV[(currStackPos)*8+6], stackV[(currStackPos)*8+7], 0.5); // Faces // Back stackSubgrid[19*currStackPos+12] = sampleVecFieldLin_D(stackSubgrid[19*currStackPos+0], stackSubgrid[19*currStackPos+3], 0.5); // Front stackSubgrid[19*currStackPos+13] = sampleVecFieldLin_D(stackSubgrid[19*currStackPos+9], stackSubgrid[19*currStackPos+10], 0.5); // Bottom stackSubgrid[19*currStackPos+14] = sampleVecFieldLin_D(stackSubgrid[19*currStackPos+4], stackSubgrid[19*currStackPos+5], 0.5); // Top stackSubgrid[19*currStackPos+15] = sampleVecFieldLin_D(stackSubgrid[19*currStackPos+6], stackSubgrid[19*currStackPos+7], 0.5); // Left stackSubgrid[19*currStackPos+16] = 
sampleVecFieldLin_D(stackSubgrid[19*currStackPos+9], stackSubgrid[19*currStackPos+1], 0.5); // Right stackSubgrid[19*currStackPos+17] = sampleVecFieldLin_D(stackSubgrid[19*currStackPos+5], stackSubgrid[19*currStackPos+7], 0.5); // Center stackSubgrid[19*currStackPos+18] = sampleVecFieldLin_D(stackSubgrid[19*currStackPos+12], stackSubgrid[19*currStackPos+13], 0.5); } // Increment stack currStackPos++; cellSize = cellSize*0.5; // Bisect and put cell on stack if(currSubCell[currStackPos-1] == 0) { // left/down/back // Set cell corners stackCorners[currStackPos*8+0] = make_float3(stackCorners[(currStackPos-1)*8+0].x, stackCorners[(currStackPos-1)*8+0].y, stackCorners[(currStackPos-1)*8+0].z); stackCorners[currStackPos*8+1] = make_float3(stackCorners[(currStackPos-1)*8+0].x+cellSize, stackCorners[(currStackPos-1)*8+0].y, stackCorners[(currStackPos-1)*8+0].z); stackCorners[currStackPos*8+2] = make_float3(stackCorners[(currStackPos-1)*8+0].x, stackCorners[(currStackPos-1)*8+0].y+cellSize, stackCorners[(currStackPos-1)*8+0].z); stackCorners[currStackPos*8+3] = make_float3(stackCorners[(currStackPos-1)*8+0].x+cellSize, stackCorners[(currStackPos-1)*8+0].y+cellSize, stackCorners[(currStackPos-1)*8+0].z); stackCorners[currStackPos*8+4] = make_float3(stackCorners[(currStackPos-1)*8+0].x, stackCorners[(currStackPos-1)*8+0].y, stackCorners[(currStackPos-1)*8+0].z+cellSize); stackCorners[currStackPos*8+5] = make_float3(stackCorners[(currStackPos-1)*8+0].x+cellSize, stackCorners[(currStackPos-1)*8+0].y, stackCorners[(currStackPos-1)*8+0].z+cellSize); stackCorners[currStackPos*8+6] = make_float3(stackCorners[(currStackPos-1)*8+0].x, stackCorners[(currStackPos-1)*8+0].y+cellSize, stackCorners[(currStackPos-1)*8+0].z+cellSize); stackCorners[currStackPos*8+7] = make_float3(stackCorners[(currStackPos-1)*8+0].x+cellSize, stackCorners[(currStackPos-1)*8+0].y+cellSize, stackCorners[(currStackPos-1)*8+0].z+cellSize); // Sample vector field at cell corners stackV[currStackPos*8+0] = stackV[(currStackPos-1)*8+0]; stackV[currStackPos*8+1] = stackSubgrid[19*(currStackPos-1)+0]; stackV[currStackPos*8+2] = stackSubgrid[19*(currStackPos-1)+1]; stackV[currStackPos*8+3] = stackSubgrid[19*(currStackPos-1)+12]; stackV[currStackPos*8+4] = stackSubgrid[19*(currStackPos-1)+4]; stackV[currStackPos*8+5] = stackSubgrid[19*(currStackPos-1)+14]; stackV[currStackPos*8+6] = stackSubgrid[19*(currStackPos-1)+16]; stackV[currStackPos*8+7] = stackSubgrid[19*(currStackPos-1)+18]; } else if(currSubCell[currStackPos-1] == 1) { // right/down/back stackCorners[currStackPos*8+0] = make_float3(stackCorners[(currStackPos-1)*8+0].x+cellSize, stackCorners[(currStackPos-1)*8+0].y, stackCorners[(currStackPos-1)*8+0].z); stackCorners[currStackPos*8+1] = make_float3(stackCorners[(currStackPos-1)*8+1].x, stackCorners[(currStackPos-1)*8+1].y, stackCorners[(currStackPos-1)*8+1].z); stackCorners[currStackPos*8+2] = make_float3(stackCorners[(currStackPos-1)*8+0].x+cellSize, stackCorners[(currStackPos-1)*8+0].y+cellSize, stackCorners[(currStackPos-1)*8+0].z); stackCorners[currStackPos*8+3] = make_float3(stackCorners[(currStackPos-1)*8+1].x, stackCorners[(currStackPos-1)*8+1].y+cellSize, stackCorners[(currStackPos-1)*8+1].z); stackCorners[currStackPos*8+4] = make_float3(stackCorners[(currStackPos-1)*8+0].x+cellSize, stackCorners[(currStackPos-1)*8+0].y, stackCorners[(currStackPos-1)*8+0].z+cellSize); stackCorners[currStackPos*8+5] = make_float3(stackCorners[(currStackPos-1)*8+1].x, stackCorners[(currStackPos-1)*8+1].y, 
stackCorners[(currStackPos-1)*8+1].z+cellSize); stackCorners[currStackPos*8+6] = make_float3(stackCorners[(currStackPos-1)*8+0].x+cellSize, stackCorners[(currStackPos-1)*8+0].y+cellSize, stackCorners[(currStackPos-1)*8+0].z+cellSize); stackCorners[currStackPos*8+7] = make_float3(stackCorners[(currStackPos-1)*8+1].x, stackCorners[(currStackPos-1)*8+1].y+cellSize, stackCorners[(currStackPos-1)*8+1].z+cellSize); // Sample vector field at cell corners (while reusing vals from the last subcell) stackV[currStackPos*8+0] = stackSubgrid[19*(currStackPos-1)+0]; stackV[currStackPos*8+1] = stackV[(currStackPos-1)*8+1]; stackV[currStackPos*8+2] = stackSubgrid[19*(currStackPos-1)+12]; stackV[currStackPos*8+3] = stackSubgrid[19*(currStackPos-1)+2]; stackV[currStackPos*8+4] = stackSubgrid[19*(currStackPos-1)+14]; stackV[currStackPos*8+5] = stackSubgrid[19*(currStackPos-1)+5]; stackV[currStackPos*8+6] = stackSubgrid[19*(currStackPos-1)+18]; stackV[currStackPos*8+7] = stackSubgrid[19*(currStackPos-1)+17]; } else if(currSubCell[currStackPos-1] == 2) { // left/top/back stackCorners[currStackPos*8+0] = make_float3(stackCorners[(currStackPos-1)*8+0].x, stackCorners[(currStackPos-1)*8+0].y+cellSize, stackCorners[(currStackPos-1)*8+0].z); stackCorners[currStackPos*8+1] = make_float3(stackCorners[(currStackPos-1)*8+0].x+cellSize, stackCorners[(currStackPos-1)*8+0].y+cellSize, stackCorners[(currStackPos-1)*8+0].z); stackCorners[currStackPos*8+2] = make_float3(stackCorners[(currStackPos-1)*8+2].x, stackCorners[(currStackPos-1)*8+2].y, stackCorners[(currStackPos-1)*8+2].z); stackCorners[currStackPos*8+3] = make_float3(stackCorners[(currStackPos-1)*8+2].x+cellSize, stackCorners[(currStackPos-1)*8+2].y, stackCorners[(currStackPos-1)*8+2].z); stackCorners[currStackPos*8+4] = make_float3(stackCorners[(currStackPos-1)*8+0].x, stackCorners[(currStackPos-1)*8+0].y+cellSize, stackCorners[(currStackPos-1)*8+0].z+cellSize); stackCorners[currStackPos*8+5] = make_float3(stackCorners[(currStackPos-1)*8+0].x+cellSize, stackCorners[(currStackPos-1)*8+0].y+cellSize, stackCorners[(currStackPos-1)*8+0].z+cellSize); stackCorners[currStackPos*8+6] = make_float3(stackCorners[(currStackPos-1)*8+2].x, stackCorners[(currStackPos-1)*8+2].y, stackCorners[(currStackPos-1)*8+2].z+cellSize); stackCorners[currStackPos*8+7] = make_float3(stackCorners[(currStackPos-1)*8+2].x+cellSize, stackCorners[(currStackPos-1)*8+2].y, stackCorners[(currStackPos-1)*8+2].z+cellSize); // Sample vector field at cell corners (while reusing vals from the last subcell) stackV[currStackPos*8+0] = stackSubgrid[19*(currStackPos-1)+1]; stackV[currStackPos*8+1] = stackSubgrid[19*(currStackPos-1)+12]; stackV[currStackPos*8+2] = stackV[(currStackPos-1)*8+2]; stackV[currStackPos*8+3] = stackSubgrid[19*(currStackPos-1)+3]; stackV[currStackPos*8+4] = stackSubgrid[19*(currStackPos-1)+16]; stackV[currStackPos*8+5] = stackSubgrid[19*(currStackPos-1)+18]; stackV[currStackPos*8+6] = stackSubgrid[19*(currStackPos-1)+6]; stackV[currStackPos*8+7] = stackSubgrid[19*(currStackPos-1)+15]; } else if(currSubCell[currStackPos-1] == 3) { // right/top/back stackCorners[currStackPos*8+0] = make_float3(stackCorners[(currStackPos-1)*8+0].x+cellSize, stackCorners[(currStackPos-1)*8+0].y+cellSize, stackCorners[(currStackPos-1)*8+0].z); stackCorners[currStackPos*8+1] = make_float3(stackCorners[(currStackPos-1)*8+1].x, stackCorners[(currStackPos-1)*8+1].y+cellSize, stackCorners[(currStackPos-1)*8+1].z); stackCorners[currStackPos*8+2] = make_float3(stackCorners[(currStackPos-1)*8+2].x+cellSize, 
stackCorners[(currStackPos-1)*8+2].y, stackCorners[(currStackPos-1)*8+2].z); stackCorners[currStackPos*8+3] = make_float3(stackCorners[(currStackPos-1)*8+3].x, stackCorners[(currStackPos-1)*8+3].y, stackCorners[(currStackPos-1)*8+3].z); stackCorners[currStackPos*8+4] = make_float3(stackCorners[(currStackPos-1)*8+0].x+cellSize, stackCorners[(currStackPos-1)*8+0].y+cellSize, stackCorners[(currStackPos-1)*8+0].z+cellSize); stackCorners[currStackPos*8+5] = make_float3(stackCorners[(currStackPos-1)*8+1].x, stackCorners[(currStackPos-1)*8+1].y+cellSize, stackCorners[(currStackPos-1)*8+1].z+cellSize); stackCorners[currStackPos*8+6] = make_float3(stackCorners[(currStackPos-1)*8+2].x+cellSize, stackCorners[(currStackPos-1)*8+2].y, stackCorners[(currStackPos-1)*8+2].z+cellSize); stackCorners[currStackPos*8+7] = make_float3(stackCorners[(currStackPos-1)*8+3].x, stackCorners[(currStackPos-1)*8+3].y, stackCorners[(currStackPos-1)*8+3].z+cellSize); // Sample vector field at cell corners (while reusing vals from the last subcell) stackV[currStackPos*8+0] = stackSubgrid[19*(currStackPos-1)+12]; stackV[currStackPos*8+1] = stackSubgrid[19*(currStackPos-1)+2]; stackV[currStackPos*8+2] = stackSubgrid[19*(currStackPos-1)+3]; stackV[currStackPos*8+3] = stackV[(currStackPos-1)*8+3]; stackV[currStackPos*8+4] = stackSubgrid[19*(currStackPos-1)+18]; stackV[currStackPos*8+5] = stackSubgrid[19*(currStackPos-1)+17]; stackV[currStackPos*8+6] = stackSubgrid[19*(currStackPos-1)+15]; stackV[currStackPos*8+7] = stackSubgrid[19*(currStackPos-1)+7]; } else if(currSubCell[currStackPos-1] == 4) { // left/bottom/front stackCorners[currStackPos*8+0] = make_float3(stackCorners[(currStackPos-1)*8+0].x, stackCorners[(currStackPos-1)*8+0].y, stackCorners[(currStackPos-1)*8+0].z+cellSize); stackCorners[currStackPos*8+1] = make_float3(stackCorners[(currStackPos-1)*8+0].x+cellSize, stackCorners[(currStackPos-1)*8+0].y, stackCorners[(currStackPos-1)*8+0].z+cellSize); stackCorners[currStackPos*8+2] = make_float3(stackCorners[(currStackPos-1)*8+0].x, stackCorners[(currStackPos-1)*8+0].y+cellSize, stackCorners[(currStackPos-1)*8+0].z+cellSize); stackCorners[currStackPos*8+3] = make_float3(stackCorners[(currStackPos-1)*8+0].x+cellSize, stackCorners[(currStackPos-1)*8+0].y+cellSize, stackCorners[(currStackPos-1)*8+0].z+cellSize); stackCorners[currStackPos*8+4] = make_float3(stackCorners[(currStackPos-1)*8+4].x, stackCorners[(currStackPos-1)*8+4].y, stackCorners[(currStackPos-1)*8+4].z); stackCorners[currStackPos*8+5] = make_float3(stackCorners[(currStackPos-1)*8+4].x+cellSize, stackCorners[(currStackPos-1)*8+4].y, stackCorners[(currStackPos-1)*8+4].z); stackCorners[currStackPos*8+6] = make_float3(stackCorners[(currStackPos-1)*8+4].x, stackCorners[(currStackPos-1)*8+4].y+cellSize, stackCorners[(currStackPos-1)*8+4].z); stackCorners[currStackPos*8+7] = make_float3(stackCorners[(currStackPos-1)*8+4].x+cellSize, stackCorners[(currStackPos-1)*8+4].y+cellSize, stackCorners[(currStackPos-1)*8+4].z); // Sample vector field at cell corners (while reusing vals from the last subcell) stackV[currStackPos*8+0] = stackSubgrid[19*(currStackPos-1)+4]; stackV[currStackPos*8+1] = stackSubgrid[19*(currStackPos-1)+14]; stackV[currStackPos*8+2] = stackSubgrid[19*(currStackPos-1)+16]; stackV[currStackPos*8+3] = stackSubgrid[19*(currStackPos-1)+18]; stackV[currStackPos*8+4] = stackV[(currStackPos-1)*8+4]; stackV[currStackPos*8+5] = stackSubgrid[19*(currStackPos-1)+8]; stackV[currStackPos*8+6] = stackSubgrid[19*(currStackPos-1)+9]; stackV[currStackPos*8+7] = 
stackSubgrid[19*(currStackPos-1)+13]; } else if(currSubCell[currStackPos-1] == 5) { // right/bottom/front stackCorners[currStackPos*8+0] = make_float3(stackCorners[(currStackPos-1)*8+0].x+cellSize, stackCorners[(currStackPos-1)*8+0].y, stackCorners[(currStackPos-1)*8+0].z+cellSize); stackCorners[currStackPos*8+1] = make_float3(stackCorners[(currStackPos-1)*8+1].x, stackCorners[(currStackPos-1)*8+1].y, stackCorners[(currStackPos-1)*8+1].z+cellSize); stackCorners[currStackPos*8+2] = make_float3(stackCorners[(currStackPos-1)*8+0].x+cellSize, stackCorners[(currStackPos-1)*8+0].y+cellSize, stackCorners[(currStackPos-1)*8+0].z+cellSize); stackCorners[currStackPos*8+3] = make_float3(stackCorners[(currStackPos-1)*8+1].x, stackCorners[(currStackPos-1)*8+1].y+cellSize, stackCorners[(currStackPos-1)*8+1].z+cellSize); stackCorners[currStackPos*8+4] = make_float3(stackCorners[(currStackPos-1)*8+4].x+cellSize, stackCorners[(currStackPos-1)*8+4].y, stackCorners[(currStackPos-1)*8+4].z); stackCorners[currStackPos*8+5] = make_float3(stackCorners[(currStackPos-1)*8+5].x, stackCorners[(currStackPos-1)*8+5].y, stackCorners[(currStackPos-1)*8+5].z); stackCorners[currStackPos*8+6] = make_float3(stackCorners[(currStackPos-1)*8+4].x+cellSize, stackCorners[(currStackPos-1)*8+4].y+cellSize, stackCorners[(currStackPos-1)*8+4].z); stackCorners[currStackPos*8+7] = make_float3(stackCorners[(currStackPos-1)*8+5].x, stackCorners[(currStackPos-1)*8+5].y+cellSize, stackCorners[(currStackPos-1)*8+5].z); // Sample vector field at cell corners (while reusing vals from the last subcell) stackV[currStackPos*8+0] = stackSubgrid[19*(currStackPos-1)+14]; stackV[currStackPos*8+1] = stackSubgrid[19*(currStackPos-1)+5]; stackV[currStackPos*8+2] = stackSubgrid[19*(currStackPos-1)+18]; stackV[currStackPos*8+3] = stackSubgrid[19*(currStackPos-1)+17]; stackV[currStackPos*8+4] = stackSubgrid[19*(currStackPos-1)+8]; stackV[currStackPos*8+5] = stackV[(currStackPos-1)*8+5]; stackV[currStackPos*8+6] = stackSubgrid[19*(currStackPos-1)+13]; stackV[currStackPos*8+7] = stackSubgrid[19*(currStackPos-1)+10]; } else if(currSubCell[currStackPos-1] == 6) { // left/top/front stackCorners[currStackPos*8+0] = make_float3(stackCorners[(currStackPos-1)*8+0].x, stackCorners[(currStackPos-1)*8+0].y+cellSize, stackCorners[(currStackPos-1)*8+0].z+cellSize); stackCorners[currStackPos*8+1] = make_float3(stackCorners[(currStackPos-1)*8+0].x+cellSize, stackCorners[(currStackPos-1)*8+0].y+cellSize, stackCorners[(currStackPos-1)*8+0].z+cellSize); stackCorners[currStackPos*8+2] = make_float3(stackCorners[(currStackPos-1)*8+2].x, stackCorners[(currStackPos-1)*8+2].y, stackCorners[(currStackPos-1)*8+2].z+cellSize); stackCorners[currStackPos*8+3] = make_float3(stackCorners[(currStackPos-1)*8+2].x+cellSize, stackCorners[(currStackPos-1)*8+2].y, stackCorners[(currStackPos-1)*8+2].z+cellSize); stackCorners[currStackPos*8+4] = make_float3(stackCorners[(currStackPos-1)*8+4].x, stackCorners[(currStackPos-1)*8+4].y+cellSize, stackCorners[(currStackPos-1)*8+4].z); stackCorners[currStackPos*8+5] = make_float3(stackCorners[(currStackPos-1)*8+4].x+cellSize, stackCorners[(currStackPos-1)*8+4].y+cellSize, stackCorners[(currStackPos-1)*8+4].z); stackCorners[currStackPos*8+6] = make_float3(stackCorners[(currStackPos-1)*8+6].x, stackCorners[(currStackPos-1)*8+6].y, stackCorners[(currStackPos-1)*8+6].z); stackCorners[currStackPos*8+7] = make_float3(stackCorners[(currStackPos-1)*8+6].x+cellSize, stackCorners[(currStackPos-1)*8+6].y, stackCorners[(currStackPos-1)*8+6].z); // Sample vector 
field at cell corners (while reusing vals from the last subcell) stackV[currStackPos*8+0] = stackSubgrid[19*(currStackPos-1)+16]; stackV[currStackPos*8+1] = stackSubgrid[19*(currStackPos-1)+18]; stackV[currStackPos*8+2] = stackSubgrid[19*(currStackPos-1)+6]; stackV[currStackPos*8+3] = stackSubgrid[19*(currStackPos-1)+15]; stackV[currStackPos*8+4] = stackSubgrid[19*(currStackPos-1)+9]; stackV[currStackPos*8+5] = stackSubgrid[19*(currStackPos-1)+13]; stackV[currStackPos*8+6] = stackV[(currStackPos-1)*8+6]; stackV[currStackPos*8+7] = stackSubgrid[19*(currStackPos-1)+11]; } else if(currSubCell[currStackPos-1] == 7) { // right/top/front stackCorners[currStackPos*8+0] = make_float3(stackCorners[(currStackPos-1)*8+0].x+cellSize, stackCorners[(currStackPos-1)*8+0].y+cellSize, stackCorners[(currStackPos-1)*8+0].z+cellSize); stackCorners[currStackPos*8+1] = make_float3(stackCorners[(currStackPos-1)*8+1].x, stackCorners[(currStackPos-1)*8+1].y+cellSize, stackCorners[(currStackPos-1)*8+1].z+cellSize); stackCorners[currStackPos*8+2] = make_float3(stackCorners[(currStackPos-1)*8+2].x+cellSize, stackCorners[(currStackPos-1)*8+2].y, stackCorners[(currStackPos-1)*8+2].z+cellSize); stackCorners[currStackPos*8+3] = make_float3(stackCorners[(currStackPos-1)*8+3].x, stackCorners[(currStackPos-1)*8+3].y, stackCorners[(currStackPos-1)*8+3].z+cellSize); stackCorners[currStackPos*8+4] = make_float3(stackCorners[(currStackPos-1)*8+4].x+cellSize, stackCorners[(currStackPos-1)*8+4].y+cellSize, stackCorners[(currStackPos-1)*8+4].z); stackCorners[currStackPos*8+5] = make_float3(stackCorners[(currStackPos-1)*8+5].x, stackCorners[(currStackPos-1)*8+5].y+cellSize, stackCorners[(currStackPos-1)*8+5].z); stackCorners[currStackPos*8+6] = make_float3(stackCorners[(currStackPos-1)*8+6].x+cellSize, stackCorners[(currStackPos-1)*8+6].y, stackCorners[(currStackPos-1)*8+6].z); stackCorners[currStackPos*8+7] = make_float3(stackCorners[(currStackPos-1)*8+7].x, stackCorners[(currStackPos-1)*8+7].y, stackCorners[(currStackPos-1)*8+7].z); stackV[currStackPos*8+0] = stackSubgrid[19*(currStackPos-1)+18]; stackV[currStackPos*8+1] = stackSubgrid[19*(currStackPos-1)+17]; stackV[currStackPos*8+2] = stackSubgrid[19*(currStackPos-1)+15]; stackV[currStackPos*8+3] = stackSubgrid[19*(currStackPos-1)+7]; stackV[currStackPos*8+4] = stackSubgrid[19*(currStackPos-1)+13]; stackV[currStackPos*8+5] = stackSubgrid[19*(currStackPos-1)+10]; stackV[currStackPos*8+6] = stackSubgrid[19*(currStackPos-1)+11]; stackV[currStackPos*8+7] = stackV[(currStackPos-1)*8+7]; } currSubCell[currStackPos] = 0; } else { // Put the center of the current (sub-)cell on the stack cellCoords_D[idx].x = stackCorners[8*currStackPos+0].x + cellSize*0.5; cellCoords_D[idx].y = stackCorners[8*currStackPos+0].y + cellSize*0.5; cellCoords_D[idx].z = stackCorners[8*currStackPos+0].z + cellSize*0.5; done = true; } } else { if(currStackPos > 0) { currStackPos--; cellSize = cellSize*2.0; currSubCell[currStackPos]++; } else { // Field is not vanishing in this cell done = true; } } } } /* calcCellCoords_D2 */ __global__ void calcCellCoords_D2(const float3 *vecField_D, // dim.x*dim.y*dim.z float3 *cellCoords_D) { // Get thread index uint nCells = (dim.x-1)*(dim.y-1)*(dim.z-1); uint idx = blockIdx.x * blockDim.x + threadIdx.x; if(idx >= nCells) return; // Get coordinates of the lower/left/back corner uint3 c; c.x = idx%(dim.x-1); c.y = (idx/(dim.x-1))%(dim.y-1); c.z = (idx/(dim.x-1))/(dim.y-1); // Init stack const uint maxStackSize = 20; uint currStackPos = 0; uint currSubCell[maxStackSize]; // 
0 ... 7 currSubCell[0] = 0; float cellSize = 1.0f; float3 stackCorners[maxStackSize*8]; stackCorners[0] = make_float3(0.0, 0.0, 0.0); stackCorners[1] = make_float3(1.0, 0.0, 0.0); stackCorners[2] = make_float3(0.0, 1.0, 0.0); stackCorners[3] = make_float3(1.0, 1.0, 0.0); stackCorners[4] = make_float3(0.0, 0.0, 1.0); stackCorners[5] = make_float3(1.0, 0.0, 1.0); stackCorners[6] = make_float3(0.0, 1.0, 1.0); stackCorners[7] = make_float3(1.0, 1.0, 1.0); float3 stackV[8*maxStackSize]; // Vector field at corners of current (sub-)cell stackV[0] = normalize(vecField_D[dim.x*(dim.y*(c.z+0) + (c.y+0))+c.x+0]); stackV[1] = normalize(vecField_D[dim.x*(dim.y*(c.z+0) + (c.y+0))+c.x+1]); stackV[2] = normalize(vecField_D[dim.x*(dim.y*(c.z+0) + (c.y+1))+c.x+0]); stackV[3] = normalize(vecField_D[dim.x*(dim.y*(c.z+0) + (c.y+1))+c.x+1]); stackV[4] = normalize(vecField_D[dim.x*(dim.y*(c.z+1) + (c.y+0))+c.x+0]); stackV[5] = normalize(vecField_D[dim.x*(dim.y*(c.z+1) + (c.y+0))+c.x+1]); stackV[6] = normalize(vecField_D[dim.x*(dim.y*(c.z+1) + (c.y+1))+c.x+0]); stackV[7] = normalize(vecField_D[dim.x*(dim.y*(c.z+1) + (c.y+1))+c.x+1]); // Init cell coords cellCoords_D[idx].x = -1.0; cellCoords_D[idx].y = -1.0; cellCoords_D[idx].z = -1.0; bool done = false; while(!done) { if((isFieldVanishingInCell_D(&stackV[currStackPos*8]))&&(currSubCell[currStackPos] < 8)) { if(currStackPos < maxStackSize-1) { // Increment stack currStackPos++; cellSize = cellSize*0.5; // Bisect and put cell on stack if(currSubCell[currStackPos-1] == 0) { // left/down/back // Set cell corners stackCorners[currStackPos*8+0] = make_float3(stackCorners[(currStackPos-1)*8+0].x, stackCorners[(currStackPos-1)*8+0].y, stackCorners[(currStackPos-1)*8+0].z); stackCorners[currStackPos*8+1] = make_float3(stackCorners[(currStackPos-1)*8+0].x+cellSize, stackCorners[(currStackPos-1)*8+0].y, stackCorners[(currStackPos-1)*8+0].z); stackCorners[currStackPos*8+2] = make_float3(stackCorners[(currStackPos-1)*8+0].x, stackCorners[(currStackPos-1)*8+0].y+cellSize, stackCorners[(currStackPos-1)*8+0].z); stackCorners[currStackPos*8+3] = make_float3(stackCorners[(currStackPos-1)*8+0].x+cellSize, stackCorners[(currStackPos-1)*8+0].y+cellSize, stackCorners[(currStackPos-1)*8+0].z); stackCorners[currStackPos*8+4] = make_float3(stackCorners[(currStackPos-1)*8+0].x, stackCorners[(currStackPos-1)*8+0].y, stackCorners[(currStackPos-1)*8+0].z+cellSize); stackCorners[currStackPos*8+5] = make_float3(stackCorners[(currStackPos-1)*8+0].x+cellSize, stackCorners[(currStackPos-1)*8+0].y, stackCorners[(currStackPos-1)*8+0].z+cellSize); stackCorners[currStackPos*8+6] = make_float3(stackCorners[(currStackPos-1)*8+0].x, stackCorners[(currStackPos-1)*8+0].y+cellSize, stackCorners[(currStackPos-1)*8+0].z+cellSize); stackCorners[currStackPos*8+7] = make_float3(stackCorners[(currStackPos-1)*8+0].x+cellSize, stackCorners[(currStackPos-1)*8+0].y+cellSize, stackCorners[(currStackPos-1)*8+0].z+cellSize); // Sample vector field at cell corners stackV[currStackPos*8+0] = stackV[(currStackPos-1)*8+0]; stackV[currStackPos*8+1] = sampleVecFieldLin_D(stackV[(currStackPos-1)*8+0], stackV[(currStackPos-1)*8+1], 0.5); stackV[currStackPos*8+2] = sampleVecFieldLin_D(stackV[(currStackPos-1)*8+0], stackV[(currStackPos-1)*8+2], 0.5); stackV[currStackPos*8+3] = sampleVecFieldBilin_D(stackV[(currStackPos-1)*8+0], stackV[(currStackPos-1)*8+1], stackV[(currStackPos-1)*8+2], stackV[(currStackPos-1)*8+3], 0.5, 0.5); stackV[currStackPos*8+4] = sampleVecFieldLin_D(stackV[(currStackPos-1)*8+0], 
stackV[(currStackPos-1)*8+4], 0.5); stackV[currStackPos*8+5] = sampleVecFieldBilin_D(stackV[(currStackPos-1)*8+0], stackV[(currStackPos-1)*8+1], stackV[(currStackPos-1)*8+4], stackV[(currStackPos-1)*8+5], 0.5, 0.5); stackV[currStackPos*8+6] = sampleVecFieldBilin_D(stackV[(currStackPos-1)*8+0], stackV[(currStackPos-1)*8+2], stackV[(currStackPos-1)*8+4], stackV[(currStackPos-1)*8+6], 0.5, 0.5); stackV[currStackPos*8+7] = sampleVecFieldTrilin_D(&stackV[(currStackPos-1)*8], 0.5, 0.5, 0.5); } else if(currSubCell[currStackPos-1] == 1) { // right/down/back stackCorners[currStackPos*8+0] = make_float3(stackCorners[(currStackPos-1)*8+0].x+cellSize, stackCorners[(currStackPos-1)*8+0].y, stackCorners[(currStackPos-1)*8+0].z); stackCorners[currStackPos*8+1] = make_float3(stackCorners[(currStackPos-1)*8+1].x, stackCorners[(currStackPos-1)*8+1].y, stackCorners[(currStackPos-1)*8+1].z); stackCorners[currStackPos*8+2] = make_float3(stackCorners[(currStackPos-1)*8+0].x+cellSize, stackCorners[(currStackPos-1)*8+0].y+cellSize, stackCorners[(currStackPos-1)*8+0].z); stackCorners[currStackPos*8+3] = make_float3(stackCorners[(currStackPos-1)*8+1].x, stackCorners[(currStackPos-1)*8+1].y+cellSize, stackCorners[(currStackPos-1)*8+1].z); stackCorners[currStackPos*8+4] = make_float3(stackCorners[(currStackPos-1)*8+0].x+cellSize, stackCorners[(currStackPos-1)*8+0].y, stackCorners[(currStackPos-1)*8+0].z+cellSize); stackCorners[currStackPos*8+5] = make_float3(stackCorners[(currStackPos-1)*8+1].x, stackCorners[(currStackPos-1)*8+1].y, stackCorners[(currStackPos-1)*8+1].z+cellSize); stackCorners[currStackPos*8+6] = make_float3(stackCorners[(currStackPos-1)*8+0].x+cellSize, stackCorners[(currStackPos-1)*8+0].y+cellSize, stackCorners[(currStackPos-1)*8+0].z+cellSize); stackCorners[currStackPos*8+7] = make_float3(stackCorners[(currStackPos-1)*8+1].x, stackCorners[(currStackPos-1)*8+1].y+cellSize, stackCorners[(currStackPos-1)*8+1].z+cellSize); // Sample vector field at cell corners (while reusing vals from the last subcell) stackV[currStackPos*8+0] = sampleVecFieldLin_D(stackV[(currStackPos-1)*8+0], stackV[(currStackPos-1)*8+1], 0.5); stackV[currStackPos*8+4] = sampleVecFieldBilin_D(stackV[(currStackPos-1)*8+0], stackV[(currStackPos-1)*8+1], stackV[(currStackPos-1)*8+4], stackV[(currStackPos-1)*8+5], 0.5, 0.5); stackV[currStackPos*8+2] = sampleVecFieldBilin_D(stackV[(currStackPos-1)*8+0], stackV[(currStackPos-1)*8+1], stackV[(currStackPos-1)*8+2], stackV[(currStackPos-1)*8+3], 0.5, 0.5); stackV[currStackPos*8+6] = sampleVecFieldTrilin_D(&stackV[(currStackPos-1)*8], 0.5, 0.5, 0.5); stackV[currStackPos*8+1] = stackV[(currStackPos-1)*8+1]; stackV[currStackPos*8+3] = sampleVecFieldLin_D(stackV[(currStackPos-1)*8+1], stackV[(currStackPos-1)*8+3], 0.5); stackV[currStackPos*8+5] = sampleVecFieldLin_D(stackV[(currStackPos-1)*8+1], stackV[(currStackPos-1)*8+5], 0.5); stackV[currStackPos*8+7] = sampleVecFieldBilin_D(stackV[(currStackPos-1)*8+1], stackV[(currStackPos-1)*8+3], stackV[(currStackPos-1)*8+5], stackV[(currStackPos-1)*8+7], 0.5, 0.5); } else if(currSubCell[currStackPos-1] == 2) { // left/top/back stackCorners[currStackPos*8+0] = make_float3(stackCorners[(currStackPos-1)*8+0].x, stackCorners[(currStackPos-1)*8+0].y+cellSize, stackCorners[(currStackPos-1)*8+0].z); stackCorners[currStackPos*8+1] = make_float3(stackCorners[(currStackPos-1)*8+0].x+cellSize, stackCorners[(currStackPos-1)*8+0].y+cellSize, stackCorners[(currStackPos-1)*8+0].z); stackCorners[currStackPos*8+2] = make_float3(stackCorners[(currStackPos-1)*8+2].x, 
stackCorners[(currStackPos-1)*8+2].y, stackCorners[(currStackPos-1)*8+2].z); stackCorners[currStackPos*8+3] = make_float3(stackCorners[(currStackPos-1)*8+2].x+cellSize, stackCorners[(currStackPos-1)*8+2].y, stackCorners[(currStackPos-1)*8+2].z); stackCorners[currStackPos*8+4] = make_float3(stackCorners[(currStackPos-1)*8+0].x, stackCorners[(currStackPos-1)*8+0].y+cellSize, stackCorners[(currStackPos-1)*8+0].z+cellSize); stackCorners[currStackPos*8+5] = make_float3(stackCorners[(currStackPos-1)*8+0].x+cellSize, stackCorners[(currStackPos-1)*8+0].y+cellSize, stackCorners[(currStackPos-1)*8+0].z+cellSize); stackCorners[currStackPos*8+6] = make_float3(stackCorners[(currStackPos-1)*8+2].x, stackCorners[(currStackPos-1)*8+2].y, stackCorners[(currStackPos-1)*8+2].z+cellSize); stackCorners[currStackPos*8+7] = make_float3(stackCorners[(currStackPos-1)*8+2].x+cellSize, stackCorners[(currStackPos-1)*8+2].y, stackCorners[(currStackPos-1)*8+2].z+cellSize); // Sample vector field at cell corners (while reusing vals from the last subcell) stackV[currStackPos*8+1] = sampleVecFieldBilin_D(stackV[(currStackPos-1)*8+0], stackV[(currStackPos-1)*8+1], stackV[(currStackPos-1)*8+2], stackV[(currStackPos-1)*8+3], 0.5, 0.5); stackV[currStackPos*8+5] = sampleVecFieldTrilin_D(&stackV[(currStackPos-1)*8], 0.5, 0.5, 0.5); stackV[currStackPos*8+0] = sampleVecFieldLin_D(stackV[(currStackPos-1)*8+0], stackV[(currStackPos-1)*8+2], 0.5); stackV[currStackPos*8+2] = stackV[(currStackPos-1)*8+2]; stackV[currStackPos*8+3] = sampleVecFieldLin_D(stackV[(currStackPos-1)*8+2], stackV[(currStackPos-1)*8+3], 0.5); stackV[currStackPos*8+4] = sampleVecFieldBilin_D(stackV[(currStackPos-1)*8+0], stackV[(currStackPos-1)*8+2], stackV[(currStackPos-1)*8+4], stackV[(currStackPos-1)*8+6], 0.5, 0.5); stackV[currStackPos*8+6] = sampleVecFieldLin_D(stackV[(currStackPos-1)*8+2], stackV[(currStackPos-1)*8+6], 0.5); stackV[currStackPos*8+7] = sampleVecFieldBilin_D(stackV[(currStackPos-1)*8+2], stackV[(currStackPos-1)*8+3], stackV[(currStackPos-1)*8+6], stackV[(currStackPos-1)*8+7], 0.5, 0.5); } else if(currSubCell[currStackPos-1] == 3) { // right/top/back stackCorners[currStackPos*8+0] = make_float3(stackCorners[(currStackPos-1)*8+0].x+cellSize, stackCorners[(currStackPos-1)*8+0].y+cellSize, stackCorners[(currStackPos-1)*8+0].z); stackCorners[currStackPos*8+1] = make_float3(stackCorners[(currStackPos-1)*8+1].x, stackCorners[(currStackPos-1)*8+1].y+cellSize, stackCorners[(currStackPos-1)*8+1].z); stackCorners[currStackPos*8+2] = make_float3(stackCorners[(currStackPos-1)*8+2].x+cellSize, stackCorners[(currStackPos-1)*8+2].y, stackCorners[(currStackPos-1)*8+2].z); stackCorners[currStackPos*8+3] = make_float3(stackCorners[(currStackPos-1)*8+3].x, stackCorners[(currStackPos-1)*8+3].y, stackCorners[(currStackPos-1)*8+3].z); stackCorners[currStackPos*8+4] = make_float3(stackCorners[(currStackPos-1)*8+0].x+cellSize, stackCorners[(currStackPos-1)*8+0].y+cellSize, stackCorners[(currStackPos-1)*8+0].z+cellSize); stackCorners[currStackPos*8+5] = make_float3(stackCorners[(currStackPos-1)*8+1].x, stackCorners[(currStackPos-1)*8+1].y+cellSize, stackCorners[(currStackPos-1)*8+1].z+cellSize); stackCorners[currStackPos*8+6] = make_float3(stackCorners[(currStackPos-1)*8+2].x+cellSize, stackCorners[(currStackPos-1)*8+2].y, stackCorners[(currStackPos-1)*8+2].z+cellSize); stackCorners[currStackPos*8+7] = make_float3(stackCorners[(currStackPos-1)*8+3].x, stackCorners[(currStackPos-1)*8+3].y, stackCorners[(currStackPos-1)*8+3].z+cellSize); // Sample vector field at cell 
corners (while reusing vals from the last subcell) stackV[currStackPos*8+0] = sampleVecFieldBilin_D(stackV[(currStackPos-1)*8+0], stackV[(currStackPos-1)*8+1], stackV[(currStackPos-1)*8+2], stackV[(currStackPos-1)*8+3], 0.5, 0.5); stackV[currStackPos*8+4] = sampleVecFieldTrilin_D(&stackV[(currStackPos-1)*8], 0.5, 0.5, 0.5); stackV[currStackPos*8+2] = sampleVecFieldLin_D(stackV[(currStackPos-1)*8+2], stackV[(currStackPos-1)*8+3], 0.5); stackV[currStackPos*8+6] = sampleVecFieldBilin_D(stackV[(currStackPos-1)*8+2], stackV[(currStackPos-1)*8+3], stackV[(currStackPos-1)*8+6], stackV[(currStackPos-1)*8+7], 0.5, 0.5); stackV[currStackPos*8+1] = sampleVecFieldLin_D(stackV[(currStackPos-1)*8+1], stackV[(currStackPos-1)*8+3], 0.5); stackV[currStackPos*8+3] = stackV[(currStackPos-1)*8+3]; stackV[currStackPos*8+5] = sampleVecFieldBilin_D(stackV[(currStackPos-1)*8+1], stackV[(currStackPos-1)*8+3], stackV[(currStackPos-1)*8+5], stackV[(currStackPos-1)*8+7], 0.5, 0.5); stackV[currStackPos*8+7] = sampleVecFieldLin_D(stackV[(currStackPos-1)*8+3], stackV[(currStackPos-1)*8+7], 0.5); } else if(currSubCell[currStackPos-1] == 4) { // left/bottom/front stackCorners[currStackPos*8+0] = make_float3(stackCorners[(currStackPos-1)*8+0].x, stackCorners[(currStackPos-1)*8+0].y, stackCorners[(currStackPos-1)*8+0].z+cellSize); stackCorners[currStackPos*8+1] = make_float3(stackCorners[(currStackPos-1)*8+0].x+cellSize, stackCorners[(currStackPos-1)*8+0].y, stackCorners[(currStackPos-1)*8+0].z+cellSize); stackCorners[currStackPos*8+2] = make_float3(stackCorners[(currStackPos-1)*8+0].x, stackCorners[(currStackPos-1)*8+0].y+cellSize, stackCorners[(currStackPos-1)*8+0].z+cellSize); stackCorners[currStackPos*8+3] = make_float3(stackCorners[(currStackPos-1)*8+0].x+cellSize, stackCorners[(currStackPos-1)*8+0].y+cellSize, stackCorners[(currStackPos-1)*8+0].z+cellSize); stackCorners[currStackPos*8+4] = make_float3(stackCorners[(currStackPos-1)*8+4].x, stackCorners[(currStackPos-1)*8+4].y, stackCorners[(currStackPos-1)*8+4].z); stackCorners[currStackPos*8+5] = make_float3(stackCorners[(currStackPos-1)*8+4].x+cellSize, stackCorners[(currStackPos-1)*8+4].y, stackCorners[(currStackPos-1)*8+4].z); stackCorners[currStackPos*8+6] = make_float3(stackCorners[(currStackPos-1)*8+4].x, stackCorners[(currStackPos-1)*8+4].y+cellSize, stackCorners[(currStackPos-1)*8+4].z); stackCorners[currStackPos*8+7] = make_float3(stackCorners[(currStackPos-1)*8+4].x+cellSize, stackCorners[(currStackPos-1)*8+4].y+cellSize, stackCorners[(currStackPos-1)*8+4].z); // Sample vector field at cell corners (while reusing vals from the last subcell) stackV[currStackPos*8+3] = sampleVecFieldTrilin_D(&stackV[(currStackPos-1)*8], 0.5, 0.5, 0.5); stackV[currStackPos*8+0] = sampleVecFieldLin_D(stackV[(currStackPos-1)*8+0], stackV[(currStackPos-1)*8+4], 0.5); stackV[currStackPos*8+1] = sampleVecFieldBilin_D(stackV[(currStackPos-1)*8+0], stackV[(currStackPos-1)*8+1], stackV[(currStackPos-1)*8+4], stackV[(currStackPos-1)*8+5], 0.5, 0.5); stackV[currStackPos*8+2] = sampleVecFieldBilin_D(stackV[(currStackPos-1)*8+0], stackV[(currStackPos-1)*8+2], stackV[(currStackPos-1)*8+4], stackV[(currStackPos-1)*8+6], 0.5, 0.5); stackV[currStackPos*8+4] = stackV[(currStackPos-1)*8+4]; stackV[currStackPos*8+5] = sampleVecFieldLin_D(stackV[(currStackPos-1)*8+4], stackV[(currStackPos-1)*8+5], 0.5); stackV[currStackPos*8+6] = sampleVecFieldLin_D(stackV[(currStackPos-1)*8+4], stackV[(currStackPos-1)*8+6], 0.5); stackV[currStackPos*8+7] = sampleVecFieldBilin_D(stackV[(currStackPos-1)*8+4], 
stackV[(currStackPos-1)*8+5], stackV[(currStackPos-1)*8+6], stackV[(currStackPos-1)*8+7], 0.5, 0.5); } else if(currSubCell[currStackPos-1] == 5) { // right/bottom/front stackCorners[currStackPos*8+0] = make_float3(stackCorners[(currStackPos-1)*8+0].x+cellSize, stackCorners[(currStackPos-1)*8+0].y, stackCorners[(currStackPos-1)*8+0].z+cellSize); stackCorners[currStackPos*8+1] = make_float3(stackCorners[(currStackPos-1)*8+1].x, stackCorners[(currStackPos-1)*8+1].y, stackCorners[(currStackPos-1)*8+1].z+cellSize); stackCorners[currStackPos*8+2] = make_float3(stackCorners[(currStackPos-1)*8+0].x+cellSize, stackCorners[(currStackPos-1)*8+0].y+cellSize, stackCorners[(currStackPos-1)*8+0].z+cellSize); stackCorners[currStackPos*8+3] = make_float3(stackCorners[(currStackPos-1)*8+1].x, stackCorners[(currStackPos-1)*8+1].y+cellSize, stackCorners[(currStackPos-1)*8+1].z+cellSize); stackCorners[currStackPos*8+4] = make_float3(stackCorners[(currStackPos-1)*8+4].x+cellSize, stackCorners[(currStackPos-1)*8+4].y, stackCorners[(currStackPos-1)*8+4].z); stackCorners[currStackPos*8+5] = make_float3(stackCorners[(currStackPos-1)*8+5].x, stackCorners[(currStackPos-1)*8+5].y, stackCorners[(currStackPos-1)*8+5].z); stackCorners[currStackPos*8+6] = make_float3(stackCorners[(currStackPos-1)*8+4].x+cellSize, stackCorners[(currStackPos-1)*8+4].y+cellSize, stackCorners[(currStackPos-1)*8+4].z); stackCorners[currStackPos*8+7] = make_float3(stackCorners[(currStackPos-1)*8+5].x, stackCorners[(currStackPos-1)*8+5].y+cellSize, stackCorners[(currStackPos-1)*8+5].z); // Sample vector field at cell corners (while reusing vals from the last subcell) stackV[currStackPos*8+0] = sampleVecFieldBilin_D(stackV[(currStackPos-1)*8+0], stackV[(currStackPos-1)*8+1], stackV[(currStackPos-1)*8+4], stackV[(currStackPos-1)*8+5], 0.5, 0.5); stackV[currStackPos*8+4] = sampleVecFieldLin_D(stackV[(currStackPos-1)*8+4], stackV[(currStackPos-1)*8+5], 0.5); stackV[currStackPos*8+2] = sampleVecFieldTrilin_D(&stackV[(currStackPos-1)*8], 0.5, 0.5, 0.5); stackV[currStackPos*8+6] = sampleVecFieldBilin_D(stackV[(currStackPos-1)*8+4], stackV[(currStackPos-1)*8+5], stackV[(currStackPos-1)*8+6], stackV[(currStackPos-1)*8+7], 0.5, 0.5); stackV[currStackPos*8+1] = sampleVecFieldLin_D(stackV[(currStackPos-1)*8+1], stackV[(currStackPos-1)*8+5], 0.5); stackV[currStackPos*8+3] = sampleVecFieldBilin_D(stackV[(currStackPos-1)*8+1], stackV[(currStackPos-1)*8+3], stackV[(currStackPos-1)*8+5], stackV[(currStackPos-1)*8+7], 0.5, 0.5); stackV[currStackPos*8+5] = stackV[(currStackPos-1)*8+5]; stackV[currStackPos*8+7] = sampleVecFieldLin_D(stackV[(currStackPos-1)*8+5], stackV[(currStackPos-1)*8+7], 0.5); } else if(currSubCell[currStackPos-1] == 6) { // left/top/front stackCorners[currStackPos*8+0] = make_float3(stackCorners[(currStackPos-1)*8+0].x, stackCorners[(currStackPos-1)*8+0].y+cellSize, stackCorners[(currStackPos-1)*8+0].z+cellSize); stackCorners[currStackPos*8+1] = make_float3(stackCorners[(currStackPos-1)*8+0].x+cellSize, stackCorners[(currStackPos-1)*8+0].y+cellSize, stackCorners[(currStackPos-1)*8+0].z+cellSize); stackCorners[currStackPos*8+2] = make_float3(stackCorners[(currStackPos-1)*8+2].x, stackCorners[(currStackPos-1)*8+2].y, stackCorners[(currStackPos-1)*8+2].z+cellSize); stackCorners[currStackPos*8+3] = make_float3(stackCorners[(currStackPos-1)*8+2].x+cellSize, stackCorners[(currStackPos-1)*8+2].y, stackCorners[(currStackPos-1)*8+2].z+cellSize); stackCorners[currStackPos*8+4] = make_float3(stackCorners[(currStackPos-1)*8+4].x, 
stackCorners[(currStackPos-1)*8+4].y+cellSize, stackCorners[(currStackPos-1)*8+4].z); stackCorners[currStackPos*8+5] = make_float3(stackCorners[(currStackPos-1)*8+4].x+cellSize, stackCorners[(currStackPos-1)*8+4].y+cellSize, stackCorners[(currStackPos-1)*8+4].z); stackCorners[currStackPos*8+6] = make_float3(stackCorners[(currStackPos-1)*8+6].x, stackCorners[(currStackPos-1)*8+6].y, stackCorners[(currStackPos-1)*8+6].z); stackCorners[currStackPos*8+7] = make_float3(stackCorners[(currStackPos-1)*8+6].x+cellSize, stackCorners[(currStackPos-1)*8+6].y, stackCorners[(currStackPos-1)*8+6].z); // Sample vector field at cell corners (while reusing vals from the last subcell) stackV[currStackPos*8+1] = sampleVecFieldTrilin_D(&stackV[(currStackPos-1)*8], 0.5, 0.5, 0.5); stackV[currStackPos*8+5] = sampleVecFieldBilin_D(stackV[(currStackPos-1)*8+4], stackV[(currStackPos-1)*8+5], stackV[(currStackPos-1)*8+6], stackV[(currStackPos-1)*8+7], 0.5, 0.5); stackV[currStackPos*8+2] = sampleVecFieldLin_D(stackV[(currStackPos-1)*8+2], stackV[(currStackPos-1)*8+6], 0.5); stackV[currStackPos*8+0] = sampleVecFieldBilin_D(stackV[(currStackPos-1)*8+0], stackV[(currStackPos-1)*8+2], stackV[(currStackPos-1)*8+4], stackV[(currStackPos-1)*8+6], 0.5, 0.5); stackV[currStackPos*8+3] = sampleVecFieldBilin_D(stackV[(currStackPos-1)*8+2], stackV[(currStackPos-1)*8+3], stackV[(currStackPos-1)*8+6], stackV[(currStackPos-1)*8+7], 0.5, 0.5); stackV[currStackPos*8+4] = sampleVecFieldLin_D(stackV[(currStackPos-1)*8+4], stackV[(currStackPos-1)*8+6], 0.5); stackV[currStackPos*8+6] = stackV[(currStackPos-1)*8+6]; stackV[currStackPos*8+7] = sampleVecFieldLin_D(stackV[(currStackPos-1)*8+6], stackV[(currStackPos-1)*8+7], 0.5); } else if(currSubCell[currStackPos-1] == 7) { // right/top/front stackCorners[currStackPos*8+0] = make_float3(stackCorners[(currStackPos-1)*8+0].x+cellSize, stackCorners[(currStackPos-1)*8+0].y+cellSize, stackCorners[(currStackPos-1)*8+0].z+cellSize); stackCorners[currStackPos*8+1] = make_float3(stackCorners[(currStackPos-1)*8+1].x, stackCorners[(currStackPos-1)*8+1].y+cellSize, stackCorners[(currStackPos-1)*8+1].z+cellSize); stackCorners[currStackPos*8+2] = make_float3(stackCorners[(currStackPos-1)*8+2].x+cellSize, stackCorners[(currStackPos-1)*8+2].y, stackCorners[(currStackPos-1)*8+2].z+cellSize); stackCorners[currStackPos*8+3] = make_float3(stackCorners[(currStackPos-1)*8+3].x, stackCorners[(currStackPos-1)*8+3].y, stackCorners[(currStackPos-1)*8+3].z+cellSize); stackCorners[currStackPos*8+4] = make_float3(stackCorners[(currStackPos-1)*8+4].x+cellSize, stackCorners[(currStackPos-1)*8+4].y+cellSize, stackCorners[(currStackPos-1)*8+4].z); stackCorners[currStackPos*8+5] = make_float3(stackCorners[(currStackPos-1)*8+5].x, stackCorners[(currStackPos-1)*8+5].y+cellSize, stackCorners[(currStackPos-1)*8+5].z); stackCorners[currStackPos*8+6] = make_float3(stackCorners[(currStackPos-1)*8+6].x+cellSize, stackCorners[(currStackPos-1)*8+6].y, stackCorners[(currStackPos-1)*8+6].z); stackCorners[currStackPos*8+7] = make_float3(stackCorners[(currStackPos-1)*8+7].x, stackCorners[(currStackPos-1)*8+7].y, stackCorners[(currStackPos-1)*8+7].z); // Sample vector field at cell corners (while reusing vals from the last subcell) stackV[currStackPos*8+0] = sampleVecFieldTrilin_D(&stackV[(currStackPos-1)*8], 0.5, 0.5, 0.5); stackV[currStackPos*8+4] = sampleVecFieldBilin_D(stackV[(currStackPos-1)*8+4], stackV[(currStackPos-1)*8+5], stackV[(currStackPos-1)*8+6], stackV[(currStackPos-1)*8+7], 0.5, 0.5); stackV[currStackPos*8+2] = 
sampleVecFieldBilin_D(stackV[(currStackPos-1)*8+2], stackV[(currStackPos-1)*8+3], stackV[(currStackPos-1)*8+6], stackV[(currStackPos-1)*8+7], 0.5, 0.5); stackV[currStackPos*8+6] = sampleVecFieldLin_D(stackV[(currStackPos-1)*8+6], stackV[(currStackPos-1)*8+7], 0.5); stackV[currStackPos*8+1] = sampleVecFieldBilin_D(stackV[(currStackPos-1)*8+1], stackV[(currStackPos-1)*8+3], stackV[(currStackPos-1)*8+5], stackV[(currStackPos-1)*8+7], 0.5, 0.5); stackV[currStackPos*8+3] = sampleVecFieldLin_D(stackV[(currStackPos-1)*8+3], stackV[(currStackPos-1)*8+7], 0.5); stackV[currStackPos*8+5] = sampleVecFieldLin_D(stackV[(currStackPos-1)*8+5], stackV[(currStackPos-1)*8+7], 0.5); stackV[currStackPos*8+7] = stackV[(currStackPos-1)*8+7]; } currSubCell[currStackPos] = 0; } else { // Put the center of the current (sub-)cell on the stack cellCoords_D[idx].x = stackCorners[8*currStackPos+0].x + cellSize*0.5; cellCoords_D[idx].y = stackCorners[8*currStackPos+0].y + cellSize*0.5; cellCoords_D[idx].z = stackCorners[8*currStackPos+0].z + cellSize*0.5; done = true; } } else { if(currStackPos > 0) { currStackPos--; cellSize = cellSize*2.0; currSubCell[currStackPos]++; } else { // Field is not vanishing in this cell done = true; } } } } extern "C" { /* SetGridParams */ hipError_t SetGridParams(uint3 dim_h, float3 org_h, float3 maxCoord_h, float3 spacing_h) { checkCudaErrors(hipMemcpyToSymbol(dim, &dim_h, sizeof(uint3))); checkCudaErrors(hipMemcpyToSymbol(org, &org_h, sizeof(float3))); checkCudaErrors(hipMemcpyToSymbol(maxCoord, &maxCoord_h, sizeof(float3))); checkCudaErrors(hipMemcpyToSymbol(spacing, &spacing_h, sizeof(float3))); return hipGetLastError(); } /* SetStreamlineStepsize */ hipError_t SetStreamlineParams(float stepsize_h, uint maxSteps) { checkCudaErrors(hipMemcpyToSymbol(streamlinesStep, &stepsize_h, sizeof(float))); checkCudaErrors(hipMemcpyToSymbol(streamlinesStepCnt, &maxSteps, sizeof(uint))); return hipGetLastError(); } /* SetNumberOfPos */ hipError_t SetNumberOfPos(uint nPos_h) { checkCudaErrors(hipMemcpyToSymbol(nPos, &nPos_h, sizeof(uint))); return hipGetLastError(); } /* UpdatePosition */ hipError_t UpdatePositionRK4( const float *vecField, uint3 dim, float *pos, uint nPos, uint maxIt, bool backward) { uint nThreadsPerBlock = min(512, nPos); uint nBlocks = ceil((float)(nPos)/(float)(nThreadsPerBlock)); float3 *vecField_D, *pos_D; // Allocate device memory checkCudaErrors(hipMalloc((void **)&vecField_D, sizeof(float)*dim.x*dim.y*dim.z*3)); checkCudaErrors(hipMalloc((void **)&pos_D, sizeof(float)*nPos*3)); // Copy vec field data to device memory checkCudaErrors(hipMemcpy(vecField_D, vecField, sizeof(float)*dim.x*dim.y*dim.z*3, hipMemcpyHostToDevice)); // Copy positions to device memory checkCudaErrors(hipMemcpy(pos_D, pos, sizeof(float)*nPos*3, hipMemcpyHostToDevice)); if(backward) { //printf("CUDA streamline integration (backward), max steps %u\n", maxIt); // DEBUG for(uint i = 0; i < maxIt; i++) { // Update position maxIt times hipLaunchKernelGGL(( UpdatePositionBackwardRK4_D) , dim3(nBlocks), dim3(nThreadsPerBlock) , 0, 0, vecField_D, pos_D); hipDeviceSynchronize(); } } else { //printf("CUDA streamline integration, max steps %u\n", maxIt); // DEBUG for(uint i = 0; i < maxIt; i++) { // Update position maxIt times hipLaunchKernelGGL(( UpdatePositionRK4_D) , dim3(nBlocks), dim3(nThreadsPerBlock) , 0, 0, vecField_D, pos_D); hipDeviceSynchronize(); } } // Copy updated positions back to host memory checkCudaErrors(hipMemcpy(pos, pos_D, sizeof(float)*nPos*3, hipMemcpyDeviceToHost)); // Cleanup device 
memory checkCudaErrors(hipFree(vecField_D)); checkCudaErrors(hipFree(pos_D)); return hipGetLastError(); } /* SearchNullPoints */ hipError_t SearchNullPoints( const float *vecField, uint3 dim, float3 org, float3 spacing, float *cellCoords, unsigned int maxStackSize) { uint n = (dim.x-1)*(dim.y-1)*(dim.z-1); uint nThreadsPerBlock = min(512, n); uint nBlocks = ceil((float)(n)/(float)(nThreadsPerBlock)); float3 *vecField_D, *cellCoords_D; // Allocate device memory checkCudaErrors(hipMalloc((void **)&vecField_D, sizeof(float)*dim.x*dim.y*dim.z*3)); checkCudaErrors(hipMalloc((void **)&cellCoords_D, sizeof(float)*n*3)); // Copy vec field data to device memory checkCudaErrors(hipMemcpy(vecField_D, vecField, sizeof(float)*dim.x*dim.y*dim.z*3, hipMemcpyHostToDevice)); // Calculate cell coordinates of the critical points hipLaunchKernelGGL(( calcCellCoords_D2) , dim3(nBlocks), dim3(nThreadsPerBlock) , 0, 0, vecField_D, cellCoords_D); // Copy cell coords back to host memory checkCudaErrors(hipMemcpy(cellCoords, cellCoords_D, sizeof(float)*n*3, hipMemcpyDeviceToHost)); // Cleanup device memory checkCudaErrors(hipFree(vecField_D)); checkCudaErrors(hipFree(cellCoords_D)); return hipGetLastError(); } } // Streamline integration ////////////////////////////////////////////////////// /** * Calculates the gradient field of a given scalar field. * * @param[in] scalarField_D The scalar field (device memory) * @param[out] gradientField_D The gradient field (device memory) */ __global__ void CalcGradient_D(float *scalarDield_D, float *gradientField_D) { uint idx = blockIdx.x * blockDim.x + threadIdx.x; // Get coordinates of the lower/left/back corner // uint3 c; // c.x = idx%(dim.x-1); // c.y = (idx/(dim.x-1))%(dim.y-1); // c.z = (idx/(dim.x-1))/(dim.y-1); uint3 c; c.x = idx%dim.x; c.y = (idx/dim.x)%dim.y; c.z = (idx/dim.x)/dim.y; // Omit border cells if (c.x == 0) { return; } if (c.y == 0) { return; } if (c.z == 0) { return; } if (c.x >= dim.x-1) { return; } if (c.y >= dim.y-1) { return; } if (c.z >= dim.z-1) { return; } float3 gradient; gradient.x = scalarDield_D[dim.x*(dim.y*c.z+c.y)+c.x+1]; gradient.y = scalarDield_D[dim.x*(dim.y*c.z+c.y+1)+c.x]; gradient.z = scalarDield_D[dim.x*(dim.y*(c.z+1)+c.y)+c.x]; gradient.x -= scalarDield_D[dim.x*(dim.y*c.z+c.y)+c.x-1]; gradient.y -= scalarDield_D[dim.x*(dim.y*c.z+c.y-1)+c.x]; gradient.z -= scalarDield_D[dim.x*(dim.y*(c.z-1)+c.y)+c.x]; gradient = normalize(gradient); gradientField_D[3*idx+0] = gradient.x; gradientField_D[3*idx+1] = gradient.y; gradientField_D[3*idx+2] = gradient.z; } extern "C" hipError_t CalcGradient(float *scalarDield_D, float *gradientField_D, uint volsize) { #ifdef USE_TIMER float dt_ms; hipEvent_t event1, event2; hipEventCreate(&event1); hipEventCreate(&event2); hipEventRecord(event1, 0); #endif const int threadsPerBlock = 256; const int blocksPerGrid = (volsize + threadsPerBlock - 1) / threadsPerBlock; dim3 grid(blocksPerGrid, 1, 1); // Calculate gradient of the scalar field hipLaunchKernelGGL(( CalcGradient_D) , dim3(grid), dim3(threadsPerBlock) , 0, 0, scalarDield_D, gradientField_D); #ifdef USE_TIMER hipEventRecord(event2, 0); hipEventSynchronize(event1); hipEventSynchronize(event2); hipEventElapsedTime(&dt_ms, event1, event2); printf("CUDA time 'CalcGradient_D': %.10f sec\n", dt_ms/1000.0f); #endif return hipGetLastError(); } /** TODO */ __global__ void InitStartPos_D(float *vertexDataBuffer_D, float *streamlinePos_D, uint vertexDataBufferStride, uint vertexDataBufferOffsPos) { uint idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx > 
nPos) { return; } streamlinePos_D[idx*streamlinesStepCnt*6+0] = vertexDataBuffer_D[idx*vertexDataBufferStride+vertexDataBufferOffsPos+0]; streamlinePos_D[idx*streamlinesStepCnt*6+1] = vertexDataBuffer_D[idx*vertexDataBufferStride+vertexDataBufferOffsPos+1]; streamlinePos_D[idx*streamlinesStepCnt*6+2] = vertexDataBuffer_D[idx*vertexDataBufferStride+vertexDataBufferOffsPos+2]; } hipError_t InitStartPos(float *vertexDataBuffer_D, float *streamlinePos_D, uint vertexDataBufferStride, uint vertexDataBufferOffsPos, uint vertexCnt) { #ifdef USE_TIMER float dt_ms; hipEvent_t event1, event2; hipEventCreate(&event1); hipEventCreate(&event2); hipEventRecord(event1, 0); #endif const int threadsPerBlock = 256; const int blocksPerGrid = (vertexCnt + threadsPerBlock - 1) / threadsPerBlock; dim3 grid(blocksPerGrid, 1, 1); // Calculate gradient of the scalar field hipLaunchKernelGGL(( InitStartPos_D) , dim3(grid), dim3(threadsPerBlock) , 0, 0, vertexDataBuffer_D, streamlinePos_D, vertexDataBufferStride, vertexDataBufferOffsPos); #ifdef USE_TIMER hipEventRecord(event2, 0); hipEventSynchronize(event1); hipEventSynchronize(event2); hipEventElapsedTime(&dt_ms, event1, event2); printf("CUDA time 'InitStartPos_D': %.10f sec\n", dt_ms/1000.0f); #endif return hipGetLastError(); } /** TODO */ __global__ void UpdateStreamlinePos_D(float *streamlinePos_D, float3 *gradientField_D, uint step) { uint idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx > nPos) { return; } float3 currPos; currPos.x = streamlinePos_D[idx*streamlinesStepCnt*6+step*6+0]; currPos.y = streamlinePos_D[idx*streamlinesStepCnt*6+step*6+1]; currPos.z = streamlinePos_D[idx*streamlinesStepCnt*6+step*6+2]; float3 x0, x1, x2, x3, v0, v1, v2, v3; v0 = make_float3(0.0, 0.0, 0.0); v1 = make_float3(0.0, 0.0, 0.0); v2 = make_float3(0.0, 0.0, 0.0); v3 = make_float3(0.0, 0.0, 0.0); x0 = currPos; v0 = normalize(sampleVecFieldAtTrilinNorm_D(x0, gradientField_D)); v0 *= streamlinesStep; x1 = x0 + 0.5*v0; if(isValidGridPos_D(x1)) { v1 = normalize(sampleVecFieldAtTrilinNorm_D(x1, gradientField_D)); v1 *= streamlinesStep; } x2 = x0 + 0.5f*v1; if(isValidGridPos_D(x2)) { v2 = normalize(sampleVecFieldAtTrilinNorm_D(x2, gradientField_D)); v2 *= streamlinesStep; } x3 = x0 + v2; if(isValidGridPos_D(x3)) { v3 = normalize(sampleVecFieldAtTrilinNorm_D(x3, gradientField_D)); v3 *= streamlinesStep; } x0 += (1.0f/6.0f)*(v0+2.0f*v1+2.0f*v2+v3); // Copy position to streamline position array if it is valid if(isValidGridPos_D(x0)) { streamlinePos_D[idx*streamlinesStepCnt*6+step*6+3] = x0.x; streamlinePos_D[idx*streamlinesStepCnt*6+step*6+4] = x0.y; streamlinePos_D[idx*streamlinesStepCnt*6+step*6+5] = x0.z; } else { streamlinePos_D[idx*streamlinesStepCnt*6+step*6+3] = streamlinePos_D[idx*streamlinesStepCnt*6+step*6+0]; streamlinePos_D[idx*streamlinesStepCnt*6+step*6+4] = streamlinePos_D[idx*streamlinesStepCnt*6+step*6+1]; streamlinePos_D[idx*streamlinesStepCnt*6+step*6+5] = streamlinePos_D[idx*streamlinesStepCnt*6+step*6+2]; } // Copy new position to the next line segment streamlinePos_D[idx*streamlinesStepCnt*6+(step+1)*6+0] = streamlinePos_D[idx*streamlinesStepCnt*6+step*6+3]; streamlinePos_D[idx*streamlinesStepCnt*6+(step+1)*6+1] = streamlinePos_D[idx*streamlinesStepCnt*6+step*6+4]; streamlinePos_D[idx*streamlinesStepCnt*6+(step+1)*6+2] = streamlinePos_D[idx*streamlinesStepCnt*6+step*6+5]; } hipError_t UpdateStreamlinePos(float *streamlinePos_D,float *gradientField_D, uint vertexCnt, uint step) { #ifdef USE_TIMER float dt_ms; hipEvent_t event1, event2; hipEventCreate(&event1); 
hipEventCreate(&event2); hipEventRecord(event1, 0); #endif const int threadsPerBlock = 256; const int blocksPerGrid = (vertexCnt + threadsPerBlock - 1) / threadsPerBlock; dim3 grid(blocksPerGrid, 1, 1); // Calculate gradient of the scalar field hipLaunchKernelGGL(( UpdateStreamlinePos_D) , dim3(grid), dim3(threadsPerBlock) , 0, 0, streamlinePos_D, (float3*)(gradientField_D), step); #ifdef USE_TIMER hipEventRecord(event2, 0); hipEventSynchronize(event1); hipEventSynchronize(event2); hipEventElapsedTime(&dt_ms, event1, event2); printf("CUDA time 'UpdateStreamlinePos_D': %.10f sec\n", dt_ms/1000.0f); #endif return hipGetLastError(); }
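A minimal host-side sketch of how the extern "C" entry points defined above (SetGridParams, SetStreamlineParams, SetNumberOfPos, UpdatePositionRK4) might be driven. The function signatures are taken from this file; the grid dimensions, spacing, step size, seed coordinates and the constant test field below are invented purely for illustration, and `uint` is assumed to alias `unsigned int`.

// Illustrative driver only -- not part of VecField3DCUDA.cu.
#include <hip/hip_runtime.h>
#include <cstdio>
#include <vector>

// Declarations matching the extern "C" definitions above (uint == unsigned int assumed).
extern "C" hipError_t SetGridParams(uint3 dim_h, float3 org_h, float3 maxCoord_h, float3 spacing_h);
extern "C" hipError_t SetStreamlineParams(float stepsize_h, unsigned int maxSteps);
extern "C" hipError_t SetNumberOfPos(unsigned int nPos_h);
extern "C" hipError_t UpdatePositionRK4(const float *vecField, uint3 dim, float *pos,
        unsigned int nPos, unsigned int maxIt, bool backward);

int main() {
    // Hypothetical uniform grid (values chosen only for this example).
    uint3 dim = make_uint3(64, 64, 64);
    float3 org = make_float3(0.0f, 0.0f, 0.0f);
    float3 spacing = make_float3(1.0f, 1.0f, 1.0f);
    float3 maxCoord = make_float3(org.x + spacing.x * (dim.x - 1),
                                  org.y + spacing.y * (dim.y - 1),
                                  org.z + spacing.z * (dim.z - 1));

    // Flat vector field, three floats per grid node; here a constant field in +x.
    std::vector<float> vecField(3 * dim.x * dim.y * dim.z, 0.0f);
    for (size_t i = 0; i < vecField.size(); i += 3) vecField[i] = 1.0f;

    // Two seed positions (x, y, z interleaved); the integrator updates them in place.
    std::vector<float> seeds = { 10.0f, 10.0f, 10.0f,   20.0f, 30.0f, 15.0f };
    unsigned int nSeeds = static_cast<unsigned int>(seeds.size() / 3);

    // Upload the grid and integration parameters to constant memory.
    SetGridParams(dim, org, maxCoord, spacing);
    SetStreamlineParams(0.5f, 100);   // step size and maximum step count
    SetNumberOfPos(nSeeds);

    // Advect the seeds forward for 100 RK4 steps (backward = false).
    UpdatePositionRK4(vecField.data(), dim, seeds.data(), nSeeds, 100, false);
    printf("first seed ended at (%.2f, %.2f, %.2f)\n", seeds[0], seeds[1], seeds[2]);
    return 0;
}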
3487174cf71db3736a286e50a0f2f29364165c30.cu
// // VecField3DCUDA.cu // // Copyright (C) 2013 by University of Stuttgart (VISUS). // All rights reserved. // #include "helper_cuda.h" #include "helper_functions.h" #include "helper_math.h" // Shut up eclipse syntax error highlighting #ifdef __CDT_PARSER__ #define __device__ #define __global__ #define __shared__ #define __constant__ #define __host__ #endif // Toggle performance measurement and respective messages #define USE_TIMER __constant__ __device__ uint3 dim; __constant__ __device__ float3 org; __constant__ __device__ float3 maxCoord; __constant__ __device__ float3 spacing; __constant__ __device__ float streamlinesStep; __constant__ __device__ uint streamlinesStepCnt; __constant__ __device__ uint nPos; __constant__ __device__ uint maxStackSize; /* isValidGridPos_D */ inline __device__ bool isValidGridPos_D(float3 pos) { return (pos.x < maxCoord.x)&& (pos.y < maxCoord.y)&& (pos.z < maxCoord.z)&& (pos.x >= org.x)&& (pos.y >= org.y)&& (pos.z >= org.z); } /* sampleVecFieldLin_D */ inline __device__ float3 sampleVecFieldLin_D(float3 v0, float3 v1, float alpha) { return v0+alpha*(v1-v0); } /* sampleVecFieldBilin_D */ inline __device__ float3 sampleVecFieldBilin_D(float3 v0, float3 v1, float3 v2, float3 v3, float alpha, float beta) { return sampleVecFieldLin_D(sampleVecFieldLin_D(v0, v1, alpha), sampleVecFieldLin_D(v2, v3, alpha), beta); } /* sampleVecFieldTrilin_D */ inline __device__ float3 sampleVecFieldTrilin_D(float3 v[8], float alpha, float beta, float gamma) { return sampleVecFieldLin_D( sampleVecFieldBilin_D(v[0], v[1], v[2], v[3], alpha, beta), sampleVecFieldBilin_D(v[4], v[5], v[6], v[7], alpha, beta), gamma); /* float3 a, b, c, d, e, f, g, h; a = v[0]; b = v[1] - v[0]; c = v[2] - v[0]; d = v[3] - v[1] - v[2] + v[0]; e = v[4] - v[0]; f = v[5] - v[1] - v[4] + v[0]; g = v[6] - v[2] - v[4] + v[0]; h = v[7] - v[3] - v[5] - v[6] + v[1] + v[2] + v[4] - v[0]; return a + b*alpha + c*beta + d*alpha*beta + e*gamma + f*alpha*gamma + g*beta*gamma + h*alpha*beta*gamma; */ } /* sampleVecFieldAtTrilinNorm_D */ inline __device__ float3 sampleVecFieldAtTrilinNorm_D(float3 pos, const float3 *vecField_D) { float3 f; uint3 c; // Get id of the cell containing the given position and interpolation // coefficients f.x = (pos.x-org.x)/spacing.x; f.y = (pos.y-org.y)/spacing.y; f.z = (pos.z-org.z)/spacing.z; c.x = (uint)(f.x); c.y = (uint)(f.y); c.z = (uint)(f.z); f.x = f.x-(float)c.x; // alpha f.y = f.y-(float)c.y; // beta f.z = f.z-(float)c.z; // gamma // Get vector field at corners of current cell float3 v[8]; v[0] = normalize(vecField_D[dim.x*(dim.y*(c.z+0) + (c.y+0))+c.x+0]); v[1] = normalize(vecField_D[dim.x*(dim.y*(c.z+0) + (c.y+0))+c.x+1]); v[2] = normalize(vecField_D[dim.x*(dim.y*(c.z+0) + (c.y+1))+c.x+0]); v[3] = normalize(vecField_D[dim.x*(dim.y*(c.z+0) + (c.y+1))+c.x+1]); v[4] = normalize(vecField_D[dim.x*(dim.y*(c.z+1) + (c.y+0))+c.x+0]); v[5] = normalize(vecField_D[dim.x*(dim.y*(c.z+1) + (c.y+0))+c.x+1]); v[6] = normalize(vecField_D[dim.x*(dim.y*(c.z+1) + (c.y+1))+c.x+0]); v[7] = normalize(vecField_D[dim.x*(dim.y*(c.z+1) + (c.y+1))+c.x+1]); return sampleVecFieldTrilin_D(v, f.x, f.y, f.z); } /* UpdatePositionRK4_D */ __global__ void UpdatePositionRK4_D(const float3 *vecField_D, float3 *pos_D) { // Get thread idx uint idx = blockIdx.x * blockDim.x + threadIdx.x; if(idx >= nPos) return; //float3 posAlt = pos_D[idx]; float3 x0, x1, x2, x3, v0, v1, v2, v3; v0 = make_float3(0.0, 0.0, 0.0); v1 = make_float3(0.0, 0.0, 0.0); v2 = make_float3(0.0, 0.0, 0.0); v3 = make_float3(0.0, 0.0, 0.0); x0 = 
pos_D[idx]; v0 = normalize(sampleVecFieldAtTrilinNorm_D(x0, vecField_D)); v0 *= streamlinesStep; x1 = x0 + 0.5*v0; if(isValidGridPos_D(x1)) { v1 = normalize(sampleVecFieldAtTrilinNorm_D(x1, vecField_D)); v1 *= streamlinesStep; } x2 = x0 + 0.5f*v1; if(isValidGridPos_D(x2)) { v2 = normalize(sampleVecFieldAtTrilinNorm_D(x2, vecField_D)); v2 *= streamlinesStep; } x3 = x0 + v2; if(isValidGridPos_D(x3)) { v3 = normalize(sampleVecFieldAtTrilinNorm_D(x3, vecField_D)); v3 *= streamlinesStep; } x0 += (1.0f/6.0f)*(v0+2.0f*v1+2.0f*v2+v3); if(isValidGridPos_D(x0)) { pos_D[idx] = x0; } /*//pos_D[idx] = (1.0f/6.0f)*(v0+2.0f*v1+2.0f*v2+v3); // DEBUG //pos_D[idx] = v0; // DEBUG float3 f; uint3 c; f.x = (posAlt.x-org.x)/spacing.x; f.y = (posAlt.y-org.y)/spacing.y; f.z = (posAlt.z-org.z)/spacing.z; c.x = (uint)(f.x); c.y = (uint)(f.y); c.z = (uint)(f.z); f.x = f.x-(float)c.x; // alpha f.y = f.y-(float)c.y; // beta f.z = f.z-(float)c.z; // gamma // Get vector field at corners of current cell float3 v[8]; v[0] = vecField_D[dim.x*(dim.y*(c.z+0) + (c.y+0))+c.x+0]; v[1] = vecField_D[dim.x*(dim.y*(c.z+0) + (c.y+0))+c.x+1]; v[2] = vecField_D[dim.x*(dim.y*(c.z+0) + (c.y+1))+c.x+0]; v[3] = vecField_D[dim.x*(dim.y*(c.z+0) + (c.y+1))+c.x+1]; v[4] = vecField_D[dim.x*(dim.y*(c.z+1) + (c.y+0))+c.x+0]; v[5] = vecField_D[dim.x*(dim.y*(c.z+1) + (c.y+0))+c.x+1]; v[6] = vecField_D[dim.x*(dim.y*(c.z+1) + (c.y+1))+c.x+0]; v[7] = vecField_D[dim.x*(dim.y*(c.z+1) + (c.y+1))+c.x+1]; float3 v_test = sampleVecFieldAtTrilinNorm_D(posAlt, vecField_D); //pos_D[idx] = make_float3(f.x, f.y, f.z); // DEBUG //pos_D[idx] = make_float3((float)c.x, (float)c.y, (float)c.z); // DEBUG //pos_D[idx] = make_float3(org.x, org.y, org.z); // DEBUG //pos_D[idx] = make_float3(spacing.x, spacing.y, spacing.z); // DEBUG //pos_D[idx] = make_float3(v[7].x, v[7].y, v[7].z); // DEBUG //pos_D[idx] = make_float3(v_test.x, v_test.y, v_test.z); // DEBUG*/ } /* UpdatePositionRK4_D */ __global__ void UpdatePositionBackwardRK4_D(const float3 *vecField_D, float3 *pos_D) { // Get thread idx uint idx = blockIdx.x * blockDim.x + threadIdx.x; if(idx >= nPos) return; //float3 posAlt = pos_D[idx]; float3 x0, x1, x2, x3, v0, v1, v2, v3; v0 = make_float3(0.0, 0.0, 0.0); v1 = make_float3(0.0, 0.0, 0.0); v2 = make_float3(0.0, 0.0, 0.0); v3 = make_float3(0.0, 0.0, 0.0); x0 = pos_D[idx]; v0 = normalize(sampleVecFieldAtTrilinNorm_D(x0, vecField_D)); v0 *= streamlinesStep; x1 = x0 - 0.5*v0; if(isValidGridPos_D(x1)) { v1 = normalize(sampleVecFieldAtTrilinNorm_D(x1, vecField_D)); v1 *= streamlinesStep; } x2 = x0 - 0.5f*v1; if(isValidGridPos_D(x2)) { v2 = normalize(sampleVecFieldAtTrilinNorm_D(x2, vecField_D)); v2 *= streamlinesStep; } x3 = x0 - v2; if(isValidGridPos_D(x3)) { v3 = normalize(sampleVecFieldAtTrilinNorm_D(x3, vecField_D)); v3 *= streamlinesStep; } x0 -= (1.0f/6.0f)*(v0+2.0f*v1+2.0f*v2+v3); if(isValidGridPos_D(x0)) { pos_D[idx] = x0; } /*//pos_D[idx] = (1.0f/6.0f)*(v0+2.0f*v1+2.0f*v2+v3); // DEBUG //pos_D[idx] = v0; // DEBUG float3 f; uint3 c; f.x = (posAlt.x-org.x)/spacing.x; f.y = (posAlt.y-org.y)/spacing.y; f.z = (posAlt.z-org.z)/spacing.z; c.x = (uint)(f.x); c.y = (uint)(f.y); c.z = (uint)(f.z); f.x = f.x-(float)c.x; // alpha f.y = f.y-(float)c.y; // beta f.z = f.z-(float)c.z; // gamma // Get vector field at corners of current cell float3 v[8]; v[0] = vecField_D[dim.x*(dim.y*(c.z+0) + (c.y+0))+c.x+0]; v[1] = vecField_D[dim.x*(dim.y*(c.z+0) + (c.y+0))+c.x+1]; v[2] = vecField_D[dim.x*(dim.y*(c.z+0) + (c.y+1))+c.x+0]; v[3] = vecField_D[dim.x*(dim.y*(c.z+0) + 
(c.y+1))+c.x+1]; v[4] = vecField_D[dim.x*(dim.y*(c.z+1) + (c.y+0))+c.x+0]; v[5] = vecField_D[dim.x*(dim.y*(c.z+1) + (c.y+0))+c.x+1]; v[6] = vecField_D[dim.x*(dim.y*(c.z+1) + (c.y+1))+c.x+0]; v[7] = vecField_D[dim.x*(dim.y*(c.z+1) + (c.y+1))+c.x+1]; float3 v_test = sampleVecFieldAtTrilinNorm_D(posAlt, vecField_D); //pos_D[idx] = make_float3(f.x, f.y, f.z); // DEBUG //pos_D[idx] = make_float3((float)c.x, (float)c.y, (float)c.z); // DEBUG //pos_D[idx] = make_float3(org.x, org.y, org.z); // DEBUG //pos_D[idx] = make_float3(spacing.x, spacing.y, spacing.z); // DEBUG //pos_D[idx] = make_float3(v[7].x, v[7].y, v[7].z); // DEBUG //pos_D[idx] = make_float3(v_test.x, v_test.y, v_test.z); // DEBUG*/ } /* isFieldVanishingInCell */ inline __device__ bool isFieldVanishingInCell_D(float3 v[8]) { return (!(((v[0].x > 0)&&(v[1].x > 0)&&(v[2].x > 0)&& (v[3].x > 0)&&(v[4].x > 0)&&(v[5].x > 0)&& (v[6].x > 0)&&(v[7].x > 0))|| ((v[0].x < 0)&&(v[1].x < 0)&&(v[2].x < 0)&& (v[3].x < 0)&&(v[4].x < 0)&&(v[5].x < 0)&& (v[6].x < 0)&&(v[7].x < 0))|| ((v[0].y > 0)&&(v[1].y > 0)&&(v[2].y > 0)&& (v[3].y > 0)&&(v[4].y > 0)&&(v[5].y > 0)&& (v[6].y > 0)&&(v[7].y > 0))|| ((v[0].y < 0)&&(v[1].y < 0)&&(v[2].y < 0)&& (v[3].y < 0)&&(v[4].y < 0)&&(v[5].y < 0)&& (v[6].y < 0)&&(v[7].y < 0))|| ((v[0].z > 0)&&(v[1].z > 0)&&(v[2].z > 0)&& (v[3].z > 0)&&(v[4].z > 0)&&(v[5].z > 0)&& (v[6].z > 0)&&(v[7].z > 0))|| ((v[0].z < 0)&&(v[1].z < 0)&&(v[2].z < 0)&& (v[3].z < 0)&&(v[4].z < 0)&&(v[5].z < 0)&& (v[6].z < 0)&&(v[7].z < 0)))); } /* calcCellCoords_D */ __global__ void calcCellCoords_D(const float3 *vecField_D, // dim.x*dim.y*dim.z float3 *cellCoords_D) { // Get thread index uint nCells = (dim.x-1)*(dim.y-1)*(dim.z-1); uint idx = blockIdx.x * blockDim.x + threadIdx.x; if(idx >= nCells) return; // Get coordinates of the lower/left/back corner uint3 c; c.x = idx%(dim.x-1); c.y = (idx/(dim.x-1))%(dim.y-1); c.z = (idx/(dim.x-1))/(dim.y-1); // Init stack const uint maxStackSize = 6; uint currStackPos = 0; uint currSubCell[maxStackSize]; // 0 ... 
7 currSubCell[0] = 0; float cellSize = 1.0f; float3 stackCorners[maxStackSize*8]; stackCorners[0] = make_float3(0.0, 0.0, 0.0); stackCorners[1] = make_float3(1.0, 0.0, 0.0); stackCorners[2] = make_float3(0.0, 1.0, 0.0); stackCorners[3] = make_float3(1.0, 1.0, 0.0); stackCorners[4] = make_float3(0.0, 0.0, 1.0); stackCorners[5] = make_float3(1.0, 0.0, 1.0); stackCorners[6] = make_float3(0.0, 1.0, 1.0); stackCorners[7] = make_float3(1.0, 1.0, 1.0); float3 stackV[8*maxStackSize]; // Vector field at corners of current (sub-)cell stackV[0] = normalize(vecField_D[dim.x*(dim.y*(c.z+0) + (c.y+0))+c.x+0]); stackV[1] = normalize(vecField_D[dim.x*(dim.y*(c.z+0) + (c.y+0))+c.x+1]); stackV[2] = normalize(vecField_D[dim.x*(dim.y*(c.z+0) + (c.y+1))+c.x+0]); stackV[3] = normalize(vecField_D[dim.x*(dim.y*(c.z+0) + (c.y+1))+c.x+1]); stackV[4] = normalize(vecField_D[dim.x*(dim.y*(c.z+1) + (c.y+0))+c.x+0]); stackV[5] = normalize(vecField_D[dim.x*(dim.y*(c.z+1) + (c.y+0))+c.x+1]); stackV[6] = normalize(vecField_D[dim.x*(dim.y*(c.z+1) + (c.y+1))+c.x+0]); stackV[7] = normalize(vecField_D[dim.x*(dim.y*(c.z+1) + (c.y+1))+c.x+1]); float3 stackSubgrid[19*maxStackSize]; // Init cell coords cellCoords_D[idx].x = -1.0; cellCoords_D[idx].y = -1.0; cellCoords_D[idx].z = -1.0; bool done = false; while(!done) { if((isFieldVanishingInCell_D(&stackV[currStackPos*8]))&&(currSubCell[currStackPos] < 8)) { if(currStackPos < maxStackSize-1) { if(currSubCell[currStackPos] == 0) { // Compute subgrid values and put them on the stack // Edges stackSubgrid[19*currStackPos+0] = sampleVecFieldLin_D(stackV[(currStackPos)*8+0], stackV[(currStackPos)*8+1], 0.5); stackSubgrid[19*currStackPos+1] = sampleVecFieldLin_D(stackV[(currStackPos)*8+0], stackV[(currStackPos)*8+2], 0.5); stackSubgrid[19*currStackPos+2] = sampleVecFieldLin_D(stackV[(currStackPos)*8+1], stackV[(currStackPos)*8+3], 0.5); stackSubgrid[19*currStackPos+3] = sampleVecFieldLin_D(stackV[(currStackPos)*8+2], stackV[(currStackPos)*8+3], 0.5); stackSubgrid[19*currStackPos+4] = sampleVecFieldLin_D(stackV[(currStackPos)*8+0], stackV[(currStackPos)*8+4], 0.5); stackSubgrid[19*currStackPos+5] = sampleVecFieldLin_D(stackV[(currStackPos)*8+1], stackV[(currStackPos)*8+5], 0.5); stackSubgrid[19*currStackPos+6] = sampleVecFieldLin_D(stackV[(currStackPos)*8+2], stackV[(currStackPos)*8+6], 0.5); stackSubgrid[19*currStackPos+7] = sampleVecFieldLin_D(stackV[(currStackPos)*8+3], stackV[(currStackPos)*8+7], 0.5); stackSubgrid[19*currStackPos+8] = sampleVecFieldLin_D(stackV[(currStackPos)*8+4], stackV[(currStackPos)*8+5], 0.5); stackSubgrid[19*currStackPos+9] = sampleVecFieldLin_D(stackV[(currStackPos)*8+4], stackV[(currStackPos)*8+6], 0.5); stackSubgrid[19*currStackPos+10] = sampleVecFieldLin_D(stackV[(currStackPos)*8+5], stackV[(currStackPos)*8+7], 0.5); stackSubgrid[19*currStackPos+11] = sampleVecFieldLin_D(stackV[(currStackPos)*8+6], stackV[(currStackPos)*8+7], 0.5); // Faces // Back stackSubgrid[19*currStackPos+12] = sampleVecFieldLin_D(stackSubgrid[19*currStackPos+0], stackSubgrid[19*currStackPos+3], 0.5); // Front stackSubgrid[19*currStackPos+13] = sampleVecFieldLin_D(stackSubgrid[19*currStackPos+9], stackSubgrid[19*currStackPos+10], 0.5); // Bottom stackSubgrid[19*currStackPos+14] = sampleVecFieldLin_D(stackSubgrid[19*currStackPos+4], stackSubgrid[19*currStackPos+5], 0.5); // Top stackSubgrid[19*currStackPos+15] = sampleVecFieldLin_D(stackSubgrid[19*currStackPos+6], stackSubgrid[19*currStackPos+7], 0.5); // Left stackSubgrid[19*currStackPos+16] = 
sampleVecFieldLin_D(stackSubgrid[19*currStackPos+9], stackSubgrid[19*currStackPos+1], 0.5); // Right stackSubgrid[19*currStackPos+17] = sampleVecFieldLin_D(stackSubgrid[19*currStackPos+5], stackSubgrid[19*currStackPos+7], 0.5); // Center stackSubgrid[19*currStackPos+18] = sampleVecFieldLin_D(stackSubgrid[19*currStackPos+12], stackSubgrid[19*currStackPos+13], 0.5); } // Increment stack currStackPos++; cellSize = cellSize*0.5; // Bisect and put cell on stack if(currSubCell[currStackPos-1] == 0) { // left/down/back // Set cell corners stackCorners[currStackPos*8+0] = make_float3(stackCorners[(currStackPos-1)*8+0].x, stackCorners[(currStackPos-1)*8+0].y, stackCorners[(currStackPos-1)*8+0].z); stackCorners[currStackPos*8+1] = make_float3(stackCorners[(currStackPos-1)*8+0].x+cellSize, stackCorners[(currStackPos-1)*8+0].y, stackCorners[(currStackPos-1)*8+0].z); stackCorners[currStackPos*8+2] = make_float3(stackCorners[(currStackPos-1)*8+0].x, stackCorners[(currStackPos-1)*8+0].y+cellSize, stackCorners[(currStackPos-1)*8+0].z); stackCorners[currStackPos*8+3] = make_float3(stackCorners[(currStackPos-1)*8+0].x+cellSize, stackCorners[(currStackPos-1)*8+0].y+cellSize, stackCorners[(currStackPos-1)*8+0].z); stackCorners[currStackPos*8+4] = make_float3(stackCorners[(currStackPos-1)*8+0].x, stackCorners[(currStackPos-1)*8+0].y, stackCorners[(currStackPos-1)*8+0].z+cellSize); stackCorners[currStackPos*8+5] = make_float3(stackCorners[(currStackPos-1)*8+0].x+cellSize, stackCorners[(currStackPos-1)*8+0].y, stackCorners[(currStackPos-1)*8+0].z+cellSize); stackCorners[currStackPos*8+6] = make_float3(stackCorners[(currStackPos-1)*8+0].x, stackCorners[(currStackPos-1)*8+0].y+cellSize, stackCorners[(currStackPos-1)*8+0].z+cellSize); stackCorners[currStackPos*8+7] = make_float3(stackCorners[(currStackPos-1)*8+0].x+cellSize, stackCorners[(currStackPos-1)*8+0].y+cellSize, stackCorners[(currStackPos-1)*8+0].z+cellSize); // Sample vector field at cell corners stackV[currStackPos*8+0] = stackV[(currStackPos-1)*8+0]; stackV[currStackPos*8+1] = stackSubgrid[19*(currStackPos-1)+0]; stackV[currStackPos*8+2] = stackSubgrid[19*(currStackPos-1)+1]; stackV[currStackPos*8+3] = stackSubgrid[19*(currStackPos-1)+12]; stackV[currStackPos*8+4] = stackSubgrid[19*(currStackPos-1)+4]; stackV[currStackPos*8+5] = stackSubgrid[19*(currStackPos-1)+14]; stackV[currStackPos*8+6] = stackSubgrid[19*(currStackPos-1)+16]; stackV[currStackPos*8+7] = stackSubgrid[19*(currStackPos-1)+18]; } else if(currSubCell[currStackPos-1] == 1) { // right/down/back stackCorners[currStackPos*8+0] = make_float3(stackCorners[(currStackPos-1)*8+0].x+cellSize, stackCorners[(currStackPos-1)*8+0].y, stackCorners[(currStackPos-1)*8+0].z); stackCorners[currStackPos*8+1] = make_float3(stackCorners[(currStackPos-1)*8+1].x, stackCorners[(currStackPos-1)*8+1].y, stackCorners[(currStackPos-1)*8+1].z); stackCorners[currStackPos*8+2] = make_float3(stackCorners[(currStackPos-1)*8+0].x+cellSize, stackCorners[(currStackPos-1)*8+0].y+cellSize, stackCorners[(currStackPos-1)*8+0].z); stackCorners[currStackPos*8+3] = make_float3(stackCorners[(currStackPos-1)*8+1].x, stackCorners[(currStackPos-1)*8+1].y+cellSize, stackCorners[(currStackPos-1)*8+1].z); stackCorners[currStackPos*8+4] = make_float3(stackCorners[(currStackPos-1)*8+0].x+cellSize, stackCorners[(currStackPos-1)*8+0].y, stackCorners[(currStackPos-1)*8+0].z+cellSize); stackCorners[currStackPos*8+5] = make_float3(stackCorners[(currStackPos-1)*8+1].x, stackCorners[(currStackPos-1)*8+1].y, 
stackCorners[(currStackPos-1)*8+1].z+cellSize); stackCorners[currStackPos*8+6] = make_float3(stackCorners[(currStackPos-1)*8+0].x+cellSize, stackCorners[(currStackPos-1)*8+0].y+cellSize, stackCorners[(currStackPos-1)*8+0].z+cellSize); stackCorners[currStackPos*8+7] = make_float3(stackCorners[(currStackPos-1)*8+1].x, stackCorners[(currStackPos-1)*8+1].y+cellSize, stackCorners[(currStackPos-1)*8+1].z+cellSize); // Sample vector field at cell corners (while reusing vals from the last subcell) stackV[currStackPos*8+0] = stackSubgrid[19*(currStackPos-1)+0]; stackV[currStackPos*8+1] = stackV[(currStackPos-1)*8+1]; stackV[currStackPos*8+2] = stackSubgrid[19*(currStackPos-1)+12]; stackV[currStackPos*8+3] = stackSubgrid[19*(currStackPos-1)+2]; stackV[currStackPos*8+4] = stackSubgrid[19*(currStackPos-1)+14]; stackV[currStackPos*8+5] = stackSubgrid[19*(currStackPos-1)+5]; stackV[currStackPos*8+6] = stackSubgrid[19*(currStackPos-1)+18]; stackV[currStackPos*8+7] = stackSubgrid[19*(currStackPos-1)+17]; } else if(currSubCell[currStackPos-1] == 2) { // left/top/back stackCorners[currStackPos*8+0] = make_float3(stackCorners[(currStackPos-1)*8+0].x, stackCorners[(currStackPos-1)*8+0].y+cellSize, stackCorners[(currStackPos-1)*8+0].z); stackCorners[currStackPos*8+1] = make_float3(stackCorners[(currStackPos-1)*8+0].x+cellSize, stackCorners[(currStackPos-1)*8+0].y+cellSize, stackCorners[(currStackPos-1)*8+0].z); stackCorners[currStackPos*8+2] = make_float3(stackCorners[(currStackPos-1)*8+2].x, stackCorners[(currStackPos-1)*8+2].y, stackCorners[(currStackPos-1)*8+2].z); stackCorners[currStackPos*8+3] = make_float3(stackCorners[(currStackPos-1)*8+2].x+cellSize, stackCorners[(currStackPos-1)*8+2].y, stackCorners[(currStackPos-1)*8+2].z); stackCorners[currStackPos*8+4] = make_float3(stackCorners[(currStackPos-1)*8+0].x, stackCorners[(currStackPos-1)*8+0].y+cellSize, stackCorners[(currStackPos-1)*8+0].z+cellSize); stackCorners[currStackPos*8+5] = make_float3(stackCorners[(currStackPos-1)*8+0].x+cellSize, stackCorners[(currStackPos-1)*8+0].y+cellSize, stackCorners[(currStackPos-1)*8+0].z+cellSize); stackCorners[currStackPos*8+6] = make_float3(stackCorners[(currStackPos-1)*8+2].x, stackCorners[(currStackPos-1)*8+2].y, stackCorners[(currStackPos-1)*8+2].z+cellSize); stackCorners[currStackPos*8+7] = make_float3(stackCorners[(currStackPos-1)*8+2].x+cellSize, stackCorners[(currStackPos-1)*8+2].y, stackCorners[(currStackPos-1)*8+2].z+cellSize); // Sample vector field at cell corners (while reusing vals from the last subcell) stackV[currStackPos*8+0] = stackSubgrid[19*(currStackPos-1)+1]; stackV[currStackPos*8+1] = stackSubgrid[19*(currStackPos-1)+12]; stackV[currStackPos*8+2] = stackV[(currStackPos-1)*8+2]; stackV[currStackPos*8+3] = stackSubgrid[19*(currStackPos-1)+3]; stackV[currStackPos*8+4] = stackSubgrid[19*(currStackPos-1)+16]; stackV[currStackPos*8+5] = stackSubgrid[19*(currStackPos-1)+18]; stackV[currStackPos*8+6] = stackSubgrid[19*(currStackPos-1)+6]; stackV[currStackPos*8+7] = stackSubgrid[19*(currStackPos-1)+15]; } else if(currSubCell[currStackPos-1] == 3) { // right/top/back stackCorners[currStackPos*8+0] = make_float3(stackCorners[(currStackPos-1)*8+0].x+cellSize, stackCorners[(currStackPos-1)*8+0].y+cellSize, stackCorners[(currStackPos-1)*8+0].z); stackCorners[currStackPos*8+1] = make_float3(stackCorners[(currStackPos-1)*8+1].x, stackCorners[(currStackPos-1)*8+1].y+cellSize, stackCorners[(currStackPos-1)*8+1].z); stackCorners[currStackPos*8+2] = make_float3(stackCorners[(currStackPos-1)*8+2].x+cellSize, 
stackCorners[(currStackPos-1)*8+2].y, stackCorners[(currStackPos-1)*8+2].z); stackCorners[currStackPos*8+3] = make_float3(stackCorners[(currStackPos-1)*8+3].x, stackCorners[(currStackPos-1)*8+3].y, stackCorners[(currStackPos-1)*8+3].z); stackCorners[currStackPos*8+4] = make_float3(stackCorners[(currStackPos-1)*8+0].x+cellSize, stackCorners[(currStackPos-1)*8+0].y+cellSize, stackCorners[(currStackPos-1)*8+0].z+cellSize); stackCorners[currStackPos*8+5] = make_float3(stackCorners[(currStackPos-1)*8+1].x, stackCorners[(currStackPos-1)*8+1].y+cellSize, stackCorners[(currStackPos-1)*8+1].z+cellSize); stackCorners[currStackPos*8+6] = make_float3(stackCorners[(currStackPos-1)*8+2].x+cellSize, stackCorners[(currStackPos-1)*8+2].y, stackCorners[(currStackPos-1)*8+2].z+cellSize); stackCorners[currStackPos*8+7] = make_float3(stackCorners[(currStackPos-1)*8+3].x, stackCorners[(currStackPos-1)*8+3].y, stackCorners[(currStackPos-1)*8+3].z+cellSize); // Sample vector field at cell corners (while reusing vals from the last subcell) stackV[currStackPos*8+0] = stackSubgrid[19*(currStackPos-1)+12]; stackV[currStackPos*8+1] = stackSubgrid[19*(currStackPos-1)+2]; stackV[currStackPos*8+2] = stackSubgrid[19*(currStackPos-1)+3]; stackV[currStackPos*8+3] = stackV[(currStackPos-1)*8+3]; stackV[currStackPos*8+4] = stackSubgrid[19*(currStackPos-1)+18]; stackV[currStackPos*8+5] = stackSubgrid[19*(currStackPos-1)+17]; stackV[currStackPos*8+6] = stackSubgrid[19*(currStackPos-1)+15]; stackV[currStackPos*8+7] = stackSubgrid[19*(currStackPos-1)+7]; } else if(currSubCell[currStackPos-1] == 4) { // left/bottom/front stackCorners[currStackPos*8+0] = make_float3(stackCorners[(currStackPos-1)*8+0].x, stackCorners[(currStackPos-1)*8+0].y, stackCorners[(currStackPos-1)*8+0].z+cellSize); stackCorners[currStackPos*8+1] = make_float3(stackCorners[(currStackPos-1)*8+0].x+cellSize, stackCorners[(currStackPos-1)*8+0].y, stackCorners[(currStackPos-1)*8+0].z+cellSize); stackCorners[currStackPos*8+2] = make_float3(stackCorners[(currStackPos-1)*8+0].x, stackCorners[(currStackPos-1)*8+0].y+cellSize, stackCorners[(currStackPos-1)*8+0].z+cellSize); stackCorners[currStackPos*8+3] = make_float3(stackCorners[(currStackPos-1)*8+0].x+cellSize, stackCorners[(currStackPos-1)*8+0].y+cellSize, stackCorners[(currStackPos-1)*8+0].z+cellSize); stackCorners[currStackPos*8+4] = make_float3(stackCorners[(currStackPos-1)*8+4].x, stackCorners[(currStackPos-1)*8+4].y, stackCorners[(currStackPos-1)*8+4].z); stackCorners[currStackPos*8+5] = make_float3(stackCorners[(currStackPos-1)*8+4].x+cellSize, stackCorners[(currStackPos-1)*8+4].y, stackCorners[(currStackPos-1)*8+4].z); stackCorners[currStackPos*8+6] = make_float3(stackCorners[(currStackPos-1)*8+4].x, stackCorners[(currStackPos-1)*8+4].y+cellSize, stackCorners[(currStackPos-1)*8+4].z); stackCorners[currStackPos*8+7] = make_float3(stackCorners[(currStackPos-1)*8+4].x+cellSize, stackCorners[(currStackPos-1)*8+4].y+cellSize, stackCorners[(currStackPos-1)*8+4].z); // Sample vector field at cell corners (while reusing vals from the last subcell) stackV[currStackPos*8+0] = stackSubgrid[19*(currStackPos-1)+4]; stackV[currStackPos*8+1] = stackSubgrid[19*(currStackPos-1)+14]; stackV[currStackPos*8+2] = stackSubgrid[19*(currStackPos-1)+16]; stackV[currStackPos*8+3] = stackSubgrid[19*(currStackPos-1)+18]; stackV[currStackPos*8+4] = stackV[(currStackPos-1)*8+4]; stackV[currStackPos*8+5] = stackSubgrid[19*(currStackPos-1)+8]; stackV[currStackPos*8+6] = stackSubgrid[19*(currStackPos-1)+9]; stackV[currStackPos*8+7] = 
stackSubgrid[19*(currStackPos-1)+13]; } else if(currSubCell[currStackPos-1] == 5) { // right/bottom/front stackCorners[currStackPos*8+0] = make_float3(stackCorners[(currStackPos-1)*8+0].x+cellSize, stackCorners[(currStackPos-1)*8+0].y, stackCorners[(currStackPos-1)*8+0].z+cellSize); stackCorners[currStackPos*8+1] = make_float3(stackCorners[(currStackPos-1)*8+1].x, stackCorners[(currStackPos-1)*8+1].y, stackCorners[(currStackPos-1)*8+1].z+cellSize); stackCorners[currStackPos*8+2] = make_float3(stackCorners[(currStackPos-1)*8+0].x+cellSize, stackCorners[(currStackPos-1)*8+0].y+cellSize, stackCorners[(currStackPos-1)*8+0].z+cellSize); stackCorners[currStackPos*8+3] = make_float3(stackCorners[(currStackPos-1)*8+1].x, stackCorners[(currStackPos-1)*8+1].y+cellSize, stackCorners[(currStackPos-1)*8+1].z+cellSize); stackCorners[currStackPos*8+4] = make_float3(stackCorners[(currStackPos-1)*8+4].x+cellSize, stackCorners[(currStackPos-1)*8+4].y, stackCorners[(currStackPos-1)*8+4].z); stackCorners[currStackPos*8+5] = make_float3(stackCorners[(currStackPos-1)*8+5].x, stackCorners[(currStackPos-1)*8+5].y, stackCorners[(currStackPos-1)*8+5].z); stackCorners[currStackPos*8+6] = make_float3(stackCorners[(currStackPos-1)*8+4].x+cellSize, stackCorners[(currStackPos-1)*8+4].y+cellSize, stackCorners[(currStackPos-1)*8+4].z); stackCorners[currStackPos*8+7] = make_float3(stackCorners[(currStackPos-1)*8+5].x, stackCorners[(currStackPos-1)*8+5].y+cellSize, stackCorners[(currStackPos-1)*8+5].z); // Sample vector field at cell corners (while reusing vals from the last subcell) stackV[currStackPos*8+0] = stackSubgrid[19*(currStackPos-1)+14]; stackV[currStackPos*8+1] = stackSubgrid[19*(currStackPos-1)+5]; stackV[currStackPos*8+2] = stackSubgrid[19*(currStackPos-1)+18]; stackV[currStackPos*8+3] = stackSubgrid[19*(currStackPos-1)+17]; stackV[currStackPos*8+4] = stackSubgrid[19*(currStackPos-1)+8]; stackV[currStackPos*8+5] = stackV[(currStackPos-1)*8+5]; stackV[currStackPos*8+6] = stackSubgrid[19*(currStackPos-1)+13]; stackV[currStackPos*8+7] = stackSubgrid[19*(currStackPos-1)+10]; } else if(currSubCell[currStackPos-1] == 6) { // left/top/front stackCorners[currStackPos*8+0] = make_float3(stackCorners[(currStackPos-1)*8+0].x, stackCorners[(currStackPos-1)*8+0].y+cellSize, stackCorners[(currStackPos-1)*8+0].z+cellSize); stackCorners[currStackPos*8+1] = make_float3(stackCorners[(currStackPos-1)*8+0].x+cellSize, stackCorners[(currStackPos-1)*8+0].y+cellSize, stackCorners[(currStackPos-1)*8+0].z+cellSize); stackCorners[currStackPos*8+2] = make_float3(stackCorners[(currStackPos-1)*8+2].x, stackCorners[(currStackPos-1)*8+2].y, stackCorners[(currStackPos-1)*8+2].z+cellSize); stackCorners[currStackPos*8+3] = make_float3(stackCorners[(currStackPos-1)*8+2].x+cellSize, stackCorners[(currStackPos-1)*8+2].y, stackCorners[(currStackPos-1)*8+2].z+cellSize); stackCorners[currStackPos*8+4] = make_float3(stackCorners[(currStackPos-1)*8+4].x, stackCorners[(currStackPos-1)*8+4].y+cellSize, stackCorners[(currStackPos-1)*8+4].z); stackCorners[currStackPos*8+5] = make_float3(stackCorners[(currStackPos-1)*8+4].x+cellSize, stackCorners[(currStackPos-1)*8+4].y+cellSize, stackCorners[(currStackPos-1)*8+4].z); stackCorners[currStackPos*8+6] = make_float3(stackCorners[(currStackPos-1)*8+6].x, stackCorners[(currStackPos-1)*8+6].y, stackCorners[(currStackPos-1)*8+6].z); stackCorners[currStackPos*8+7] = make_float3(stackCorners[(currStackPos-1)*8+6].x+cellSize, stackCorners[(currStackPos-1)*8+6].y, stackCorners[(currStackPos-1)*8+6].z); // Sample vector 
field at cell corners (while reusing vals from the last subcell) stackV[currStackPos*8+0] = stackSubgrid[19*(currStackPos-1)+16]; stackV[currStackPos*8+1] = stackSubgrid[19*(currStackPos-1)+18]; stackV[currStackPos*8+2] = stackSubgrid[19*(currStackPos-1)+6]; stackV[currStackPos*8+3] = stackSubgrid[19*(currStackPos-1)+15]; stackV[currStackPos*8+4] = stackSubgrid[19*(currStackPos-1)+9]; stackV[currStackPos*8+5] = stackSubgrid[19*(currStackPos-1)+13]; stackV[currStackPos*8+6] = stackV[(currStackPos-1)*8+6]; stackV[currStackPos*8+7] = stackSubgrid[19*(currStackPos-1)+11]; } else if(currSubCell[currStackPos-1] == 7) { // right/top/front stackCorners[currStackPos*8+0] = make_float3(stackCorners[(currStackPos-1)*8+0].x+cellSize, stackCorners[(currStackPos-1)*8+0].y+cellSize, stackCorners[(currStackPos-1)*8+0].z+cellSize); stackCorners[currStackPos*8+1] = make_float3(stackCorners[(currStackPos-1)*8+1].x, stackCorners[(currStackPos-1)*8+1].y+cellSize, stackCorners[(currStackPos-1)*8+1].z+cellSize); stackCorners[currStackPos*8+2] = make_float3(stackCorners[(currStackPos-1)*8+2].x+cellSize, stackCorners[(currStackPos-1)*8+2].y, stackCorners[(currStackPos-1)*8+2].z+cellSize); stackCorners[currStackPos*8+3] = make_float3(stackCorners[(currStackPos-1)*8+3].x, stackCorners[(currStackPos-1)*8+3].y, stackCorners[(currStackPos-1)*8+3].z+cellSize); stackCorners[currStackPos*8+4] = make_float3(stackCorners[(currStackPos-1)*8+4].x+cellSize, stackCorners[(currStackPos-1)*8+4].y+cellSize, stackCorners[(currStackPos-1)*8+4].z); stackCorners[currStackPos*8+5] = make_float3(stackCorners[(currStackPos-1)*8+5].x, stackCorners[(currStackPos-1)*8+5].y+cellSize, stackCorners[(currStackPos-1)*8+5].z); stackCorners[currStackPos*8+6] = make_float3(stackCorners[(currStackPos-1)*8+6].x+cellSize, stackCorners[(currStackPos-1)*8+6].y, stackCorners[(currStackPos-1)*8+6].z); stackCorners[currStackPos*8+7] = make_float3(stackCorners[(currStackPos-1)*8+7].x, stackCorners[(currStackPos-1)*8+7].y, stackCorners[(currStackPos-1)*8+7].z); stackV[currStackPos*8+0] = stackSubgrid[19*(currStackPos-1)+18]; stackV[currStackPos*8+1] = stackSubgrid[19*(currStackPos-1)+17]; stackV[currStackPos*8+2] = stackSubgrid[19*(currStackPos-1)+15]; stackV[currStackPos*8+3] = stackSubgrid[19*(currStackPos-1)+7]; stackV[currStackPos*8+4] = stackSubgrid[19*(currStackPos-1)+13]; stackV[currStackPos*8+5] = stackSubgrid[19*(currStackPos-1)+10]; stackV[currStackPos*8+6] = stackSubgrid[19*(currStackPos-1)+11]; stackV[currStackPos*8+7] = stackV[(currStackPos-1)*8+7]; } currSubCell[currStackPos] = 0; } else { // Put the center of the current (sub-)cell on the stack cellCoords_D[idx].x = stackCorners[8*currStackPos+0].x + cellSize*0.5; cellCoords_D[idx].y = stackCorners[8*currStackPos+0].y + cellSize*0.5; cellCoords_D[idx].z = stackCorners[8*currStackPos+0].z + cellSize*0.5; done = true; } } else { if(currStackPos > 0) { currStackPos--; cellSize = cellSize*2.0; currSubCell[currStackPos]++; } else { // Field is not vanishing in this cell done = true; } } } } /* calcCellCoords_D2 */ __global__ void calcCellCoords_D2(const float3 *vecField_D, // dim.x*dim.y*dim.z float3 *cellCoords_D) { // Get thread index uint nCells = (dim.x-1)*(dim.y-1)*(dim.z-1); uint idx = blockIdx.x * blockDim.x + threadIdx.x; if(idx >= nCells) return; // Get coordinates of the lower/left/back corner uint3 c; c.x = idx%(dim.x-1); c.y = (idx/(dim.x-1))%(dim.y-1); c.z = (idx/(dim.x-1))/(dim.y-1); // Init stack const uint maxStackSize = 20; uint currStackPos = 0; uint currSubCell[maxStackSize]; // 
0 ... 7 currSubCell[0] = 0; float cellSize = 1.0f; float3 stackCorners[maxStackSize*8]; stackCorners[0] = make_float3(0.0, 0.0, 0.0); stackCorners[1] = make_float3(1.0, 0.0, 0.0); stackCorners[2] = make_float3(0.0, 1.0, 0.0); stackCorners[3] = make_float3(1.0, 1.0, 0.0); stackCorners[4] = make_float3(0.0, 0.0, 1.0); stackCorners[5] = make_float3(1.0, 0.0, 1.0); stackCorners[6] = make_float3(0.0, 1.0, 1.0); stackCorners[7] = make_float3(1.0, 1.0, 1.0); float3 stackV[8*maxStackSize]; // Vector field at corners of current (sub-)cell stackV[0] = normalize(vecField_D[dim.x*(dim.y*(c.z+0) + (c.y+0))+c.x+0]); stackV[1] = normalize(vecField_D[dim.x*(dim.y*(c.z+0) + (c.y+0))+c.x+1]); stackV[2] = normalize(vecField_D[dim.x*(dim.y*(c.z+0) + (c.y+1))+c.x+0]); stackV[3] = normalize(vecField_D[dim.x*(dim.y*(c.z+0) + (c.y+1))+c.x+1]); stackV[4] = normalize(vecField_D[dim.x*(dim.y*(c.z+1) + (c.y+0))+c.x+0]); stackV[5] = normalize(vecField_D[dim.x*(dim.y*(c.z+1) + (c.y+0))+c.x+1]); stackV[6] = normalize(vecField_D[dim.x*(dim.y*(c.z+1) + (c.y+1))+c.x+0]); stackV[7] = normalize(vecField_D[dim.x*(dim.y*(c.z+1) + (c.y+1))+c.x+1]); // Init cell coords cellCoords_D[idx].x = -1.0; cellCoords_D[idx].y = -1.0; cellCoords_D[idx].z = -1.0; bool done = false; while(!done) { if((isFieldVanishingInCell_D(&stackV[currStackPos*8]))&&(currSubCell[currStackPos] < 8)) { if(currStackPos < maxStackSize-1) { // Increment stack currStackPos++; cellSize = cellSize*0.5; // Bisect and put cell on stack if(currSubCell[currStackPos-1] == 0) { // left/down/back // Set cell corners stackCorners[currStackPos*8+0] = make_float3(stackCorners[(currStackPos-1)*8+0].x, stackCorners[(currStackPos-1)*8+0].y, stackCorners[(currStackPos-1)*8+0].z); stackCorners[currStackPos*8+1] = make_float3(stackCorners[(currStackPos-1)*8+0].x+cellSize, stackCorners[(currStackPos-1)*8+0].y, stackCorners[(currStackPos-1)*8+0].z); stackCorners[currStackPos*8+2] = make_float3(stackCorners[(currStackPos-1)*8+0].x, stackCorners[(currStackPos-1)*8+0].y+cellSize, stackCorners[(currStackPos-1)*8+0].z); stackCorners[currStackPos*8+3] = make_float3(stackCorners[(currStackPos-1)*8+0].x+cellSize, stackCorners[(currStackPos-1)*8+0].y+cellSize, stackCorners[(currStackPos-1)*8+0].z); stackCorners[currStackPos*8+4] = make_float3(stackCorners[(currStackPos-1)*8+0].x, stackCorners[(currStackPos-1)*8+0].y, stackCorners[(currStackPos-1)*8+0].z+cellSize); stackCorners[currStackPos*8+5] = make_float3(stackCorners[(currStackPos-1)*8+0].x+cellSize, stackCorners[(currStackPos-1)*8+0].y, stackCorners[(currStackPos-1)*8+0].z+cellSize); stackCorners[currStackPos*8+6] = make_float3(stackCorners[(currStackPos-1)*8+0].x, stackCorners[(currStackPos-1)*8+0].y+cellSize, stackCorners[(currStackPos-1)*8+0].z+cellSize); stackCorners[currStackPos*8+7] = make_float3(stackCorners[(currStackPos-1)*8+0].x+cellSize, stackCorners[(currStackPos-1)*8+0].y+cellSize, stackCorners[(currStackPos-1)*8+0].z+cellSize); // Sample vector field at cell corners stackV[currStackPos*8+0] = stackV[(currStackPos-1)*8+0]; stackV[currStackPos*8+1] = sampleVecFieldLin_D(stackV[(currStackPos-1)*8+0], stackV[(currStackPos-1)*8+1], 0.5); stackV[currStackPos*8+2] = sampleVecFieldLin_D(stackV[(currStackPos-1)*8+0], stackV[(currStackPos-1)*8+2], 0.5); stackV[currStackPos*8+3] = sampleVecFieldBilin_D(stackV[(currStackPos-1)*8+0], stackV[(currStackPos-1)*8+1], stackV[(currStackPos-1)*8+2], stackV[(currStackPos-1)*8+3], 0.5, 0.5); stackV[currStackPos*8+4] = sampleVecFieldLin_D(stackV[(currStackPos-1)*8+0], 
stackV[(currStackPos-1)*8+4], 0.5); stackV[currStackPos*8+5] = sampleVecFieldBilin_D(stackV[(currStackPos-1)*8+0], stackV[(currStackPos-1)*8+1], stackV[(currStackPos-1)*8+4], stackV[(currStackPos-1)*8+5], 0.5, 0.5); stackV[currStackPos*8+6] = sampleVecFieldBilin_D(stackV[(currStackPos-1)*8+0], stackV[(currStackPos-1)*8+2], stackV[(currStackPos-1)*8+4], stackV[(currStackPos-1)*8+6], 0.5, 0.5); stackV[currStackPos*8+7] = sampleVecFieldTrilin_D(&stackV[(currStackPos-1)*8], 0.5, 0.5, 0.5); } else if(currSubCell[currStackPos-1] == 1) { // right/down/back stackCorners[currStackPos*8+0] = make_float3(stackCorners[(currStackPos-1)*8+0].x+cellSize, stackCorners[(currStackPos-1)*8+0].y, stackCorners[(currStackPos-1)*8+0].z); stackCorners[currStackPos*8+1] = make_float3(stackCorners[(currStackPos-1)*8+1].x, stackCorners[(currStackPos-1)*8+1].y, stackCorners[(currStackPos-1)*8+1].z); stackCorners[currStackPos*8+2] = make_float3(stackCorners[(currStackPos-1)*8+0].x+cellSize, stackCorners[(currStackPos-1)*8+0].y+cellSize, stackCorners[(currStackPos-1)*8+0].z); stackCorners[currStackPos*8+3] = make_float3(stackCorners[(currStackPos-1)*8+1].x, stackCorners[(currStackPos-1)*8+1].y+cellSize, stackCorners[(currStackPos-1)*8+1].z); stackCorners[currStackPos*8+4] = make_float3(stackCorners[(currStackPos-1)*8+0].x+cellSize, stackCorners[(currStackPos-1)*8+0].y, stackCorners[(currStackPos-1)*8+0].z+cellSize); stackCorners[currStackPos*8+5] = make_float3(stackCorners[(currStackPos-1)*8+1].x, stackCorners[(currStackPos-1)*8+1].y, stackCorners[(currStackPos-1)*8+1].z+cellSize); stackCorners[currStackPos*8+6] = make_float3(stackCorners[(currStackPos-1)*8+0].x+cellSize, stackCorners[(currStackPos-1)*8+0].y+cellSize, stackCorners[(currStackPos-1)*8+0].z+cellSize); stackCorners[currStackPos*8+7] = make_float3(stackCorners[(currStackPos-1)*8+1].x, stackCorners[(currStackPos-1)*8+1].y+cellSize, stackCorners[(currStackPos-1)*8+1].z+cellSize); // Sample vector field at cell corners (while reusing vals from the last subcell) stackV[currStackPos*8+0] = sampleVecFieldLin_D(stackV[(currStackPos-1)*8+0], stackV[(currStackPos-1)*8+1], 0.5); stackV[currStackPos*8+4] = sampleVecFieldBilin_D(stackV[(currStackPos-1)*8+0], stackV[(currStackPos-1)*8+1], stackV[(currStackPos-1)*8+4], stackV[(currStackPos-1)*8+5], 0.5, 0.5); stackV[currStackPos*8+2] = sampleVecFieldBilin_D(stackV[(currStackPos-1)*8+0], stackV[(currStackPos-1)*8+1], stackV[(currStackPos-1)*8+2], stackV[(currStackPos-1)*8+3], 0.5, 0.5); stackV[currStackPos*8+6] = sampleVecFieldTrilin_D(&stackV[(currStackPos-1)*8], 0.5, 0.5, 0.5); stackV[currStackPos*8+1] = stackV[(currStackPos-1)*8+1]; stackV[currStackPos*8+3] = sampleVecFieldLin_D(stackV[(currStackPos-1)*8+1], stackV[(currStackPos-1)*8+3], 0.5); stackV[currStackPos*8+5] = sampleVecFieldLin_D(stackV[(currStackPos-1)*8+1], stackV[(currStackPos-1)*8+5], 0.5); stackV[currStackPos*8+7] = sampleVecFieldBilin_D(stackV[(currStackPos-1)*8+1], stackV[(currStackPos-1)*8+3], stackV[(currStackPos-1)*8+5], stackV[(currStackPos-1)*8+7], 0.5, 0.5); } else if(currSubCell[currStackPos-1] == 2) { // left/top/back stackCorners[currStackPos*8+0] = make_float3(stackCorners[(currStackPos-1)*8+0].x, stackCorners[(currStackPos-1)*8+0].y+cellSize, stackCorners[(currStackPos-1)*8+0].z); stackCorners[currStackPos*8+1] = make_float3(stackCorners[(currStackPos-1)*8+0].x+cellSize, stackCorners[(currStackPos-1)*8+0].y+cellSize, stackCorners[(currStackPos-1)*8+0].z); stackCorners[currStackPos*8+2] = make_float3(stackCorners[(currStackPos-1)*8+2].x, 
stackCorners[(currStackPos-1)*8+2].y, stackCorners[(currStackPos-1)*8+2].z); stackCorners[currStackPos*8+3] = make_float3(stackCorners[(currStackPos-1)*8+2].x+cellSize, stackCorners[(currStackPos-1)*8+2].y, stackCorners[(currStackPos-1)*8+2].z); stackCorners[currStackPos*8+4] = make_float3(stackCorners[(currStackPos-1)*8+0].x, stackCorners[(currStackPos-1)*8+0].y+cellSize, stackCorners[(currStackPos-1)*8+0].z+cellSize); stackCorners[currStackPos*8+5] = make_float3(stackCorners[(currStackPos-1)*8+0].x+cellSize, stackCorners[(currStackPos-1)*8+0].y+cellSize, stackCorners[(currStackPos-1)*8+0].z+cellSize); stackCorners[currStackPos*8+6] = make_float3(stackCorners[(currStackPos-1)*8+2].x, stackCorners[(currStackPos-1)*8+2].y, stackCorners[(currStackPos-1)*8+2].z+cellSize); stackCorners[currStackPos*8+7] = make_float3(stackCorners[(currStackPos-1)*8+2].x+cellSize, stackCorners[(currStackPos-1)*8+2].y, stackCorners[(currStackPos-1)*8+2].z+cellSize); // Sample vector field at cell corners (while reusing vals from the last subcell) stackV[currStackPos*8+1] = sampleVecFieldBilin_D(stackV[(currStackPos-1)*8+0], stackV[(currStackPos-1)*8+1], stackV[(currStackPos-1)*8+2], stackV[(currStackPos-1)*8+3], 0.5, 0.5); stackV[currStackPos*8+5] = sampleVecFieldTrilin_D(&stackV[(currStackPos-1)*8], 0.5, 0.5, 0.5); stackV[currStackPos*8+0] = sampleVecFieldLin_D(stackV[(currStackPos-1)*8+0], stackV[(currStackPos-1)*8+2], 0.5); stackV[currStackPos*8+2] = stackV[(currStackPos-1)*8+2]; stackV[currStackPos*8+3] = sampleVecFieldLin_D(stackV[(currStackPos-1)*8+2], stackV[(currStackPos-1)*8+3], 0.5); stackV[currStackPos*8+4] = sampleVecFieldBilin_D(stackV[(currStackPos-1)*8+0], stackV[(currStackPos-1)*8+2], stackV[(currStackPos-1)*8+4], stackV[(currStackPos-1)*8+6], 0.5, 0.5); stackV[currStackPos*8+6] = sampleVecFieldLin_D(stackV[(currStackPos-1)*8+2], stackV[(currStackPos-1)*8+6], 0.5); stackV[currStackPos*8+7] = sampleVecFieldBilin_D(stackV[(currStackPos-1)*8+2], stackV[(currStackPos-1)*8+3], stackV[(currStackPos-1)*8+6], stackV[(currStackPos-1)*8+7], 0.5, 0.5); } else if(currSubCell[currStackPos-1] == 3) { // right/top/back stackCorners[currStackPos*8+0] = make_float3(stackCorners[(currStackPos-1)*8+0].x+cellSize, stackCorners[(currStackPos-1)*8+0].y+cellSize, stackCorners[(currStackPos-1)*8+0].z); stackCorners[currStackPos*8+1] = make_float3(stackCorners[(currStackPos-1)*8+1].x, stackCorners[(currStackPos-1)*8+1].y+cellSize, stackCorners[(currStackPos-1)*8+1].z); stackCorners[currStackPos*8+2] = make_float3(stackCorners[(currStackPos-1)*8+2].x+cellSize, stackCorners[(currStackPos-1)*8+2].y, stackCorners[(currStackPos-1)*8+2].z); stackCorners[currStackPos*8+3] = make_float3(stackCorners[(currStackPos-1)*8+3].x, stackCorners[(currStackPos-1)*8+3].y, stackCorners[(currStackPos-1)*8+3].z); stackCorners[currStackPos*8+4] = make_float3(stackCorners[(currStackPos-1)*8+0].x+cellSize, stackCorners[(currStackPos-1)*8+0].y+cellSize, stackCorners[(currStackPos-1)*8+0].z+cellSize); stackCorners[currStackPos*8+5] = make_float3(stackCorners[(currStackPos-1)*8+1].x, stackCorners[(currStackPos-1)*8+1].y+cellSize, stackCorners[(currStackPos-1)*8+1].z+cellSize); stackCorners[currStackPos*8+6] = make_float3(stackCorners[(currStackPos-1)*8+2].x+cellSize, stackCorners[(currStackPos-1)*8+2].y, stackCorners[(currStackPos-1)*8+2].z+cellSize); stackCorners[currStackPos*8+7] = make_float3(stackCorners[(currStackPos-1)*8+3].x, stackCorners[(currStackPos-1)*8+3].y, stackCorners[(currStackPos-1)*8+3].z+cellSize); // Sample vector field at cell 
corners (while reusing vals from the last subcell) stackV[currStackPos*8+0] = sampleVecFieldBilin_D(stackV[(currStackPos-1)*8+0], stackV[(currStackPos-1)*8+1], stackV[(currStackPos-1)*8+2], stackV[(currStackPos-1)*8+3], 0.5, 0.5); stackV[currStackPos*8+4] = sampleVecFieldTrilin_D(&stackV[(currStackPos-1)*8], 0.5, 0.5, 0.5); stackV[currStackPos*8+2] = sampleVecFieldLin_D(stackV[(currStackPos-1)*8+2], stackV[(currStackPos-1)*8+3], 0.5); stackV[currStackPos*8+6] = sampleVecFieldBilin_D(stackV[(currStackPos-1)*8+2], stackV[(currStackPos-1)*8+3], stackV[(currStackPos-1)*8+6], stackV[(currStackPos-1)*8+7], 0.5, 0.5); stackV[currStackPos*8+1] = sampleVecFieldLin_D(stackV[(currStackPos-1)*8+1], stackV[(currStackPos-1)*8+3], 0.5); stackV[currStackPos*8+3] = stackV[(currStackPos-1)*8+3]; stackV[currStackPos*8+5] = sampleVecFieldBilin_D(stackV[(currStackPos-1)*8+1], stackV[(currStackPos-1)*8+3], stackV[(currStackPos-1)*8+5], stackV[(currStackPos-1)*8+7], 0.5, 0.5); stackV[currStackPos*8+7] = sampleVecFieldLin_D(stackV[(currStackPos-1)*8+3], stackV[(currStackPos-1)*8+7], 0.5); } else if(currSubCell[currStackPos-1] == 4) { // left/bottom/front stackCorners[currStackPos*8+0] = make_float3(stackCorners[(currStackPos-1)*8+0].x, stackCorners[(currStackPos-1)*8+0].y, stackCorners[(currStackPos-1)*8+0].z+cellSize); stackCorners[currStackPos*8+1] = make_float3(stackCorners[(currStackPos-1)*8+0].x+cellSize, stackCorners[(currStackPos-1)*8+0].y, stackCorners[(currStackPos-1)*8+0].z+cellSize); stackCorners[currStackPos*8+2] = make_float3(stackCorners[(currStackPos-1)*8+0].x, stackCorners[(currStackPos-1)*8+0].y+cellSize, stackCorners[(currStackPos-1)*8+0].z+cellSize); stackCorners[currStackPos*8+3] = make_float3(stackCorners[(currStackPos-1)*8+0].x+cellSize, stackCorners[(currStackPos-1)*8+0].y+cellSize, stackCorners[(currStackPos-1)*8+0].z+cellSize); stackCorners[currStackPos*8+4] = make_float3(stackCorners[(currStackPos-1)*8+4].x, stackCorners[(currStackPos-1)*8+4].y, stackCorners[(currStackPos-1)*8+4].z); stackCorners[currStackPos*8+5] = make_float3(stackCorners[(currStackPos-1)*8+4].x+cellSize, stackCorners[(currStackPos-1)*8+4].y, stackCorners[(currStackPos-1)*8+4].z); stackCorners[currStackPos*8+6] = make_float3(stackCorners[(currStackPos-1)*8+4].x, stackCorners[(currStackPos-1)*8+4].y+cellSize, stackCorners[(currStackPos-1)*8+4].z); stackCorners[currStackPos*8+7] = make_float3(stackCorners[(currStackPos-1)*8+4].x+cellSize, stackCorners[(currStackPos-1)*8+4].y+cellSize, stackCorners[(currStackPos-1)*8+4].z); // Sample vector field at cell corners (while reusing vals from the last subcell) stackV[currStackPos*8+3] = sampleVecFieldTrilin_D(&stackV[(currStackPos-1)*8], 0.5, 0.5, 0.5); stackV[currStackPos*8+0] = sampleVecFieldLin_D(stackV[(currStackPos-1)*8+0], stackV[(currStackPos-1)*8+4], 0.5); stackV[currStackPos*8+1] = sampleVecFieldBilin_D(stackV[(currStackPos-1)*8+0], stackV[(currStackPos-1)*8+1], stackV[(currStackPos-1)*8+4], stackV[(currStackPos-1)*8+5], 0.5, 0.5); stackV[currStackPos*8+2] = sampleVecFieldBilin_D(stackV[(currStackPos-1)*8+0], stackV[(currStackPos-1)*8+2], stackV[(currStackPos-1)*8+4], stackV[(currStackPos-1)*8+6], 0.5, 0.5); stackV[currStackPos*8+4] = stackV[(currStackPos-1)*8+4]; stackV[currStackPos*8+5] = sampleVecFieldLin_D(stackV[(currStackPos-1)*8+4], stackV[(currStackPos-1)*8+5], 0.5); stackV[currStackPos*8+6] = sampleVecFieldLin_D(stackV[(currStackPos-1)*8+4], stackV[(currStackPos-1)*8+6], 0.5); stackV[currStackPos*8+7] = sampleVecFieldBilin_D(stackV[(currStackPos-1)*8+4], 
stackV[(currStackPos-1)*8+5], stackV[(currStackPos-1)*8+6], stackV[(currStackPos-1)*8+7], 0.5, 0.5); } else if(currSubCell[currStackPos-1] == 5) { // right/bottom/front stackCorners[currStackPos*8+0] = make_float3(stackCorners[(currStackPos-1)*8+0].x+cellSize, stackCorners[(currStackPos-1)*8+0].y, stackCorners[(currStackPos-1)*8+0].z+cellSize); stackCorners[currStackPos*8+1] = make_float3(stackCorners[(currStackPos-1)*8+1].x, stackCorners[(currStackPos-1)*8+1].y, stackCorners[(currStackPos-1)*8+1].z+cellSize); stackCorners[currStackPos*8+2] = make_float3(stackCorners[(currStackPos-1)*8+0].x+cellSize, stackCorners[(currStackPos-1)*8+0].y+cellSize, stackCorners[(currStackPos-1)*8+0].z+cellSize); stackCorners[currStackPos*8+3] = make_float3(stackCorners[(currStackPos-1)*8+1].x, stackCorners[(currStackPos-1)*8+1].y+cellSize, stackCorners[(currStackPos-1)*8+1].z+cellSize); stackCorners[currStackPos*8+4] = make_float3(stackCorners[(currStackPos-1)*8+4].x+cellSize, stackCorners[(currStackPos-1)*8+4].y, stackCorners[(currStackPos-1)*8+4].z); stackCorners[currStackPos*8+5] = make_float3(stackCorners[(currStackPos-1)*8+5].x, stackCorners[(currStackPos-1)*8+5].y, stackCorners[(currStackPos-1)*8+5].z); stackCorners[currStackPos*8+6] = make_float3(stackCorners[(currStackPos-1)*8+4].x+cellSize, stackCorners[(currStackPos-1)*8+4].y+cellSize, stackCorners[(currStackPos-1)*8+4].z); stackCorners[currStackPos*8+7] = make_float3(stackCorners[(currStackPos-1)*8+5].x, stackCorners[(currStackPos-1)*8+5].y+cellSize, stackCorners[(currStackPos-1)*8+5].z); // Sample vector field at cell corners (while reusing vals from the last subcell) stackV[currStackPos*8+0] = sampleVecFieldBilin_D(stackV[(currStackPos-1)*8+0], stackV[(currStackPos-1)*8+1], stackV[(currStackPos-1)*8+4], stackV[(currStackPos-1)*8+5], 0.5, 0.5); stackV[currStackPos*8+4] = sampleVecFieldLin_D(stackV[(currStackPos-1)*8+4], stackV[(currStackPos-1)*8+5], 0.5); stackV[currStackPos*8+2] = sampleVecFieldTrilin_D(&stackV[(currStackPos-1)*8], 0.5, 0.5, 0.5); stackV[currStackPos*8+6] = sampleVecFieldBilin_D(stackV[(currStackPos-1)*8+4], stackV[(currStackPos-1)*8+5], stackV[(currStackPos-1)*8+6], stackV[(currStackPos-1)*8+7], 0.5, 0.5); stackV[currStackPos*8+1] = sampleVecFieldLin_D(stackV[(currStackPos-1)*8+1], stackV[(currStackPos-1)*8+5], 0.5); stackV[currStackPos*8+3] = sampleVecFieldBilin_D(stackV[(currStackPos-1)*8+1], stackV[(currStackPos-1)*8+3], stackV[(currStackPos-1)*8+5], stackV[(currStackPos-1)*8+7], 0.5, 0.5); stackV[currStackPos*8+5] = stackV[(currStackPos-1)*8+5]; stackV[currStackPos*8+7] = sampleVecFieldLin_D(stackV[(currStackPos-1)*8+5], stackV[(currStackPos-1)*8+7], 0.5); } else if(currSubCell[currStackPos-1] == 6) { // left/top/front stackCorners[currStackPos*8+0] = make_float3(stackCorners[(currStackPos-1)*8+0].x, stackCorners[(currStackPos-1)*8+0].y+cellSize, stackCorners[(currStackPos-1)*8+0].z+cellSize); stackCorners[currStackPos*8+1] = make_float3(stackCorners[(currStackPos-1)*8+0].x+cellSize, stackCorners[(currStackPos-1)*8+0].y+cellSize, stackCorners[(currStackPos-1)*8+0].z+cellSize); stackCorners[currStackPos*8+2] = make_float3(stackCorners[(currStackPos-1)*8+2].x, stackCorners[(currStackPos-1)*8+2].y, stackCorners[(currStackPos-1)*8+2].z+cellSize); stackCorners[currStackPos*8+3] = make_float3(stackCorners[(currStackPos-1)*8+2].x+cellSize, stackCorners[(currStackPos-1)*8+2].y, stackCorners[(currStackPos-1)*8+2].z+cellSize); stackCorners[currStackPos*8+4] = make_float3(stackCorners[(currStackPos-1)*8+4].x, 
stackCorners[(currStackPos-1)*8+4].y+cellSize, stackCorners[(currStackPos-1)*8+4].z); stackCorners[currStackPos*8+5] = make_float3(stackCorners[(currStackPos-1)*8+4].x+cellSize, stackCorners[(currStackPos-1)*8+4].y+cellSize, stackCorners[(currStackPos-1)*8+4].z); stackCorners[currStackPos*8+6] = make_float3(stackCorners[(currStackPos-1)*8+6].x, stackCorners[(currStackPos-1)*8+6].y, stackCorners[(currStackPos-1)*8+6].z); stackCorners[currStackPos*8+7] = make_float3(stackCorners[(currStackPos-1)*8+6].x+cellSize, stackCorners[(currStackPos-1)*8+6].y, stackCorners[(currStackPos-1)*8+6].z); // Sample vector field at cell corners (while reusing vals from the last subcell) stackV[currStackPos*8+1] = sampleVecFieldTrilin_D(&stackV[(currStackPos-1)*8], 0.5, 0.5, 0.5); stackV[currStackPos*8+5] = sampleVecFieldBilin_D(stackV[(currStackPos-1)*8+4], stackV[(currStackPos-1)*8+5], stackV[(currStackPos-1)*8+6], stackV[(currStackPos-1)*8+7], 0.5, 0.5); stackV[currStackPos*8+2] = sampleVecFieldLin_D(stackV[(currStackPos-1)*8+2], stackV[(currStackPos-1)*8+6], 0.5); stackV[currStackPos*8+0] = sampleVecFieldBilin_D(stackV[(currStackPos-1)*8+0], stackV[(currStackPos-1)*8+2], stackV[(currStackPos-1)*8+4], stackV[(currStackPos-1)*8+6], 0.5, 0.5); stackV[currStackPos*8+3] = sampleVecFieldBilin_D(stackV[(currStackPos-1)*8+2], stackV[(currStackPos-1)*8+3], stackV[(currStackPos-1)*8+6], stackV[(currStackPos-1)*8+7], 0.5, 0.5); stackV[currStackPos*8+4] = sampleVecFieldLin_D(stackV[(currStackPos-1)*8+4], stackV[(currStackPos-1)*8+6], 0.5); stackV[currStackPos*8+6] = stackV[(currStackPos-1)*8+6]; stackV[currStackPos*8+7] = sampleVecFieldLin_D(stackV[(currStackPos-1)*8+6], stackV[(currStackPos-1)*8+7], 0.5); } else if(currSubCell[currStackPos-1] == 7) { // right/top/front stackCorners[currStackPos*8+0] = make_float3(stackCorners[(currStackPos-1)*8+0].x+cellSize, stackCorners[(currStackPos-1)*8+0].y+cellSize, stackCorners[(currStackPos-1)*8+0].z+cellSize); stackCorners[currStackPos*8+1] = make_float3(stackCorners[(currStackPos-1)*8+1].x, stackCorners[(currStackPos-1)*8+1].y+cellSize, stackCorners[(currStackPos-1)*8+1].z+cellSize); stackCorners[currStackPos*8+2] = make_float3(stackCorners[(currStackPos-1)*8+2].x+cellSize, stackCorners[(currStackPos-1)*8+2].y, stackCorners[(currStackPos-1)*8+2].z+cellSize); stackCorners[currStackPos*8+3] = make_float3(stackCorners[(currStackPos-1)*8+3].x, stackCorners[(currStackPos-1)*8+3].y, stackCorners[(currStackPos-1)*8+3].z+cellSize); stackCorners[currStackPos*8+4] = make_float3(stackCorners[(currStackPos-1)*8+4].x+cellSize, stackCorners[(currStackPos-1)*8+4].y+cellSize, stackCorners[(currStackPos-1)*8+4].z); stackCorners[currStackPos*8+5] = make_float3(stackCorners[(currStackPos-1)*8+5].x, stackCorners[(currStackPos-1)*8+5].y+cellSize, stackCorners[(currStackPos-1)*8+5].z); stackCorners[currStackPos*8+6] = make_float3(stackCorners[(currStackPos-1)*8+6].x+cellSize, stackCorners[(currStackPos-1)*8+6].y, stackCorners[(currStackPos-1)*8+6].z); stackCorners[currStackPos*8+7] = make_float3(stackCorners[(currStackPos-1)*8+7].x, stackCorners[(currStackPos-1)*8+7].y, stackCorners[(currStackPos-1)*8+7].z); // Sample vector field at cell corners (while reusing vals from the last subcell) stackV[currStackPos*8+0] = sampleVecFieldTrilin_D(&stackV[(currStackPos-1)*8], 0.5, 0.5, 0.5); stackV[currStackPos*8+4] = sampleVecFieldBilin_D(stackV[(currStackPos-1)*8+4], stackV[(currStackPos-1)*8+5], stackV[(currStackPos-1)*8+6], stackV[(currStackPos-1)*8+7], 0.5, 0.5); stackV[currStackPos*8+2] = 
sampleVecFieldBilin_D(stackV[(currStackPos-1)*8+2], stackV[(currStackPos-1)*8+3], stackV[(currStackPos-1)*8+6], stackV[(currStackPos-1)*8+7], 0.5, 0.5); stackV[currStackPos*8+6] = sampleVecFieldLin_D(stackV[(currStackPos-1)*8+6], stackV[(currStackPos-1)*8+7], 0.5); stackV[currStackPos*8+1] = sampleVecFieldBilin_D(stackV[(currStackPos-1)*8+1], stackV[(currStackPos-1)*8+3], stackV[(currStackPos-1)*8+5], stackV[(currStackPos-1)*8+7], 0.5, 0.5); stackV[currStackPos*8+3] = sampleVecFieldLin_D(stackV[(currStackPos-1)*8+3], stackV[(currStackPos-1)*8+7], 0.5); stackV[currStackPos*8+5] = sampleVecFieldLin_D(stackV[(currStackPos-1)*8+5], stackV[(currStackPos-1)*8+7], 0.5); stackV[currStackPos*8+7] = stackV[(currStackPos-1)*8+7]; } currSubCell[currStackPos] = 0; } else { // Put the center of the current (sub-)cell on the stack cellCoords_D[idx].x = stackCorners[8*currStackPos+0].x + cellSize*0.5; cellCoords_D[idx].y = stackCorners[8*currStackPos+0].y + cellSize*0.5; cellCoords_D[idx].z = stackCorners[8*currStackPos+0].z + cellSize*0.5; done = true; } } else { if(currStackPos > 0) { currStackPos--; cellSize = cellSize*2.0; currSubCell[currStackPos]++; } else { // Field is not vanishing in this cell done = true; } } } } extern "C" { /* SetGridParams */ cudaError_t SetGridParams(uint3 dim_h, float3 org_h, float3 maxCoord_h, float3 spacing_h) { checkCudaErrors(cudaMemcpyToSymbol(dim, &dim_h, sizeof(uint3))); checkCudaErrors(cudaMemcpyToSymbol(org, &org_h, sizeof(float3))); checkCudaErrors(cudaMemcpyToSymbol(maxCoord, &maxCoord_h, sizeof(float3))); checkCudaErrors(cudaMemcpyToSymbol(spacing, &spacing_h, sizeof(float3))); return cudaGetLastError(); } /* SetStreamlineStepsize */ cudaError_t SetStreamlineParams(float stepsize_h, uint maxSteps) { checkCudaErrors(cudaMemcpyToSymbol(streamlinesStep, &stepsize_h, sizeof(float))); checkCudaErrors(cudaMemcpyToSymbol(streamlinesStepCnt, &maxSteps, sizeof(uint))); return cudaGetLastError(); } /* SetNumberOfPos */ cudaError_t SetNumberOfPos(uint nPos_h) { checkCudaErrors(cudaMemcpyToSymbol(nPos, &nPos_h, sizeof(uint))); return cudaGetLastError(); } /* UpdatePosition */ cudaError_t UpdatePositionRK4( const float *vecField, uint3 dim, float *pos, uint nPos, uint maxIt, bool backward) { uint nThreadsPerBlock = min(512, nPos); uint nBlocks = ceil((float)(nPos)/(float)(nThreadsPerBlock)); float3 *vecField_D, *pos_D; // Allocate device memory checkCudaErrors(cudaMalloc((void **)&vecField_D, sizeof(float)*dim.x*dim.y*dim.z*3)); checkCudaErrors(cudaMalloc((void **)&pos_D, sizeof(float)*nPos*3)); // Copy vec field data to device memory checkCudaErrors(cudaMemcpy(vecField_D, vecField, sizeof(float)*dim.x*dim.y*dim.z*3, cudaMemcpyHostToDevice)); // Copy positions to device memory checkCudaErrors(cudaMemcpy(pos_D, pos, sizeof(float)*nPos*3, cudaMemcpyHostToDevice)); if(backward) { //printf("CUDA streamline integration (backward), max steps %u\n", maxIt); // DEBUG for(uint i = 0; i < maxIt; i++) { // Update position maxIt times UpdatePositionBackwardRK4_D <<< nBlocks, nThreadsPerBlock >>> (vecField_D, pos_D); cudaThreadSynchronize(); } } else { //printf("CUDA streamline integration, max steps %u\n", maxIt); // DEBUG for(uint i = 0; i < maxIt; i++) { // Update position maxIt times UpdatePositionRK4_D <<< nBlocks, nThreadsPerBlock >>> (vecField_D, pos_D); cudaThreadSynchronize(); } } // Copy updated positions back to host memory checkCudaErrors(cudaMemcpy(pos, pos_D, sizeof(float)*nPos*3, cudaMemcpyDeviceToHost)); // Cleanup device memory checkCudaErrors(cudaFree(vecField_D)); 
checkCudaErrors(cudaFree(pos_D)); return cudaGetLastError(); } /* SearchNullPoints */ cudaError_t SearchNullPoints( const float *vecField, uint3 dim, float3 org, float3 spacing, float *cellCoords, unsigned int maxStackSize) { uint n = (dim.x-1)*(dim.y-1)*(dim.z-1); uint nThreadsPerBlock = min(512, n); uint nBlocks = ceil((float)(n)/(float)(nThreadsPerBlock)); float3 *vecField_D, *cellCoords_D; // Allocate device memory checkCudaErrors(cudaMalloc((void **)&vecField_D, sizeof(float)*dim.x*dim.y*dim.z*3)); checkCudaErrors(cudaMalloc((void **)&cellCoords_D, sizeof(float)*n*3)); // Copy vec field data to device memory checkCudaErrors(cudaMemcpy(vecField_D, vecField, sizeof(float)*dim.x*dim.y*dim.z*3, cudaMemcpyHostToDevice)); // Calculate cell coordinates of the critical points calcCellCoords_D2 <<< nBlocks, nThreadsPerBlock >>> (vecField_D, cellCoords_D); // Copy cell coords back to host memory checkCudaErrors(cudaMemcpy(cellCoords, cellCoords_D, sizeof(float)*n*3, cudaMemcpyDeviceToHost)); // Cleanup device memory checkCudaErrors(cudaFree(vecField_D)); checkCudaErrors(cudaFree(cellCoords_D)); return cudaGetLastError(); } } // Streamline integration ////////////////////////////////////////////////////// /** * Calculates the gradient field of a given scalar field. * * @param[in] scalarField_D The scalar field (device memory) * @param[out] gradientField_D The gradient field (device memory) */ __global__ void CalcGradient_D(float *scalarDield_D, float *gradientField_D) { uint idx = blockIdx.x * blockDim.x + threadIdx.x; // Get coordinates of the lower/left/back corner // uint3 c; // c.x = idx%(dim.x-1); // c.y = (idx/(dim.x-1))%(dim.y-1); // c.z = (idx/(dim.x-1))/(dim.y-1); uint3 c; c.x = idx%dim.x; c.y = (idx/dim.x)%dim.y; c.z = (idx/dim.x)/dim.y; // Omit border cells if (c.x == 0) { return; } if (c.y == 0) { return; } if (c.z == 0) { return; } if (c.x >= dim.x-1) { return; } if (c.y >= dim.y-1) { return; } if (c.z >= dim.z-1) { return; } float3 gradient; gradient.x = scalarDield_D[dim.x*(dim.y*c.z+c.y)+c.x+1]; gradient.y = scalarDield_D[dim.x*(dim.y*c.z+c.y+1)+c.x]; gradient.z = scalarDield_D[dim.x*(dim.y*(c.z+1)+c.y)+c.x]; gradient.x -= scalarDield_D[dim.x*(dim.y*c.z+c.y)+c.x-1]; gradient.y -= scalarDield_D[dim.x*(dim.y*c.z+c.y-1)+c.x]; gradient.z -= scalarDield_D[dim.x*(dim.y*(c.z-1)+c.y)+c.x]; gradient = normalize(gradient); gradientField_D[3*idx+0] = gradient.x; gradientField_D[3*idx+1] = gradient.y; gradientField_D[3*idx+2] = gradient.z; } extern "C" cudaError_t CalcGradient(float *scalarDield_D, float *gradientField_D, uint volsize) { #ifdef USE_TIMER float dt_ms; cudaEvent_t event1, event2; cudaEventCreate(&event1); cudaEventCreate(&event2); cudaEventRecord(event1, 0); #endif const int threadsPerBlock = 256; const int blocksPerGrid = (volsize + threadsPerBlock - 1) / threadsPerBlock; dim3 grid(blocksPerGrid, 1, 1); // Calculate gradient of the scalar field CalcGradient_D <<< grid, threadsPerBlock >>> ( scalarDield_D, gradientField_D); #ifdef USE_TIMER cudaEventRecord(event2, 0); cudaEventSynchronize(event1); cudaEventSynchronize(event2); cudaEventElapsedTime(&dt_ms, event1, event2); printf("CUDA time 'CalcGradient_D': %.10f sec\n", dt_ms/1000.0f); #endif return cudaGetLastError(); } /** TODO */ __global__ void InitStartPos_D(float *vertexDataBuffer_D, float *streamlinePos_D, uint vertexDataBufferStride, uint vertexDataBufferOffsPos) { uint idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx > nPos) { return; } streamlinePos_D[idx*streamlinesStepCnt*6+0] = 
vertexDataBuffer_D[idx*vertexDataBufferStride+vertexDataBufferOffsPos+0]; streamlinePos_D[idx*streamlinesStepCnt*6+1] = vertexDataBuffer_D[idx*vertexDataBufferStride+vertexDataBufferOffsPos+1]; streamlinePos_D[idx*streamlinesStepCnt*6+2] = vertexDataBuffer_D[idx*vertexDataBufferStride+vertexDataBufferOffsPos+2]; } cudaError_t InitStartPos(float *vertexDataBuffer_D, float *streamlinePos_D, uint vertexDataBufferStride, uint vertexDataBufferOffsPos, uint vertexCnt) { #ifdef USE_TIMER float dt_ms; cudaEvent_t event1, event2; cudaEventCreate(&event1); cudaEventCreate(&event2); cudaEventRecord(event1, 0); #endif const int threadsPerBlock = 256; const int blocksPerGrid = (vertexCnt + threadsPerBlock - 1) / threadsPerBlock; dim3 grid(blocksPerGrid, 1, 1); // Calculate gradient of the scalar field InitStartPos_D <<< grid, threadsPerBlock >>> ( vertexDataBuffer_D, streamlinePos_D, vertexDataBufferStride, vertexDataBufferOffsPos); #ifdef USE_TIMER cudaEventRecord(event2, 0); cudaEventSynchronize(event1); cudaEventSynchronize(event2); cudaEventElapsedTime(&dt_ms, event1, event2); printf("CUDA time 'InitStartPos_D': %.10f sec\n", dt_ms/1000.0f); #endif return cudaGetLastError(); } /** TODO */ __global__ void UpdateStreamlinePos_D(float *streamlinePos_D, float3 *gradientField_D, uint step) { uint idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx > nPos) { return; } float3 currPos; currPos.x = streamlinePos_D[idx*streamlinesStepCnt*6+step*6+0]; currPos.y = streamlinePos_D[idx*streamlinesStepCnt*6+step*6+1]; currPos.z = streamlinePos_D[idx*streamlinesStepCnt*6+step*6+2]; float3 x0, x1, x2, x3, v0, v1, v2, v3; v0 = make_float3(0.0, 0.0, 0.0); v1 = make_float3(0.0, 0.0, 0.0); v2 = make_float3(0.0, 0.0, 0.0); v3 = make_float3(0.0, 0.0, 0.0); x0 = currPos; v0 = normalize(sampleVecFieldAtTrilinNorm_D(x0, gradientField_D)); v0 *= streamlinesStep; x1 = x0 + 0.5*v0; if(isValidGridPos_D(x1)) { v1 = normalize(sampleVecFieldAtTrilinNorm_D(x1, gradientField_D)); v1 *= streamlinesStep; } x2 = x0 + 0.5f*v1; if(isValidGridPos_D(x2)) { v2 = normalize(sampleVecFieldAtTrilinNorm_D(x2, gradientField_D)); v2 *= streamlinesStep; } x3 = x0 + v2; if(isValidGridPos_D(x3)) { v3 = normalize(sampleVecFieldAtTrilinNorm_D(x3, gradientField_D)); v3 *= streamlinesStep; } x0 += (1.0f/6.0f)*(v0+2.0f*v1+2.0f*v2+v3); // Copy position to streamline position array if it is valid if(isValidGridPos_D(x0)) { streamlinePos_D[idx*streamlinesStepCnt*6+step*6+3] = x0.x; streamlinePos_D[idx*streamlinesStepCnt*6+step*6+4] = x0.y; streamlinePos_D[idx*streamlinesStepCnt*6+step*6+5] = x0.z; } else { streamlinePos_D[idx*streamlinesStepCnt*6+step*6+3] = streamlinePos_D[idx*streamlinesStepCnt*6+step*6+0]; streamlinePos_D[idx*streamlinesStepCnt*6+step*6+4] = streamlinePos_D[idx*streamlinesStepCnt*6+step*6+1]; streamlinePos_D[idx*streamlinesStepCnt*6+step*6+5] = streamlinePos_D[idx*streamlinesStepCnt*6+step*6+2]; } // Copy new position to the next line segment streamlinePos_D[idx*streamlinesStepCnt*6+(step+1)*6+0] = streamlinePos_D[idx*streamlinesStepCnt*6+step*6+3]; streamlinePos_D[idx*streamlinesStepCnt*6+(step+1)*6+1] = streamlinePos_D[idx*streamlinesStepCnt*6+step*6+4]; streamlinePos_D[idx*streamlinesStepCnt*6+(step+1)*6+2] = streamlinePos_D[idx*streamlinesStepCnt*6+step*6+5]; } cudaError_t UpdateStreamlinePos(float *streamlinePos_D,float *gradientField_D, uint vertexCnt, uint step) { #ifdef USE_TIMER float dt_ms; cudaEvent_t event1, event2; cudaEventCreate(&event1); cudaEventCreate(&event2); cudaEventRecord(event1, 0); #endif const int 
threadsPerBlock = 256; const int blocksPerGrid = (vertexCnt + threadsPerBlock - 1) / threadsPerBlock; dim3 grid(blocksPerGrid, 1, 1); // Advance all streamline positions by one integration step UpdateStreamlinePos_D <<< grid, threadsPerBlock >>> ( streamlinePos_D, (float3*)(gradientField_D), step); #ifdef USE_TIMER cudaEventRecord(event2, 0); cudaEventSynchronize(event1); cudaEventSynchronize(event2); cudaEventElapsedTime(&dt_ms, event1, event2); printf("CUDA time 'UpdateStreamlinePos_D': %.10f sec\n", dt_ms/1000.0f); #endif return cudaGetLastError(); }
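The three host entry points above (CalcGradient, InitStartPos, UpdateStreamlinePos) are meant to be chained by the caller. The driver below is an editorial sketch, not part of the original file: the name IntegrateStreamlines and the nSteps parameter are hypothetical, and it assumes the device buffers, the uint typedef and the streamlinesStep/streamlinesStepCnt constants are already set up elsewhere.

// Hypothetical host-side driver (illustration only, matching the signatures above).
// nSteps must stay below the per-streamline capacity (streamlinesStepCnt),
// because step s also writes the start position of step s+1.
cudaError_t IntegrateStreamlines(float *scalarField_D, float *gradientField_D,
                                 float *vertexDataBuffer_D, float *streamlinePos_D,
                                 uint volsize, uint vertexCnt,
                                 uint stride, uint offsPos, uint nSteps) {
    // 1. The gradient of the scalar field is the vector field to integrate.
    cudaError_t err = CalcGradient(scalarField_D, gradientField_D, volsize);
    if (err != cudaSuccess) return err;
    // 2. Seed one streamline per vertex at its current position.
    err = InitStartPos(vertexDataBuffer_D, streamlinePos_D, stride, offsPos, vertexCnt);
    if (err != cudaSuccess) return err;
    // 3. Advance all streamlines, one RK4 step per kernel launch.
    for (uint s = 0; s < nSteps; ++s) {
        err = UpdateStreamlinePos(streamlinePos_D, gradientField_D, vertexCnt, s);
        if (err != cudaSuccess) return err;
    }
    return cudaSuccess;
}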
4c764494c8eb9933bcfc3e333785248b08ff99bf.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime.h> #include <stdio.h> #include <float.h> extern "C" { #include "../../../../includes/vectors.h" #include "../../../../includes/rt.h" #include "../../cudaheader/gpu_rt.h" } __host__ __device__ t_color handle_reflection_gpu(t_world world, t_ray *ray, t_intersection *intersection, int *flag) { t_intersection intersection_tmp; t_ray ray_tmp; new_intersection(&intersection_tmp); intersection_tmp.id = intersection->id; intersection_tmp.depth = intersection->depth + 1; ray_tmp.origin = intersection->pos; ray_tmp.dir = vector_normalize(vector_substract(ray->dir, vector_scalar(intersection->normal_v, 2 * vector_dot(ray->dir, intersection->normal_v)))); get_closest_intersection(world, ray_tmp, &intersection_tmp); if (intersection_tmp.type == '0') return ((t_color){0, 0, 0}); else { *intersection = intersection_tmp; *ray = ray_tmp; *flag = 0; return (intersection_tmp.color); } } __host__ __device__ t_color handle_transparence_gpu(t_world world, t_ray *ray, t_intersection *intersection, int *flag) { t_intersection intersection_tmp; t_ray ray_tmp; new_intersection(&intersection_tmp); intersection_tmp.id = intersection->id; intersection_tmp.pos = intersection->pos; intersection_tmp.depth = intersection->depth + 1; ray_tmp.origin = intersection->pos; ray_tmp.dir = ray->dir; get_closest_intersection(world, ray_tmp, &intersection_tmp); if (intersection_tmp.type == '0') return ((t_color){0, 0, 0}); else { *intersection = intersection_tmp; *ray = ray_tmp; *flag = 0; return (intersection_tmp.color); } } __host__ __device__ t_color handle_refraction_gpu(t_world world, t_ray *ray, t_intersection *intersection, int *flag) { t_intersection intersection_tmp; t_ray ray_tmp; double etai; double cosi; double eta; double etat; double k; t_vec3d n; new_intersection(&intersection_tmp); intersection_tmp.id = intersection->id; intersection_tmp.pos = intersection->pos; intersection_tmp.depth = intersection->depth + 1; ray_tmp.origin = intersection->pos; cosi = clamp(-1, 1, vector_dot(ray->dir, intersection->normal_v)); etai = 1.0; etat = intersection->refraction_coef + 1.; n = intersection->normal_v; if (cosi < 0) cosi = -cosi; else { swap_double(&etai, &etat); n = vector_scalar(n, -1); } eta = etai / etat; k = 1.0 - eta * eta * (1.0 - cosi * cosi); if (k < 0) return (intersection->color); ray_tmp.dir = vector_add(vector_scalar(ray->dir, eta), vector_scalar(n, (eta * cosi - sqrt(k)))); if (get_closest_intersection(world, ray_tmp, &intersection_tmp) == 0) return ((t_color){0, 0, 0}); else { *intersection = intersection_tmp; *ray = ray_tmp; *flag = 0; return (intersection_tmp.color); } }
4c764494c8eb9933bcfc3e333785248b08ff99bf.cu
#include <cuda.h> #include <stdio.h> #include <float.h> extern "C" { #include "../../../../includes/vectors.h" #include "../../../../includes/rt.h" #include "../../cudaheader/gpu_rt.h" } __host__ __device__ t_color handle_reflection_gpu(t_world world, t_ray *ray, t_intersection *intersection, int *flag) { t_intersection intersection_tmp; t_ray ray_tmp; new_intersection(&intersection_tmp); intersection_tmp.id = intersection->id; intersection_tmp.depth = intersection->depth + 1; ray_tmp.origin = intersection->pos; ray_tmp.dir = vector_normalize(vector_substract(ray->dir, vector_scalar(intersection->normal_v, 2 * vector_dot(ray->dir, intersection->normal_v)))); get_closest_intersection(world, ray_tmp, &intersection_tmp); if (intersection_tmp.type == '0') return ((t_color){0, 0, 0}); else { *intersection = intersection_tmp; *ray = ray_tmp; *flag = 0; return (intersection_tmp.color); } } __host__ __device__ t_color handle_transparence_gpu(t_world world, t_ray *ray, t_intersection *intersection, int *flag) { t_intersection intersection_tmp; t_ray ray_tmp; new_intersection(&intersection_tmp); intersection_tmp.id = intersection->id; intersection_tmp.pos = intersection->pos; intersection_tmp.depth = intersection->depth + 1; ray_tmp.origin = intersection->pos; ray_tmp.dir = ray->dir; get_closest_intersection(world, ray_tmp, &intersection_tmp); if (intersection_tmp.type == '0') return ((t_color){0, 0, 0}); else { *intersection = intersection_tmp; *ray = ray_tmp; *flag = 0; return (intersection_tmp.color); } } __host__ __device__ t_color handle_refraction_gpu(t_world world, t_ray *ray, t_intersection *intersection, int *flag) { t_intersection intersection_tmp; t_ray ray_tmp; double etai; double cosi; double eta; double etat; double k; t_vec3d n; new_intersection(&intersection_tmp); intersection_tmp.id = intersection->id; intersection_tmp.pos = intersection->pos; intersection_tmp.depth = intersection->depth + 1; ray_tmp.origin = intersection->pos; cosi = clamp(-1, 1, vector_dot(ray->dir, intersection->normal_v)); etai = 1.0; etat = intersection->refraction_coef + 1.; n = intersection->normal_v; if (cosi < 0) cosi = -cosi; else { swap_double(&etai, &etat); n = vector_scalar(n, -1); } eta = etai / etat; k = 1.0 - eta * eta * (1.0 - cosi * cosi); if (k < 0) return (intersection->color); ray_tmp.dir = vector_add(vector_scalar(ray->dir, eta), vector_scalar(n, (eta * cosi - sqrt(k)))); if (get_closest_intersection(world, ray_tmp, &intersection_tmp) == 0) return ((t_color){0, 0, 0}); else { *intersection = intersection_tmp; *ray = ray_tmp; *flag = 0; return (intersection_tmp.color); } }
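For reference, the direction updates implemented in the two copies above (the hipified .hip file and the original .cu file are identical apart from the runtime header) follow the standard vector forms; this summary is an editorial note, not text from the original sources. With incident direction \(\mathbf{d}\) and surface normal \(\mathbf{n}\):

\[
\text{reflection:}\quad \mathbf{r} = \mathbf{d} - 2(\mathbf{d}\cdot\mathbf{n})\,\mathbf{n},
\qquad
\text{refraction:}\quad \mathbf{t} = \eta\,\mathbf{d} + \bigl(\eta\cos\theta_i - \sqrt{k}\bigr)\,\mathbf{n},
\]

where \(\cos\theta_i = |\mathbf{d}\cdot\mathbf{n}|\) (the code flips \(\mathbf{n}\) and swaps \(\eta_i,\eta_t\) when the ray exits the medium), \(\eta = \eta_i/\eta_t\) with \(\eta_t = \text{refraction\_coef} + 1\), and \(k = 1 - \eta^2(1 - \cos^2\theta_i)\). handle_reflection_gpu additionally normalizes \(\mathbf{r}\); when \(k < 0\) the ray is totally internally reflected and handle_refraction_gpu simply returns intersection->color instead of tracing a refracted ray.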
c6ee29682c7bffe29d40c4ba5f60f5c48230c147.hip
// !!! This is a file automatically generated by hipify!!! /* * sp_dec.c * * * Project: * AMR Floating-Point Codec * * Contains: * This module contains all the functions needed decoding AMR * encoder parameters to 16-bit speech samples * */ /* * include files */ #include <stdio.h> #include <stdlib.h> #include <memory.h> #include <math.h> #include <float.h> #include "typedef.h" #include "dec.h" #include "hip/hip_runtime.h" #define PRMNO_MR475 17 #define PRMNO_MR515 19 #define PRMNO_MR59 19 #define PRMNO_MR67 19 #define PRMNO_MR74 19 #define PRMNO_MR795 23 #define PRMNO_MR102 39 #define PRMNO_MR122 57 #define PRMNO_MRDTX 5 /* * tables */ __device__ static const UWord8 block_size[16] = { 13, 14, 16, 18, 20, 21, 27, 32, 6 , 0 , 0 , 0 , 0 , 0 , 0 , 1 }; __device__ static const UWord8 toc_byte[16] = { 0x04, 0x0C, 0x14, 0x1C, 0x24, 0x2C, 0x34, 0x3C, 0x44, 0x4C, 0x54, 0x5C, 0x64, 0x6C, 0x74, 0x7C }; /* Subjective importance of the speech encoded bits */ __device__ static Word16 order_MR475[] = { 0, 0x80, 0, 0x40, 0, 0x20, 0, 0x10, 0, 0x8, 0, 0x4, 0, 0x2, 0, 0x1, 1, 0x80, 1, 0x40, 1, 0x20, 1, 0x10, 1, 0x8, 1, 0x4, 1, 0x2, 1, 0x1, 3, 0x80, 3, 0x40, 3, 0x20, 3, 0x10, 3, 0x8, 3, 0x4, 7, 0x8, 7, 0x4, 10, 0x8, 10, 0x4, 14, 0x8, 14, 0x4, 6, 0x1, 6, 0x2, 6, 0x4, 6, 0x8, 13, 0x1, 13, 0x2, 13, 0x4, 13, 0x8, 2, 0x20, 2, 0x10, 2, 0x4, 2, 0x1, 13, 0x10, 13, 0x20, 13, 0x40, 13, 0x80, 3, 0x2, 3, 0x1, 6, 0x10, 6, 0x20, 6, 0x40, 6, 0x80, 5, 0x2, 5, 0x1, 2, 0x40, 2, 0x8, 2, 0x2, 7, 0x2, 7, 0x1, 9, 0x2, 9, 0x1, 10, 0x2, 10, 0x1, 12, 0x2, 12, 0x1, 14, 0x2, 14, 0x1, 16, 0x2, 16, 0x1, 4, 0x20, 4, 0x10, 4, 0x4, 4, 0x2, 8, 0x20, 8, 0x10, 8, 0x4, 8, 0x2, 11, 0x20, 11, 0x10, 11, 0x4, 11, 0x2, 15, 0x20, 15, 0x10, 15, 0x4, 15, 0x2, 4, 0x8, 8, 0x8, 11, 0x8, 15, 0x8, 4, 0x1, 8, 0x1, 11, 0x1, 15, 0x1, 4, 0x40, 8, 0x40, 11, 0x40, 15, 0x40 }; __device__ static Word16 order_MR515[] = { 0, 0x1, 0, 0x2, 0, 0x4, 0, 0x8, 0, 0x10, 0, 0x20, 0, 0x40, 0, 0x80, 1, 0x1, 1, 0x2, 1, 0x4, 1, 0x8, 1, 0x10, 1, 0x20, 1, 0x40, 1, 0x80, 3, 0x80, 3, 0x40, 3, 0x20, 3, 0x10, 3, 0x8, 7, 0x8, 11, 0x8, 15, 0x8, 6, 0x1, 6, 0x2, 6, 0x4, 10, 0x1, 10, 0x2, 10, 0x4, 14, 0x1, 14, 0x2, 14, 0x4, 18, 0x1, 18, 0x2, 18, 0x4, 6, 0x8, 10, 0x8, 14, 0x8, 18, 0x8, 3, 0x4, 7, 0x4, 11, 0x4, 15, 0x4, 2, 0x10, 6, 0x10, 10, 0x10, 14, 0x10, 18, 0x10, 3, 0x2, 7, 0x2, 11, 0x2, 2, 0x20, 2, 0x4, 2, 0x1, 6, 0x20, 10, 0x20, 14, 0x20, 18, 0x20, 2, 0x2, 3, 0x1, 7, 0x1, 11, 0x1, 15, 0x2, 2, 0x8, 2, 0x40, 15, 0x1, 5, 0x1, 5, 0x2, 9, 0x1, 9, 0x2, 13, 0x1, 4, 0x4, 8, 0x4, 12, 0x4, 16, 0x4, 13, 0x2, 17, 0x1, 17, 0x2, 4, 0x2, 8, 0x2, 12, 0x2, 16, 0x2, 4, 0x20, 8, 0x20, 4, 0x10, 8, 0x10, 12, 0x20, 12, 0x10, 16, 0x20, 16, 0x10, 4, 0x40, 8, 0x40, 12, 0x40, 16, 0x40, 4, 0x1, 8, 0x1, 12, 0x1, 16, 0x1, 4, 0x8, 8, 0x8, 12, 0x8, 16, 0x8 }; __device__ static Word16 order_MR59[] = { 0, 0x80, 0, 0x40, 0, 0x8, 0, 0x4, 0, 0x10, 0, 0x2, 0, 0x1, 0, 0x20, 1, 0x8, 1, 0x2, 1, 0x100, 1, 0x80, 1, 0x20, 1, 0x10, 1, 0x4, 1, 0x40, 1, 0x1, 3, 0x20, 11, 0x20, 3, 0x10, 11, 0x10, 3, 0x40, 11, 0x40, 3, 0x80, 11, 0x80, 3, 0x8, 11, 0x8, 7, 0x8, 15, 0x8, 6, 0x1, 10, 0x1, 14, 0x1, 18, 0x1, 3, 0x4, 11, 0x4, 7, 0x4, 15, 0x4, 6, 0x2, 10, 0x2, 14, 0x2, 18, 0x2, 7, 0x2, 15, 0x2, 3, 0x2, 11, 0x2, 3, 0x1, 11, 0x1, 6, 0x4, 10, 0x4, 14, 0x4, 18, 0x4, 6, 0x8, 10, 0x8, 14, 0x8, 18, 0x8, 6, 0x10, 10, 0x10, 14, 0x10, 18, 0x10, 2, 0x40, 2, 0x10, 2, 0x4, 2, 0x8, 2, 0x80, 2, 0x100, 2, 0x20, 2, 0x2, 17, 0x1, 5, 0x2, 13, 0x2, 17, 0x2, 9, 0x2, 9, 0x1, 5, 0x1, 13, 0x1, 2, 0x1, 6, 0x20, 10, 0x20, 14, 0x20, 18, 0x20, 7, 0x1, 15, 0x1, 4, 0x4, 
8, 0x4, 12, 0x4, 16, 0x4, 4, 0x8, 8, 0x8, 12, 0x8, 16, 0x8, 4, 0x40, 8, 0x40, 12, 0x40, 16, 0x40, 4, 0x80, 8, 0x80, 12, 0x80, 16, 0x80, 4, 0x100, 8, 0x100, 12, 0x100, 16, 0x100, 4, 0x1, 8, 0x1, 12, 0x1, 16, 0x1, 4, 0x2, 8, 0x2, 12, 0x2, 16, 0x2, 4, 0x10, 8, 0x10, 12, 0x10, 16, 0x10, 4, 0x20, 8, 0x20, 12, 0x20, 16, 0x20 }; __device__ static Word16 order_MR67[] = { 0, 0x80, 0, 0x40, 0, 0x8, 0, 0x10, 0, 0x4, 0, 0x2, 1, 0x8, 0, 0x1, 0, 0x20, 1, 0x100, 1, 0x80, 1, 0x20, 1, 0x2, 1, 0x10, 1, 0x4, 1, 0x40, 3, 0x20, 11, 0x20, 3, 0x10, 11, 0x10, 3, 0x40, 11, 0x40, 3, 0x80, 11, 0x80, 3, 0x8, 11, 0x8, 1, 0x1, 7, 0x8, 15, 0x8, 7, 0x4, 15, 0x4, 3, 0x4, 11, 0x4, 7, 0x2, 15, 0x2, 6, 0x40, 10, 0x40, 14, 0x40, 18, 0x40, 3, 0x2, 11, 0x2, 6, 0x8, 10, 0x8, 14, 0x8, 18, 0x8, 6, 0x4, 10, 0x4, 14, 0x4, 18, 0x4, 7, 0x1, 15, 0x1, 3, 0x1, 11, 0x1, 2, 0x40, 2, 0x4, 6, 0x2, 10, 0x2, 14, 0x2, 18, 0x2, 2, 0x10, 2, 0x8, 2, 0x80, 2, 0x100, 2, 0x20, 2, 0x2, 2, 0x1, 6, 0x10, 10, 0x10, 14, 0x10, 18, 0x10, 5, 0x1, 9, 0x1, 13, 0x1, 17, 0x1, 6, 0x1, 10, 0x1, 14, 0x1, 18, 0x1, 5, 0x2, 9, 0x2, 13, 0x2, 17, 0x2, 18, 0x20, 14, 0x20, 10, 0x20, 6, 0x20, 5, 0x4, 9, 0x4, 13, 0x4, 17, 0x4, 4, 0x4, 8, 0x4, 12, 0x4, 16, 0x4, 4, 0x20, 8, 0x20, 12, 0x20, 16, 0x20, 4, 0x40, 8, 0x40, 12, 0x40, 16, 0x40, 4, 0x200, 8, 0x200, 12, 0x200, 16, 0x200, 4, 0x400, 8, 0x400, 12, 0x400, 16, 0x400, 4, 0x1, 8, 0x1, 12, 0x1, 16, 0x1, 4, 0x2, 8, 0x2, 12, 0x2, 16, 0x2, 4, 0x8, 8, 0x8, 12, 0x8, 16, 0x8, 4, 0x10, 8, 0x10, 12, 0x10, 16, 0x10, 4, 0x80, 8, 0x80, 12, 0x80, 16, 0x80, 4, 0x100, 8, 0x100, 12, 0x100, 16, 0x100 }; __device__ static Word16 order_MR74[] = { 0, 0x80, 0, 0x40, 0, 0x20, 0, 0x10, 0, 0x8, 0, 0x4, 0, 0x2, 0, 0x1, 1, 0x100, 1, 0x80, 1, 0x40, 1, 0x20, 1, 0x10, 1, 0x8, 1, 0x4, 1, 0x2, 1, 0x1, 3, 0x80, 11, 0x80, 3, 0x40, 11, 0x40, 3, 0x20, 11, 0x20, 3, 0x10, 11, 0x10, 3, 0x8, 11, 0x8, 6, 0x40, 10, 0x40, 14, 0x40, 18, 0x40, 6, 0x20, 10, 0x20, 14, 0x20, 18, 0x20, 6, 0x8, 10, 0x8, 14, 0x8, 18, 0x8, 6, 0x4, 10, 0x4, 14, 0x4, 18, 0x4, 7, 0x10, 15, 0x10, 7, 0x8, 15, 0x8, 2, 0x10, 2, 0x8, 2, 0x4, 2, 0x100, 2, 0x80, 2, 0x40, 3, 0x4, 7, 0x4, 11, 0x4, 15, 0x4, 6, 0x2, 10, 0x2, 14, 0x2, 18, 0x2, 2, 0x20, 2, 0x2, 2, 0x1, 5, 0x1, 9, 0x1, 13, 0x1, 17, 0x1, 6, 0x1, 10, 0x1, 14, 0x1, 18, 0x1, 5, 0x2, 9, 0x2, 13, 0x2, 17, 0x2, 5, 0x4, 9, 0x4, 6, 0x10, 10, 0x10, 14, 0x10, 18, 0x10, 13, 0x4, 17, 0x4, 5, 0x8, 9, 0x8, 13, 0x8, 17, 0x8, 3, 0x2, 3, 0x1, 7, 0x2, 7, 0x1, 11, 0x2, 11, 0x1, 15, 0x2, 15, 0x1, 4, 0x20, 4, 0x10, 4, 0x8, 4, 0x4, 4, 0x2, 4, 0x1, 8, 0x20, 8, 0x10, 8, 0x8, 8, 0x4, 8, 0x2, 8, 0x1, 12, 0x20, 12, 0x10, 12, 0x8, 12, 0x4, 12, 0x2, 12, 0x1, 16, 0x20, 16, 0x10, 16, 0x8, 16, 0x4, 16, 0x2, 16, 0x1, 4, 0x1000, 8, 0x1000, 12, 0x1000, 16, 0x1000, 4, 0x800, 8, 0x800, 12, 0x800, 16, 0x800, 4, 0x400, 8, 0x400, 12, 0x400, 16, 0x400, 4, 0x200, 8, 0x200, 12, 0x200, 16, 0x200, 4, 0x100, 8, 0x100, 12, 0x100, 16, 0x100, 4, 0x80, 8, 0x80, 12, 0x80, 16, 0x80, 4, 0x40, 8, 0x40, 12, 0x40, 16, 0x40 }; __device__ static Word16 order_MR795[] = { 0, 0x1, 0, 0x2, 0, 0x4, 0, 0x8, 0, 0x10, 0, 0x20, 0, 0x40, 1, 0x8, 1, 0x2, 1, 0x100, 1, 0x80, 1, 0x20, 1, 0x10, 1, 0x4, 1, 0x40, 1, 0x1, 2, 0x40, 2, 0x10, 2, 0x4, 2, 0x8, 2, 0x80, 2, 0x100, 2, 0x20, 7, 0x10, 12, 0x10, 17, 0x10, 22, 0x10, 7, 0x8, 12, 0x8, 17, 0x8, 22, 0x8, 7, 0x4, 12, 0x4, 17, 0x4, 22, 0x4, 6, 0x8, 11, 0x8, 16, 0x8, 21, 0x8, 6, 0x4, 11, 0x4, 16, 0x4, 21, 0x4, 3, 0x80, 13, 0x80, 3, 0x40, 13, 0x40, 3, 0x20, 13, 0x20, 3, 0x10, 13, 0x10, 3, 0x8, 13, 0x8, 8, 0x20, 18, 0x20, 8, 0x10, 18, 0x10, 8, 0x8, 18, 0x8, 7, 0x2, 12, 
0x2, 17, 0x2, 22, 0x2, 3, 0x4, 13, 0x4, 8, 0x4, 18, 0x4, 0, 0x80, 0, 0x100, 2, 0x2, 2, 0x1, 3, 0x2, 13, 0x2, 3, 0x1, 13, 0x1, 8, 0x2, 18, 0x2, 8, 0x1, 18, 0x1, 6, 0x2, 11, 0x2, 16, 0x2, 21, 0x2, 7, 0x1, 12, 0x1, 17, 0x1, 22, 0x1, 6, 0x1, 11, 0x1, 16, 0x1, 21, 0x1, 15, 0x1, 15, 0x2, 15, 0x4, 4, 0x2, 9, 0x2, 14, 0x2, 19, 0x2, 4, 0x10, 9, 0x10, 14, 0x10, 19, 0x10, 4, 0x80, 9, 0x80, 14, 0x80, 19, 0x80, 4, 0x800, 9, 0x800, 14, 0x800, 19, 0x800, 15, 0x8, 20, 0x1, 20, 0x2, 20, 0x4, 20, 0x8, 10, 0x1, 10, 0x2, 10, 0x4, 10, 0x8, 5, 0x1, 5, 0x2, 5, 0x4, 5, 0x8, 4, 0x1, 4, 0x4, 4, 0x8, 4, 0x20, 4, 0x100, 4, 0x1000, 9, 0x1, 9, 0x4, 9, 0x8, 9, 0x20, 9, 0x100, 9, 0x1000, 14, 0x1, 14, 0x4, 14, 0x8, 14, 0x20, 14, 0x100, 14, 0x1000, 19, 0x1, 19, 0x4, 19, 0x8, 19, 0x20, 19, 0x100, 19, 0x1000, 4, 0x40, 9, 0x40, 14, 0x40, 19, 0x40, 4, 0x400, 9, 0x400, 14, 0x400, 19, 0x400, 4, 0x200, 9, 0x200, 14, 0x200, 19, 0x200, 0, 0x1, 0, 0x2, 0, 0x4, 0, 0x8, 0, 0x10, 0, 0x20, 0, 0x40, 1, 0x8, 1, 0x2, 1, 0x100, 1, 0x80, 1, 0x20, 1, 0x10, 1, 0x4, 1, 0x40, 1, 0x1, 2, 0x40, 2, 0x10, 2, 0x4, 2, 0x8, 2, 0x80, 2, 0x100, 2, 0x20, 7, 0x10, 12, 0x10, 17, 0x10, 22, 0x10, 7, 0x8, 12, 0x8, 17, 0x8, 22, 0x8, 7, 0x4, 12, 0x4, 17, 0x4, 22, 0x4, 6, 0x8, 11, 0x8, 16, 0x8, 21, 0x8, 6, 0x4, 11, 0x4, 16, 0x4, 21, 0x4, 3, 0x80, 13, 0x80, 3, 0x40, 13, 0x40, 3, 0x20, 13, 0x20, 3, 0x10, 13, 0x10, 3, 0x8, 13, 0x8, 8, 0x20, 18, 0x20, 8, 0x10, 18, 0x10, 8, 0x8, 18, 0x8, 7, 0x2, 12, 0x2, 17, 0x2, 22, 0x2, 3, 0x4, 13, 0x4, 8, 0x4, 18, 0x4, 0, 0x80, 0, 0x100, 2, 0x2, 2, 0x1, 3, 0x2, 13, 0x2, 3, 0x1, 13, 0x1, 8, 0x2, 18, 0x2, 8, 0x1, 18, 0x1, 6, 0x2, 11, 0x2, 16, 0x2, 21, 0x2, 7, 0x1, 12, 0x1, 17, 0x1, 22, 0x1, 6, 0x1, 11, 0x1, 16, 0x1, 21, 0x1, 15, 0x1, 15, 0x2, 15, 0x4, 4, 0x2, 9, 0x2, 14, 0x2, 19, 0x2, 4, 0x10, 9, 0x10, 14, 0x10, 19, 0x10, 4, 0x80, 9, 0x80, 14, 0x80, 19, 0x80, 4, 0x800, 9, 0x800, 14, 0x800, 19, 0x800, 15, 0x8, 20, 0x1, 20, 0x2, 20, 0x4, 20, 0x8, 10, 0x1, 10, 0x2, 10, 0x4, 10, 0x8, 5, 0x1, 5, 0x2, 5, 0x4, 5, 0x8, 4, 0x1, 4, 0x4, 4, 0x8, 4, 0x20, 4, 0x100, 4, 0x1000, 9, 0x1, 9, 0x4, 9, 0x8, 9, 0x20, 9, 0x100, 9, 0x1000, 14, 0x1, 14, 0x4, 14, 0x8, 14, 0x20, 14, 0x100, 14, 0x1000, 19, 0x1, 19, 0x4, 19, 0x8, 19, 0x20, 19, 0x100, 19, 0x1000, 4, 0x40, 9, 0x40, 14, 0x40, 19, 0x40, 4, 0x400, 9, 0x400, 14, 0x400, 19, 0x400, 4, 0x200, 9, 0x200, 14, 0x200, 19, 0x200 }; __device__ static Word16 order_MR102[] = { 0, 0x1, 0, 0x2, 0, 0x4, 0, 0x8, 0, 0x10, 0, 0x20, 0, 0x40, 0, 0x80, 1, 0x1, 1, 0x2, 1, 0x4, 1, 0x8, 1, 0x10, 1, 0x20, 1, 0x40, 1, 0x80, 1, 0x100, 3, 0x80, 3, 0x40, 3, 0x20, 3, 0x10, 3, 0x8, 3, 0x4, 21, 0x80, 21, 0x40, 21, 0x20, 21, 0x10, 21, 0x8, 21, 0x4, 12, 0x10, 12, 0x8, 30, 0x10, 30, 0x8, 11, 0x40, 11, 0x8, 11, 0x4, 20, 0x40, 20, 0x8, 20, 0x4, 29, 0x40, 29, 0x8, 29, 0x4, 38, 0x40, 38, 0x8, 38, 0x4, 3, 0x2, 3, 0x1, 21, 0x2, 21, 0x1, 12, 0x4, 12, 0x2, 30, 0x4, 30, 0x2, 11, 0x20, 20, 0x20, 29, 0x20, 38, 0x20, 2, 0x40, 2, 0x4, 2, 0x10, 2, 0x8, 2, 0x80, 2, 0x100, 2, 0x20, 2, 0x2, 2, 0x1, 7, 0x1, 6, 0x1, 5, 0x1, 4, 0x1, 16, 0x1, 15, 0x1, 14, 0x1, 13, 0x1, 25, 0x1, 24, 0x1, 23, 0x1, 22, 0x1, 34, 0x1, 33, 0x1, 32, 0x1, 31, 0x1, 11, 0x2, 11, 0x10, 11, 0x1, 20, 0x2, 20, 0x10, 20, 0x1, 29, 0x2, 29, 0x10, 29, 0x1, 38, 0x2, 38, 0x10, 38, 0x1, 12, 0x1, 30, 0x1, 17, 0x200, 17, 0x100, 18, 0x100, 18, 0x200, 18, 0x80, 17, 0x80, 18, 0x20, 17, 0x20, 17, 0x40, 18, 0x40, 19, 0x40, 19, 0x20, 18, 0x10, 19, 0x8, 17, 0x10, 19, 0x10, 17, 0x8, 18, 0x8, 26, 0x200, 26, 0x100, 27, 0x100, 27, 0x200, 27, 0x80, 26, 0x80, 27, 0x20, 26, 0x20, 26, 0x40, 27, 0x40, 28, 
0x40, 28, 0x20, 27, 0x10, 28, 0x8, 26, 0x10, 28, 0x10, 26, 0x8, 27, 0x8, 35, 0x200, 35, 0x100, 36, 0x100, 36, 0x200, 36, 0x80, 35, 0x80, 36, 0x20, 35, 0x20, 35, 0x40, 36, 0x40, 37, 0x40, 37, 0x20, 36, 0x10, 37, 0x8, 35, 0x10, 37, 0x10, 35, 0x8, 36, 0x8, 8, 0x200, 8, 0x100, 9, 0x100, 9, 0x200, 9, 0x80, 8, 0x80, 9, 0x20, 8, 0x20, 8, 0x40, 9, 0x40, 10, 0x40, 10, 0x20, 9, 0x10, 10, 0x8, 8, 0x10, 10, 0x10, 8, 0x8, 9, 0x8, 37, 0x4, 35, 0x1, 36, 0x1, 37, 0x1, 35, 0x4, 37, 0x2, 35, 0x2, 36, 0x4, 36, 0x2, 28, 0x4, 26, 0x1, 27, 0x1, 28, 0x1, 26, 0x4, 28, 0x2, 26, 0x2, 27, 0x4, 27, 0x2, 19, 0x4, 17, 0x1, 18, 0x1, 19, 0x1, 17, 0x4, 19, 0x2, 17, 0x2, 18, 0x4, 18, 0x2, 10, 0x4, 8, 0x1, 9, 0x1, 10, 0x1, 8, 0x4, 10, 0x2, 8, 0x2, 9, 0x4, 9, 0x2 }; __device__ static Word16 order_MR122[] = { 0, 0x40, 0, 0x20, 0, 0x10, 0, 0x8, 0, 0x4, 0, 0x2, 0, 0x1, 1, 0x80, 1, 0x40, 1, 0x20, 1, 0x10, 1, 0x8, 1, 0x4, 1, 0x2, 1, 0x1, 2, 0x1, 2, 0x100, 2, 0x80, 2, 0x40, 2, 0x20, 2, 0x10, 2, 0x8, 2, 0x4, 2, 0x2, 3, 0x80, 3, 0x40, 3, 0x20, 3, 0x10, 3, 0x8, 5, 0x100, 31, 0x100, 5, 0x80, 31, 0x80, 5, 0x40, 31, 0x40, 5, 0x20, 31, 0x20, 5, 0x10, 31, 0x10, 5, 0x8, 31, 0x8, 5, 0x4, 31, 0x4, 5, 0x2, 31, 0x2, 5, 0x1, 31, 0x1, 6, 0x8, 19, 0x8, 32, 0x8, 45, 0x8, 6, 0x4, 19, 0x4, 32, 0x4, 45, 0x4, 6, 0x2, 19, 0x2, 32, 0x2, 45, 0x2, 17, 0x10, 30, 0x10, 43, 0x10, 56, 0x10, 17, 0x8, 30, 0x8, 43, 0x8, 56, 0x8, 17, 0x4, 30, 0x4, 43, 0x4, 56, 0x4, 18, 0x20, 44, 0x20, 18, 0x10, 44, 0x10, 18, 0x8, 44, 0x8, 18, 0x4, 44, 0x4, 18, 0x2, 44, 0x2, 3, 0x4, 3, 0x2, 3, 0x1, 4, 0x20, 4, 0x10, 4, 0x8, 4, 0x4, 6, 0x1, 19, 0x1, 32, 0x1, 45, 0x1, 17, 0x2, 30, 0x2, 43, 0x2, 56, 0x2, 7, 0x8, 20, 0x8, 33, 0x8, 46, 0x8, 8, 0x8, 21, 0x8, 34, 0x8, 47, 0x8, 17, 0x1, 30, 0x1, 43, 0x1, 56, 0x1, 9, 0x8, 22, 0x8, 35, 0x8, 48, 0x8, 10, 0x8, 23, 0x8, 36, 0x8, 49, 0x8, 11, 0x8, 24, 0x8, 37, 0x8, 50, 0x8, 4, 0x2, 4, 0x1, 7, 0x1, 7, 0x2, 7, 0x4, 8, 0x1, 8, 0x2, 8, 0x4, 9, 0x1, 9, 0x2, 9, 0x4, 10, 0x1, 10, 0x2, 10, 0x4, 11, 0x1, 11, 0x2, 11, 0x4, 20, 0x1, 20, 0x2, 20, 0x4, 21, 0x1, 21, 0x2, 21, 0x4, 22, 0x1, 22, 0x2, 22, 0x4, 23, 0x1, 23, 0x2, 23, 0x4, 24, 0x1, 24, 0x2, 24, 0x4, 33, 0x1, 33, 0x2, 33, 0x4, 34, 0x1, 34, 0x2, 34, 0x4, 35, 0x1, 35, 0x2, 35, 0x4, 36, 0x1, 36, 0x2, 36, 0x4, 37, 0x1, 37, 0x2, 37, 0x4, 46, 0x1, 46, 0x2, 46, 0x4, 47, 0x1, 47, 0x2, 47, 0x4, 48, 0x1, 48, 0x2, 48, 0x4, 49, 0x1, 49, 0x2, 49, 0x4, 50, 0x1, 50, 0x2, 50, 0x4, 12, 0x1, 12, 0x2, 12, 0x4, 13, 0x1, 13, 0x2, 13, 0x4, 14, 0x1, 14, 0x2, 14, 0x4, 15, 0x1, 15, 0x2, 15, 0x4, 16, 0x1, 16, 0x2, 16, 0x4, 25, 0x1, 25, 0x2, 25, 0x4, 26, 0x1, 26, 0x2, 26, 0x4, 27, 0x1, 27, 0x2, 27, 0x4, 28, 0x1, 28, 0x2, 28, 0x4, 29, 0x1, 29, 0x2, 29, 0x4, 38, 0x1, 38, 0x2, 38, 0x4, 39, 0x1, 39, 0x2, 39, 0x4, 40, 0x1, 40, 0x2, 40, 0x4, 41, 0x1, 41, 0x2, 41, 0x4, 42, 0x1, 42, 0x2, 42, 0x4, 51, 0x1, 51, 0x2, 51, 0x4, 52, 0x1, 52, 0x2, 52, 0x4, 53, 0x1, 53, 0x2, 53, 0x4, 54, 0x1, 54, 0x2, 54, 0x4, 55, 0x1, 55, 0x2, 55, 0x4, 18, 0x1, 44, 0x1 }; __device__ static Word16 order_MRDTX[] = { 0, 0x4, 0, 0x2, 0, 0x1, 1, 0x80, 1, 0x40, 1, 0x20, 1, 0x10, 1, 0x8, 1, 0x4, 1, 0x2, 1, 0x1, 2, 0x100, 2, 0x80, 2, 0x40, 2, 0x20, 2, 0x10, 2, 0x8, 2, 0x4, 2, 0x2, 2, 0x1, 3, 0x100, 3, 0x80, 3, 0x40, 3, 0x20, 3, 0x10, 3, 0x8, 3, 0x4, 3, 0x2, 3, 0x1, 4, 0x20, 4, 0x10, 4, 0x8, 4, 0x4, 4, 0x2, 4, 0x1 }; /* Homing frames for the decoder */ __device__ static const Word16 dhf_MR475[PRMNO_MR475] = { 0x00F8, 0x009D, 0x001C, 0x0066, 0x0000, 0x0003, 0x0028, 0x000F, 0x0038, 0x0001, 0x000F, 0x0031, 0x0002, 0x0008, 0x000F, 0x0026, 0x0003 }; __device__ static 
const Word16 dhf_MR515[PRMNO_MR515] = { 0x00F8, 0x009D, 0x001C, 0x0066, 0x0000, 0x0003, 0x0037, 0x000F, 0x0000, 0x0003, 0x0005, 0x000F, 0x0037, 0x0003, 0x0037, 0x000F, 0x0023, 0x0003, 0x001F }; __device__ static const Word16 dhf_MR59[PRMNO_MR59] = { 0x00F8, 0x00E3, 0x002F, 0x00BD, 0x0000, 0x0003, 0x0037, 0x000F, 0x0001, 0x0003, 0x000F, 0x0060, 0x00F9, 0x0003, 0x0037, 0x000F, 0x0000, 0x0003, 0x0037 }; __device__ static const Word16 dhf_MR67[PRMNO_MR67] = { 0x00F8, 0x00E3, 0x002F, 0x00BD, 0x0002, 0x0007, 0x0000, 0x000F, 0x0098, 0x0007, 0x0061, 0x0060, 0x05C5, 0x0007, 0x0000, 0x000F, 0x0318, 0x0007, 0x0000 }; __device__ static const Word16 dhf_MR74[PRMNO_MR74] = { 0x00F8, 0x00E3, 0x002F, 0x00BD, 0x0006, 0x000F, 0x0000, 0x001B, 0x0208, 0x000F, 0x0062, 0x0060, 0x1BA6, 0x000F, 0x0000, 0x001B, 0x0006, 0x000F, 0x0000 }; __device__ static const Word16 dhf_MR795[PRMNO_MR795] = { 0x00C2, 0x00E3, 0x002F, 0x00BD, 0x0006, 0x000F, 0x000A, 0x0000, 0x0039, 0x1C08, 0x0007, 0x000A, 0x000B, 0x0063, 0x11A6, 0x000F, 0x0001, 0x0000, 0x0039, 0x09A0, 0x000F, 0x0002, 0x0001 }; __device__ static const Word16 dhf_MR102[PRMNO_MR102] = { 0x00F8, 0x00E3, 0x002F, 0x0045, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x001B, 0x0000, 0x0001, 0x0000, 0x0001, 0x0326, 0x00CE, 0x007E, 0x0051, 0x0062, 0x0000, 0x0000, 0x0000, 0x0000, 0x015A, 0x0359, 0x0076, 0x0000, 0x001B, 0x0000, 0x0000, 0x0000, 0x0000, 0x017C, 0x0215, 0x0038, 0x0030 }; __device__ static const Word16 dhf_MR122[PRMNO_MR122] = { 0x0004, 0x002A, 0x00DB, 0x0096, 0x002A, 0x0156, 0x000B, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0036, 0x000B, 0x0000, 0x000F, 0x000E, 0x000C, 0x000D, 0x0000, 0x0001, 0x0005, 0x0007, 0x0001, 0x0008, 0x0024, 0x0000, 0x0001, 0x0000, 0x0005, 0x0006, 0x0001, 0x0002, 0x0004, 0x0007, 0x0004, 0x0002, 0x0003, 0x0036, 0x000B, 0x0000, 0x0002, 0x0004, 0x0000, 0x0003, 0x0006, 0x0001, 0x0007, 0x0006, 0x0005, 0x0000 }; /* parameter sizes (# of bits), one table per mode */ __device__ static const Word16 bitno_MR475[PRMNO_MR475] = { 8, 8, 7, /* LSP VQ */ 8, 7, 2, 8, /* first subframe */ 4, 7, 2, /* second subframe */ 4, 7, 2, 8, /* third subframe */ 4, 7, 2 /* fourth subframe */ }; __device__ static const Word16 bitno_MR515[PRMNO_MR515] = { 8, 8, 7, /* LSP VQ */ 8, 7, 2, 6, /* first subframe */ 4, 7, 2, 6, /* second subframe */ 4, 7, 2, 6, /* third subframe */ 4, 7, 2, 6 /* fourth subframe */ }; __device__ static const Word16 bitno_MR59[PRMNO_MR59] = { 8, 9, 9, /* LSP VQ */ 8, 9, 2, 6, /* first subframe */ 4, 9, 2, 6, /* second subframe */ 8, 9, 2, 6, /* third subframe */ 4, 9, 2, 6 /* fourth subframe */ }; __device__ static const Word16 bitno_MR67[PRMNO_MR67] = { 8, 9, 9, /* LSP VQ */ 8, 11, 3, 7, /* first subframe */ 4, 11, 3, 7, /* second subframe */ 8, 11, 3, 7, /* third subframe */ 4, 11, 3, 7 /* fourth subframe */ }; __device__ static const Word16 bitno_MR74[PRMNO_MR74] = { 8, 9, 9, /* LSP VQ */ 8, 13, 4, 7, /* first subframe */ 5, 13, 4, 7, /* second subframe */ 8, 13, 4, 7, /* third subframe */ 5, 13, 4, 7 /* fourth subframe */ }; __device__ static const Word16 bitno_MR795[PRMNO_MR795] = { 9, 9, 9, /* LSP VQ */ 8, 13, 4, 4, 5, /* first subframe */ 6, 13, 4, 4, 5, /* second subframe */ 8, 13, 4, 4, 5, /* third subframe */ 6, 13, 4, 4, 5 /* fourth subframe */ }; __device__ static const Word16 bitno_MR102[PRMNO_MR102] = { 8, 9, 9, /* LSP VQ */ 8, 1, 1, 1, 1, 10, 10, 7, 7, /* first subframe */ 5, 1, 1, 1, 1, 10, 10, 7, 7, /* second subframe */ 8, 1, 1, 1, 1, 10, 10, 7, 7, /* third 
subframe */ 5, 1, 1, 1, 1, 10, 10, 7, 7 /* fourth subframe */ }; __device__ static const Word16 bitno_MR122[PRMNO_MR122] = { 7, 8, 9, 8, 6, /* LSP VQ */ 9, 4, 4, 4, 4, 4, 4, 3, 3, 3, 3, 3, 5, /* first subframe */ 6, 4, 4, 4, 4, 4, 4, 3, 3, 3, 3, 3, 5, /* second subframe */ 9, 4, 4, 4, 4, 4, 4, 3, 3, 3, 3, 3, 5, /* third subframe */ 6, 4, 4, 4, 4, 4, 4, 3, 3, 3, 3, 3, 5 /* fourth subframe */ }; __device__ static const Word16 bitno_MRDTX[PRMNO_MRDTX] = { 3, 8, 9, 9, 6 }; #define PRMNO_MR475 17 #define PRMNO_MR515 19 #define PRMNO_MR59 19 #define PRMNO_MR67 19 #define PRMNO_MR74 19 #define PRMNO_MR795 23 #define PRMNO_MR102 39 #define PRMNO_MR122 57 #define PRMNO_MRDTX 5 /* * tables */ /* level adjustment for different modes Q11 */ __device__ static const Word16 dtx_log_en_adjust[9] = { -1023, /* MR475 */ -878, /* MR515 */ -732, /* MR59 */ -586, /* MR67 */ -440, /* MR74 */ -294, /* MR795 */ -148, /* MR102 */ 0, /* MR122 */ 0, /* MRDTX */ }; /* attenuation factors for codebook gain */ __device__ static const Word32 cdown[7] = { 32767, 32112, 32112, 32112, 32112, 32112, 22937 }; /* attenuation factors for adaptive codebook gain */ __device__ static const Word32 pdown[7] = { 32767, 32112, 32112, 26214, 9830, 6553, 6553 }; /* algebraic code book gain MA predictor coefficients */ __device__ static const Word32 pred[NPRED] = { 5571, 4751, 2785, 1556 }; /* algebraic code book gain MA predictor coefficients (MR122) */ __device__ static const Word32 pred_MR122[NPRED] = { 44, 37, 22, 12 }; __device__ static const Word32 gamma4_gamma3_MR122[M] = { 22938, 16057, 11240, 7868, 5508, 3856, 2699, 1889, 1322, 925 }; __device__ static const Word32 gamma3[M] = { 18022, 9912, 5451, 2998, 1649, 907, 499, 274, 151, 83 }; __device__ static const Word32 gamma4_MR122[M] = { 24576, 18432, 13824, 10368, 7776, 5832, 4374, 3281, 2461, 1846 }; /* adaptive codebook gain quantization table (MR122, MR795) */ #define NB_QUA_PITCH 16 __device__ static const Word32 qua_gain_pitch[NB_QUA_PITCH] = { 0, 3277, 6556, 8192, 9830, 11469, 12288, 13107, 13926, 14746, 15565, 16384, 17203, 18022, 18842, 19661 }; /* fixed codebook gain quantization table (MR122, MR795) */ #define NB_QUA_CODE 32 __device__ static const Word32 qua_gain_code[NB_QUA_CODE * 3] = { /* gain factor (g_fac) and quantized energy error (qua_ener_MR122, qua_ener) * are stored: * * qua_ener_MR122 = log2(g_fac) (not the rounded floating point value, but * the value the original EFR algorithm * calculates from g_fac [using Log2]) * qua_ener = 20*log10(g_fac); (rounded floating point value) * * * g_fac (Q11), * qua_ener_MR122 (Q10), * qua_ener (Q10) */ 159, -3776, -22731, 206, -3394, -20428, 268, -3005, -18088, 349, -2615, -15739, 419, -2345, -14113, 482, -2138, -12867, 554, -1932, -11629, 637, -1726, -10387, 733, -1518, -9139, 842, -1314, -7906, 969, -1106, -6656, 1114, -900, -5416, 1281, -694, -4173, 1473, -487, -2931, 1694, -281, -1688, 1948, -75, -445, 2241, 133, 801, 2577, 339, 2044, 2963, 545, 3285, 3408, 752, 4530, 3919, 958, 5772, 4507, 1165, 7016, 5183, 1371, 8259, 5960, 1577, 9501, 6855, 1784, 10745, 7883, 1991, 11988, 9065, 2197, 13231, 10425, 2404, 14474, 12510, 2673, 16096, 16263, 3060, 18429, 21142, 3448, 20763, 27485, 3836, 23097 }; /* gray coding table */ __device__ static const Word8 gray[8] = { 0, 1, 3, 2, 6, 4, 5, 7 }; /* gray decoding table */ __device__ static const Word32 dgray[8] = { 0, 1, 3, 2, 5, 6, 4, 7 }; /* table[i] = sqrt((i+16)*2^-6) * 2^15, i.e. 
sqrt(x) scaled Q15 */ __device__ static const Word32 sqrt_table[49] = { 16384, 16888, 17378, 17854, 18318, 18770, 19212, 19644, 20066, 20480, 20886, 21283, 21674, 22058, 22435, 22806, 23170, 23530, 23884, 24232, 24576, 24915, 25249, 25580, 25905, 26227, 26545, 26859, 27170, 27477, 27780, 28081, 28378, 28672, 28963, 29251, 29537, 29819, 30099, 30377, 30652, 30924, 31194, 31462, 31727, 31991, 32252, 32511, 32767 }; __device__ static const Word32 inv_sqrt_table[49] = { 32767, 31790, 30894, 30070, 29309, 28602, 27945, 27330, 26755, 26214, 25705, 25225, 24770, 24339, 23930, 23541, 23170, 22817, 22479, 22155, 21845, 21548, 21263, 20988, 20724, 20470, 20225, 19988, 19760, 19539, 19326, 19119, 18919, 18725, 18536, 18354, 18176, 18004, 17837, 17674, 17515, 17361, 17211, 17064, 16921, 16782, 16646, 16514, 16384 }; /* table used inbase 2 logharithm computation */ __device__ static const Word32 log2_table[33] = { 0, 1455, 2866, 4236, 5568, 6863, 8124, 9352, 10549, 11716, 12855, 13967, 15054, 16117, 17156, 18172, 19167, 20142, 21097, 22033, 22951, 23852, 24735, 25603, 26455, 27291, 28113, 28922, 29716, 30497, 31266, 32023, 32767 }; /* table used in 2 to the power computation */ __device__ static const Word32 pow2_table[33] = { 16384, 16743, 17109, 17484, 17867, 18258, 18658, 19066, 19484, 19911, 20347, 20792, 21247, 21713, 22188, 22674, 23170, 23678, 24196, 24726, 25268, 25821, 26386, 26964, 27554, 28158, 28774, 29405, 30048, 30706, 31379, 32066, 32767 }; /* table of cos(x) */ __device__ static const Word32 cos_table[65] = { 32767, 32729, 32610, 32413, 32138, 31786, 31357, 30853, 30274, 29622, 28899, 28106, 27246, 26320, 25330, 24279, 23170, 22006, 20788, 19520, 18205, 16846, 15447, 14010, 12540, 11039, 9512, 7962, 6393, 4808, 3212, 1608, 0, -1608, -3212, -4808, -6393, -7962, -9512, -11039, -12540, -14010, -15447, -16846, -18205, -19520, -20788, -22006, -23170, -24279, -25330, -26320, -27246, -28106, -28899, -29622, -30274, -30853, -31357, -31786, -32138, -32413, -32610, -32729, -32768 }; /* slope used to compute y = acos(x) */ __device__ static const Word32 acos_slope[64] = { -26887, -8812, -5323, -3813, -2979, -2444, -2081, -1811, -1608, -1450, -1322, -1219, -1132, -1059, -998, -946, -901, -861, -827, -797, -772, -750, -730, -713, -699, -687, -677, -668, -662, -657, -654, -652, -652, -654, -657, -662, -668, -677, -687, -699, -713, -730, -750, -772, -797, -827, -861, -901, -946, -998, -1059, -1132, -1219, -1322, -1450, -1608, -1811, -2081, -2444, -2979, -3813, -5323, -8812, -26887 }; /* All impulse responses are in Q15 */ /* phase dispersion impulse response (MR795) */ __device__ static const Word32 ph_imp_low_MR795[] = { 26777, 801, 2505, -683, -1382, 582, 604, -1274, 3511, -5894, 4534, -499, -1940, 3011, -5058, 5614, -1990, -1061, -1459, 4442, -700, -5335, 4609, 452, -589, -3352, 2953, 1267, -1212, -2590, 1731, 3670, -4475, -975, 4391, -2537, 949, -1363, -979, 5734 }; /* phase dispersion impulse response (MR795) */ __device__ static const Word32 ph_imp_mid_MR795[] = { 30274, 3831, -4036, 2972, -1048, -1002, 2477, -3043, 2815, -2231, 1753, -1611, 1714, -1775, 1543, -1008, 429, -169, 472, -1264, 2176, -2706, 2523, -1621, 344, 826, -1529, 1724, -1657, 1701, -2063, 2644, -3060, 2897, -1978, 557, 780, -1369, 842, 655 }; /* phase dispersion impulse response (MR475 - MR67) */ __device__ static const Word32 ph_imp_low[] = { 14690, 11518, 1268, -2761, -5671, 7514, -35, -2807, -3040, 4823, 2952, -8424, 3785, 1455, 2179, -8637, 8051, -2103, -1454, 777, 1108, -2385, 2254, -363, -674, -2103, 6046, -5681, 1072, 
3123, -5058, 5312, -2329, -3728, 6924, -3889, 675, -1775, 29, 10145 }; /* phase dispersion impulse response (MR475 - MR67) */ __device__ static const Word32 ph_imp_mid[] = { 30274, 3831, -4036, 2972, -1048, -1002, 2477, -3043, 2815, -2231, 1753, -1611, 1714, -1775, 1543, -1008, 429, -169, 472, -1264, 2176, -2706, 2523, -1621, 344, 826, -1529, 1724, -1657, 1701, -2063, 2644, -3060, 2897, -1978, 557, 780, -1369, 842, 655 }; /* initialization table for the MA predictor in DTX */ #define PAST_RQ_INIT_SIZE 8 /* initalization table for MA predictor in dtx mode */ __device__ static const Word32 past_rq_init[80] = { -258, -318, -439, -634, -656, -773, -711, -502, -268, -193, -2, 125, 122, -39, -9, 105, 129, 283, 372, 575, -277, -324, -197, -487, -445, -362, -292, -27, 177, 543, 342, 517, 516, 130, 27, -104, -120, -140, -74, -56, -564, -943, -1520, -965, -814, -526, -322, -2, 159, 657, -312, -284, -386, -597, -493, -526, -418, -229, 105, 449, -557, -870, -1075, -919, -950, -752, -709, -316, 62, 486, -314, -191, -203, -330, -160, -103, -51, 131, 338, 515 }; #define ALPHA 29491 #define ONE_ALPHA 3277 /* LSF means (not in MR122) */ __device__ static const Word32 mean_lsf_3[10] = { 1546, 2272, 3778, 5488, 6972, 8382, 10047, 11229, 12766, 13714 }; #define ALPHA_122 31128 #define ONE_ALPHA_122 1639 /* LSF means ->normalize frequency domain */ __device__ static const Word32 mean_lsf_5[10] = { 1384, 2077, 3420, 5108, 6742, 8122, 9863, 11092, 12714, 13701 }; /* LSF prediction factors (not in MR122) */ __device__ static const Word32 pred_fac[10] = { 9556, 10769, 12571, 13292, 14381, 11651, 10588, 9767, 8593, 6484 }; #define DICO1_SIZE_3 256 #define DICO2_SIZE_3 512 #define DICO3_SIZE_3 512 /* 1st LSF quantizer (not in MR122 and MR795) */ __device__ static const Word32 dico1_lsf_3[] = { 6, 82, -131, 154, -56, -735, 183, -65, -265, 9, -210, -361, 113, 718, 1817, 1010, 1214, 1573, 857, 1333, 2276, 827, 1568, 1933, 717, 1989, 2206, 838, 1172, 1823, 721, 1000, 2154, 286, 476, 1509, -247, -531, 230, 147, -82, 569, 26, -177, -944, -27, -273, 692, -164, -264, -183, 224, 790, 1039, 899, 946, 601, 485, 771, 1150, 524, 677, 903, -140, 375, 778, 410, 676, 429, 301, 530, 1009, 719, 646, 38, 226, 367, 40, 145, -45, -505, 290, 121, -121, 302, 127, 166, -124, -383, -956, -358, -455, -977, 715, 878, 894, 978, 923, 211, 477, 272, 64, 188, -78, 17, -143, -65, 38, 643, 586, 621, -134, -426, -651, 347, 545, 2820, 1188, 2726, 2442, 142, -80, 1735, 283, 130, 461, -262, -399, -1145, -411, 155, 430, 329, 375, 779, 53, -226, -139, -129, -236, 1682, 285, 744, 1327, 738, 697, 1664, 312, 409, 266, 325, 720, 135, 1, 221, 453, 8, 203, 145, 299, 640, 760, 29, 468, 638, 103, 429, 379, 420, 954, 932, 1326, 1210, 1258, 704, 1012, 1152, -166, -444, -266, -316, -130, -376, 191, 1151, 1904, -240, -543, -1260, -112, 268, 1207, 70, 1062, 1583, 278, 1360, 1574, -258, -272, -768, 19, 563, 2240, -3, -265, 135, -295, -591, -388, 140, 354, -206, -260, -504, -795, -433, -718, -1319, 109, 331, 962, -429, -87, 652, -296, 426, 1019, -239, 775, 851, 489, 1334, 1073, -334, -332, 25, 543, 1206, 1807, 326, 61, 727, 578, 849, 1405, -208, -277, 329, -152, 64, 669, -434, -678, -727, -454, -71, 251, 605, 480, 254, -482, 11, 996, -289, 395, 486, 722, 1049, 1440, -30, -316, -786, -106, -115, -619, 861, 1474, 1412, 1055, 1366, 1184, 812, 1237, 925, 42, -251, -576, 342, 141, -454, -168, -80, 1359, -342, -656, -1763, 100, 821, 725, 990, 747, 800, 332, 440, 568, 663, 379, 852, 112, 165, -369, 597, 910, 282, -8, 834, 1281, -352, 572, 695, 462, 2246, 1806, 345, 190, 1374, 
416, 915, 2166, 168, -82, 280, -516, -446, 840, 47, 533, 44, -362, -711, -1143, 22, 193, 1472, -85, 233, 1813, -62, 579, 1504, 550, 944, 1749, 723, 650, 1148, 972, 884, 1395, -425, 643, 0, 1000, 952, 1098, 249, 1446, 672, -334, -87, 2172, -554, 1882, 2672, 140, 1826, 1853, 920, 1749, 2590, 1076, 1933, 2038, -137, -443, -1555, 1269, 1174, 468, -493, -122, 1521, -451, 1033, 1214, 482, 1695, 1118, 815, 649, 384, -446, -692, 107, -319, -605, -118, -207, -505, 525, -468, -12, 2736, 75, 1934, 1305, 880, 2358, 2267, 1285, 1575, 2004, -48, -304, -1186, -435, -461, -251, -366, -404, -547, -289, -605, -597, -538, -810, -165, -120, 3, 356, 639, 1241, 1502, 96, 177, 750, -435, -585, -1174, -356, 109, -79, -485, 288, 2005, 9, 1116, 731, 880, 2134, 946, -265, 1585, 1065, 1157, 1210, 843, -498, -668, 431, 374, 321, -229, 1440, 2101, 1381, 449, 461, 1155, -105, 39, -384, -263, 367, 182, -371, -660, 773, -188, 1151, 971, 1333, 1632, 1435, 774, 1267, 1221, -482, -832, -1489, -237, -210, 860, 890, 1615, 1064, 472, 1062, 1192, 185, 1077, 989, -568, -992, -1704, -449, -902, -2043, -142, -377, -458, -210, -554, -1029, -11, 1133, 2265, -329, -675, -893, -250, 657, 1187, 519, 1510, 1779, 520, 539, 1403, 527, 1421, 1302, -563, -871, -1248, -147, -463, 879, -76, 2334, 2840, 563, 2573, 2385, 632, 1926, 2920, 719, 2023, 1840, -545, -723, 1108, 129, -125, 884, 1417, 1632, 925, -94, 1566, 1751, -341, 1533, 1551, 591, 395, -274, -76, 981, 2831, 153, 2985, 1844, 1032, 2565, 2749, 1508, 2832, 1879, 791, 1199, 538, -190, -453, 1489, -278, -548, 1158, -245, 1941, 2044, 1024, 1560, 1650, 512, 253, 466, -62, -323, 1151, -473, -376, 507, -433, 1380, 2162, 899, 1943, 1445, 134, 704, 440, 460, 525, -28, -450, 279, 1338, 0, 971, 252, -445, -627, -991, -348, -602, -1424, 398, 712, 1656, -107, 314, -178, 93, 2226, 2238, 518, 849, 656, -462, -711, -447, 174, -34, 1191, -119, 42, 1005, -372, 274, 758, 1036, 2352, 1838, 675, 1724, 1498, 430, 1286, 2133, -129, -439, 0, -373, 800, 2144, 6, 1587, 2478, 478, 596, 2128, -428, -736, 1505, 385, 178, 980, 139, 449, 1225, -526, -842, -982, 145, 1554, 1242, 623, 1448, 656, 349, 1016, 1482, 31, -280, 415, -316, 724, 1641, 360, 1058, 556, -436, -358, 1201, -355, 1123, 1939, 401, 1584, 2248, -527, -1012, 355, 233, 238, 2233, -550, -897, -639, -365, -501, 1957, 389, 1860, 1621, 162, 1132, 1264, -237, 1174, 1390, -640, -411, 116, -228, 1694, 2298, 1639, 2186, 2267, 562, 1273, 2658, 323, 338, 1774, 578, 1107, 852, 22, 594, 934, -143, 718, 446 }; /* 2nd LSF quantizer (not in MR122) */ __device__ static const Word32 dico2_lsf_3[] = { 50, 71, -9, -338, -698, -1407, 102, -138, -820, -310, -469, -1147, 414, 67, -267, 1060, 814, 1441, 1548, 1360, 1272, 1754, 1895, 1661, 2019, 2133, 1820, 1808, 2318, 1845, 644, -93, 454, 858, 329, -136, 489, -258, -128, -198, -745, -41, -52, -265, -985, 346, 137, 479, -1741, -748, -684, -1163, -1725, -367, -895, -1145, -784, -488, -946, -968, -85, -390, -725, 215, -340, -171, 1020, 916, 1969, 564, 179, 746, 662, 977, 1734, 887, 622, 914, 939, 856, 1165, 309, 688, 803, 917, 161, 570, 118, -20, -283, -816, -42, 204, -1228, -325, -462, -963, -202, -143, -988, -484, -361, -702, -978, -477, -302, -790, -1188, -100, -786, -1088, -1054, -947, -1684, -202, -843, -782, -1039, -1378, -901, -624, -110, -85, 356, 213, -10, -493, 364, 774, 425, 822, 479, -83, 557, 520, -992, -1560, -572, -603, -741, -26, -502, -638, -903, 209, 306, 147, -316, -593, -596, -85, -211, -225, -918, -529, 117, 233, -439, -738, 1101, 751, 633, 1457, 1716, 1511, 1765, 1457, 910, 1122, 1156, 849, 1354, 868, 470, 
-871, -1150, -1796, -871, -861, -992, -118, 155, 212, -1051, -849, -606, -1117, -1849, -2750, -1019, -1427, -1869, 370, -184, -414, 959, 493, 104, 958, 1039, 543, 154, 653, 201, 1249, 507, 150, 663, 503, 230, 623, 777, 675, 659, 88, -110, 843, 244, 224, 382, 541, 302, 724, 433, 666, 1166, 734, 341, -138, 20, -397, -1183, -424, -46, -321, -352, -124, 1333, 1021, 1080, 262, 366, 723, 922, 283, -551, 31, -636, -611, -689, -697, -415, -952, -779, -201, -1329, -598, -359, -953, -1285, 166, 493, 305, 221, 846, 703, 610, 840, 936, 774, -723, -1324, -1261, -357, -1025, -1388, -1096, -1376, -365, -1416, -1881, -608, -1798, -1727, -674, -545, -1173, -703, 678, 786, 148, -123, 696, 1288, 644, 350, -10, 414, 614, 15, 137, 344, -211, -814, -1512, -819, -391, -930, -588, 47, -591, -898, -909, -1097, -163, -1272, -1167, -157, -1464, -1525, -389, -1274, -1188, -624, 671, 213, 454, 124, -274, -525, -729, -496, -152, -1344, 122, 135, -2905, -589, -394, -1728, 441, -50, 1476, 904, 787, 316, 236, -440, -347, 217, 413, -911, -917, 121, -455, -932, 202, -92, -465, -375, 488, 390, 474, 876, 729, 316, -1815, -1312, -669, 87, 962, 432, 563, -249, -1058, 250, 285, 1105, 1141, 427, 696, -1038, -1664, -1582, -948, 346, 160, -309, -272, -858, 670, 624, 1250, -944, -408, -666, -606, -320, -384, -492, 230, 65, 334, -50, -16, -16, -690, -1397, 1791, 1716, 1399, 2478, 2063, 1404, 1245, 1471, 1426, -382, -1037, -2, 173, -398, 1145, 1491, 2024, 1801, 772, 1274, 1506, 1429, 1735, 2001, 1079, 1218, 1273, -1154, -1851, -1329, -808, -1133, -1096, -451, -1033, -1722, 65, 578, -84, -1476, -2434, -1778, -765, -1366, -494, -218, -594, -931, 337, -236, 562, 2357, 2662, 1938, 1489, 1276, 874, 189, 358, 374, -1519, -2281, -2346, -967, -1271, -2095, -628, -1188, -1542, 1661, 1043, 546, 565, 1061, 732, -64, -836, -434, -436, -96, 203, 1078, 1216, 1636, 907, 1534, 986, 326, 965, 845, 142, -84, 197, 470, 2379, 1570, 1133, 470, 1214, 395, 1376, 1200, 1125, 1042, 348, -543, -1234, -376, -215, -181, 481, -1947, -1621, -210, -750, -1185, 390, 29, -399, 27, 820, 1236, 755, 695, 979, 409, -174, 1197, 1035, 912, 1356, 1846, -992, -1437, 484, -1485, -1700, 208, -412, 1204, 1432, -271, 896, 1144, -416, 1777, 1434, -1696, -2644, -204, -1789, -1551, 1033, -1656, -1559, 1303, -1253, -1589, 1081, -669, -1095, -66, -682, 320, -345, 659, 305, 1069, -1292, -804, -19, -1635, -1291, 29, -1683, -497, 71, -287, -7, -100, -494, -962, -237, 852, 1881, 1740, -1217, -1387, 227, -660, 302, 373, 96, 1087, 1257, -1074, -1669, 160, 485, 2076, 1798, -934, -220, 552, -596, -612, 237, 336, 1720, 879, 643, 629, 434, 1267, 522, 1633, 15, 244, -441, 1475, 717, 184, 1819, 1590, 1709, 988, 261, 937, 2093, 2345, 1520, 2139, 1858, 1606, -577, -579, -1203, -956, 135, -488, -464, 51, -338, -629, -348, -723, 1146, 2073, 1442, 2192, 1466, 911, -1444, -1572, -2278, 1400, 710, 1297, 1335, 633, 928, 1434, 2194, 2594, 2422, 2204, 1881, 982, 2242, 1854, 380, 792, 1145, -63, -539, 414, -252, -964, -314, -1261, -683, -780, -831, -526, -1005, -1666, -1135, -424, -1611, -452, -299, 1268, 1048, 642, 1147, 853, 856, -675, -336, 139, 2268, 1343, 1418, 29, 768, 797, -1224, 423, 564, -1318, -1082, 245, -1302, -812, 573, -1298, -1617, 646, -968, 834, 723, 993, 1652, 2027, -191, -817, 432, 662, 60, 198, 626, 997, 1330, 1648, 1963, 1289, -1597, -93, -45, -1088, 37, -84, 1653, 2607, 2337, 1065, 2040, 2377, 1139, 2326, 2118, 859, 357, 1510, 664, 1227, 1099, 479, 1360, 912, 1897, 1754, 2019, 1168, 1909, 1784, 399, 34, 256, -593, -304, -1053, 547, 1694, 1407, 647, -99, -341, 1492, 1647, 1190, 38, 
-644, -212, 395, 846, 222, -704, -765, -716, -724, -1964, -2804, -150, 291, -82, 1233, 1459, 1007, -140, -155, 153, 439, 297, 1568, -1529, -410, -636, 1536, 455, -237, -1328, -139, -260, 531, 554, 868, 269, 1264, 606, -233, 883, 463, 742, 600, -120, -73, 421, 212, -439, -58, 804, -1286, -1241, 728, 294, -490, 50, -591, -905, -1254, 42, -687, 147, -25, 273, 596, -311, 1213, 601, -754, 849, 584, 429, 607, 587, -602, -166, 461, -796, -823, 777, 1380, 910, 1755, 119, 1417, 972, -219, -880, -1596, -1049, -1010, 438, -713, -1379, 78, 0, -447, -1179, -1136, -1319, -1573, 2248, 1767, 1309, 946, 1583, 1432, 1150, 482, 436, -469, -1108, 618, -447, -966, 1088, -1252, -1515, -114, -1104, -2008, -579, 210, 613, 497, -1975, -1437, 642, -1269, -856, 1011, -1646, -1185, 1063, -1555, -672, 1204, -1692, -1114, 623, -979, -1326, -1277, 539, -147, 894, -1354, -897, -434, 888, 475, 428, 153, -384, 338, -1492, -511, 359, -974, -1115, -470, 105, -550, 677, -937, -1145, 877, 380, -260, 210, 1685, 924, 1256, 1775, 1190, 1095, 1419, 631, 533, 627, 299, -347, -411, -534, 647, -650, 29, -595, -378, -1367, 1563, 1402, 1121, 1465, 1089, 1410, 648, -2096, -1090, -6, 311, -194, -869, -639, -831, 416, -1162, -1224, 1349, -1247, -941, 1813, -2193, -1987, 453, -619, -1367, -956, -1606, -1972, -1507, -1175, -1057, -1104, -377, 601, 201, 1876, 825, 374, -430, -1323, 29, -1397, -1249, -1331, -1007, -1504, 960, -1401, -2009, 197, -1379, -1949, -236, -1077, 123, 422, 615, 1269, 546, -306, 1526, 904, 1194, 1788, 1177, -626, -884, -1526, 199, 766, 1504, -1065, 862, 197, -1034, -1773, -887, -800, 145, 599, -1134, -519, 626, -1205, -1926, 500, -910, -1041, -1395, -1476, -1567, -969, -523, 842, 34, 1794, 646, 862, -1207, -1888, -1002, -78, -9, -672, 1044, 759, 80, -600, 1139, 1019, 57, 2000, 1422, -833, 1414, 1121, -1202, 1630, 1260, -461, 1420, 1244, 1537, 975, 253, -283, 324, -359, 599, -195, 106, 588, 62, -587, -757, 645, 205, 51, 1201, 758, -1209, 673, -390, -624, 1581, 941, -151, 1023, 735, 2820, 1301, 690, -302, 524, -99, -900, -1588, -1189, 1084, 251, 238, 2014, 1792, 1010, 1245, 1633, 1741, -1227, -1540, -1208, -621, 456, -109, 40, -65, 788, -805, -699, -1350, -583, 904, 832, -801, 532, 594, 1972, 1408, 1351, -1177, -1880, -2114, -773, 568, 948, -1015, 1079, 1260, -1111, 482, -130, 1778, 1044, 780, -1491, 245, 912, -316, -1141, -917, -536, -1442, -2346, -785, -1546, -1988, -2003, 257, 909, -1849, -633, -1209, -1538, -1918, -1054, 1606, 2239, 1576, -567, -1500, -1544, -1279, 195, 1369, -817, 293, 1219, -525, 630, 1197, -1698, -2425, -1840, -303, 731, 747, -1169, -251, 269, -950, -75, 1684, -1182, -453, 1005, -1599, 585, 378, -2075, -571, -427, -529, -1159, -1171, -283, -205, -564, -796, 1246, 717, 2277, 927, 539, -454, 559, 440, -717, 1460, 1615, -1030, 1052, 1610, -1169, -138, 847, 226, 39, -612, -1251, -106, -729, -651, 968, 1302, -714, -636, 1727, 353, 1069, 410, -798, -156, 1099, -574, 918, 446, -1310, 1012, 466, 1408, 1591, 765, 1429, 1380, 1757, 1949, 1956, 2378, 1578, 2047, 2148, 916, 98, -7, 1893, 1418, 2141, 348, 1405, 1579, 152, 1134, 1801, -267, 154, 1395, -1166, 469, 1054, -1142, -405, -1073, -1341, -2264, -1581, -364, 869, 1706, -1162, 549, 1550, -1225, -1932, -1666, -1485, -1977, -2055, -1727, -906, -98, -1897, 233, 1492, 892, 108, -331, -1728, -1170, -1700, -1060, 1980, 1790, -1070, -1741, -1909, -11, 1539, 1317, -1600, 94, 497, 421, 443, -197, -1578, -349, -994, -599, -539, 1140, -965, -1419, -129, -1341, 175, -447, -375, 1311, 2055, -371, -650, -307, -1073, 605, 365, -2057, -113, 430, 652, 914, 967, -1012, 
-1586, -2323, 1505, 1248, 559, 262, -486, -401, -1727, 1342, 1546, 50, 56, 432, -330, 119, -604, -1517, -1080, -810, 946, 1127, 1055, -1400, -1703, -1712, -1270, -704, -1317, 807, 1821, 1143, 2760, 1606, 2171, 1120, 409, -150, -147, 404, 959, 2439, 1911, 2189, -906, -141, -866, -904, -142, -458, -557, -708, -1679, -830, -1431, -1583, -1842, -1346, -1086, -1604, -272, 915, -1196, 772, 1056, -638, -1234, -1897, -500, -81, -822, -1289, -1613, -735, -117, 785, 168, -1090, 1133, 922, -1096, -746, 1384, 287, -547, -1063, -1376, -2201, -1204, -2176, -1570, -1757, -1511, -2241, -771, -1737, 1099, 830, -1588, 724, 1243, -1542, 693, 805, -1690, -240, 1665, -1700, -4, -668, 2149, 816, 1042, -818, -1841, 22, -764, -507, 449, -1151, -617, 289, -843, -1596, -240, 498, -234, -657, -752, 480, 1678, -319, -481, 193, -811, 171, -119, -2128, -202, -848, 1717, 1140, 1700 }; /* 3rd LSF quantizer (not in MR122, MR515 and MR475) */ __device__ static const Word32 dico3_lsf_3[] = { 67, -17, 66, -12, -1690, -581, -104, -272, -1076, -1186, -1845, -376, -1140, -926, -420, -58, -259, -656, -1134, -553, 1788, 1227, 455, 129, 462, 441, -240, -528, 840, 514, 130, -75, 1114, 623, 153, 216, 1068, 564, -6, -276, 1119, 727, 190, -68, 704, 306, 119, -264, 329, 61, -100, 156, 364, 123, 183, -208, -171, -123, 220, -65, -306, -62, 402, 17, -660, -938, -266, 0, 385, 235, 276, 285, 320, 268, -336, -200, -724, 17, -84, 381, -544, 429, 494, 519, -117, 288, 304, 329, 643, 157, 701, 508, 1200, 625, 796, 608, 998, 421, 492, 632, 1204, 780, 446, 132, 1257, 844, 547, 449, 829, 658, 541, 470, 1132, 1258, 918, 639, 547, 51, 423, 279, 9, 392, 83, 94, 542, 543, 229, -147, -198, 129, 194, -185, -863, -1321, -302, 30, -597, -629, -19, 114, -900, -1081, 466, 353, -1483, -1573, 15, -143, -1708, -2059, -751, 196, -1876, -2067, -642, -258, -2335, -1470, -450, -564, -584, -186, -872, -414, -1805, -988, -1125, -1310, -726, -1129, 28, 169, -1039, -864, -718, -246, 484, 36, -233, -49, 265, 67, 289, 467, 178, 543, 810, 540, 84, 282, 672, 703, -975, -777, 129, 287, -938, -227, 955, 595, -1617, -289, 836, 649, -1847, -215, 1106, 718, -2034, -1085, 650, 440, -2101, -529, 907, 575, -2011, -336, 670, 204, -2389, -692, 360, 137, -2156, -2204, -9, 280, -266, 119, 39, 193, 78, -59, -120, 226, -975, -858, -781, -1095, -619, -413, -451, -842, -1216, -1321, -813, -883, -1376, -1615, -394, -428, -737, -1113, -549, -790, -880, -975, -967, -642, -985, -886, -1273, -1361, -473, -804, -1401, -1407, 160, -265, -919, -275, -248, -250, -718, -380, 97, -103, -375, -229, -415, -193, -135, -555, 628, 361, 119, 216, 579, 364, 391, 209, 634, 522, -154, -148, 526, 389, 170, 33, 105, 267, 64, 380, -1503, -1000, -30, -369, -1070, 58, 647, 223, -1520, -291, 621, 307, -1531, 156, 762, 404, -2029, 141, 734, 499, -1849, -650, 306, 512, -187, -104, -59, 438, 134, -230, 156, -186, -61, -260, -16, 10, -569, -3, -421, -297, -1725, -521, -346, 178, -1362, -59, -44, 157, -2146, -461, -470, -349, -2170, -1, -369, -121, -1579, -373, -900, -1015, -1117, -591, -613, -784, -561, 122, -75, -449, -4, -171, -123, -372, 192, 168, -76, -132, 252, -107, 340, 210, 392, 509, 272, 181, -109, 145, 218, 119, -416, -263, 485, 265, -181, -8, -286, 226, -244, -218, 69, -290, -158, 191, -1, -64, -592, -90, 213, -96, 255, 435, 178, -80, -369, -18, -33, -80, -42, 415, 140, -222, 1143, 651, 649, 329, 767, 556, 249, 235, 948, 413, 442, 279, 141, 339, 356, 557, -470, -170, 99, 237, -569, -800, 352, 565, 282, 473, 470, 332, -199, -690, -1284, -917, -193, -426, -800, -1122, -26, -371, -490, -193, 637, 595, 519, 
330, 408, -115, 79, 12, 477, 87, -103, -376, -666, -347, -277, -291, -510, -481, 169, 297, -829, -738, -205, -171, -320, -540, 328, 283, -859, -958, 442, -2, 556, 686, 130, 56, 1383, 1012, 755, 427, 612, 741, 628, 553, -339, -796, 134, 277, -633, -1085, -2, -246, -880, -1035, -1607, -1064, -994, -474, -1138, -488, -414, -795, 73, -206, -8, -139, 439, 204, -176, -578, 23, 131, -269, -757, -191, 245, -109, -338, 112, 316, 120, -406, -118, 611, -180, -186, -645, 115, -173, 34, -518, -489, -151, 61, -583, -844, 220, -138, -681, -1020, 391, -17, -598, -321, 157, -295, 129, 155, -926, -875, -987, 285, 241, -83, -125, -125, 620, 597, 432, 92, 393, 78, 409, 61, -393, -739, -413, -748, 83, 54, 361, 27, -1084, 130, -337, -694, -1565, 297, 318, -19, -1873, 36, 51, -317, -2323, -246, 231, -84, -2306, -783, 40, -179, -2233, -930, -474, -462, -754, -86, -288, -626, -2411, -455, -63, 171, -1099, -1094, -26, -143, -1193, -455, -406, -381, -605, -210, -96, -51, -580, -476, -276, -15, -1195, -634, -1203, -881, -378, -221, -669, -952, 594, 178, -403, -676, 763, 327, 601, 290, 172, 300, 203, 157, -56, -336, 356, 24, -228, -296, -259, -29, -186, 263, 416, 14, -353, 373, -12, -216, 257, 96, 174, 57, -1526, -616, -954, -499, -497, -152, -333, 125, 105, 200, 179, -97, -331, -224, 765, 697, 760, 256, 301, 59, 455, -85, 204, 288, -514, 240, 251, -109, 256, 417, -34, -413, 101, 430, 384, 156, -31, -10, 206, 426, 589, 145, 143, 71, 808, 906, 333, 349, 986, 938, 589, 331, 1300, 824, 187, 509, 1062, 653, 379, 466, 1462, 937, 401, 274, 787, 861, 265, 2, 609, 553, 28, 305, 926, 340, 106, 386, 241, -267, -147, 225, -178, -534, 347, 502, -643, -381, 397, 30, -651, -733, -435, 398, -407, -726, -484, -248, -789, -914, -438, -476, -498, -390, 75, -295, -964, -590, -606, 150, -121, -49, -155, -78, 935, 550, 389, 38, -321, 127, 424, 315, -285, -113, 283, 259, 658, 203, 322, 486, 903, 505, 748, 417, 611, 423, 555, 512, 239, -83, -578, -19, -339, -731, 349, 13, -934, -1399, -114, -360, 107, 692, 182, 90, -1243, -1538, -1551, -725, -568, -903, -1363, -525, -517, -853, -861, -1004, -168, -690, -835, 63, -137, -556, -547, 144, -286, -817, 485, 319, -147, -408, 526, 246, -347, -434, 297, -28, -290, -471, -1110, -1285, -460, -359, -988, -794, 1347, 1299, 690, 523, 1216, 1068, 1094, 757, 825, 1140, 752, 494, 1252, 1365, 1195, 898, 521, 1053, 532, 432, -334, -216, -313, -263, -160, 52, -472, -155, 127, 136, -380, 44, 851, 410, -162, -489, 123, -255, -796, -667, 1090, 917, 789, 493, 1397, 1197, 558, 202, -51, -118, -342, -701, 83, 108, -42, -441, 61, 95, 287, 256, -27, 89, 524, 531, 351, 227, 592, 545, 697, 155, -164, 307, 638, 274, -489, -50, 754, 240, -166, -124, -116, -579, -1212, -63, 190, -295, -1040, -1296, 147, -376, -177, -113, 841, 1241, 1051, 668, 2, 293, 551, 304, -1096, -953, -248, 376, -750, -965, 87, 516, -275, -516, 689, 391, -379, -643, 876, 594, -390, -1013, -645, 573, -107, -568, -689, -826, -1025, -27, -328, -203, 861, 749, 548, 233, -1660, -1043, 451, 108, -660, -620, 430, 236, 21, -396, -1158, -631, 1372, 1298, 967, 577, 1125, 1125, 589, 454, -323, -865, -467, 153, -468, -699, -804, -509, -392, -718, -204, -35, -603, -1093, -567, -162, -505, -1004, -102, 350, 219, 224, 423, 252, 395, 591, 608, 363, -746, -96, 373, 172, 171, 295, 714, 339, 233, 77, 107, 277, 157, 153, -499, -356, 1547, 1073, 576, 494, -292, -339, -504, -592, -903, -72, -619, -481, -1594, -1117, -567, -254, -793, -507, -564, -291, -492, -532, 502, 560, -382, 427, 600, 230, -227, 477, 251, 75, 285, 842, 813, 476, -1310, -1333, 186, 377, -587, -917, 643, 
381, -1186, -553, 411, 82, -1127, -820, -174, -540, -604, 119, 543, 205, -380, 657, 909, 567, 112, -298, -374, 114, -857, -251, 56, 159, 401, 345, -34, -140, -111, -607, 41, 614, 355, -114, -77, 474, 578, 56, 1450, 924, 1098, 1420, 741, 400, 246, 22, 588, 313, -121, 327, 831, 472, -1138, -608, 856, 552, -1241, -1072, 638, 600, -358, 254, -333, -303, -646, 739, 358, 74, 1226, 1671, 1221, 849, 2241, 1624, 983, 636, 1841, 1477, 749, 384, 350, 263, 87, 128, -1902, -941, -144, -64, -1734, -255, 288, -31, -2644, -1238, 366, 235, -1643, -1092, -1344, -304, -541, -1075, -1116, 123, -1178, -252, -816, -180, -1016, 533, 565, 233, -487, -430, -188, 334, 867, 1236, 534, 171, -1590, -1607, 635, 630, -2196, 310, 924, 412, -2358, -328, 956, 529, -2639, -377, 630, 278, -2602, 317, 799, 299, -2406, 133, 340, 31, -2156, -1468, 131, 125, -1184, -490, -139, 46, -744, 447, 891, 564, 67, -451, 646, 604, -553, -429, -876, 396, 162, -66, 1305, 915, 479, 579, 1088, 794, 450, 278, 566, 324, -1057, -154, 148, -177, -2545, 168, 1070, 592, -2351, -42, 819, 345, -2344, -707, 721, 250, -2175, -1497, -309, 122, -78, -73, 120, 173, -4, 262, -263, -261, -431, -64, -405, -732, -2609, 116, -83, -193, -1525, -944, -477, -725, -508, 307, 170, 172, 832, 417, 832, 686, -225, 177, 894, 818, -482, -389, 1279, 1039, -383, 201, -350, 40, 730, 635, 226, 526, 503, 462, 338, 398, 535, 714, 40, -282, 1482, 1471, 1085, 731, 1561, 1072, 909, 693, 1419, 1282, 889, 879, 1153, 728, 1186, 840, -226, 1130, 949, 689, -494, -986, -1556, -128, -568, -721, -713, -26, 317, 524, 70, 135, -405, -865, -1766, -652, -174, -801, 885, 773, -153, -91, 1099, 751, -506, -1149, 853, 646, 241, 782, 519, 539, 1853, 1700, 1101, 684, -1249, -1486, -464, 188, -893, -1409, -1312, -341, -135, 438, -175, 18, 1111, 976, 319, 208, -1430, -1768, 83, 458, -530, -1000, 307, 129, -840, -15, -29, -356, -911, -924, -1147, -242, -119, -528, 127, -133, -761, -765, 190, -83, -315, 895, 522, 231, -222, 102, -63, -428, 316, 699, 379, 70, 25, 716, 314, -108, 507, 874, 566, 238, 108, 941, 519, 195, 425, -60, -427, 257, 139, -103, -630, 446, 334, 370, 412, 48, -172, -690, -283, 557, 187, -286, 158, 483, 140, 270, -344, -631, 924, 579, -116, 132, 142, 466, -68, -64, 230, -145, -302, -542, -803, -912, 1018, 737, -773, 1015, 630, 297, -2596, 95, 445, 336, -2122, 491, 510, 191, -1253, 161, -2, -324, -1450, -633, -712, -105, -842, -254, -411, 100, -640, -290, 1010, 763, -650, 313, 1169, 730, 140, 505, 1030, 766, 772, 287, 1067, 823, 495, 749, 305, 323, -164, 462, 78, 399, -342, -874, 69, 597, -16, 620, 621, 337, -138, -444, -265, 218, 84, -450, 953, 666, -222, -803, 541, 604, -921, -1376, 244, 116, -841, -723, 630, 588, 140, 663, 294, 368, 935, 1046, 881, 759, 1746, 1464, 916, 628, 436, 963, 281, 1, -119, 74, 542, 213, 1, -567, 301, 241, 260, 435, 222, 396, 936, 957, 1108, 703, 510, 506, 808, 478, 601, 694, 960, 620, 972, 741, 980, 600, 834, 717, 767, 684, 643, 972, 935, 638, 501, 661, 720, 851, -105, -632, -303, -117, -429, 130, 789, 442, -522, -188, 704, 373, -759, 42, 814, 523, -531, -1137, 373, 578, -682, -1203, -455, 285, -1163, -1577, -1098, 44, 81, -82, 712, 363, 477, 246, 954, 622, 1604, 1622, 1277, 891, 1409, 859, 924, 892, 774, 1041, 947, 1142, 40, -546, -75, 288, -616, -106, -697, -26, -169, -160, -891, -739, -279, -384, -1029, -350, 1781, 1308, 1046, 816, 1580, 1533, 1472, 1178, 1505, 1076, 1216, 899, 890, 904, 564, 654, 920, 692, 1021, 856, -493, 132, 177, 505, 71, 195, -28, 97, 456, 351, -164, 88, 439, 278, -40, 350, 1395, 949, 234, -95, -805, -472, 38, -163, 367, -98, 489, 
523, 1025, 1178, 1212, 906, 319, 1314, 814, 461, -123, -543, -804, 447, -748, -324, -897, -1127, -737, -501, -789, -713, 715, 777, 1239, 922, 1949, 1939, 1368, 865, 730, 880, 758, 388, -871, 454, 17, -251, -381, -810, -1583, 239, -521, -966, -792, 259, -890, -1358, -770, -73, 166, 349, -212, 323, -840, -301, 473, 435, -679, -464, 728, 351, -156, -199, 667, 432, 29, -252, 415, 480, -731, -379, 145, 559, -528, -631, -1158, -159, 445, 273, 123, 639, 373, -126, 800, 568, 84, -162, 720, 712, -830, -536, -185, 222, 408, 452, 501, 771, -897, -1355, -67, 442, -792, -1406, 566, 602, 167, -326, 509, 330, -95, -626, -730, -344, 1668, 1217, 779, 455, 1316, 828, 584, 719, 404, -31, 1013, 789, 89, 107, 891, 549, 871, 1581, 917, 671, 866, 1479, 1289, 854, 391, 1068, 1122, 812, 78, -562, 345, 563, 429, -103, 417, 787, -122, -437, 411, 788, -913, -417, 602, 754, -226, -16, 151, 760, -700, 118, -104, -14, -1128, 48, 284, 393, -390, -419, -639, -116, -910, 306, 316, -13, 1207, 984, 821, 669, -1195, -693, 140, -213, -884, -416, -199, -558, -616, 245, -404, -664, 262, 56, -617, -724, -85, -491, -320, -656, -570, -831, -129, -528, -1506, -63, -367, -385, -358, -321, 4, 51, -366, -214, 319, 511, 146, 671, -17, -291, -110, 464, -139, -496, -202, 220, -312, -631, -660, -73, -655, -820, -662, -653, -1288, -857, -430, -953, -959, -264, -49, -468, -72, -381, -350, -563, -193, -407, 55, -408, -803, 11, -309, 649, 188, -198, -512, 461, -79, -458, -1318, -263, -134, -523, -1657, -435, -495, -765, 57, -347, -414, 434, -1141, -242, -664, -857, 34, -68, -707, -338 }; #define MR515_3_SIZE 128 /* 3rd LSF quantizer (MR515 and MR475) */ __device__ static const Word32 mr515_3_lsf[] = { 419, 163, -30, -262, -455, -789, -1430, -721, 1006, 664, 269, 25, 619, 260, 183, 96, -968, -1358, -388, 135, -693, 835, 456, 154, 1105, 703, 569, 363, 1625, 1326, 985, 748, -220, 219, 76, -208, -1455, -1662, 49, 149, -964, -172, -752, -336, 625, 209, -250, -66, -1017, -838, -2, 317, -2168, -1485, -138, 123, -1876, -2099, -521, 85, -967, -366, -695, -881, -921, -1011, -763, -949, -124, -256, -352, -660, 178, 463, 354, 304, -1744, -591, -282, 79, -2249, 175, 867, 499, -138, -180, -181, -21, -2291, -1241, -460, -520, -771, 451, -10, -308, 271, -65, 4, 214, -279, -435, -43, -348, -670, 35, -65, -211, 806, 535, 85, 297, 57, 239, 722, 493, 225, 661, 840, 547, -540, -376, 14, 349, 469, 721, 331, 162, -544, -752, -62, -10, 398, -88, 724, 701, -19, -533, -94, 601, 136, -71, -681, -747, -166, -344, 261, -50, 161, -52, 485, 337, -1675, 50, 190, -93, -2282, -231, -194, -82, -95, -595, -154, 128, 894, 501, 588, 457, -345, 206, 122, 110, -631, -227, -569, 3, 408, 239, 397, 226, -197, -2, 128, 491, 1281, 904, 292, 215, 538, 306, 259, 509, -677, -1047, 13, 321, -679, -588, -358, -212, -558, 243, 646, 479, 486, 342, 634, 532, 107, 802, 331, 136, -112, -398, -1031, -286, -326, -705, 288, 272, 1299, 1144, 1178, 860, -423, 121, -385, -148, -295, -302, -834, -819, 16, -24, -201, -476, 555, 91, -245, 294, -38, -379, -962, -1221, -1191, -1518, -273, -395, -390, -1013, -645, 573, -1843, -1030, 505, 468, 744, 947, 609, 493, -689, -1172, -628, -135, -1026, 195, 411, 196, 1582, 1147, 575, 337, -1239, -777, -648, -142, 595, 825, 967, 735, -1206, -970, -81, -342, -745, 13, -72, 375, 454, 19, 1407, 921, -1647, -172, 861, 562, 928, 1537, 1063, 740, -2472, -952, 264, 82, -502, -965, -1334, 123, 867, 1236, 534, 171, -2320, -460, 780, 363, -1190, -617, 252, -61, -174, 34, 1011, 788, -2333, 247, 423, 153, -16, -355, 262, 449, -1576, -1073, -544, -371, -615, -305, 1051, 805, 687, 
528, 6, -182, 935, 875, 1002, 809, 199, 257, 126, 76, -584, -1138, 599, 556, -1105, -1391, -1591, -519, -977, -1325, 108, 347, -722, -975, 365, 101, -145, 681, 249, -153, 0, -334, -570, 159, 412, 285, -336, -617, -953, -966, 887, 689, -1251, 84, -185, -398, -592, 433, 1044, 653, 85, 329, -40, 361, -433, -705, 466, 574, -154, 654, 592, 290, -167, 72, 349, 175, 674, 297, 977, 720, 1235, 1204, 757, 488, -400, -269, 538, 372, -1350, -1387, -1194, -91, 1262, 876, 775, 700, -599, -38, -430, -722, 1976, 1630, 991, 608, 111, 276, -226, -96, -947, -388, -11, -7, -303, -531, -839, 338, 1734, 1710, 1405, 1013, -516, -855, -645, 210, -688, -416, 513, 230, -822, -637, -1146, -320, -952, -658, -694, 183, -114, -623, 818, 674, -191, -204, 731, 635, 51, 1221, 883, 576, -954, -431, 826, 598, -342, -755, -900, -407, -1126, -354, -206, -512, -547, -810, -357, -620, 66, 515, -73, -410, -872, -945, -1444, -1227, 191, -17, -544, -231, -1540, -544, -901, -886 }; #define MR795_1_SIZE 512 /* 1st LSF quantizer (MR795) */ __device__ static const Word32 mr795_1_lsf[] = { -890, -1550, -2541, -819, -970, 175, -826, -1234, -762, -599, -22, 634, -811, -987, -902, -323, 203, 26, -383, -235, -781, -399, 1262, 906, -932, -1399, -1380, -624, 93, 87, -414, -539, -691, 37, 633, 510, -387, -476, -1330, 399, 66, 263, -407, -49, -335, -417, 1041, 1865, -779, -1089, -1440, -746, -858, 832, -581, -759, -371, -673, -506, 2088, -560, -634, -1179, 271, 241, 14, -438, -244, -397, 463, 1202, 1047, -606, -797, -1438, -51, -323, 481, -224, -584, -527, 494, 881, 682, -433, -306, -1002, 554, 659, 222, 171, -160, -353, 681, 1798, 1565, -852, -1181, -1695, -336, -666, 114, -581, -756, -744, -195, 375, 497, -465, -804, -1098, 154, 282, -131, -50, -191, -719, 323, 732, 1542, -722, -819, -1404, 105, -250, 185, -178, -502, -742, 321, 510, 1111, -323, -567, -966, 127, 484, 338, -160, 52, -338, 732, 1367, 1554, -626, -802, -1696, -286, -586, 676, -695, -343, -370, -490, 295, 1893, -630, -574, -1014, -80, 645, -69, -6, -318, -364, 782, 1450, 1038, -313, -733, -1395, 120, 60, 477, -264, -585, -123, 711, 1245, 633, -91, -355, -1016, 771, 758, 261, 253, 81, -474, 930, 2215, 1720, -808, -1099, -1925, -560, -782, 169, -804, -1074, -188, -626, -55, 1405, -694, -716, -1194, -660, 354, 329, -514, -55, -543, 366, 1033, 1182, -658, -959, -1357, -55, -184, 93, -605, -286, -662, 404, 449, 827, -286, -350, -1263, 628, 306, 227, -16, 147, -623, 186, 923, 2146, -674, -890, -1606, -443, -228, 339, -369, -790, -409, 231, 86, 1469, -448, -581, -1061, 594, 450, -177, -124, -170, -447, 671, 1159, 1404, -476, -667, -1511, -77, -138, 716, -177, -372, -381, 451, 934, 915, -250, -432, -822, 272, 828, 446, 26, 19, -31, 698, 1692, 2168, -646, -977, -1924, -179, -473, 268, -379, -745, -691, 11, 127, 1033, -488, -917, -825, 61, 323, 135, 147, -145, -686, 685, 786, 1682, -506, -848, -1297, 35, 90, 222, -23, -346, -670, 455, 591, 1287, -203, -593, -1086, 652, 352, 437, 39, 63, -457, 841, 1265, 2105, -520, -882, -1584, -328, -711, 1421, -596, -342, -70, 209, 173, 1928, -423, -598, -921, 421, 605, -38, -2, -245, -127, 896, 1969, 1135, -379, -518, -1579, 173, 118, 753, -55, -381, -52, 985, 1021, 753, -2, -291, -891, 753, 992, 423, 264, 131, -196, 895, 2274, 2543, -635, -1088, -2499, -529, -982, 526, -764, -830, -548, -436, 316, 599, -675, -940, -746, -57, 236, -11, -201, -81, -798, 16, 845, 1558, -737, -985, -1212, -468, 17, 290, -279, -584, -700, 183, 822, 705, -265, -492, -1187, 421, 152, 468, -390, 166, -268, 39, 1550, 1868, -635, -966, -1571, -453, -492, 910, -284, -1027, -75, 
-181, -133, 1852, -445, -624, -1174, 420, 367, -49, -389, -212, -169, 707, 1073, 1208, -539, -710, -1449, 83, -163, 484, -236, -543, -355, 338, 1175, 814, -246, -309, -958, 606, 760, 60, 166, -8, -163, -306, 1849, 2563, -747, -1025, -1783, -419, -446, 209, -718, -566, -534, -506, 693, 857, -463, -697, -1082, 325, 431, -206, -15, -8, -763, 545, 919, 1518, -611, -783, -1313, 256, -55, 208, -165, -348, -662, 321, 680, 930, -326, -429, -951, 484, 446, 570, -197, 72, -73, 909, 1455, 1741, -563, -737, -1974, -124, -416, 718, -478, -404, -314, -16, 446, 1636, -551, -537, -750, -58, 638, 214, 55, -185, -271, 1148, 1301, 1212, -483, -671, -1264, 117, 285, 543, -204, -391, -111, 513, 1538, 854, -114, -190, -978, 877, 595, 464, 260, 260, -311, 748, 2283, 2216, -517, -945, -2171, -326, -708, 378, -812, -691, -232, -560, 687, 1409, -732, -690, -836, -359, 645, 386, -265, 62, -678, 145, 1644, 1208, -555, -988, -1233, -78, 14, 114, -327, -358, -489, 392, 677, 697, -201, -236, -1140, 693, 449, 178, -243, 256, -433, 611, 1385, 2456, -612, -901, -1464, -307, -17, 499, -315, -667, -254, 256, 428, 1463, -486, -422, -1056, 655, 370, 18, -102, -185, -276, 755, 1578, 1335, -488, -603, -1418, 182, -93, 870, -73, -458, -348, 835, 862, 957, -282, -333, -746, 547, 839, 428, 273, -89, 13, 940, 1708, 2576, -418, -1084, -1758, -44, -358, 259, -497, -643, -560, 99, 557, 961, -421, -766, -917, 295, 326, 184, 175, 15, -626, 532, 878, 1981, -443, -768, -1275, 221, 156, 268, 39, -363, -505, 695, 772, 1140, -162, -459, -912, 709, 444, 658, 25, 303, -312, 1268, 1410, 1715, -297, -766, -1836, -263, -108, 1070, -406, -13, -129, 57, 438, 2734, -374, -487, -835, 304, 696, 164, 104, -235, 5, 1611, 1900, 1399, -229, -582, -1325, 405, 192, 817, -87, -438, 111, 1028, 1199, 993, 68, -175, -934, 1033, 1117, 451, 478, 200, -248, 2127, 2696, 2042, -835, -1323, -2131, -799, -692, 466, -812, -1032, -469, -622, 288, 920, -701, -841, -1070, -411, 512, 8, -390, -91, -744, -30, 1043, 1161, -822, -1148, -1156, -294, -46, 110, -411, -374, -678, 214, 531, 668, -406, -420, -1194, 487, 232, 303, -318, 91, -472, 123, 1232, 2445, -722, -952, -1495, -738, -675, 1332, -543, -606, -211, -95, -98, 1508, -549, -514, -1193, 473, 211, 73, -288, -112, -389, 537, 1332, 1258, -567, -755, -1545, 71, -283, 632, -170, -481, -493, 681, 1002, 817, -356, -331, -877, 419, 706, 346, 241, -34, -326, 377, 1950, 1883, -727, -1075, -1625, -233, -543, 116, -524, -806, -585, -73, 478, 729, -288, -925, -1143, 173, 447, -52, 68, -229, -606, 449, 529, 1797, -591, -875, -1363, 183, -144, 324, -103, -452, -666, 623, 488, 1176, -238, -511, -1004, 326, 552, 458, 136, 108, -319, 626, 1343, 1883, -490, -646, -1730, -186, -449, 984, -738, -76, -170, -550, 755, 2560, -496, -510, -947, 210, 694, -52, 84, -322, -199, 1090, 1625, 1224, -376, -603, -1396, 343, 74, 632, -175, -502, -32, 972, 1332, 734, 52, -295, -1113, 1065, 918, 160, 393, 107, -397, 1214, 2649, 1741, -632, -1201, -1891, -719, -277, 353, -651, -880, -122, -211, 209, 1338, -562, -714, -1059, -208, 388, 159, -320, -61, -551, 293, 1092, 1443, -648, -865, -1253, -49, -143, 305, -401, -227, -585, 561, 532, 927, -117, -443, -1188, 507, 436, 292, -79, 233, -458, 671, 1025, 2396, -633, -842, -1525, -308, -286, 640, -373, -621, -407, 418, 253, 1305, -315, -581, -1137, 572, 685, -281, 61, -68, -371, 991, 1101, 1498, -493, -683, -1362, -47, 164, 704, -256, -314, -268, 631, 949, 1052, -118, -348, -833, 68, 1180, 568, 152, 117, 34, 1113, 1902, 2239, -601, -959, -1706, -143, -489, 480, -332, -655, -574, 54, 353, 1192, -462, -652, -796, 
150, 549, 112, 195, -111, -515, 679, 1108, 1647, -558, -749, -1217, -9, 272, 341, -53, -265, -535, 489, 843, 1298, -120, -482, -1032, 632, 543, 408, 179, 306, -526, 1124, 1464, 2244, -417, -786, -1562, -224, -384, 1364, -377, -459, -25, 385, 489, 2174, -332, -651, -829, 544, 553, 61, 22, -113, -89, 1128, 1725, 1524, -216, -373, -1653, 161, 316, 908, -165, -222, -67, 1362, 1175, 789, 73, -252, -767, 738, 932, 616, 362, 246, -126, 787, 2654, 3027, -691, -1106, -2190, -565, -588, 524, -590, -979, -490, -263, 397, 982, -577, -837, -945, -22, 435, -49, -190, -118, -629, -88, 1240, 1513, -636, -1051, -1019, -291, 189, 259, -257, -470, -629, 145, 945, 894, -326, -364, -1094, 543, 260, 630, -202, 189, -209, 357, 1379, 2091, -569, -1075, -1449, -714, -239, 919, -420, -705, -84, -109, -114, 2407, -413, -529, -1177, 482, 368, 131, -186, -72, -131, 861, 1255, 1220, -611, -658, -1341, 227, -121, 631, -176, -489, -218, 745, 1175, 957, -321, -148, -936, 671, 966, 216, 340, -3, -143, 469, 1848, 2437, -729, -961, -1683, -213, -254, 321, -511, -438, -521, -126, 725, 903, -340, -685, -1032, 316, 480, 20, 23, -89, -551, 353, 1051, 1789, -544, -757, -1364, 298, -25, 436, -100, -392, -519, 467, 754, 1078, -210, -398, -1078, 620, 658, 630, 33, 147, -178, 921, 1687, 1921, -325, -528, -1978, 2, -285, 910, -371, -490, -230, 0, 597, 2010, -496, -395, -834, 37, 945, 245, 181, -160, -144, 1481, 1373, 1357, -355, -601, -1270, 298, 322, 672, -193, -336, 77, 1089, 1533, 922, 177, -39, -1125, 996, 781, 536, 456, 366, -432, 1415, 2440, 2279, -466, -758, -2325, -303, -509, 387, -727, -557, 66, -145, 643, 1248, -544, -676, -916, -225, 862, 588, -152, 40, -533, 423, 1423, 1558, -572, -843, -1145, -128, 85, 461, -238, -257, -584, 605, 748, 861, 24, -202, -1409, 797, 487, 303, -181, 364, -182, 616, 1378, 2942, -494, -852, -1441, -292, 61, 812, -84, -723, -182, 555, 532, 1506, -365, -493, -1057, 822, 588, 11, -14, -18, -230, 1001, 1401, 1451, -474, -569, -1292, 302, 62, 1062, -70, -376, -222, 982, 974, 1149, -196, -234, -795, 479, 1098, 499, 362, 58, 70, 1147, 2069, 2857, -487, -878, -1824, 73, -288, 348, -358, -500, -508, 199, 721, 1242, -78, -697, -795, 361, 536, 196, 374, 110, -735, 847, 1051, 1896, -366, -713, -1182, 315, 320, 429, 72, -215, -450, 759, 886, 1363, -30, -428, -834, 861, 627, 796, 118, 468, -279, 1355, 1883, 1893, -188, -642, -1612, 63, -175, 1198, -418, -211, 51, 414, 587, 2601, -234, -557, -858, 424, 889, 222, 136, -101, 83, 1413, 2278, 1383, -84, -445, -1389, 414, 313, 1045, 29, -343, 65, 1552, 1647, 980, 183, -91, -829, 1273, 1413, 360, 553, 272, -107, 1587, 3149, 2603 }; #define DICO1_SIZE_5 128 #define DICO2_SIZE_5 256 #define DICO3_SIZE_5 256 #define DICO4_SIZE_5 256 #define DICO5_SIZE_5 64 /* 1st LSF quantizer (MR122) */ __device__ static const Word32 dico1_lsf_5[DICO1_SIZE_5 * 4] = { -451, -1065, -529, -1305, -450, -756, -497, -863, -384, -619, -413, -669, -317, -538, -331, -556, -414, -508, -424, -378, -274, -324, -434, -614, -226, -500, -232, -514, -263, -377, -298, -410, -151, -710, -174, -818, -149, -412, -156, -429, -288, -462, -186, -203, -170, -302, -191, -321, -131, -147, -297, -395, -228, -214, -245, -192, -67, -316, -71, -327, -104, -205, -94, -183, -143, -38, -193, -95, 16, -76, -124, -248, 23, -237, 24, -244, 18, -136, 44, -111, -33, -24, -25, 0, 149, 19, 23, -143, 158, -169, 174, -181, 133, -55, 165, -26, 111, 84, 98, 75, 87, 183, -115, -11, -8, 130, 11, 170, 254, 77, 205, 17, 183, 112, 262, 194, 202, 287, 95, 189, -42, -105, 234, 179, 39, 186, 163, 345, 332, 199, 299, 161, -54, 285, -78, 
281, -133, 141, -182, 111, 249, 341, 271, 364, 93, 403, 75, 391, 92, 510, -138, 220, -185, -29, -34, 361, -115, 320, 3, 554, 99, 286, 218, 591, -245, 406, -268, 453, 0, 580, 25, 606, 275, 532, 148, 450, -73, 739, -285, 518, -288, 94, -203, 674, -140, -74, 205, 714, -114, 299, 176, 923, 182, 557, 240, 705, -16, 513, 485, 593, 293, 384, 451, 617, -38, 50, 563, 529, 303, 209, 459, 363, 433, 452, 450, 454, 367, 606, 477, 741, 432, 353, 368, 267, 361, 716, 273, 583, 453, 166, 510, 172, 201, 629, 274, 191, 568, 639, 302, 298, 634, 387, 643, 350, 587, 560, 612, 565, 600, 788, 487, 672, 512, 1015, 321, 333, 357, 854, -125, 413, 474, 712, 17, -151, 564, 285, 270, -241, 971, 889, 489, 220, 510, 896, 549, 924, 327, 825, 290, 911, 540, 1108, 158, 805, 199, 957, 511, 730, 100, 874, 13, 791, 435, 632, 676, 972, 249, 900, 467, 1218, 781, 1074, 585, 785, -23, 669, 267, 1043, 619, 1084, 615, 1145, 622, 905, 916, 1049, 80, 331, 584, 1075, 89, 639, 988, 961, 770, 720, 798, 699, 492, 447, 899, 627, 271, 1188, 725, 1333, 87, 603, 832, 1603, 616, 1127, 890, 1505, 1000, 1156, 866, 1009, 995, 827, 1149, 858, 817, 1450, 773, 1320, 500, 1389, 312, 1153, -20, 1084, 64, 1283, 2, 1172, 399, 1869, 514, 1706, 502, 1636, 886, 1522, 416, 600, 1131, 1350, 1275, 1390, 889, 1795, 914, 1766, 227, 1183, 1250, 1826, 505, 1854, 919, 2353, -199, 431, 152, 1735, -213, -28, 392, 1334, -153, -52, 978, 1151, -323, -400, 813, 1703, -136, 84, 1449, 2015, -331, -143, -137, 1192, -256, 534, -157, 1031, -307, -439, 542, 731, -329, -420, -97, 616, -362, -168, -322, 366, -247, -110, -211, 89, -196, -309, 20, 59, -364, -463, -286, 89, -336, 175, -432, 141, -379, -190, -434, -196, -79, 150, -278, -227, -280, 166, -555, -422, -155, 541, -366, 54, -29, -83, -301, -774, 186, 628, -397, -264, 242, 293, -197, -585, 124, 410, 53, -133, 10, 340, -570, -1065, 65, -446, 68, -493, 383, 937, -357, -711, -359, -250, -677, -1068, 292, -26, 363, 6, 607, 1313, -127, -10, 1513, 1886, 713, 972, 1469, 2181, 1443, 2016 }; /* 2nd LSF quantizer (MR122) */ __device__ static const Word32 dico2_lsf_5[DICO2_SIZE_5 * 4] = { -1631, -1600, -1796, -2290, -1027, -1770, -1100, -2025, -1277, -1388, -1367, -1534, -947, -1461, -972, -1524, -999, -1222, -1020, -1172, -815, -987, -992, -1371, -1216, -1006, -1289, -1094, -744, -1268, -755, -1293, -862, -923, -905, -984, -678, -1051, -685, -1050, -1087, -985, -1062, -679, -989, -641, -1127, -976, -762, -654, -890, -806, -833, -1091, -706, -629, -621, -806, -640, -812, -775, -634, -779, -543, -996, -565, -1075, -580, -546, -611, -572, -619, -760, -290, -879, -526, -823, -462, -795, -253, -553, -415, -589, -439, -533, -340, -692, -935, -505, -772, -702, -1131, -263, -306, -971, -483, -445, -74, -555, -548, -614, -129, -693, -234, -396, -246, -475, -250, -265, -404, -376, -514, -417, -510, -300, -313, -334, -664, -463, -814, -386, -704, -337, -615, -234, -201, -233, -239, -167, -567, -203, -619, -147, -415, -115, -352, -166, -750, -171, -761, -270, -879, -264, -903, -367, -744, 43, -475, 14, -653, 43, -670, 11, -448, -59, -521, -126, -119, -155, -613, -42, -863, -27, -931, 136, -483, 183, -468, 55, -298, 55, -304, 313, -609, 313, -720, 322, -167, 100, -541, -3, -119, -111, -187, 233, -236, 260, -234, 26, -165, 134, -45, -40, -549, 360, -203, 378, -388, 450, -383, 275, 20, 182, -103, 246, -111, 431, 37, 462, -146, 487, -157, -284, -59, 503, -184, 24, 53, -3, 54, 122, 259, 333, 66, 484, 104, 436, 68, 195, 116, 190, 206, 269, -9, 482, 352, 382, 285, 399, 277, 452, 256, 69, 186, 13, 297, -13, 259, -95, 30, 56, 394, 196, 425, 205, 456, 
281, 577, 15, 191, 375, 290, 407, 576, -56, 227, 544, 405, 0, 549, -92, 528, -229, 351, -245, 338, -362, 435, 167, 527, -75, 302, 91, 824, 129, 599, 496, 679, 186, 749, 153, 737, -281, 600, -348, 615, -236, 769, 41, 881, 38, 890, -220, 841, -357, 883, -393, 903, -634, 474, -444, 850, -175, 678, -493, 242, -519, 785, -714, 582, -541, 366, -543, 434, -597, 500, -765, 222, -702, 917, -743, 962, -869, 501, -899, 548, -379, 200, -435, 157, -819, 214, -861, 157, -614, 40, -632, 94, -883, -54, -741, 516, -501, 298, -614, -171, -870, -161, -865, -23, -818, 93, -1015, -267, -662, -359, -549, 2, -442, -121, -377, 0, -227, 33, -414, -126, -129, 212, -934, 34, -1082, -282, -1119, -268, -710, -825, -420, -191, -1076, -928, -917, -93, -628, -358, 97, 7, -206, -393, -101, 24, -203, 38, -168, 83, -599, -423, -279, 426, -700, 118, -75, 206, -981, -673, -680, 417, -367, 37, -279, 474, -129, -318, 319, 296, -626, -39, 343, 602, -696, -39, -303, 940, 104, 233, -380, 137, -36, 269, -75, -214, 120, 43, -529, -477, 459, 164, -202, -229, -49, -167, 609, 792, 98, -220, 915, 148, 293, 283, 869, 91, 575, 394, 326, -78, 717, 67, 365, -323, 616, -36, 731, 27, 619, 238, 632, 273, 448, 99, 801, 476, 869, 273, 685, 64, 789, 72, 1021, 217, 793, 459, 734, 360, 646, 480, 360, 322, 429, 464, 638, 430, 756, 363, 1000, 404, 683, 528, 602, 615, 655, 413, 946, 687, 937, 602, 904, 604, 555, 737, 786, 662, 467, 654, 362, 589, 929, 710, 498, 478, 415, 420, 693, 883, 813, 683, 781, 925, 913, 939, 726, 732, 491, 853, 531, 948, 734, 963, 315, 808, 761, 755, 1144, 760, 655, 1076, 826, 1057, 1091, 838, 1003, 808, 1047, 1133, 659, 1101, 992, 1050, 1074, 1075, 971, 694, 1226, 1054, 571, 841, 884, 1404, 1379, 1096, 1080, 861, 1231, 735, 1284, 760, 1272, 991, 1367, 1053, 1257, 700, 1050, 534, 988, 453, 1264, 599, 1140, 679, 1621, 815, 1384, 521, 1317, 393, 1564, 805, 1448, 686, 1068, 648, 875, 307, 1083, 361, 1047, 317, 1417, 964, 675, 571, 1152, 79, 1114, -47, 1530, 311, 1721, 314, 1166, 689, 514, -94, 349, 282, 1412, 328, 1025, 487, -65, 57, 805, 970, 36, 62, 769, -263, 791, -346, 637, 699, -137, 620, 534, 541, -735, 194, 711, 300, -268, -863, 926, 769, -708, -428, 506, 174, -892, -630, 435, 547, -1435, -258, 621, 471, -1018, -1368, -393, 521, -920, -686, -25, 20, -982, -1156, 340, 9, -1558, -1135, -352, 48, -1579, -402, -887, 6, -1156, -888, -548, -352, -1643, -1168, -159, 610, -2024, -963, -225, 193, -1656, -1960, -245, -493, -964, -1680, -936, -635, -1299, -1744, -1388, -604, -1540, -835, -1397, -135, -1588, -290, -1670, -712, -2011, -1632, -1663, -27, -2258, -811, -1157, 184, -1265, 189, -1367, 586, -2011, 201, -790, 712, -1210, 3, -1033, 808, -1251, 830, -111, 635, -1636, 447, -463, -949, -445, -928, -504, -1162, -501, -1211, 144, -351, -372, -1052, -283, -1059, -279, -1123, -575, -1438, -587, -1614, -935, -984, 229, 690, -921, -719, -403, 1362, -685, -465, 874, 397, -509, -46, 317, 1334, -485, 456, 813, 439, -411, 339, 898, 1067, -425, 46, 1441, 497, -909, -800, 1465, 1046, -254, -321, 1430, 1165, 68, 350, 1034, 666, 370, 11, 1311, 790, 143, 232, 1041, 1562, -114, 663, 1616, 1078, 454, 579, 1275, 1040, -76, 909, 752, 1067, 153, 512, 348, 1214, 614, 385, 1843, 808, 269, 1034, 203, 1086, 652, 1017, 1783, 1130, 429, 1327, 387, 1384, -49, 1183, -72, 1215, -416, 1001, 544, 1749, -352, 1223, -502, 1199, -589, 569, -227, 1630, -142, 1578, -230, 1715, -714, 1288, -838, 1398, 1131, 1357, -208, 1232, 437, 965, -929, 818, 811, 1410, 859, 1507, 164, 1212, 1387, 1793, 484, 1874, 456, 2063, 996, 1170, 1326, 1402, 1316, 1360, 1135, 1262, 1234, 
1618, 1361, 1768, 1421, 1227, 1584, 1347, 854, 672, 1685, 1566, 1139, 1270, 2016, 1825, 1773, 1581, 1532, 1460, 1487, 946, 1659, 1021, 1744, 1212, 1392, 977, 1772, 1161, 1826, 1164, 1718, 1429, 1973, 1591, 1185, 864, 2132, 1061, 1799, 814, 1838, 757, 2104, 1315, 2054, 1258, 2113, 915, 2331, 930, 1467, 1147, 2590, 1439, 2245, 1744, 2090, 1620, 2358, 1454, 2666, 1506, 1876, 1837, 2070, 1975, 1739, 1577, 682, 1289, 1584, 2045, 1454, 2098, 2498, 2004, 2711, 2066, 726, 1588, 2756, 2336, 228, 847, 2456, 1659, 36, 301, 1942, 1957, -446, -96, 2154, 1396, 1533, 1101, 14, 608, -923, -732, 1383, 1982, 1345, 952, -680, 321, 1281, 1268, -1594, 365, 941, 946, -1737, -822, 2374, 2787, 1821, 2788 }; /* 3rd LSF quantizer (MR122) */ __device__ static const Word32 dico3_lsf_5[DICO3_SIZE_5 * 4] = { -1812, -2275, -1879, -2537, -1640, -1848, -1695, -2004, -1220, -1912, -1221, -2106, -1559, -1588, -1573, -1556, -1195, -1615, -1224, -1727, -1359, -1151, -1616, -1948, -1274, -1391, -1305, -1403, -1607, -1179, -1676, -1311, -1443, -1478, -1367, -898, -1256, -1059, -1331, -1134, -982, -1133, -1149, -1504, -1080, -1308, -1020, -1183, -980, -1486, -967, -1495, -988, -922, -1047, -1077, -838, -1179, -858, -1222, -1131, -1041, -1064, -767, -872, -1157, -701, -880, -706, -906, -774, -1016, -578, -1080, -801, -1478, -591, -1111, -592, -1146, -713, -1388, -640, -1376, -597, -1059, -416, -903, -686, -832, -661, -708, -444, -868, -490, -921, -374, -776, -619, -1170, -585, -549, -769, -795, -435, -659, -530, -741, -498, -837, -357, -597, -279, -871, -243, -887, -282, -665, -280, -667, -165, -560, -394, -903, -362, -410, -448, -583, -409, -574, -313, -357, -637, -548, -570, -436, -896, -504, -382, -757, -58, -481, -165, -618, -191, -374, -234, -382, -222, -683, -25, -480, -418, -359, -730, -353, -324, -157, -432, -322, -394, -303, -284, -104, -601, -289, -556, -196, -588, -150, -659, -608, -473, -24, -68, -448, -474, -8, -506, -45, -748, -184, -844, -252, -901, -91, -584, -97, -652, 138, -764, -131, -678, -12, -670, 165, -259, -3, -840, -107, -909, 37, -992, 44, -854, -415, -839, 13, -1001, -271, -1026, -309, -798, -478, -832, -488, -943, 168, -1112, -387, -1185, -101, -1183, -40, -941, -316, -1030, -770, -1044, -625, -1081, -538, -1224, -299, -1312, -436, -1197, -663, -1167, -161, -1216, -690, -1237, -831, -1432, -720, -1403, -493, -898, -740, -922, -801, -1102, -402, -1579, -964, -1061, -638, -1269, -1438, -1499, -934, -1502, -895, -1598, -564, -1723, -717, -606, -597, -1166, -1085, -1369, -468, -1946, -1493, -1838, -953, -1932, -931, -1499, -188, -1635, -421, -1457, -338, -1448, -22, -1942, -422, -2006, -249, -496, -114, -1910, -755, -1289, 174, -1451, -109, -482, -257, -1221, -508, -1617, 151, -1694, 208, -654, 107, -1651, 29, -1141, 279, -1215, 306, -1228, -506, -730, -175, -1236, -101, -969, 551, -870, 278, -823, 315, -563, 376, -1051, 228, -507, 280, -599, 281, -758, 253, -305, 379, -755, -134, -611, 660, -824, 536, -817, 646, -413, 49, -341, 177, -453, 526, -482, 589, -71, 339, -657, 264, -244, 295, -237, 315, -387, 569, -506, -9, -377, 14, -160, 661, -216, 40, -308, -46, 95, 214, -242, 167, -86, 192, -56, 27, -76, 31, 36, 309, -106, -182, -113, 74, -441, -22, 23, 139, 81, -11, 44, 15, -87, -137, -118, -207, -158, -58, 272, -92, -156, -441, 8, -136, 128, -221, 101, -218, 40, -197, -76, -456, 9, -445, 33, -423, 226, 60, 73, -222, 156, -399, 280, -318, 245, -341, 166, -499, 339, -190, 327, -219, 325, -137, -89, -596, 100, -627, 144, -677, 487, 28, 252, -391, 214, -41, 282, -28, 99, -286, 331, 49, 459, -388, 565, -369, 
436, 28, 336, -9, 397, -167, 618, 34, 596, -17, 561, -140, 299, 79, 522, 125, 203, 2, 244, 288, 255, 211, 175, 82, 596, 187, 517, 108, 381, 255, 365, 297, 497, 352, 327, -82, 25, 210, 371, 245, 261, 3, 545, 449, 140, 294, 44, 295, 212, 347, 244, 494, 331, 528, 201, 307, 349, 411, 613, 284, 614, 413, 464, 322, 624, 397, 97, 200, -160, 384, 149, 362, 495, 525, 269, 585, 33, 491, -121, 433, 427, 611, 498, 516, 171, 443, 497, 666, 440, 275, 566, 575, 146, 639, 155, 670, -33, 173, 212, 696, -166, 601, -191, 695, -489, 503, 175, 742, 214, 476, 372, 1083, 578, 530, 586, 777, 425, 874, 315, 841, 374, 848, -165, 565, 35, 991, -39, 1062, 329, 712, 786, 840, 645, 795, 661, 676, 571, 918, 632, 1079, 673, 817, 318, 388, 874, 1012, 564, 848, 880, 620, 557, 479, 671, 453, 692, 468, 840, 642, 844, 645, 506, 428, 897, 567, 837, 387, 962, 499, 691, 561, 939, 926, 783, 296, 790, 268, 1028, 530, 874, 329, 548, 143, 675, 291, 503, 66, 1041, 359, 786, 97, 805, 33, 837, 470, 511, 49, 1092, 327, 1174, 323, 3, 242, 872, 474, 689, 429, 1329, 678, 1042, 620, 1109, 664, 321, 193, 889, 950, 1153, 874, 893, 635, 877, 862, 948, 913, 1293, 665, 1320, 639, 997, 793, 1402, 1030, 1176, 1012, 1110, 959, 1410, 925, 1403, 915, 543, 862, 1116, 1222, 835, 1190, 835, 1190, 959, 1148, 1147, 1376, 1300, 1193, 1415, 1231, 1335, 1341, 746, 1092, 1711, 1283, 1389, 1073, 1334, 1566, 1153, 1475, 1645, 1137, 1825, 1220, 1056, 1382, 1521, 1730, 1632, 1545, 1620, 1542, 855, 1596, 865, 1667, 693, 885, 1716, 1519, 1167, 1296, 2209, 1760, 1952, 1493, 2020, 1482, 1534, 1866, 1694, 2008, 1566, 748, 1761, 825, 294, 1392, 1084, 2058, 621, 1315, 365, 1287, 198, 1028, 488, 1408, 249, 403, 1014, 1561, 324, 363, 1645, 1044, 193, 367, 2034, 1859, -251, 579, 750, 994, -243, 30, 1325, 879, -28, -169, 624, 917, -453, 159, 186, 1370, -614, 6, 537, 392, -94, -291, 781, 229, -128, -298, 245, 491, -701, -648, 972, 789, -501, -640, 178, 255, -365, -390, -255, 317, -958, -294, -191, 228, -775, -447, 157, -237, -657, -720, -407, 92, -117, -611, 334, -230, -679, -1084, -144, -317, -901, -861, -738, -360, -85, -727, -90, -787, 100, -22, -391, -263, -56, -73, -337, -754, 5, -189, -706, -624, 89, -344, -135, -1113, -353, -237, -684, -1135, -275, -1102, -269, -1203, 152, 145, -722, -1232, 49, 80, -1248, -776, -248, 391, -732, -547, 469, 218, -255, -864, 69, 366, -166, -485, -688, 191, -1212, -1196, -170, -169, -1308, -1631, 321, 470, -1419, -1243, -64, 272, -1361, -248, 492, 565, -721, -609, 195, 485, -573, -133, 427, 202, -171, -118, 199, 575, 2, -31, 694, 755, -1366, -39, 552, 557, -489, 271, 680, 537, 13, -453, 855, 954, -133, -52, -81, 738, -1169, 637, 1055, 1059, -95, 676, 1259, 1081, 489, 305, -449, 954, -534, 996, -969, 866, -1058, 1059, -1294, 618, -1416, 617, -458, 1366, -159, 1821, -774, -528, -14, 1110, -1202, -901, -772, 433, -1256, -1255, -1011, -302, -602, -585, -759, -1618, -760, -1549, -840, -1921, -816, -539, -1769, -2235, -227, -36, -2034, -1831, -2107, -1126, -2471, -1816, -1470, 252, -2701, -415, -571, -467, 1509, 1554, 2180, 1975, 2326, 2020 }; /* 4th LSF quantizer (MR122) */ __device__ static const Word32 dico4_lsf_5[DICO4_SIZE_5 * 4] = { -1857, -1681, -1857, -1755, -2056, -1150, -2134, -1654, -1619, -1099, -1704, -1131, -1345, -1608, -1359, -1638, -1338, -1293, -1325, -1265, -1664, -1649, -1487, -851, -1346, -1832, -1413, -2188, -1282, -681, -1785, -1649, -966, -1082, -1183, -1676, -1054, -1073, -1142, -1158, -1207, -744, -1274, -997, -934, -1383, -927, -1416, -1010, -1305, -783, -955, -1049, -900, -993, -817, -737, -823, -972, -1189, -738, 
-1094, -738, -1154, -784, -801, -810, -786, -892, -520, -1000, -818, -644, -965, -577, -882, -541, -694, -671, -917, -595, -642, -646, -615, -956, -621, -925, -515, -727, -483, -815, -485, -840, -578, -440, -713, -578, -325, -657, -670, -386, -570, -441, -666, -514, -787, -392, -529, -522, -453, -487, -423, -616, -585, -617, -157, -662, -268, -680, -348, -322, -323, -632, -444, -304, -430, -332, -458, -277, -468, -659, -793, -319, -636, -227, -554, -373, -347, -334, -210, -456, -192, -530, -242, -216, -198, -366, -370, -338, -161, -409, -748, -107, -380, -294, -643, -223, -665, -234, -741, -141, -496, -130, -510, -139, -327, -172, -305, -306, -580, -164, -263, -262, -172, -67, -402, 31, -366, -10, -436, -86, -527, 71, -377, -22, -609, -12, -678, -67, -319, 63, -191, 35, -181, -39, -242, 126, -167, -140, -544, 155, -297, 174, -297, 38, -8, 117, -380, 197, -452, 240, -522, 223, -103, 110, -187, 87, -155, 169, -47, 157, 26, -83, -100, 128, 80, 209, -62, 6, 7, 22, 5, 318, -20, 248, -45, -200, -63, 156, -69, 250, -183, 369, -126, -113, -76, -142, -122, -64, -254, -31, 35, -177, -71, -7, 171, 93, 27, 108, 212, -330, -209, -123, -70, -279, 95, -96, 20, -188, -61, -314, 87, -300, -78, -354, -134, 11, 122, -140, 122, -275, 152, -293, 140, -82, 138, -321, -111, -480, -156, -359, 76, -254, -40, -635, -96, -522, 79, -507, 8, -268, 303, -539, 68, -446, 61, -522, 306, 111, 189, -435, 122, -379, 166, -571, -398, -632, -74, -747, -95, -455, 194, -952, 83, -798, 192, -755, 192, -781, -162, -619, 234, -663, -297, -488, -109, -964, -132, -838, -68, -843, 58, -1112, -86, -805, -299, -944, -253, -778, -50, -965, -549, -352, -98, -992, -343, -1117, -315, -1117, -307, -1155, -374, -637, -230, -1166, -43, -1299, -100, -925, -393, -1274, -600, -689, -130, -1479, -312, -1321, -254, -1464, -442, -1292, -613, -1261, -503, -1501, -368, -1322, 26, -1432, -66, -1743, -161, -1644, -467, -1760, -548, -1393, -568, -1556, -871, -1495, -1034, -1387, -571, -1917, -528, -1783, -123, -1897, -231, -2054, -323, -2052, -906, -1976, -567, -1917, -620, -2047, -989, -1077, -370, -2031, -704, -2355, -749, -2740, -1089, -1909, 159, -2012, 248, -626, -123, -2339, -962, -669, -408, -1379, -1174, -452, -364, -1044, -735, -132, 183, -1620, -752, -547, -307, -777, -1261, -98, 41, -880, -1091, -257, 97, -1602, -1833, 31, -26, -644, -561, -180, -546, -385, -1095, -410, -802, -414, -827, -457, -970, -490, -1109, -215, -916, -144, -937, -493, -1269, -517, -1507, 181, 101, -332, -889, -836, -937, -559, -429, -629, -547, -183, -337, -545, -82, -250, -286, 5, -132, -348, -252, -293, -472, -158, 100, -29, 197, -236, -424, -861, -213, -140, -7, -427, -443, 187, -97, -684, -736, -293, 258, -368, -152, -150, 392, -609, 175, -142, 299, -138, 152, -119, 329, -486, -52, 293, 198, -183, 117, 175, 331, -58, -274, 231, 300, -288, 330, -305, 372, -111, 409, -9, 423, 83, 256, 67, 367, -19, 248, 91, 113, -35, 406, -191, 154, 238, 296, 5, 197, 141, 221, 313, 198, 211, 421, 244, 334, 88, 426, -243, 454, 202, 552, -5, 403, 291, 185, 219, 301, 251, 138, 128, 69, 197, 288, -140, -61, 188, 361, 197, 598, 442, 273, 290, 143, 472, 482, 157, 370, 415, 321, 372, 385, 402, 552, 155, 24, 550, 263, -11, 21, 360, 227, 147, -254, 424, 97, 366, -13, 375, 141, 449, 232, 396, 507, 474, 272, 701, 324, 362, -47, 587, 148, 543, 69, 400, -51, 561, 59, 220, -10, 352, 147, 206, 211, 653, 185, 563, 297, 565, 284, 594, 121, 766, 192, 398, 118, 642, 434, 233, 264, 481, 467, 129, -165, 699, 239, 90, 26, 342, 474, -55, 27, 388, 94, -172, 0, 725, 379, -60, 337, 370, 465, 95, 319, 806, 595, 
78, 260, 497, 851, 210, 560, 458, 574, -464, 202, 497, 625, -202, 152, 48, 712, -20, 566, 100, 715, 455, 468, 411, 605, 319, 646, 195, 615, 401, 538, 680, 739, 201, 667, 434, 954, 454, 425, 646, 491, 606, 681, 416, 508, 497, 822, 426, 815, 660, 647, 628, 716, 697, 466, 618, 457, 685, 460, 365, 309, 721, 567, 836, 601, 609, 300, 825, 459, 943, 687, 681, 533, 915, 598, 591, 243, 876, 451, 874, 420, 786, 317, 732, 220, 922, 317, 1108, 367, 531, 466, 1028, 649, 1053, 615, 1034, 553, 829, 602, 1021, 799, 927, 803, 878, 763, 799, 496, 1373, 773, 585, 770, 803, 930, 1099, 793, 1222, 862, 1209, 895, 1025, 727, 772, 845, 1172, 1115, 867, 1021, 830, 1013, 841, 910, 506, 703, 1239, 1077, 620, 819, 1196, 1083, 1155, 1081, 1142, 907, 1547, 1121, 1309, 648, 1343, 612, 1484, 988, 1479, 937, 985, 1328, 955, 1341, 429, 910, 841, 1338, 564, 1179, 412, 1156, 1427, 1320, 1434, 1330, 640, 760, 1726, 1410, 190, 555, 1073, 1005, 426, 257, 839, 980, 235, 231, 1520, 1167, 109, 293, 1014, 1569, 305, 142, 1148, 539, -291, -108, 1213, 972, 22, -216, 667, 828, -482, 438, 453, 1431, -581, -422, 789, 387, -358, -454, 174, 780, -36, -372, 390, -134, -629, 160, -306, 751, -1258, -331, 177, 522, -248, 574, -251, 639, -531, 407, -596, 394, -419, 789, -617, 801, -986, 399, -857, 727, -7, 518, -703, 310, -1143, -24, -1002, 287, -960, 363, -1299, 312, -1534, 245, -1557, 305, 28, 153, -859, -175, -33, 332, -1398, -154, 212, 410, -593, -197, -1092, -704, -904, -65, 282, 367, -918, -686, 345, 93, -258, -357, 696, 644, -693, -28, 448, 493, -273, 193, 527, 546, -243, -513, 384, -136, 273, -353, 512, -142, 537, -198, 941, 750, 83, 248, 578, 861, -56, 592, 842, 44, 892, 24, 33, 890, -16, 982, 831, 1398, 1535, 1898, 1716, 1376, 1948, 1465 }; /* 5th LSF quantizer (MR122) */ __device__ static const Word32 dico5_lsf_5[DICO5_SIZE_5 * 4] = { -1002, -929, -1096, -1203, -641, -931, -604, -961, -779, -673, -835, -788, -416, -664, -458, -766, -652, -521, -662, -495, -1023, -509, -1023, -428, -444, -552, -368, -449, -479, -211, -1054, -903, -316, -249, -569, -591, -569, -275, -541, -191, -716, -188, -842, -264, -333, -248, -318, -228, -275, 1, -567, -228, -115, -221, -238, -374, -197, -507, -222, -579, -258, -432, -61, -244, -345, 2, -338, 39, -215, -169, -58, 0, -56, -6, -203, -131, 1, -186, -5, -211, 6, -380, 11, -418, -116, 131, -134, 113, 89, -4, 71, -2, -19, -192, 262, 24, 189, 151, -133, -109, 186, -153, 166, -219, 37, 139, 193, 171, 337, 124, 158, -61, 141, 226, -13, 190, 231, 34, 354, 109, 316, 201, 244, 164, 330, -85, 390, -84, 254, 327, 257, 335, 491, 147, 476, 105, 54, 77, 437, 370, 421, 314, 449, 342, 329, 126, 673, 292, 571, 388, 243, 193, 653, 320, 621, 280, 194, 380, 517, 581, 45, 323, 111, 422, 489, 395, 734, 534, 622, 546, 486, 502, 318, 572, 189, 550, 385, 422, -157, 153, -125, 382, -197, 386, -263, 334, 228, 697, -188, 1, 51, 297, -507, 213, -376, 397, -24, 255, -547, 89, -502, -94, 387, 179, -620, 68, -684, 112, -642, -350, -260, 172, -438, -324, 264, 648, -964, -4, -1121, 7, -134, 134, -1133, -306, 143, 96, -420, -497, -1221, -350, -1527, -685, -161, 72, 873, 691, 732, 283, 921, 353, 334, 475, 1095, 821, 864, 524, 843, 497, 714, 711, 788, 750, 1076, 714, 1204, 753 }; /* Scaling factors for the lsp variability operation */ __device__ static const Word16 lsf_hist_mean_scale[M] = { 20000, 20000, 20000, 20000, 20000, 18000, 16384, 8192, 0, 0 }; /* * The tables contains the following data: * * g_pitch (Q14), * g_fac (Q12), (g_code = g_code0*g_fac), * qua_ener_MR122 (Q10), (log2(g_fac)) * qua_ener (Q10) (20*log10(g_fac)) * * The 
log2() and log10() values are calculated on the fixed point value * (g_fac Q12) and not on the original floating point value of g_fac * to make the quantizer/MA predictdor use corresponding values. */ #define MR475_VQ_SIZE 256 /* The table contains the following data: * * g_pitch(0) (Q14) for sub- * g_fac(0) (Q12) frame 0 and 2 * g_pitch(1) (Q14) for sub- * g_fac(2) (Q12) frame 1 and 3 * */ __device__ static const Word32 table_gain_MR475[MR475_VQ_SIZE * 4] = { /* * g_pit(0), * g_fac(0), * g_pit(1), * g_fac(1) */ 812, 128, 542, 140, 2873, 1135, 2266, 3402, 2067, 563, 12677, 647, 4132, 1798, 5601, 5285, 7689, 374, 3735, 441, 10912, 2638, 11807, 2494, 20490, 797, 5218, 675, 6724, 8354, 5282, 1696, 1488, 428, 5882, 452, 5332, 4072, 3583, 1268, 2469, 901, 15894, 1005, 14982, 3271, 10331, 4858, 3635, 2021, 2596, 835, 12360, 4892, 12206, 1704, 13432, 1604, 9118, 2341, 3968, 1538, 5479, 9936, 3795, 417, 1359, 414, 3640, 1569, 7995, 3541, 11405, 645, 8552, 635, 4056, 1377, 16608, 6124, 11420, 700, 2007, 607, 12415, 1578, 11119, 4654, 13680, 1708, 11990, 1229, 7996, 7297, 13231, 5715, 2428, 1159, 2073, 1941, 6218, 6121, 3546, 1804, 8925, 1802, 8679, 1580, 13935, 3576, 13313, 6237, 6142, 1130, 5994, 1734, 14141, 4662, 11271, 3321, 12226, 1551, 13931, 3015, 5081, 10464, 9444, 6706, 1689, 683, 1436, 1306, 7212, 3933, 4082, 2713, 7793, 704, 15070, 802, 6299, 5212, 4337, 5357, 6676, 541, 6062, 626, 13651, 3700, 11498, 2408, 16156, 716, 12177, 751, 8065, 11489, 6314, 2256, 4466, 496, 7293, 523, 10213, 3833, 8394, 3037, 8403, 966, 14228, 1880, 8703, 5409, 16395, 4863, 7420, 1979, 6089, 1230, 9371, 4398, 14558, 3363, 13559, 2873, 13163, 1465, 5534, 1678, 13138, 14771, 7338, 600, 1318, 548, 4252, 3539, 10044, 2364, 10587, 622, 13088, 669, 14126, 3526, 5039, 9784, 15338, 619, 3115, 590, 16442, 3013, 15542, 4168, 15537, 1611, 15405, 1228, 16023, 9299, 7534, 4976, 1990, 1213, 11447, 1157, 12512, 5519, 9475, 2644, 7716, 2034, 13280, 2239, 16011, 5093, 8066, 6761, 10083, 1413, 5002, 2347, 12523, 5975, 15126, 2899, 18264, 2289, 15827, 2527, 16265, 10254, 14651, 11319, 1797, 337, 3115, 397, 3510, 2928, 4592, 2670, 7519, 628, 11415, 656, 5946, 2435, 6544, 7367, 8238, 829, 4000, 863, 10032, 2492, 16057, 3551, 18204, 1054, 6103, 1454, 5884, 7900, 18752, 3468, 1864, 544, 9198, 683, 11623, 4160, 4594, 1644, 3158, 1157, 15953, 2560, 12349, 3733, 17420, 5260, 6106, 2004, 2917, 1742, 16467, 5257, 16787, 1680, 17205, 1759, 4773, 3231, 7386, 6035, 14342, 10012, 4035, 442, 4194, 458, 9214, 2242, 7427, 4217, 12860, 801, 11186, 825, 12648, 2084, 12956, 6554, 9505, 996, 6629, 985, 10537, 2502, 15289, 5006, 12602, 2055, 15484, 1653, 16194, 6921, 14231, 5790, 2626, 828, 5615, 1686, 13663, 5778, 3668, 1554, 11313, 2633, 9770, 1459, 14003, 4733, 15897, 6291, 6278, 1870, 7910, 2285, 16978, 4571, 16576, 3849, 15248, 2311, 16023, 3244, 14459, 17808, 11847, 2763, 1981, 1407, 1400, 876, 4335, 3547, 4391, 4210, 5405, 680, 17461, 781, 6501, 5118, 8091, 7677, 7355, 794, 8333, 1182, 15041, 3160, 14928, 3039, 20421, 880, 14545, 852, 12337, 14708, 6904, 1920, 4225, 933, 8218, 1087, 10659, 4084, 10082, 4533, 2735, 840, 20657, 1081, 16711, 5966, 15873, 4578, 10871, 2574, 3773, 1166, 14519, 4044, 20699, 2627, 15219, 2734, 15274, 2186, 6257, 3226, 13125, 19480, 7196, 930, 2462, 1618, 4515, 3092, 13852, 4277, 10460, 833, 17339, 810, 16891, 2289, 15546, 8217, 13603, 1684, 3197, 1834, 15948, 2820, 15812, 5327, 17006, 2438, 16788, 1326, 15671, 8156, 11726, 8556, 3762, 2053, 9563, 1317, 13561, 6790, 12227, 1936, 8180, 3550, 13287, 1778, 16299, 6599, 
16291, 7758, 8521, 2551, 7225, 2645, 18269, 7489, 16885, 2248, 17882, 2884, 17265, 3328, 9417, 20162, 11042, 8320, 1286, 620, 1431, 583, 5993, 2289, 3978, 3626, 5144, 752, 13409, 830, 5553, 2860, 11764, 5908, 10737, 560, 5446, 564, 13321, 3008, 11946, 3683, 19887, 798, 9825, 728, 13663, 8748, 7391, 3053, 2515, 778, 6050, 833, 6469, 5074, 8305, 2463, 6141, 1865, 15308, 1262, 14408, 4547, 13663, 4515, 3137, 2983, 2479, 1259, 15088, 4647, 15382, 2607, 14492, 2392, 12462, 2537, 7539, 2949, 12909, 12060, 5468, 684, 3141, 722, 5081, 1274, 12732, 4200, 15302, 681, 7819, 592, 6534, 2021, 16478, 8737, 13364, 882, 5397, 899, 14656, 2178, 14741, 4227, 14270, 1298, 13929, 2029, 15477, 7482, 15815, 4572, 2521, 2013, 5062, 1804, 5159, 6582, 7130, 3597, 10920, 1611, 11729, 1708, 16903, 3455, 16268, 6640, 9306, 1007, 9369, 2106, 19182, 5037, 12441, 4269, 15919, 1332, 15357, 3512, 11898, 14141, 16101, 6854, 2010, 737, 3779, 861, 11454, 2880, 3564, 3540, 9057, 1241, 12391, 896, 8546, 4629, 11561, 5776, 8129, 589, 8218, 588, 18728, 3755, 12973, 3149, 15729, 758, 16634, 754, 15222, 11138, 15871, 2208, 4673, 610, 10218, 678, 15257, 4146, 5729, 3327, 8377, 1670, 19862, 2321, 15450, 5511, 14054, 5481, 5728, 2888, 7580, 1346, 14384, 5325, 16236, 3950, 15118, 3744, 15306, 1435, 14597, 4070, 12301, 15696, 7617, 1699, 2170, 884, 4459, 4567, 18094, 3306, 12742, 815, 14926, 907, 15016, 4281, 15518, 8368, 17994, 1087, 2358, 865, 16281, 3787, 15679, 4596, 16356, 1534, 16584, 2210, 16833, 9697, 15929, 4513, 3277, 1085, 9643, 2187, 11973, 6068, 9199, 4462, 8955, 1629, 10289, 3062, 16481, 5155, 15466, 7066, 13678, 2543, 5273, 2277, 16746, 6213, 16655, 3408, 20304, 3363, 18688, 1985, 14172, 12867, 15154, 15703, 4473, 1020, 1681, 886, 4311, 4301, 8952, 3657, 5893, 1147, 11647, 1452, 15886, 2227, 4582, 6644, 6929, 1205, 6220, 799, 12415, 3409, 15968, 3877, 19859, 2109, 9689, 2141, 14742, 8830, 14480, 2599, 1817, 1238, 7771, 813, 19079, 4410, 5554, 2064, 3687, 2844, 17435, 2256, 16697, 4486, 16199, 5388, 8028, 2763, 3405, 2119, 17426, 5477, 13698, 2786, 19879, 2720, 9098, 3880, 18172, 4833, 17336, 12207, 5116, 996, 4935, 988, 9888, 3081, 6014, 5371, 15881, 1667, 8405, 1183, 15087, 2366, 19777, 7002, 11963, 1562, 7279, 1128, 16859, 1532, 15762, 5381, 14708, 2065, 20105, 2155, 17158, 8245, 17911, 6318, 5467, 1504, 4100, 2574, 17421, 6810, 5673, 2888, 16636, 3382, 8975, 1831, 20159, 4737, 19550, 7294, 6658, 2781, 11472, 3321, 19397, 5054, 18878, 4722, 16439, 2373, 20430, 4386, 11353, 26526, 11593, 3068, 2866, 1566, 5108, 1070, 9614, 4915, 4939, 3536, 7541, 878, 20717, 851, 6938, 4395, 16799, 7733, 10137, 1019, 9845, 964, 15494, 3955, 15459, 3430, 18863, 982, 20120, 963, 16876, 12887, 14334, 4200, 6599, 1220, 9222, 814, 16942, 5134, 5661, 4898, 5488, 1798, 20258, 3962, 17005, 6178, 17929, 5929, 9365, 3420, 7474, 1971, 19537, 5177, 19003, 3006, 16454, 3788, 16070, 2367, 8664, 2743, 9445, 26358, 10856, 1287, 3555, 1009, 5606, 3622, 19453, 5512, 12453, 797, 20634, 911, 15427, 3066, 17037, 10275, 18883, 2633, 3913, 1268, 19519, 3371, 18052, 5230, 19291, 1678, 19508, 3172, 18072, 10754, 16625, 6845, 3134, 2298, 10869, 2437, 15580, 6913, 12597, 3381, 11116, 3297, 16762, 2424, 18853, 6715, 17171, 9887, 12743, 2605, 8937, 3140, 19033, 7764, 18347, 3880, 20475, 3682, 19602, 3380, 13044, 19373, 10526, 23124 }; /* table used in 'high' rates: MR67 MR74 */ #define VQ_SIZE_HIGHRATES 128 __device__ static const Word32 table_gain_highrates[VQ_SIZE_HIGHRATES * 4] = { /* * Note: every 4th value (qua_ener) contains the original values from IS641 * 
to ensure bit-exactness; however, they are not exactly the * rounded value of (20*log10(g_fac)) */ /* * g_pit, * g_fac, * qua_ener_MR122, * qua_ener */ 577, 662, -2692, -16214, 806, 1836, -1185, -7135, 3109, 1052, -2008, -12086, 4181, 1387, -1600, -9629, 2373, 1425, -1560, -9394, 3248, 1985, -1070, -6442, 1827, 2320, -840, -5056, 941, 3314, -313, -1885, 2351, 2977, -471, -2838, 3616, 2420, -777, -4681, 3451, 3096, -414, -2490, 2955, 4301, 72, 434, 1848, 4500, 139, 836, 3884, 5416, 413, 2484, 1187, 7210, 835, 5030, 3083, 9000, 1163, 7002, 7384, 883, -2267, -13647, 5962, 1506, -1478, -8900, 5155, 2134, -963, -5800, 7944, 2009, -1052, -6335, 6507, 2250, -885, -5327, 7670, 2752, -588, -3537, 5952, 3016, -452, -2724, 4898, 3764, -125, -751, 6989, 3588, -196, -1177, 8174, 3978, -43, -260, 6064, 4404, 107, 645, 7709, 5087, 320, 1928, 5523, 6021, 569, 3426, 7769, 7126, 818, 4926, 6060, 7938, 977, 5885, 5594, 11487, 1523, 9172, 10581, 1356, -1633, -9831, 9049, 1597, -1391, -8380, 9794, 2035, -1033, -6220, 8946, 2415, -780, -4700, 10296, 2584, -681, -4099, 9407, 2734, -597, -3595, 8700, 3218, -356, -2144, 9757, 3395, -277, -1669, 10177, 3892, -75, -454, 9170, 4528, 148, 891, 10152, 5004, 296, 1781, 9114, 5735, 497, 2993, 10500, 6266, 628, 3782, 10110, 7631, 919, 5534, 8844, 8727, 1117, 6728, 8956, 12496, 1648, 9921, 12924, 976, -2119, -12753, 11435, 1755, -1252, -7539, 12138, 2328, -835, -5024, 11388, 2368, -810, -4872, 10700, 3064, -429, -2580, 12332, 2861, -530, -3192, 11722, 3327, -307, -1848, 11270, 3700, -150, -904, 10861, 4413, 110, 663, 12082, 4533, 150, 902, 11283, 5205, 354, 2132, 11960, 6305, 637, 3837, 11167, 7534, 900, 5420, 12128, 8329, 1049, 6312, 10969, 10777, 1429, 8604, 10300, 17376, 2135, 12853, 13899, 1681, -1316, -7921, 12580, 2045, -1026, -6179, 13265, 2439, -766, -4610, 14033, 2989, -465, -2802, 13452, 3098, -413, -2482, 12396, 3658, -167, -1006, 13510, 3780, -119, -713, 12880, 4272, 62, 374, 13533, 4861, 253, 1523, 12667, 5457, 424, 2552, 13854, 6106, 590, 3551, 13031, 6483, 678, 4084, 13557, 7721, 937, 5639, 12957, 9311, 1213, 7304, 13714, 11551, 1532, 9221, 12591, 15206, 1938, 11667, 15113, 1540, -1445, -8700, 15072, 2333, -832, -5007, 14527, 2511, -723, -4352, 14692, 3199, -365, -2197, 15382, 3560, -207, -1247, 14133, 3960, -50, -300, 15102, 4236, 50, 298, 14332, 4824, 242, 1454, 14846, 5451, 422, 2542, 15306, 6083, 584, 3518, 14329, 6888, 768, 4623, 15060, 7689, 930, 5602, 14406, 9426, 1231, 7413, 15387, 9741, 1280, 7706, 14824, 14271, 1844, 11102, 13600, 24939, 2669, 16067, 16396, 1969, -1082, -6517, 16817, 2832, -545, -3283, 15713, 2843, -539, -3248, 16104, 3336, -303, -1825, 16384, 3963, -49, -294, 16940, 4579, 165, 992, 15711, 4599, 171, 1030, 16222, 5448, 421, 2537, 16832, 6382, 655, 3945, 15745, 7141, 821, 4944, 16326, 7469, 888, 5343, 16611, 8624, 1100, 6622, 17028, 10418, 1379, 8303, 15905, 11817, 1565, 9423, 16878, 14690, 1887, 11360, 16515, 20870, 2406, 14483, 18142, 2083, -999, -6013, 19401, 3178, -375, -2257, 17508, 3426, -264, -1589, 20054, 4027, -25, -151, 18069, 4249, 54, 326, 18952, 5066, 314, 1890, 17711, 5402, 409, 2461, 19835, 6192, 610, 3676, 17950, 7014, 795, 4784, 21318, 7877, 966, 5816, 17910, 9289, 1210, 7283, 19144, 9290, 1210, 7284, 20517, 11381, 1510, 9089, 18075, 14485, 1866, 11234, 19999, 17882, 2177, 13108, 18842, 32764, 3072, 18494 }; /* table used in 'low' rates: MR475, MR515, MR59 */ #define VQ_SIZE_LOWRATES 64 __device__ static const Word32 table_gain_lowrates[VQ_SIZE_LOWRATES * 4] = { /* * g_pit, * g_fac, * qua_ener_MR122, * qua_ener */ 
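/*
 * Worked example (illustrative only, derived from the Q formats documented
 * above for these gain tables rather than from the reference table-generation
 * code): the first row below is 10813, 28753, 2879, 17333, i.e.
 *   g_pit          = 10813  ->  10813/16384 ~= 0.66      (pitch gain, Q14)
 *   g_fac          = 28753  ->  28753/4096  ~= 7.02      (code gain factor, Q12)
 *   qua_ener_MR122 =  2879  ~=  1024 * log2(7.02)        (Q10)
 *   qua_ener       = 17333  ~=  1024 * 20 * log10(7.02)  (Q10)
 */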
10813, 28753, 2879, 17333, 20480, 2785, -570, -3431, 18841, 6594, 703, 4235, 6225, 7413, 876, 5276, 17203, 10444, 1383, 8325, 21626, 1269, -1731, -10422, 21135, 4423, 113, 683, 11304, 1556, -1430, -8609, 19005, 12820, 1686, 10148, 17367, 2498, -731, -4398, 17858, 4833, 244, 1472, 9994, 2498, -731, -4398, 17530, 7864, 964, 5802, 14254, 1884, -1147, -6907, 15892, 3153, -387, -2327, 6717, 1802, -1213, -7303, 18186, 20193, 2357, 14189, 18022, 3031, -445, -2678, 16711, 5857, 528, 3181, 8847, 4014, -30, -180, 15892, 8970, 1158, 6972, 18022, 1392, -1594, -9599, 16711, 4096, 0, 0, 8192, 655, -2708, -16305, 15237, 13926, 1808, 10884, 14254, 3112, -406, -2444, 14090, 4669, 193, 1165, 5406, 2703, -614, -3697, 13434, 6553, 694, 4180, 12451, 901, -2237, -13468, 12451, 2662, -637, -3833, 3768, 655, -2708, -16305, 14745, 23511, 2582, 15543, 19169, 2457, -755, -4546, 20152, 5079, 318, 1913, 6881, 4096, 0, 0, 20480, 8560, 1089, 6556, 19660, 737, -2534, -15255, 19005, 4259, 58, 347, 7864, 2088, -995, -5993, 11468, 12288, 1623, 9771, 15892, 1474, -1510, -9090, 15728, 4628, 180, 1086, 9175, 1433, -1552, -9341, 16056, 7004, 793, 4772, 14827, 737, -2534, -15255, 15073, 2252, -884, -5321, 5079, 1228, -1780, -10714, 13271, 17326, 2131, 12827, 16547, 2334, -831, -5002, 15073, 5816, 518, 3118, 3932, 3686, -156, -938, 14254, 8601, 1096, 6598, 16875, 778, -2454, -14774, 15073, 3809, -107, -646, 6062, 614, -2804, -16879, 9338, 9256, 1204, 7251, 13271, 1761, -1247, -7508, 13271, 3522, -223, -1343, 2457, 1966, -1084, -6529, 11468, 5529, 443, 2668, 10485, 737, -2534, -15255, 11632, 3194, -367, -2212, 1474, 778, -2454, -14774 }; __device__ static const Word32 inter6[61] = { 29443, 28346, 25207, 20449, 14701, 8693, 3143, -1352, -4402, -5865, -5850, -4673, -2783, -672, 1211, 2536, 3130, 2991, 2259, 1170, 0, -1001, -1652, -1868, -1666, -1147, -464, 218, 756, 1060, 1099, 904, 550, 135, -245, -514, -634, -602, -451, -231, 0, 191, 308, 340, 296, 198, 78, -36, -120, -163, -165, -132, -79, -19, 34, 73, 91, 89, 70, 38, 0 }; /* * window for non-MR122 modesm; uses 40 samples lookahead * used only in BuildCNParam */ __device__ static const Word32 window_200_40[L_WINDOW] = { 2621, 2623, 2629, 2638, 2651, 2668, 2689, 2713, 2741, 2772, 2808, 2847, 2890, 2936, 2986, 3040, 3097, 3158, 3223, 3291, 3363, 3438, 3517, 3599, 3685, 3774, 3867, 3963, 4063, 4166, 4272, 4382, 4495, 4611, 4731, 4853, 4979, 5108, 5240, 5376, 5514, 5655, 5800, 5947, 6097, 6250, 6406, 6565, 6726, 6890, 7057, 7227, 7399, 7573, 7750, 7930, 8112, 8296, 8483, 8672, 8863, 9057, 9252, 9450, 9650, 9852, 10055, 10261, 10468, 10677, 10888, 11101, 11315, 11531, 11748, 11967, 12187, 12409, 12632, 12856, 13082, 13308, 13536, 13764, 13994, 14225, 14456, 14688, 14921, 15155, 15389, 15624, 15859, 16095, 16331, 16568, 16805, 17042, 17279, 17516, 17754, 17991, 18228, 18465, 18702, 18939, 19175, 19411, 19647, 19882, 20117, 20350, 20584, 20816, 21048, 21279, 21509, 21738, 21967, 22194, 22420, 22644, 22868, 23090, 23311, 23531, 23749, 23965, 24181, 24394, 24606, 24816, 25024, 25231, 25435, 25638, 25839, 26037, 26234, 26428, 26621, 26811, 26999, 27184, 27368, 27548, 27727, 27903, 28076, 28247, 28415, 28581, 28743, 28903, 29061, 29215, 29367, 29515, 29661, 29804, 29944, 30081, 30214, 30345, 30472, 30597, 30718, 30836, 30950, 31062, 31170, 31274, 31376, 31474, 31568, 31659, 31747, 31831, 31911, 31988, 32062, 32132, 32198, 32261, 32320, 32376, 32428, 32476, 32521, 32561, 32599, 32632, 32662, 32688, 32711, 32729, 32744, 32755, 32763, 32767, 32767, 32741, 32665, 32537, 32359, 32129, 31850, 
31521, 31143, 30716, 30242, 29720, 29151, 28538, 27879, 27177, 26433, 25647, 24821, 23957, 23055, 22117, 21145, 20139, 19102, 18036, 16941, 15820, 14674, 13505, 12315, 11106, 9879, 8637, 7381, 6114, 4838, 3554, 2264, 971 }; /* comparision optimization tables */ /* definition of bad speech */ __device__ static const UWord8 table_speech_bad[9] = { 0, 0, 1, 1, 0, 0, 0, 1, 0 }; __device__ static const UWord8 table_SID[9] = { 0, 0, 0, 0, 1, 1, 1, 0, 0 }; __device__ static const UWord8 table_DTX[9] = { 0, 0, 0, 0, 1, 1, 1, 1, 0 }; __device__ static const UWord8 table_mute[9] = { 0, 0, 0, 0, 1, 0, 1, 1, 0 }; /* track start positions for fixed codebook routines */ __device__ static const Word8 startPos[16] = { 0, 2, 0, 3, 0, 2, 0, 3, 1, 3, 2, 4, 1, 4, 1, 4 }; __device__ static Float64 Dotproduct40(Float32 *x, Float32 *y) { Float64 acc; acc = x[0] * y[0] + x[1] * y[1] + x[2] * y[2] + x[3] * y[3]; acc += x[4] * y[4] + x[5] * y[5] + x[6] * y[6] + x[7] * y[7]; acc += x[8] * y[8] + x[9] * y[9] + x[10] * y[10] + x[11] * y[11]; acc += x[12] * y[12] + x[13] * y[13] + x[14] * y[14] + x[15] * y[15]; acc += x[16] * y[16] + x[17] * y[17] + x[18] * y[18] + x[19] * y[19]; acc += x[20] * y[20] + x[21] * y[21] + x[22] * y[22] + x[23] * y[23]; acc += x[24] * y[24] + x[25] * y[25] + x[26] * y[26] + x[27] * y[27]; acc += x[28] * y[28] + x[29] * y[29] + x[30] * y[30] + x[31] * y[31]; acc += x[32] * y[32] + x[33] * y[33] + x[34] * y[34] + x[35] * y[35]; acc += x[36] * y[36] + x[37] * y[37] + x[38] * y[38] + x[39] * y[39]; return(acc); } /* * CodAmrReset * * * Parameters: * state B: state structure * mode I: AMR mode * * Function: * Resets state memory * * Returns: * void */ __device__ static void Decoder_amr_reset(Decoder_amrState *state, enum Mode mode) { Word32 i; /* Cb_gain_average_reset */ memset(state->Cb_gain_averState.cbGainHistory, 0, L_CBGAINHIST << 2); state->Cb_gain_averState.hangVar = 0; state->Cb_gain_averState.hangCount = 0; /* Initialize static position */ state->exc = PIT_MAX + L_INTERPOL; /* Static vectors to zero */ memset(state->old_exc, 0, (PIT_MAX + L_INTERPOL) << 2); if (mode != MRDTX) memset(state->mem_syn, 0, M << 2); /* initialize pitch sharpening */ state->sharp = SHARPMIN; state->old_T0 = 40; /* Initialize state->lsp_old [] */ if (mode != MRDTX) { state->lsp_old[0] = 30000; state->lsp_old[1] = 26000; state->lsp_old[2] = 21000; state->lsp_old[3] = 15000; state->lsp_old[4] = 8000; state->lsp_old[5] = 0; state->lsp_old[6] = -8000; state->lsp_old[7] = -15000; state->lsp_old[8] = -21000; state->lsp_old[9] = -26000; } /* Initialize memories of bad frame handling */ state->prev_bf = 0; state->prev_pdf = 0; state->state = 0; state->T0_lagBuff = 40; state->inBackgroundNoise = 0; state->voicedHangover = 0; if (mode != MRDTX) memset(state->excEnergyHist, 0, 9 << 2); memset(state->ltpGainHistory, 0, 9 << 2); if (mode != MRDTX) { state->lsp_avg_st.lsp_meanSave[0] = 1384; state->lsp_avg_st.lsp_meanSave[1] = 2077; state->lsp_avg_st.lsp_meanSave[2] = 3420; state->lsp_avg_st.lsp_meanSave[3] = 5108; state->lsp_avg_st.lsp_meanSave[4] = 6742; state->lsp_avg_st.lsp_meanSave[5] = 8122; state->lsp_avg_st.lsp_meanSave[6] = 9863; state->lsp_avg_st.lsp_meanSave[7] = 11092; state->lsp_avg_st.lsp_meanSave[8] = 12714; state->lsp_avg_st.lsp_meanSave[9] = 13701; } memset(state->lsfState.past_r_q, 0, M << 2); /* Past dequantized lsfs */ state->lsfState.past_lsf_q[0] = 1384; state->lsfState.past_lsf_q[1] = 2077; state->lsfState.past_lsf_q[2] = 3420; state->lsfState.past_lsf_q[3] = 5108; state->lsfState.past_lsf_q[4] = 6742; 
state->lsfState.past_lsf_q[5] = 8122; state->lsfState.past_lsf_q[6] = 9863; state->lsfState.past_lsf_q[7] = 11092; state->lsfState.past_lsf_q[8] = 12714; state->lsfState.past_lsf_q[9] = 13701; for (i = 0; i < 5; i++) { state->ec_gain_p_st.pbuf[i] = 1640; } state->ec_gain_p_st.past_gain_pit = 0; state->ec_gain_p_st.prev_gp = 16384; for (i = 0; i < 5; i++) { state->ec_gain_c_st.gbuf[i] = 1; } state->ec_gain_c_st.past_gain_code = 0; state->ec_gain_c_st.prev_gc = 1; if (mode != MRDTX) { for (i = 0; i < NPRED; i++) { state->pred_state.past_qua_en[i] = MIN_ENERGY; state->pred_state.past_qua_en_MR122[i] = MIN_ENERGY_MR122; } } state->nodataSeed = 21845; /* Static vectors to zero */ memset(state->background_state.frameEnergyHist, 0, L_ENERGYHIST << 2); /* Initialize hangover handling */ state->background_state.bgHangover = 0; /* phDispReset */ memset(state->ph_disp_st.gainMem, 0, PHDGAINMEMSIZE << 2); state->ph_disp_st.prevState = 0; state->ph_disp_st.prevCbGain = 0; state->ph_disp_st.lockFull = 0; state->ph_disp_st.onset = 0; /* assume no onset in start */ if (mode != MRDTX) { state->dtxDecoderState.since_last_sid = 0; state->dtxDecoderState.true_sid_period_inv = 8192; state->dtxDecoderState.log_en = 3500; state->dtxDecoderState.old_log_en = 3500; /* low level noise for better performance in DTX handover cases*/ state->dtxDecoderState.pn_seed_rx = PN_INITIAL_SEED; /* Initialize state->lsp [] */ state->dtxDecoderState.lsp[0] = 30000; state->dtxDecoderState.lsp[1] = 26000; state->dtxDecoderState.lsp[2] = 21000; state->dtxDecoderState.lsp[3] = 15000; state->dtxDecoderState.lsp[4] = 8000; state->dtxDecoderState.lsp[5] = 0; state->dtxDecoderState.lsp[6] = -8000; state->dtxDecoderState.lsp[7] = -15000; state->dtxDecoderState.lsp[8] = -21000; state->dtxDecoderState.lsp[9] = -26000; /* Initialize state->lsp_old [] */ state->dtxDecoderState.lsp_old[0] = 30000; state->dtxDecoderState.lsp_old[1] = 26000; state->dtxDecoderState.lsp_old[2] = 21000; state->dtxDecoderState.lsp_old[3] = 15000; state->dtxDecoderState.lsp_old[4] = 8000; state->dtxDecoderState.lsp_old[5] = 0; state->dtxDecoderState.lsp_old[6] = -8000; state->dtxDecoderState.lsp_old[7] = -15000; state->dtxDecoderState.lsp_old[8] = -21000; state->dtxDecoderState.lsp_old[9] = -26000; state->dtxDecoderState.lsf_hist_ptr = 0; state->dtxDecoderState.log_pg_mean = 0; state->dtxDecoderState.log_en_hist_ptr = 0; /* initialize decoder lsf history */ state->dtxDecoderState.lsf_hist[0] = 1384; state->dtxDecoderState.lsf_hist[1] = 2077; state->dtxDecoderState.lsf_hist[2] = 3420; state->dtxDecoderState.lsf_hist[3] = 5108; state->dtxDecoderState.lsf_hist[4] = 6742; state->dtxDecoderState.lsf_hist[5] = 8122; state->dtxDecoderState.lsf_hist[6] = 9863; state->dtxDecoderState.lsf_hist[7] = 11092; state->dtxDecoderState.lsf_hist[8] = 12714; state->dtxDecoderState.lsf_hist[9] = 13701; for (i = 1; i < DTX_HIST_SIZE; i++) { memcpy(&state->dtxDecoderState.lsf_hist[M * i], &state-> dtxDecoderState.lsf_hist[0], M << 2); } memset(state->dtxDecoderState.lsf_hist_mean, 0, M * DTX_HIST_SIZE << 2); /* initialize decoder log frame energy */ for (i = 0; i < DTX_HIST_SIZE; i++) { state->dtxDecoderState.log_en_hist[i] = state->dtxDecoderState.log_en; } state->dtxDecoderState.log_en_adjust = 0; state->dtxDecoderState.dtxHangoverCount = DTX_HANG_CONST; state->dtxDecoderState.decAnaElapsedCount = 31; state->dtxDecoderState.sid_frame = 0; state->dtxDecoderState.valid_data = 0; state->dtxDecoderState.dtxHangoverAdded = 0; state->dtxDecoderState.dtxGlobalState = DTX; 
state->dtxDecoderState.data_updated = 0; } return; } /* * rx_dtx_handler * * * Parameters: * st->dtxGlobalState I: DTX state * st->since_last_sid B: Frames after last SID frame * st->data_updated I: SID update flag * st->decAnaElapsedCount B: state machine that synchronizes with the GSMEFR txDtx machine * st->dtxHangoverAdded B: DTX hangover * st->sid_frame O: SID frame indicator * st->valid_data O: Valid data indicator * frame_type O: Frame type * * Function: * Find the new DTX state * * Returns: * DTXStateType DTX, DTX_MUTE or SPEECH */ __device__ static enum DTXStateType rx_dtx_handler(dtx_decState *st, enum RXFrameType frame_type) { enum DTXStateType newState; enum DTXStateType encState; /* DTX if SID frame or previously in DTX{_MUTE} and (NO_RX OR BAD_SPEECH) */ if (table_SID[frame_type] | ((st->dtxGlobalState != SPEECH) & table_speech_bad[frame_type])) { newState = DTX; /* stay in mute for these input types */ if ((st->dtxGlobalState == DTX_MUTE) & table_mute[frame_type]) { newState = DTX_MUTE; } /* * evaluate if noise parameters are too old * since_last_sid is reset when CN parameters have been updated */ st->since_last_sid += 1; /* no update of sid parameters in DTX for a long while */ if ((frame_type != RX_SID_UPDATE) & (st->since_last_sid > DTX_MAX_EMPTY_THRESH)) { newState = DTX_MUTE; } } else { newState = SPEECH; st->since_last_sid = 0; } /* * reset the decAnaElapsed Counter when receiving CNI data the first * time, to robustify against counter mismatch after handover * this might delay the bwd CNI analysis in the new decoder slightly. */ if ((st->data_updated == 0) & (frame_type == RX_SID_UPDATE)) { st->decAnaElapsedCount = 0; } /* * update the SPE-SPD DTX hangover synchronization * to know when SPE has added dtx hangover */ st->decAnaElapsedCount += 1; st->dtxHangoverAdded = 0; encState = SPEECH; if (table_DTX[frame_type]) { encState = DTX; if ((frame_type == RX_NO_DATA) & (newState == SPEECH)) { encState = SPEECH; } } if (encState == SPEECH) { st->dtxHangoverCount = DTX_HANG_CONST; } else { if (st->decAnaElapsedCount > DTX_ELAPSED_FRAMES_THRESH) { st->dtxHangoverAdded = 1; st->decAnaElapsedCount = 0; st->dtxHangoverCount = 0; } else if (st->dtxHangoverCount == 0) { st->decAnaElapsedCount = 0; } else { st->dtxHangoverCount -= 1; } } if (newState != SPEECH) { /* * DTX or DTX_MUTE * CN data is not in a first SID, first SIDs are marked as SID_BAD * but will do backwards analysis if a hangover period has been added * according to the state machine above */ st->sid_frame = 0; st->valid_data = 0; if (frame_type == RX_SID_FIRST) { st->sid_frame = 1; } else if (frame_type == RX_SID_UPDATE) { st->sid_frame = 1; st->valid_data = 1; } else if (frame_type == RX_SID_BAD) { st->sid_frame = 1; /* use old data */ st->dtxHangoverAdded = 0; } } /* newState is used by both SPEECH AND DTX synthesis routines */ return newState; } /* * Lsf_lsp * * * Parameters: * lsf I: vector of LSFs * lsp O: vector of LSPs * * Function: * Transformation lsf to lsp, order M * * Returns: * void */ __device__ static void Lsf_lsp(Word32 lsf[], Word32 lsp[]) { Word32 i, ind, offset, tmp; for (i = 0; i < M; i++) { /* ind = b8-b15 of lsf[i] */ ind = lsf[i] >> 8; /* offset = b0-b7 of lsf[i] */ offset = lsf[i] & 0x00ff; /* lsp[i] = table[ind]+ ((table[ind+1]-table[ind])*offset) / 256 */ tmp = ((cos_table[ind + 1] - cos_table[ind])*offset) << 1; lsp[i] = cos_table[ind] + (tmp >> 9); } return; } /* * D_plsf_3 * * * Parameters: * st->past_lsf_q I: Past dequantized LSFs * st->past_r_q B: past quantized residual * mode I: AMR mode * bfi B: 
bad frame indicator * indice I: quantization indices of 3 submatrices, Q0 * lsp1_q O: quantized 1st LSP vector * * Function: * Decodes the LSP parameters using the received quantization indices. * 1st order MA prediction and split by 3 vector quantization (split-VQ) * * Returns: * void */ __device__ static void D_plsf_3(D_plsfState *st, enum Mode mode, Word16 bfi, Word16 * indice, Word32 *lsp1_q) { Word32 lsf1_r[M], lsf1_q[M]; Word32 i, index, temp; const Word32 *p_cb1, *p_cb2, *p_cb3, *p_dico; /* if bad frame */ if (bfi != 0) { /* use the past LSFs slightly shifted towards their mean */ for (i = 0; i < M; i++) { /* lsfi_q[i] = ALPHA*past_lsf_q[i] + ONE_ALPHA*meanLsf[i]; */ lsf1_q[i] = ((st->past_lsf_q[i] * ALPHA) >> 15) + ((mean_lsf_3[i] * ONE_ALPHA) >> 15); } /* estimate past quantized residual to be used in next frame */ if (mode != MRDTX) { for (i = 0; i < M; i++) { /* temp = meanLsf[i] + pastR2_q[i] * pred_fac; */ temp = mean_lsf_3[i] + ((st->past_r_q[i] * pred_fac[i]) >> 15); st->past_r_q[i] = lsf1_q[i] - temp; } } else { for (i = 0; i < M; i++) { /* temp = meanLsf[i] + pastR2_q[i]; */ temp = mean_lsf_3[i] + st->past_r_q[i]; st->past_r_q[i] = lsf1_q[i] - temp; } } } /* if good LSFs received */ else { if ((mode == MR475) | (mode == MR515)) { /* MR475, MR515 */ p_cb1 = dico1_lsf_3; p_cb2 = dico2_lsf_3; p_cb3 = mr515_3_lsf; } else if (mode == MR795) { /* MR795 */ p_cb1 = mr795_1_lsf; p_cb2 = dico2_lsf_3; p_cb3 = dico3_lsf_3; } else { /* MR59, MR67, MR74, MR102, MRDTX */ p_cb1 = dico1_lsf_3; p_cb2 = dico2_lsf_3; p_cb3 = dico3_lsf_3; } /* decode prediction residuals from 3 received indices */ index = *indice++; p_dico = &p_cb1[index + index + index]; index = *indice++; lsf1_r[0] = *p_dico++; lsf1_r[1] = *p_dico++; lsf1_r[2] = *p_dico++; if ((mode == MR475) | (mode == MR515)) { /* MR475, MR515 only using every second entry */ index = index << 1; } p_dico = &p_cb2[index + index + index]; index = *indice++; lsf1_r[3] = *p_dico++; lsf1_r[4] = *p_dico++; lsf1_r[5] = *p_dico++; p_dico = &p_cb3[index << 2]; lsf1_r[6] = *p_dico++; lsf1_r[7] = *p_dico++; lsf1_r[8] = *p_dico++; lsf1_r[9] = *p_dico++; /* Compute quantized LSFs and update the past quantized residual */ if (mode != MRDTX) { for (i = 0; i < M; i++) { lsf1_q[i] = lsf1_r[i] + (mean_lsf_3[i] + ((st->past_r_q[i] * pred_fac[i]) >> 15)); } memcpy(st->past_r_q, lsf1_r, M << 2); } else { for (i = 0; i < M; i++) { lsf1_q[i] = lsf1_r[i] + (mean_lsf_3[i] + st->past_r_q[i]); } memcpy(st->past_r_q, lsf1_r, M << 2); } } /* verification that LSFs has minimum distance of LSF_GAP Hz */ temp = LSF_GAP; for (i = 0; i < M; i++) { if (lsf1_q[i] < temp) { lsf1_q[i] = temp; } temp = lsf1_q[i] + LSF_GAP; } memcpy(st->past_lsf_q, lsf1_q, M << 2); /* convert LSFs to the cosine domain */ Lsf_lsp(lsf1_q, lsp1_q); return; } /* * pseudonoise * * * Parameters: * shift_reg B: Old CN generator shift register state * no_bits I: Number of bits * * Function: * pseudonoise * * Returns: * noise_bits */ __device__ static Word32 pseudonoise(Word32 *shift_reg, Word32 no_bits) { Word32 noise_bits, Sn, i; Word32 s_reg; s_reg = *shift_reg; noise_bits = 0; for (i = 0; i < no_bits; i++) { /* State n == 31 */ Sn = s_reg & 0x00000001L; /* State n == 3 */ if (s_reg & 0x10000000L) { Sn = Sn ^ 0x1L; } else { Sn = Sn ^ 0x0L; } noise_bits = (noise_bits << 1) | (s_reg & 1); s_reg = s_reg >> 1; if (Sn & 1) { s_reg = s_reg | 0x40000000L; } } *shift_reg = s_reg; return noise_bits; } /* * Lsp_lsf * * * Parameters: * lsp I: LSP vector (range: -1<=val<1) * lsf O: LSF vector Old CN generator 
shift register state * * Function: * Transformation lsp to lsf, LPC order M * lsf[i] = arccos(lsp[i])/(2*pi) * * Returns: * void */ __device__ static void Lsp_lsf(Word32 lsp[], Word32 lsf[]) { Word32 i, ind = 63; /* begin at end of table -1 */ for (i = M - 1; i >= 0; i--) { /* find value in table that is just greater than lsp[i] */ while (cos_table[ind] < lsp[i]) { ind--; } lsf[i] = ((((lsp[i] - cos_table[ind]) * acos_slope[ind]) + 0x800) >> 12) + (ind << 8); } return; } /* * Reorder_lsf * * * Parameters: * lsf B: vector of LSFs (range: 0<=val<=0.5) * min_dist I: minimum required distance * * Function: * Make sure that the LSFs are properly ordered and to keep a certain minimum * distance between adjacent LSFs. LPC order = M. * * Returns: * void */ __device__ static void Reorder_lsf(Word32 *lsf, Word32 min_dist) { Word32 lsf_min, i; lsf_min = min_dist; for (i = 0; i < M; i++) { if (lsf[i] < lsf_min) { lsf[i] = lsf_min; } lsf_min = lsf[i] + min_dist; } } /* VC5.0 Global optimization does not work with this function */ #if _MSC_VER == 1100 #pragma optimize( "g", off ) #endif /* * Get_lsp_pol * * * Parameters: * lsp I: line spectral frequencies * f O: polynomial F1(z) or F2(z) * * Function: * Find the polynomial F1(z) or F2(z) from the LSPs. * * F1(z) = product ( 1 - 2 lsp[i] z^-1 + z^-2 ) * i=0,2,4,6,8 * F2(z) = product ( 1 - 2 lsp[i] z^-1 + z^-2 ) * i=1,3,5,7,9 * * where lsp[] is the LSP vector in the cosine domain. * * The expansion is performed using the following recursion: * * f[0] = 1 * b = -2.0 * lsp[0] * f[1] = b * for i=2 to 5 do * b = -2.0 * lsp[2*i-2]; * f[i] = b*f[i-1] + 2.0*f[i-2]; * for j=i-1 down to 2 do * f[j] = f[j] + b*f[j-1] + f[j-2]; * f[1] = f[1] + b; * * Returns: * void */ __device__ static void Get_lsp_pol(Word32 *lsp, Word32 *f) { volatile Word32 f0, f1, f2, f3, f4, f5; Word32 l1, l2, l3, l4; /* f[0] = 1.0; */ f0 = 16777216L; /* f1 = *lsp * -1024; */ f1 = -lsp[0] << 10; l1 = lsp[2]; l2 = lsp[4]; l3 = lsp[6]; l4 = lsp[8]; f2 = f0 << 1; f2 -= (((f1 >> 16) * l1) + (((f1 & 0xFFFE) * l1) >> 16)) << 2; f1 -= l1 << 10; f3 = f1 << 1; f3 -= (((f2 >> 16) * l2) + (((f2 & 0xFFFE) * l2) >> 16)) << 2; f2 += f0; f2 -= (((f1 >> 16) * l2) + (((f1 & 0xFFFE) * l2) >> 16)) << 2; f1 -= l2 << 10; f4 = f2 << 1; f4 -= (((f3 >> 16) * l3) + (((f3 & 0xFFFE) * l3) >> 16)) << 2; f3 += f1; f3 -= (((f2 >> 16) * l3) + (((f2 & 0xFFFE) * l3) >> 16)) << 2; f2 += f0; f2 -= (((f1 >> 16) * l3) + (((f1 & 0xFFFE) * l3) >> 16)) << 2; f1 -= l3 << 10; f5 = f3 << 1; f5 -= (((f4 >> 16) * l4) + (((f4 & 0xFFFE) * l4) >> 16)) << 2; f4 += f2; f4 -= (((f3 >> 16) * l4) + (((f3 & 0xFFFE) * l4) >> 16)) << 2; f3 += f1; f3 -= (((f2 >> 16) * l4) + (((f2 & 0xFFFE) * l4) >> 16)) << 2; f2 += f0; f2 -= (((f1 >> 16) * l4) + (((f1 & 0xFFFE) * l4) >> 16)) << 2; f1 -= l4 << 10; f[0] = f0; f[1] = f1; f[2] = f2; f[3] = f3; f[4] = f4; f[5] = f5; return; } #if _MSC_VER == 1100 #pragma optimize( "", on ) #endif /* * Lsp_Az * * * Parameters: * lsp I: Line spectral frequencies * a O: Predictor coefficients * * Function: * Converts from the line spectral pairs (LSP) to LP coefficients, * for a 10th order filter. 
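 *
 * A more explicit floating-point sketch of the steps summarized below
 * (illustrative only; the fixed-point code with Q12 output coefficients
 * is authoritative):
 *
 *   f1 = Get_lsp_pol(&lsp[0])   // built from the even-indexed LSPs
 *   f2 = Get_lsp_pol(&lsp[1])   // built from the odd-indexed LSPs
 *   for i = 5 downto 1: f1[i] += f1[i-1]; f2[i] -= f2[i-1]
 *   a[0] = 1
 *   for i = 1..5: a[i]    = (f1[i] + f2[i]) / 2
 *                 a[11-i] = (f1[i] - f2[i]) / 2
 *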
* * Find the coefficients of F1(z) and F2(z) * Multiply F1(z) by 1+z^{-1} and F2(z) by 1-z^{-1} * A(z) = ( F1(z) + F2(z) ) / 2 * * Returns: * void */ __device__ static void Lsp_Az(Word32 lsp[], Word32 a[]) { Word32 f1[6], f2[6]; Word32 T0, i, j; Get_lsp_pol(&lsp[0], f1); Get_lsp_pol(&lsp[1], f2); for (i = 5; i > 0; i--) { f1[i] += f1[i - 1]; f2[i] -= f2[i - 1]; } a[0] = 4096; for (i = 1, j = 10; i <= 5; i++, j--) { T0 = f1[i] + f2[i]; a[i] = (Word16)(T0 >> 13); /* emulate fixed point bug */ if ((T0 & 4096) != 0) { a[i]++; } T0 = f1[i] - f2[i]; a[j] = (Word16)(T0 >> 13); /* emulate fixed point bug */ if ((T0 & 4096) != 0) { a[j]++; } } return; } /* * A_Refl * * * Parameters: * a I: Directform coefficients * refl O: Reflection coefficients * * Function: * Converts from the directform coefficients to reflection coefficients * * Returns: * void */ __device__ static void A_Refl(Word32 a[], Word32 refl[]) { /* local variables */ int normShift; Word32 aState[M], bState[M]; Word32 normProd, acc, temp, mult, scale, i, j; /* initialize states */ memcpy(aState, a, M << 2); /* backward Levinson recursion */ for (i = M - 1; i >= 0; i--) { if (labs(aState[i]) >= 4096) { goto ExitRefl; } refl[i] = aState[i] << 3; temp = (refl[i] * refl[i]) << 1; acc = (MAX_32 - temp); normShift = 0; if (acc != 0) { temp = acc; while (!(temp & 0x40000000)) { normShift++; temp = temp << 1; } } else { normShift = 0; } scale = 15 - normShift; acc = (acc << normShift); temp = (acc + (Word32)0x00008000L); if (temp > 0) { normProd = temp >> 16; mult = 0x20000000L / normProd; } else mult = 16384; for (j = 0; j < i; j++) { acc = aState[j] << 16; acc -= (refl[i] * aState[i - j - 1]) << 1; temp = (acc + (Word32)0x00008000L) >> 16; temp = (mult * temp) << 1; if (scale > 0) { if ((temp & ((Word32)1 << (scale - 1))) != 0) { temp = (temp >> scale) + 1; } else temp = (temp >> scale); } else temp = (temp >> scale); if (labs(temp) > 32767) { goto ExitRefl; } bState[j] = temp; } memcpy(aState, bState, i << 2); } return; ExitRefl: memset(refl, 0, M << 2); } /* * Log2_norm * * * Parameters: * x I: input value * exp I: exponent * exponent O: Integer part of Log2. (range: 0<=val<=30) * fraction O: Fractional part of Log2. (range: 0<=val<1) * * Function: * Computes log2 * * Computes log2(L_x, exp), where L_x is positive and * normalized, and exp is the normalisation exponent * If L_x is negative or zero, the result is 0. * * The function Log2(L_x) is approximated by a table and linear * interpolation. The following steps are used to compute Log2(L_x) * * exponent = 30-normExponent * i = bit25-b31 of L_x; 32<=i<=63 (because of normalization). * a = bit10-b24 * i -=32 * fraction = table[i]<<16 - (table[i] - table[i+1]) * a * 2 * * Returns: * void */ __device__ static void Log2_norm(Word32 x, Word32 exp, Word32 *exponent, Word32 * fraction) { Word32 y, i, a; if (x <= 0) { *exponent = 0; *fraction = 0; return; } /* Extract b25-b31 */ i = x >> 25; i = i - 32; /* Extract b10-b24 of fraction */ a = x >> 9; a = a & 0xFFFE; /* 2a */ /* fraction */ y = (log2_table[i] << 16) - a * (log2_table[i] - log2_table[i + 1]); *fraction = y >> 16; *exponent = 30 - exp; return; } /* * Log2 * * * Parameters: * x I: input value * exponent O: Integer part of Log2. (range: 0<=val<=30) * fraction O: Fractional part of Log2. (range: 0<=val<1) * * Function: * Computes log2(L_x) * If x is negative or zero, the result is 0. 
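 *
 * Worked example (illustrative): x = 4096 = 2^12 is normalized with a
 * left shift of 18 so that bit 30 becomes the leading bit; Log2_norm
 * then returns exponent = 30 - 18 = 12 and a fraction of (approximately)
 * 0, i.e. log2(4096) = 12.0.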
* * Returns: * void */ __device__ static void Log2(Word32 x, Word32 *exponent, Word32 *fraction) { int tmp, exp = 0; if (x != 0) { tmp = x; while (!((tmp & 0x80000000) ^ ((tmp & 0x40000000) << 1))) { exp++; tmp = tmp << 1; } } Log2_norm(x << exp, exp, exponent, fraction); } /* * Pow2 * * * Parameters: * exponent I: Integer part. (range: 0<=val<=30) * fraction O: Fractional part. (range: 0.0<=val<1.0) * * Function: * pow(2.0, exponent.fraction) * * The function Pow2(L_x) is approximated by a table and linear interpolation. * * i = bit10-b15 of fraction, 0 <= i <= 31 * a = biT0-b9 of fraction * x = table[i]<<16 - (table[i] - table[i+1]) * a * 2 * x = L_x >> (30-exponent) (with rounding) * * Returns: * result (range: 0<=val<=0x7fffffff) */ __device__ static Word32 Pow2(Word32 exponent, Word32 fraction) { Word32 i, a, tmp, x, exp; /* Extract b10-b16 of fraction */ i = fraction >> 10; /* Extract b0-b9 of fraction */ a = (fraction << 5) & 0x7fff; /* table[i] << 16 */ x = pow2_table[i] << 16; /* table[i] - table[i+1] */ tmp = pow2_table[i] - pow2_table[i + 1]; /* L_x -= tmp*a*2 */ x -= (tmp * a) << 1; if (exponent >= -1) { exp = (30 - exponent); /* Rounding */ if ((x & ((Word32)1 << (exp - 1))) != 0) { x = (x >> exp) + 1; } else x = x >> exp; } else x = 0; return(x); } /* * Build_CN_code * * * Parameters: * seed B: Old CN generator shift register state * cod O: Generated CN fixed codebook vector * * Function: * Generate CN fixed codebook vector * * Returns: * void */ __device__ static void Build_CN_code(Word32 *seed, Word32 cod[]) { Word32 i, j, k; memset(cod, 0, L_SUBFR << 2); for (k = 0; k < 10; k++) { i = pseudonoise(seed, 2); /* generate pulse position */ i = (i * 20) >> 1; i = (i + k); j = pseudonoise(seed, 1); /* generate sign */ if (j > 0) { cod[i] = 4096; } else { cod[i] = -4096; } } return; } /* * Build_CN_param * * * Parameters: * seed B: Old CN generator shift register state * nParam I: number of params * paramSizeTable I: size of params * parm O: CN Generated params * * Function: * Generate parameters for comfort noise generation * * Returns: * void */ __device__ static void Build_CN_param(Word16 *seed, enum Mode mode, Word16 parm[]) { Word32 i; const Word32 *p; *seed = (Word16)((*seed * 31821) + 13849L); p = &window_200_40[*seed & 0x7F]; switch (mode) { case MR122: for (i = 0; i < PRMNO_MR122; i++) { parm[i] = (Word16)(*p++ & ~(0xFFFF << bitno_MR122[i])); } break; case MR102: for (i = 0; i < PRMNO_MR102; i++) { parm[i] = (Word16)(*p++ & ~(0xFFFF << bitno_MR102[i])); } break; case MR795: for (i = 0; i < PRMNO_MR795; i++) { parm[i] = (Word16)(*p++ & ~(0xFFFF << bitno_MR795[i])); } break; case MR74: for (i = 0; i < PRMNO_MR74; i++) { parm[i] = (Word16)(*p++ & ~(0xFFFF << bitno_MR74[i])); } break; case MR67: for (i = 0; i < PRMNO_MR67; i++) { parm[i] = (Word16)(*p++ & ~(0xFFFF << bitno_MR67[i])); } break; case MR59: for (i = 0; i < PRMNO_MR59; i++) { parm[i] = (Word16)(*p++ & ~(0xFFFF << bitno_MR59[i])); } break; case MR515: for (i = 0; i < PRMNO_MR515; i++) { parm[i] = (Word16)(*p++ & ~(0xFFFF << bitno_MR515[i])); } break; case MR475: for (i = 0; i < PRMNO_MR475; i++) { parm[i] = (Word16)(*p++ & ~(0xFFFF << bitno_MR475[i])); } break; } } /* * Syn_filt * * * Parameters: * a I: prediction coefficients [M+1] * x I: input signal * y O: output signal * lg I: size of filtering * mem B: memory associated with this filtering * update I: 0=no update, 1=update of memory. * * Function: * Perform synthesis filtering through 1/A(z). 
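 *
 * In floating point this is the usual all-pole recursion (sketch only,
 * assuming a[] in Q12 with a[0] = 4096 as produced by Lsp_Az):
 *
 *   y[n] = x[n] - sum_{k=1..10} (a[k]/4096) * y[n-k]
 *
 * The loop below computes s = a[0]*x[n] - sum a[k]*y[n-k], rounds with
 * (s + 0x800) >> 12 and saturates to 16 bits, returning an overflow flag.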
* * Returns: * void */ __device__ static Word32 Syn_filt(Word32 a[], Word32 x[], Word32 y[], Word32 lg, Word32 mem[] , Word32 update) { Word32 tmp[50]; /* malloc is slow */ Word32 s, a0, overflow = 0; Word32 *yy, *yy_limit; /* Copy mem[] to yy[] */ memcpy(tmp, mem, 40); yy = tmp + M; yy_limit = yy + lg; a0 = a[0]; /* Do the filtering. */ while (yy < yy_limit) { s = *x++ * a0; s -= yy[-1] * a[1]; s -= yy[-2] * a[2]; s -= yy[-3] * a[3]; s -= yy[-4] * a[4]; s -= yy[-5] * a[5]; s -= yy[-6] * a[6]; s -= yy[-7] * a[7]; s -= yy[-8] * a[8]; s -= yy[-9] * a[9]; s -= yy[-10] * a[10]; if (labs(s) < 0x7ffffff) *yy = (s + 0x800L) >> 12; else if (s > 0) { *yy = 32767; overflow = 1; } else { *yy = -32768; overflow = 1; } yy++; } memcpy(y, &tmp[M], lg << 2); /* Update of memory if update==1 */ if (update) { memcpy(mem, &y[lg - M], 40); } return overflow; } /* * Syn_filt_overflow * * * Parameters: * a I: prediction coefficients [M+1] * x I: input signal * y O: output signal * lg I: size of filtering * mem B: memory associated with this filtering * update I: 0=no update, 1=update of memory. * * Function: * Perform synthesis filtering through 1/A(z). * Saturate after every multiplication. * Returns: * void */ __device__ static void Syn_filt_overflow(Word32 a[], Word32 x[], Word32 y[], Word32 lg, Word32 mem[] , Word32 update) { Word32 tmp[50]; /* malloc is slow */ Word32 i, j, s, a0; Word32 *yy; /* Copy mem[] to yy[] */ memcpy(tmp, mem, 40); yy = tmp + M; a0 = a[0]; /* Do the filtering. */ for (i = 0; i < lg; i++) { s = x[i] * a0; for (j = 1; j <= M; j++) { s -= a[j] * yy[-j]; if (s > 1073741823) { s = 1073741823; } else if (s < -1073741824) { s = -1073741824; } } if (labs(s) < 0x7FFE800) *yy = (s + 0x800L) >> 12; else if (s > 0) { *yy = 32767; } else { *yy = -32768; } yy++; } memcpy(y, &tmp[M], lg << 2); /* Update of memory if update==1 */ if (update) { memcpy(mem, &y[lg - M], 40); } return; } /* * dtx_dec * * * Parameters: * st B: DTX state struct * mem_syn I: AMR decoder state * lsfState B: LSF state struct * pred_state->past_qua_en O: table of past quantized energies * pred_state->past_qua_en_MR122 O: table of past quantized energies MR122 * averState->hangVar O: * averState->hangCount O: hangover variable * new_state I: new DTX state * mode I: AMR mode * parm I: vector of synthesis parameters * synth O: synthesised speech * A_t O: decoded LP filter in 4 subframes * * Function: * DTX * * Returns: * void */ __device__ static void dtx_dec(dtx_decState *st, Word32 *mem_syn, D_plsfState *lsfState, dec_gc_predState *pred_state, Cb_gain_averageState *averState, enum DTXStateType new_state, enum Mode mode, Word16 parm[], Word32 synth[], Word32 A_t[]) { Word32 ex[L_SUBFR], acoeff[11], acoeff_variab[M + 1], lsp_int[M]; Word32 refl[M], lsf[M], lsf_int[M], lsf_int_variab[M], lsp_int_variab[M]; Word32 i, j, int_fac, log_en_int, pred_err, log_pg_e, log_pg_m, log_pg; Word32 negative, lsf_mean, lsf_variab_index, lsf_variab_factor, ptr; Word16 log_en_index, log_en_int_e, log_en_int_m, level, ma_pred_init, tmp_int_length; if ((st->dtxHangoverAdded != 0) & (st->sid_frame != 0)) { /* * sidFirst after dtx hangover period * or sidUpd after dtxhangover */ /* set log_en_adjust to correct value */ st->log_en_adjust = dtx_log_en_adjust[mode]; ptr = st->lsf_hist_ptr + M; if (ptr == 80) { ptr = 0; } memcpy(&st->lsf_hist[ptr], &st->lsf_hist[st->lsf_hist_ptr], M << 2); ptr = st->log_en_hist_ptr + 1; if (ptr == DTX_HIST_SIZE) { ptr = 0; } st->log_en_hist[ptr] = st->log_en_hist[st->log_en_hist_ptr]; /* Q11 */ /* * compute mean log 
energy and lsp * from decoded signal (SID_FIRST) */ st->log_en = 0; memset(lsf, 0, M << 2); /* average energy and lsp */ for (i = 0; i < DTX_HIST_SIZE; i++) { st->log_en = st->log_en + (st->log_en_hist[i] >> 3); for (j = 0; j < M; j++) { lsf[j] += st->lsf_hist[i * M + j]; } } for (j = 0; j < M; j++) { lsf[j] = lsf[j] >> 3; /* divide by 8 */ } Lsf_lsp(lsf, st->lsp); /* * make log_en speech coder mode independent * added again later before synthesis */ st->log_en = st->log_en - st->log_en_adjust; /* compute lsf variability vector */ memcpy(st->lsf_hist_mean, st->lsf_hist, 80 << 2); for (i = 0; i < M; i++) { lsf_mean = 0; /* compute mean lsf */ for (j = 0; j < 8; j++) { lsf_mean += st->lsf_hist_mean[i + j * M]; } lsf_mean = lsf_mean >> 3; /* * subtract mean and limit to within reasonable limits * moreover the upper lsf's are attenuated */ for (j = 0; j < 8; j++) { /* subtract mean */ st->lsf_hist_mean[i + j * M] = st->lsf_hist_mean[i + j * M] - lsf_mean; /* attenuate deviation from mean, especially for upper lsf's */ st->lsf_hist_mean[i + j * M] = (st->lsf_hist_mean[i + j * M] * lsf_hist_mean_scale[i]) >> 15; /* limit the deviation */ if (st->lsf_hist_mean[i + j * M] < 0) { negative = 1; } else { negative = 0; } st->lsf_hist_mean[i + j * M] = labs(st->lsf_hist_mean[i + j * M]); /* apply soft limit */ if (st->lsf_hist_mean[i + j * M] > 655) { st->lsf_hist_mean[i + j * M] = 655 + ((st->lsf_hist_mean[i + j * M] - 655) >> 2); } /* apply hard limit */ if (st->lsf_hist_mean[i + j * M] > 1310) { st->lsf_hist_mean[i + j * M] = 1310; } if (negative != 0) { st->lsf_hist_mean[i + j * M] = -st->lsf_hist_mean[i + j * M]; } } } } if (st->sid_frame != 0) { /* * Set old SID parameters, always shift * even if there is no new valid_data */ memcpy(st->lsp_old, st->lsp, M << 2); st->old_log_en = st->log_en; if (st->valid_data != 0) /* new data available (no CRC) */ { /* Compute interpolation factor, since the division only works * for values of since_last_sid < 32 we have to limit the * interpolation to 32 frames */ tmp_int_length = st->since_last_sid; st->since_last_sid = 0; if (tmp_int_length > 32) { tmp_int_length = 32; } if (tmp_int_length >= 2) { st->true_sid_period_inv = 0x2000000 / (tmp_int_length << 10); } else { st->true_sid_period_inv = 16384; /* 0.5 it Q15 */ } memcpy(lsfState->past_r_q, &past_rq_init[parm[0] * M], M << 2); D_plsf_3(lsfState, MRDTX, 0, &parm[1], st->lsp); /* reset for next speech frame */ memset(lsfState->past_r_q, 0, M << 2); log_en_index = parm[4]; /* Q11 and divide by 4 */ st->log_en = (Word16)(log_en_index << 9); /* Subtract 2.5 in Q11 */ st->log_en = (Word16)(st->log_en - 5120); /* Index 0 is reserved for silence */ if (log_en_index == 0) { st->log_en = MIN_16; } /* * no interpolation at startup after coder reset * or when SID_UPD has been received right after SPEECH */ if ((st->data_updated == 0) || (st->dtxGlobalState == SPEECH)) { memcpy(st->lsp_old, st->lsp, M << 2); st->old_log_en = st->log_en; } } /* endif valid_data */ /* initialize gain predictor memory of other modes */ ma_pred_init = (Word16)((st->log_en >> 1) - 9000); if (ma_pred_init > 0) { ma_pred_init = 0; } if (ma_pred_init < -14436) { ma_pred_init = -14436; } pred_state->past_qua_en[0] = ma_pred_init; pred_state->past_qua_en[1] = ma_pred_init; pred_state->past_qua_en[2] = ma_pred_init; pred_state->past_qua_en[3] = ma_pred_init; /* past_qua_en for other modes than MR122 */ ma_pred_init = (Word16)((5443 * ma_pred_init) >> 15); /* scale down by factor 20*log10(2) in Q15 */ pred_state->past_qua_en_MR122[0] = 
ma_pred_init; pred_state->past_qua_en_MR122[1] = ma_pred_init; pred_state->past_qua_en_MR122[2] = ma_pred_init; pred_state->past_qua_en_MR122[3] = ma_pred_init; } /* endif sid_frame */ /* * CN generation * recompute level adjustment factor Q11 * st->log_en_adjust = 0.9*st->log_en_adjust + * 0.1*dtx_log_en_adjust[mode]); */ st->log_en_adjust = (Word16)(((st->log_en_adjust * 29491) >> 15) + (( (dtx_log_en_adjust[mode] << 5) * 3277) >> 20)); /* Interpolate SID info */ /* Q10 */ if (st->since_last_sid > 30) int_fac = 32767; else int_fac = (Word16)((st->since_last_sid + 1) << 10); /* Q10 * Q15 -> Q10 */ int_fac = (int_fac * st->true_sid_period_inv) >> 15; /* Maximize to 1.0 in Q10 */ if (int_fac > 1024) { int_fac = 1024; } /* Q10 -> Q14 */ int_fac = (Word16)(int_fac << 4); /* Q14 * Q11->Q26 */ log_en_int = (int_fac * st->log_en) << 1; for (i = 0; i < M; i++) { /* Q14 * Q15 -> Q14 */ lsp_int[i] = (int_fac * st->lsp[i]) >> 15; } /* 1-k in Q14 */ int_fac = 16384 - int_fac; /* (Q14 * Q11 -> Q26) + Q26 -> Q26 */ log_en_int += (int_fac * st->old_log_en) << 1; for (i = 0; i < M; i++) { /* Q14 + (Q14 * Q15 -> Q14) -> Q14 */ lsp_int[i] = lsp_int[i] + ((int_fac * st->lsp_old[i]) >> 15); /* Q14 -> Q15 */ lsp_int[i] = lsp_int[i] << 1; } /* compute the amount of lsf variability */ /* -0.6 in Q12 */ lsf_variab_factor = st->log_pg_mean - 2457; /* *0.3 Q12*Q15 -> Q12 */ lsf_variab_factor = 4096 - ((lsf_variab_factor * 9830) >> 15); /* limit to values between 0..1 in Q12 */ if (lsf_variab_factor >= 4096) { lsf_variab_factor = 32767; } else if (lsf_variab_factor < 0) { lsf_variab_factor = 0; } else lsf_variab_factor = lsf_variab_factor << 3; /* -> Q15 */ /* get index of vector to do variability with */ lsf_variab_index = pseudonoise(&st->pn_seed_rx, 3); /* convert to lsf */ Lsp_lsf(lsp_int, lsf_int); /* apply lsf variability */ memcpy(lsf_int_variab, lsf_int, M << 2); for (i = 0; i < M; i++) { lsf_int_variab[i] = lsf_int_variab[i] + ((lsf_variab_factor * st-> lsf_hist_mean[i + lsf_variab_index * M]) >> 15); } /* make sure that LSP's are ordered */ Reorder_lsf(lsf_int, LSF_GAP); Reorder_lsf(lsf_int_variab, LSF_GAP); /* copy lsf to speech decoders lsf state */ memcpy(lsfState->past_lsf_q, lsf_int, M << 2); /* convert to lsp */ Lsf_lsp(lsf_int, lsp_int); Lsf_lsp(lsf_int_variab, lsp_int_variab); /* Compute acoeffs Q12 acoeff is used for level * normalization and Post_Filter, acoeff_variab is * used for synthesis filter * by doing this we make sure that the level * in high frequenncies does not jump up and down */ Lsp_Az(lsp_int, acoeff); Lsp_Az(lsp_int_variab, acoeff_variab); /* For use in Post_Filter */ memcpy(&A_t[0], acoeff, MP1 << 2); memcpy(&A_t[MP1], acoeff, MP1 << 2); memcpy(&A_t[MP1 << 1], acoeff, MP1 << 2); memcpy(&A_t[MP1 + MP1 + MP1], acoeff, MP1 << 2); /* Compute reflection coefficients Q15 */ A_Refl(&acoeff[1], refl); /* Compute prediction error in Q15 */ /* 0.99997 in Q15 */ pred_err = MAX_16; for (i = 0; i < M; i++) { pred_err = (pred_err * (MAX_16 - ((refl[i] * refl[i]) >> 15))) >> 15; } /* compute logarithm of prediction gain */ Log2(pred_err, &log_pg_e, &log_pg_m); /* convert exponent and mantissa to Word16 Q12 */ /* Q12 */ log_pg = (log_pg_e - 15) << 12; /* saturate */ if (log_pg < -32768) { log_pg = -32768; } log_pg = (-(log_pg + (log_pg_m >> 3))) >> 1; st->log_pg_mean = (Word16)(((29491 * st->log_pg_mean) >> 15) + ((3277 * log_pg) >> 15)); /* Compute interpolated log energy */ /* Q26 -> Q16 */ log_en_int = log_en_int >> 10; /* Add 4 in Q16 */ log_en_int += 262144L; /* subtract prediction gain */ 
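/*
 * Illustrative note (Q formats inferred from the surrounding comments,
 * stated here as an assumption): log_en_int is in Q16 at this point,
 * log_pg is Q12 and st->log_en_adjust is Q11, so the << 4 and << 5
 * shifts in the two statements below align both terms to Q16 before
 * they are applied.
 */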
log_en_int = log_en_int - (log_pg << 4); /* adjust level to speech coder mode */ log_en_int += st->log_en_adjust << 5; log_en_int_e = (Word16)(log_en_int >> 16); log_en_int_m = (Word16)((log_en_int - (log_en_int_e << 16)) >> 1); /* Q4 */ level = (Word16)(Pow2(log_en_int_e, log_en_int_m)); for (i = 0; i < 4; i++) { /* Compute innovation vector */ Build_CN_code(&st->pn_seed_rx, ex); for (j = 0; j < L_SUBFR; j++) { ex[j] = (level * ex[j]) >> 15; } /* Synthesize */ Syn_filt(acoeff_variab, ex, &synth[i * L_SUBFR], L_SUBFR, mem_syn, 1); } /* next i */ /* reset codebook averaging variables */ averState->hangVar = 20; averState->hangCount = 0; if (new_state == DTX_MUTE) { /* * mute comfort noise as it has been quite a long time since * last SID update was performed */ Word32 num, denom; tmp_int_length = st->since_last_sid; if (tmp_int_length > 32) { tmp_int_length = 32; } if (tmp_int_length == 1) { st->true_sid_period_inv = MAX_16; } else { num = 1024; denom = (tmp_int_length << 10); st->true_sid_period_inv = 0; for (i = 0; i < 15; i++) { st->true_sid_period_inv <<= 1; num <<= 1; if (num >= denom) { num = num - denom; st->true_sid_period_inv += 1; } } } st->since_last_sid = 0; memcpy(st->lsp_old, st->lsp, M << 2); st->old_log_en = st->log_en; /* subtract 1/8 in Q11 i.e -6/8 dB */ st->log_en = st->log_en - 256; if (st->log_en < -32768) st->log_en = -32768; } /* * reset interpolation length timer * if data has been updated. */ if ((st->sid_frame != 0) & ((st->valid_data != 0) || ((st->valid_data == 0) & (st->dtxHangoverAdded != 0)))) { st->since_last_sid = 0; st->data_updated = 1; } return; } /* * lsp_avg * * * Parameters: * st->lsp_meanSave B: LSP averages * lsp I: LSPs * * Function: * Calculate the LSP averages * * Returns: * void */ __device__ static void lsp_avg(lsp_avgState *st, Word32 *lsp) { Word32 i, tmp; for (i = 0; i < M; i++) { /* mean = 0.84*mean */ tmp = (st->lsp_meanSave[i] << 16); tmp -= (EXPCONST * st->lsp_meanSave[i]) << 1; /* Add 0.16 of newest LSPs to mean */ tmp += (EXPCONST * lsp[i]) << 1; /* Save means */ tmp += 0x00008000L; st->lsp_meanSave[i] = tmp >> 16; } return; } /* * Int_lpc_1and3 * * * Parameters: * lsp_old I: LSP vector at the 4th subfr. of past frame [M] * lsp_mid I: LSP vector at the 2nd subframe of present frame [M] * lsp_new I: LSP vector at the 4th subframe of present frame [M] * Az O: interpolated LP parameters in subframes 1 and 3 * [AZ_SIZE] * * Function: * Interpolates the LSPs and converts to LPC parameters * to get a different LP filter in each subframe. * * The 20 ms speech frame is divided into 4 subframes. * The LSPs are quantized and transmitted at the 2nd and * 4th subframes (twice per frame) and interpolated at the * 1st and 3rd subframe. 
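 *
 * The interpolation weights implemented below are:
 *   subframe 1: 0.5*lsp_old + 0.5*lsp_mid
 *   subframe 2: lsp_mid
 *   subframe 3: 0.5*lsp_mid + 0.5*lsp_new
 *   subframe 4: lsp_new
 * with Lsp_Az() converting each interpolated LSP set to LP coefficients.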
* * Returns: * void */ __device__ static void Int_lpc_1and3(Word32 lsp_old[], Word32 lsp_mid[], Word32 lsp_new[], Word32 Az[]) { Word32 lsp[M]; Word32 i; /* lsp[i] = lsp_mid[i] * 0.5 + lsp_old[i] * 0.5 */ for (i = 0; i < 10; i++) { lsp[i] = (lsp_mid[i] >> 1) + (lsp_old[i] >> 1); } /* Subframe 1 */ Lsp_Az(lsp, Az); Az += MP1; /* Subframe 2 */ Lsp_Az(lsp_mid, Az); Az += MP1; for (i = 0; i < 10; i++) { lsp[i] = (lsp_mid[i] >> 1) + (lsp_new[i] >> 1); } /* Subframe 3 */ Lsp_Az(lsp, Az); Az += MP1; /* Subframe 4 */ Lsp_Az(lsp_new, Az); return; } /* * Int_lpc_1to3 * * * Parameters: * lsp_old I: LSP vector at the 4th subframe of past frame [M] * lsp_new I: LSP vector at the 4th subframe of present frame [M] * Az O: interpolated LP parameters in all subframes * [AZ_SIZE] * * Function: * Interpolates the LSPs and converts to LPC parameters to get a different * LP filter in each subframe. * * The 20 ms speech frame is divided into 4 subframes. * The LSPs are quantized and transmitted at the 4th * subframes (once per frame) and interpolated at the * 1st, 2nd and 3rd subframe. * * Returns: * void */ __device__ static void Int_lpc_1to3(Word32 lsp_old[], Word32 lsp_new[], Word32 Az[]) { Word32 lsp[M]; Word32 i; for (i = 0; i < 10; i++) { lsp[i] = (lsp_new[i] >> 2) + (lsp_old[i] - (lsp_old[i] >> 2)); } /* Subframe 1 */ Lsp_Az(lsp, Az); Az += MP1; for (i = 0; i < 10; i++) { lsp[i] = (lsp_old[i] >> 1) + (lsp_new[i] >> 1); } /* Subframe 2 */ Lsp_Az(lsp, Az); Az += MP1; for (i = 0; i < 10; i++) { lsp[i] = (lsp_old[i] >> 2) + (lsp_new[i] - (lsp_new[i] >> 2)); } /* Subframe 3 */ Lsp_Az(lsp, Az); Az += MP1; /* Subframe 4 */ Lsp_Az(lsp_new, Az); return; } /* * D_plsf_5 * * * Parameters: * st->past_lsf_q I: Past dequantized LFSs * st->past_r_q B: past quantized residual * bfi B: bad frame indicator * indice I: quantization indices of 3 submatrices, Q0 * lsp1_q O: quantized 1st LSP vector * lsp2_q O: quantized 2nd LSP vector * * Function: * Decodes the 2 sets of LSP parameters in a frame * using the received quantization indices. 
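 *
 * Sketch of the reconstruction per coefficient (taken from the code
 * below; the predictor constant LSP_PRED_FAC_MR122 is a Q15 value
 * assumed here to be roughly 0.65):
 *
 *   lsf1_q[i]   = lsf1_r[i] + mean_lsf[i] + pred_fac * past_r_q[i]
 *   lsf2_q[i]   = lsf2_r[i] + mean_lsf[i] + pred_fac * past_r_q[i]
 *   past_r_q[i] = lsf2_r[i]
 *
 * where the residuals lsf1_r/lsf2_r come from the five codebooks
 * dico1_lsf_5 .. dico5_lsf_5, each index addressing two coefficients of
 * both vectors.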
* * Returns: * void */ __device__ static void D_plsf_5(D_plsfState *st, Word16 bfi, Word16 *indice, Word32 *lsp1_q , Word32 *lsp2_q) { Word32 lsf1_r[M], lsf2_r[M], lsf1_q[M], lsf2_q[M]; Word32 i, temp1, temp2, sign; const Word32 *p_dico; /* if bad frame */ if (bfi != 0) { /* use the past LSFs slightly shifted towards their mean */ for (i = 0; i < M; i += 2) { /* lsfi_q[i] = ALPHA*st->past_lsf_q[i] + ONE_ALPHA*meanLsf[i]; */ lsf1_q[i] = ((st->past_lsf_q[i] * ALPHA_122) >> 15) + ((mean_lsf_5[i] * ONE_ALPHA_122) >> 15); lsf1_q[i + 1] = ((st->past_lsf_q[i + 1] * ALPHA_122) >> 15) + (( mean_lsf_5[i + 1] * ONE_ALPHA_122) >> 15); } memcpy(lsf2_q, lsf1_q, M << 2); /* estimate past quantized residual to be used in next frame */ for (i = 0; i < M; i += 2) { /* temp = meanLsf[i] + st->past_r_q[i] * LSPPpred_facMR122; */ temp1 = mean_lsf_5[i] + ((st->past_r_q[i] * LSP_PRED_FAC_MR122) >> 15); temp2 = mean_lsf_5[i + 1] + ((st->past_r_q[i + 1] * LSP_PRED_FAC_MR122 ) >> 15); st->past_r_q[i] = lsf2_q[i] - temp1; st->past_r_q[i + 1] = lsf2_q[i + 1] - temp2; } } /* if good LSFs received */ else { /* decode prediction residuals from 5 received indices */ p_dico = &dico1_lsf_5[indice[0] << 2]; lsf1_r[0] = *p_dico++; lsf1_r[1] = *p_dico++; lsf2_r[0] = *p_dico++; lsf2_r[1] = *p_dico++; p_dico = &dico2_lsf_5[indice[1] << 2]; lsf1_r[2] = *p_dico++; lsf1_r[3] = *p_dico++; lsf2_r[2] = *p_dico++; lsf2_r[3] = *p_dico++; sign = (Word16)(indice[2] & 1); i = indice[2] >> 1; p_dico = &dico3_lsf_5[i << 2]; if (sign == 0) { lsf1_r[4] = *p_dico++; lsf1_r[5] = *p_dico++; lsf2_r[4] = *p_dico++; lsf2_r[5] = *p_dico++; } else { lsf1_r[4] = (Word16)(-(*p_dico++)); lsf1_r[5] = (Word16)(-(*p_dico++)); lsf2_r[4] = (Word16)(-(*p_dico++)); lsf2_r[5] = (Word16)(-(*p_dico++)); } p_dico = &dico4_lsf_5[(indice[3] << 2)]; lsf1_r[6] = *p_dico++; lsf1_r[7] = *p_dico++; lsf2_r[6] = *p_dico++; lsf2_r[7] = *p_dico++; p_dico = &dico5_lsf_5[(indice[4] << 2)]; lsf1_r[8] = *p_dico++; lsf1_r[9] = *p_dico++; lsf2_r[8] = *p_dico++; lsf2_r[9] = *p_dico++; /* Compute quantized LSFs and update the past quantized residual */ for (i = 0; i < M; i++) { temp1 = mean_lsf_5[i] + ((st->past_r_q[i] * LSP_PRED_FAC_MR122) >> 15); lsf1_q[i] = lsf1_r[i] + temp1; lsf2_q[i] = lsf2_r[i] + temp1; st->past_r_q[i] = lsf2_r[i]; } } /* verification that LSFs have minimum distance of LSF_GAP Hz */ Reorder_lsf(lsf1_q, LSF_GAP); Reorder_lsf(lsf2_q, LSF_GAP); memcpy(st->past_lsf_q, lsf2_q, M << 2); /* convert LSFs to the cosine domain */ Lsf_lsp(lsf1_q, lsp1_q); Lsf_lsp(lsf2_q, lsp2_q); return; } /* * Dec_lag3 * * * Parameters: * index I: received pitch index * t0_min I: minimum of search range * t0_max I: maximum of search range * i_subfr I: subframe flag * T0_prev I: integer pitch delay of last subframe used * in 2nd and 4th subframes * T0 O: integer part of pitch lag * T0_frac O : fractional part of pitch lag * flag4 I : flag for encoding with 4 bits * Function: * Decoding of fractional pitch lag with 1/3 resolution. * Extract the integer and fraction parts of the pitch lag from * the received adaptive codebook index. * * The fractional lag in 1st and 3rd subframes is encoded with 8 bits * while that in 2nd and 4th subframes is relatively encoded with 4, 5 * and 6 bits depending on the mode. 
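 *
 * Worked example for a 1st/3rd subframe (derived from the code below):
 * index = 0 gives T0 = ((0 + 2) * 10923 >> 15) + 19 = 19 and
 * T0_frac = (0 - 3*19) + 58 = 1, i.e. a pitch lag of 19 1/3 samples.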
* * Returns: * void */ __device__ static void Dec_lag3(Word32 index, Word32 t0_min, Word32 t0_max, Word32 i_subfr , Word32 T0_prev, Word32 *T0, Word32 *T0_frac, Word32 flag4) { Word32 i, tmp_lag; /* if 1st or 3rd subframe */ if (i_subfr == 0) { if (index < 197) { *T0 = (((index + 2) * 10923) >> 15) + 19; i = *T0 + *T0 + *T0; *T0_frac = (index - i) + 58; } else { *T0 = index - 112; *T0_frac = 0; } } /* 2nd or 4th subframe */ else { if (flag4 == 0) { /* 'normal' decoding: either with 5 or 6 bit resolution */ i = (((index + 2) * 10923) >> 15) - 1; *T0 = i + t0_min; i = i + i + i; *T0_frac = (index - 2) - i; } else { /* decoding with 4 bit resolution */ tmp_lag = T0_prev; if ((tmp_lag - t0_min) > 5) tmp_lag = t0_min + 5; if ((t0_max - tmp_lag) > 4) tmp_lag = t0_max - 4; if (index < 4) { i = (tmp_lag - 5); *T0 = i + index; *T0_frac = 0; } else { if (index < 12) { i = (((index - 5) * 10923) >> 15) - 1; *T0 = i + tmp_lag; i = i + i + i; *T0_frac = (index - 9) - i; } else { i = (index - 12) + tmp_lag; *T0 = i + 1; *T0_frac = 0; } } } /* end if (decoding with 4 bit resolution) */ } return; } /* * Pred_lt_3or6_40 * * * Parameters: * exc B: excitation buffer * T0 I: integer pitch lag * frac I: fraction of lag * flag3 I: if set, upsampling rate = 3 (6 otherwise) * * Function: * Compute the result of long term prediction with fractional * interpolation of resolution 1/3 or 1/6. (Interpolated past excitation). * * Once the fractional pitch lag is determined, * the adaptive codebook vector v(n) is computed by interpolating * the past excitation signal u(n) at the given integer delay k * and phase (fraction) : * * 9 9 * v(n) = SUM[ u(n-k-i) * b60(t+i*6) ] + SUM[ u(n-k+1+i) * b60(6-t+i*6) ], * i=0 i=0 * n = 0, ...,39, t = 0, ...,5. * * The interpolation filter b60 is based on a Hamming windowed sin(x)/x * function truncated at \A1\C0 59 and padded with zeros at \A1\C0 60 (b60(60)=0)). * The filter has a cut-off frequency (-3 dB) at 3 600 Hz in * the over-sampled domain. * * Returns: * void */ __device__ static void Pred_lt_3or6_40(Word32 exc[], Word32 T0, Word32 frac, Word32 flag3) { Word32 s, i; Word32 *x0, *x1, *x2; const Word32 *c1, *c2; x0 = &exc[-T0]; frac = -frac; if (flag3 != 0) { frac <<= 1; /* inter_3l[k] = inter6[2*k] -> k' = 2*k */ } if (frac < 0) { frac += 6; x0--; } c1 = &inter6[frac]; c2 = &inter6[6 - frac]; for (i = 0; i < 40; i++) { x1 = x0++; x2 = x0; s = x1[0] * c1[0]; s += x1[-1] * c1[6]; s += x1[-2] * c1[12]; s += x1[-3] * c1[18]; s += x1[-4] * c1[24]; s += x1[-5] * c1[30]; s += x1[-6] * c1[36]; s += x1[-7] * c1[42]; s += x1[-8] * c1[48]; s += x1[-9] * c1[54]; s += x2[0] * c2[0]; s += x2[1] * c2[6]; s += x2[2] * c2[12]; s += x2[3] * c2[18]; s += x2[4] * c2[24]; s += x2[5] * c2[30]; s += x2[6] * c2[36]; s += x2[7] * c2[42]; s += x2[8] * c2[48]; s += x2[9] * c2[54]; exc[i] = (s + 0x4000) >> 15; } } /* * Dec_lag6 * * * Parameters: * index I: received pitch index * pit_min I: minimum pitch lag * pit_max I: maximum pitch lag * i_subfr I: subframe flag * T0 B: integer part of pitch lag * T0_frac O : fractional part of pitch lag * * Function: * Decoding of fractional pitch lag with 1/6 resolution. * Extract the integer and fraction parts of the pitch lag from * the received adaptive codebook index. * * The fractional lag in 1st and 3rd subframes is encoded with 9 bits * while that in 2nd and 4th subframes is relatively encoded with 6 bits. * Note that in relative encoding only 61 values are used. 
If the * decoder receives 61, 62, or 63 as the relative pitch index, it means * that a transmission error occurred. In this case, the pitch lag from * previous subframe (actually from previous frame) is used. * * Returns: * void */ __device__ static void Dec_lag6(Word32 index, Word32 pit_min, Word32 pit_max, Word32 i_subfr, Word32 *T0, Word32 *T0_frac) { Word32 t0_min, t0_max, i; /* if 1st or 3rd subframe */ if (i_subfr == 0) { if (index < 463) { /* T0 = (index+5)/6 + 17 */ *T0 = (index + 5) / 6 + 17; i = *T0 + *T0 + *T0; /* *T0_frac = index - T0*6 + 105 */ *T0_frac = (index - (i + i)) + 105; } else { *T0 = index - 368; *T0_frac = 0; } } /* second or fourth subframe */ else { /* find t0_min and t0_max for 2nd (or 4th) subframe */ t0_min = *T0 - 5; if (t0_min < pit_min) { t0_min = pit_min; } t0_max = t0_min + 9; if (t0_max > pit_max) { t0_max = pit_max; t0_min = t0_max - 9; } /* i = (index+5)/6 - 1 */ i = (index + 5) / 6 - 1; *T0 = i + t0_min; i = i + i + i; *T0_frac = (index - 3) - (i + i); } } /* * decompress10 * * * Parameters: * MSBs I: MSB part of the index * LSBs I: LSB part of the index * index1 I: index for first pos in posIndex * index2 I: index for second pos in posIndex * index3 I: index for third pos in posIndex * pos_indx O: position of 3 pulses (decompressed) * Function: * Decompression of the linear codeword * * Returns: * void */ __device__ static void decompress10(Word32 MSBs, Word32 LSBs, Word32 index1, Word32 index2 , Word32 index3, Word32 pos_indx[]) { Word32 divMSB; if (MSBs > 124) { MSBs = 124; } /* * pos_indx[index1] = ((MSBs-25*(MSBs/25))%5)*2 + (LSBs-4*(LSBs/4))%2; * pos_indx[index2] = ((MSBs-25*(MSBs/25))/5)*2 + (LSBs-4*(LSBs/4))/2; * pos_indx[index3] = (MSBs/25)*2 + LSBs/4; */ divMSB = MSBs / 25; pos_indx[index1] = (((MSBs - 25 * (divMSB)) % 5) << 1) + (LSBs & 0x1 ); pos_indx[index2] = (((MSBs - 25 * (divMSB)) / 5) << 1) + ((LSBs & 0x2) >> 1); pos_indx[index3] = (divMSB << 1) + (LSBs >> 2); return; } /* * decompress_codewords * * * Parameters: * indx I: position of 8 pulses (compressed) * pos_indx O: position index of 8 pulses (position only) * * Function: * Decompression of the linear codewords to 4+three indeces * one bit from each pulse is made robust to errors by * minimizing the phase shift of a bit error. 
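 *
 * Worked example for one 10x10x10 track index (values chosen only for
 * illustration): with MSBs = 47 and LSBs = 5, decompress10() gives
 *   pos_indx[index1] = ((47 % 25) % 5) * 2 + (5 & 1)        = 5
 *   pos_indx[index2] = ((47 % 25) / 5) * 2 + ((5 & 2) >> 1) = 8
 *   pos_indx[index3] = (47 / 25) * 2 + (5 >> 2)             = 3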
* * i0,i4,i1 => one index (7+3) bits, 3 LSBs more robust * i2,i6,i5 => one index (7+3) bits, 3 LSBs more robust * i3,i7 => one index (5+2) bits, 2-3 LSbs more robust * * Returns: * void */ __device__ static void decompress_codewords(Word16 indx[], Word32 pos_indx[]) { Word32 ia, ib, MSBs, LSBs, MSBs0_24, tmp; /* * First index: 10x10x10 -> 2x5x2x5x2x5-> 125x2x2x2 -> 7+1x3 bits * MSBs = indx[NB_TRACK]/8; * LSBs = indx[NB_TRACK]%8; */ MSBs = *indx >> 3; LSBs = *indx & 0x7; decompress10(MSBs, LSBs, 0, 4, 1, pos_indx); /* * Second index: 10x10x10 -> 2x5x2x5x2x5-> 125x2x2x2 -> 7+1x3 bits * MSBs = indx[NB_TRACK+1]/8; * LSBs = indx[NB_TRACK+1]%8; */ MSBs = indx[1] >> 3; LSBs = indx[1] & 0x7; decompress10(MSBs, LSBs, 2, 6, 5, pos_indx); /* * Third index: 10x10 -> 2x5x2x5-> 25x2x2 -> 5+1x2 bits * MSBs = indx[NB_TRACK+2]/4; * LSBs = indx[NB_TRACK+2]%4; * MSBs0_24 = (MSBs*25+12)/32; * if ((MSBs0_24/5)%2==1) * pos_indx[3] = (4-(MSBs0_24%5))*2 + LSBs%2; * else * pos_indx[3] = (MSBs0_24%5)*2 + LSBs%2; * pos_indx[7] = (MSBs0_24/5)*2 + LSBs/2; */ MSBs = indx[2] >> 2; LSBs = indx[2] & 0x3; MSBs0_24 = (((MSBs * 25) + 12) >> 5); tmp = (MSBs0_24 * 6554) >> 15; ia = tmp & 0x1; ib = (MSBs0_24 - (tmp * 5)); if (ia == 1) { ib = 4 - ib; } pos_indx[3] = (ib << 1) + (LSBs & 0x1); pos_indx[7] = (tmp << 1) + (LSBs >> 1); } /* * decode_2i40_9bits * * * Parameters: * subNr I: subframe number * sign I: signs of 2 pulses * index I: Positions of the 2 pulses * cod O: algebraic (fixed) codebook excitation * * Function: * Algebraic codebook decoder * * Returns: * void */ __device__ static void decode_2i40_9bits(Word32 subNr, Word32 sign, Word32 index, Word32 cod[]) { Word32 pos[2]; Word32 i, j, k; /* Decode the positions */ /* table bit is the MSB */ j = (index & 64) >> 6; i = index & 7; /* pos0 =i*5+startPos[j*8+subNr*2] */ i = (i + (i << 2)); k = startPos[(j << 3) + (subNr << 1)]; pos[0] = i + k; index = index >> 3; i = index & 7; /* pos1 =i*5+startPos[j*8+subNr*2+1] */ i = (i + (i << 2)); k = startPos[((j << 3) + (subNr << 1)) + 1]; pos[1] = (Word16)(i + k); /* decode the signs and build the codeword */ memset(cod, 0, L_SUBFR << 2); for (j = 0; j < 2; j++) { i = sign & 1; sign = sign >> 1; if (i != 0) { cod[pos[j]] = 8191; /* +1.0 */ } else { cod[pos[j]] = -8192; /* -1.0 */ } } return; } /* * decode_2i40_11bits * * * Parameters: * sign I: signs of 2 pulses * index I: Positions of the 2 pulses * cod O: algebraic (fixed) codebook excitation * * Function: * Algebraic codebook decoder * * Returns: * void */ __device__ static void decode_2i40_11bits(Word32 sign, Word32 index, Word32 cod[]) { Word32 pos[2]; Word32 i, j; /* Decode the positions */ j = index & 1; index = index >> 1; i = index & 7; /* pos0 =i*5+1+j*2 */ i = (i + (i << 2)); i = (i + 1); j = (j << 1); pos[0] = i + j; index = index >> 3; j = index & 3; index = index >> 2; i = index & 7; if (j == 3) { /* pos1 =i*5+4 */ i = (i + (i << 2)); pos[1] = i + 4; } else { /* pos1 =i*5+j */ i = (i + (i << 2)); pos[1] = i + j; } /* decode the signs and build the codeword */ memset(cod, 0, L_SUBFR << 2); for (j = 0; j < 2; j++) { i = sign & 1; sign = sign >> 1; if (i != 0) { cod[pos[j]] = 8191; /* +1.0 */ } else { cod[pos[j]] = -8192; /* -1.0 */ } } return; } /* * decode_3i40_14bits * * * Parameters: * sign I: signs of 3 pulses * index I: Positions of the 3 pulses * cod O: algebraic (fixed) codebook excitation * * Function: * Algebraic codebook decoder * * Returns: * void */ __device__ static void decode_3i40_14bits(Word32 sign, Word32 index, Word32 cod[]) { Word32 pos[3]; Word32 
i, j; /* Decode the positions */ i = index & 7; /* pos0 =i*5 */ pos[0] = i + (i << 2); index = index >> 3; j = index & 1; index = index >> 1; i = index & 7; /* pos1 =i*5+1+j*2 */ i = (i + (i << 2)); i = (i + 1); j = (j << 1); pos[1] = i + j; index = index >> 3; j = index & 1; index = index >> 1; i = index & 7; /* pos2 =i*5+2+j*2 */ i = (i + (i << 2)); i = (i + 2); j = (j << 1); pos[2] = i + j; /* decode the signs and build the codeword */ memset(cod, 0, L_SUBFR << 2); for (j = 0; j < 3; j++) { i = sign & 1; sign = sign >> 1; if (i > 0) { cod[pos[j]] = 8191; /* +1.0 */ } else { cod[pos[j]] = -8192; /* -1.0 */ } } return; } /* * decode_3i40_14bits * * * Parameters: * sign I: signs of 4 pulses * index I: Positions of the 4 pulses * cod O: algebraic (fixed) codebook excitation * * Function: * Algebraic codebook decoder * * Returns: * void */ __device__ static void decode_4i40_17bits(Word32 sign, Word32 index, Word32 cod[]) { Word32 pos[4]; Word32 i, j; /* Decode the positions */ i = index & 7; i = dgray[i]; /* pos0 =i*5 */ pos[0] = i + (i << 2); index = index >> 3; i = index & 7; i = dgray[i]; /* pos1 =i*5+1 */ i = (i + (i << 2)); pos[1] = i + 1; index = index >> 3; i = index & 7; i = dgray[i]; /* pos2 =i*5+1 */ i = (i + (i << 2)); pos[2] = i + 2; index = index >> 3; j = index & 1; index = index >> 1; i = index & 7; i = dgray[i]; /* pos3 =i*5+3+j */ i = (i + (i << 2)); i = (i + 3); pos[3] = i + j; /* decode the signs and build the codeword */ memset(cod, 0, L_SUBFR << 2); for (j = 0; j < 4; j++) { i = sign & 1; sign = sign >> 1; if (i != 0) { cod[pos[j]] = 8191; } else { cod[pos[j]] = -8192; } } return; } /* * decode_8i40_31bits * * * Parameters: * index I: index of 8 pulses (sign+position) * cod O: algebraic (fixed) codebook excitation * * Function: * Algebraic codebook decoder * * Returns: * void */ __device__ static void decode_8i40_31bits(Word16 index[], Word32 cod[]) { Word32 linear_codewords[8]; Word32 i, j, pos1, pos2, sign; memset(cod, 0, L_CODE << 2); decompress_codewords(&index[NB_TRACK_MR102], linear_codewords); /* decode the positions and signs of pulses and build the codeword */ for (j = 0; j < NB_TRACK_MR102; j++) { /* compute index i */ i = linear_codewords[j]; i <<= 2; /* position of pulse "j" */ pos1 = i + j; if (index[j] == 0) { sign = POS_CODE; /* +1.0 */ } else { sign = -NEG_CODE; /* -1.0 */ } /* compute index i */ i = linear_codewords[j + 4]; i = i << 2; /* position of pulse "j+4" */ pos2 = i + j; cod[pos1] = sign; if (pos2 < pos1) { sign = -(sign); } cod[pos2] = cod[pos2] + sign; } return; } /* * decode_10i40_35bits * * * Parameters: * index I: index of 10 pulses (sign+position) * cod O: algebraic (fixed) codebook excitation * * Function: * Algebraic codebook decoder * * Returns: * void */ __device__ static void decode_10i40_35bits(Word16 index[], Word32 cod[]) { Word32 i, j, pos1, pos2, sign, tmp; memset(cod, 0, L_CODE << 2); /* decode the positions and signs of pulses and build the codeword */ for (j = 0; j < 5; j++) { /* compute index i */ tmp = index[j]; i = tmp & 7; i = dgray[i]; i = (i * 5); /* position of pulse "j" */ pos1 = (i + j); i = (tmp >> 3) & 1; if (i == 0) { sign = 4096; /* +1.0 */ } else { sign = -4096; /* -1.0 */ } /* compute index i */ i = index[j + 5] & 7; i = dgray[i]; i = i * 5; /* position of pulse "j+5" */ pos2 = (i + j); cod[pos1] = sign; if (pos2 < pos1) { sign = -(sign); } cod[pos2] = cod[pos2] + sign; } return; } /* * gmed_n * * * Parameters: * ind I: values * n I: The number of gains (odd) * * Function: * Calculates N-point median. 
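 *
 * Example (illustrative): with ind = {3, 9, 5, 1, 7} and n = 5 the
 * function returns the median value itself, here ind[2] = 5 (the middle
 * element of the sorted set {1, 3, 5, 7, 9}).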
* * Returns: * index of the median value */ __device__ static Word32 gmed_n(Word32 ind[], Word32 n) { Word32 tmp[NMAX], tmp2[NMAX]; Word32 max, medianIndex, i, j, ix = 0; for (i = 0; i < n; i++) { tmp2[i] = ind[i]; } for (i = 0; i < n; i++) { max = -32767; for (j = 0; j < n; j++) { if (tmp2[j] >= max) { max = tmp2[j]; ix = j; } } tmp2[ix] = -32768; tmp[i] = ix; } medianIndex = tmp[(n >> 1)]; return(ind[medianIndex]); } /* * ec_gain_pitch * * * Parameters: * st->pbuf I: last five gains * st->past_gain_pit I: past gain * state I: state of the state machine * gain_pitch O: pitch gain * * Function: * Calculates pitch from previous values. * * Returns: * void */ __device__ static void ec_gain_pitch(ec_gain_pitchState *st, Word16 state, Word32 * gain_pitch) { Word32 tmp; /* calculate median of last five gains */ tmp = gmed_n(st->pbuf, 5); /* new gain = minimum(median, past_gain) * pdown[state] */ if (tmp > st->past_gain_pit) { tmp = st->past_gain_pit; } *gain_pitch = (tmp * pdown[state]) >> 15; } /* * d_gain_pitch * * * Parameters: * mode I: AMR mode * index I: index of quantization * * Function: * Decodes the pitch gain using the received index * * Returns: * gain */ __device__ static Word32 d_gain_pitch(enum Mode mode, Word32 index) { Word32 gain; if (mode == MR122) { /* clear 2 LSBits */ gain = (qua_gain_pitch[index] >> 2) << 2; } else { gain = qua_gain_pitch[index]; } return gain; } /* * ec_gain_pitch_update * * * Parameters: * st->prev_gp B: previous pitch gain * st->past_gain_pit O: past gain * st->pbuf B: past gain buffer * bfi I: bad frame indicator * prev_bf I: previous frame was bad * gain_pitch B: pitch gain * * Function: * Update the pitch gain concealment state * Limit gain_pitch if the previous frame was bad * * Returns: * gain */ __device__ static void ec_gain_pitch_update(ec_gain_pitchState *st, Word32 bfi, Word32 prev_bf, Word32 *gain_pitch) { if (bfi == 0) { if (prev_bf != 0) { if (*gain_pitch > st->prev_gp) { *gain_pitch = st->prev_gp; } } st->prev_gp = *gain_pitch; } st->past_gain_pit = *gain_pitch; /* if (st->past_gain_pit > 1.0) */ if (st->past_gain_pit > 16384) { st->past_gain_pit = 16384; } st->pbuf[0] = st->pbuf[1]; st->pbuf[1] = st->pbuf[2]; st->pbuf[2] = st->pbuf[3]; st->pbuf[3] = st->pbuf[4]; st->pbuf[4] = st->past_gain_pit; } /* * gc_pred (366) * * * Parameters: * st->past_qua_en I: MA predictor * st->past_qua_en_MR122 I: MA predictor MR122 * mode I: AMR mode * code I: innovative codebook vector * exp_gcode0 O: predicted gain factor (exponent) * frac_gcode0 O: predicted gain factor (fraction) * exp_en I: innovation energy (MR795) (exponent) * frac_en I: innovation energy (MR795) (fraction) * * Function: * MA prediction of the innovation energy * * Mean removed innovation energy (dB) in subframe n * N-1 * E(n) = 10*log(gc*gc * SUM[(code(i) * code(i)]/N) - EMean * i=0 * N=40 * * Mean innovation energy (dB) * N-1 * Ei(n) = 10*log(SUM[(code(i) * code(i)]/N) * i=0 * * Predicted energy * 4 * Ep(n) = SUM[b(i) * R(n-i)] * i=1 * b = [0.68 0.58 0.34 0.19] * R(k) is quantified prediction error at subframe k * * E_Mean = 36 dB (MR122) * * Predicted gain gc is found by * * gc = POW[10, 0.05 * (Ep(n) + EMean - Ei)] * * Returns: * void */ __device__ static void gc_pred(dec_gc_predState *st, enum Mode mode, Word32 *code, Word32 * exp_gcode0, Word32 *frac_gcode0, Word32 *exp_en, Word32 *frac_en) { Word32 exp, frac, ener_code = 0, i = 0; /* energy of code: * ener_code = sum(code[i]^2) */ while (i < L_SUBFR) { ener_code += code[i] * code[i]; i++; } if ((0x3fffffff <= ener_code) | 
(ener_code < 0)) ener_code = MAX_32; else ener_code <<= 1; if (mode == MR122) { Word32 ener; /* ener_code = ener_code / lcode; lcode = 40; 1/40 = 26214 Q20 */ ener_code = ((ener_code + 0x00008000L) >> 16) * 52428; /* Q9 * Q20 -> Q30 */ /* energy of code: * ener_code(Q17) = 10 * Log10(energy) / constant * = 1/2 * Log2(energy) * constant = 20*Log10(2) */ /* ener_code = 1/2 * Log2(ener_code); Note: Log2=log2+30 */ Log2(ener_code, &exp, &frac); ener_code = ((exp - 30) << 16) + (frac << 1); /* Q16 for log(), ->Q17 for 1/2 log() */ /* * predicted energy: * ener(Q24) = (Emean + sum{pred[i]*pastEn[i]})/constant * = MEAN_ENER + sum(pred[i]*past_qua_en[i]) * constant = 20*Log10(2) */ ener = 0; i = 0; while (i < 4) { ener += st->past_qua_en_MR122[i] * pred_MR122[i]; i++; } ener <<= 1; ener += MEAN_ENER_MR122; /* * predicted codebook gain * gc0 = Pow10( (ener*constant - ener_code*constant) / 20 ) * = Pow2(ener-ener_code) * = Pow2(int(d)+frac(d)) */ ener = (ener - ener_code) >> 1; /* Q16 */ *exp_gcode0 = ener >> 16; *frac_gcode0 = (ener >> 1) - (*exp_gcode0 << 15); } /* all modes except 12.2 */ else { Word32 tmp, gcode0; int exp_code; /* * Compute: meansEner - 10log10(ener_code/ LSufr) */ exp_code = 0; if (ener_code != 0) { while (!(ener_code & 0x40000000)) { exp_code++; ener_code = ener_code << 1; } } /* Log2 = log2 + 27 */ Log2_norm(ener_code, exp_code, &exp, &frac); /* fact = 10/log2(10) = 3.01 = 24660 Q13 */ /* Q0.Q15 * Q13 -> Q14 */ tmp = (exp * (-49320)) + (((frac * (-24660)) >> 15) << 1); /* * tmp = meansEner - 10log10(ener_code/L_SUBFR) * = meansEner - 10log10(ener_code) + 10log10(L_SUBFR) * = K - fact * Log2(ener_code) * = K - fact * log2(ener_code) - fact*27 * * ==> K = meansEner + fact*27 + 10log10(L_SUBFR) * * meansEner = 33 = 540672 Q14 (MR475, MR515, MR59) * meansEner = 28.75 = 471040 Q14 (MR67) * meansEner = 30 = 491520 Q14 (MR74) * meansEner = 36 = 589824 Q14 (MR795) * meansEner = 33 = 540672 Q14 (MR102) * 10log10(L_SUBFR) = 16.02 = 262481.51 Q14 * fact * 27 = 1331640 Q14 * ----------------------------------------- * (MR475, MR515, MR59) K = 2134793.51 Q14 ~= 16678 * 64 * 2 * (MR67) K = 2065161.51 Q14 ~= 32268 * 32 * 2 * (MR74) K = 2085641.51 Q14 ~= 32588 * 32 * 2 * (MR795) K = 2183945.51 Q14 ~= 17062 * 64 * 2 * (MR102) K = 2134793.51 Q14 ~= 16678 * 64 * 2 */ if (mode == MR102) { /* mean = 33 dB */ tmp += 2134784; /* Q14 */ } else if (mode == MR795) { /* mean = 36 dB */ tmp += 2183936; /* Q14 */ /* * ener_code = <xn xn> * 2^27*2^exp_code * frac_en = ener_code / 2^16 * = <xn xn> * 2^11*2^exp_code * <xn xn> = <xn xn>*2^11*2^exp * 2^exp_en * := frac_en * 2^exp_en * * ==> exp_en = -11-exp_code; */ *frac_en = ener_code >> 16; *exp_en = -11 - exp_code; } else if (mode == MR74) { /* mean = 30 dB */ tmp += 2085632; /* Q14 */ } else if (mode == MR67) { /* mean = 28.75 dB */ tmp += 2065152; /* Q14 */ } else /* MR59, MR515, MR475 */ { /* mean = 33 dB */ tmp += 2134784; /* Q14 */ } /* * Compute gcode0 * = Sum(i=0,3) pred[i]*past_qua_en[i] - ener_code + meanEner */ tmp = tmp << 9; /* Q23 */ /* Q13 * Q10 -> Q23 */ i = 0; while (i < 4) { tmp += pred[i] * st->past_qua_en[i]; i++; } gcode0 = tmp >> 15; /* Q8 */ /* * gcode0 = pow(10.0, gcode0/20) * = pow(2, 3.3219*gcode0/20) * = pow(2, 0.166*gcode0) */ /* 5439 Q15 = 0.165985 */ /* (correct: 1/(20*log10(2)) 0.166096 = 5443 Q15) */ /* For IS641 bitexactness */ if (mode == MR74) { /* Q8 * Q15 -> Q24 */ tmp = gcode0 * 10878; } else { /* Q8 * Q15 -> Q24 */ tmp = gcode0 * 10886; } tmp = tmp >> 9; /* -> Q15 */ /* -> Q0.Q15 */ *exp_gcode0 = tmp >> 15; 
*frac_gcode0 = tmp - (*exp_gcode0 * 32768); } } /* * gc_pred_update * * * Parameters: * st->past_qua_en B: MA predictor * st->past_qua_en_MR122 B: MA predictor MR122 * qua_ener_MR122 I: quantized energy for update (log2(quaErr)) * qua_ener I: quantized energy for update (20*log10(quaErr)) * * Function: * Update MA predictor with last quantized energy * * Returns: * void */ __device__ static void gc_pred_update(dec_gc_predState *st, Word32 qua_ener_MR122, Word32 qua_ener) { Word32 i; for (i = 3; i > 0; i--) { st->past_qua_en[i] = st->past_qua_en[i - 1]; st->past_qua_en_MR122[i] = st->past_qua_en_MR122[i - 1]; } st->past_qua_en_MR122[0] = qua_ener_MR122; /* log2 (quaErr), Q10 */ st->past_qua_en[0] = qua_ener; /* 20*log10(quaErr), Q10 */ } /* * Dec_gain * * * Parameters: * pred_state->past_qua_en B: MA predictor * pred_state->past_qua_en_MR122 B: MA predictor MR122 * mode I: AMR mode * index I: index of quantization * code I: Innovative vector * evenSubfr I: Flag for even subframes * gain_pit O: Pitch gain * gain_cod O: Code gain * * Function: * Decode the pitch and codebook gains * * Returns: * void */ __device__ static void Dec_gain(dec_gc_predState *pred_state, enum Mode mode, Word32 index, Word32 code[], Word32 evenSubfr, Word32 *gain_pit, Word32 *gain_cod) { Word32 frac, gcode0, exp, qua_ener, qua_ener_MR122, g_code, tmp; const Word32 *p; /* Read the quantized gains (table depends on mode) */ index = index << 2; if ((mode == MR102) || (mode == MR74) || (mode == MR67)) { p = &table_gain_highrates[index]; *gain_pit = *p++; g_code = *p++; qua_ener_MR122 = *p++; qua_ener = *p; } else { if (mode == MR475) { index = index + ((1 - evenSubfr) << 1); p = &table_gain_MR475[index]; *gain_pit = *p++; g_code = *p++; /* * calculate predictor update values (not stored in 4.75 * quantizer table to save space): * qua_ener = log2(g) * qua_ener_MR122 = 20*log10(g) */ /* Log2(x Q12) = log2(x) + 12 */ Log2(g_code, &exp, &frac); exp = exp - 12; tmp = frac >> 5; if ((frac & ((Word16)1 << 4)) != 0) { tmp++; } qua_ener_MR122 = tmp + (exp << 10); /* 24660 Q12 ~= 6.0206 = 20*log10(2) */ tmp = exp * 49320; tmp += (((frac * 24660) >> 15) << 1); /* Q12 * Q0 = Q13 -> Q10 */ qua_ener = ((tmp << 13) + 0x00008000L) >> 16; } else { p = &table_gain_lowrates[index]; *gain_pit = *p++; g_code = *p++; qua_ener_MR122 = *p++; qua_ener = *p; } } /* * predict codebook gain * gc0 = Pow2(int(d)+frac(d)) * = 2^exp + 2^frac * gcode0 (Q14) = 2^14*2^frac = gc0 * 2^(14-exp) */ gc_pred(pred_state, mode, code, &exp, &frac, NULL, NULL); gcode0 = Pow2(14, frac); /* * read quantized gains, update table of past quantized energies * st->past_qua_en(Q10) = 20 * Log10(gFac) / constant * = Log2(gFac) * = qua_ener * constant = 20*Log10(2) */ if (exp < 11) { *gain_cod = (g_code * gcode0) >> (25 - exp); } else { tmp = ((g_code * gcode0) << (exp - 9)); if ((tmp >> (exp - 9)) != (g_code * gcode0)) { *gain_cod = 0x7FFF; } else { *gain_cod = tmp >> 16; } } /* update table of past quantized energies */ gc_pred_update(pred_state, qua_ener_MR122, qua_ener); return; } /* * gc_pred_average_limited * * * Parameters: * st->past_qua_en I: MA predictor * st->past_qua_en_MR122 I: MA predictor MR122 * ener_avg_MR122 O: everaged quantized energy (log2(quaErr)) * ener_avg O: averaged quantized energy (20*log10(quaErr)) * * Function: * Compute average limited quantized energy * Returns: * void */ __device__ static void gc_pred_average_limited(dec_gc_predState *st, Word32 *ener_avg_MR122, Word32 *ener_avg) { Word32 av_pred_en, i; /* do average in MR122 mode (log2() 
domain) */ av_pred_en = 0; for (i = 0; i < NPRED; i++) { av_pred_en = (av_pred_en + st->past_qua_en_MR122[i]); } /* av_pred_en = 0.25*av_pred_en */ av_pred_en = (av_pred_en * 8192) >> 15; /* if (av_pred_en < -14/(20Log10(2))) av_pred_en = .. */ if (av_pred_en < MIN_ENERGY_MR122) { av_pred_en = MIN_ENERGY_MR122; } *ener_avg_MR122 = (Word16)av_pred_en; /* do average for other modes (20*log10() domain) */ av_pred_en = 0; for (i = 0; i < NPRED; i++) { av_pred_en = (av_pred_en + st->past_qua_en[i]); if (av_pred_en < -32768) av_pred_en = -32768; else if (av_pred_en > 32767) av_pred_en = 32767; } /* av_pred_en = 0.25*av_pred_en */ av_pred_en = (av_pred_en * 8192) >> 15; *ener_avg = av_pred_en; } /* * ec_gain_code * * * Parameters: * st->gbuf I: last five gains * st->past_gain_code I: past gain * pred_state B: MA predictor state * state I: state of the state machine * gain_code O: decoded innovation gain * * Function: * Conceal the codebook gain * * Returns: * void */ __device__ static void ec_gain_code(ec_gain_codeState *st, dec_gc_predState *pred_state, Word16 state, Word32 *gain_code) { Word32 tmp, qua_ener_MR122, qua_ener; /* calculate median of last five gain values */ tmp = gmed_n(st->gbuf, 5); /* new gain = minimum(median, past_gain) * cdown[state] */ if (tmp > st->past_gain_code) { tmp = st->past_gain_code; } tmp = (tmp * cdown[state]) >> 15; *gain_code = tmp; /* * update table of past quantized energies with average of * current values */ gc_pred_average_limited(pred_state, &qua_ener_MR122, &qua_ener); gc_pred_update(pred_state, qua_ener_MR122, qua_ener); } /* * ec_gain_code_update * * * Parameters: * st->gbuf B: last five gains * st->past_gain_code O: past gain * st->prev_gc B previous gain * bfi I: bad indicator * prev_bf I: previous frame bad indicator * gain_code O: decoded innovation gain * * Function: * Update the codebook gain concealment state * * Returns: * void */ __device__ static void ec_gain_code_update(ec_gain_codeState *st, Word16 bfi, Word16 prev_bf, Word32 *gain_code) { /* limit gain_code by previous good gain if previous frame was bad */ if (bfi == 0) { if (prev_bf != 0) { if (*gain_code > st->prev_gc) { *gain_code = st->prev_gc; } } st->prev_gc = *gain_code; } /* update EC states: previous gain, gain buffer */ st->past_gain_code = *gain_code; st->gbuf[0] = st->gbuf[1]; st->gbuf[1] = st->gbuf[2]; st->gbuf[2] = st->gbuf[3]; st->gbuf[3] = st->gbuf[4]; st->gbuf[4] = *gain_code; return; } /* * d_gain_code * * * Parameters: * pred_state B: MA predictor state * mode I: AMR mode (MR795 or MR122) * index I: received quantization index * code I: innovation codevector * gain_code O: decoded innovation gain * * Function: * Decode the fixed codebook gain using the received index * * Returns: * void */ __device__ static void d_gain_code(dec_gc_predState *pred_state, enum Mode mode, Word32 index, Word32 code[], Word32 *gain_code) { Word32 g_code0, exp, frac, qua_ener_MR122, qua_ener; Word32 exp_inn_en, frac_inn_en, tmp, tmp2, i; const Word32 *p; /* * Decode codebook gain */ gc_pred(pred_state, mode, code, &exp, &frac, &exp_inn_en, &frac_inn_en); p = &qua_gain_code[((index + index) + index)]; /* Different scalings between MR122 and the other modes */ if (mode == MR122) { /* predicted gain */ g_code0 = Pow2(exp, frac); if (g_code0 <= 2047) g_code0 = g_code0 << 4; else g_code0 = 32767; *gain_code = ((g_code0 * *p++) >> 15) << 1; if (*gain_code & 0xFFFF8000) *gain_code = 32767; } else { g_code0 = Pow2(14, frac); tmp = (*p++ * g_code0) << 1; exp = 9 - exp; if (exp > 0) { tmp = tmp >> exp; 
} else { for (i = exp; i < 0; i++) { tmp2 = tmp << 1; if ((tmp ^ tmp2) & 0x80000000) { tmp = (tmp & 0x80000000) ? 0x80000000 : 0x7FFFFFFF; break; } else { tmp = tmp2; } } } *gain_code = tmp >> 16; if (*gain_code & 0xFFFF8000) *gain_code = 32767; } /* * update table of past quantized energies */ qua_ener_MR122 = *p++; qua_ener = *p++; gc_pred_update(pred_state, qua_ener_MR122, qua_ener); return; } /* * Int_lsf * * * Parameters: * lsf_old I: LSF vector at the 4th subframe of past frame * lsf_new I: LSF vector at the 4th subframe of present frame * i_subfr I: current subframe * lsf_out O: interpolated LSF parameters for current subframe * * Function: * Interpolates the LSFs for selected subframe * * The LSFs are interpolated at the 1st, 2nd and 3rd * ubframe and only forwarded at the 4th subframe. * * sf1: 3/4 F0 + 1/4 F1 * sf2: 1/2 F0 + 1/2 F1 * sf3: 1/4 F0 + 3/4 F1 * sf4: F1 * * Returns: * void */ __device__ static void Int_lsf(Word32 lsf_old[], Word32 lsf_new[], int i_subfr, Word32 lsf_out[]) { Word32 i; switch (i_subfr) { case 0: for (i = 0; i < 10; i++) { lsf_out[i] = lsf_old[i] - (lsf_old[i] >> 2) + (lsf_new[i] >> 2); } break; case 40: for (i = 0; i < 10; i++) { lsf_out[i] = (lsf_old[i] >> 1) + (lsf_new[i] >> 1); } break; case 80: for (i = 0; i < 10; i++) { lsf_out[i] = (lsf_old[i] >> 2) - (lsf_new[i] >> 2) + lsf_new[i]; } break; case 120: memcpy(lsf_out, lsf_new, M << 2); break; } } /* * Cb_gain_average * * * Parameters: * st->cbGainHistory B: codebook gain history * st->hangCount B: hangover counter * mode I: AMR mode * gain_code I: codebook gain * lsp I: The LSP for the current frame * lspAver I: The average of LSP for 8 frames * bfi I: bad frame indication * prev_bf I: previous bad frame indication * pdfi I: potential degraded bad frame indication * prev_pdf I: previous potential degraded bad frame indication * inBackgroundNoise I: background noise decision * voicedHangover I: number of frames after last voiced frame * * Function: * The mixed codebook gain, used to make codebook gain more smooth in background * * * Returns: * void */ __device__ static Word32 Cb_gain_average(Cb_gain_averageState *st, enum Mode mode, Word32 gain_code, Word32 lsp[], Word32 lspAver[], Word16 bfi, Word16 prev_bf, Word16 pdfi, Word16 prev_pdf, Word32 inBackgroundNoise, Word32 voicedHangover) { Word32 tmp[M]; Word32 i, cbGainMix, tmp_diff, bgMix, cbGainMean, sum, diff, tmp1, tmp2; int shift1, shift2, shift; /* set correct cbGainMix for MR74, MR795, MR122 */ cbGainMix = gain_code; /* * Store list of CB gain needed in the CB gain averaging * */ st->cbGainHistory[0] = st->cbGainHistory[1]; st->cbGainHistory[1] = st->cbGainHistory[2]; st->cbGainHistory[2] = st->cbGainHistory[3]; st->cbGainHistory[3] = st->cbGainHistory[4]; st->cbGainHistory[4] = st->cbGainHistory[5]; st->cbGainHistory[5] = st->cbGainHistory[6]; st->cbGainHistory[6] = gain_code; /* compute lsp difference */ for (i = 0; i < M; i++) { tmp1 = labs(lspAver[i] - lsp[i]); shift1 = 0; if (tmp1 != 0) { while (!(tmp1 & 0x2000)) { shift1++; tmp1 = tmp1 << 1; } } tmp2 = lspAver[i]; shift2 = 0; if (tmp2 != 0) { while (!(tmp2 & 0x4000)) { shift2++; tmp2 = tmp2 << 1; } } tmp[i] = (tmp1 << 15) / tmp2; shift = 2 + shift1 - shift2; if (shift >= 0) { tmp[i] = tmp[i] >> shift; } else { tmp[i] = tmp[i] << -(shift); } } diff = *tmp + tmp[1] + tmp[2] + tmp[3] + tmp[4] + tmp[5] + tmp[6] + tmp[7] + tmp[8] + tmp[9]; /* saturate */ if (diff > 32767) { diff = 32767; } /* Compute hangover */ st->hangVar += 1; if (diff <= 5325) { st->hangVar = 0; } if (st->hangVar > 10) { 
/* Speech period, reset hangover variable */ st->hangCount = 0; } /* Compute mix constant (bgMix) */ bgMix = 8192; /* MR475, MR515, MR59, MR67, MR102 */ if ((mode <= MR67) | (mode == MR102)) { /* disable mix if too short time since */ if ((st->hangCount >= 40) & (diff <= 5325)) /* 0.65 in Q13 */ { /* if errors and presumed noise make smoothing probability stronger */ if (((((pdfi != 0) & (prev_pdf != 0)) | (bfi != 0) | ( prev_bf != 0)) & ((voicedHangover > 1)) & ( inBackgroundNoise != 0) & (mode < MR67))) { /* bgMix = min(0.25, max(0.0, diff-0.55)) / 0.25; */ tmp_diff = diff - 4506; /* 0.55 in Q13 */ /* max(0.0, diff-0.55) */ tmp1 = 0; if (tmp_diff > 0) { tmp1 = tmp_diff; } /* min(0.25, tmp1) */ if (2048 >= tmp1) { bgMix = tmp1 << 2; } } else { /* bgMix = min(0.25, max(0.0, diff-0.40)) / 0.25; */ tmp_diff = diff - 3277; /* 0.4 in Q13 */ /* max(0.0, diff-0.40) */ tmp1 = 0; if (tmp_diff > 0) { tmp1 = tmp_diff; } /* min(0.25, tmp1) */ if (2048 >= tmp1) { bgMix = tmp1 << 2; } } } /* * Smoothen the cb gain trajectory * smoothing depends on mix constant bgMix */ sum = st->cbGainHistory[2] + st->cbGainHistory[3] + st->cbGainHistory[4] + st->cbGainHistory[5] + st->cbGainHistory[6]; if (sum > 163822) { cbGainMean = 32767; } else { cbGainMean = (3277 * sum + 0x00002000L) >> 14; /* Q1 */ } /* more smoothing in error and bg noise (NB no DFI used here) */ if (((bfi != 0) | (prev_bf != 0)) & (inBackgroundNoise != 0) & ( mode < MR67)) { sum = 9362 * (st->cbGainHistory[0] + st->cbGainHistory[1] + st-> cbGainHistory[2] + st->cbGainHistory[3] + st->cbGainHistory[4] + st->cbGainHistory[5] + st->cbGainHistory[6]); cbGainMean = (sum + 0x00008000L) >> 16; /* Q1 */ } /* cbGainMix = bgMix*cbGainMix + (1-bgMix)*cbGainMean; */ sum = bgMix * cbGainMix; /* sum in Q14 */ sum += cbGainMean << 13; sum -= bgMix * cbGainMean; cbGainMix = (sum + 0x00001000L) >> 13; /* Q1 */ } st->hangCount += 1; if (st->hangCount & 0x80000000) st->hangCount = 40; return cbGainMix; } /* * ph_disp * * * Parameters: * state->gainMem B: LTP gain memory * state->prevCbGain B: Codebook gain memory * mode I: AMR mode * x B: LTP excitation signal -> total excitation signal * cbGain I: Codebook gain * ltpGain I: LTP gain * inno B: Innovation vector * pitch_fac I: pitch factor used to scale the LTP excitation * tmp_shift I: shift factor applied to sum of scaled LTP ex & innov. 
* before rounding * * Function: * Adaptive phase dispersion; forming of total excitation * * * Returns: * void */ __device__ static void ph_disp(ph_dispState *state, enum Mode mode, Word32 x[], Word32 cbGain, Word32 ltpGain, Word32 inno[], Word32 pitch_fac, Word32 tmp_shift) { Word32 inno_sav[L_SUBFR], ps_poss[L_SUBFR]; Word32 i, i1, impNr, temp1, temp2, j, nze, nPulse, ppos; const Word32 *ph_imp; /* Pointer to phase dispersion filter */ /* Update LTP gain memory */ state->gainMem[4] = state->gainMem[3]; state->gainMem[3] = state->gainMem[2]; state->gainMem[2] = state->gainMem[1]; state->gainMem[1] = state->gainMem[0]; state->gainMem[0] = ltpGain; /* basic adaption of phase dispersion */ /* no dispersion */ impNr = 2; /* if (ltpGain < 0.9) */ if (ltpGain < PHDTHR2LTP) { /* maximum dispersion */ impNr = 0; /* if (ltpGain > 0.6 */ if (ltpGain > PHDTHR1LTP) { /* medium dispersion */ impNr = 1; } } /* onset indicator */ /* onset = (cbGain > onFact * cbGainMem[0]) */ temp1 = ((state->prevCbGain * ONFACTPLUS1) + 0x1000) >> 13; if (cbGain > temp1) { state->onset = ONLENGTH; } else { if (state->onset > 0) { state->onset--; } } /* * if not onset, check ltpGain buffer and use max phase dispersion if * half or more of the ltpGain-parameters say so */ if (state->onset == 0) { /* Check LTP gain memory and set filter accordingly */ i1 = 0; for (i = 0; i < PHDGAINMEMSIZE; i++) { if (state->gainMem[i] < PHDTHR1LTP) { i1++; } } if (i1 > 2) { impNr = 0; } } /* Restrict decrease in phase dispersion to one step if not onset */ if ((impNr > (state->prevState + 1)) & (state->onset == 0)) { impNr--; } /* if onset, use one step less phase dispersion */ if ((impNr<2)&(state->onset>0)) { impNr++; } /* disable for very low levels */ if (cbGain < 10) { impNr = 2; } if (state->lockFull == 1) { impNr = 0; } /* update static memory */ state->prevState = impNr; state->prevCbGain = cbGain; /* * do phase dispersion for all modes but 12.2 and 7.4; * don't modify the innovation if impNr >=2 (= no phase disp) */ if ((mode != MR122) & (mode != MR102) & (mode != MR74) & (impNr < 2) ) { /* * track pulse positions, save innovation, * and initialize new innovation */ nze = 0; for (i = 0; i < L_SUBFR; i++) { if (inno[i] != 0) { ps_poss[nze] = i; nze++; } } memcpy(inno_sav, inno, L_SUBFR << 2); memset(inno, 0, L_SUBFR << 2); /* Choose filter corresponding to codec mode and dispersion criterium */ ph_imp = ph_imp_mid; if (impNr == 0) { ph_imp = ph_imp_low; } if (mode == MR795) { ph_imp = ph_imp_mid_MR795; if (impNr == 0) { ph_imp = ph_imp_low_MR795; } } /* Do phase dispersion of innovation */ for (nPulse = 0; nPulse < nze; nPulse++) { ppos = ps_poss[nPulse]; /* circular convolution with impulse response */ j = 0; for (i = ppos; i < L_SUBFR; i++) { /* inno[i1] += inno_sav[ppos] * ph_imp[i1-ppos] */ temp1 = (inno_sav[ppos] * ph_imp[j++]) >> 15; inno[i] = inno[i] + temp1; } for (i = 0; i < ppos; i++) { /* inno[i] += inno_sav[ppos] * ph_imp[L_SUBFR-ppos+i] */ temp1 = (inno_sav[ppos] * ph_imp[j++]) >> 15; inno[i] = inno[i] + temp1; } } } /* * compute total excitation for synthesis part of decoder * (using modified innovation if phase dispersion is active) */ for (i = 0; i < L_SUBFR; i++) { /* x[i] = gain_pit*x[i] + cbGain*code[i]; */ temp1 = x[i] * pitch_fac + inno[i] * cbGain; temp2 = temp1 << tmp_shift; x[i] = (temp2 + 0x4000) >> 15; if (labs(x[i]) > 32767) { if ((temp1 ^ temp2) & 0x80000000) { x[i] = (temp1 & 0x80000000) ? -32768 : 32767; } else { x[i] = (temp2 & 0x80000000) ? 
-32768 : 32767; } } } return; } /* * sqrt_l_exp * * * Parameters: * x I: input value * exp O: right shift to be applied to result * * Function: * Sqrt with exponent value. * * y = sqrt(x) * x = f * 2^-e, 0.5 <= f < 1 (normalization) * y = sqrt(f) * 2^(-e/2) * * a) e = 2k --> y = sqrt(f) * 2^-k * (k = e div 2, 0.707 <= sqrt(f) < 1) * b) e = 2k+1 --> y = sqrt(f/2) * 2^-k * (k = e div 2, 0.5 <= sqrt(f/2) < 0.707) * * * Returns: * y output value */ __device__ static Word32 sqrt_l_exp(Word32 x, Word32 *exp) { Word32 y, a, i, tmp; int e; if (x <= (Word32)0) { *exp = 0; return(Word32)0; } e = 0; if (x != 0) { tmp = x; while (!(tmp & 0x40000000)) { e++; tmp = tmp << 1; } } e = e & 0xFFFE; x = (x << e); *exp = (Word16)e; x = (x >> 9); i = (Word16)(x >> 16); x = (x >> 1); a = x & (Word16)0x7fff; i = (i - 16); y = (sqrt_table[i] << 16); tmp = (sqrt_table[i] - sqrt_table[i + 1]); y -= (tmp * a) << 1; return(y); } /* * Ex_ctrl * * * Parameters: * excitation B: Current subframe excitation * excEnergy I: Exc. Energy, sqrt(totEx*totEx) * exEnergyHist I: History of subframe energies * voicedHangover I: number of frames after last voiced frame * prevBFI I: Set i previous bad frame indicators * carefulFlag I: Restrict dymamic in scaling * * Function: * Charaterice synthesis speech and detect background noise * * Returns: * background noise decision; 0 = no bgn, 1 = bgn */ __device__ static Word16 Ex_ctrl(Word32 excitation[], Word32 excEnergy, Word32 exEnergyHist[], Word32 voicedHangover, Word16 prevBFI, Word16 carefulFlag ) { Word32 i, testEnergy, scaleFactor, avgEnergy, prevEnergy, T0; int exp; /* get target level */ avgEnergy = gmed_n(exEnergyHist, 9); prevEnergy = (exEnergyHist[7] + exEnergyHist[8]) >> 1; if (exEnergyHist[8] < prevEnergy) { prevEnergy = exEnergyHist[8]; } /* upscaling to avoid too rapid energy rises for some cases */ if ((excEnergy<avgEnergy)&(excEnergy>5)) { /* testEnergy = 4*prevEnergy; */ testEnergy = prevEnergy << 2; if ((voicedHangover < 7) || prevBFI != 0) { /* testEnergy = 3*prevEnergy */ testEnergy = testEnergy - prevEnergy; } if (avgEnergy > testEnergy) { avgEnergy = testEnergy; } /* scaleFactor=avgEnergy/excEnergy in Q0 */ exp = 0; if (excEnergy != 0) { while (!(excEnergy & 0x4000)) { exp++; excEnergy = excEnergy << 1; } } excEnergy = 536838144 / excEnergy; T0 = (avgEnergy * excEnergy) << 1; T0 = (T0 >> (20 - exp)); if (T0 > 32767) { /* saturate */ T0 = 32767; } scaleFactor = T0; /* test if scaleFactor > 3.0 */ if ((carefulFlag != 0) & (scaleFactor > 3072)) { scaleFactor = 3072; } /* scale the excitation by scaleFactor */ for (i = 0; i < L_SUBFR; i++) { T0 = (scaleFactor * excitation[i]) << 1; T0 = (T0 >> 11); excitation[i] = T0; } } return 0; } /* * Inv_sqrt * * * Parameters: * x I: input value * * Function: * 1/sqrt(x) * * Returns: * y 1/sqrt(x) */ __device__ static Word32 Inv_sqrt(Word32 x) { int i, a, tmp, exp; Word32 y; if (x <= (Word32)0) return((Word32)0x3fffffffL); exp = 0; while (!(x & 0x40000000)) { exp++; x = x << 1; } /* x is normalized */ exp = (30 - exp); /* If exponent even -> shift right */ if ((exp & 1) == 0) { x = (x >> 1); } exp = (exp >> 1); exp = (exp + 1); x = (x >> 9); /* Extract b25-b31 */ i = (Word16)(x >> 16); /* Extract b10-b24 */ x = (x >> 1); a = x & (Word16)0x7fff; i = (i - 16); /* table[i] << 16 */ y = inv_sqrt_table[i] << 16; /* table[i] - table[i+1]) */ tmp = (inv_sqrt_table[i] - inv_sqrt_table[i + 1]); /* y -= tmp*a*2 */ y -= (tmp * a) << 1; /* denormalization */ y = (y >> exp); return(y); } /* * energy_old * * * Parameters: * in I: input value 
* * Function: * Energy of signal * * Returns: * Energy */ __device__ static Word32 energy_old(Word32 in[]) { Word32 temp, i, sum = 0; for (i = 0; i < L_SUBFR; i += 8) { temp = in[i] >> 2; sum += temp * temp; temp = in[i + 1] >> 2; sum += temp * temp; temp = in[i + 2] >> 2; sum += temp * temp; temp = in[i + 3] >> 2; sum += temp * temp; temp = in[i + 4] >> 2; sum += temp * temp; temp = in[i + 5] >> 2; sum += temp * temp; temp = in[i + 6] >> 2; sum += temp * temp; temp = in[i + 7] >> 2; sum += temp * temp; } if (sum & 0xC0000000) { return 0x7FFFFFFF; } return(sum << 1); } /* * energy_new * * * Parameters: * in I: input value * * Function: * Energy of signal * * Returns: * Energy */ __device__ static Word32 energy_new(Word32 in[]) { Word32 i, s = 0, overflow = 0; s += in[0] * in[0]; for (i = 1; i < L_SUBFR; i += 3) { s += in[i] * in[i]; s += in[i + 1] * in[i + 1]; s += in[i + 2] * in[i + 2]; if (s & 0xC0000000) { overflow = 1; break; } } /* check for overflow */ if (overflow) { s = energy_old(in); } else { s = (s >> 3); } return s; } /* * agc2 * * * Parameters: * sig_in I: Post_Filter input signal * sig_out B: Post_Filter output signal * * Function: * Scales the excitation on a subframe basis * * Returns: * Energy */ __device__ static void agc2(Word32 *sig_in, Word32 *sig_out) { Word32 s; int i, exp; Word16 gain_in, gain_out, g0; /* calculate gain_out with exponent */ s = energy_new(sig_out); if (s == 0) { return; } exp = 0; while (!(s & 0x20000000)) { exp++; s = s << 1; } gain_out = (Word16)((s + 0x00008000L) >> 16); /* calculate gain_in with exponent */ s = energy_new(sig_in); if (s == 0) { g0 = 0; } else { i = 0; while (!(s & 0x40000000)) { i++; s = s << 1; } if (s < 0x7fff7fff) gain_in = (Word16)((s + 0x00008000L) >> 16); else gain_in = 32767; exp = (exp - i); /* * g0 = sqrt(gain_in/gain_out); */ /* s = gain_out / gain_in */ s = (gain_out << 15) / gain_in; s = (s << 7); if (exp > 0) s = (s >> exp); else s = (s << (-exp)); s = Inv_sqrt(s); g0 = (Word16)(((s << 9) + 0x00008000L) >> 16); } /* sig_out(n) = gain(n) * sig_out(n) */ for (i = 0; i < L_SUBFR; i++) { sig_out[i] = (sig_out[i] * g0) >> 12; } return; } /* * Bgn_scd * * * Parameters: * st->frameEnergyHist B: Frame Energy memory * st->bgHangover B: Background hangover counter * ltpGainHist I: LTP gain history * speech I: synthesis speech frame * voicedHangover O: number of frames after last voiced frame * * Function: * Charaterice synthesis speech and detect background noise * * Returns: * inbgNoise background noise decision; 0 = no bgn, 1 = bgn */ __device__ static Word16 Bgn_scd(Bgn_scdState *st, Word32 ltpGainHist[], Word32 speech[], Word32 *voicedHangover) { Word32 temp, ltpLimit, frame_energyMin, currEnergy, noiseFloor, maxEnergy, maxEnergyLastPart, s, i; Word16 prevVoiced, inbgNoise; /* * Update the inBackgroundNoise flag (valid for use in next frame if BFI) * it now works as a energy detector floating on top * not as good as a VAD. 
*/ s = 0; for (i = 0; i < L_FRAME; i++) { s += speech[i] * speech[i]; } if ((s < 0xFFFFFFF) & (s >= 0)) currEnergy = s >> 13; else currEnergy = 32767; frame_energyMin = 32767; for (i = 0; i < L_ENERGYHIST; i++) { if (st->frameEnergyHist[i] < frame_energyMin) frame_energyMin = st->frameEnergyHist[i]; } /* Frame Energy Margin of 16 */ noiseFloor = frame_energyMin << 4; maxEnergy = st->frameEnergyHist[0]; for (i = 1; i < L_ENERGYHIST - 4; i++) { if (maxEnergy < st->frameEnergyHist[i]) { maxEnergy = st->frameEnergyHist[i]; } } maxEnergyLastPart = st->frameEnergyHist[2 * L_ENERGYHIST / 3]; for (i = 2 * L_ENERGYHIST / 3 + 1; i < L_ENERGYHIST; i++) { if (maxEnergyLastPart < st->frameEnergyHist[i]) { maxEnergyLastPart = st->frameEnergyHist[i]; } } /* false */ inbgNoise = 0; /* * Do not consider silence as noise * Do not consider continuous high volume as noise * Or if the current noise level is very low * Mark as noise if under current noise limit * OR if the maximum energy is below the upper limit */ if ((maxEnergy> LOWERNOISELIMIT)&(currEnergy<FRAMEENERGYLIMIT)&( currEnergy>LOWERNOISELIMIT) & ((currEnergy < noiseFloor) || ( maxEnergyLastPart < UPPERNOISELIMIT))) { if ((st->bgHangover + 1) > 30) { st->bgHangover = 30; } else { st->bgHangover += 1; } } else { st->bgHangover = 0; } /* make final decision about frame state, act somewhat cautiosly */ if (st->bgHangover > 1) inbgNoise = 1; /* true */ for (i = 0; i < L_ENERGYHIST - 1; i++) { st->frameEnergyHist[i] = st->frameEnergyHist[i + 1]; } st->frameEnergyHist[L_ENERGYHIST - 1] = currEnergy; /* * prepare for voicing decision; * tighten the threshold after some time in noise */ ltpLimit = 13926; /* 0.85 Q14 */ if (st->bgHangover > 8) { ltpLimit = 15565; /* 0.95 Q14 */ } if (st->bgHangover > 15) { ltpLimit = 16383; /* 1.00 Q14 */ } /* weak sort of voicing indication. */ prevVoiced = 0; /* false */ if (gmed_n(&ltpGainHist[4], 5) > ltpLimit) { prevVoiced = 1; /* true */ } if (st->bgHangover > 20) { if (gmed_n(ltpGainHist, 9) > ltpLimit) { prevVoiced = 1; /* true */ } else { prevVoiced = 0; /* false */ } } if (prevVoiced) { *voicedHangover = 0; } else { temp = *voicedHangover + 1; if (temp > 10) { *voicedHangover = 10; } else { *voicedHangover = temp; } } return inbgNoise; } /* * dtx_dec_activity_update * * * Parameters: * st->lsf_hist_ptr B: LSF history pointer * st->lsf_hist B: LSF history * lsf I: lsf * frame I: noise frame * * Function: * Update lsp history and compute log energy. * * Returns: * void */ __device__ static void dtx_dec_activity_update(dtx_decState *st, Word32 lsf[], Word32 frame[]) { Word32 frame_en; Word32 log_en_e, log_en_m, log_en, i; /* update lsp history */ st->lsf_hist_ptr += M; if (st->lsf_hist_ptr == 80) { st->lsf_hist_ptr = 0; } memcpy(&st->lsf_hist[st->lsf_hist_ptr], lsf, M << 2); /* compute log energy based on frame energy */ frame_en = 0; /* Q0 */ for (i = 0; (i < L_FRAME); i++) { frame_en += frame[i] * frame[i]; if (frame_en & 0x80000000) break; } log_en = (frame_en & 0xC0000000) ? 
0x7FFFFFFE : (Word32)frame_en << 1; Log2(log_en, &log_en_e, &log_en_m); /* convert exponent and mantissa to Word16 Q10 */ log_en = log_en_e << 10; /* Q10 */ log_en = log_en + (log_en_m >> 5); /* divide with L_FRAME i.e subtract with log2(L_FRAME) = 7.32193 */ log_en = log_en - 8521; /* * insert into log energy buffer, no division by two as * log_en in decoder is Q11 */ st->log_en_hist_ptr += 1; if (st->log_en_hist_ptr == DTX_HIST_SIZE) { st->log_en_hist_ptr = 0; } st->log_en_hist[st->log_en_hist_ptr] = log_en; /* Q11 */ } /* * Decoder_amr * * * Parameters: * st B: State variables * mode I: AMR mode * parm I: vector of synthesis parameters * frame_type I: received frame type * synth O: synthesis speech * A_t O: decoded LP filter in 4 subframes * * Function: * Speech decoder routine * * Returns: * void */ __device__ static void Decoder_amr(Decoder_amrState *st, enum Mode mode, Word16 parm[], enum RXFrameType frame_type, Word32 synth[], Word32 A_t[]) { /* LSPs */ Word32 lsp_new[M]; Word32 lsp_mid[M]; /* LSFs */ Word32 prev_lsf[M]; Word32 lsf_i[M]; /* Algebraic codevector */ Word32 code[L_SUBFR]; /* excitation */ Word32 excp[L_SUBFR]; Word32 exc_enhanced[L_SUBFR]; /* Scalars */ Word32 i, i_subfr, overflow, T0_frac, index, temp, temp2, subfrNr, excEnergy; Word32 gain_code, gain_code_mix, pit_sharp, pit_flag, pitch_fac, t0_min, t0_max; Word32 gain_pit = 0, evenSubfr = 0, T0 = 0, index_mr475 = 0; Word32 *Az; /* Pointer on A_t */ Word16 flag4, carefulFlag; Word16 delta_frc_low, delta_frc_range, tmp_shift; Word16 bfi = 0, pdfi = 0; /* bad frame indication flag, potential degraded bad frame flag */ enum DTXStateType newDTXState; /* SPEECH , DTX, DTX_MUTE */ /* find the new DTX state SPEECH OR DTX */ newDTXState = rx_dtx_handler(&st->dtxDecoderState, frame_type); /* DTX actions */ if (newDTXState != SPEECH) { Decoder_amr_reset(st, MRDTX); dtx_dec(&st->dtxDecoderState, st->mem_syn, &st->lsfState, &st->pred_state, &st->Cb_gain_averState, newDTXState, mode, parm, synth, A_t); /* update average lsp */ Lsf_lsp(st->lsfState.past_lsf_q, st->lsp_old); lsp_avg(&st->lsp_avg_st, st->lsfState.past_lsf_q); goto theEnd; } /* SPEECH action state machine */ if (table_speech_bad[frame_type]) { bfi = 1; if (frame_type != RX_SPEECH_BAD) { Build_CN_param(&st->nodataSeed, mode, parm); } } else if (frame_type == RX_SPEECH_DEGRADED) { pdfi = 1; } if (bfi != 0) { st->state += 1; } else if (st->state == 6) { st->state = 5; } else { st->state = 0; } if (st->state > 6) { st->state = 6; } /* * If this frame is the first speech frame after CNI period, * set the BFH state machine to an appropriate state depending * on whether there was DTX muting before start of speech or not * If there was DTX muting, the first speech frame is muted. * If there was no DTX muting, the first speech frame is not * muted. 
The BFH state machine starts from state 5, however, to * keep the audible noise resulting from a SID frame which is * erroneously interpreted as a good speech frame as small as * possible (the decoder output in this case is quickly muted) */ if (st->dtxDecoderState.dtxGlobalState == DTX) { st->state = 5; st->prev_bf = 0; } else if (st->dtxDecoderState.dtxGlobalState == DTX_MUTE) { st->state = 5; st->prev_bf = 1; } /* save old LSFs for CB gain smoothing */ memcpy(prev_lsf, st->lsfState.past_lsf_q, M << 2); /* * decode LSF parameters and generate interpolated lpc coefficients * for the 4 subframes */ if (mode != MR122) { D_plsf_3(&st->lsfState, mode, bfi, parm, lsp_new); /* Advance synthesis parameters pointer */ parm += 3; Int_lpc_1to3(st->lsp_old, lsp_new, A_t); } else { D_plsf_5(&st->lsfState, bfi, parm, lsp_mid, lsp_new); /* Advance synthesis parameters pointer */ parm += 5; Int_lpc_1and3(st->lsp_old, lsp_mid, lsp_new, A_t); } /* update the LSPs for the next frame */ memcpy(st->lsp_old, lsp_new, M << 2); /* * Loop for every subframe in the analysis frame * * The subframe size is L_SUBFR and the loop is repeated * L_FRAME/L_SUBFR times * * - decode the pitch delay * - decode algebraic code * - decode pitch and codebook gains * - find the excitation and compute synthesis speech */ /* pointer to interpolated LPC parameters */ Az = A_t; evenSubfr = 0; subfrNr = -1; for (i_subfr = 0; i_subfr < L_FRAME; i_subfr += L_SUBFR) { subfrNr += 1; evenSubfr = 1 - evenSubfr; /* flag for first and 3th subframe */ pit_flag = i_subfr; if (i_subfr == L_FRAME_BY2) { if ((mode != MR475) & (mode != MR515)) { pit_flag = 0; } } /* pitch index */ index = *parm++; /* * decode pitch lag and find adaptive codebook vector. */ if (mode != MR122) { /* * flag4 indicates encoding with 4 bit resolution; * this is needed for mode MR475, MR515, MR59 and MR67 */ flag4 = 0; if ((mode == MR475) || (mode == MR515) || (mode == MR59) || ( mode == MR67)) { flag4 = 1; } /* * get ranges for the t0_min and t0_max * only needed in delta decoding */ delta_frc_low = 5; delta_frc_range = 9; if (mode == MR795) { delta_frc_low = 10; delta_frc_range = 19; } t0_min = st->old_T0 - delta_frc_low; if (t0_min < PIT_MIN) { t0_min = PIT_MIN; } t0_max = t0_min + delta_frc_range; if (t0_max > PIT_MAX) { t0_max = PIT_MAX; t0_min = t0_max - delta_frc_range; } Dec_lag3(index, t0_min, t0_max, pit_flag, st->old_T0, &T0, &T0_frac, flag4); st->T0_lagBuff = T0; if (bfi != 0) { if (st->old_T0 < PIT_MAX) { /* Graceful pitch degradation */ st->old_T0 += 1; } T0 = st->old_T0; T0_frac = 0; if ((st->inBackgroundNoise != 0) & (st->voicedHangover > 4) & ( (mode == MR475) || (mode == MR515) || (mode == MR59))) { T0 = st->T0_lagBuff; } } Pred_lt_3or6_40(&st->old_exc[st->exc], T0, T0_frac, 1); } else { Dec_lag6(index, PIT_MIN_MR122, PIT_MAX, pit_flag, &T0, &T0_frac); if ((bfi != 0) || ((pit_flag != 0) & (index > 60))) { st->T0_lagBuff = T0; T0 = st->old_T0; T0_frac = 0; } Pred_lt_3or6_40(&st->old_exc[st->exc], T0, T0_frac, 0); } /* * (MR122 only: Decode pitch gain.) * Decode innovative codebook. 
* set pitch sharpening factor */ /* MR475, MR515 */ if ((mode == MR475) || (mode == MR515)) { /* index of position */ index = *parm++; /* signs */ i = *parm++; decode_2i40_9bits(subfrNr, i, index, code); pit_sharp = st->sharp << 1; } /* MR59 */ else if (mode == MR59) { /* index of position */ index = *parm++; /* signs */ i = *parm++; decode_2i40_11bits(i, index, code); pit_sharp = st->sharp << 1; } /* MR67 */ else if (mode == MR67) { /* index of position */ index = *parm++; /* signs */ i = *parm++; decode_3i40_14bits(i, index, code); pit_sharp = st->sharp << 1; } /* MR74, MR795 */ else if (mode <= MR795) { /* index of position */ index = *parm++; /* signs */ i = *parm++; decode_4i40_17bits(i, index, code); pit_sharp = st->sharp << 1; } /* MR102 */ else if (mode == MR102) { decode_8i40_31bits(parm, code); parm += 7; pit_sharp = st->sharp << 1; } /* MR122 */ else { index = *parm++; if (bfi != 0) { ec_gain_pitch(&st->ec_gain_p_st, st->state, &gain_pit); } else { gain_pit = d_gain_pitch(mode, index); } ec_gain_pitch_update(&st->ec_gain_p_st, bfi, st->prev_bf, &gain_pit); decode_10i40_35bits(parm, code); parm += 10; /* * pit_sharp = gain_pit; * if (pit_sharp > 1.0) pit_sharp = 1.0; */ pit_sharp = gain_pit; if (pit_sharp > 16383) pit_sharp = 32767; else pit_sharp *= 2; } /* * Add the pitch contribution to code[]. */ for (i = T0; i < L_SUBFR; i++) { temp = (code[i - T0] * pit_sharp) >> 15; code[i] = code[i] + temp; } /* * Decode codebook gain (MR122) or both pitch * gain and codebook gain (all others) * Update pitch sharpening "sharp" with quantized gain_pit */ if (mode == MR475) { /* read and decode pitch and code gain */ if (evenSubfr != 0) { /* index of gain(s) */ index_mr475 = *parm++; } if (bfi == 0) { Dec_gain(&st->pred_state, mode, index_mr475, code, evenSubfr, & gain_pit, &gain_code); } else { ec_gain_pitch(&st->ec_gain_p_st, st->state, &gain_pit); ec_gain_code(&st->ec_gain_c_st, &st->pred_state, st->state, & gain_code); } ec_gain_pitch_update(&st->ec_gain_p_st, bfi, st->prev_bf, &gain_pit); ec_gain_code_update(&st->ec_gain_c_st, bfi, st->prev_bf, &gain_code); pit_sharp = gain_pit; if (pit_sharp > SHARPMAX) { pit_sharp = SHARPMAX; } } else if ((mode <= MR74) || (mode == MR102)) { /* read and decode pitch and code gain */ /* index of gain(s) */ index = *parm++; if (bfi == 0) { Dec_gain(&st->pred_state, mode, index, code, evenSubfr, &gain_pit, & gain_code); } else { ec_gain_pitch(&st->ec_gain_p_st, st->state, &gain_pit); ec_gain_code(&st->ec_gain_c_st, &st->pred_state, st->state, & gain_code); } ec_gain_pitch_update(&st->ec_gain_p_st, bfi, st->prev_bf, &gain_pit); ec_gain_code_update(&st->ec_gain_c_st, bfi, st->prev_bf, &gain_code); pit_sharp = gain_pit; if (pit_sharp > SHARPMAX) { pit_sharp = SHARPMAX; } if (mode == MR102) { if (st->old_T0 > (L_SUBFR + 5)) { pit_sharp = pit_sharp >> 2; } } } else { /* read and decode pitch gain */ /* index of gain(s) */ index = *parm++; if (mode == MR795) { /* decode pitch gain */ if (bfi != 0) { ec_gain_pitch(&st->ec_gain_p_st, st->state, &gain_pit); } else { gain_pit = d_gain_pitch(mode, index); } ec_gain_pitch_update(&st->ec_gain_p_st, bfi, st->prev_bf, &gain_pit ); /* read and decode code gain */ index = *parm++; if (bfi == 0) { d_gain_code(&st->pred_state, mode, index, code, &gain_code); } else { ec_gain_code(&st->ec_gain_c_st, &st->pred_state, st->state, & gain_code); } ec_gain_code_update(&st->ec_gain_c_st, bfi, st->prev_bf, &gain_code ); pit_sharp = gain_pit; if (pit_sharp > SHARPMAX) { pit_sharp = SHARPMAX; } } else { /* MR122 */ if (bfi == 0) { 
d_gain_code(&st->pred_state, mode, index, code, &gain_code); } else { ec_gain_code(&st->ec_gain_c_st, &st->pred_state, st->state, & gain_code); } ec_gain_code_update(&st->ec_gain_c_st, bfi, st->prev_bf, &gain_code ); pit_sharp = gain_pit; } } /* * store pitch sharpening for next subframe * (for modes which use the previous pitch gain for * pitch sharpening in the search phase) * do not update sharpening in even subframes for MR475 */ if ((mode != MR475) || evenSubfr == 0) { st->sharp = gain_pit; if (st->sharp > SHARPMAX) { st->sharp = SHARPMAX; } } if (pit_sharp > 16383) pit_sharp = 32767; else pit_sharp *= 2; if (pit_sharp > 16384) { for (i = 0; i < L_SUBFR; i++) { temp = (st->old_exc[st->exc + i] * pit_sharp) >> 15; temp2 = (temp * gain_pit) << 1; if (mode == MR122) { temp2 = (temp2 >> 1); } excp[i] = (temp2 + 0x00008000L) >> 16; } } /* * Store list of LTP gains needed in the source * characteristic detector (SCD) */ if (bfi == 0) { for (i = 0; i < 8; i++) { st->ltpGainHistory[i] = st->ltpGainHistory[i + 1]; } st->ltpGainHistory[8] = gain_pit; } /* * Limit gain_pit if in background noise and BFI * for MR475, MR515, MR59 */ if ((st->prev_bf != 0 || bfi != 0) & (st->inBackgroundNoise != 0) & ( (mode == MR475) || (mode == MR515) || (mode == MR59))) { /* if (gain_pit > 0.75) in Q14*/ if (gain_pit > 12288) /* gain_pit = (gain_pit-0.75)/2.0 + 0.75; */ gain_pit = ((gain_pit - 12288) >> 1) + 12288; /* if (gain_pit > 0.90) in Q14*/ if (gain_pit > 14745) { gain_pit = 14745; } } /* * Calculate CB mixed gain */ Int_lsf(prev_lsf, st->lsfState.past_lsf_q, i_subfr, lsf_i); gain_code_mix = Cb_gain_average(&st->Cb_gain_averState, mode, gain_code, lsf_i, st->lsp_avg_st.lsp_meanSave, bfi, st->prev_bf, pdfi, st-> prev_pdf, st->inBackgroundNoise, st->voicedHangover); /* make sure that MR74, MR795, MR122 have original codeGain*/ /* MR74, MR795, MR122 */ if ((mode > MR67) & (mode != MR102)) { gain_code_mix = gain_code; } /* * Find the total excitation. * Find synthesis speech corresponding to st->exc[]. */ /* MR475, MR515, MR59, MR67, MR74, MR795, MR102*/ if (mode <= MR102) { pitch_fac = gain_pit; tmp_shift = 1; } /* MR122 */ else { pitch_fac = gain_pit >> 1; tmp_shift = 2; } /* * copy unscaled LTP excitation to exc_enhanced (used in phase * dispersion below) and compute total excitation for LTP feedback */ memcpy(exc_enhanced, &st->old_exc[st->exc], L_SUBFR << 2); for (i = 0; i < L_SUBFR; i++) { /* st->exc[i] = gain_pit*st->exc[i] + gain_code*code[i]; */ temp = (st->old_exc[st->exc + i] * pitch_fac) + (code[i] * gain_code); temp2 = (temp << tmp_shift); if (((temp2 >> 1) ^ temp2) & 0x40000000) { if ((temp ^ temp2) & 0x80000000) { temp2 = (temp & 0x80000000) ? (-1073741824L) : 1073725439; } else { temp2 = (temp2 & 0x80000000) ? (-1073741824L) : 1073725439; } } st->old_exc[st->exc + i] = (temp2 + 0x00004000L) >> 15; } /* * Adaptive phase dispersion */ /* free phase dispersion adaption */ st->ph_disp_st.lockFull = 0; if (((mode == MR475) || (mode == MR515) || (mode == MR59)) & (st ->voicedHangover > 3) & (st->inBackgroundNoise != 0) & (bfi != 0 )) { /* * Always Use full Phase Disp. * if error in bg noise */ st->ph_disp_st.lockFull = 1; } /* * apply phase dispersion to innovation (if enabled) and * compute total excitation for synthesis part */ ph_disp(&st->ph_disp_st, mode, exc_enhanced, gain_code_mix, gain_pit, code , pitch_fac, tmp_shift); /* * The Excitation control module are active during BFI. * Conceal drops in signal energy if in bg noise. 
*/ temp2 = 0; for (i = 0; i < L_SUBFR; i++) { temp2 += (exc_enhanced[i] * exc_enhanced[i]); } if (temp2 > 0x3FFFFFFF) { excEnergy = 11584; } else { temp2 = sqrt_l_exp(temp2, &temp); temp2 = (temp2 >> ((temp >> 1) + 15)); excEnergy = temp2 >> 2; } if (((mode == MR475) || (mode == MR515) || (mode == MR59)) & (st ->voicedHangover > 5) & (st->inBackgroundNoise != 0) & (st-> state < 4) & (((pdfi != 0) & (st->prev_pdf != 0)) || bfi != 0 || st->prev_bf != 0)) { carefulFlag = 0; if ((pdfi != 0) & (bfi == 0)) { carefulFlag = 1; } Ex_ctrl(exc_enhanced, excEnergy, st->excEnergyHist, st->voicedHangover , st->prev_bf, carefulFlag); } if ((st->inBackgroundNoise != 0) & (bfi != 0 || st->prev_bf != 0) & ( st->state < 4)) { ; /* do nothing! */ } else { /* Update energy history for all modes */ for (i = 0; i < 8; i++) { st->excEnergyHist[i] = st->excEnergyHist[i + 1]; } st->excEnergyHist[8] = excEnergy; } /* * Excitation control module end. */ if (pit_sharp > 16384) { for (i = 0; i < L_SUBFR; i++) { excp[i] = excp[i] + exc_enhanced[i]; if (labs(excp[i]) > 32767) excp[i] = (excp[i] & 0x80000000) ? -32768 : 32767; } agc2(exc_enhanced, excp); overflow = Syn_filt(Az, excp, &synth[i_subfr], L_SUBFR, st->mem_syn, 0 ); } else { overflow = Syn_filt(Az, exc_enhanced, &synth[i_subfr], L_SUBFR, st-> mem_syn, 0); } if (overflow) { for (i = 0; i < PIT_MAX + L_INTERPOL + L_SUBFR; i++) { st->old_exc[i] = st->old_exc[i] >> 2; } for (i = 0; i < L_SUBFR; i++) { exc_enhanced[i] = exc_enhanced[i] >> 2; } Syn_filt_overflow(Az, exc_enhanced, &synth[i_subfr], L_SUBFR, st->mem_syn, 1); } else { memcpy(st->mem_syn, &synth[i_subfr + 30], 40); } /* * Update signal for next frame. * -> shift to the left by L_SUBFR st->exc[] */ memcpy(&st->old_exc[0], &st->old_exc[L_SUBFR], (PIT_MAX + L_INTERPOL) << 2); /* interpolated LPC parameters for next subframe */ Az += MP1; /* store T0 for next subframe */ st->old_T0 = T0; } /* * Call the Source Characteristic Detector which updates * st->inBackgroundNoise and st->voicedHangover. */ st->inBackgroundNoise = Bgn_scd(&st->background_state, &(st->ltpGainHistory[ 0]), &(synth[0]), &(st->voicedHangover)); dtx_dec_activity_update(&st->dtxDecoderState, st->lsfState.past_lsf_q, synth); /* store bfi for next subframe */ st->prev_bf = bfi; st->prev_pdf = pdfi; /* * Calculate the LSF averages on the eight * previous frames */ lsp_avg(&st->lsp_avg_st, st->lsfState.past_lsf_q); theEnd: st->dtxDecoderState.dtxGlobalState = newDTXState; return; } /* * Residu40 * * * Parameters: * a I: prediction coefficients * x I: speech signal * y O: residual signal * * Function: * The LP residual is computed by filtering the input * speech through the LP inverse filter a(z) * * Returns: * void */ __device__ static void Residu40(Word32 a[], Word32 x[], Word32 y[]) { Word32 s, i, j; for (i = 0; i < 40; i++) { s = a[0] * x[i] + a[1] * x[i - 1] + a[2] * x[i - 2] + a[3] * x[i - 3]; s += a[4] * x[i - 4] + a[5] * x[i - 5] + a[6] * x[i - 6] + a[7] * x[i - 7] ; s += a[8] * x[i - 8] + a[9] * x[i - 9] + a[10] * x[i - 10]; y[i] = (s + 0x800) >> 12; if (abs(y[i]) > 32767) { /* go to safe mode */ for (i = 0; i < 40; i++) { s = a[0] * x[i]; for (j = 1; j <= 10; j++) { s += a[j] * x[i - j]; if (s > 1073741823) { s = 1073741823; } else if (s < -1073741824) { s = -1073741824; } } y[i] = (s + 0x800) >> 12; if (abs(y[i]) > 32767) y[i] = (y[i] & 0x80000000) ? 
-32768 : 32767; } return; } } return; } /* * agc * * * Parameters: * st->past_gain B: gain memory * sig_in I: Post_Filter input signal * sig_out B: Post_Filter output signal * agc_fac I: AGC factor * * Function: * Scales the Post_Filter output on a subframe basis * * Returns: * void */ __device__ static void agc(agcState *st, Word32 *sig_in, Word32 *sig_out, Word16 agc_fac) { Word32 s, gain_in, gain_out, g0, gain; int exp, i; /* calculate gain_out with exponent */ s = energy_new(sig_out); if (s == 0) { st->past_gain = 0; return; } exp = 0; i = s; while (!(i & 0x40000000)) { exp++; i = i << 1; } exp -= 1; if (exp & 0x80000000) { s >>= 1; } else { s <<= exp; } gain_out = (s + 0x00008000L) >> 16; /* calculate gain_in with exponent */ s = energy_new(sig_in); if (s == 0) { g0 = 0; } else { i = 0; while (!(s & 0x40000000)) { i++; s = s << 1; } s = s + 0x00008000L; if (s >= 0) gain_in = s >> 16; else gain_in = 32767; exp = (exp - i); /* * g0 = (1-agc_fac) * sqrt(gain_in/gain_out); */ /* s = gain_out / gain_in */ s = (gain_out << 15) / gain_in; exp = 7 - exp; if (exp > 0) { if (exp > 31) { if (s) { s = 2147483647; } } else { s = s << exp; } } else s = (s >> (-exp)); if (s < 0) s = 2147483647; s = Inv_sqrt(s); i = ((s << 9) + 0x00008000L) >> 16; if (i & 0xFFFF8000) i = 32767; /* g0 = i * (1-agc_fac) */ g0 = (i * (32767 - agc_fac)) >> 15; } /* * compute gain[n] = agc_fac * gain[n-1] + (1-agc_fac) * sqrt(gain_in/gain_out) * sig_out[n] = gain[n] * sig_out[n] */ gain = st->past_gain; for (i = 0; i < L_SUBFR; i++) { gain = (gain * agc_fac) >> 15; gain = gain + g0; sig_out[i] = (sig_out[i] * gain) >> 12; if (labs(sig_out[i]) > 32767) sig_out[i] = (sig_out[i] & 0x8000000) ? -32768 : 32767; } st->past_gain = gain; return; } /* * Post_Filter * * * Parameters: * st B: post filter states * mode I: AMR mode * syn B: synthesis speech * Az_4 I: interpolated LPC parameters in all subfr. * * Function: * Post_Filtering of synthesis speech. 
* * inverse filtering of syn[] through A(z/0.7) to get res2[] * tilt compensation filtering; 1 - MU*k*z^-1 * synthesis filtering through 1/A(z/0.75) * adaptive gain control * * Returns: * void */ __device__ static void Post_Filter(Post_FilterState *st, enum Mode mode, Word32 *syn, Word32 *Az_4) { Word32 h[22], Ap3[MP1], Ap4[MP1]; /* bandwidth expanded LP parameters */ Word32 tmp, i_subfr, i, temp1, temp2, overflow = 0; Word32 *Az, *p1, *p2, *syn_work = &st->synth_buf[M]; const Word32 *pgamma3 = &gamma3[0]; const Word32 *pgamma4 = &gamma4_gamma3_MR122[0]; /* * Post filtering */ memcpy(syn_work, syn, L_FRAME << 2); Az = Az_4; if ((mode == MR122) || (mode == MR102)) { pgamma3 = &gamma4_gamma3_MR122[0]; pgamma4 = &gamma4_MR122[0]; } for (i_subfr = 0; i_subfr < L_FRAME; i_subfr += L_SUBFR) { /* Find weighted filter coefficients Ap3[] and Ap[4] */ Ap3[0] = Az[0]; Ap4[0] = Az[0]; for (i = 1; i <= 10; i++) { Ap3[i] = (Az[i] * pgamma3[i - 1] + 0x4000) >> 15; Ap4[i] = (Az[i] * pgamma4[i - 1] + 0x4000) >> 15; } /* filtering of synthesis speech by A(z/0.7) to find res2[] */ Residu40(Ap3, &syn_work[i_subfr], st->res2); /* tilt compensation filter */ /* impulse response of A(z/0.7)/A(z/0.75) */ memcpy(h, Ap3, MP1 << 2); memset(&h[M + 1], 0, (22 - M - 1) << 2); Syn_filt(Ap4, h, h, 22, &h[M + 1], 0); /* 1st correlation of h[] */ tmp = 16777216 + h[1] * h[1]; for (i = 2; i < 22; i++) { tmp += h[i] * h[i]; if (tmp > 0x3FFF8000) break; } temp1 = tmp >> 15; if (temp1 & 0xFFFF8000) temp1 = 32767; tmp = h[0] * h[1]; for (i = 1; i < 21; i++) { tmp += h[i] * h[i + 1]; if (abs(tmp) > 1073741823) tmp = 1073741823; } temp2 = tmp >> 15; if (temp2 <= 0) { temp2 = 0; } else { tmp = temp2 * 26214; temp2 = (tmp & 0xffff8000) / temp1; } /* preemphasis */ p1 = st->res2 + 39; p2 = p1 - 1; tmp = *p1; do { *p1 = *p1 - ((temp2 * *p2--) >> 15); if (abs(*p1) > 32767) { *p1 = (*p1 & 0x80000000) ? -32768 : 32767; } p1--; *p1 = *p1 - ((temp2 * *p2--) >> 15); if (abs(*p1) > 32767) { *p1 = (*p1 & 0x80000000) ? -32768 : 32767; } p1--; *p1 = *p1 - ((temp2 * *p2--) >> 15); if (abs(*p1) > 32767) { *p1 = (*p1 & 0x80000000) ? -32768 : 32767; } p1--; } while (p1 > st->res2); *p1 = *p1 - ((temp2 * st->preemph_state_mem_pre) >> 15); if (abs(*p1) > 32767) { *p1 = (*p1 & 0x80000000) ? -32768 : 32767; } st->preemph_state_mem_pre = tmp; /* filtering through 1/A(z/0.75) */ overflow = Syn_filt(Ap4, st->res2, &syn[i_subfr], L_SUBFR, st->mem_syn_pst, 0); if (overflow) { Syn_filt_overflow(Ap4, st->res2, &syn[i_subfr], L_SUBFR, st->mem_syn_pst, 1); overflow = 0; } else { memcpy(st->mem_syn_pst, &syn[i_subfr + 30], 40); } /* scale output to input */ agc(&st->agc_state, &syn_work[i_subfr], &syn[i_subfr], AGC_FAC); Az += MP1; } /* update syn_work[] buffer */ memcpy(&syn_work[-M], &syn_work[L_FRAME - M], M << 2); return; } /* * Post_Process * * * Parameters: * st B: post filter states * signal B: signal * * Function: * Postprocessing of input speech. * * 2nd order high pass filtering with cut off frequency at 60 Hz. * Multiplication of output by two. 
* * * Returns: * void */ __device__ static void Post_Process(Post_ProcessState *st, Word32 signal[]) { Word32 x2, tmp, i = 0; Word32 mask = 0x40000000; do { x2 = st->x1; st->x1 = st->x0; st->x0 = signal[i]; /* * y[i] = b[0]*x[i]*2 + b[1]*x[i-1]*2 + b140[2]*x[i-2]/2 * + a[1]*y[i-1] + a[2] * y[i-2]; */ tmp = (st->y1_hi * 15836) + (((st->y1_lo * 15836) & (Word32)0xffff8000) >> 15); tmp += (st->y2_hi * -7667) + (((st->y2_lo * (-7667)) & (Word32)0xffff8000) >> 15); tmp += st->x0 * 7699; tmp += st->x1 * -15398; if (((tmp >> 1) ^ tmp) & mask) tmp = (tmp & 0x80000000) ? -1073741824 : 1073741823; tmp += x2 * 7699; if (((tmp >> 1) ^ tmp) & mask) tmp = (tmp & 0x80000000) ? -1073741824 : 1073741823; tmp = tmp << 1; if (((tmp >> 1) ^ tmp) & mask) tmp = (tmp & 0x80000000) ? -1073741824 : 1073741823; tmp = tmp << 1; if (((tmp >> 1) ^ tmp) & mask) tmp = (tmp & 0x80000000) ? -1073741824 : 1073741823; if (labs(tmp) < 536862720) { signal[i++] = (tmp + 0x00002000L) >> 14; } else if (tmp > 0) { signal[i++] = 32767; } else { signal[i++] = -32768; } st->y2_hi = st->y1_hi; st->y2_lo = st->y1_lo; st->y1_hi = tmp >> 15; st->y1_lo = ((tmp << 1) - (st->y1_hi << 16)) >> 1; } while (i < 160); return; } /* * Speech_Decode_Frame * * * Parameters: * st B: decoder memory * mode I: AMR mode * parm I: speech parameters * frame_type I: Frame type * synth O: synthesis speech * Function: * Decode one frame * * Returns: * void */ __device__ void Speech_Decode_Frame(Speech_Decode_FrameState* state, enum Mode mode, Word16 *parm, enum RXFrameType frame_type, Word16 *synth) { Word32 Az_dec[AZ_SIZE]; /* Decoded Az for post-filter in 4 subframes*/ Word32 synth_speech[L_FRAME]; Word32 i; /* Synthesis */ Decoder_amr(&state->decoder_amrState, mode, parm, frame_type, synth_speech, Az_dec); Post_Filter(&state->post_state, mode, synth_speech, Az_dec); /* post HP filter, and 15->16 bits */ Post_Process(&state->postHP_state, synth_speech); for (i = 0; i < L_FRAME; i++) { #ifndef NO13BIT /* Truncate to 13 bits */ synth[i] = (Word16)(synth_speech[i] & 0xfff8); #else synth[i] = (Word16)(synth_speech[i]); #endif } return; } /* * Post_Process_reset * * * Parameters: * state B: state structure * * Function: * Resets state memory * * Returns: * -1 failure */ __device__ static int Post_Process_reset(Post_ProcessState *state) { state->y2_hi = 0; state->y2_lo = 0; state->y1_hi = 0; state->y1_lo = 0; state->x0 = 0; state->x1 = 0; return 0; } /* * Decoder_amr_init * * * Parameters: * state O: state structure * * Function: * Allocates state memory and initializes state memory * * Returns: * success = 0 */ __device__ static int Decoder_amr_init(Decoder_amrState *state) { Decoder_amr_reset(state, MR475); return 0; } /* * Post_Filter_reset * * * Parameters: * state B: state structure * * Function: * Resets state memory * * Returns: * -1 failure */ __device__ static int Post_Filter_reset(Post_FilterState *state) { state->preemph_state_mem_pre = 0; state->agc_state.past_gain = 4096; memset(state->mem_syn_pst, 0, M << 2); memset(state->res2, 0, L_SUBFR << 2); memset(state->synth_buf, 0, (L_FRAME + M) << 2); return 0; } /* * Post_Filter_init * * * Parameters: * state O: state structure * * Function: * Allocates state memory and initializes state memory * * Returns: * success = 0 */ __device__ static int Post_Filter_init(Post_FilterState* state) { Post_Filter_reset(state); return 0; } /* * Post_Process_init * * * Parameters: * state O: state structure * * Function: * Allocates state memory and initializes state memory * * Returns: * success = 0 */ __device__ 
static int Post_Process_init(Post_ProcessState *state) { Post_Process_reset(state); return 0; } /* * Speech_Decode_Frame_reset * * * Parameters: * state B: state structure * * Function: * Resets state memory * * Returns: * -1 = failure */ __device__ int Speech_Decode_Frame_reset(Speech_Decode_FrameState* state) { Decoder_amr_reset(&state->decoder_amrState, (enum Mode)0); Post_Filter_reset(&state->post_state); Post_Process_reset(&state->postHP_state); return 0; } /* * Speech_Decode_Frame_init * * * Parameters: * state O: state structure * * Function: * Allocates state memory and initializes state memory * * Returns: * success = 0 */ __device__ void Speech_Decode_Frame_init(Speech_Decode_FrameState* state) { Decoder_amr_init(&state->decoder_amrState); Post_Filter_init(&state->post_state); Post_Process_init(&state->postHP_state); return; } __device__ enum Mode DecoderMMS(Word16 *param, UWord8 *stream, enum RXFrameType *frame_type, enum Mode *speech_mode, Word16 *q_bit) { enum Mode mode; Word32 j; Word16 *mask; memset(param, 0, PRMNO_MR122 << 1); *q_bit = 0x01 & (*stream >> 2); mode = Mode(0x0F & (*stream >> 3)); stream++; if (mode == MRDTX) { mask = order_MRDTX; for (j = 1; j < 36; j++) { if (*stream & 0x80) param[*mask] = (short)(param[*mask] + *(mask + 1)); mask += 2; if (j % 8) *stream <<= 1; else stream++; } /* get SID type bit */ *frame_type = RX_SID_FIRST; if (*stream & 0x80) *frame_type = RX_SID_UPDATE; /* since there is update, use it */ /* *frame_type = RX_SID_UPDATE; */ /* speech mode indicator */ *speech_mode = Mode((*stream >> 4) & 0x07); *speech_mode = Mode(((*speech_mode & 0x0001) << 2) | (*speech_mode & 0x0002) | ((*speech_mode & 0x0004) >> 2)); } else if (mode == 15) { *frame_type = RX_NO_DATA; } else if (mode == MR475) { mask = order_MR475; for (j = 1; j < 96; j++) { if (*stream & 0x80) param[*mask] = (short)(param[*mask] + *(mask + 1)); mask += 2; if (j % 8) *stream <<= 1; else stream++; } *frame_type = RX_SPEECH_GOOD; } else if (mode == MR515) { mask = order_MR515; for (j = 1; j < 104; j++) { if (*stream & 0x80) param[*mask] = (short)(param[*mask] + *(mask + 1)); mask += 2; if (j % 8) *stream <<= 1; else stream++; } *frame_type = RX_SPEECH_GOOD; } else if (mode == MR59) { mask = order_MR59; for (j = 1; j < 119; j++) { if (*stream & 0x80) param[*mask] = (short)(param[*mask] + *(mask + 1)); mask += 2; if (j % 8) *stream <<= 1; else stream++; } *frame_type = RX_SPEECH_GOOD; } else if (mode == MR67) { mask = order_MR67; for (j = 1; j < 135; j++) { if (*stream & 0x80) param[*mask] = (short)(param[*mask] + *(mask + 1)); mask += 2; if (j % 8) *stream <<= 1; else stream++; } *frame_type = RX_SPEECH_GOOD; } else if (mode == MR74) { mask = order_MR74; for (j = 1; j < 149; j++) { if (*stream & 0x80) param[*mask] = (short)(param[*mask] + *(mask + 1)); mask += 2; if (j % 8) *stream <<= 1; else stream++; } *frame_type = RX_SPEECH_GOOD; } else if (mode == MR795) { mask = order_MR795; for (j = 1; j < 160; j++) { if (*stream & 0x80) param[*mask] = (short)(param[*mask] + *(mask + 1)); mask += 2; if (j % 8) *stream <<= 1; else stream++; } *frame_type = RX_SPEECH_GOOD; } else if (mode == MR102) { mask = order_MR102; for (j = 1; j < 205; j++) { if (*stream & 0x80) param[*mask] = (short)(param[*mask] + *(mask + 1)); mask += 2; if (j % 8) *stream <<= 1; else stream++; } *frame_type = RX_SPEECH_GOOD; } else if (mode == MR122) { mask = order_MR122; for (j = 1; j < 245; j++) { if (*stream & 0x80) param[*mask] = (short)(param[*mask] + *(mask + 1)); mask += 2; if (j % 8) *stream <<= 1; else stream++; 
} *frame_type = RX_SPEECH_GOOD; } else *frame_type = RX_SPEECH_BAD; return mode; } /* * Decoder_Interface_reset * * * Parameters: * st O: state struct * * Function: * Reset homing frame counter * * Returns: * void */ __device__ void Decoder_Interface_reset(dec_interface_State *st) { st->reset_flag_old = 1; st->prev_ft = RX_SPEECH_GOOD; st->prev_mode = MR475; /* minimum bitrate */ } /* * Decoder_Interface_init * * * Parameters: * void * * Function: * Allocates state memory and initializes state memory * * Returns: * success : pointer to structure * failure : NULL */ __device__ void Decoder_Interface_init(dec_interface_State* state) { Speech_Decode_Frame_init(&state->decoder_State); Decoder_Interface_reset(state); } /* * Decoder_Interface_Decode * * * Parameters: * st B: state structure * bits I: bit stream * synth O: synthesized speech * bfi I: bad frame indicator * * Function: * Decode bit stream to synthesized speech * * Returns: * Void */ __device__ void Decoder_Interface_Decode(dec_interface_State* state, UWord8 *bits, Word16 *synth, int bfi) { enum Mode mode; /* AMR mode */ enum Mode speech_mode = MR475; /* speech mode */ Word16 prm[PRMNO_MR122]; /* AMR parameters */ enum RXFrameType frame_type; /* frame type */ const Word16 *homing; /* pointer to homing frame */ Word16 homingSize; /* frame size for homing frame */ Word32 i; /* counter */ Word32 resetFlag = 1; /* homing frame */ Word16 q_bit; /* * extract mode information and frametype, * octets to parameters */ mode = DecoderMMS(prm, bits, &frame_type, &speech_mode, &q_bit); if (!bfi) bfi = 1 - q_bit; if (bfi == 1) { if (mode <= MR122) { frame_type = RX_SPEECH_BAD; } else if (frame_type != RX_NO_DATA) { frame_type = RX_SID_BAD; mode = state->prev_mode; } } else { if (frame_type == RX_SID_FIRST || frame_type == RX_SID_UPDATE) { mode = speech_mode; } else if (frame_type == RX_NO_DATA) { mode = state->prev_mode; } /* * if no mode information * guess one from the previous frame */ if (frame_type == RX_SPEECH_BAD) { mode = state->prev_mode; if (state->prev_ft >= RX_SID_FIRST) { frame_type = RX_SID_BAD; } } } /* test for homing frame */ if (state->reset_flag_old == 1) { switch (mode) { case MR122: homing = dhf_MR122; homingSize = 18; break; case MR102: homing = dhf_MR102; homingSize = 12; break; case MR795: homing = dhf_MR795; homingSize = 8; break; case MR74: homing = dhf_MR74; homingSize = 7; break; case MR67: homing = dhf_MR67; homingSize = 7; break; case MR59: homing = dhf_MR59; homingSize = 7; break; case MR515: homing = dhf_MR515; homingSize = 7; break; case MR475: homing = dhf_MR475; homingSize = 7; break; default: homing = NULL; homingSize = 0; break; } for (i = 0; i < homingSize; i++) { resetFlag = prm[i] ^ homing[i]; if (resetFlag) break; } } if ((resetFlag == 0) && (state->reset_flag_old != 0)) { for (i = 0; i < 160; i++) { synth[i] = EHF_MASK; } } else Speech_Decode_Frame(&state->decoder_State, mode, prm, frame_type, synth); if (state->reset_flag_old == 0) { /* check whole frame */ switch (mode) { case MR122: homing = dhf_MR122; homingSize = PRMNO_MR122; break; case MR102: homing = dhf_MR102; homingSize = PRMNO_MR102; break; case MR795: homing = dhf_MR795; homingSize = PRMNO_MR795; break; case MR74: homing = dhf_MR74; homingSize = PRMNO_MR74; break; case MR67: homing = dhf_MR67; homingSize = PRMNO_MR67; break; case MR59: homing = dhf_MR59; homingSize = PRMNO_MR59; break; case MR515: homing = dhf_MR515; homingSize = PRMNO_MR515; break; case MR475: homing = dhf_MR475; homingSize = PRMNO_MR475; break; default: homing = NULL; 
homingSize = 0; } for (i = 0; i < homingSize; i++) { resetFlag = prm[i] ^ homing[i]; if (resetFlag) break; } } /* reset decoder if current frame is a homing frame */ if (resetFlag == 0) { Speech_Decode_Frame_reset(&state->decoder_State); } state->reset_flag_old = !resetFlag; state->prev_ft = frame_type; state->prev_mode = mode; }
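/*
 * Illustrative usage sketch (added for clarity; not part of the original
 * reference source). It shows one way the __device__ decoder interface above
 * could be driven from a kernel, with one independent decoder state per
 * thread/channel. The kernel name, the flat stream layout (n_frames packed
 * frames of at most max_frame_bytes per channel, in channel-major order) and
 * the output stride of 160 samples (L_FRAME) per frame are assumptions made
 * for this example only. Note that Decoder_Interface_Decode (via DecoderMMS)
 * shifts the bitstream bytes in place, so each thread must own a writable
 * copy of its frames.
 */
static __global__ void amr_decode_channels_kernel(dec_interface_State *states,
                                                  UWord8 *streams,
                                                  Word16 *synth_out,
                                                  int n_frames,
                                                  int max_frame_bytes,
                                                  int n_channels)
{
    int ch = blockIdx.x * blockDim.x + threadIdx.x;
    if (ch >= n_channels)
        return;

    /* reset this channel's decoder state once (homing flag, previous mode) */
    Decoder_Interface_init(&states[ch]);

    /* decode the channel's frames back to back; the state persists across frames */
    for (int f = 0; f < n_frames; f++) {
        UWord8 *frame_bits = &streams[(ch * n_frames + f) * max_frame_bytes];
        Word16 *frame_pcm  = &synth_out[(ch * n_frames + f) * 160];

        /* bfi = 0: trust the quality bit carried in the packed frame itself */
        Decoder_Interface_Decode(&states[ch], frame_bits, frame_pcm, 0);
    }
}
/*
 * On the host side the three buffers would be allocated with hipMalloc (or
 * cudaMalloc in the CUDA build) and the packed frames copied in before the
 * launch; those details are omitted here.
 */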
c6ee29682c7bffe29d40c4ba5f60f5c48230c147.cu
/* * sp_dec.c * * * Project: * AMR Floating-Point Codec * * Contains: * This module contains all the functions needed decoding AMR * encoder parameters to 16-bit speech samples * */ /* * include files */ #include <stdio.h> #include <stdlib.h> #include <memory.h> #include <math.h> #include <float.h> #include "typedef.h" #include "dec.h" #include "cuda_runtime.h" #define PRMNO_MR475 17 #define PRMNO_MR515 19 #define PRMNO_MR59 19 #define PRMNO_MR67 19 #define PRMNO_MR74 19 #define PRMNO_MR795 23 #define PRMNO_MR102 39 #define PRMNO_MR122 57 #define PRMNO_MRDTX 5 /* * tables */ __device__ static const UWord8 block_size[16] = { 13, 14, 16, 18, 20, 21, 27, 32, 6 , 0 , 0 , 0 , 0 , 0 , 0 , 1 }; __device__ static const UWord8 toc_byte[16] = { 0x04, 0x0C, 0x14, 0x1C, 0x24, 0x2C, 0x34, 0x3C, 0x44, 0x4C, 0x54, 0x5C, 0x64, 0x6C, 0x74, 0x7C }; /* Subjective importance of the speech encoded bits */ __device__ static Word16 order_MR475[] = { 0, 0x80, 0, 0x40, 0, 0x20, 0, 0x10, 0, 0x8, 0, 0x4, 0, 0x2, 0, 0x1, 1, 0x80, 1, 0x40, 1, 0x20, 1, 0x10, 1, 0x8, 1, 0x4, 1, 0x2, 1, 0x1, 3, 0x80, 3, 0x40, 3, 0x20, 3, 0x10, 3, 0x8, 3, 0x4, 7, 0x8, 7, 0x4, 10, 0x8, 10, 0x4, 14, 0x8, 14, 0x4, 6, 0x1, 6, 0x2, 6, 0x4, 6, 0x8, 13, 0x1, 13, 0x2, 13, 0x4, 13, 0x8, 2, 0x20, 2, 0x10, 2, 0x4, 2, 0x1, 13, 0x10, 13, 0x20, 13, 0x40, 13, 0x80, 3, 0x2, 3, 0x1, 6, 0x10, 6, 0x20, 6, 0x40, 6, 0x80, 5, 0x2, 5, 0x1, 2, 0x40, 2, 0x8, 2, 0x2, 7, 0x2, 7, 0x1, 9, 0x2, 9, 0x1, 10, 0x2, 10, 0x1, 12, 0x2, 12, 0x1, 14, 0x2, 14, 0x1, 16, 0x2, 16, 0x1, 4, 0x20, 4, 0x10, 4, 0x4, 4, 0x2, 8, 0x20, 8, 0x10, 8, 0x4, 8, 0x2, 11, 0x20, 11, 0x10, 11, 0x4, 11, 0x2, 15, 0x20, 15, 0x10, 15, 0x4, 15, 0x2, 4, 0x8, 8, 0x8, 11, 0x8, 15, 0x8, 4, 0x1, 8, 0x1, 11, 0x1, 15, 0x1, 4, 0x40, 8, 0x40, 11, 0x40, 15, 0x40 }; __device__ static Word16 order_MR515[] = { 0, 0x1, 0, 0x2, 0, 0x4, 0, 0x8, 0, 0x10, 0, 0x20, 0, 0x40, 0, 0x80, 1, 0x1, 1, 0x2, 1, 0x4, 1, 0x8, 1, 0x10, 1, 0x20, 1, 0x40, 1, 0x80, 3, 0x80, 3, 0x40, 3, 0x20, 3, 0x10, 3, 0x8, 7, 0x8, 11, 0x8, 15, 0x8, 6, 0x1, 6, 0x2, 6, 0x4, 10, 0x1, 10, 0x2, 10, 0x4, 14, 0x1, 14, 0x2, 14, 0x4, 18, 0x1, 18, 0x2, 18, 0x4, 6, 0x8, 10, 0x8, 14, 0x8, 18, 0x8, 3, 0x4, 7, 0x4, 11, 0x4, 15, 0x4, 2, 0x10, 6, 0x10, 10, 0x10, 14, 0x10, 18, 0x10, 3, 0x2, 7, 0x2, 11, 0x2, 2, 0x20, 2, 0x4, 2, 0x1, 6, 0x20, 10, 0x20, 14, 0x20, 18, 0x20, 2, 0x2, 3, 0x1, 7, 0x1, 11, 0x1, 15, 0x2, 2, 0x8, 2, 0x40, 15, 0x1, 5, 0x1, 5, 0x2, 9, 0x1, 9, 0x2, 13, 0x1, 4, 0x4, 8, 0x4, 12, 0x4, 16, 0x4, 13, 0x2, 17, 0x1, 17, 0x2, 4, 0x2, 8, 0x2, 12, 0x2, 16, 0x2, 4, 0x20, 8, 0x20, 4, 0x10, 8, 0x10, 12, 0x20, 12, 0x10, 16, 0x20, 16, 0x10, 4, 0x40, 8, 0x40, 12, 0x40, 16, 0x40, 4, 0x1, 8, 0x1, 12, 0x1, 16, 0x1, 4, 0x8, 8, 0x8, 12, 0x8, 16, 0x8 }; __device__ static Word16 order_MR59[] = { 0, 0x80, 0, 0x40, 0, 0x8, 0, 0x4, 0, 0x10, 0, 0x2, 0, 0x1, 0, 0x20, 1, 0x8, 1, 0x2, 1, 0x100, 1, 0x80, 1, 0x20, 1, 0x10, 1, 0x4, 1, 0x40, 1, 0x1, 3, 0x20, 11, 0x20, 3, 0x10, 11, 0x10, 3, 0x40, 11, 0x40, 3, 0x80, 11, 0x80, 3, 0x8, 11, 0x8, 7, 0x8, 15, 0x8, 6, 0x1, 10, 0x1, 14, 0x1, 18, 0x1, 3, 0x4, 11, 0x4, 7, 0x4, 15, 0x4, 6, 0x2, 10, 0x2, 14, 0x2, 18, 0x2, 7, 0x2, 15, 0x2, 3, 0x2, 11, 0x2, 3, 0x1, 11, 0x1, 6, 0x4, 10, 0x4, 14, 0x4, 18, 0x4, 6, 0x8, 10, 0x8, 14, 0x8, 18, 0x8, 6, 0x10, 10, 0x10, 14, 0x10, 18, 0x10, 2, 0x40, 2, 0x10, 2, 0x4, 2, 0x8, 2, 0x80, 2, 0x100, 2, 0x20, 2, 0x2, 17, 0x1, 5, 0x2, 13, 0x2, 17, 0x2, 9, 0x2, 9, 0x1, 5, 0x1, 13, 0x1, 2, 0x1, 6, 0x20, 10, 0x20, 14, 0x20, 18, 0x20, 7, 0x1, 15, 0x1, 4, 0x4, 8, 0x4, 12, 0x4, 16, 0x4, 4, 0x8, 8, 0x8, 12, 0x8, 16, 0x8, 4, 
0x40, 8, 0x40, 12, 0x40, 16, 0x40, 4, 0x80, 8, 0x80, 12, 0x80, 16, 0x80, 4, 0x100, 8, 0x100, 12, 0x100, 16, 0x100, 4, 0x1, 8, 0x1, 12, 0x1, 16, 0x1, 4, 0x2, 8, 0x2, 12, 0x2, 16, 0x2, 4, 0x10, 8, 0x10, 12, 0x10, 16, 0x10, 4, 0x20, 8, 0x20, 12, 0x20, 16, 0x20 }; __device__ static Word16 order_MR67[] = { 0, 0x80, 0, 0x40, 0, 0x8, 0, 0x10, 0, 0x4, 0, 0x2, 1, 0x8, 0, 0x1, 0, 0x20, 1, 0x100, 1, 0x80, 1, 0x20, 1, 0x2, 1, 0x10, 1, 0x4, 1, 0x40, 3, 0x20, 11, 0x20, 3, 0x10, 11, 0x10, 3, 0x40, 11, 0x40, 3, 0x80, 11, 0x80, 3, 0x8, 11, 0x8, 1, 0x1, 7, 0x8, 15, 0x8, 7, 0x4, 15, 0x4, 3, 0x4, 11, 0x4, 7, 0x2, 15, 0x2, 6, 0x40, 10, 0x40, 14, 0x40, 18, 0x40, 3, 0x2, 11, 0x2, 6, 0x8, 10, 0x8, 14, 0x8, 18, 0x8, 6, 0x4, 10, 0x4, 14, 0x4, 18, 0x4, 7, 0x1, 15, 0x1, 3, 0x1, 11, 0x1, 2, 0x40, 2, 0x4, 6, 0x2, 10, 0x2, 14, 0x2, 18, 0x2, 2, 0x10, 2, 0x8, 2, 0x80, 2, 0x100, 2, 0x20, 2, 0x2, 2, 0x1, 6, 0x10, 10, 0x10, 14, 0x10, 18, 0x10, 5, 0x1, 9, 0x1, 13, 0x1, 17, 0x1, 6, 0x1, 10, 0x1, 14, 0x1, 18, 0x1, 5, 0x2, 9, 0x2, 13, 0x2, 17, 0x2, 18, 0x20, 14, 0x20, 10, 0x20, 6, 0x20, 5, 0x4, 9, 0x4, 13, 0x4, 17, 0x4, 4, 0x4, 8, 0x4, 12, 0x4, 16, 0x4, 4, 0x20, 8, 0x20, 12, 0x20, 16, 0x20, 4, 0x40, 8, 0x40, 12, 0x40, 16, 0x40, 4, 0x200, 8, 0x200, 12, 0x200, 16, 0x200, 4, 0x400, 8, 0x400, 12, 0x400, 16, 0x400, 4, 0x1, 8, 0x1, 12, 0x1, 16, 0x1, 4, 0x2, 8, 0x2, 12, 0x2, 16, 0x2, 4, 0x8, 8, 0x8, 12, 0x8, 16, 0x8, 4, 0x10, 8, 0x10, 12, 0x10, 16, 0x10, 4, 0x80, 8, 0x80, 12, 0x80, 16, 0x80, 4, 0x100, 8, 0x100, 12, 0x100, 16, 0x100 }; __device__ static Word16 order_MR74[] = { 0, 0x80, 0, 0x40, 0, 0x20, 0, 0x10, 0, 0x8, 0, 0x4, 0, 0x2, 0, 0x1, 1, 0x100, 1, 0x80, 1, 0x40, 1, 0x20, 1, 0x10, 1, 0x8, 1, 0x4, 1, 0x2, 1, 0x1, 3, 0x80, 11, 0x80, 3, 0x40, 11, 0x40, 3, 0x20, 11, 0x20, 3, 0x10, 11, 0x10, 3, 0x8, 11, 0x8, 6, 0x40, 10, 0x40, 14, 0x40, 18, 0x40, 6, 0x20, 10, 0x20, 14, 0x20, 18, 0x20, 6, 0x8, 10, 0x8, 14, 0x8, 18, 0x8, 6, 0x4, 10, 0x4, 14, 0x4, 18, 0x4, 7, 0x10, 15, 0x10, 7, 0x8, 15, 0x8, 2, 0x10, 2, 0x8, 2, 0x4, 2, 0x100, 2, 0x80, 2, 0x40, 3, 0x4, 7, 0x4, 11, 0x4, 15, 0x4, 6, 0x2, 10, 0x2, 14, 0x2, 18, 0x2, 2, 0x20, 2, 0x2, 2, 0x1, 5, 0x1, 9, 0x1, 13, 0x1, 17, 0x1, 6, 0x1, 10, 0x1, 14, 0x1, 18, 0x1, 5, 0x2, 9, 0x2, 13, 0x2, 17, 0x2, 5, 0x4, 9, 0x4, 6, 0x10, 10, 0x10, 14, 0x10, 18, 0x10, 13, 0x4, 17, 0x4, 5, 0x8, 9, 0x8, 13, 0x8, 17, 0x8, 3, 0x2, 3, 0x1, 7, 0x2, 7, 0x1, 11, 0x2, 11, 0x1, 15, 0x2, 15, 0x1, 4, 0x20, 4, 0x10, 4, 0x8, 4, 0x4, 4, 0x2, 4, 0x1, 8, 0x20, 8, 0x10, 8, 0x8, 8, 0x4, 8, 0x2, 8, 0x1, 12, 0x20, 12, 0x10, 12, 0x8, 12, 0x4, 12, 0x2, 12, 0x1, 16, 0x20, 16, 0x10, 16, 0x8, 16, 0x4, 16, 0x2, 16, 0x1, 4, 0x1000, 8, 0x1000, 12, 0x1000, 16, 0x1000, 4, 0x800, 8, 0x800, 12, 0x800, 16, 0x800, 4, 0x400, 8, 0x400, 12, 0x400, 16, 0x400, 4, 0x200, 8, 0x200, 12, 0x200, 16, 0x200, 4, 0x100, 8, 0x100, 12, 0x100, 16, 0x100, 4, 0x80, 8, 0x80, 12, 0x80, 16, 0x80, 4, 0x40, 8, 0x40, 12, 0x40, 16, 0x40 }; __device__ static Word16 order_MR795[] = { 0, 0x1, 0, 0x2, 0, 0x4, 0, 0x8, 0, 0x10, 0, 0x20, 0, 0x40, 1, 0x8, 1, 0x2, 1, 0x100, 1, 0x80, 1, 0x20, 1, 0x10, 1, 0x4, 1, 0x40, 1, 0x1, 2, 0x40, 2, 0x10, 2, 0x4, 2, 0x8, 2, 0x80, 2, 0x100, 2, 0x20, 7, 0x10, 12, 0x10, 17, 0x10, 22, 0x10, 7, 0x8, 12, 0x8, 17, 0x8, 22, 0x8, 7, 0x4, 12, 0x4, 17, 0x4, 22, 0x4, 6, 0x8, 11, 0x8, 16, 0x8, 21, 0x8, 6, 0x4, 11, 0x4, 16, 0x4, 21, 0x4, 3, 0x80, 13, 0x80, 3, 0x40, 13, 0x40, 3, 0x20, 13, 0x20, 3, 0x10, 13, 0x10, 3, 0x8, 13, 0x8, 8, 0x20, 18, 0x20, 8, 0x10, 18, 0x10, 8, 0x8, 18, 0x8, 7, 0x2, 12, 0x2, 17, 0x2, 22, 0x2, 3, 0x4, 13, 0x4, 8, 0x4, 18, 0x4, 0, 0x80, 
0, 0x100, 2, 0x2, 2, 0x1, 3, 0x2, 13, 0x2, 3, 0x1, 13, 0x1, 8, 0x2, 18, 0x2, 8, 0x1, 18, 0x1, 6, 0x2, 11, 0x2, 16, 0x2, 21, 0x2, 7, 0x1, 12, 0x1, 17, 0x1, 22, 0x1, 6, 0x1, 11, 0x1, 16, 0x1, 21, 0x1, 15, 0x1, 15, 0x2, 15, 0x4, 4, 0x2, 9, 0x2, 14, 0x2, 19, 0x2, 4, 0x10, 9, 0x10, 14, 0x10, 19, 0x10, 4, 0x80, 9, 0x80, 14, 0x80, 19, 0x80, 4, 0x800, 9, 0x800, 14, 0x800, 19, 0x800, 15, 0x8, 20, 0x1, 20, 0x2, 20, 0x4, 20, 0x8, 10, 0x1, 10, 0x2, 10, 0x4, 10, 0x8, 5, 0x1, 5, 0x2, 5, 0x4, 5, 0x8, 4, 0x1, 4, 0x4, 4, 0x8, 4, 0x20, 4, 0x100, 4, 0x1000, 9, 0x1, 9, 0x4, 9, 0x8, 9, 0x20, 9, 0x100, 9, 0x1000, 14, 0x1, 14, 0x4, 14, 0x8, 14, 0x20, 14, 0x100, 14, 0x1000, 19, 0x1, 19, 0x4, 19, 0x8, 19, 0x20, 19, 0x100, 19, 0x1000, 4, 0x40, 9, 0x40, 14, 0x40, 19, 0x40, 4, 0x400, 9, 0x400, 14, 0x400, 19, 0x400, 4, 0x200, 9, 0x200, 14, 0x200, 19, 0x200, 0, 0x1, 0, 0x2, 0, 0x4, 0, 0x8, 0, 0x10, 0, 0x20, 0, 0x40, 1, 0x8, 1, 0x2, 1, 0x100, 1, 0x80, 1, 0x20, 1, 0x10, 1, 0x4, 1, 0x40, 1, 0x1, 2, 0x40, 2, 0x10, 2, 0x4, 2, 0x8, 2, 0x80, 2, 0x100, 2, 0x20, 7, 0x10, 12, 0x10, 17, 0x10, 22, 0x10, 7, 0x8, 12, 0x8, 17, 0x8, 22, 0x8, 7, 0x4, 12, 0x4, 17, 0x4, 22, 0x4, 6, 0x8, 11, 0x8, 16, 0x8, 21, 0x8, 6, 0x4, 11, 0x4, 16, 0x4, 21, 0x4, 3, 0x80, 13, 0x80, 3, 0x40, 13, 0x40, 3, 0x20, 13, 0x20, 3, 0x10, 13, 0x10, 3, 0x8, 13, 0x8, 8, 0x20, 18, 0x20, 8, 0x10, 18, 0x10, 8, 0x8, 18, 0x8, 7, 0x2, 12, 0x2, 17, 0x2, 22, 0x2, 3, 0x4, 13, 0x4, 8, 0x4, 18, 0x4, 0, 0x80, 0, 0x100, 2, 0x2, 2, 0x1, 3, 0x2, 13, 0x2, 3, 0x1, 13, 0x1, 8, 0x2, 18, 0x2, 8, 0x1, 18, 0x1, 6, 0x2, 11, 0x2, 16, 0x2, 21, 0x2, 7, 0x1, 12, 0x1, 17, 0x1, 22, 0x1, 6, 0x1, 11, 0x1, 16, 0x1, 21, 0x1, 15, 0x1, 15, 0x2, 15, 0x4, 4, 0x2, 9, 0x2, 14, 0x2, 19, 0x2, 4, 0x10, 9, 0x10, 14, 0x10, 19, 0x10, 4, 0x80, 9, 0x80, 14, 0x80, 19, 0x80, 4, 0x800, 9, 0x800, 14, 0x800, 19, 0x800, 15, 0x8, 20, 0x1, 20, 0x2, 20, 0x4, 20, 0x8, 10, 0x1, 10, 0x2, 10, 0x4, 10, 0x8, 5, 0x1, 5, 0x2, 5, 0x4, 5, 0x8, 4, 0x1, 4, 0x4, 4, 0x8, 4, 0x20, 4, 0x100, 4, 0x1000, 9, 0x1, 9, 0x4, 9, 0x8, 9, 0x20, 9, 0x100, 9, 0x1000, 14, 0x1, 14, 0x4, 14, 0x8, 14, 0x20, 14, 0x100, 14, 0x1000, 19, 0x1, 19, 0x4, 19, 0x8, 19, 0x20, 19, 0x100, 19, 0x1000, 4, 0x40, 9, 0x40, 14, 0x40, 19, 0x40, 4, 0x400, 9, 0x400, 14, 0x400, 19, 0x400, 4, 0x200, 9, 0x200, 14, 0x200, 19, 0x200 }; __device__ static Word16 order_MR102[] = { 0, 0x1, 0, 0x2, 0, 0x4, 0, 0x8, 0, 0x10, 0, 0x20, 0, 0x40, 0, 0x80, 1, 0x1, 1, 0x2, 1, 0x4, 1, 0x8, 1, 0x10, 1, 0x20, 1, 0x40, 1, 0x80, 1, 0x100, 3, 0x80, 3, 0x40, 3, 0x20, 3, 0x10, 3, 0x8, 3, 0x4, 21, 0x80, 21, 0x40, 21, 0x20, 21, 0x10, 21, 0x8, 21, 0x4, 12, 0x10, 12, 0x8, 30, 0x10, 30, 0x8, 11, 0x40, 11, 0x8, 11, 0x4, 20, 0x40, 20, 0x8, 20, 0x4, 29, 0x40, 29, 0x8, 29, 0x4, 38, 0x40, 38, 0x8, 38, 0x4, 3, 0x2, 3, 0x1, 21, 0x2, 21, 0x1, 12, 0x4, 12, 0x2, 30, 0x4, 30, 0x2, 11, 0x20, 20, 0x20, 29, 0x20, 38, 0x20, 2, 0x40, 2, 0x4, 2, 0x10, 2, 0x8, 2, 0x80, 2, 0x100, 2, 0x20, 2, 0x2, 2, 0x1, 7, 0x1, 6, 0x1, 5, 0x1, 4, 0x1, 16, 0x1, 15, 0x1, 14, 0x1, 13, 0x1, 25, 0x1, 24, 0x1, 23, 0x1, 22, 0x1, 34, 0x1, 33, 0x1, 32, 0x1, 31, 0x1, 11, 0x2, 11, 0x10, 11, 0x1, 20, 0x2, 20, 0x10, 20, 0x1, 29, 0x2, 29, 0x10, 29, 0x1, 38, 0x2, 38, 0x10, 38, 0x1, 12, 0x1, 30, 0x1, 17, 0x200, 17, 0x100, 18, 0x100, 18, 0x200, 18, 0x80, 17, 0x80, 18, 0x20, 17, 0x20, 17, 0x40, 18, 0x40, 19, 0x40, 19, 0x20, 18, 0x10, 19, 0x8, 17, 0x10, 19, 0x10, 17, 0x8, 18, 0x8, 26, 0x200, 26, 0x100, 27, 0x100, 27, 0x200, 27, 0x80, 26, 0x80, 27, 0x20, 26, 0x20, 26, 0x40, 27, 0x40, 28, 0x40, 28, 0x20, 27, 0x10, 28, 0x8, 26, 0x10, 28, 0x10, 26, 0x8, 27, 
0x8, 35, 0x200, 35, 0x100, 36, 0x100, 36, 0x200, 36, 0x80, 35, 0x80, 36, 0x20, 35, 0x20, 35, 0x40, 36, 0x40, 37, 0x40, 37, 0x20, 36, 0x10, 37, 0x8, 35, 0x10, 37, 0x10, 35, 0x8, 36, 0x8, 8, 0x200, 8, 0x100, 9, 0x100, 9, 0x200, 9, 0x80, 8, 0x80, 9, 0x20, 8, 0x20, 8, 0x40, 9, 0x40, 10, 0x40, 10, 0x20, 9, 0x10, 10, 0x8, 8, 0x10, 10, 0x10, 8, 0x8, 9, 0x8, 37, 0x4, 35, 0x1, 36, 0x1, 37, 0x1, 35, 0x4, 37, 0x2, 35, 0x2, 36, 0x4, 36, 0x2, 28, 0x4, 26, 0x1, 27, 0x1, 28, 0x1, 26, 0x4, 28, 0x2, 26, 0x2, 27, 0x4, 27, 0x2, 19, 0x4, 17, 0x1, 18, 0x1, 19, 0x1, 17, 0x4, 19, 0x2, 17, 0x2, 18, 0x4, 18, 0x2, 10, 0x4, 8, 0x1, 9, 0x1, 10, 0x1, 8, 0x4, 10, 0x2, 8, 0x2, 9, 0x4, 9, 0x2 }; __device__ static Word16 order_MR122[] = { 0, 0x40, 0, 0x20, 0, 0x10, 0, 0x8, 0, 0x4, 0, 0x2, 0, 0x1, 1, 0x80, 1, 0x40, 1, 0x20, 1, 0x10, 1, 0x8, 1, 0x4, 1, 0x2, 1, 0x1, 2, 0x1, 2, 0x100, 2, 0x80, 2, 0x40, 2, 0x20, 2, 0x10, 2, 0x8, 2, 0x4, 2, 0x2, 3, 0x80, 3, 0x40, 3, 0x20, 3, 0x10, 3, 0x8, 5, 0x100, 31, 0x100, 5, 0x80, 31, 0x80, 5, 0x40, 31, 0x40, 5, 0x20, 31, 0x20, 5, 0x10, 31, 0x10, 5, 0x8, 31, 0x8, 5, 0x4, 31, 0x4, 5, 0x2, 31, 0x2, 5, 0x1, 31, 0x1, 6, 0x8, 19, 0x8, 32, 0x8, 45, 0x8, 6, 0x4, 19, 0x4, 32, 0x4, 45, 0x4, 6, 0x2, 19, 0x2, 32, 0x2, 45, 0x2, 17, 0x10, 30, 0x10, 43, 0x10, 56, 0x10, 17, 0x8, 30, 0x8, 43, 0x8, 56, 0x8, 17, 0x4, 30, 0x4, 43, 0x4, 56, 0x4, 18, 0x20, 44, 0x20, 18, 0x10, 44, 0x10, 18, 0x8, 44, 0x8, 18, 0x4, 44, 0x4, 18, 0x2, 44, 0x2, 3, 0x4, 3, 0x2, 3, 0x1, 4, 0x20, 4, 0x10, 4, 0x8, 4, 0x4, 6, 0x1, 19, 0x1, 32, 0x1, 45, 0x1, 17, 0x2, 30, 0x2, 43, 0x2, 56, 0x2, 7, 0x8, 20, 0x8, 33, 0x8, 46, 0x8, 8, 0x8, 21, 0x8, 34, 0x8, 47, 0x8, 17, 0x1, 30, 0x1, 43, 0x1, 56, 0x1, 9, 0x8, 22, 0x8, 35, 0x8, 48, 0x8, 10, 0x8, 23, 0x8, 36, 0x8, 49, 0x8, 11, 0x8, 24, 0x8, 37, 0x8, 50, 0x8, 4, 0x2, 4, 0x1, 7, 0x1, 7, 0x2, 7, 0x4, 8, 0x1, 8, 0x2, 8, 0x4, 9, 0x1, 9, 0x2, 9, 0x4, 10, 0x1, 10, 0x2, 10, 0x4, 11, 0x1, 11, 0x2, 11, 0x4, 20, 0x1, 20, 0x2, 20, 0x4, 21, 0x1, 21, 0x2, 21, 0x4, 22, 0x1, 22, 0x2, 22, 0x4, 23, 0x1, 23, 0x2, 23, 0x4, 24, 0x1, 24, 0x2, 24, 0x4, 33, 0x1, 33, 0x2, 33, 0x4, 34, 0x1, 34, 0x2, 34, 0x4, 35, 0x1, 35, 0x2, 35, 0x4, 36, 0x1, 36, 0x2, 36, 0x4, 37, 0x1, 37, 0x2, 37, 0x4, 46, 0x1, 46, 0x2, 46, 0x4, 47, 0x1, 47, 0x2, 47, 0x4, 48, 0x1, 48, 0x2, 48, 0x4, 49, 0x1, 49, 0x2, 49, 0x4, 50, 0x1, 50, 0x2, 50, 0x4, 12, 0x1, 12, 0x2, 12, 0x4, 13, 0x1, 13, 0x2, 13, 0x4, 14, 0x1, 14, 0x2, 14, 0x4, 15, 0x1, 15, 0x2, 15, 0x4, 16, 0x1, 16, 0x2, 16, 0x4, 25, 0x1, 25, 0x2, 25, 0x4, 26, 0x1, 26, 0x2, 26, 0x4, 27, 0x1, 27, 0x2, 27, 0x4, 28, 0x1, 28, 0x2, 28, 0x4, 29, 0x1, 29, 0x2, 29, 0x4, 38, 0x1, 38, 0x2, 38, 0x4, 39, 0x1, 39, 0x2, 39, 0x4, 40, 0x1, 40, 0x2, 40, 0x4, 41, 0x1, 41, 0x2, 41, 0x4, 42, 0x1, 42, 0x2, 42, 0x4, 51, 0x1, 51, 0x2, 51, 0x4, 52, 0x1, 52, 0x2, 52, 0x4, 53, 0x1, 53, 0x2, 53, 0x4, 54, 0x1, 54, 0x2, 54, 0x4, 55, 0x1, 55, 0x2, 55, 0x4, 18, 0x1, 44, 0x1 }; __device__ static Word16 order_MRDTX[] = { 0, 0x4, 0, 0x2, 0, 0x1, 1, 0x80, 1, 0x40, 1, 0x20, 1, 0x10, 1, 0x8, 1, 0x4, 1, 0x2, 1, 0x1, 2, 0x100, 2, 0x80, 2, 0x40, 2, 0x20, 2, 0x10, 2, 0x8, 2, 0x4, 2, 0x2, 2, 0x1, 3, 0x100, 3, 0x80, 3, 0x40, 3, 0x20, 3, 0x10, 3, 0x8, 3, 0x4, 3, 0x2, 3, 0x1, 4, 0x20, 4, 0x10, 4, 0x8, 4, 0x4, 4, 0x2, 4, 0x1 }; /* Homing frames for the decoder */ __device__ static const Word16 dhf_MR475[PRMNO_MR475] = { 0x00F8, 0x009D, 0x001C, 0x0066, 0x0000, 0x0003, 0x0028, 0x000F, 0x0038, 0x0001, 0x000F, 0x0031, 0x0002, 0x0008, 0x000F, 0x0026, 0x0003 }; __device__ static const Word16 dhf_MR515[PRMNO_MR515] = { 0x00F8, 0x009D, 0x001C, 0x0066, 
0x0000, 0x0003, 0x0037, 0x000F, 0x0000, 0x0003, 0x0005, 0x000F, 0x0037, 0x0003, 0x0037, 0x000F, 0x0023, 0x0003, 0x001F }; __device__ static const Word16 dhf_MR59[PRMNO_MR59] = { 0x00F8, 0x00E3, 0x002F, 0x00BD, 0x0000, 0x0003, 0x0037, 0x000F, 0x0001, 0x0003, 0x000F, 0x0060, 0x00F9, 0x0003, 0x0037, 0x000F, 0x0000, 0x0003, 0x0037 }; __device__ static const Word16 dhf_MR67[PRMNO_MR67] = { 0x00F8, 0x00E3, 0x002F, 0x00BD, 0x0002, 0x0007, 0x0000, 0x000F, 0x0098, 0x0007, 0x0061, 0x0060, 0x05C5, 0x0007, 0x0000, 0x000F, 0x0318, 0x0007, 0x0000 }; __device__ static const Word16 dhf_MR74[PRMNO_MR74] = { 0x00F8, 0x00E3, 0x002F, 0x00BD, 0x0006, 0x000F, 0x0000, 0x001B, 0x0208, 0x000F, 0x0062, 0x0060, 0x1BA6, 0x000F, 0x0000, 0x001B, 0x0006, 0x000F, 0x0000 }; __device__ static const Word16 dhf_MR795[PRMNO_MR795] = { 0x00C2, 0x00E3, 0x002F, 0x00BD, 0x0006, 0x000F, 0x000A, 0x0000, 0x0039, 0x1C08, 0x0007, 0x000A, 0x000B, 0x0063, 0x11A6, 0x000F, 0x0001, 0x0000, 0x0039, 0x09A0, 0x000F, 0x0002, 0x0001 }; __device__ static const Word16 dhf_MR102[PRMNO_MR102] = { 0x00F8, 0x00E3, 0x002F, 0x0045, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x001B, 0x0000, 0x0001, 0x0000, 0x0001, 0x0326, 0x00CE, 0x007E, 0x0051, 0x0062, 0x0000, 0x0000, 0x0000, 0x0000, 0x015A, 0x0359, 0x0076, 0x0000, 0x001B, 0x0000, 0x0000, 0x0000, 0x0000, 0x017C, 0x0215, 0x0038, 0x0030 }; __device__ static const Word16 dhf_MR122[PRMNO_MR122] = { 0x0004, 0x002A, 0x00DB, 0x0096, 0x002A, 0x0156, 0x000B, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0036, 0x000B, 0x0000, 0x000F, 0x000E, 0x000C, 0x000D, 0x0000, 0x0001, 0x0005, 0x0007, 0x0001, 0x0008, 0x0024, 0x0000, 0x0001, 0x0000, 0x0005, 0x0006, 0x0001, 0x0002, 0x0004, 0x0007, 0x0004, 0x0002, 0x0003, 0x0036, 0x000B, 0x0000, 0x0002, 0x0004, 0x0000, 0x0003, 0x0006, 0x0001, 0x0007, 0x0006, 0x0005, 0x0000 }; /* parameter sizes (# of bits), one table per mode */ __device__ static const Word16 bitno_MR475[PRMNO_MR475] = { 8, 8, 7, /* LSP VQ */ 8, 7, 2, 8, /* first subframe */ 4, 7, 2, /* second subframe */ 4, 7, 2, 8, /* third subframe */ 4, 7, 2 /* fourth subframe */ }; __device__ static const Word16 bitno_MR515[PRMNO_MR515] = { 8, 8, 7, /* LSP VQ */ 8, 7, 2, 6, /* first subframe */ 4, 7, 2, 6, /* second subframe */ 4, 7, 2, 6, /* third subframe */ 4, 7, 2, 6 /* fourth subframe */ }; __device__ static const Word16 bitno_MR59[PRMNO_MR59] = { 8, 9, 9, /* LSP VQ */ 8, 9, 2, 6, /* first subframe */ 4, 9, 2, 6, /* second subframe */ 8, 9, 2, 6, /* third subframe */ 4, 9, 2, 6 /* fourth subframe */ }; __device__ static const Word16 bitno_MR67[PRMNO_MR67] = { 8, 9, 9, /* LSP VQ */ 8, 11, 3, 7, /* first subframe */ 4, 11, 3, 7, /* second subframe */ 8, 11, 3, 7, /* third subframe */ 4, 11, 3, 7 /* fourth subframe */ }; __device__ static const Word16 bitno_MR74[PRMNO_MR74] = { 8, 9, 9, /* LSP VQ */ 8, 13, 4, 7, /* first subframe */ 5, 13, 4, 7, /* second subframe */ 8, 13, 4, 7, /* third subframe */ 5, 13, 4, 7 /* fourth subframe */ }; __device__ static const Word16 bitno_MR795[PRMNO_MR795] = { 9, 9, 9, /* LSP VQ */ 8, 13, 4, 4, 5, /* first subframe */ 6, 13, 4, 4, 5, /* second subframe */ 8, 13, 4, 4, 5, /* third subframe */ 6, 13, 4, 4, 5 /* fourth subframe */ }; __device__ static const Word16 bitno_MR102[PRMNO_MR102] = { 8, 9, 9, /* LSP VQ */ 8, 1, 1, 1, 1, 10, 10, 7, 7, /* first subframe */ 5, 1, 1, 1, 1, 10, 10, 7, 7, /* second subframe */ 8, 1, 1, 1, 1, 10, 10, 7, 7, /* third subframe */ 5, 1, 1, 1, 1, 10, 10, 7, 7 /* fourth subframe */ }; __device__ 
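/*
 * Illustrative sketches (editorial additions, not part of the original table
 * set or of the codec's real API).  They only document how two kinds of
 * tables in this file appear to be laid out: the order_MRxx tables above,
 * whose entries come in (parameter-word index, single-bit mask) pairs, and
 * the three-column gain table qua_gain_code defined further below.  The
 * function names, parameter conventions, and the pair interpretation are
 * assumptions made for illustration only.
 */

/*
 * Sketch 1: rebuilding parameter words from a serial bit buffer using an
 * ordering table.  Each consecutive pair order[2*i], order[2*i+1] is read as
 * (index of the destination parameter word, mask of the bit to set).  The
 * caller is assumed to have cleared prm[] beforehand; one input bit is
 * consumed per pair (e.g. one pair per bit of the frame for order_MR122).
 */
static void example_order_bits_to_prm(const Word16 *serial,  /* one 0/1 value per bit  */
                                      const Word16 *order,   /* (index, mask) pairs    */
                                      Word32 num_bits,       /* number of pairs        */
                                      Word16 *prm)           /* parameter words (out)  */
{
    Word32 i;

    for (i = 0; i < num_bits; i++) {
        if (serial[i] != 0) {
            Word16 idx  = order[2 * i];
            Word16 mask = order[2 * i + 1];

            prm[idx] = (Word16)(prm[idx] | mask);
        }
    }
}

/*
 * Sketch 2: reading one row of a gain table laid out like qua_gain_code
 * below, i.e. three entries per row: gain factor (Q11), quantized energy
 * error for MR122 (Q10), and quantized energy error for the other modes
 * (Q10).  The table pointer is passed in so no particular table is assumed.
 */
__device__ static void example_read_gain_row(const Word32 *tab,  /* 3 entries per row */
                                             Word32 row,
                                             Word32 *g_fac,
                                             Word32 *qua_ener_MR122,
                                             Word32 *qua_ener)
{
    *g_fac          = tab[3 * row];
    *qua_ener_MR122 = tab[3 * row + 1];
    *qua_ener       = tab[3 * row + 2];
}

__device__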
static const Word16 bitno_MR122[PRMNO_MR122] = { 7, 8, 9, 8, 6, /* LSP VQ */ 9, 4, 4, 4, 4, 4, 4, 3, 3, 3, 3, 3, 5, /* first subframe */ 6, 4, 4, 4, 4, 4, 4, 3, 3, 3, 3, 3, 5, /* second subframe */ 9, 4, 4, 4, 4, 4, 4, 3, 3, 3, 3, 3, 5, /* third subframe */ 6, 4, 4, 4, 4, 4, 4, 3, 3, 3, 3, 3, 5 /* fourth subframe */ }; __device__ static const Word16 bitno_MRDTX[PRMNO_MRDTX] = { 3, 8, 9, 9, 6 }; #define PRMNO_MR475 17 #define PRMNO_MR515 19 #define PRMNO_MR59 19 #define PRMNO_MR67 19 #define PRMNO_MR74 19 #define PRMNO_MR795 23 #define PRMNO_MR102 39 #define PRMNO_MR122 57 #define PRMNO_MRDTX 5 /* * tables */ /* level adjustment for different modes Q11 */ __device__ static const Word16 dtx_log_en_adjust[9] = { -1023, /* MR475 */ -878, /* MR515 */ -732, /* MR59 */ -586, /* MR67 */ -440, /* MR74 */ -294, /* MR795 */ -148, /* MR102 */ 0, /* MR122 */ 0, /* MRDTX */ }; /* attenuation factors for codebook gain */ __device__ static const Word32 cdown[7] = { 32767, 32112, 32112, 32112, 32112, 32112, 22937 }; /* attenuation factors for adaptive codebook gain */ __device__ static const Word32 pdown[7] = { 32767, 32112, 32112, 26214, 9830, 6553, 6553 }; /* algebraic code book gain MA predictor coefficients */ __device__ static const Word32 pred[NPRED] = { 5571, 4751, 2785, 1556 }; /* algebraic code book gain MA predictor coefficients (MR122) */ __device__ static const Word32 pred_MR122[NPRED] = { 44, 37, 22, 12 }; __device__ static const Word32 gamma4_gamma3_MR122[M] = { 22938, 16057, 11240, 7868, 5508, 3856, 2699, 1889, 1322, 925 }; __device__ static const Word32 gamma3[M] = { 18022, 9912, 5451, 2998, 1649, 907, 499, 274, 151, 83 }; __device__ static const Word32 gamma4_MR122[M] = { 24576, 18432, 13824, 10368, 7776, 5832, 4374, 3281, 2461, 1846 }; /* adaptive codebook gain quantization table (MR122, MR795) */ #define NB_QUA_PITCH 16 __device__ static const Word32 qua_gain_pitch[NB_QUA_PITCH] = { 0, 3277, 6556, 8192, 9830, 11469, 12288, 13107, 13926, 14746, 15565, 16384, 17203, 18022, 18842, 19661 }; /* fixed codebook gain quantization table (MR122, MR795) */ #define NB_QUA_CODE 32 __device__ static const Word32 qua_gain_code[NB_QUA_CODE * 3] = { /* gain factor (g_fac) and quantized energy error (qua_ener_MR122, qua_ener) * are stored: * * qua_ener_MR122 = log2(g_fac) (not the rounded floating point value, but * the value the original EFR algorithm * calculates from g_fac [using Log2]) * qua_ener = 20*log10(g_fac); (rounded floating point value) * * * g_fac (Q11), * qua_ener_MR122 (Q10), * qua_ener (Q10) */ 159, -3776, -22731, 206, -3394, -20428, 268, -3005, -18088, 349, -2615, -15739, 419, -2345, -14113, 482, -2138, -12867, 554, -1932, -11629, 637, -1726, -10387, 733, -1518, -9139, 842, -1314, -7906, 969, -1106, -6656, 1114, -900, -5416, 1281, -694, -4173, 1473, -487, -2931, 1694, -281, -1688, 1948, -75, -445, 2241, 133, 801, 2577, 339, 2044, 2963, 545, 3285, 3408, 752, 4530, 3919, 958, 5772, 4507, 1165, 7016, 5183, 1371, 8259, 5960, 1577, 9501, 6855, 1784, 10745, 7883, 1991, 11988, 9065, 2197, 13231, 10425, 2404, 14474, 12510, 2673, 16096, 16263, 3060, 18429, 21142, 3448, 20763, 27485, 3836, 23097 }; /* gray coding table */ __device__ static const Word8 gray[8] = { 0, 1, 3, 2, 6, 4, 5, 7 }; /* gray decoding table */ __device__ static const Word32 dgray[8] = { 0, 1, 3, 2, 5, 6, 4, 7 }; /* table[i] = sqrt((i+16)*2^-6) * 2^15, i.e. 
sqrt(x) scaled Q15 */ __device__ static const Word32 sqrt_table[49] = { 16384, 16888, 17378, 17854, 18318, 18770, 19212, 19644, 20066, 20480, 20886, 21283, 21674, 22058, 22435, 22806, 23170, 23530, 23884, 24232, 24576, 24915, 25249, 25580, 25905, 26227, 26545, 26859, 27170, 27477, 27780, 28081, 28378, 28672, 28963, 29251, 29537, 29819, 30099, 30377, 30652, 30924, 31194, 31462, 31727, 31991, 32252, 32511, 32767 }; __device__ static const Word32 inv_sqrt_table[49] = { 32767, 31790, 30894, 30070, 29309, 28602, 27945, 27330, 26755, 26214, 25705, 25225, 24770, 24339, 23930, 23541, 23170, 22817, 22479, 22155, 21845, 21548, 21263, 20988, 20724, 20470, 20225, 19988, 19760, 19539, 19326, 19119, 18919, 18725, 18536, 18354, 18176, 18004, 17837, 17674, 17515, 17361, 17211, 17064, 16921, 16782, 16646, 16514, 16384 }; /* table used inbase 2 logharithm computation */ __device__ static const Word32 log2_table[33] = { 0, 1455, 2866, 4236, 5568, 6863, 8124, 9352, 10549, 11716, 12855, 13967, 15054, 16117, 17156, 18172, 19167, 20142, 21097, 22033, 22951, 23852, 24735, 25603, 26455, 27291, 28113, 28922, 29716, 30497, 31266, 32023, 32767 }; /* table used in 2 to the power computation */ __device__ static const Word32 pow2_table[33] = { 16384, 16743, 17109, 17484, 17867, 18258, 18658, 19066, 19484, 19911, 20347, 20792, 21247, 21713, 22188, 22674, 23170, 23678, 24196, 24726, 25268, 25821, 26386, 26964, 27554, 28158, 28774, 29405, 30048, 30706, 31379, 32066, 32767 }; /* table of cos(x) */ __device__ static const Word32 cos_table[65] = { 32767, 32729, 32610, 32413, 32138, 31786, 31357, 30853, 30274, 29622, 28899, 28106, 27246, 26320, 25330, 24279, 23170, 22006, 20788, 19520, 18205, 16846, 15447, 14010, 12540, 11039, 9512, 7962, 6393, 4808, 3212, 1608, 0, -1608, -3212, -4808, -6393, -7962, -9512, -11039, -12540, -14010, -15447, -16846, -18205, -19520, -20788, -22006, -23170, -24279, -25330, -26320, -27246, -28106, -28899, -29622, -30274, -30853, -31357, -31786, -32138, -32413, -32610, -32729, -32768 }; /* slope used to compute y = acos(x) */ __device__ static const Word32 acos_slope[64] = { -26887, -8812, -5323, -3813, -2979, -2444, -2081, -1811, -1608, -1450, -1322, -1219, -1132, -1059, -998, -946, -901, -861, -827, -797, -772, -750, -730, -713, -699, -687, -677, -668, -662, -657, -654, -652, -652, -654, -657, -662, -668, -677, -687, -699, -713, -730, -750, -772, -797, -827, -861, -901, -946, -998, -1059, -1132, -1219, -1322, -1450, -1608, -1811, -2081, -2444, -2979, -3813, -5323, -8812, -26887 }; /* All impulse responses are in Q15 */ /* phase dispersion impulse response (MR795) */ __device__ static const Word32 ph_imp_low_MR795[] = { 26777, 801, 2505, -683, -1382, 582, 604, -1274, 3511, -5894, 4534, -499, -1940, 3011, -5058, 5614, -1990, -1061, -1459, 4442, -700, -5335, 4609, 452, -589, -3352, 2953, 1267, -1212, -2590, 1731, 3670, -4475, -975, 4391, -2537, 949, -1363, -979, 5734 }; /* phase dispersion impulse response (MR795) */ __device__ static const Word32 ph_imp_mid_MR795[] = { 30274, 3831, -4036, 2972, -1048, -1002, 2477, -3043, 2815, -2231, 1753, -1611, 1714, -1775, 1543, -1008, 429, -169, 472, -1264, 2176, -2706, 2523, -1621, 344, 826, -1529, 1724, -1657, 1701, -2063, 2644, -3060, 2897, -1978, 557, 780, -1369, 842, 655 }; /* phase dispersion impulse response (MR475 - MR67) */ __device__ static const Word32 ph_imp_low[] = { 14690, 11518, 1268, -2761, -5671, 7514, -35, -2807, -3040, 4823, 2952, -8424, 3785, 1455, 2179, -8637, 8051, -2103, -1454, 777, 1108, -2385, 2254, -363, -674, -2103, 6046, -5681, 1072, 
3123, -5058, 5312, -2329, -3728, 6924, -3889, 675, -1775, 29, 10145 }; /* phase dispersion impulse response (MR475 - MR67) */ __device__ static const Word32 ph_imp_mid[] = { 30274, 3831, -4036, 2972, -1048, -1002, 2477, -3043, 2815, -2231, 1753, -1611, 1714, -1775, 1543, -1008, 429, -169, 472, -1264, 2176, -2706, 2523, -1621, 344, 826, -1529, 1724, -1657, 1701, -2063, 2644, -3060, 2897, -1978, 557, 780, -1369, 842, 655 }; /* initialization table for the MA predictor in DTX */ #define PAST_RQ_INIT_SIZE 8 /* initalization table for MA predictor in dtx mode */ __device__ static const Word32 past_rq_init[80] = { -258, -318, -439, -634, -656, -773, -711, -502, -268, -193, -2, 125, 122, -39, -9, 105, 129, 283, 372, 575, -277, -324, -197, -487, -445, -362, -292, -27, 177, 543, 342, 517, 516, 130, 27, -104, -120, -140, -74, -56, -564, -943, -1520, -965, -814, -526, -322, -2, 159, 657, -312, -284, -386, -597, -493, -526, -418, -229, 105, 449, -557, -870, -1075, -919, -950, -752, -709, -316, 62, 486, -314, -191, -203, -330, -160, -103, -51, 131, 338, 515 }; #define ALPHA 29491 #define ONE_ALPHA 3277 /* LSF means (not in MR122) */ __device__ static const Word32 mean_lsf_3[10] = { 1546, 2272, 3778, 5488, 6972, 8382, 10047, 11229, 12766, 13714 }; #define ALPHA_122 31128 #define ONE_ALPHA_122 1639 /* LSF means ->normalize frequency domain */ __device__ static const Word32 mean_lsf_5[10] = { 1384, 2077, 3420, 5108, 6742, 8122, 9863, 11092, 12714, 13701 }; /* LSF prediction factors (not in MR122) */ __device__ static const Word32 pred_fac[10] = { 9556, 10769, 12571, 13292, 14381, 11651, 10588, 9767, 8593, 6484 }; #define DICO1_SIZE_3 256 #define DICO2_SIZE_3 512 #define DICO3_SIZE_3 512 /* 1st LSF quantizer (not in MR122 and MR795) */ __device__ static const Word32 dico1_lsf_3[] = { 6, 82, -131, 154, -56, -735, 183, -65, -265, 9, -210, -361, 113, 718, 1817, 1010, 1214, 1573, 857, 1333, 2276, 827, 1568, 1933, 717, 1989, 2206, 838, 1172, 1823, 721, 1000, 2154, 286, 476, 1509, -247, -531, 230, 147, -82, 569, 26, -177, -944, -27, -273, 692, -164, -264, -183, 224, 790, 1039, 899, 946, 601, 485, 771, 1150, 524, 677, 903, -140, 375, 778, 410, 676, 429, 301, 530, 1009, 719, 646, 38, 226, 367, 40, 145, -45, -505, 290, 121, -121, 302, 127, 166, -124, -383, -956, -358, -455, -977, 715, 878, 894, 978, 923, 211, 477, 272, 64, 188, -78, 17, -143, -65, 38, 643, 586, 621, -134, -426, -651, 347, 545, 2820, 1188, 2726, 2442, 142, -80, 1735, 283, 130, 461, -262, -399, -1145, -411, 155, 430, 329, 375, 779, 53, -226, -139, -129, -236, 1682, 285, 744, 1327, 738, 697, 1664, 312, 409, 266, 325, 720, 135, 1, 221, 453, 8, 203, 145, 299, 640, 760, 29, 468, 638, 103, 429, 379, 420, 954, 932, 1326, 1210, 1258, 704, 1012, 1152, -166, -444, -266, -316, -130, -376, 191, 1151, 1904, -240, -543, -1260, -112, 268, 1207, 70, 1062, 1583, 278, 1360, 1574, -258, -272, -768, 19, 563, 2240, -3, -265, 135, -295, -591, -388, 140, 354, -206, -260, -504, -795, -433, -718, -1319, 109, 331, 962, -429, -87, 652, -296, 426, 1019, -239, 775, 851, 489, 1334, 1073, -334, -332, 25, 543, 1206, 1807, 326, 61, 727, 578, 849, 1405, -208, -277, 329, -152, 64, 669, -434, -678, -727, -454, -71, 251, 605, 480, 254, -482, 11, 996, -289, 395, 486, 722, 1049, 1440, -30, -316, -786, -106, -115, -619, 861, 1474, 1412, 1055, 1366, 1184, 812, 1237, 925, 42, -251, -576, 342, 141, -454, -168, -80, 1359, -342, -656, -1763, 100, 821, 725, 990, 747, 800, 332, 440, 568, 663, 379, 852, 112, 165, -369, 597, 910, 282, -8, 834, 1281, -352, 572, 695, 462, 2246, 1806, 345, 190, 1374, 
416, 915, 2166, 168, -82, 280, -516, -446, 840, 47, 533, 44, -362, -711, -1143, 22, 193, 1472, -85, 233, 1813, -62, 579, 1504, 550, 944, 1749, 723, 650, 1148, 972, 884, 1395, -425, 643, 0, 1000, 952, 1098, 249, 1446, 672, -334, -87, 2172, -554, 1882, 2672, 140, 1826, 1853, 920, 1749, 2590, 1076, 1933, 2038, -137, -443, -1555, 1269, 1174, 468, -493, -122, 1521, -451, 1033, 1214, 482, 1695, 1118, 815, 649, 384, -446, -692, 107, -319, -605, -118, -207, -505, 525, -468, -12, 2736, 75, 1934, 1305, 880, 2358, 2267, 1285, 1575, 2004, -48, -304, -1186, -435, -461, -251, -366, -404, -547, -289, -605, -597, -538, -810, -165, -120, 3, 356, 639, 1241, 1502, 96, 177, 750, -435, -585, -1174, -356, 109, -79, -485, 288, 2005, 9, 1116, 731, 880, 2134, 946, -265, 1585, 1065, 1157, 1210, 843, -498, -668, 431, 374, 321, -229, 1440, 2101, 1381, 449, 461, 1155, -105, 39, -384, -263, 367, 182, -371, -660, 773, -188, 1151, 971, 1333, 1632, 1435, 774, 1267, 1221, -482, -832, -1489, -237, -210, 860, 890, 1615, 1064, 472, 1062, 1192, 185, 1077, 989, -568, -992, -1704, -449, -902, -2043, -142, -377, -458, -210, -554, -1029, -11, 1133, 2265, -329, -675, -893, -250, 657, 1187, 519, 1510, 1779, 520, 539, 1403, 527, 1421, 1302, -563, -871, -1248, -147, -463, 879, -76, 2334, 2840, 563, 2573, 2385, 632, 1926, 2920, 719, 2023, 1840, -545, -723, 1108, 129, -125, 884, 1417, 1632, 925, -94, 1566, 1751, -341, 1533, 1551, 591, 395, -274, -76, 981, 2831, 153, 2985, 1844, 1032, 2565, 2749, 1508, 2832, 1879, 791, 1199, 538, -190, -453, 1489, -278, -548, 1158, -245, 1941, 2044, 1024, 1560, 1650, 512, 253, 466, -62, -323, 1151, -473, -376, 507, -433, 1380, 2162, 899, 1943, 1445, 134, 704, 440, 460, 525, -28, -450, 279, 1338, 0, 971, 252, -445, -627, -991, -348, -602, -1424, 398, 712, 1656, -107, 314, -178, 93, 2226, 2238, 518, 849, 656, -462, -711, -447, 174, -34, 1191, -119, 42, 1005, -372, 274, 758, 1036, 2352, 1838, 675, 1724, 1498, 430, 1286, 2133, -129, -439, 0, -373, 800, 2144, 6, 1587, 2478, 478, 596, 2128, -428, -736, 1505, 385, 178, 980, 139, 449, 1225, -526, -842, -982, 145, 1554, 1242, 623, 1448, 656, 349, 1016, 1482, 31, -280, 415, -316, 724, 1641, 360, 1058, 556, -436, -358, 1201, -355, 1123, 1939, 401, 1584, 2248, -527, -1012, 355, 233, 238, 2233, -550, -897, -639, -365, -501, 1957, 389, 1860, 1621, 162, 1132, 1264, -237, 1174, 1390, -640, -411, 116, -228, 1694, 2298, 1639, 2186, 2267, 562, 1273, 2658, 323, 338, 1774, 578, 1107, 852, 22, 594, 934, -143, 718, 446 }; /* 2nd LSF quantizer (not in MR122) */ __device__ static const Word32 dico2_lsf_3[] = { 50, 71, -9, -338, -698, -1407, 102, -138, -820, -310, -469, -1147, 414, 67, -267, 1060, 814, 1441, 1548, 1360, 1272, 1754, 1895, 1661, 2019, 2133, 1820, 1808, 2318, 1845, 644, -93, 454, 858, 329, -136, 489, -258, -128, -198, -745, -41, -52, -265, -985, 346, 137, 479, -1741, -748, -684, -1163, -1725, -367, -895, -1145, -784, -488, -946, -968, -85, -390, -725, 215, -340, -171, 1020, 916, 1969, 564, 179, 746, 662, 977, 1734, 887, 622, 914, 939, 856, 1165, 309, 688, 803, 917, 161, 570, 118, -20, -283, -816, -42, 204, -1228, -325, -462, -963, -202, -143, -988, -484, -361, -702, -978, -477, -302, -790, -1188, -100, -786, -1088, -1054, -947, -1684, -202, -843, -782, -1039, -1378, -901, -624, -110, -85, 356, 213, -10, -493, 364, 774, 425, 822, 479, -83, 557, 520, -992, -1560, -572, -603, -741, -26, -502, -638, -903, 209, 306, 147, -316, -593, -596, -85, -211, -225, -918, -529, 117, 233, -439, -738, 1101, 751, 633, 1457, 1716, 1511, 1765, 1457, 910, 1122, 1156, 849, 1354, 868, 470, 
-871, -1150, -1796, -871, -861, -992, -118, 155, 212, -1051, -849, -606, -1117, -1849, -2750, -1019, -1427, -1869, 370, -184, -414, 959, 493, 104, 958, 1039, 543, 154, 653, 201, 1249, 507, 150, 663, 503, 230, 623, 777, 675, 659, 88, -110, 843, 244, 224, 382, 541, 302, 724, 433, 666, 1166, 734, 341, -138, 20, -397, -1183, -424, -46, -321, -352, -124, 1333, 1021, 1080, 262, 366, 723, 922, 283, -551, 31, -636, -611, -689, -697, -415, -952, -779, -201, -1329, -598, -359, -953, -1285, 166, 493, 305, 221, 846, 703, 610, 840, 936, 774, -723, -1324, -1261, -357, -1025, -1388, -1096, -1376, -365, -1416, -1881, -608, -1798, -1727, -674, -545, -1173, -703, 678, 786, 148, -123, 696, 1288, 644, 350, -10, 414, 614, 15, 137, 344, -211, -814, -1512, -819, -391, -930, -588, 47, -591, -898, -909, -1097, -163, -1272, -1167, -157, -1464, -1525, -389, -1274, -1188, -624, 671, 213, 454, 124, -274, -525, -729, -496, -152, -1344, 122, 135, -2905, -589, -394, -1728, 441, -50, 1476, 904, 787, 316, 236, -440, -347, 217, 413, -911, -917, 121, -455, -932, 202, -92, -465, -375, 488, 390, 474, 876, 729, 316, -1815, -1312, -669, 87, 962, 432, 563, -249, -1058, 250, 285, 1105, 1141, 427, 696, -1038, -1664, -1582, -948, 346, 160, -309, -272, -858, 670, 624, 1250, -944, -408, -666, -606, -320, -384, -492, 230, 65, 334, -50, -16, -16, -690, -1397, 1791, 1716, 1399, 2478, 2063, 1404, 1245, 1471, 1426, -382, -1037, -2, 173, -398, 1145, 1491, 2024, 1801, 772, 1274, 1506, 1429, 1735, 2001, 1079, 1218, 1273, -1154, -1851, -1329, -808, -1133, -1096, -451, -1033, -1722, 65, 578, -84, -1476, -2434, -1778, -765, -1366, -494, -218, -594, -931, 337, -236, 562, 2357, 2662, 1938, 1489, 1276, 874, 189, 358, 374, -1519, -2281, -2346, -967, -1271, -2095, -628, -1188, -1542, 1661, 1043, 546, 565, 1061, 732, -64, -836, -434, -436, -96, 203, 1078, 1216, 1636, 907, 1534, 986, 326, 965, 845, 142, -84, 197, 470, 2379, 1570, 1133, 470, 1214, 395, 1376, 1200, 1125, 1042, 348, -543, -1234, -376, -215, -181, 481, -1947, -1621, -210, -750, -1185, 390, 29, -399, 27, 820, 1236, 755, 695, 979, 409, -174, 1197, 1035, 912, 1356, 1846, -992, -1437, 484, -1485, -1700, 208, -412, 1204, 1432, -271, 896, 1144, -416, 1777, 1434, -1696, -2644, -204, -1789, -1551, 1033, -1656, -1559, 1303, -1253, -1589, 1081, -669, -1095, -66, -682, 320, -345, 659, 305, 1069, -1292, -804, -19, -1635, -1291, 29, -1683, -497, 71, -287, -7, -100, -494, -962, -237, 852, 1881, 1740, -1217, -1387, 227, -660, 302, 373, 96, 1087, 1257, -1074, -1669, 160, 485, 2076, 1798, -934, -220, 552, -596, -612, 237, 336, 1720, 879, 643, 629, 434, 1267, 522, 1633, 15, 244, -441, 1475, 717, 184, 1819, 1590, 1709, 988, 261, 937, 2093, 2345, 1520, 2139, 1858, 1606, -577, -579, -1203, -956, 135, -488, -464, 51, -338, -629, -348, -723, 1146, 2073, 1442, 2192, 1466, 911, -1444, -1572, -2278, 1400, 710, 1297, 1335, 633, 928, 1434, 2194, 2594, 2422, 2204, 1881, 982, 2242, 1854, 380, 792, 1145, -63, -539, 414, -252, -964, -314, -1261, -683, -780, -831, -526, -1005, -1666, -1135, -424, -1611, -452, -299, 1268, 1048, 642, 1147, 853, 856, -675, -336, 139, 2268, 1343, 1418, 29, 768, 797, -1224, 423, 564, -1318, -1082, 245, -1302, -812, 573, -1298, -1617, 646, -968, 834, 723, 993, 1652, 2027, -191, -817, 432, 662, 60, 198, 626, 997, 1330, 1648, 1963, 1289, -1597, -93, -45, -1088, 37, -84, 1653, 2607, 2337, 1065, 2040, 2377, 1139, 2326, 2118, 859, 357, 1510, 664, 1227, 1099, 479, 1360, 912, 1897, 1754, 2019, 1168, 1909, 1784, 399, 34, 256, -593, -304, -1053, 547, 1694, 1407, 647, -99, -341, 1492, 1647, 1190, 38, 
-644, -212, 395, 846, 222, -704, -765, -716, -724, -1964, -2804, -150, 291, -82, 1233, 1459, 1007, -140, -155, 153, 439, 297, 1568, -1529, -410, -636, 1536, 455, -237, -1328, -139, -260, 531, 554, 868, 269, 1264, 606, -233, 883, 463, 742, 600, -120, -73, 421, 212, -439, -58, 804, -1286, -1241, 728, 294, -490, 50, -591, -905, -1254, 42, -687, 147, -25, 273, 596, -311, 1213, 601, -754, 849, 584, 429, 607, 587, -602, -166, 461, -796, -823, 777, 1380, 910, 1755, 119, 1417, 972, -219, -880, -1596, -1049, -1010, 438, -713, -1379, 78, 0, -447, -1179, -1136, -1319, -1573, 2248, 1767, 1309, 946, 1583, 1432, 1150, 482, 436, -469, -1108, 618, -447, -966, 1088, -1252, -1515, -114, -1104, -2008, -579, 210, 613, 497, -1975, -1437, 642, -1269, -856, 1011, -1646, -1185, 1063, -1555, -672, 1204, -1692, -1114, 623, -979, -1326, -1277, 539, -147, 894, -1354, -897, -434, 888, 475, 428, 153, -384, 338, -1492, -511, 359, -974, -1115, -470, 105, -550, 677, -937, -1145, 877, 380, -260, 210, 1685, 924, 1256, 1775, 1190, 1095, 1419, 631, 533, 627, 299, -347, -411, -534, 647, -650, 29, -595, -378, -1367, 1563, 1402, 1121, 1465, 1089, 1410, 648, -2096, -1090, -6, 311, -194, -869, -639, -831, 416, -1162, -1224, 1349, -1247, -941, 1813, -2193, -1987, 453, -619, -1367, -956, -1606, -1972, -1507, -1175, -1057, -1104, -377, 601, 201, 1876, 825, 374, -430, -1323, 29, -1397, -1249, -1331, -1007, -1504, 960, -1401, -2009, 197, -1379, -1949, -236, -1077, 123, 422, 615, 1269, 546, -306, 1526, 904, 1194, 1788, 1177, -626, -884, -1526, 199, 766, 1504, -1065, 862, 197, -1034, -1773, -887, -800, 145, 599, -1134, -519, 626, -1205, -1926, 500, -910, -1041, -1395, -1476, -1567, -969, -523, 842, 34, 1794, 646, 862, -1207, -1888, -1002, -78, -9, -672, 1044, 759, 80, -600, 1139, 1019, 57, 2000, 1422, -833, 1414, 1121, -1202, 1630, 1260, -461, 1420, 1244, 1537, 975, 253, -283, 324, -359, 599, -195, 106, 588, 62, -587, -757, 645, 205, 51, 1201, 758, -1209, 673, -390, -624, 1581, 941, -151, 1023, 735, 2820, 1301, 690, -302, 524, -99, -900, -1588, -1189, 1084, 251, 238, 2014, 1792, 1010, 1245, 1633, 1741, -1227, -1540, -1208, -621, 456, -109, 40, -65, 788, -805, -699, -1350, -583, 904, 832, -801, 532, 594, 1972, 1408, 1351, -1177, -1880, -2114, -773, 568, 948, -1015, 1079, 1260, -1111, 482, -130, 1778, 1044, 780, -1491, 245, 912, -316, -1141, -917, -536, -1442, -2346, -785, -1546, -1988, -2003, 257, 909, -1849, -633, -1209, -1538, -1918, -1054, 1606, 2239, 1576, -567, -1500, -1544, -1279, 195, 1369, -817, 293, 1219, -525, 630, 1197, -1698, -2425, -1840, -303, 731, 747, -1169, -251, 269, -950, -75, 1684, -1182, -453, 1005, -1599, 585, 378, -2075, -571, -427, -529, -1159, -1171, -283, -205, -564, -796, 1246, 717, 2277, 927, 539, -454, 559, 440, -717, 1460, 1615, -1030, 1052, 1610, -1169, -138, 847, 226, 39, -612, -1251, -106, -729, -651, 968, 1302, -714, -636, 1727, 353, 1069, 410, -798, -156, 1099, -574, 918, 446, -1310, 1012, 466, 1408, 1591, 765, 1429, 1380, 1757, 1949, 1956, 2378, 1578, 2047, 2148, 916, 98, -7, 1893, 1418, 2141, 348, 1405, 1579, 152, 1134, 1801, -267, 154, 1395, -1166, 469, 1054, -1142, -405, -1073, -1341, -2264, -1581, -364, 869, 1706, -1162, 549, 1550, -1225, -1932, -1666, -1485, -1977, -2055, -1727, -906, -98, -1897, 233, 1492, 892, 108, -331, -1728, -1170, -1700, -1060, 1980, 1790, -1070, -1741, -1909, -11, 1539, 1317, -1600, 94, 497, 421, 443, -197, -1578, -349, -994, -599, -539, 1140, -965, -1419, -129, -1341, 175, -447, -375, 1311, 2055, -371, -650, -307, -1073, 605, 365, -2057, -113, 430, 652, 914, 967, -1012, 
-1586, -2323, 1505, 1248, 559, 262, -486, -401, -1727, 1342, 1546, 50, 56, 432, -330, 119, -604, -1517, -1080, -810, 946, 1127, 1055, -1400, -1703, -1712, -1270, -704, -1317, 807, 1821, 1143, 2760, 1606, 2171, 1120, 409, -150, -147, 404, 959, 2439, 1911, 2189, -906, -141, -866, -904, -142, -458, -557, -708, -1679, -830, -1431, -1583, -1842, -1346, -1086, -1604, -272, 915, -1196, 772, 1056, -638, -1234, -1897, -500, -81, -822, -1289, -1613, -735, -117, 785, 168, -1090, 1133, 922, -1096, -746, 1384, 287, -547, -1063, -1376, -2201, -1204, -2176, -1570, -1757, -1511, -2241, -771, -1737, 1099, 830, -1588, 724, 1243, -1542, 693, 805, -1690, -240, 1665, -1700, -4, -668, 2149, 816, 1042, -818, -1841, 22, -764, -507, 449, -1151, -617, 289, -843, -1596, -240, 498, -234, -657, -752, 480, 1678, -319, -481, 193, -811, 171, -119, -2128, -202, -848, 1717, 1140, 1700 }; /* 3rd LSF quantizer (not in MR122, MR515 and MR475) */ __device__ static const Word32 dico3_lsf_3[] = { 67, -17, 66, -12, -1690, -581, -104, -272, -1076, -1186, -1845, -376, -1140, -926, -420, -58, -259, -656, -1134, -553, 1788, 1227, 455, 129, 462, 441, -240, -528, 840, 514, 130, -75, 1114, 623, 153, 216, 1068, 564, -6, -276, 1119, 727, 190, -68, 704, 306, 119, -264, 329, 61, -100, 156, 364, 123, 183, -208, -171, -123, 220, -65, -306, -62, 402, 17, -660, -938, -266, 0, 385, 235, 276, 285, 320, 268, -336, -200, -724, 17, -84, 381, -544, 429, 494, 519, -117, 288, 304, 329, 643, 157, 701, 508, 1200, 625, 796, 608, 998, 421, 492, 632, 1204, 780, 446, 132, 1257, 844, 547, 449, 829, 658, 541, 470, 1132, 1258, 918, 639, 547, 51, 423, 279, 9, 392, 83, 94, 542, 543, 229, -147, -198, 129, 194, -185, -863, -1321, -302, 30, -597, -629, -19, 114, -900, -1081, 466, 353, -1483, -1573, 15, -143, -1708, -2059, -751, 196, -1876, -2067, -642, -258, -2335, -1470, -450, -564, -584, -186, -872, -414, -1805, -988, -1125, -1310, -726, -1129, 28, 169, -1039, -864, -718, -246, 484, 36, -233, -49, 265, 67, 289, 467, 178, 543, 810, 540, 84, 282, 672, 703, -975, -777, 129, 287, -938, -227, 955, 595, -1617, -289, 836, 649, -1847, -215, 1106, 718, -2034, -1085, 650, 440, -2101, -529, 907, 575, -2011, -336, 670, 204, -2389, -692, 360, 137, -2156, -2204, -9, 280, -266, 119, 39, 193, 78, -59, -120, 226, -975, -858, -781, -1095, -619, -413, -451, -842, -1216, -1321, -813, -883, -1376, -1615, -394, -428, -737, -1113, -549, -790, -880, -975, -967, -642, -985, -886, -1273, -1361, -473, -804, -1401, -1407, 160, -265, -919, -275, -248, -250, -718, -380, 97, -103, -375, -229, -415, -193, -135, -555, 628, 361, 119, 216, 579, 364, 391, 209, 634, 522, -154, -148, 526, 389, 170, 33, 105, 267, 64, 380, -1503, -1000, -30, -369, -1070, 58, 647, 223, -1520, -291, 621, 307, -1531, 156, 762, 404, -2029, 141, 734, 499, -1849, -650, 306, 512, -187, -104, -59, 438, 134, -230, 156, -186, -61, -260, -16, 10, -569, -3, -421, -297, -1725, -521, -346, 178, -1362, -59, -44, 157, -2146, -461, -470, -349, -2170, -1, -369, -121, -1579, -373, -900, -1015, -1117, -591, -613, -784, -561, 122, -75, -449, -4, -171, -123, -372, 192, 168, -76, -132, 252, -107, 340, 210, 392, 509, 272, 181, -109, 145, 218, 119, -416, -263, 485, 265, -181, -8, -286, 226, -244, -218, 69, -290, -158, 191, -1, -64, -592, -90, 213, -96, 255, 435, 178, -80, -369, -18, -33, -80, -42, 415, 140, -222, 1143, 651, 649, 329, 767, 556, 249, 235, 948, 413, 442, 279, 141, 339, 356, 557, -470, -170, 99, 237, -569, -800, 352, 565, 282, 473, 470, 332, -199, -690, -1284, -917, -193, -426, -800, -1122, -26, -371, -490, -193, 637, 595, 519, 
330, 408, -115, 79, 12, 477, 87, -103, -376, -666, -347, -277, -291, -510, -481, 169, 297, -829, -738, -205, -171, -320, -540, 328, 283, -859, -958, 442, -2, 556, 686, 130, 56, 1383, 1012, 755, 427, 612, 741, 628, 553, -339, -796, 134, 277, -633, -1085, -2, -246, -880, -1035, -1607, -1064, -994, -474, -1138, -488, -414, -795, 73, -206, -8, -139, 439, 204, -176, -578, 23, 131, -269, -757, -191, 245, -109, -338, 112, 316, 120, -406, -118, 611, -180, -186, -645, 115, -173, 34, -518, -489, -151, 61, -583, -844, 220, -138, -681, -1020, 391, -17, -598, -321, 157, -295, 129, 155, -926, -875, -987, 285, 241, -83, -125, -125, 620, 597, 432, 92, 393, 78, 409, 61, -393, -739, -413, -748, 83, 54, 361, 27, -1084, 130, -337, -694, -1565, 297, 318, -19, -1873, 36, 51, -317, -2323, -246, 231, -84, -2306, -783, 40, -179, -2233, -930, -474, -462, -754, -86, -288, -626, -2411, -455, -63, 171, -1099, -1094, -26, -143, -1193, -455, -406, -381, -605, -210, -96, -51, -580, -476, -276, -15, -1195, -634, -1203, -881, -378, -221, -669, -952, 594, 178, -403, -676, 763, 327, 601, 290, 172, 300, 203, 157, -56, -336, 356, 24, -228, -296, -259, -29, -186, 263, 416, 14, -353, 373, -12, -216, 257, 96, 174, 57, -1526, -616, -954, -499, -497, -152, -333, 125, 105, 200, 179, -97, -331, -224, 765, 697, 760, 256, 301, 59, 455, -85, 204, 288, -514, 240, 251, -109, 256, 417, -34, -413, 101, 430, 384, 156, -31, -10, 206, 426, 589, 145, 143, 71, 808, 906, 333, 349, 986, 938, 589, 331, 1300, 824, 187, 509, 1062, 653, 379, 466, 1462, 937, 401, 274, 787, 861, 265, 2, 609, 553, 28, 305, 926, 340, 106, 386, 241, -267, -147, 225, -178, -534, 347, 502, -643, -381, 397, 30, -651, -733, -435, 398, -407, -726, -484, -248, -789, -914, -438, -476, -498, -390, 75, -295, -964, -590, -606, 150, -121, -49, -155, -78, 935, 550, 389, 38, -321, 127, 424, 315, -285, -113, 283, 259, 658, 203, 322, 486, 903, 505, 748, 417, 611, 423, 555, 512, 239, -83, -578, -19, -339, -731, 349, 13, -934, -1399, -114, -360, 107, 692, 182, 90, -1243, -1538, -1551, -725, -568, -903, -1363, -525, -517, -853, -861, -1004, -168, -690, -835, 63, -137, -556, -547, 144, -286, -817, 485, 319, -147, -408, 526, 246, -347, -434, 297, -28, -290, -471, -1110, -1285, -460, -359, -988, -794, 1347, 1299, 690, 523, 1216, 1068, 1094, 757, 825, 1140, 752, 494, 1252, 1365, 1195, 898, 521, 1053, 532, 432, -334, -216, -313, -263, -160, 52, -472, -155, 127, 136, -380, 44, 851, 410, -162, -489, 123, -255, -796, -667, 1090, 917, 789, 493, 1397, 1197, 558, 202, -51, -118, -342, -701, 83, 108, -42, -441, 61, 95, 287, 256, -27, 89, 524, 531, 351, 227, 592, 545, 697, 155, -164, 307, 638, 274, -489, -50, 754, 240, -166, -124, -116, -579, -1212, -63, 190, -295, -1040, -1296, 147, -376, -177, -113, 841, 1241, 1051, 668, 2, 293, 551, 304, -1096, -953, -248, 376, -750, -965, 87, 516, -275, -516, 689, 391, -379, -643, 876, 594, -390, -1013, -645, 573, -107, -568, -689, -826, -1025, -27, -328, -203, 861, 749, 548, 233, -1660, -1043, 451, 108, -660, -620, 430, 236, 21, -396, -1158, -631, 1372, 1298, 967, 577, 1125, 1125, 589, 454, -323, -865, -467, 153, -468, -699, -804, -509, -392, -718, -204, -35, -603, -1093, -567, -162, -505, -1004, -102, 350, 219, 224, 423, 252, 395, 591, 608, 363, -746, -96, 373, 172, 171, 295, 714, 339, 233, 77, 107, 277, 157, 153, -499, -356, 1547, 1073, 576, 494, -292, -339, -504, -592, -903, -72, -619, -481, -1594, -1117, -567, -254, -793, -507, -564, -291, -492, -532, 502, 560, -382, 427, 600, 230, -227, 477, 251, 75, 285, 842, 813, 476, -1310, -1333, 186, 377, -587, -917, 643, 
381, -1186, -553, 411, 82, -1127, -820, -174, -540, -604, 119, 543, 205, -380, 657, 909, 567, 112, -298, -374, 114, -857, -251, 56, 159, 401, 345, -34, -140, -111, -607, 41, 614, 355, -114, -77, 474, 578, 56, 1450, 924, 1098, 1420, 741, 400, 246, 22, 588, 313, -121, 327, 831, 472, -1138, -608, 856, 552, -1241, -1072, 638, 600, -358, 254, -333, -303, -646, 739, 358, 74, 1226, 1671, 1221, 849, 2241, 1624, 983, 636, 1841, 1477, 749, 384, 350, 263, 87, 128, -1902, -941, -144, -64, -1734, -255, 288, -31, -2644, -1238, 366, 235, -1643, -1092, -1344, -304, -541, -1075, -1116, 123, -1178, -252, -816, -180, -1016, 533, 565, 233, -487, -430, -188, 334, 867, 1236, 534, 171, -1590, -1607, 635, 630, -2196, 310, 924, 412, -2358, -328, 956, 529, -2639, -377, 630, 278, -2602, 317, 799, 299, -2406, 133, 340, 31, -2156, -1468, 131, 125, -1184, -490, -139, 46, -744, 447, 891, 564, 67, -451, 646, 604, -553, -429, -876, 396, 162, -66, 1305, 915, 479, 579, 1088, 794, 450, 278, 566, 324, -1057, -154, 148, -177, -2545, 168, 1070, 592, -2351, -42, 819, 345, -2344, -707, 721, 250, -2175, -1497, -309, 122, -78, -73, 120, 173, -4, 262, -263, -261, -431, -64, -405, -732, -2609, 116, -83, -193, -1525, -944, -477, -725, -508, 307, 170, 172, 832, 417, 832, 686, -225, 177, 894, 818, -482, -389, 1279, 1039, -383, 201, -350, 40, 730, 635, 226, 526, 503, 462, 338, 398, 535, 714, 40, -282, 1482, 1471, 1085, 731, 1561, 1072, 909, 693, 1419, 1282, 889, 879, 1153, 728, 1186, 840, -226, 1130, 949, 689, -494, -986, -1556, -128, -568, -721, -713, -26, 317, 524, 70, 135, -405, -865, -1766, -652, -174, -801, 885, 773, -153, -91, 1099, 751, -506, -1149, 853, 646, 241, 782, 519, 539, 1853, 1700, 1101, 684, -1249, -1486, -464, 188, -893, -1409, -1312, -341, -135, 438, -175, 18, 1111, 976, 319, 208, -1430, -1768, 83, 458, -530, -1000, 307, 129, -840, -15, -29, -356, -911, -924, -1147, -242, -119, -528, 127, -133, -761, -765, 190, -83, -315, 895, 522, 231, -222, 102, -63, -428, 316, 699, 379, 70, 25, 716, 314, -108, 507, 874, 566, 238, 108, 941, 519, 195, 425, -60, -427, 257, 139, -103, -630, 446, 334, 370, 412, 48, -172, -690, -283, 557, 187, -286, 158, 483, 140, 270, -344, -631, 924, 579, -116, 132, 142, 466, -68, -64, 230, -145, -302, -542, -803, -912, 1018, 737, -773, 1015, 630, 297, -2596, 95, 445, 336, -2122, 491, 510, 191, -1253, 161, -2, -324, -1450, -633, -712, -105, -842, -254, -411, 100, -640, -290, 1010, 763, -650, 313, 1169, 730, 140, 505, 1030, 766, 772, 287, 1067, 823, 495, 749, 305, 323, -164, 462, 78, 399, -342, -874, 69, 597, -16, 620, 621, 337, -138, -444, -265, 218, 84, -450, 953, 666, -222, -803, 541, 604, -921, -1376, 244, 116, -841, -723, 630, 588, 140, 663, 294, 368, 935, 1046, 881, 759, 1746, 1464, 916, 628, 436, 963, 281, 1, -119, 74, 542, 213, 1, -567, 301, 241, 260, 435, 222, 396, 936, 957, 1108, 703, 510, 506, 808, 478, 601, 694, 960, 620, 972, 741, 980, 600, 834, 717, 767, 684, 643, 972, 935, 638, 501, 661, 720, 851, -105, -632, -303, -117, -429, 130, 789, 442, -522, -188, 704, 373, -759, 42, 814, 523, -531, -1137, 373, 578, -682, -1203, -455, 285, -1163, -1577, -1098, 44, 81, -82, 712, 363, 477, 246, 954, 622, 1604, 1622, 1277, 891, 1409, 859, 924, 892, 774, 1041, 947, 1142, 40, -546, -75, 288, -616, -106, -697, -26, -169, -160, -891, -739, -279, -384, -1029, -350, 1781, 1308, 1046, 816, 1580, 1533, 1472, 1178, 1505, 1076, 1216, 899, 890, 904, 564, 654, 920, 692, 1021, 856, -493, 132, 177, 505, 71, 195, -28, 97, 456, 351, -164, 88, 439, 278, -40, 350, 1395, 949, 234, -95, -805, -472, 38, -163, 367, -98, 489, 
523, 1025, 1178, 1212, 906, 319, 1314, 814, 461, -123, -543, -804, 447, -748, -324, -897, -1127, -737, -501, -789, -713, 715, 777, 1239, 922, 1949, 1939, 1368, 865, 730, 880, 758, 388, -871, 454, 17, -251, -381, -810, -1583, 239, -521, -966, -792, 259, -890, -1358, -770, -73, 166, 349, -212, 323, -840, -301, 473, 435, -679, -464, 728, 351, -156, -199, 667, 432, 29, -252, 415, 480, -731, -379, 145, 559, -528, -631, -1158, -159, 445, 273, 123, 639, 373, -126, 800, 568, 84, -162, 720, 712, -830, -536, -185, 222, 408, 452, 501, 771, -897, -1355, -67, 442, -792, -1406, 566, 602, 167, -326, 509, 330, -95, -626, -730, -344, 1668, 1217, 779, 455, 1316, 828, 584, 719, 404, -31, 1013, 789, 89, 107, 891, 549, 871, 1581, 917, 671, 866, 1479, 1289, 854, 391, 1068, 1122, 812, 78, -562, 345, 563, 429, -103, 417, 787, -122, -437, 411, 788, -913, -417, 602, 754, -226, -16, 151, 760, -700, 118, -104, -14, -1128, 48, 284, 393, -390, -419, -639, -116, -910, 306, 316, -13, 1207, 984, 821, 669, -1195, -693, 140, -213, -884, -416, -199, -558, -616, 245, -404, -664, 262, 56, -617, -724, -85, -491, -320, -656, -570, -831, -129, -528, -1506, -63, -367, -385, -358, -321, 4, 51, -366, -214, 319, 511, 146, 671, -17, -291, -110, 464, -139, -496, -202, 220, -312, -631, -660, -73, -655, -820, -662, -653, -1288, -857, -430, -953, -959, -264, -49, -468, -72, -381, -350, -563, -193, -407, 55, -408, -803, 11, -309, 649, 188, -198, -512, 461, -79, -458, -1318, -263, -134, -523, -1657, -435, -495, -765, 57, -347, -414, 434, -1141, -242, -664, -857, 34, -68, -707, -338 }; #define MR515_3_SIZE 128 /* 3rd LSF quantizer (MR515 and MR475) */ __device__ static const Word32 mr515_3_lsf[] = { 419, 163, -30, -262, -455, -789, -1430, -721, 1006, 664, 269, 25, 619, 260, 183, 96, -968, -1358, -388, 135, -693, 835, 456, 154, 1105, 703, 569, 363, 1625, 1326, 985, 748, -220, 219, 76, -208, -1455, -1662, 49, 149, -964, -172, -752, -336, 625, 209, -250, -66, -1017, -838, -2, 317, -2168, -1485, -138, 123, -1876, -2099, -521, 85, -967, -366, -695, -881, -921, -1011, -763, -949, -124, -256, -352, -660, 178, 463, 354, 304, -1744, -591, -282, 79, -2249, 175, 867, 499, -138, -180, -181, -21, -2291, -1241, -460, -520, -771, 451, -10, -308, 271, -65, 4, 214, -279, -435, -43, -348, -670, 35, -65, -211, 806, 535, 85, 297, 57, 239, 722, 493, 225, 661, 840, 547, -540, -376, 14, 349, 469, 721, 331, 162, -544, -752, -62, -10, 398, -88, 724, 701, -19, -533, -94, 601, 136, -71, -681, -747, -166, -344, 261, -50, 161, -52, 485, 337, -1675, 50, 190, -93, -2282, -231, -194, -82, -95, -595, -154, 128, 894, 501, 588, 457, -345, 206, 122, 110, -631, -227, -569, 3, 408, 239, 397, 226, -197, -2, 128, 491, 1281, 904, 292, 215, 538, 306, 259, 509, -677, -1047, 13, 321, -679, -588, -358, -212, -558, 243, 646, 479, 486, 342, 634, 532, 107, 802, 331, 136, -112, -398, -1031, -286, -326, -705, 288, 272, 1299, 1144, 1178, 860, -423, 121, -385, -148, -295, -302, -834, -819, 16, -24, -201, -476, 555, 91, -245, 294, -38, -379, -962, -1221, -1191, -1518, -273, -395, -390, -1013, -645, 573, -1843, -1030, 505, 468, 744, 947, 609, 493, -689, -1172, -628, -135, -1026, 195, 411, 196, 1582, 1147, 575, 337, -1239, -777, -648, -142, 595, 825, 967, 735, -1206, -970, -81, -342, -745, 13, -72, 375, 454, 19, 1407, 921, -1647, -172, 861, 562, 928, 1537, 1063, 740, -2472, -952, 264, 82, -502, -965, -1334, 123, 867, 1236, 534, 171, -2320, -460, 780, 363, -1190, -617, 252, -61, -174, 34, 1011, 788, -2333, 247, 423, 153, -16, -355, 262, 449, -1576, -1073, -544, -371, -615, -305, 1051, 805, 687, 
528, 6, -182, 935, 875, 1002, 809, 199, 257, 126, 76, -584, -1138, 599, 556, -1105, -1391, -1591, -519, -977, -1325, 108, 347, -722, -975, 365, 101, -145, 681, 249, -153, 0, -334, -570, 159, 412, 285, -336, -617, -953, -966, 887, 689, -1251, 84, -185, -398, -592, 433, 1044, 653, 85, 329, -40, 361, -433, -705, 466, 574, -154, 654, 592, 290, -167, 72, 349, 175, 674, 297, 977, 720, 1235, 1204, 757, 488, -400, -269, 538, 372, -1350, -1387, -1194, -91, 1262, 876, 775, 700, -599, -38, -430, -722, 1976, 1630, 991, 608, 111, 276, -226, -96, -947, -388, -11, -7, -303, -531, -839, 338, 1734, 1710, 1405, 1013, -516, -855, -645, 210, -688, -416, 513, 230, -822, -637, -1146, -320, -952, -658, -694, 183, -114, -623, 818, 674, -191, -204, 731, 635, 51, 1221, 883, 576, -954, -431, 826, 598, -342, -755, -900, -407, -1126, -354, -206, -512, -547, -810, -357, -620, 66, 515, -73, -410, -872, -945, -1444, -1227, 191, -17, -544, -231, -1540, -544, -901, -886 }; #define MR795_1_SIZE 512 /* 1st LSF quantizer (MR795) */ __device__ static const Word32 mr795_1_lsf[] = { -890, -1550, -2541, -819, -970, 175, -826, -1234, -762, -599, -22, 634, -811, -987, -902, -323, 203, 26, -383, -235, -781, -399, 1262, 906, -932, -1399, -1380, -624, 93, 87, -414, -539, -691, 37, 633, 510, -387, -476, -1330, 399, 66, 263, -407, -49, -335, -417, 1041, 1865, -779, -1089, -1440, -746, -858, 832, -581, -759, -371, -673, -506, 2088, -560, -634, -1179, 271, 241, 14, -438, -244, -397, 463, 1202, 1047, -606, -797, -1438, -51, -323, 481, -224, -584, -527, 494, 881, 682, -433, -306, -1002, 554, 659, 222, 171, -160, -353, 681, 1798, 1565, -852, -1181, -1695, -336, -666, 114, -581, -756, -744, -195, 375, 497, -465, -804, -1098, 154, 282, -131, -50, -191, -719, 323, 732, 1542, -722, -819, -1404, 105, -250, 185, -178, -502, -742, 321, 510, 1111, -323, -567, -966, 127, 484, 338, -160, 52, -338, 732, 1367, 1554, -626, -802, -1696, -286, -586, 676, -695, -343, -370, -490, 295, 1893, -630, -574, -1014, -80, 645, -69, -6, -318, -364, 782, 1450, 1038, -313, -733, -1395, 120, 60, 477, -264, -585, -123, 711, 1245, 633, -91, -355, -1016, 771, 758, 261, 253, 81, -474, 930, 2215, 1720, -808, -1099, -1925, -560, -782, 169, -804, -1074, -188, -626, -55, 1405, -694, -716, -1194, -660, 354, 329, -514, -55, -543, 366, 1033, 1182, -658, -959, -1357, -55, -184, 93, -605, -286, -662, 404, 449, 827, -286, -350, -1263, 628, 306, 227, -16, 147, -623, 186, 923, 2146, -674, -890, -1606, -443, -228, 339, -369, -790, -409, 231, 86, 1469, -448, -581, -1061, 594, 450, -177, -124, -170, -447, 671, 1159, 1404, -476, -667, -1511, -77, -138, 716, -177, -372, -381, 451, 934, 915, -250, -432, -822, 272, 828, 446, 26, 19, -31, 698, 1692, 2168, -646, -977, -1924, -179, -473, 268, -379, -745, -691, 11, 127, 1033, -488, -917, -825, 61, 323, 135, 147, -145, -686, 685, 786, 1682, -506, -848, -1297, 35, 90, 222, -23, -346, -670, 455, 591, 1287, -203, -593, -1086, 652, 352, 437, 39, 63, -457, 841, 1265, 2105, -520, -882, -1584, -328, -711, 1421, -596, -342, -70, 209, 173, 1928, -423, -598, -921, 421, 605, -38, -2, -245, -127, 896, 1969, 1135, -379, -518, -1579, 173, 118, 753, -55, -381, -52, 985, 1021, 753, -2, -291, -891, 753, 992, 423, 264, 131, -196, 895, 2274, 2543, -635, -1088, -2499, -529, -982, 526, -764, -830, -548, -436, 316, 599, -675, -940, -746, -57, 236, -11, -201, -81, -798, 16, 845, 1558, -737, -985, -1212, -468, 17, 290, -279, -584, -700, 183, 822, 705, -265, -492, -1187, 421, 152, 468, -390, 166, -268, 39, 1550, 1868, -635, -966, -1571, -453, -492, 910, -284, -1027, -75, 
-181, -133, 1852, -445, -624, -1174, 420, 367, -49, -389, -212, -169, 707, 1073, 1208, -539, -710, -1449, 83, -163, 484, -236, -543, -355, 338, 1175, 814, -246, -309, -958, 606, 760, 60, 166, -8, -163, -306, 1849, 2563, -747, -1025, -1783, -419, -446, 209, -718, -566, -534, -506, 693, 857, -463, -697, -1082, 325, 431, -206, -15, -8, -763, 545, 919, 1518, -611, -783, -1313, 256, -55, 208, -165, -348, -662, 321, 680, 930, -326, -429, -951, 484, 446, 570, -197, 72, -73, 909, 1455, 1741, -563, -737, -1974, -124, -416, 718, -478, -404, -314, -16, 446, 1636, -551, -537, -750, -58, 638, 214, 55, -185, -271, 1148, 1301, 1212, -483, -671, -1264, 117, 285, 543, -204, -391, -111, 513, 1538, 854, -114, -190, -978, 877, 595, 464, 260, 260, -311, 748, 2283, 2216, -517, -945, -2171, -326, -708, 378, -812, -691, -232, -560, 687, 1409, -732, -690, -836, -359, 645, 386, -265, 62, -678, 145, 1644, 1208, -555, -988, -1233, -78, 14, 114, -327, -358, -489, 392, 677, 697, -201, -236, -1140, 693, 449, 178, -243, 256, -433, 611, 1385, 2456, -612, -901, -1464, -307, -17, 499, -315, -667, -254, 256, 428, 1463, -486, -422, -1056, 655, 370, 18, -102, -185, -276, 755, 1578, 1335, -488, -603, -1418, 182, -93, 870, -73, -458, -348, 835, 862, 957, -282, -333, -746, 547, 839, 428, 273, -89, 13, 940, 1708, 2576, -418, -1084, -1758, -44, -358, 259, -497, -643, -560, 99, 557, 961, -421, -766, -917, 295, 326, 184, 175, 15, -626, 532, 878, 1981, -443, -768, -1275, 221, 156, 268, 39, -363, -505, 695, 772, 1140, -162, -459, -912, 709, 444, 658, 25, 303, -312, 1268, 1410, 1715, -297, -766, -1836, -263, -108, 1070, -406, -13, -129, 57, 438, 2734, -374, -487, -835, 304, 696, 164, 104, -235, 5, 1611, 1900, 1399, -229, -582, -1325, 405, 192, 817, -87, -438, 111, 1028, 1199, 993, 68, -175, -934, 1033, 1117, 451, 478, 200, -248, 2127, 2696, 2042, -835, -1323, -2131, -799, -692, 466, -812, -1032, -469, -622, 288, 920, -701, -841, -1070, -411, 512, 8, -390, -91, -744, -30, 1043, 1161, -822, -1148, -1156, -294, -46, 110, -411, -374, -678, 214, 531, 668, -406, -420, -1194, 487, 232, 303, -318, 91, -472, 123, 1232, 2445, -722, -952, -1495, -738, -675, 1332, -543, -606, -211, -95, -98, 1508, -549, -514, -1193, 473, 211, 73, -288, -112, -389, 537, 1332, 1258, -567, -755, -1545, 71, -283, 632, -170, -481, -493, 681, 1002, 817, -356, -331, -877, 419, 706, 346, 241, -34, -326, 377, 1950, 1883, -727, -1075, -1625, -233, -543, 116, -524, -806, -585, -73, 478, 729, -288, -925, -1143, 173, 447, -52, 68, -229, -606, 449, 529, 1797, -591, -875, -1363, 183, -144, 324, -103, -452, -666, 623, 488, 1176, -238, -511, -1004, 326, 552, 458, 136, 108, -319, 626, 1343, 1883, -490, -646, -1730, -186, -449, 984, -738, -76, -170, -550, 755, 2560, -496, -510, -947, 210, 694, -52, 84, -322, -199, 1090, 1625, 1224, -376, -603, -1396, 343, 74, 632, -175, -502, -32, 972, 1332, 734, 52, -295, -1113, 1065, 918, 160, 393, 107, -397, 1214, 2649, 1741, -632, -1201, -1891, -719, -277, 353, -651, -880, -122, -211, 209, 1338, -562, -714, -1059, -208, 388, 159, -320, -61, -551, 293, 1092, 1443, -648, -865, -1253, -49, -143, 305, -401, -227, -585, 561, 532, 927, -117, -443, -1188, 507, 436, 292, -79, 233, -458, 671, 1025, 2396, -633, -842, -1525, -308, -286, 640, -373, -621, -407, 418, 253, 1305, -315, -581, -1137, 572, 685, -281, 61, -68, -371, 991, 1101, 1498, -493, -683, -1362, -47, 164, 704, -256, -314, -268, 631, 949, 1052, -118, -348, -833, 68, 1180, 568, 152, 117, 34, 1113, 1902, 2239, -601, -959, -1706, -143, -489, 480, -332, -655, -574, 54, 353, 1192, -462, -652, -796, 
150, 549, 112, 195, -111, -515, 679, 1108, 1647, -558, -749, -1217, -9, 272, 341, -53, -265, -535, 489, 843, 1298, -120, -482, -1032, 632, 543, 408, 179, 306, -526, 1124, 1464, 2244, -417, -786, -1562, -224, -384, 1364, -377, -459, -25, 385, 489, 2174, -332, -651, -829, 544, 553, 61, 22, -113, -89, 1128, 1725, 1524, -216, -373, -1653, 161, 316, 908, -165, -222, -67, 1362, 1175, 789, 73, -252, -767, 738, 932, 616, 362, 246, -126, 787, 2654, 3027, -691, -1106, -2190, -565, -588, 524, -590, -979, -490, -263, 397, 982, -577, -837, -945, -22, 435, -49, -190, -118, -629, -88, 1240, 1513, -636, -1051, -1019, -291, 189, 259, -257, -470, -629, 145, 945, 894, -326, -364, -1094, 543, 260, 630, -202, 189, -209, 357, 1379, 2091, -569, -1075, -1449, -714, -239, 919, -420, -705, -84, -109, -114, 2407, -413, -529, -1177, 482, 368, 131, -186, -72, -131, 861, 1255, 1220, -611, -658, -1341, 227, -121, 631, -176, -489, -218, 745, 1175, 957, -321, -148, -936, 671, 966, 216, 340, -3, -143, 469, 1848, 2437, -729, -961, -1683, -213, -254, 321, -511, -438, -521, -126, 725, 903, -340, -685, -1032, 316, 480, 20, 23, -89, -551, 353, 1051, 1789, -544, -757, -1364, 298, -25, 436, -100, -392, -519, 467, 754, 1078, -210, -398, -1078, 620, 658, 630, 33, 147, -178, 921, 1687, 1921, -325, -528, -1978, 2, -285, 910, -371, -490, -230, 0, 597, 2010, -496, -395, -834, 37, 945, 245, 181, -160, -144, 1481, 1373, 1357, -355, -601, -1270, 298, 322, 672, -193, -336, 77, 1089, 1533, 922, 177, -39, -1125, 996, 781, 536, 456, 366, -432, 1415, 2440, 2279, -466, -758, -2325, -303, -509, 387, -727, -557, 66, -145, 643, 1248, -544, -676, -916, -225, 862, 588, -152, 40, -533, 423, 1423, 1558, -572, -843, -1145, -128, 85, 461, -238, -257, -584, 605, 748, 861, 24, -202, -1409, 797, 487, 303, -181, 364, -182, 616, 1378, 2942, -494, -852, -1441, -292, 61, 812, -84, -723, -182, 555, 532, 1506, -365, -493, -1057, 822, 588, 11, -14, -18, -230, 1001, 1401, 1451, -474, -569, -1292, 302, 62, 1062, -70, -376, -222, 982, 974, 1149, -196, -234, -795, 479, 1098, 499, 362, 58, 70, 1147, 2069, 2857, -487, -878, -1824, 73, -288, 348, -358, -500, -508, 199, 721, 1242, -78, -697, -795, 361, 536, 196, 374, 110, -735, 847, 1051, 1896, -366, -713, -1182, 315, 320, 429, 72, -215, -450, 759, 886, 1363, -30, -428, -834, 861, 627, 796, 118, 468, -279, 1355, 1883, 1893, -188, -642, -1612, 63, -175, 1198, -418, -211, 51, 414, 587, 2601, -234, -557, -858, 424, 889, 222, 136, -101, 83, 1413, 2278, 1383, -84, -445, -1389, 414, 313, 1045, 29, -343, 65, 1552, 1647, 980, 183, -91, -829, 1273, 1413, 360, 553, 272, -107, 1587, 3149, 2603 }; #define DICO1_SIZE_5 128 #define DICO2_SIZE_5 256 #define DICO3_SIZE_5 256 #define DICO4_SIZE_5 256 #define DICO5_SIZE_5 64 /* 1st LSF quantizer (MR122) */ __device__ static const Word32 dico1_lsf_5[DICO1_SIZE_5 * 4] = { -451, -1065, -529, -1305, -450, -756, -497, -863, -384, -619, -413, -669, -317, -538, -331, -556, -414, -508, -424, -378, -274, -324, -434, -614, -226, -500, -232, -514, -263, -377, -298, -410, -151, -710, -174, -818, -149, -412, -156, -429, -288, -462, -186, -203, -170, -302, -191, -321, -131, -147, -297, -395, -228, -214, -245, -192, -67, -316, -71, -327, -104, -205, -94, -183, -143, -38, -193, -95, 16, -76, -124, -248, 23, -237, 24, -244, 18, -136, 44, -111, -33, -24, -25, 0, 149, 19, 23, -143, 158, -169, 174, -181, 133, -55, 165, -26, 111, 84, 98, 75, 87, 183, -115, -11, -8, 130, 11, 170, 254, 77, 205, 17, 183, 112, 262, 194, 202, 287, 95, 189, -42, -105, 234, 179, 39, 186, 163, 345, 332, 199, 299, 161, -54, 285, -78, 
281, -133, 141, -182, 111, 249, 341, 271, 364, 93, 403, 75, 391, 92, 510, -138, 220, -185, -29, -34, 361, -115, 320, 3, 554, 99, 286, 218, 591, -245, 406, -268, 453, 0, 580, 25, 606, 275, 532, 148, 450, -73, 739, -285, 518, -288, 94, -203, 674, -140, -74, 205, 714, -114, 299, 176, 923, 182, 557, 240, 705, -16, 513, 485, 593, 293, 384, 451, 617, -38, 50, 563, 529, 303, 209, 459, 363, 433, 452, 450, 454, 367, 606, 477, 741, 432, 353, 368, 267, 361, 716, 273, 583, 453, 166, 510, 172, 201, 629, 274, 191, 568, 639, 302, 298, 634, 387, 643, 350, 587, 560, 612, 565, 600, 788, 487, 672, 512, 1015, 321, 333, 357, 854, -125, 413, 474, 712, 17, -151, 564, 285, 270, -241, 971, 889, 489, 220, 510, 896, 549, 924, 327, 825, 290, 911, 540, 1108, 158, 805, 199, 957, 511, 730, 100, 874, 13, 791, 435, 632, 676, 972, 249, 900, 467, 1218, 781, 1074, 585, 785, -23, 669, 267, 1043, 619, 1084, 615, 1145, 622, 905, 916, 1049, 80, 331, 584, 1075, 89, 639, 988, 961, 770, 720, 798, 699, 492, 447, 899, 627, 271, 1188, 725, 1333, 87, 603, 832, 1603, 616, 1127, 890, 1505, 1000, 1156, 866, 1009, 995, 827, 1149, 858, 817, 1450, 773, 1320, 500, 1389, 312, 1153, -20, 1084, 64, 1283, 2, 1172, 399, 1869, 514, 1706, 502, 1636, 886, 1522, 416, 600, 1131, 1350, 1275, 1390, 889, 1795, 914, 1766, 227, 1183, 1250, 1826, 505, 1854, 919, 2353, -199, 431, 152, 1735, -213, -28, 392, 1334, -153, -52, 978, 1151, -323, -400, 813, 1703, -136, 84, 1449, 2015, -331, -143, -137, 1192, -256, 534, -157, 1031, -307, -439, 542, 731, -329, -420, -97, 616, -362, -168, -322, 366, -247, -110, -211, 89, -196, -309, 20, 59, -364, -463, -286, 89, -336, 175, -432, 141, -379, -190, -434, -196, -79, 150, -278, -227, -280, 166, -555, -422, -155, 541, -366, 54, -29, -83, -301, -774, 186, 628, -397, -264, 242, 293, -197, -585, 124, 410, 53, -133, 10, 340, -570, -1065, 65, -446, 68, -493, 383, 937, -357, -711, -359, -250, -677, -1068, 292, -26, 363, 6, 607, 1313, -127, -10, 1513, 1886, 713, 972, 1469, 2181, 1443, 2016 }; /* 2nd LSF quantizer (MR122) */ __device__ static const Word32 dico2_lsf_5[DICO2_SIZE_5 * 4] = { -1631, -1600, -1796, -2290, -1027, -1770, -1100, -2025, -1277, -1388, -1367, -1534, -947, -1461, -972, -1524, -999, -1222, -1020, -1172, -815, -987, -992, -1371, -1216, -1006, -1289, -1094, -744, -1268, -755, -1293, -862, -923, -905, -984, -678, -1051, -685, -1050, -1087, -985, -1062, -679, -989, -641, -1127, -976, -762, -654, -890, -806, -833, -1091, -706, -629, -621, -806, -640, -812, -775, -634, -779, -543, -996, -565, -1075, -580, -546, -611, -572, -619, -760, -290, -879, -526, -823, -462, -795, -253, -553, -415, -589, -439, -533, -340, -692, -935, -505, -772, -702, -1131, -263, -306, -971, -483, -445, -74, -555, -548, -614, -129, -693, -234, -396, -246, -475, -250, -265, -404, -376, -514, -417, -510, -300, -313, -334, -664, -463, -814, -386, -704, -337, -615, -234, -201, -233, -239, -167, -567, -203, -619, -147, -415, -115, -352, -166, -750, -171, -761, -270, -879, -264, -903, -367, -744, 43, -475, 14, -653, 43, -670, 11, -448, -59, -521, -126, -119, -155, -613, -42, -863, -27, -931, 136, -483, 183, -468, 55, -298, 55, -304, 313, -609, 313, -720, 322, -167, 100, -541, -3, -119, -111, -187, 233, -236, 260, -234, 26, -165, 134, -45, -40, -549, 360, -203, 378, -388, 450, -383, 275, 20, 182, -103, 246, -111, 431, 37, 462, -146, 487, -157, -284, -59, 503, -184, 24, 53, -3, 54, 122, 259, 333, 66, 484, 104, 436, 68, 195, 116, 190, 206, 269, -9, 482, 352, 382, 285, 399, 277, 452, 256, 69, 186, 13, 297, -13, 259, -95, 30, 56, 394, 196, 425, 205, 456, 
281, 577, 15, 191, 375, 290, 407, 576, -56, 227, 544, 405, 0, 549, -92, 528, -229, 351, -245, 338, -362, 435, 167, 527, -75, 302, 91, 824, 129, 599, 496, 679, 186, 749, 153, 737, -281, 600, -348, 615, -236, 769, 41, 881, 38, 890, -220, 841, -357, 883, -393, 903, -634, 474, -444, 850, -175, 678, -493, 242, -519, 785, -714, 582, -541, 366, -543, 434, -597, 500, -765, 222, -702, 917, -743, 962, -869, 501, -899, 548, -379, 200, -435, 157, -819, 214, -861, 157, -614, 40, -632, 94, -883, -54, -741, 516, -501, 298, -614, -171, -870, -161, -865, -23, -818, 93, -1015, -267, -662, -359, -549, 2, -442, -121, -377, 0, -227, 33, -414, -126, -129, 212, -934, 34, -1082, -282, -1119, -268, -710, -825, -420, -191, -1076, -928, -917, -93, -628, -358, 97, 7, -206, -393, -101, 24, -203, 38, -168, 83, -599, -423, -279, 426, -700, 118, -75, 206, -981, -673, -680, 417, -367, 37, -279, 474, -129, -318, 319, 296, -626, -39, 343, 602, -696, -39, -303, 940, 104, 233, -380, 137, -36, 269, -75, -214, 120, 43, -529, -477, 459, 164, -202, -229, -49, -167, 609, 792, 98, -220, 915, 148, 293, 283, 869, 91, 575, 394, 326, -78, 717, 67, 365, -323, 616, -36, 731, 27, 619, 238, 632, 273, 448, 99, 801, 476, 869, 273, 685, 64, 789, 72, 1021, 217, 793, 459, 734, 360, 646, 480, 360, 322, 429, 464, 638, 430, 756, 363, 1000, 404, 683, 528, 602, 615, 655, 413, 946, 687, 937, 602, 904, 604, 555, 737, 786, 662, 467, 654, 362, 589, 929, 710, 498, 478, 415, 420, 693, 883, 813, 683, 781, 925, 913, 939, 726, 732, 491, 853, 531, 948, 734, 963, 315, 808, 761, 755, 1144, 760, 655, 1076, 826, 1057, 1091, 838, 1003, 808, 1047, 1133, 659, 1101, 992, 1050, 1074, 1075, 971, 694, 1226, 1054, 571, 841, 884, 1404, 1379, 1096, 1080, 861, 1231, 735, 1284, 760, 1272, 991, 1367, 1053, 1257, 700, 1050, 534, 988, 453, 1264, 599, 1140, 679, 1621, 815, 1384, 521, 1317, 393, 1564, 805, 1448, 686, 1068, 648, 875, 307, 1083, 361, 1047, 317, 1417, 964, 675, 571, 1152, 79, 1114, -47, 1530, 311, 1721, 314, 1166, 689, 514, -94, 349, 282, 1412, 328, 1025, 487, -65, 57, 805, 970, 36, 62, 769, -263, 791, -346, 637, 699, -137, 620, 534, 541, -735, 194, 711, 300, -268, -863, 926, 769, -708, -428, 506, 174, -892, -630, 435, 547, -1435, -258, 621, 471, -1018, -1368, -393, 521, -920, -686, -25, 20, -982, -1156, 340, 9, -1558, -1135, -352, 48, -1579, -402, -887, 6, -1156, -888, -548, -352, -1643, -1168, -159, 610, -2024, -963, -225, 193, -1656, -1960, -245, -493, -964, -1680, -936, -635, -1299, -1744, -1388, -604, -1540, -835, -1397, -135, -1588, -290, -1670, -712, -2011, -1632, -1663, -27, -2258, -811, -1157, 184, -1265, 189, -1367, 586, -2011, 201, -790, 712, -1210, 3, -1033, 808, -1251, 830, -111, 635, -1636, 447, -463, -949, -445, -928, -504, -1162, -501, -1211, 144, -351, -372, -1052, -283, -1059, -279, -1123, -575, -1438, -587, -1614, -935, -984, 229, 690, -921, -719, -403, 1362, -685, -465, 874, 397, -509, -46, 317, 1334, -485, 456, 813, 439, -411, 339, 898, 1067, -425, 46, 1441, 497, -909, -800, 1465, 1046, -254, -321, 1430, 1165, 68, 350, 1034, 666, 370, 11, 1311, 790, 143, 232, 1041, 1562, -114, 663, 1616, 1078, 454, 579, 1275, 1040, -76, 909, 752, 1067, 153, 512, 348, 1214, 614, 385, 1843, 808, 269, 1034, 203, 1086, 652, 1017, 1783, 1130, 429, 1327, 387, 1384, -49, 1183, -72, 1215, -416, 1001, 544, 1749, -352, 1223, -502, 1199, -589, 569, -227, 1630, -142, 1578, -230, 1715, -714, 1288, -838, 1398, 1131, 1357, -208, 1232, 437, 965, -929, 818, 811, 1410, 859, 1507, 164, 1212, 1387, 1793, 484, 1874, 456, 2063, 996, 1170, 1326, 1402, 1316, 1360, 1135, 1262, 1234, 
1618, 1361, 1768, 1421, 1227, 1584, 1347, 854, 672, 1685, 1566, 1139, 1270, 2016, 1825, 1773, 1581, 1532, 1460, 1487, 946, 1659, 1021, 1744, 1212, 1392, 977, 1772, 1161, 1826, 1164, 1718, 1429, 1973, 1591, 1185, 864, 2132, 1061, 1799, 814, 1838, 757, 2104, 1315, 2054, 1258, 2113, 915, 2331, 930, 1467, 1147, 2590, 1439, 2245, 1744, 2090, 1620, 2358, 1454, 2666, 1506, 1876, 1837, 2070, 1975, 1739, 1577, 682, 1289, 1584, 2045, 1454, 2098, 2498, 2004, 2711, 2066, 726, 1588, 2756, 2336, 228, 847, 2456, 1659, 36, 301, 1942, 1957, -446, -96, 2154, 1396, 1533, 1101, 14, 608, -923, -732, 1383, 1982, 1345, 952, -680, 321, 1281, 1268, -1594, 365, 941, 946, -1737, -822, 2374, 2787, 1821, 2788 }; /* 3rd LSF quantizer (MR122) */ __device__ static const Word32 dico3_lsf_5[DICO3_SIZE_5 * 4] = { -1812, -2275, -1879, -2537, -1640, -1848, -1695, -2004, -1220, -1912, -1221, -2106, -1559, -1588, -1573, -1556, -1195, -1615, -1224, -1727, -1359, -1151, -1616, -1948, -1274, -1391, -1305, -1403, -1607, -1179, -1676, -1311, -1443, -1478, -1367, -898, -1256, -1059, -1331, -1134, -982, -1133, -1149, -1504, -1080, -1308, -1020, -1183, -980, -1486, -967, -1495, -988, -922, -1047, -1077, -838, -1179, -858, -1222, -1131, -1041, -1064, -767, -872, -1157, -701, -880, -706, -906, -774, -1016, -578, -1080, -801, -1478, -591, -1111, -592, -1146, -713, -1388, -640, -1376, -597, -1059, -416, -903, -686, -832, -661, -708, -444, -868, -490, -921, -374, -776, -619, -1170, -585, -549, -769, -795, -435, -659, -530, -741, -498, -837, -357, -597, -279, -871, -243, -887, -282, -665, -280, -667, -165, -560, -394, -903, -362, -410, -448, -583, -409, -574, -313, -357, -637, -548, -570, -436, -896, -504, -382, -757, -58, -481, -165, -618, -191, -374, -234, -382, -222, -683, -25, -480, -418, -359, -730, -353, -324, -157, -432, -322, -394, -303, -284, -104, -601, -289, -556, -196, -588, -150, -659, -608, -473, -24, -68, -448, -474, -8, -506, -45, -748, -184, -844, -252, -901, -91, -584, -97, -652, 138, -764, -131, -678, -12, -670, 165, -259, -3, -840, -107, -909, 37, -992, 44, -854, -415, -839, 13, -1001, -271, -1026, -309, -798, -478, -832, -488, -943, 168, -1112, -387, -1185, -101, -1183, -40, -941, -316, -1030, -770, -1044, -625, -1081, -538, -1224, -299, -1312, -436, -1197, -663, -1167, -161, -1216, -690, -1237, -831, -1432, -720, -1403, -493, -898, -740, -922, -801, -1102, -402, -1579, -964, -1061, -638, -1269, -1438, -1499, -934, -1502, -895, -1598, -564, -1723, -717, -606, -597, -1166, -1085, -1369, -468, -1946, -1493, -1838, -953, -1932, -931, -1499, -188, -1635, -421, -1457, -338, -1448, -22, -1942, -422, -2006, -249, -496, -114, -1910, -755, -1289, 174, -1451, -109, -482, -257, -1221, -508, -1617, 151, -1694, 208, -654, 107, -1651, 29, -1141, 279, -1215, 306, -1228, -506, -730, -175, -1236, -101, -969, 551, -870, 278, -823, 315, -563, 376, -1051, 228, -507, 280, -599, 281, -758, 253, -305, 379, -755, -134, -611, 660, -824, 536, -817, 646, -413, 49, -341, 177, -453, 526, -482, 589, -71, 339, -657, 264, -244, 295, -237, 315, -387, 569, -506, -9, -377, 14, -160, 661, -216, 40, -308, -46, 95, 214, -242, 167, -86, 192, -56, 27, -76, 31, 36, 309, -106, -182, -113, 74, -441, -22, 23, 139, 81, -11, 44, 15, -87, -137, -118, -207, -158, -58, 272, -92, -156, -441, 8, -136, 128, -221, 101, -218, 40, -197, -76, -456, 9, -445, 33, -423, 226, 60, 73, -222, 156, -399, 280, -318, 245, -341, 166, -499, 339, -190, 327, -219, 325, -137, -89, -596, 100, -627, 144, -677, 487, 28, 252, -391, 214, -41, 282, -28, 99, -286, 331, 49, 459, -388, 565, -369, 
436, 28, 336, -9, 397, -167, 618, 34, 596, -17, 561, -140, 299, 79, 522, 125, 203, 2, 244, 288, 255, 211, 175, 82, 596, 187, 517, 108, 381, 255, 365, 297, 497, 352, 327, -82, 25, 210, 371, 245, 261, 3, 545, 449, 140, 294, 44, 295, 212, 347, 244, 494, 331, 528, 201, 307, 349, 411, 613, 284, 614, 413, 464, 322, 624, 397, 97, 200, -160, 384, 149, 362, 495, 525, 269, 585, 33, 491, -121, 433, 427, 611, 498, 516, 171, 443, 497, 666, 440, 275, 566, 575, 146, 639, 155, 670, -33, 173, 212, 696, -166, 601, -191, 695, -489, 503, 175, 742, 214, 476, 372, 1083, 578, 530, 586, 777, 425, 874, 315, 841, 374, 848, -165, 565, 35, 991, -39, 1062, 329, 712, 786, 840, 645, 795, 661, 676, 571, 918, 632, 1079, 673, 817, 318, 388, 874, 1012, 564, 848, 880, 620, 557, 479, 671, 453, 692, 468, 840, 642, 844, 645, 506, 428, 897, 567, 837, 387, 962, 499, 691, 561, 939, 926, 783, 296, 790, 268, 1028, 530, 874, 329, 548, 143, 675, 291, 503, 66, 1041, 359, 786, 97, 805, 33, 837, 470, 511, 49, 1092, 327, 1174, 323, 3, 242, 872, 474, 689, 429, 1329, 678, 1042, 620, 1109, 664, 321, 193, 889, 950, 1153, 874, 893, 635, 877, 862, 948, 913, 1293, 665, 1320, 639, 997, 793, 1402, 1030, 1176, 1012, 1110, 959, 1410, 925, 1403, 915, 543, 862, 1116, 1222, 835, 1190, 835, 1190, 959, 1148, 1147, 1376, 1300, 1193, 1415, 1231, 1335, 1341, 746, 1092, 1711, 1283, 1389, 1073, 1334, 1566, 1153, 1475, 1645, 1137, 1825, 1220, 1056, 1382, 1521, 1730, 1632, 1545, 1620, 1542, 855, 1596, 865, 1667, 693, 885, 1716, 1519, 1167, 1296, 2209, 1760, 1952, 1493, 2020, 1482, 1534, 1866, 1694, 2008, 1566, 748, 1761, 825, 294, 1392, 1084, 2058, 621, 1315, 365, 1287, 198, 1028, 488, 1408, 249, 403, 1014, 1561, 324, 363, 1645, 1044, 193, 367, 2034, 1859, -251, 579, 750, 994, -243, 30, 1325, 879, -28, -169, 624, 917, -453, 159, 186, 1370, -614, 6, 537, 392, -94, -291, 781, 229, -128, -298, 245, 491, -701, -648, 972, 789, -501, -640, 178, 255, -365, -390, -255, 317, -958, -294, -191, 228, -775, -447, 157, -237, -657, -720, -407, 92, -117, -611, 334, -230, -679, -1084, -144, -317, -901, -861, -738, -360, -85, -727, -90, -787, 100, -22, -391, -263, -56, -73, -337, -754, 5, -189, -706, -624, 89, -344, -135, -1113, -353, -237, -684, -1135, -275, -1102, -269, -1203, 152, 145, -722, -1232, 49, 80, -1248, -776, -248, 391, -732, -547, 469, 218, -255, -864, 69, 366, -166, -485, -688, 191, -1212, -1196, -170, -169, -1308, -1631, 321, 470, -1419, -1243, -64, 272, -1361, -248, 492, 565, -721, -609, 195, 485, -573, -133, 427, 202, -171, -118, 199, 575, 2, -31, 694, 755, -1366, -39, 552, 557, -489, 271, 680, 537, 13, -453, 855, 954, -133, -52, -81, 738, -1169, 637, 1055, 1059, -95, 676, 1259, 1081, 489, 305, -449, 954, -534, 996, -969, 866, -1058, 1059, -1294, 618, -1416, 617, -458, 1366, -159, 1821, -774, -528, -14, 1110, -1202, -901, -772, 433, -1256, -1255, -1011, -302, -602, -585, -759, -1618, -760, -1549, -840, -1921, -816, -539, -1769, -2235, -227, -36, -2034, -1831, -2107, -1126, -2471, -1816, -1470, 252, -2701, -415, -571, -467, 1509, 1554, 2180, 1975, 2326, 2020 }; /* 4th LSF quantizer (MR122) */ __device__ static const Word32 dico4_lsf_5[DICO4_SIZE_5 * 4] = { -1857, -1681, -1857, -1755, -2056, -1150, -2134, -1654, -1619, -1099, -1704, -1131, -1345, -1608, -1359, -1638, -1338, -1293, -1325, -1265, -1664, -1649, -1487, -851, -1346, -1832, -1413, -2188, -1282, -681, -1785, -1649, -966, -1082, -1183, -1676, -1054, -1073, -1142, -1158, -1207, -744, -1274, -997, -934, -1383, -927, -1416, -1010, -1305, -783, -955, -1049, -900, -993, -817, -737, -823, -972, -1189, -738, 
-1094, -738, -1154, -784, -801, -810, -786, -892, -520, -1000, -818, -644, -965, -577, -882, -541, -694, -671, -917, -595, -642, -646, -615, -956, -621, -925, -515, -727, -483, -815, -485, -840, -578, -440, -713, -578, -325, -657, -670, -386, -570, -441, -666, -514, -787, -392, -529, -522, -453, -487, -423, -616, -585, -617, -157, -662, -268, -680, -348, -322, -323, -632, -444, -304, -430, -332, -458, -277, -468, -659, -793, -319, -636, -227, -554, -373, -347, -334, -210, -456, -192, -530, -242, -216, -198, -366, -370, -338, -161, -409, -748, -107, -380, -294, -643, -223, -665, -234, -741, -141, -496, -130, -510, -139, -327, -172, -305, -306, -580, -164, -263, -262, -172, -67, -402, 31, -366, -10, -436, -86, -527, 71, -377, -22, -609, -12, -678, -67, -319, 63, -191, 35, -181, -39, -242, 126, -167, -140, -544, 155, -297, 174, -297, 38, -8, 117, -380, 197, -452, 240, -522, 223, -103, 110, -187, 87, -155, 169, -47, 157, 26, -83, -100, 128, 80, 209, -62, 6, 7, 22, 5, 318, -20, 248, -45, -200, -63, 156, -69, 250, -183, 369, -126, -113, -76, -142, -122, -64, -254, -31, 35, -177, -71, -7, 171, 93, 27, 108, 212, -330, -209, -123, -70, -279, 95, -96, 20, -188, -61, -314, 87, -300, -78, -354, -134, 11, 122, -140, 122, -275, 152, -293, 140, -82, 138, -321, -111, -480, -156, -359, 76, -254, -40, -635, -96, -522, 79, -507, 8, -268, 303, -539, 68, -446, 61, -522, 306, 111, 189, -435, 122, -379, 166, -571, -398, -632, -74, -747, -95, -455, 194, -952, 83, -798, 192, -755, 192, -781, -162, -619, 234, -663, -297, -488, -109, -964, -132, -838, -68, -843, 58, -1112, -86, -805, -299, -944, -253, -778, -50, -965, -549, -352, -98, -992, -343, -1117, -315, -1117, -307, -1155, -374, -637, -230, -1166, -43, -1299, -100, -925, -393, -1274, -600, -689, -130, -1479, -312, -1321, -254, -1464, -442, -1292, -613, -1261, -503, -1501, -368, -1322, 26, -1432, -66, -1743, -161, -1644, -467, -1760, -548, -1393, -568, -1556, -871, -1495, -1034, -1387, -571, -1917, -528, -1783, -123, -1897, -231, -2054, -323, -2052, -906, -1976, -567, -1917, -620, -2047, -989, -1077, -370, -2031, -704, -2355, -749, -2740, -1089, -1909, 159, -2012, 248, -626, -123, -2339, -962, -669, -408, -1379, -1174, -452, -364, -1044, -735, -132, 183, -1620, -752, -547, -307, -777, -1261, -98, 41, -880, -1091, -257, 97, -1602, -1833, 31, -26, -644, -561, -180, -546, -385, -1095, -410, -802, -414, -827, -457, -970, -490, -1109, -215, -916, -144, -937, -493, -1269, -517, -1507, 181, 101, -332, -889, -836, -937, -559, -429, -629, -547, -183, -337, -545, -82, -250, -286, 5, -132, -348, -252, -293, -472, -158, 100, -29, 197, -236, -424, -861, -213, -140, -7, -427, -443, 187, -97, -684, -736, -293, 258, -368, -152, -150, 392, -609, 175, -142, 299, -138, 152, -119, 329, -486, -52, 293, 198, -183, 117, 175, 331, -58, -274, 231, 300, -288, 330, -305, 372, -111, 409, -9, 423, 83, 256, 67, 367, -19, 248, 91, 113, -35, 406, -191, 154, 238, 296, 5, 197, 141, 221, 313, 198, 211, 421, 244, 334, 88, 426, -243, 454, 202, 552, -5, 403, 291, 185, 219, 301, 251, 138, 128, 69, 197, 288, -140, -61, 188, 361, 197, 598, 442, 273, 290, 143, 472, 482, 157, 370, 415, 321, 372, 385, 402, 552, 155, 24, 550, 263, -11, 21, 360, 227, 147, -254, 424, 97, 366, -13, 375, 141, 449, 232, 396, 507, 474, 272, 701, 324, 362, -47, 587, 148, 543, 69, 400, -51, 561, 59, 220, -10, 352, 147, 206, 211, 653, 185, 563, 297, 565, 284, 594, 121, 766, 192, 398, 118, 642, 434, 233, 264, 481, 467, 129, -165, 699, 239, 90, 26, 342, 474, -55, 27, 388, 94, -172, 0, 725, 379, -60, 337, 370, 465, 95, 319, 806, 595, 
78, 260, 497, 851, 210, 560, 458, 574, -464, 202, 497, 625, -202, 152, 48, 712, -20, 566, 100, 715, 455, 468, 411, 605, 319, 646, 195, 615, 401, 538, 680, 739, 201, 667, 434, 954, 454, 425, 646, 491, 606, 681, 416, 508, 497, 822, 426, 815, 660, 647, 628, 716, 697, 466, 618, 457, 685, 460, 365, 309, 721, 567, 836, 601, 609, 300, 825, 459, 943, 687, 681, 533, 915, 598, 591, 243, 876, 451, 874, 420, 786, 317, 732, 220, 922, 317, 1108, 367, 531, 466, 1028, 649, 1053, 615, 1034, 553, 829, 602, 1021, 799, 927, 803, 878, 763, 799, 496, 1373, 773, 585, 770, 803, 930, 1099, 793, 1222, 862, 1209, 895, 1025, 727, 772, 845, 1172, 1115, 867, 1021, 830, 1013, 841, 910, 506, 703, 1239, 1077, 620, 819, 1196, 1083, 1155, 1081, 1142, 907, 1547, 1121, 1309, 648, 1343, 612, 1484, 988, 1479, 937, 985, 1328, 955, 1341, 429, 910, 841, 1338, 564, 1179, 412, 1156, 1427, 1320, 1434, 1330, 640, 760, 1726, 1410, 190, 555, 1073, 1005, 426, 257, 839, 980, 235, 231, 1520, 1167, 109, 293, 1014, 1569, 305, 142, 1148, 539, -291, -108, 1213, 972, 22, -216, 667, 828, -482, 438, 453, 1431, -581, -422, 789, 387, -358, -454, 174, 780, -36, -372, 390, -134, -629, 160, -306, 751, -1258, -331, 177, 522, -248, 574, -251, 639, -531, 407, -596, 394, -419, 789, -617, 801, -986, 399, -857, 727, -7, 518, -703, 310, -1143, -24, -1002, 287, -960, 363, -1299, 312, -1534, 245, -1557, 305, 28, 153, -859, -175, -33, 332, -1398, -154, 212, 410, -593, -197, -1092, -704, -904, -65, 282, 367, -918, -686, 345, 93, -258, -357, 696, 644, -693, -28, 448, 493, -273, 193, 527, 546, -243, -513, 384, -136, 273, -353, 512, -142, 537, -198, 941, 750, 83, 248, 578, 861, -56, 592, 842, 44, 892, 24, 33, 890, -16, 982, 831, 1398, 1535, 1898, 1716, 1376, 1948, 1465 }; /* 5th LSF quantizer (MR122) */ __device__ static const Word32 dico5_lsf_5[DICO5_SIZE_5 * 4] = { -1002, -929, -1096, -1203, -641, -931, -604, -961, -779, -673, -835, -788, -416, -664, -458, -766, -652, -521, -662, -495, -1023, -509, -1023, -428, -444, -552, -368, -449, -479, -211, -1054, -903, -316, -249, -569, -591, -569, -275, -541, -191, -716, -188, -842, -264, -333, -248, -318, -228, -275, 1, -567, -228, -115, -221, -238, -374, -197, -507, -222, -579, -258, -432, -61, -244, -345, 2, -338, 39, -215, -169, -58, 0, -56, -6, -203, -131, 1, -186, -5, -211, 6, -380, 11, -418, -116, 131, -134, 113, 89, -4, 71, -2, -19, -192, 262, 24, 189, 151, -133, -109, 186, -153, 166, -219, 37, 139, 193, 171, 337, 124, 158, -61, 141, 226, -13, 190, 231, 34, 354, 109, 316, 201, 244, 164, 330, -85, 390, -84, 254, 327, 257, 335, 491, 147, 476, 105, 54, 77, 437, 370, 421, 314, 449, 342, 329, 126, 673, 292, 571, 388, 243, 193, 653, 320, 621, 280, 194, 380, 517, 581, 45, 323, 111, 422, 489, 395, 734, 534, 622, 546, 486, 502, 318, 572, 189, 550, 385, 422, -157, 153, -125, 382, -197, 386, -263, 334, 228, 697, -188, 1, 51, 297, -507, 213, -376, 397, -24, 255, -547, 89, -502, -94, 387, 179, -620, 68, -684, 112, -642, -350, -260, 172, -438, -324, 264, 648, -964, -4, -1121, 7, -134, 134, -1133, -306, 143, 96, -420, -497, -1221, -350, -1527, -685, -161, 72, 873, 691, 732, 283, 921, 353, 334, 475, 1095, 821, 864, 524, 843, 497, 714, 711, 788, 750, 1076, 714, 1204, 753 }; /* Scaling factors for the lsp variability operation */ __device__ static const Word16 lsf_hist_mean_scale[M] = { 20000, 20000, 20000, 20000, 20000, 18000, 16384, 8192, 0, 0 }; /* * The tables contains the following data: * * g_pitch (Q14), * g_fac (Q12), (g_code = g_code0*g_fac), * qua_ener_MR122 (Q10), (log2(g_fac)) * qua_ener (Q10) (20*log10(g_fac)) * * The 
log2() and log10() values are calculated on the fixed point value * (g_fac Q12) and not on the original floating point value of g_fac * to make the quantizer/MA predictdor use corresponding values. */ #define MR475_VQ_SIZE 256 /* The table contains the following data: * * g_pitch(0) (Q14) for sub- * g_fac(0) (Q12) frame 0 and 2 * g_pitch(1) (Q14) for sub- * g_fac(2) (Q12) frame 1 and 3 * */ __device__ static const Word32 table_gain_MR475[MR475_VQ_SIZE * 4] = { /* * g_pit(0), * g_fac(0), * g_pit(1), * g_fac(1) */ 812, 128, 542, 140, 2873, 1135, 2266, 3402, 2067, 563, 12677, 647, 4132, 1798, 5601, 5285, 7689, 374, 3735, 441, 10912, 2638, 11807, 2494, 20490, 797, 5218, 675, 6724, 8354, 5282, 1696, 1488, 428, 5882, 452, 5332, 4072, 3583, 1268, 2469, 901, 15894, 1005, 14982, 3271, 10331, 4858, 3635, 2021, 2596, 835, 12360, 4892, 12206, 1704, 13432, 1604, 9118, 2341, 3968, 1538, 5479, 9936, 3795, 417, 1359, 414, 3640, 1569, 7995, 3541, 11405, 645, 8552, 635, 4056, 1377, 16608, 6124, 11420, 700, 2007, 607, 12415, 1578, 11119, 4654, 13680, 1708, 11990, 1229, 7996, 7297, 13231, 5715, 2428, 1159, 2073, 1941, 6218, 6121, 3546, 1804, 8925, 1802, 8679, 1580, 13935, 3576, 13313, 6237, 6142, 1130, 5994, 1734, 14141, 4662, 11271, 3321, 12226, 1551, 13931, 3015, 5081, 10464, 9444, 6706, 1689, 683, 1436, 1306, 7212, 3933, 4082, 2713, 7793, 704, 15070, 802, 6299, 5212, 4337, 5357, 6676, 541, 6062, 626, 13651, 3700, 11498, 2408, 16156, 716, 12177, 751, 8065, 11489, 6314, 2256, 4466, 496, 7293, 523, 10213, 3833, 8394, 3037, 8403, 966, 14228, 1880, 8703, 5409, 16395, 4863, 7420, 1979, 6089, 1230, 9371, 4398, 14558, 3363, 13559, 2873, 13163, 1465, 5534, 1678, 13138, 14771, 7338, 600, 1318, 548, 4252, 3539, 10044, 2364, 10587, 622, 13088, 669, 14126, 3526, 5039, 9784, 15338, 619, 3115, 590, 16442, 3013, 15542, 4168, 15537, 1611, 15405, 1228, 16023, 9299, 7534, 4976, 1990, 1213, 11447, 1157, 12512, 5519, 9475, 2644, 7716, 2034, 13280, 2239, 16011, 5093, 8066, 6761, 10083, 1413, 5002, 2347, 12523, 5975, 15126, 2899, 18264, 2289, 15827, 2527, 16265, 10254, 14651, 11319, 1797, 337, 3115, 397, 3510, 2928, 4592, 2670, 7519, 628, 11415, 656, 5946, 2435, 6544, 7367, 8238, 829, 4000, 863, 10032, 2492, 16057, 3551, 18204, 1054, 6103, 1454, 5884, 7900, 18752, 3468, 1864, 544, 9198, 683, 11623, 4160, 4594, 1644, 3158, 1157, 15953, 2560, 12349, 3733, 17420, 5260, 6106, 2004, 2917, 1742, 16467, 5257, 16787, 1680, 17205, 1759, 4773, 3231, 7386, 6035, 14342, 10012, 4035, 442, 4194, 458, 9214, 2242, 7427, 4217, 12860, 801, 11186, 825, 12648, 2084, 12956, 6554, 9505, 996, 6629, 985, 10537, 2502, 15289, 5006, 12602, 2055, 15484, 1653, 16194, 6921, 14231, 5790, 2626, 828, 5615, 1686, 13663, 5778, 3668, 1554, 11313, 2633, 9770, 1459, 14003, 4733, 15897, 6291, 6278, 1870, 7910, 2285, 16978, 4571, 16576, 3849, 15248, 2311, 16023, 3244, 14459, 17808, 11847, 2763, 1981, 1407, 1400, 876, 4335, 3547, 4391, 4210, 5405, 680, 17461, 781, 6501, 5118, 8091, 7677, 7355, 794, 8333, 1182, 15041, 3160, 14928, 3039, 20421, 880, 14545, 852, 12337, 14708, 6904, 1920, 4225, 933, 8218, 1087, 10659, 4084, 10082, 4533, 2735, 840, 20657, 1081, 16711, 5966, 15873, 4578, 10871, 2574, 3773, 1166, 14519, 4044, 20699, 2627, 15219, 2734, 15274, 2186, 6257, 3226, 13125, 19480, 7196, 930, 2462, 1618, 4515, 3092, 13852, 4277, 10460, 833, 17339, 810, 16891, 2289, 15546, 8217, 13603, 1684, 3197, 1834, 15948, 2820, 15812, 5327, 17006, 2438, 16788, 1326, 15671, 8156, 11726, 8556, 3762, 2053, 9563, 1317, 13561, 6790, 12227, 1936, 8180, 3550, 13287, 1778, 16299, 6599, 
16291, 7758, 8521, 2551, 7225, 2645, 18269, 7489, 16885, 2248, 17882, 2884, 17265, 3328, 9417, 20162, 11042, 8320, 1286, 620, 1431, 583, 5993, 2289, 3978, 3626, 5144, 752, 13409, 830, 5553, 2860, 11764, 5908, 10737, 560, 5446, 564, 13321, 3008, 11946, 3683, 19887, 798, 9825, 728, 13663, 8748, 7391, 3053, 2515, 778, 6050, 833, 6469, 5074, 8305, 2463, 6141, 1865, 15308, 1262, 14408, 4547, 13663, 4515, 3137, 2983, 2479, 1259, 15088, 4647, 15382, 2607, 14492, 2392, 12462, 2537, 7539, 2949, 12909, 12060, 5468, 684, 3141, 722, 5081, 1274, 12732, 4200, 15302, 681, 7819, 592, 6534, 2021, 16478, 8737, 13364, 882, 5397, 899, 14656, 2178, 14741, 4227, 14270, 1298, 13929, 2029, 15477, 7482, 15815, 4572, 2521, 2013, 5062, 1804, 5159, 6582, 7130, 3597, 10920, 1611, 11729, 1708, 16903, 3455, 16268, 6640, 9306, 1007, 9369, 2106, 19182, 5037, 12441, 4269, 15919, 1332, 15357, 3512, 11898, 14141, 16101, 6854, 2010, 737, 3779, 861, 11454, 2880, 3564, 3540, 9057, 1241, 12391, 896, 8546, 4629, 11561, 5776, 8129, 589, 8218, 588, 18728, 3755, 12973, 3149, 15729, 758, 16634, 754, 15222, 11138, 15871, 2208, 4673, 610, 10218, 678, 15257, 4146, 5729, 3327, 8377, 1670, 19862, 2321, 15450, 5511, 14054, 5481, 5728, 2888, 7580, 1346, 14384, 5325, 16236, 3950, 15118, 3744, 15306, 1435, 14597, 4070, 12301, 15696, 7617, 1699, 2170, 884, 4459, 4567, 18094, 3306, 12742, 815, 14926, 907, 15016, 4281, 15518, 8368, 17994, 1087, 2358, 865, 16281, 3787, 15679, 4596, 16356, 1534, 16584, 2210, 16833, 9697, 15929, 4513, 3277, 1085, 9643, 2187, 11973, 6068, 9199, 4462, 8955, 1629, 10289, 3062, 16481, 5155, 15466, 7066, 13678, 2543, 5273, 2277, 16746, 6213, 16655, 3408, 20304, 3363, 18688, 1985, 14172, 12867, 15154, 15703, 4473, 1020, 1681, 886, 4311, 4301, 8952, 3657, 5893, 1147, 11647, 1452, 15886, 2227, 4582, 6644, 6929, 1205, 6220, 799, 12415, 3409, 15968, 3877, 19859, 2109, 9689, 2141, 14742, 8830, 14480, 2599, 1817, 1238, 7771, 813, 19079, 4410, 5554, 2064, 3687, 2844, 17435, 2256, 16697, 4486, 16199, 5388, 8028, 2763, 3405, 2119, 17426, 5477, 13698, 2786, 19879, 2720, 9098, 3880, 18172, 4833, 17336, 12207, 5116, 996, 4935, 988, 9888, 3081, 6014, 5371, 15881, 1667, 8405, 1183, 15087, 2366, 19777, 7002, 11963, 1562, 7279, 1128, 16859, 1532, 15762, 5381, 14708, 2065, 20105, 2155, 17158, 8245, 17911, 6318, 5467, 1504, 4100, 2574, 17421, 6810, 5673, 2888, 16636, 3382, 8975, 1831, 20159, 4737, 19550, 7294, 6658, 2781, 11472, 3321, 19397, 5054, 18878, 4722, 16439, 2373, 20430, 4386, 11353, 26526, 11593, 3068, 2866, 1566, 5108, 1070, 9614, 4915, 4939, 3536, 7541, 878, 20717, 851, 6938, 4395, 16799, 7733, 10137, 1019, 9845, 964, 15494, 3955, 15459, 3430, 18863, 982, 20120, 963, 16876, 12887, 14334, 4200, 6599, 1220, 9222, 814, 16942, 5134, 5661, 4898, 5488, 1798, 20258, 3962, 17005, 6178, 17929, 5929, 9365, 3420, 7474, 1971, 19537, 5177, 19003, 3006, 16454, 3788, 16070, 2367, 8664, 2743, 9445, 26358, 10856, 1287, 3555, 1009, 5606, 3622, 19453, 5512, 12453, 797, 20634, 911, 15427, 3066, 17037, 10275, 18883, 2633, 3913, 1268, 19519, 3371, 18052, 5230, 19291, 1678, 19508, 3172, 18072, 10754, 16625, 6845, 3134, 2298, 10869, 2437, 15580, 6913, 12597, 3381, 11116, 3297, 16762, 2424, 18853, 6715, 17171, 9887, 12743, 2605, 8937, 3140, 19033, 7764, 18347, 3880, 20475, 3682, 19602, 3380, 13044, 19373, 10526, 23124 }; /* table used in 'high' rates: MR67 MR74 */ #define VQ_SIZE_HIGHRATES 128 __device__ static const Word32 table_gain_highrates[VQ_SIZE_HIGHRATES * 4] = { /* * Note: every 4th value (qua_ener) contains the original values from IS641 * 
to ensure bit-exactness; however, they are not exactly the * rounded value of (20*log10(g_fac)) */ /* * g_pit, * g_fac, * qua_ener_MR122, * qua_ener */ 577, 662, -2692, -16214, 806, 1836, -1185, -7135, 3109, 1052, -2008, -12086, 4181, 1387, -1600, -9629, 2373, 1425, -1560, -9394, 3248, 1985, -1070, -6442, 1827, 2320, -840, -5056, 941, 3314, -313, -1885, 2351, 2977, -471, -2838, 3616, 2420, -777, -4681, 3451, 3096, -414, -2490, 2955, 4301, 72, 434, 1848, 4500, 139, 836, 3884, 5416, 413, 2484, 1187, 7210, 835, 5030, 3083, 9000, 1163, 7002, 7384, 883, -2267, -13647, 5962, 1506, -1478, -8900, 5155, 2134, -963, -5800, 7944, 2009, -1052, -6335, 6507, 2250, -885, -5327, 7670, 2752, -588, -3537, 5952, 3016, -452, -2724, 4898, 3764, -125, -751, 6989, 3588, -196, -1177, 8174, 3978, -43, -260, 6064, 4404, 107, 645, 7709, 5087, 320, 1928, 5523, 6021, 569, 3426, 7769, 7126, 818, 4926, 6060, 7938, 977, 5885, 5594, 11487, 1523, 9172, 10581, 1356, -1633, -9831, 9049, 1597, -1391, -8380, 9794, 2035, -1033, -6220, 8946, 2415, -780, -4700, 10296, 2584, -681, -4099, 9407, 2734, -597, -3595, 8700, 3218, -356, -2144, 9757, 3395, -277, -1669, 10177, 3892, -75, -454, 9170, 4528, 148, 891, 10152, 5004, 296, 1781, 9114, 5735, 497, 2993, 10500, 6266, 628, 3782, 10110, 7631, 919, 5534, 8844, 8727, 1117, 6728, 8956, 12496, 1648, 9921, 12924, 976, -2119, -12753, 11435, 1755, -1252, -7539, 12138, 2328, -835, -5024, 11388, 2368, -810, -4872, 10700, 3064, -429, -2580, 12332, 2861, -530, -3192, 11722, 3327, -307, -1848, 11270, 3700, -150, -904, 10861, 4413, 110, 663, 12082, 4533, 150, 902, 11283, 5205, 354, 2132, 11960, 6305, 637, 3837, 11167, 7534, 900, 5420, 12128, 8329, 1049, 6312, 10969, 10777, 1429, 8604, 10300, 17376, 2135, 12853, 13899, 1681, -1316, -7921, 12580, 2045, -1026, -6179, 13265, 2439, -766, -4610, 14033, 2989, -465, -2802, 13452, 3098, -413, -2482, 12396, 3658, -167, -1006, 13510, 3780, -119, -713, 12880, 4272, 62, 374, 13533, 4861, 253, 1523, 12667, 5457, 424, 2552, 13854, 6106, 590, 3551, 13031, 6483, 678, 4084, 13557, 7721, 937, 5639, 12957, 9311, 1213, 7304, 13714, 11551, 1532, 9221, 12591, 15206, 1938, 11667, 15113, 1540, -1445, -8700, 15072, 2333, -832, -5007, 14527, 2511, -723, -4352, 14692, 3199, -365, -2197, 15382, 3560, -207, -1247, 14133, 3960, -50, -300, 15102, 4236, 50, 298, 14332, 4824, 242, 1454, 14846, 5451, 422, 2542, 15306, 6083, 584, 3518, 14329, 6888, 768, 4623, 15060, 7689, 930, 5602, 14406, 9426, 1231, 7413, 15387, 9741, 1280, 7706, 14824, 14271, 1844, 11102, 13600, 24939, 2669, 16067, 16396, 1969, -1082, -6517, 16817, 2832, -545, -3283, 15713, 2843, -539, -3248, 16104, 3336, -303, -1825, 16384, 3963, -49, -294, 16940, 4579, 165, 992, 15711, 4599, 171, 1030, 16222, 5448, 421, 2537, 16832, 6382, 655, 3945, 15745, 7141, 821, 4944, 16326, 7469, 888, 5343, 16611, 8624, 1100, 6622, 17028, 10418, 1379, 8303, 15905, 11817, 1565, 9423, 16878, 14690, 1887, 11360, 16515, 20870, 2406, 14483, 18142, 2083, -999, -6013, 19401, 3178, -375, -2257, 17508, 3426, -264, -1589, 20054, 4027, -25, -151, 18069, 4249, 54, 326, 18952, 5066, 314, 1890, 17711, 5402, 409, 2461, 19835, 6192, 610, 3676, 17950, 7014, 795, 4784, 21318, 7877, 966, 5816, 17910, 9289, 1210, 7283, 19144, 9290, 1210, 7284, 20517, 11381, 1510, 9089, 18075, 14485, 1866, 11234, 19999, 17882, 2177, 13108, 18842, 32764, 3072, 18494 }; /* table used in 'low' rates: MR475, MR515, MR59 */ #define VQ_SIZE_LOWRATES 64 __device__ static const Word32 table_gain_lowrates[VQ_SIZE_LOWRATES * 4] = { /* * g_pit, * g_fac, * qua_ener_MR122, * qua_ener */ 
10813, 28753, 2879, 17333, 20480, 2785, -570, -3431, 18841, 6594, 703, 4235, 6225, 7413, 876, 5276, 17203, 10444, 1383, 8325, 21626, 1269, -1731, -10422, 21135, 4423, 113, 683, 11304, 1556, -1430, -8609, 19005, 12820, 1686, 10148, 17367, 2498, -731, -4398, 17858, 4833, 244, 1472, 9994, 2498, -731, -4398, 17530, 7864, 964, 5802, 14254, 1884, -1147, -6907, 15892, 3153, -387, -2327, 6717, 1802, -1213, -7303, 18186, 20193, 2357, 14189, 18022, 3031, -445, -2678, 16711, 5857, 528, 3181, 8847, 4014, -30, -180, 15892, 8970, 1158, 6972, 18022, 1392, -1594, -9599, 16711, 4096, 0, 0, 8192, 655, -2708, -16305, 15237, 13926, 1808, 10884, 14254, 3112, -406, -2444, 14090, 4669, 193, 1165, 5406, 2703, -614, -3697, 13434, 6553, 694, 4180, 12451, 901, -2237, -13468, 12451, 2662, -637, -3833, 3768, 655, -2708, -16305, 14745, 23511, 2582, 15543, 19169, 2457, -755, -4546, 20152, 5079, 318, 1913, 6881, 4096, 0, 0, 20480, 8560, 1089, 6556, 19660, 737, -2534, -15255, 19005, 4259, 58, 347, 7864, 2088, -995, -5993, 11468, 12288, 1623, 9771, 15892, 1474, -1510, -9090, 15728, 4628, 180, 1086, 9175, 1433, -1552, -9341, 16056, 7004, 793, 4772, 14827, 737, -2534, -15255, 15073, 2252, -884, -5321, 5079, 1228, -1780, -10714, 13271, 17326, 2131, 12827, 16547, 2334, -831, -5002, 15073, 5816, 518, 3118, 3932, 3686, -156, -938, 14254, 8601, 1096, 6598, 16875, 778, -2454, -14774, 15073, 3809, -107, -646, 6062, 614, -2804, -16879, 9338, 9256, 1204, 7251, 13271, 1761, -1247, -7508, 13271, 3522, -223, -1343, 2457, 1966, -1084, -6529, 11468, 5529, 443, 2668, 10485, 737, -2534, -15255, 11632, 3194, -367, -2212, 1474, 778, -2454, -14774 }; __device__ static const Word32 inter6[61] = { 29443, 28346, 25207, 20449, 14701, 8693, 3143, -1352, -4402, -5865, -5850, -4673, -2783, -672, 1211, 2536, 3130, 2991, 2259, 1170, 0, -1001, -1652, -1868, -1666, -1147, -464, 218, 756, 1060, 1099, 904, 550, 135, -245, -514, -634, -602, -451, -231, 0, 191, 308, 340, 296, 198, 78, -36, -120, -163, -165, -132, -79, -19, 34, 73, 91, 89, 70, 38, 0 }; /* * window for non-MR122 modesm; uses 40 samples lookahead * used only in BuildCNParam */ __device__ static const Word32 window_200_40[L_WINDOW] = { 2621, 2623, 2629, 2638, 2651, 2668, 2689, 2713, 2741, 2772, 2808, 2847, 2890, 2936, 2986, 3040, 3097, 3158, 3223, 3291, 3363, 3438, 3517, 3599, 3685, 3774, 3867, 3963, 4063, 4166, 4272, 4382, 4495, 4611, 4731, 4853, 4979, 5108, 5240, 5376, 5514, 5655, 5800, 5947, 6097, 6250, 6406, 6565, 6726, 6890, 7057, 7227, 7399, 7573, 7750, 7930, 8112, 8296, 8483, 8672, 8863, 9057, 9252, 9450, 9650, 9852, 10055, 10261, 10468, 10677, 10888, 11101, 11315, 11531, 11748, 11967, 12187, 12409, 12632, 12856, 13082, 13308, 13536, 13764, 13994, 14225, 14456, 14688, 14921, 15155, 15389, 15624, 15859, 16095, 16331, 16568, 16805, 17042, 17279, 17516, 17754, 17991, 18228, 18465, 18702, 18939, 19175, 19411, 19647, 19882, 20117, 20350, 20584, 20816, 21048, 21279, 21509, 21738, 21967, 22194, 22420, 22644, 22868, 23090, 23311, 23531, 23749, 23965, 24181, 24394, 24606, 24816, 25024, 25231, 25435, 25638, 25839, 26037, 26234, 26428, 26621, 26811, 26999, 27184, 27368, 27548, 27727, 27903, 28076, 28247, 28415, 28581, 28743, 28903, 29061, 29215, 29367, 29515, 29661, 29804, 29944, 30081, 30214, 30345, 30472, 30597, 30718, 30836, 30950, 31062, 31170, 31274, 31376, 31474, 31568, 31659, 31747, 31831, 31911, 31988, 32062, 32132, 32198, 32261, 32320, 32376, 32428, 32476, 32521, 32561, 32599, 32632, 32662, 32688, 32711, 32729, 32744, 32755, 32763, 32767, 32767, 32741, 32665, 32537, 32359, 32129, 31850, 
31521, 31143, 30716, 30242, 29720, 29151, 28538, 27879, 27177, 26433, 25647, 24821, 23957, 23055, 22117, 21145, 20139, 19102, 18036, 16941, 15820, 14674, 13505, 12315, 11106, 9879, 8637, 7381, 6114, 4838, 3554, 2264, 971 }; /* comparision optimization tables */ /* definition of bad speech */ __device__ static const UWord8 table_speech_bad[9] = { 0, 0, 1, 1, 0, 0, 0, 1, 0 }; __device__ static const UWord8 table_SID[9] = { 0, 0, 0, 0, 1, 1, 1, 0, 0 }; __device__ static const UWord8 table_DTX[9] = { 0, 0, 0, 0, 1, 1, 1, 1, 0 }; __device__ static const UWord8 table_mute[9] = { 0, 0, 0, 0, 1, 0, 1, 1, 0 }; /* track start positions for fixed codebook routines */ __device__ static const Word8 startPos[16] = { 0, 2, 0, 3, 0, 2, 0, 3, 1, 3, 2, 4, 1, 4, 1, 4 }; __device__ static Float64 Dotproduct40(Float32 *x, Float32 *y) { Float64 acc; acc = x[0] * y[0] + x[1] * y[1] + x[2] * y[2] + x[3] * y[3]; acc += x[4] * y[4] + x[5] * y[5] + x[6] * y[6] + x[7] * y[7]; acc += x[8] * y[8] + x[9] * y[9] + x[10] * y[10] + x[11] * y[11]; acc += x[12] * y[12] + x[13] * y[13] + x[14] * y[14] + x[15] * y[15]; acc += x[16] * y[16] + x[17] * y[17] + x[18] * y[18] + x[19] * y[19]; acc += x[20] * y[20] + x[21] * y[21] + x[22] * y[22] + x[23] * y[23]; acc += x[24] * y[24] + x[25] * y[25] + x[26] * y[26] + x[27] * y[27]; acc += x[28] * y[28] + x[29] * y[29] + x[30] * y[30] + x[31] * y[31]; acc += x[32] * y[32] + x[33] * y[33] + x[34] * y[34] + x[35] * y[35]; acc += x[36] * y[36] + x[37] * y[37] + x[38] * y[38] + x[39] * y[39]; return(acc); } /* * CodAmrReset * * * Parameters: * state B: state structure * mode I: AMR mode * * Function: * Resets state memory * * Returns: * void */ __device__ static void Decoder_amr_reset(Decoder_amrState *state, enum Mode mode) { Word32 i; /* Cb_gain_average_reset */ memset(state->Cb_gain_averState.cbGainHistory, 0, L_CBGAINHIST << 2); state->Cb_gain_averState.hangVar = 0; state->Cb_gain_averState.hangCount = 0; /* Initialize static position */ state->exc = PIT_MAX + L_INTERPOL; /* Static vectors to zero */ memset(state->old_exc, 0, (PIT_MAX + L_INTERPOL) << 2); if (mode != MRDTX) memset(state->mem_syn, 0, M << 2); /* initialize pitch sharpening */ state->sharp = SHARPMIN; state->old_T0 = 40; /* Initialize state->lsp_old [] */ if (mode != MRDTX) { state->lsp_old[0] = 30000; state->lsp_old[1] = 26000; state->lsp_old[2] = 21000; state->lsp_old[3] = 15000; state->lsp_old[4] = 8000; state->lsp_old[5] = 0; state->lsp_old[6] = -8000; state->lsp_old[7] = -15000; state->lsp_old[8] = -21000; state->lsp_old[9] = -26000; } /* Initialize memories of bad frame handling */ state->prev_bf = 0; state->prev_pdf = 0; state->state = 0; state->T0_lagBuff = 40; state->inBackgroundNoise = 0; state->voicedHangover = 0; if (mode != MRDTX) memset(state->excEnergyHist, 0, 9 << 2); memset(state->ltpGainHistory, 0, 9 << 2); if (mode != MRDTX) { state->lsp_avg_st.lsp_meanSave[0] = 1384; state->lsp_avg_st.lsp_meanSave[1] = 2077; state->lsp_avg_st.lsp_meanSave[2] = 3420; state->lsp_avg_st.lsp_meanSave[3] = 5108; state->lsp_avg_st.lsp_meanSave[4] = 6742; state->lsp_avg_st.lsp_meanSave[5] = 8122; state->lsp_avg_st.lsp_meanSave[6] = 9863; state->lsp_avg_st.lsp_meanSave[7] = 11092; state->lsp_avg_st.lsp_meanSave[8] = 12714; state->lsp_avg_st.lsp_meanSave[9] = 13701; } memset(state->lsfState.past_r_q, 0, M << 2); /* Past dequantized lsfs */ state->lsfState.past_lsf_q[0] = 1384; state->lsfState.past_lsf_q[1] = 2077; state->lsfState.past_lsf_q[2] = 3420; state->lsfState.past_lsf_q[3] = 5108; state->lsfState.past_lsf_q[4] = 6742; 
state->lsfState.past_lsf_q[5] = 8122; state->lsfState.past_lsf_q[6] = 9863; state->lsfState.past_lsf_q[7] = 11092; state->lsfState.past_lsf_q[8] = 12714; state->lsfState.past_lsf_q[9] = 13701; for (i = 0; i < 5; i++) { state->ec_gain_p_st.pbuf[i] = 1640; } state->ec_gain_p_st.past_gain_pit = 0; state->ec_gain_p_st.prev_gp = 16384; for (i = 0; i < 5; i++) { state->ec_gain_c_st.gbuf[i] = 1; } state->ec_gain_c_st.past_gain_code = 0; state->ec_gain_c_st.prev_gc = 1; if (mode != MRDTX) { for (i = 0; i < NPRED; i++) { state->pred_state.past_qua_en[i] = MIN_ENERGY; state->pred_state.past_qua_en_MR122[i] = MIN_ENERGY_MR122; } } state->nodataSeed = 21845; /* Static vectors to zero */ memset(state->background_state.frameEnergyHist, 0, L_ENERGYHIST << 2); /* Initialize hangover handling */ state->background_state.bgHangover = 0; /* phDispReset */ memset(state->ph_disp_st.gainMem, 0, PHDGAINMEMSIZE << 2); state->ph_disp_st.prevState = 0; state->ph_disp_st.prevCbGain = 0; state->ph_disp_st.lockFull = 0; state->ph_disp_st.onset = 0; /* assume no onset in start */ if (mode != MRDTX) { state->dtxDecoderState.since_last_sid = 0; state->dtxDecoderState.true_sid_period_inv = 8192; state->dtxDecoderState.log_en = 3500; state->dtxDecoderState.old_log_en = 3500; /* low level noise for better performance in DTX handover cases*/ state->dtxDecoderState.pn_seed_rx = PN_INITIAL_SEED; /* Initialize state->lsp [] */ state->dtxDecoderState.lsp[0] = 30000; state->dtxDecoderState.lsp[1] = 26000; state->dtxDecoderState.lsp[2] = 21000; state->dtxDecoderState.lsp[3] = 15000; state->dtxDecoderState.lsp[4] = 8000; state->dtxDecoderState.lsp[5] = 0; state->dtxDecoderState.lsp[6] = -8000; state->dtxDecoderState.lsp[7] = -15000; state->dtxDecoderState.lsp[8] = -21000; state->dtxDecoderState.lsp[9] = -26000; /* Initialize state->lsp_old [] */ state->dtxDecoderState.lsp_old[0] = 30000; state->dtxDecoderState.lsp_old[1] = 26000; state->dtxDecoderState.lsp_old[2] = 21000; state->dtxDecoderState.lsp_old[3] = 15000; state->dtxDecoderState.lsp_old[4] = 8000; state->dtxDecoderState.lsp_old[5] = 0; state->dtxDecoderState.lsp_old[6] = -8000; state->dtxDecoderState.lsp_old[7] = -15000; state->dtxDecoderState.lsp_old[8] = -21000; state->dtxDecoderState.lsp_old[9] = -26000; state->dtxDecoderState.lsf_hist_ptr = 0; state->dtxDecoderState.log_pg_mean = 0; state->dtxDecoderState.log_en_hist_ptr = 0; /* initialize decoder lsf history */ state->dtxDecoderState.lsf_hist[0] = 1384; state->dtxDecoderState.lsf_hist[1] = 2077; state->dtxDecoderState.lsf_hist[2] = 3420; state->dtxDecoderState.lsf_hist[3] = 5108; state->dtxDecoderState.lsf_hist[4] = 6742; state->dtxDecoderState.lsf_hist[5] = 8122; state->dtxDecoderState.lsf_hist[6] = 9863; state->dtxDecoderState.lsf_hist[7] = 11092; state->dtxDecoderState.lsf_hist[8] = 12714; state->dtxDecoderState.lsf_hist[9] = 13701; for (i = 1; i < DTX_HIST_SIZE; i++) { memcpy(&state->dtxDecoderState.lsf_hist[M * i], &state-> dtxDecoderState.lsf_hist[0], M << 2); } memset(state->dtxDecoderState.lsf_hist_mean, 0, M * DTX_HIST_SIZE << 2); /* initialize decoder log frame energy */ for (i = 0; i < DTX_HIST_SIZE; i++) { state->dtxDecoderState.log_en_hist[i] = state->dtxDecoderState.log_en; } state->dtxDecoderState.log_en_adjust = 0; state->dtxDecoderState.dtxHangoverCount = DTX_HANG_CONST; state->dtxDecoderState.decAnaElapsedCount = 31; state->dtxDecoderState.sid_frame = 0; state->dtxDecoderState.valid_data = 0; state->dtxDecoderState.dtxHangoverAdded = 0; state->dtxDecoderState.dtxGlobalState = DTX; 
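/* flag that no SID parameter update has been processed since this reset */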
state->dtxDecoderState.data_updated = 0; } return; } /* * rx_dtx_handler * * * Parameters: * st->dtxGlobalState I: DTX state * st->since_last_sid B: Frames after last SID frame * st->data_updated I: SID update flag * st->decAnaElapsedCount B: state machine that synch with the GSMEFR txDtx machine * st->dtxHangoverAdded B: DTX hangover * st->sid_frame O: SID frame indicator * st->valid_data O: Vaild data indicator * frame_type O: Frame type * * Function: * Find the new DTX state * * Returns: * DTXStateType DTX, DTX_MUTE or SPEECH */ __device__ static enum DTXStateType rx_dtx_handler(dtx_decState *st, enum RXFrameType frame_type) { enum DTXStateType newState; enum DTXStateType encState; /* DTX if SID frame or previously in DTX{_MUTE} and (NO_RX OR BAD_SPEECH) */ if (table_SID[frame_type] | ((st->dtxGlobalState != SPEECH) & table_speech_bad[frame_type])) { newState = DTX; /* stay in mute for these input types */ if ((st->dtxGlobalState == DTX_MUTE) & table_mute[frame_type]) { newState = DTX_MUTE; } /* * evaluate if noise parameters are too old * since_last_sid is reset when CN parameters have been updated */ st->since_last_sid += 1; /* no update of sid parameters in DTX for a long while */ if ((frame_type != RX_SID_UPDATE) & (st->since_last_sid > DTX_MAX_EMPTY_THRESH)) { newState = DTX_MUTE; } } else { newState = SPEECH; st->since_last_sid = 0; } /* * reset the decAnaElapsed Counter when receiving CNI data the first * time, to robustify counter missmatch after handover * this might delay the bwd CNI analysis in the new decoder slightly. */ if ((st->data_updated == 0) & (frame_type == RX_SID_UPDATE)) { st->decAnaElapsedCount = 0; } /* * update the SPE-SPD DTX hangover synchronization * to know when SPE has added dtx hangover */ st->decAnaElapsedCount += 1; st->dtxHangoverAdded = 0; encState = SPEECH; if (table_DTX[frame_type]) { encState = DTX; if ((frame_type == RX_NO_DATA) & (newState == SPEECH)) { encState = SPEECH; } } if (encState == SPEECH) { st->dtxHangoverCount = DTX_HANG_CONST; } else { if (st->decAnaElapsedCount > DTX_ELAPSED_FRAMES_THRESH) { st->dtxHangoverAdded = 1; st->decAnaElapsedCount = 0; st->dtxHangoverCount = 0; } else if (st->dtxHangoverCount == 0) { st->decAnaElapsedCount = 0; } else { st->dtxHangoverCount -= 1; } } if (newState != SPEECH) { /* * DTX or DTX_MUTE * CN data is not in a first SID, first SIDs are marked as SID_BAD * but will do backwards analysis if a hangover period has been added * according to the state machine above */ st->sid_frame = 0; st->valid_data = 0; if (frame_type == RX_SID_FIRST) { st->sid_frame = 1; } else if (frame_type == RX_SID_UPDATE) { st->sid_frame = 1; st->valid_data = 1; } else if (frame_type == RX_SID_BAD) { st->sid_frame = 1; /* use old data */ st->dtxHangoverAdded = 0; } } /* newState is used by both SPEECH AND DTX synthesis routines */ return newState; } /* * Lsf_lsp * * * Parameters: * lsf I: vector of LSFs * lsp O: vector of LSPs * * Function: * Transformation lsf to lsp, order M * * Returns: * void */ __device__ static void Lsf_lsp(Word32 lsf[], Word32 lsp[]) { Word32 i, ind, offset, tmp; for (i = 0; i < M; i++) { /* ind = b8-b15 of lsf[i] */ ind = lsf[i] >> 8; /* offset = b0-b7 of lsf[i] */ offset = lsf[i] & 0x00ff; /* lsp[i] = table[ind]+ ((table[ind+1]-table[ind])*offset) / 256 */ tmp = ((cos_table[ind + 1] - cos_table[ind])*offset) << 1; lsp[i] = cos_table[ind] + (tmp >> 9); } return; } /* * D_plsf_3 * * * Parameters: * st->past_lsf_q I: Past dequantized LFSs * st->past_r_q B: past quantized residual * mode I: AMR mode * bfi B: 
bad frame indicator * indice I: quantization indices of 3 submatrices, Q0 * lsp1_q O: quantized 1st LSP vector * * Function: * Decodes the LSP parameters using the received quantization indices. * 1st order MA prediction and split by 3 vector quantization (split-VQ) * * Returns: * void */ __device__ static void D_plsf_3(D_plsfState *st, enum Mode mode, Word16 bfi, Word16 * indice, Word32 *lsp1_q) { Word32 lsf1_r[M], lsf1_q[M]; Word32 i, index, temp; const Word32 *p_cb1, *p_cb2, *p_cb3, *p_dico; /* if bad frame */ if (bfi != 0) { /* use the past LSFs slightly shifted towards their mean */ for (i = 0; i < M; i++) { /* lsfi_q[i] = ALPHA*past_lsf_q[i] + ONE_ALPHA*meanLsf[i]; */ lsf1_q[i] = ((st->past_lsf_q[i] * ALPHA) >> 15) + ((mean_lsf_3[i] * ONE_ALPHA) >> 15); } /* estimate past quantized residual to be used in next frame */ if (mode != MRDTX) { for (i = 0; i < M; i++) { /* temp = meanLsf[i] + pastR2_q[i] * pred_fac; */ temp = mean_lsf_3[i] + ((st->past_r_q[i] * pred_fac[i]) >> 15); st->past_r_q[i] = lsf1_q[i] - temp; } } else { for (i = 0; i < M; i++) { /* temp = meanLsf[i] + pastR2_q[i]; */ temp = mean_lsf_3[i] + st->past_r_q[i]; st->past_r_q[i] = lsf1_q[i] - temp; } } } /* if good LSFs received */ else { if ((mode == MR475) | (mode == MR515)) { /* MR475, MR515 */ p_cb1 = dico1_lsf_3; p_cb2 = dico2_lsf_3; p_cb3 = mr515_3_lsf; } else if (mode == MR795) { /* MR795 */ p_cb1 = mr795_1_lsf; p_cb2 = dico2_lsf_3; p_cb3 = dico3_lsf_3; } else { /* MR59, MR67, MR74, MR102, MRDTX */ p_cb1 = dico1_lsf_3; p_cb2 = dico2_lsf_3; p_cb3 = dico3_lsf_3; } /* decode prediction residuals from 3 received indices */ index = *indice++; p_dico = &p_cb1[index + index + index]; index = *indice++; lsf1_r[0] = *p_dico++; lsf1_r[1] = *p_dico++; lsf1_r[2] = *p_dico++; if ((mode == MR475) | (mode == MR515)) { /* MR475, MR515 only using every second entry */ index = index << 1; } p_dico = &p_cb2[index + index + index]; index = *indice++; lsf1_r[3] = *p_dico++; lsf1_r[4] = *p_dico++; lsf1_r[5] = *p_dico++; p_dico = &p_cb3[index << 2]; lsf1_r[6] = *p_dico++; lsf1_r[7] = *p_dico++; lsf1_r[8] = *p_dico++; lsf1_r[9] = *p_dico++; /* Compute quantized LSFs and update the past quantized residual */ if (mode != MRDTX) { for (i = 0; i < M; i++) { lsf1_q[i] = lsf1_r[i] + (mean_lsf_3[i] + ((st->past_r_q[i] * pred_fac[i]) >> 15)); } memcpy(st->past_r_q, lsf1_r, M << 2); } else { for (i = 0; i < M; i++) { lsf1_q[i] = lsf1_r[i] + (mean_lsf_3[i] + st->past_r_q[i]); } memcpy(st->past_r_q, lsf1_r, M << 2); } } /* verification that LSFs has minimum distance of LSF_GAP Hz */ temp = LSF_GAP; for (i = 0; i < M; i++) { if (lsf1_q[i] < temp) { lsf1_q[i] = temp; } temp = lsf1_q[i] + LSF_GAP; } memcpy(st->past_lsf_q, lsf1_q, M << 2); /* convert LSFs to the cosine domain */ Lsf_lsp(lsf1_q, lsp1_q); return; } /* * pseudonoise * * * Parameters: * shift_reg B: Old CN generator shift register state * no_bits I: Number of bits * * Function: * pseudonoise * * Returns: * noise_bits */ __device__ static Word32 pseudonoise(Word32 *shift_reg, Word32 no_bits) { Word32 noise_bits, Sn, i; Word32 s_reg; s_reg = *shift_reg; noise_bits = 0; for (i = 0; i < no_bits; i++) { /* State n == 31 */ Sn = s_reg & 0x00000001L; /* State n == 3 */ if (s_reg & 0x10000000L) { Sn = Sn ^ 0x1L; } else { Sn = Sn ^ 0x0L; } noise_bits = (noise_bits << 1) | (s_reg & 1); s_reg = s_reg >> 1; if (Sn & 1) { s_reg = s_reg | 0x40000000L; } } *shift_reg = s_reg; return noise_bits; } /* * Lsp_lsf * * * Parameters: * lsp I: LSP vector (range: -1<=val<1) * lsf O: LSF vector Old CN generator 
shift register state * * Function: * Transformation lsp to lsf, LPC order M * lsf[i] = arccos(lsp[i])/(2*pi) * * Returns: * void */ __device__ static void Lsp_lsf(Word32 lsp[], Word32 lsf[]) { Word32 i, ind = 63; /* begin at end of table -1 */ for (i = M - 1; i >= 0; i--) { /* find value in table that is just greater than lsp[i] */ while (cos_table[ind] < lsp[i]) { ind--; } lsf[i] = ((((lsp[i] - cos_table[ind]) * acos_slope[ind]) + 0x800) >> 12) + (ind << 8); } return; } /* * Reorder_lsf * * * Parameters: * lsf B: vector of LSFs (range: 0<=val<=0.5) * min_dist I: minimum required distance * * Function: * Make sure that the LSFs are properly ordered and to keep a certain minimum * distance between adjacent LSFs. LPC order = M. * * Returns: * void */ __device__ static void Reorder_lsf(Word32 *lsf, Word32 min_dist) { Word32 lsf_min, i; lsf_min = min_dist; for (i = 0; i < M; i++) { if (lsf[i] < lsf_min) { lsf[i] = lsf_min; } lsf_min = lsf[i] + min_dist; } } /* VC5.0 Global optimization does not work with this function */ #if _MSC_VER == 1100 #pragma optimize( "g", off ) #endif /* * Get_lsp_pol * * * Parameters: * lsp I: line spectral frequencies * f O: polynomial F1(z) or F2(z) * * Function: * Find the polynomial F1(z) or F2(z) from the LSPs. * * F1(z) = product ( 1 - 2 lsp[i] z^-1 + z^-2 ) * i=0,2,4,6,8 * F2(z) = product ( 1 - 2 lsp[i] z^-1 + z^-2 ) * i=1,3,5,7,9 * * where lsp[] is the LSP vector in the cosine domain. * * The expansion is performed using the following recursion: * * f[0] = 1 * b = -2.0 * lsp[0] * f[1] = b * for i=2 to 5 do * b = -2.0 * lsp[2*i-2]; * f[i] = b*f[i-1] + 2.0*f[i-2]; * for j=i-1 down to 2 do * f[j] = f[j] + b*f[j-1] + f[j-2]; * f[1] = f[1] + b; * * Returns: * void */ __device__ static void Get_lsp_pol(Word32 *lsp, Word32 *f) { volatile Word32 f0, f1, f2, f3, f4, f5; Word32 l1, l2, l3, l4; /* f[0] = 1.0; */ f0 = 16777216L; /* f1 = *lsp * -1024; */ f1 = -lsp[0] << 10; l1 = lsp[2]; l2 = lsp[4]; l3 = lsp[6]; l4 = lsp[8]; f2 = f0 << 1; f2 -= (((f1 >> 16) * l1) + (((f1 & 0xFFFE) * l1) >> 16)) << 2; f1 -= l1 << 10; f3 = f1 << 1; f3 -= (((f2 >> 16) * l2) + (((f2 & 0xFFFE) * l2) >> 16)) << 2; f2 += f0; f2 -= (((f1 >> 16) * l2) + (((f1 & 0xFFFE) * l2) >> 16)) << 2; f1 -= l2 << 10; f4 = f2 << 1; f4 -= (((f3 >> 16) * l3) + (((f3 & 0xFFFE) * l3) >> 16)) << 2; f3 += f1; f3 -= (((f2 >> 16) * l3) + (((f2 & 0xFFFE) * l3) >> 16)) << 2; f2 += f0; f2 -= (((f1 >> 16) * l3) + (((f1 & 0xFFFE) * l3) >> 16)) << 2; f1 -= l3 << 10; f5 = f3 << 1; f5 -= (((f4 >> 16) * l4) + (((f4 & 0xFFFE) * l4) >> 16)) << 2; f4 += f2; f4 -= (((f3 >> 16) * l4) + (((f3 & 0xFFFE) * l4) >> 16)) << 2; f3 += f1; f3 -= (((f2 >> 16) * l4) + (((f2 & 0xFFFE) * l4) >> 16)) << 2; f2 += f0; f2 -= (((f1 >> 16) * l4) + (((f1 & 0xFFFE) * l4) >> 16)) << 2; f1 -= l4 << 10; f[0] = f0; f[1] = f1; f[2] = f2; f[3] = f3; f[4] = f4; f[5] = f5; return; } #if _MSC_VER == 1100 #pragma optimize( "", on ) #endif /* * Lsp_Az * * * Parameters: * lsp I: Line spectral frequencies * a O: Predictor coefficients * * Function: * Converts from the line spectral pairs (LSP) to LP coefficients, * for a 10th order filter. 
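* (The lsp[] input is in the cosine domain; the resulting a[] coefficients are in Q12, so a[0] = 4096 represents 1.0.)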
* * Find the coefficients of F1(z) and F2(z) * Multiply F1(z) by 1+z^{-1} and F2(z) by 1-z^{-1} * A(z) = ( F1(z) + F2(z) ) / 2 * * Returns: * void */ __device__ static void Lsp_Az(Word32 lsp[], Word32 a[]) { Word32 f1[6], f2[6]; Word32 T0, i, j; Get_lsp_pol(&lsp[0], f1); Get_lsp_pol(&lsp[1], f2); for (i = 5; i > 0; i--) { f1[i] += f1[i - 1]; f2[i] -= f2[i - 1]; } a[0] = 4096; for (i = 1, j = 10; i <= 5; i++, j--) { T0 = f1[i] + f2[i]; a[i] = (Word16)(T0 >> 13); /* emulate fixed point bug */ if ((T0 & 4096) != 0) { a[i]++; } T0 = f1[i] - f2[i]; a[j] = (Word16)(T0 >> 13); /* emulate fixed point bug */ if ((T0 & 4096) != 0) { a[j]++; } } return; } /* * A_Refl * * * Parameters: * a I: Directform coefficients * refl O: Reflection coefficients * * Function: * Converts from the directform coefficients to reflection coefficients * * Returns: * void */ __device__ static void A_Refl(Word32 a[], Word32 refl[]) { /* local variables */ int normShift; Word32 aState[M], bState[M]; Word32 normProd, acc, temp, mult, scale, i, j; /* initialize states */ memcpy(aState, a, M << 2); /* backward Levinson recursion */ for (i = M - 1; i >= 0; i--) { if (labs(aState[i]) >= 4096) { goto ExitRefl; } refl[i] = aState[i] << 3; temp = (refl[i] * refl[i]) << 1; acc = (MAX_32 - temp); normShift = 0; if (acc != 0) { temp = acc; while (!(temp & 0x40000000)) { normShift++; temp = temp << 1; } } else { normShift = 0; } scale = 15 - normShift; acc = (acc << normShift); temp = (acc + (Word32)0x00008000L); if (temp > 0) { normProd = temp >> 16; mult = 0x20000000L / normProd; } else mult = 16384; for (j = 0; j < i; j++) { acc = aState[j] << 16; acc -= (refl[i] * aState[i - j - 1]) << 1; temp = (acc + (Word32)0x00008000L) >> 16; temp = (mult * temp) << 1; if (scale > 0) { if ((temp & ((Word32)1 << (scale - 1))) != 0) { temp = (temp >> scale) + 1; } else temp = (temp >> scale); } else temp = (temp >> scale); if (labs(temp) > 32767) { goto ExitRefl; } bState[j] = temp; } memcpy(aState, bState, i << 2); } return; ExitRefl: memset(refl, 0, M << 2); } /* * Log2_norm * * * Parameters: * x I: input value * exp I: exponent * exponent O: Integer part of Log2. (range: 0<=val<=30) * fraction O: Fractional part of Log2. (range: 0<=val<1) * * Function: * Computes log2 * * Computes log2(L_x, exp), where L_x is positive and * normalized, and exp is the normalisation exponent * If L_x is negative or zero, the result is 0. * * The function Log2(L_x) is approximated by a table and linear * interpolation. The following steps are used to compute Log2(L_x) * * exponent = 30-normExponent * i = bit25-b31 of L_x; 32<=i<=63 (because of normalization). * a = bit10-b24 * i -=32 * fraction = table[i]<<16 - (table[i] - table[i+1]) * a * 2 * * Returns: * void */ __device__ static void Log2_norm(Word32 x, Word32 exp, Word32 *exponent, Word32 * fraction) { Word32 y, i, a; if (x <= 0) { *exponent = 0; *fraction = 0; return; } /* Extract b25-b31 */ i = x >> 25; i = i - 32; /* Extract b10-b24 of fraction */ a = x >> 9; a = a & 0xFFFE; /* 2a */ /* fraction */ y = (log2_table[i] << 16) - a * (log2_table[i] - log2_table[i + 1]); *fraction = y >> 16; *exponent = 30 - exp; return; } /* * Log2 * * * Parameters: * x I: input value * exponent O: Integer part of Log2. (range: 0<=val<=30) * fraction O: Fractional part of Log2. (range: 0<=val<1) * * Function: * Computes log2(L_x) * If x is negative or zero, the result is 0. 
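* For example (assuming the usual log2_table with log2_table[0] == 0), Log2(1 << n) for 0 <= n <= 30 yields exponent = n and fraction = 0.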
* * Returns: * void */ __device__ static void Log2(Word32 x, Word32 *exponent, Word32 *fraction) { int tmp, exp = 0; if (x != 0) { tmp = x; while (!((tmp & 0x80000000) ^ ((tmp & 0x40000000) << 1))) { exp++; tmp = tmp << 1; } } Log2_norm(x << exp, exp, exponent, fraction); } /* * Pow2 * * * Parameters: * exponent I: Integer part. (range: 0<=val<=30) * fraction O: Fractional part. (range: 0.0<=val<1.0) * * Function: * pow(2.0, exponent.fraction) * * The function Pow2(L_x) is approximated by a table and linear interpolation. * * i = bit10-b15 of fraction, 0 <= i <= 31 * a = biT0-b9 of fraction * x = table[i]<<16 - (table[i] - table[i+1]) * a * 2 * x = L_x >> (30-exponent) (with rounding) * * Returns: * result (range: 0<=val<=0x7fffffff) */ __device__ static Word32 Pow2(Word32 exponent, Word32 fraction) { Word32 i, a, tmp, x, exp; /* Extract b10-b16 of fraction */ i = fraction >> 10; /* Extract b0-b9 of fraction */ a = (fraction << 5) & 0x7fff; /* table[i] << 16 */ x = pow2_table[i] << 16; /* table[i] - table[i+1] */ tmp = pow2_table[i] - pow2_table[i + 1]; /* L_x -= tmp*a*2 */ x -= (tmp * a) << 1; if (exponent >= -1) { exp = (30 - exponent); /* Rounding */ if ((x & ((Word32)1 << (exp - 1))) != 0) { x = (x >> exp) + 1; } else x = x >> exp; } else x = 0; return(x); } /* * Build_CN_code * * * Parameters: * seed B: Old CN generator shift register state * cod O: Generated CN fixed codebook vector * * Function: * Generate CN fixed codebook vector * * Returns: * void */ __device__ static void Build_CN_code(Word32 *seed, Word32 cod[]) { Word32 i, j, k; memset(cod, 0, L_SUBFR << 2); for (k = 0; k < 10; k++) { i = pseudonoise(seed, 2); /* generate pulse position */ i = (i * 20) >> 1; i = (i + k); j = pseudonoise(seed, 1); /* generate sign */ if (j > 0) { cod[i] = 4096; } else { cod[i] = -4096; } } return; } /* * Build_CN_param * * * Parameters: * seed B: Old CN generator shift register state * nParam I: number of params * paramSizeTable I: size of params * parm O: CN Generated params * * Function: * Generate parameters for comfort noise generation * * Returns: * void */ __device__ static void Build_CN_param(Word16 *seed, enum Mode mode, Word16 parm[]) { Word32 i; const Word32 *p; *seed = (Word16)((*seed * 31821) + 13849L); p = &window_200_40[*seed & 0x7F]; switch (mode) { case MR122: for (i = 0; i < PRMNO_MR122; i++) { parm[i] = (Word16)(*p++ & ~(0xFFFF << bitno_MR122[i])); } break; case MR102: for (i = 0; i < PRMNO_MR102; i++) { parm[i] = (Word16)(*p++ & ~(0xFFFF << bitno_MR102[i])); } break; case MR795: for (i = 0; i < PRMNO_MR795; i++) { parm[i] = (Word16)(*p++ & ~(0xFFFF << bitno_MR795[i])); } break; case MR74: for (i = 0; i < PRMNO_MR74; i++) { parm[i] = (Word16)(*p++ & ~(0xFFFF << bitno_MR74[i])); } break; case MR67: for (i = 0; i < PRMNO_MR67; i++) { parm[i] = (Word16)(*p++ & ~(0xFFFF << bitno_MR67[i])); } break; case MR59: for (i = 0; i < PRMNO_MR59; i++) { parm[i] = (Word16)(*p++ & ~(0xFFFF << bitno_MR59[i])); } break; case MR515: for (i = 0; i < PRMNO_MR515; i++) { parm[i] = (Word16)(*p++ & ~(0xFFFF << bitno_MR515[i])); } break; case MR475: for (i = 0; i < PRMNO_MR475; i++) { parm[i] = (Word16)(*p++ & ~(0xFFFF << bitno_MR475[i])); } break; } } /* * Syn_filt * * * Parameters: * a I: prediction coefficients [M+1] * x I: input signal * y O: output signal * lg I: size of filtering * mem B: memory associated with this filtering * update I: 0=no update, 1=update of memory. * * Function: * Perform synthesis filtering through 1/A(z). 
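* In Q12 arithmetic this corresponds to y[n] = (a[0]*x[n] - sum(a[i]*y[n-i], i=1..10) + 0x800) >> 12, saturating to the 16-bit range on overflow.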
* * Returns: * void */ __device__ static Word32 Syn_filt(Word32 a[], Word32 x[], Word32 y[], Word32 lg, Word32 mem[] , Word32 update) { Word32 tmp[50]; /* malloc is slow */ Word32 s, a0, overflow = 0; Word32 *yy, *yy_limit; /* Copy mem[] to yy[] */ memcpy(tmp, mem, 40); yy = tmp + M; yy_limit = yy + lg; a0 = a[0]; /* Do the filtering. */ while (yy < yy_limit) { s = *x++ * a0; s -= yy[-1] * a[1]; s -= yy[-2] * a[2]; s -= yy[-3] * a[3]; s -= yy[-4] * a[4]; s -= yy[-5] * a[5]; s -= yy[-6] * a[6]; s -= yy[-7] * a[7]; s -= yy[-8] * a[8]; s -= yy[-9] * a[9]; s -= yy[-10] * a[10]; if (labs(s) < 0x7ffffff) *yy = (s + 0x800L) >> 12; else if (s > 0) { *yy = 32767; overflow = 1; } else { *yy = -32768; overflow = 1; } yy++; } memcpy(y, &tmp[M], lg << 2); /* Update of memory if update==1 */ if (update) { memcpy(mem, &y[lg - M], 40); } return overflow; } /* * Syn_filt_overflow * * * Parameters: * a I: prediction coefficients [M+1] * x I: input signal * y O: output signal * lg I: size of filtering * mem B: memory associated with this filtering * update I: 0=no update, 1=update of memory. * * Function: * Perform synthesis filtering through 1/A(z). * Saturate after every multiplication. * Returns: * void */ __device__ static void Syn_filt_overflow(Word32 a[], Word32 x[], Word32 y[], Word32 lg, Word32 mem[] , Word32 update) { Word32 tmp[50]; /* malloc is slow */ Word32 i, j, s, a0; Word32 *yy; /* Copy mem[] to yy[] */ memcpy(tmp, mem, 40); yy = tmp + M; a0 = a[0]; /* Do the filtering. */ for (i = 0; i < lg; i++) { s = x[i] * a0; for (j = 1; j <= M; j++) { s -= a[j] * yy[-j]; if (s > 1073741823) { s = 1073741823; } else if (s < -1073741824) { s = -1073741824; } } if (labs(s) < 0x7FFE800) *yy = (s + 0x800L) >> 12; else if (s > 0) { *yy = 32767; } else { *yy = -32768; } yy++; } memcpy(y, &tmp[M], lg << 2); /* Update of memory if update==1 */ if (update) { memcpy(mem, &y[lg - M], 40); } return; } /* * dtx_dec * * * Parameters: * st B: DTX state struct * mem_syn I: AMR decoder state * lsfState B: LSF state struct * pred_state->past_qua_en O: table of past quantized energies * pred_state->past_qua_en_MR122 O: table of past quantized energies MR122 * averState->hangVar O: * averState->hangCount O: hangover variable * new_state I: new DTX state * mode I: AMR mode * parm I: vector of synthesis parameters * synth O: synthesised speech * A_t O: decoded LP filter in 4 subframes * * Function: * DTX * * Returns: * void */ __device__ static void dtx_dec(dtx_decState *st, Word32 *mem_syn, D_plsfState *lsfState, dec_gc_predState *pred_state, Cb_gain_averageState *averState, enum DTXStateType new_state, enum Mode mode, Word16 parm[], Word32 synth[], Word32 A_t[]) { Word32 ex[L_SUBFR], acoeff[11], acoeff_variab[M + 1], lsp_int[M]; Word32 refl[M], lsf[M], lsf_int[M], lsf_int_variab[M], lsp_int_variab[M]; Word32 i, j, int_fac, log_en_int, pred_err, log_pg_e, log_pg_m, log_pg; Word32 negative, lsf_mean, lsf_variab_index, lsf_variab_factor, ptr; Word16 log_en_index, log_en_int_e, log_en_int_m, level, ma_pred_init, tmp_int_length; if ((st->dtxHangoverAdded != 0) & (st->sid_frame != 0)) { /* * sidFirst after dtx hangover period * or sidUpd after dtxhangover */ /* set log_en_adjust to correct value */ st->log_en_adjust = dtx_log_en_adjust[mode]; ptr = st->lsf_hist_ptr + M; if (ptr == 80) { ptr = 0; } memcpy(&st->lsf_hist[ptr], &st->lsf_hist[st->lsf_hist_ptr], M << 2); ptr = st->log_en_hist_ptr + 1; if (ptr == DTX_HIST_SIZE) { ptr = 0; } st->log_en_hist[ptr] = st->log_en_hist[st->log_en_hist_ptr]; /* Q11 */ /* * compute mean log 
energy and lsp * from decoded signal (SID_FIRST) */ st->log_en = 0; memset(lsf, 0, M << 2); /* average energy and lsp */ for (i = 0; i < DTX_HIST_SIZE; i++) { st->log_en = st->log_en + (st->log_en_hist[i] >> 3); for (j = 0; j < M; j++) { lsf[j] += st->lsf_hist[i * M + j]; } } for (j = 0; j < M; j++) { lsf[j] = lsf[j] >> 3; /* divide by 8 */ } Lsf_lsp(lsf, st->lsp); /* * make log_en speech coder mode independent * added again later before synthesis */ st->log_en = st->log_en - st->log_en_adjust; /* compute lsf variability vector */ memcpy(st->lsf_hist_mean, st->lsf_hist, 80 << 2); for (i = 0; i < M; i++) { lsf_mean = 0; /* compute mean lsf */ for (j = 0; j < 8; j++) { lsf_mean += st->lsf_hist_mean[i + j * M]; } lsf_mean = lsf_mean >> 3; /* * subtract mean and limit to within reasonable limits * moreover the upper lsf's are attenuated */ for (j = 0; j < 8; j++) { /* subtract mean */ st->lsf_hist_mean[i + j * M] = st->lsf_hist_mean[i + j * M] - lsf_mean; /* attenuate deviation from mean, especially for upper lsf's */ st->lsf_hist_mean[i + j * M] = (st->lsf_hist_mean[i + j * M] * lsf_hist_mean_scale[i]) >> 15; /* limit the deviation */ if (st->lsf_hist_mean[i + j * M] < 0) { negative = 1; } else { negative = 0; } st->lsf_hist_mean[i + j * M] = labs(st->lsf_hist_mean[i + j * M]); /* apply soft limit */ if (st->lsf_hist_mean[i + j * M] > 655) { st->lsf_hist_mean[i + j * M] = 655 + ((st->lsf_hist_mean[i + j * M] - 655) >> 2); } /* apply hard limit */ if (st->lsf_hist_mean[i + j * M] > 1310) { st->lsf_hist_mean[i + j * M] = 1310; } if (negative != 0) { st->lsf_hist_mean[i + j * M] = -st->lsf_hist_mean[i + j * M]; } } } } if (st->sid_frame != 0) { /* * Set old SID parameters, always shift * even if there is no new valid_data */ memcpy(st->lsp_old, st->lsp, M << 2); st->old_log_en = st->log_en; if (st->valid_data != 0) /* new data available (no CRC) */ { /* Compute interpolation factor, since the division only works * for values of since_last_sid < 32 we have to limit the * interpolation to 32 frames */ tmp_int_length = st->since_last_sid; st->since_last_sid = 0; if (tmp_int_length > 32) { tmp_int_length = 32; } if (tmp_int_length >= 2) { st->true_sid_period_inv = 0x2000000 / (tmp_int_length << 10); } else { st->true_sid_period_inv = 16384; /* 0.5 it Q15 */ } memcpy(lsfState->past_r_q, &past_rq_init[parm[0] * M], M << 2); D_plsf_3(lsfState, MRDTX, 0, &parm[1], st->lsp); /* reset for next speech frame */ memset(lsfState->past_r_q, 0, M << 2); log_en_index = parm[4]; /* Q11 and divide by 4 */ st->log_en = (Word16)(log_en_index << 9); /* Subtract 2.5 in Q11 */ st->log_en = (Word16)(st->log_en - 5120); /* Index 0 is reserved for silence */ if (log_en_index == 0) { st->log_en = MIN_16; } /* * no interpolation at startup after coder reset * or when SID_UPD has been received right after SPEECH */ if ((st->data_updated == 0) || (st->dtxGlobalState == SPEECH)) { memcpy(st->lsp_old, st->lsp, M << 2); st->old_log_en = st->log_en; } } /* endif valid_data */ /* initialize gain predictor memory of other modes */ ma_pred_init = (Word16)((st->log_en >> 1) - 9000); if (ma_pred_init > 0) { ma_pred_init = 0; } if (ma_pred_init < -14436) { ma_pred_init = -14436; } pred_state->past_qua_en[0] = ma_pred_init; pred_state->past_qua_en[1] = ma_pred_init; pred_state->past_qua_en[2] = ma_pred_init; pred_state->past_qua_en[3] = ma_pred_init; /* past_qua_en for other modes than MR122 */ ma_pred_init = (Word16)((5443 * ma_pred_init) >> 15); /* scale down by factor 20*log10(2) in Q15 */ pred_state->past_qua_en_MR122[0] = 
ma_pred_init; pred_state->past_qua_en_MR122[1] = ma_pred_init; pred_state->past_qua_en_MR122[2] = ma_pred_init; pred_state->past_qua_en_MR122[3] = ma_pred_init; } /* endif sid_frame */ /* * CN generation * recompute level adjustment factor Q11 * st->log_en_adjust = 0.9*st->log_en_adjust + * 0.1*dtx_log_en_adjust[mode]); */ st->log_en_adjust = (Word16)(((st->log_en_adjust * 29491) >> 15) + (( (dtx_log_en_adjust[mode] << 5) * 3277) >> 20)); /* Interpolate SID info */ /* Q10 */ if (st->since_last_sid > 30) int_fac = 32767; else int_fac = (Word16)((st->since_last_sid + 1) << 10); /* Q10 * Q15 -> Q10 */ int_fac = (int_fac * st->true_sid_period_inv) >> 15; /* Maximize to 1.0 in Q10 */ if (int_fac > 1024) { int_fac = 1024; } /* Q10 -> Q14 */ int_fac = (Word16)(int_fac << 4); /* Q14 * Q11->Q26 */ log_en_int = (int_fac * st->log_en) << 1; for (i = 0; i < M; i++) { /* Q14 * Q15 -> Q14 */ lsp_int[i] = (int_fac * st->lsp[i]) >> 15; } /* 1-k in Q14 */ int_fac = 16384 - int_fac; /* (Q14 * Q11 -> Q26) + Q26 -> Q26 */ log_en_int += (int_fac * st->old_log_en) << 1; for (i = 0; i < M; i++) { /* Q14 + (Q14 * Q15 -> Q14) -> Q14 */ lsp_int[i] = lsp_int[i] + ((int_fac * st->lsp_old[i]) >> 15); /* Q14 -> Q15 */ lsp_int[i] = lsp_int[i] << 1; } /* compute the amount of lsf variability */ /* -0.6 in Q12 */ lsf_variab_factor = st->log_pg_mean - 2457; /* *0.3 Q12*Q15 -> Q12 */ lsf_variab_factor = 4096 - ((lsf_variab_factor * 9830) >> 15); /* limit to values between 0..1 in Q12 */ if (lsf_variab_factor >= 4096) { lsf_variab_factor = 32767; } else if (lsf_variab_factor < 0) { lsf_variab_factor = 0; } else lsf_variab_factor = lsf_variab_factor << 3; /* -> Q15 */ /* get index of vector to do variability with */ lsf_variab_index = pseudonoise(&st->pn_seed_rx, 3); /* convert to lsf */ Lsp_lsf(lsp_int, lsf_int); /* apply lsf variability */ memcpy(lsf_int_variab, lsf_int, M << 2); for (i = 0; i < M; i++) { lsf_int_variab[i] = lsf_int_variab[i] + ((lsf_variab_factor * st-> lsf_hist_mean[i + lsf_variab_index * M]) >> 15); } /* make sure that LSP's are ordered */ Reorder_lsf(lsf_int, LSF_GAP); Reorder_lsf(lsf_int_variab, LSF_GAP); /* copy lsf to speech decoders lsf state */ memcpy(lsfState->past_lsf_q, lsf_int, M << 2); /* convert to lsp */ Lsf_lsp(lsf_int, lsp_int); Lsf_lsp(lsf_int_variab, lsp_int_variab); /* Compute acoeffs Q12 acoeff is used for level * normalization and Post_Filter, acoeff_variab is * used for synthesis filter * by doing this we make sure that the level * in high frequenncies does not jump up and down */ Lsp_Az(lsp_int, acoeff); Lsp_Az(lsp_int_variab, acoeff_variab); /* For use in Post_Filter */ memcpy(&A_t[0], acoeff, MP1 << 2); memcpy(&A_t[MP1], acoeff, MP1 << 2); memcpy(&A_t[MP1 << 1], acoeff, MP1 << 2); memcpy(&A_t[MP1 + MP1 + MP1], acoeff, MP1 << 2); /* Compute reflection coefficients Q15 */ A_Refl(&acoeff[1], refl); /* Compute prediction error in Q15 */ /* 0.99997 in Q15 */ pred_err = MAX_16; for (i = 0; i < M; i++) { pred_err = (pred_err * (MAX_16 - ((refl[i] * refl[i]) >> 15))) >> 15; } /* compute logarithm of prediction gain */ Log2(pred_err, &log_pg_e, &log_pg_m); /* convert exponent and mantissa to Word16 Q12 */ /* Q12 */ log_pg = (log_pg_e - 15) << 12; /* saturate */ if (log_pg < -32768) { log_pg = -32768; } log_pg = (-(log_pg + (log_pg_m >> 3))) >> 1; st->log_pg_mean = (Word16)(((29491 * st->log_pg_mean) >> 15) + ((3277 * log_pg) >> 15)); /* Compute interpolated log energy */ /* Q26 -> Q16 */ log_en_int = log_en_int >> 10; /* Add 4 in Q16 */ log_en_int += 262144L; /* subtract prediction gain */ 
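   /*
    * Note (added for readability): at this point log_en_int holds the target
    * comfort noise excitation energy as a base-2 logarithm in Q16.  The code
    * below subtracts the LPC prediction gain (log_pg), adds the mode
    * dependent level adjustment, and splits the result into an integer part
    * (high word) and a fraction that are passed to Pow2() to obtain the
    * linear scale factor 'level' applied to each Build_CN_code() innovation
    * vector.
    */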
log_en_int = log_en_int - (log_pg << 4); /* adjust level to speech coder mode */ log_en_int += st->log_en_adjust << 5; log_en_int_e = (Word16)(log_en_int >> 16); log_en_int_m = (Word16)((log_en_int - (log_en_int_e << 16)) >> 1); /* Q4 */ level = (Word16)(Pow2(log_en_int_e, log_en_int_m)); for (i = 0; i < 4; i++) { /* Compute innovation vector */ Build_CN_code(&st->pn_seed_rx, ex); for (j = 0; j < L_SUBFR; j++) { ex[j] = (level * ex[j]) >> 15; } /* Synthesize */ Syn_filt(acoeff_variab, ex, &synth[i * L_SUBFR], L_SUBFR, mem_syn, 1); } /* next i */ /* reset codebook averaging variables */ averState->hangVar = 20; averState->hangCount = 0; if (new_state == DTX_MUTE) { /* * mute comfort noise as it has been quite a long time since * last SID update was performed */ Word32 num, denom; tmp_int_length = st->since_last_sid; if (tmp_int_length > 32) { tmp_int_length = 32; } if (tmp_int_length == 1) { st->true_sid_period_inv = MAX_16; } else { num = 1024; denom = (tmp_int_length << 10); st->true_sid_period_inv = 0; for (i = 0; i < 15; i++) { st->true_sid_period_inv <<= 1; num <<= 1; if (num >= denom) { num = num - denom; st->true_sid_period_inv += 1; } } } st->since_last_sid = 0; memcpy(st->lsp_old, st->lsp, M << 2); st->old_log_en = st->log_en; /* subtract 1/8 in Q11 i.e -6/8 dB */ st->log_en = st->log_en - 256; if (st->log_en < -32768) st->log_en = -32768; } /* * reset interpolation length timer * if data has been updated. */ if ((st->sid_frame != 0) & ((st->valid_data != 0) || ((st->valid_data == 0) & (st->dtxHangoverAdded != 0)))) { st->since_last_sid = 0; st->data_updated = 1; } return; } /* * lsp_avg * * * Parameters: * st->lsp_meanSave B: LSP averages * lsp I: LSPs * * Function: * Calculate the LSP averages * * Returns: * void */ __device__ static void lsp_avg(lsp_avgState *st, Word32 *lsp) { Word32 i, tmp; for (i = 0; i < M; i++) { /* mean = 0.84*mean */ tmp = (st->lsp_meanSave[i] << 16); tmp -= (EXPCONST * st->lsp_meanSave[i]) << 1; /* Add 0.16 of newest LSPs to mean */ tmp += (EXPCONST * lsp[i]) << 1; /* Save means */ tmp += 0x00008000L; st->lsp_meanSave[i] = tmp >> 16; } return; } /* * Int_lpc_1and3 * * * Parameters: * lsp_old I: LSP vector at the 4th subfr. of past frame [M] * lsp_mid I: LSP vector at the 2nd subframe of present frame [M] * lsp_new I: LSP vector at the 4th subframe of present frame [M] * Az O: interpolated LP parameters in subframes 1 and 3 * [AZ_SIZE] * * Function: * Interpolates the LSPs and converts to LPC parameters * to get a different LP filter in each subframe. * * The 20 ms speech frame is divided into 4 subframes. * The LSPs are quantized and transmitted at the 2nd and * 4th subframes (twice per frame) and interpolated at the * 1st and 3rd subframe. 
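 *
 *    In terms of the code below, the interpolation weights are:
 *       subframe 1:  lsp = 0.5*lsp_old + 0.5*lsp_mid
 *       subframe 2:  lsp = lsp_mid
 *       subframe 3:  lsp = 0.5*lsp_mid + 0.5*lsp_new
 *       subframe 4:  lsp = lsp_new
 *    Each interpolated LSP vector is converted to LP coefficients
 *    with Lsp_Az().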
* * Returns: * void */ __device__ static void Int_lpc_1and3(Word32 lsp_old[], Word32 lsp_mid[], Word32 lsp_new[], Word32 Az[]) { Word32 lsp[M]; Word32 i; /* lsp[i] = lsp_mid[i] * 0.5 + lsp_old[i] * 0.5 */ for (i = 0; i < 10; i++) { lsp[i] = (lsp_mid[i] >> 1) + (lsp_old[i] >> 1); } /* Subframe 1 */ Lsp_Az(lsp, Az); Az += MP1; /* Subframe 2 */ Lsp_Az(lsp_mid, Az); Az += MP1; for (i = 0; i < 10; i++) { lsp[i] = (lsp_mid[i] >> 1) + (lsp_new[i] >> 1); } /* Subframe 3 */ Lsp_Az(lsp, Az); Az += MP1; /* Subframe 4 */ Lsp_Az(lsp_new, Az); return; } /* * Int_lpc_1to3 * * * Parameters: * lsp_old I: LSP vector at the 4th subframe of past frame [M] * lsp_new I: LSP vector at the 4th subframe of present frame [M] * Az O: interpolated LP parameters in all subframes * [AZ_SIZE] * * Function: * Interpolates the LSPs and converts to LPC parameters to get a different * LP filter in each subframe. * * The 20 ms speech frame is divided into 4 subframes. * The LSPs are quantized and transmitted at the 4th * subframes (once per frame) and interpolated at the * 1st, 2nd and 3rd subframe. * * Returns: * void */ __device__ static void Int_lpc_1to3(Word32 lsp_old[], Word32 lsp_new[], Word32 Az[]) { Word32 lsp[M]; Word32 i; for (i = 0; i < 10; i++) { lsp[i] = (lsp_new[i] >> 2) + (lsp_old[i] - (lsp_old[i] >> 2)); } /* Subframe 1 */ Lsp_Az(lsp, Az); Az += MP1; for (i = 0; i < 10; i++) { lsp[i] = (lsp_old[i] >> 1) + (lsp_new[i] >> 1); } /* Subframe 2 */ Lsp_Az(lsp, Az); Az += MP1; for (i = 0; i < 10; i++) { lsp[i] = (lsp_old[i] >> 2) + (lsp_new[i] - (lsp_new[i] >> 2)); } /* Subframe 3 */ Lsp_Az(lsp, Az); Az += MP1; /* Subframe 4 */ Lsp_Az(lsp_new, Az); return; } /* * D_plsf_5 * * * Parameters: * st->past_lsf_q I: Past dequantized LFSs * st->past_r_q B: past quantized residual * bfi B: bad frame indicator * indice I: quantization indices of 3 submatrices, Q0 * lsp1_q O: quantized 1st LSP vector * lsp2_q O: quantized 2nd LSP vector * * Function: * Decodes the 2 sets of LSP parameters in a frame * using the received quantization indices. 
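 *
 *    For good frames, the 5 indices address the split codebooks
 *    dico1_lsf_5 .. dico5_lsf_5; each table entry supplies two residual
 *    coefficients for the first LSF vector and two for the second.
 *    The LSB of the third index is a sign bit that negates the
 *    dico3_lsf_5 entry.  For bad frames (bfi != 0), the past LSFs are
 *    reused, shifted slightly towards their long-term mean.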
* * Returns: * void */ __device__ static void D_plsf_5(D_plsfState *st, Word16 bfi, Word16 *indice, Word32 *lsp1_q , Word32 *lsp2_q) { Word32 lsf1_r[M], lsf2_r[M], lsf1_q[M], lsf2_q[M]; Word32 i, temp1, temp2, sign; const Word32 *p_dico; /* if bad frame */ if (bfi != 0) { /* use the past LSFs slightly shifted towards their mean */ for (i = 0; i < M; i += 2) { /* lsfi_q[i] = ALPHA*st->past_lsf_q[i] + ONE_ALPHA*meanLsf[i]; */ lsf1_q[i] = ((st->past_lsf_q[i] * ALPHA_122) >> 15) + ((mean_lsf_5[i] * ONE_ALPHA_122) >> 15); lsf1_q[i + 1] = ((st->past_lsf_q[i + 1] * ALPHA_122) >> 15) + (( mean_lsf_5[i + 1] * ONE_ALPHA_122) >> 15); } memcpy(lsf2_q, lsf1_q, M << 2); /* estimate past quantized residual to be used in next frame */ for (i = 0; i < M; i += 2) { /* temp = meanLsf[i] + st->past_r_q[i] * LSPPpred_facMR122; */ temp1 = mean_lsf_5[i] + ((st->past_r_q[i] * LSP_PRED_FAC_MR122) >> 15); temp2 = mean_lsf_5[i + 1] + ((st->past_r_q[i + 1] * LSP_PRED_FAC_MR122 ) >> 15); st->past_r_q[i] = lsf2_q[i] - temp1; st->past_r_q[i + 1] = lsf2_q[i + 1] - temp2; } } /* if good LSFs received */ else { /* decode prediction residuals from 5 received indices */ p_dico = &dico1_lsf_5[indice[0] << 2]; lsf1_r[0] = *p_dico++; lsf1_r[1] = *p_dico++; lsf2_r[0] = *p_dico++; lsf2_r[1] = *p_dico++; p_dico = &dico2_lsf_5[indice[1] << 2]; lsf1_r[2] = *p_dico++; lsf1_r[3] = *p_dico++; lsf2_r[2] = *p_dico++; lsf2_r[3] = *p_dico++; sign = (Word16)(indice[2] & 1); i = indice[2] >> 1; p_dico = &dico3_lsf_5[i << 2]; if (sign == 0) { lsf1_r[4] = *p_dico++; lsf1_r[5] = *p_dico++; lsf2_r[4] = *p_dico++; lsf2_r[5] = *p_dico++; } else { lsf1_r[4] = (Word16)(-(*p_dico++)); lsf1_r[5] = (Word16)(-(*p_dico++)); lsf2_r[4] = (Word16)(-(*p_dico++)); lsf2_r[5] = (Word16)(-(*p_dico++)); } p_dico = &dico4_lsf_5[(indice[3] << 2)]; lsf1_r[6] = *p_dico++; lsf1_r[7] = *p_dico++; lsf2_r[6] = *p_dico++; lsf2_r[7] = *p_dico++; p_dico = &dico5_lsf_5[(indice[4] << 2)]; lsf1_r[8] = *p_dico++; lsf1_r[9] = *p_dico++; lsf2_r[8] = *p_dico++; lsf2_r[9] = *p_dico++; /* Compute quantized LSFs and update the past quantized residual */ for (i = 0; i < M; i++) { temp1 = mean_lsf_5[i] + ((st->past_r_q[i] * LSP_PRED_FAC_MR122) >> 15); lsf1_q[i] = lsf1_r[i] + temp1; lsf2_q[i] = lsf2_r[i] + temp1; st->past_r_q[i] = lsf2_r[i]; } } /* verification that LSFs have minimum distance of LSF_GAP Hz */ Reorder_lsf(lsf1_q, LSF_GAP); Reorder_lsf(lsf2_q, LSF_GAP); memcpy(st->past_lsf_q, lsf2_q, M << 2); /* convert LSFs to the cosine domain */ Lsf_lsp(lsf1_q, lsp1_q); Lsf_lsp(lsf2_q, lsp2_q); return; } /* * Dec_lag3 * * * Parameters: * index I: received pitch index * t0_min I: minimum of search range * t0_max I: maximum of search range * i_subfr I: subframe flag * T0_prev I: integer pitch delay of last subframe used * in 2nd and 4th subframes * T0 O: integer part of pitch lag * T0_frac O : fractional part of pitch lag * flag4 I : flag for encoding with 4 bits * Function: * Decoding of fractional pitch lag with 1/3 resolution. * Extract the integer and fraction parts of the pitch lag from * the received adaptive codebook index. * * The fractional lag in 1st and 3rd subframes is encoded with 8 bits * while that in 2nd and 4th subframes is relatively encoded with 4, 5 * and 6 bits depending on the mode. 
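 *
 *    Illustrative example (derived from the code below): in the 1st or 3rd
 *    subframe, index 2 decodes to T0 = 20 with T0_frac = 0, while index 0
 *    decodes to T0 = 19 with T0_frac = 1, i.e. a lag of 19 1/3 samples.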
 *
 * Returns:
 *    void
 */
__device__
static void Dec_lag3(Word32 index, Word32 t0_min, Word32 t0_max,
   Word32 i_subfr, Word32 T0_prev, Word32 *T0, Word32 *T0_frac, Word32 flag4)
{
   Word32 i, tmp_lag;

   /* if 1st or 3rd subframe */
   if (i_subfr == 0) {
      if (index < 197) {
         *T0 = (((index + 2) * 10923) >> 15) + 19;
         i = *T0 + *T0 + *T0;
         *T0_frac = (index - i) + 58;
      }
      else {
         *T0 = index - 112;
         *T0_frac = 0;
      }
   }
   /* 2nd or 4th subframe */
   else {
      if (flag4 == 0) {
         /* 'normal' decoding: either with 5 or 6 bit resolution */
         i = (((index + 2) * 10923) >> 15) - 1;
         *T0 = i + t0_min;
         i = i + i + i;
         *T0_frac = (index - 2) - i;
      }
      else {
         /* decoding with 4 bit resolution */
         tmp_lag = T0_prev;

         if ((tmp_lag - t0_min) > 5)
            tmp_lag = t0_min + 5;

         if ((t0_max - tmp_lag) > 4)
            tmp_lag = t0_max - 4;

         if (index < 4) {
            i = (tmp_lag - 5);
            *T0 = i + index;
            *T0_frac = 0;
         }
         else {
            if (index < 12) {
               i = (((index - 5) * 10923) >> 15) - 1;
               *T0 = i + tmp_lag;
               i = i + i + i;
               *T0_frac = (index - 9) - i;
            }
            else {
               i = (index - 12) + tmp_lag;
               *T0 = i + 1;
               *T0_frac = 0;
            }
         }
      }   /* end if (decoding with 4 bit resolution) */
   }
   return;
}


/*
 * Pred_lt_3or6_40
 *
 *
 * Parameters:
 *    exc      B: excitation buffer
 *    T0       I: integer pitch lag
 *    frac     I: fraction of lag
 *    flag3    I: if set, upsampling rate = 3 (6 otherwise)
 *
 * Function:
 *    Compute the result of long term prediction with fractional
 *    interpolation of resolution 1/3 or 1/6. (Interpolated past excitation).
 *
 *    Once the fractional pitch lag is determined,
 *    the adaptive codebook vector v(n) is computed by interpolating
 *    the past excitation signal u(n) at the given integer delay k
 *    and phase (fraction):
 *
 *            9                               9
 *    v(n) = SUM[ u(n-k-i) * b60(t+i*6) ]  + SUM[ u(n-k+1+i) * b60(6-t+i*6) ],
 *           i=0                             i=0
 *    n = 0, ...,39, t = 0, ...,5.
 *
 *    The interpolation filter b60 is based on a Hamming windowed sin(x)/x
 *    function truncated at ± 59 and padded with zeros at ± 60 (b60(60) = 0).
 *    The filter has a cut-off frequency (-3 dB) at 3600 Hz in
 *    the over-sampled domain.
 *
 * Returns:
 *    void
 */
__device__
static void Pred_lt_3or6_40(Word32 exc[], Word32 T0, Word32 frac, Word32 flag3)
{
   Word32 s, i;
   Word32 *x0, *x1, *x2;
   const Word32 *c1, *c2;

   x0 = &exc[-T0];
   frac = -frac;

   if (flag3 != 0) {
      frac <<= 1;   /* inter_3l[k] = inter6[2*k] -> k' = 2*k */
   }

   if (frac < 0) {
      frac += 6;
      x0--;
   }
   c1 = &inter6[frac];
   c2 = &inter6[6 - frac];

   for (i = 0; i < 40; i++) {
      x1 = x0++;
      x2 = x0;
      s = x1[0] * c1[0];
      s += x1[-1] * c1[6];
      s += x1[-2] * c1[12];
      s += x1[-3] * c1[18];
      s += x1[-4] * c1[24];
      s += x1[-5] * c1[30];
      s += x1[-6] * c1[36];
      s += x1[-7] * c1[42];
      s += x1[-8] * c1[48];
      s += x1[-9] * c1[54];
      s += x2[0] * c2[0];
      s += x2[1] * c2[6];
      s += x2[2] * c2[12];
      s += x2[3] * c2[18];
      s += x2[4] * c2[24];
      s += x2[5] * c2[30];
      s += x2[6] * c2[36];
      s += x2[7] * c2[42];
      s += x2[8] * c2[48];
      s += x2[9] * c2[54];
      exc[i] = (s + 0x4000) >> 15;
   }
}


/*
 * Dec_lag6
 *
 *
 * Parameters:
 *    index    I: received pitch index
 *    pit_min  I: minimum pitch lag
 *    pit_max  I: maximum pitch lag
 *    i_subfr  I: subframe flag
 *    T0       B: integer part of pitch lag
 *    T0_frac  O: fractional part of pitch lag
 *
 * Function:
 *    Decoding of fractional pitch lag with 1/6 resolution.
 *    Extract the integer and fraction parts of the pitch lag from
 *    the received adaptive codebook index.
 *
 *    The fractional lag in 1st and 3rd subframes is encoded with 9 bits
 *    while that in 2nd and 4th subframes is relatively encoded with 6 bits.
 *    Note that in relative encoding only 61 values are used.
If the * decoder receives 61, 62, or 63 as the relative pitch index, it means * that a transmission error occurred. In this case, the pitch lag from * previous subframe (actually from previous frame) is used. * * Returns: * void */ __device__ static void Dec_lag6(Word32 index, Word32 pit_min, Word32 pit_max, Word32 i_subfr, Word32 *T0, Word32 *T0_frac) { Word32 t0_min, t0_max, i; /* if 1st or 3rd subframe */ if (i_subfr == 0) { if (index < 463) { /* T0 = (index+5)/6 + 17 */ *T0 = (index + 5) / 6 + 17; i = *T0 + *T0 + *T0; /* *T0_frac = index - T0*6 + 105 */ *T0_frac = (index - (i + i)) + 105; } else { *T0 = index - 368; *T0_frac = 0; } } /* second or fourth subframe */ else { /* find t0_min and t0_max for 2nd (or 4th) subframe */ t0_min = *T0 - 5; if (t0_min < pit_min) { t0_min = pit_min; } t0_max = t0_min + 9; if (t0_max > pit_max) { t0_max = pit_max; t0_min = t0_max - 9; } /* i = (index+5)/6 - 1 */ i = (index + 5) / 6 - 1; *T0 = i + t0_min; i = i + i + i; *T0_frac = (index - 3) - (i + i); } } /* * decompress10 * * * Parameters: * MSBs I: MSB part of the index * LSBs I: LSB part of the index * index1 I: index for first pos in posIndex * index2 I: index for second pos in posIndex * index3 I: index for third pos in posIndex * pos_indx O: position of 3 pulses (decompressed) * Function: * Decompression of the linear codeword * * Returns: * void */ __device__ static void decompress10(Word32 MSBs, Word32 LSBs, Word32 index1, Word32 index2 , Word32 index3, Word32 pos_indx[]) { Word32 divMSB; if (MSBs > 124) { MSBs = 124; } /* * pos_indx[index1] = ((MSBs-25*(MSBs/25))%5)*2 + (LSBs-4*(LSBs/4))%2; * pos_indx[index2] = ((MSBs-25*(MSBs/25))/5)*2 + (LSBs-4*(LSBs/4))/2; * pos_indx[index3] = (MSBs/25)*2 + LSBs/4; */ divMSB = MSBs / 25; pos_indx[index1] = (((MSBs - 25 * (divMSB)) % 5) << 1) + (LSBs & 0x1 ); pos_indx[index2] = (((MSBs - 25 * (divMSB)) / 5) << 1) + ((LSBs & 0x2) >> 1); pos_indx[index3] = (divMSB << 1) + (LSBs >> 2); return; } /* * decompress_codewords * * * Parameters: * indx I: position of 8 pulses (compressed) * pos_indx O: position index of 8 pulses (position only) * * Function: * Decompression of the linear codewords to 4+three indeces * one bit from each pulse is made robust to errors by * minimizing the phase shift of a bit error. 
* * i0,i4,i1 => one index (7+3) bits, 3 LSBs more robust * i2,i6,i5 => one index (7+3) bits, 3 LSBs more robust * i3,i7 => one index (5+2) bits, 2-3 LSbs more robust * * Returns: * void */ __device__ static void decompress_codewords(Word16 indx[], Word32 pos_indx[]) { Word32 ia, ib, MSBs, LSBs, MSBs0_24, tmp; /* * First index: 10x10x10 -> 2x5x2x5x2x5-> 125x2x2x2 -> 7+1x3 bits * MSBs = indx[NB_TRACK]/8; * LSBs = indx[NB_TRACK]%8; */ MSBs = *indx >> 3; LSBs = *indx & 0x7; decompress10(MSBs, LSBs, 0, 4, 1, pos_indx); /* * Second index: 10x10x10 -> 2x5x2x5x2x5-> 125x2x2x2 -> 7+1x3 bits * MSBs = indx[NB_TRACK+1]/8; * LSBs = indx[NB_TRACK+1]%8; */ MSBs = indx[1] >> 3; LSBs = indx[1] & 0x7; decompress10(MSBs, LSBs, 2, 6, 5, pos_indx); /* * Third index: 10x10 -> 2x5x2x5-> 25x2x2 -> 5+1x2 bits * MSBs = indx[NB_TRACK+2]/4; * LSBs = indx[NB_TRACK+2]%4; * MSBs0_24 = (MSBs*25+12)/32; * if ((MSBs0_24/5)%2==1) * pos_indx[3] = (4-(MSBs0_24%5))*2 + LSBs%2; * else * pos_indx[3] = (MSBs0_24%5)*2 + LSBs%2; * pos_indx[7] = (MSBs0_24/5)*2 + LSBs/2; */ MSBs = indx[2] >> 2; LSBs = indx[2] & 0x3; MSBs0_24 = (((MSBs * 25) + 12) >> 5); tmp = (MSBs0_24 * 6554) >> 15; ia = tmp & 0x1; ib = (MSBs0_24 - (tmp * 5)); if (ia == 1) { ib = 4 - ib; } pos_indx[3] = (ib << 1) + (LSBs & 0x1); pos_indx[7] = (tmp << 1) + (LSBs >> 1); } /* * decode_2i40_9bits * * * Parameters: * subNr I: subframe number * sign I: signs of 2 pulses * index I: Positions of the 2 pulses * cod O: algebraic (fixed) codebook excitation * * Function: * Algebraic codebook decoder * * Returns: * void */ __device__ static void decode_2i40_9bits(Word32 subNr, Word32 sign, Word32 index, Word32 cod[]) { Word32 pos[2]; Word32 i, j, k; /* Decode the positions */ /* table bit is the MSB */ j = (index & 64) >> 6; i = index & 7; /* pos0 =i*5+startPos[j*8+subNr*2] */ i = (i + (i << 2)); k = startPos[(j << 3) + (subNr << 1)]; pos[0] = i + k; index = index >> 3; i = index & 7; /* pos1 =i*5+startPos[j*8+subNr*2+1] */ i = (i + (i << 2)); k = startPos[((j << 3) + (subNr << 1)) + 1]; pos[1] = (Word16)(i + k); /* decode the signs and build the codeword */ memset(cod, 0, L_SUBFR << 2); for (j = 0; j < 2; j++) { i = sign & 1; sign = sign >> 1; if (i != 0) { cod[pos[j]] = 8191; /* +1.0 */ } else { cod[pos[j]] = -8192; /* -1.0 */ } } return; } /* * decode_2i40_11bits * * * Parameters: * sign I: signs of 2 pulses * index I: Positions of the 2 pulses * cod O: algebraic (fixed) codebook excitation * * Function: * Algebraic codebook decoder * * Returns: * void */ __device__ static void decode_2i40_11bits(Word32 sign, Word32 index, Word32 cod[]) { Word32 pos[2]; Word32 i, j; /* Decode the positions */ j = index & 1; index = index >> 1; i = index & 7; /* pos0 =i*5+1+j*2 */ i = (i + (i << 2)); i = (i + 1); j = (j << 1); pos[0] = i + j; index = index >> 3; j = index & 3; index = index >> 2; i = index & 7; if (j == 3) { /* pos1 =i*5+4 */ i = (i + (i << 2)); pos[1] = i + 4; } else { /* pos1 =i*5+j */ i = (i + (i << 2)); pos[1] = i + j; } /* decode the signs and build the codeword */ memset(cod, 0, L_SUBFR << 2); for (j = 0; j < 2; j++) { i = sign & 1; sign = sign >> 1; if (i != 0) { cod[pos[j]] = 8191; /* +1.0 */ } else { cod[pos[j]] = -8192; /* -1.0 */ } } return; } /* * decode_3i40_14bits * * * Parameters: * sign I: signs of 3 pulses * index I: Positions of the 3 pulses * cod O: algebraic (fixed) codebook excitation * * Function: * Algebraic codebook decoder * * Returns: * void */ __device__ static void decode_3i40_14bits(Word32 sign, Word32 index, Word32 cod[]) { Word32 pos[3]; Word32 
i, j; /* Decode the positions */ i = index & 7; /* pos0 =i*5 */ pos[0] = i + (i << 2); index = index >> 3; j = index & 1; index = index >> 1; i = index & 7; /* pos1 =i*5+1+j*2 */ i = (i + (i << 2)); i = (i + 1); j = (j << 1); pos[1] = i + j; index = index >> 3; j = index & 1; index = index >> 1; i = index & 7; /* pos2 =i*5+2+j*2 */ i = (i + (i << 2)); i = (i + 2); j = (j << 1); pos[2] = i + j; /* decode the signs and build the codeword */ memset(cod, 0, L_SUBFR << 2); for (j = 0; j < 3; j++) { i = sign & 1; sign = sign >> 1; if (i > 0) { cod[pos[j]] = 8191; /* +1.0 */ } else { cod[pos[j]] = -8192; /* -1.0 */ } } return; } /* * decode_3i40_14bits * * * Parameters: * sign I: signs of 4 pulses * index I: Positions of the 4 pulses * cod O: algebraic (fixed) codebook excitation * * Function: * Algebraic codebook decoder * * Returns: * void */ __device__ static void decode_4i40_17bits(Word32 sign, Word32 index, Word32 cod[]) { Word32 pos[4]; Word32 i, j; /* Decode the positions */ i = index & 7; i = dgray[i]; /* pos0 =i*5 */ pos[0] = i + (i << 2); index = index >> 3; i = index & 7; i = dgray[i]; /* pos1 =i*5+1 */ i = (i + (i << 2)); pos[1] = i + 1; index = index >> 3; i = index & 7; i = dgray[i]; /* pos2 =i*5+1 */ i = (i + (i << 2)); pos[2] = i + 2; index = index >> 3; j = index & 1; index = index >> 1; i = index & 7; i = dgray[i]; /* pos3 =i*5+3+j */ i = (i + (i << 2)); i = (i + 3); pos[3] = i + j; /* decode the signs and build the codeword */ memset(cod, 0, L_SUBFR << 2); for (j = 0; j < 4; j++) { i = sign & 1; sign = sign >> 1; if (i != 0) { cod[pos[j]] = 8191; } else { cod[pos[j]] = -8192; } } return; } /* * decode_8i40_31bits * * * Parameters: * index I: index of 8 pulses (sign+position) * cod O: algebraic (fixed) codebook excitation * * Function: * Algebraic codebook decoder * * Returns: * void */ __device__ static void decode_8i40_31bits(Word16 index[], Word32 cod[]) { Word32 linear_codewords[8]; Word32 i, j, pos1, pos2, sign; memset(cod, 0, L_CODE << 2); decompress_codewords(&index[NB_TRACK_MR102], linear_codewords); /* decode the positions and signs of pulses and build the codeword */ for (j = 0; j < NB_TRACK_MR102; j++) { /* compute index i */ i = linear_codewords[j]; i <<= 2; /* position of pulse "j" */ pos1 = i + j; if (index[j] == 0) { sign = POS_CODE; /* +1.0 */ } else { sign = -NEG_CODE; /* -1.0 */ } /* compute index i */ i = linear_codewords[j + 4]; i = i << 2; /* position of pulse "j+4" */ pos2 = i + j; cod[pos1] = sign; if (pos2 < pos1) { sign = -(sign); } cod[pos2] = cod[pos2] + sign; } return; } /* * decode_10i40_35bits * * * Parameters: * index I: index of 10 pulses (sign+position) * cod O: algebraic (fixed) codebook excitation * * Function: * Algebraic codebook decoder * * Returns: * void */ __device__ static void decode_10i40_35bits(Word16 index[], Word32 cod[]) { Word32 i, j, pos1, pos2, sign, tmp; memset(cod, 0, L_CODE << 2); /* decode the positions and signs of pulses and build the codeword */ for (j = 0; j < 5; j++) { /* compute index i */ tmp = index[j]; i = tmp & 7; i = dgray[i]; i = (i * 5); /* position of pulse "j" */ pos1 = (i + j); i = (tmp >> 3) & 1; if (i == 0) { sign = 4096; /* +1.0 */ } else { sign = -4096; /* -1.0 */ } /* compute index i */ i = index[j + 5] & 7; i = dgray[i]; i = i * 5; /* position of pulse "j+5" */ pos2 = (i + j); cod[pos1] = sign; if (pos2 < pos1) { sign = -(sign); } cod[pos2] = cod[pos2] + sign; } return; } /* * gmed_n * * * Parameters: * ind I: values * n I: The number of gains (odd) * * Function: * Calculates N-point median. 
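 *
 *    Illustrative example: for ind = {4, 9, 2, 7, 5} and n = 5 the function
 *    returns 5, the middle value of the sorted sequence {2, 4, 5, 7, 9}.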
* * Returns: * index of the median value */ __device__ static Word32 gmed_n(Word32 ind[], Word32 n) { Word32 tmp[NMAX], tmp2[NMAX]; Word32 max, medianIndex, i, j, ix = 0; for (i = 0; i < n; i++) { tmp2[i] = ind[i]; } for (i = 0; i < n; i++) { max = -32767; for (j = 0; j < n; j++) { if (tmp2[j] >= max) { max = tmp2[j]; ix = j; } } tmp2[ix] = -32768; tmp[i] = ix; } medianIndex = tmp[(n >> 1)]; return(ind[medianIndex]); } /* * ec_gain_pitch * * * Parameters: * st->pbuf I: last five gains * st->past_gain_pit I: past gain * state I: state of the state machine * gain_pitch O: pitch gain * * Function: * Calculates pitch from previous values. * * Returns: * void */ __device__ static void ec_gain_pitch(ec_gain_pitchState *st, Word16 state, Word32 * gain_pitch) { Word32 tmp; /* calculate median of last five gains */ tmp = gmed_n(st->pbuf, 5); /* new gain = minimum(median, past_gain) * pdown[state] */ if (tmp > st->past_gain_pit) { tmp = st->past_gain_pit; } *gain_pitch = (tmp * pdown[state]) >> 15; } /* * d_gain_pitch * * * Parameters: * mode I: AMR mode * index I: index of quantization * * Function: * Decodes the pitch gain using the received index * * Returns: * gain */ __device__ static Word32 d_gain_pitch(enum Mode mode, Word32 index) { Word32 gain; if (mode == MR122) { /* clear 2 LSBits */ gain = (qua_gain_pitch[index] >> 2) << 2; } else { gain = qua_gain_pitch[index]; } return gain; } /* * ec_gain_pitch_update * * * Parameters: * st->prev_gp B: previous pitch gain * st->past_gain_pit O: past gain * st->pbuf B: past gain buffer * bfi I: bad frame indicator * prev_bf I: previous frame was bad * gain_pitch B: pitch gain * * Function: * Update the pitch gain concealment state * Limit gain_pitch if the previous frame was bad * * Returns: * gain */ __device__ static void ec_gain_pitch_update(ec_gain_pitchState *st, Word32 bfi, Word32 prev_bf, Word32 *gain_pitch) { if (bfi == 0) { if (prev_bf != 0) { if (*gain_pitch > st->prev_gp) { *gain_pitch = st->prev_gp; } } st->prev_gp = *gain_pitch; } st->past_gain_pit = *gain_pitch; /* if (st->past_gain_pit > 1.0) */ if (st->past_gain_pit > 16384) { st->past_gain_pit = 16384; } st->pbuf[0] = st->pbuf[1]; st->pbuf[1] = st->pbuf[2]; st->pbuf[2] = st->pbuf[3]; st->pbuf[3] = st->pbuf[4]; st->pbuf[4] = st->past_gain_pit; } /* * gc_pred (366) * * * Parameters: * st->past_qua_en I: MA predictor * st->past_qua_en_MR122 I: MA predictor MR122 * mode I: AMR mode * code I: innovative codebook vector * exp_gcode0 O: predicted gain factor (exponent) * frac_gcode0 O: predicted gain factor (fraction) * exp_en I: innovation energy (MR795) (exponent) * frac_en I: innovation energy (MR795) (fraction) * * Function: * MA prediction of the innovation energy * * Mean removed innovation energy (dB) in subframe n * N-1 * E(n) = 10*log(gc*gc * SUM[(code(i) * code(i)]/N) - EMean * i=0 * N=40 * * Mean innovation energy (dB) * N-1 * Ei(n) = 10*log(SUM[(code(i) * code(i)]/N) * i=0 * * Predicted energy * 4 * Ep(n) = SUM[b(i) * R(n-i)] * i=1 * b = [0.68 0.58 0.34 0.19] * R(k) is quantified prediction error at subframe k * * E_Mean = 36 dB (MR122) * * Predicted gain gc is found by * * gc = POW[10, 0.05 * (Ep(n) + EMean - Ei)] * * Returns: * void */ __device__ static void gc_pred(dec_gc_predState *st, enum Mode mode, Word32 *code, Word32 * exp_gcode0, Word32 *frac_gcode0, Word32 *exp_en, Word32 *frac_en) { Word32 exp, frac, ener_code = 0, i = 0; /* energy of code: * ener_code = sum(code[i]^2) */ while (i < L_SUBFR) { ener_code += code[i] * code[i]; i++; } if ((0x3fffffff <= ener_code) | 
(ener_code < 0)) ener_code = MAX_32; else ener_code <<= 1; if (mode == MR122) { Word32 ener; /* ener_code = ener_code / lcode; lcode = 40; 1/40 = 26214 Q20 */ ener_code = ((ener_code + 0x00008000L) >> 16) * 52428; /* Q9 * Q20 -> Q30 */ /* energy of code: * ener_code(Q17) = 10 * Log10(energy) / constant * = 1/2 * Log2(energy) * constant = 20*Log10(2) */ /* ener_code = 1/2 * Log2(ener_code); Note: Log2=log2+30 */ Log2(ener_code, &exp, &frac); ener_code = ((exp - 30) << 16) + (frac << 1); /* Q16 for log(), ->Q17 for 1/2 log() */ /* * predicted energy: * ener(Q24) = (Emean + sum{pred[i]*pastEn[i]})/constant * = MEAN_ENER + sum(pred[i]*past_qua_en[i]) * constant = 20*Log10(2) */ ener = 0; i = 0; while (i < 4) { ener += st->past_qua_en_MR122[i] * pred_MR122[i]; i++; } ener <<= 1; ener += MEAN_ENER_MR122; /* * predicted codebook gain * gc0 = Pow10( (ener*constant - ener_code*constant) / 20 ) * = Pow2(ener-ener_code) * = Pow2(int(d)+frac(d)) */ ener = (ener - ener_code) >> 1; /* Q16 */ *exp_gcode0 = ener >> 16; *frac_gcode0 = (ener >> 1) - (*exp_gcode0 << 15); } /* all modes except 12.2 */ else { Word32 tmp, gcode0; int exp_code; /* * Compute: meansEner - 10log10(ener_code/ LSufr) */ exp_code = 0; if (ener_code != 0) { while (!(ener_code & 0x40000000)) { exp_code++; ener_code = ener_code << 1; } } /* Log2 = log2 + 27 */ Log2_norm(ener_code, exp_code, &exp, &frac); /* fact = 10/log2(10) = 3.01 = 24660 Q13 */ /* Q0.Q15 * Q13 -> Q14 */ tmp = (exp * (-49320)) + (((frac * (-24660)) >> 15) << 1); /* * tmp = meansEner - 10log10(ener_code/L_SUBFR) * = meansEner - 10log10(ener_code) + 10log10(L_SUBFR) * = K - fact * Log2(ener_code) * = K - fact * log2(ener_code) - fact*27 * * ==> K = meansEner + fact*27 + 10log10(L_SUBFR) * * meansEner = 33 = 540672 Q14 (MR475, MR515, MR59) * meansEner = 28.75 = 471040 Q14 (MR67) * meansEner = 30 = 491520 Q14 (MR74) * meansEner = 36 = 589824 Q14 (MR795) * meansEner = 33 = 540672 Q14 (MR102) * 10log10(L_SUBFR) = 16.02 = 262481.51 Q14 * fact * 27 = 1331640 Q14 * ----------------------------------------- * (MR475, MR515, MR59) K = 2134793.51 Q14 ~= 16678 * 64 * 2 * (MR67) K = 2065161.51 Q14 ~= 32268 * 32 * 2 * (MR74) K = 2085641.51 Q14 ~= 32588 * 32 * 2 * (MR795) K = 2183945.51 Q14 ~= 17062 * 64 * 2 * (MR102) K = 2134793.51 Q14 ~= 16678 * 64 * 2 */ if (mode == MR102) { /* mean = 33 dB */ tmp += 2134784; /* Q14 */ } else if (mode == MR795) { /* mean = 36 dB */ tmp += 2183936; /* Q14 */ /* * ener_code = <xn xn> * 2^27*2^exp_code * frac_en = ener_code / 2^16 * = <xn xn> * 2^11*2^exp_code * <xn xn> = <xn xn>*2^11*2^exp * 2^exp_en * := frac_en * 2^exp_en * * ==> exp_en = -11-exp_code; */ *frac_en = ener_code >> 16; *exp_en = -11 - exp_code; } else if (mode == MR74) { /* mean = 30 dB */ tmp += 2085632; /* Q14 */ } else if (mode == MR67) { /* mean = 28.75 dB */ tmp += 2065152; /* Q14 */ } else /* MR59, MR515, MR475 */ { /* mean = 33 dB */ tmp += 2134784; /* Q14 */ } /* * Compute gcode0 * = Sum(i=0,3) pred[i]*past_qua_en[i] - ener_code + meanEner */ tmp = tmp << 9; /* Q23 */ /* Q13 * Q10 -> Q23 */ i = 0; while (i < 4) { tmp += pred[i] * st->past_qua_en[i]; i++; } gcode0 = tmp >> 15; /* Q8 */ /* * gcode0 = pow(10.0, gcode0/20) * = pow(2, 3.3219*gcode0/20) * = pow(2, 0.166*gcode0) */ /* 5439 Q15 = 0.165985 */ /* (correct: 1/(20*log10(2)) 0.166096 = 5443 Q15) */ /* For IS641 bitexactness */ if (mode == MR74) { /* Q8 * Q15 -> Q24 */ tmp = gcode0 * 10878; } else { /* Q8 * Q15 -> Q24 */ tmp = gcode0 * 10886; } tmp = tmp >> 9; /* -> Q15 */ /* -> Q0.Q15 */ *exp_gcode0 = tmp >> 15; 
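      /*
       * Added note: tmp holds log2 of the predicted fixed codebook gain in
       * Q15; the integer part goes to exp_gcode0 and the 15-bit fraction to
       * frac_gcode0, so callers such as Dec_gain() reconstruct the linear
       * gain as 2^exp_gcode0 * Pow2(14, frac_gcode0) / 2^14.
       */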
*frac_gcode0 = tmp - (*exp_gcode0 * 32768); } } /* * gc_pred_update * * * Parameters: * st->past_qua_en B: MA predictor * st->past_qua_en_MR122 B: MA predictor MR122 * qua_ener_MR122 I: quantized energy for update (log2(quaErr)) * qua_ener I: quantized energy for update (20*log10(quaErr)) * * Function: * Update MA predictor with last quantized energy * * Returns: * void */ __device__ static void gc_pred_update(dec_gc_predState *st, Word32 qua_ener_MR122, Word32 qua_ener) { Word32 i; for (i = 3; i > 0; i--) { st->past_qua_en[i] = st->past_qua_en[i - 1]; st->past_qua_en_MR122[i] = st->past_qua_en_MR122[i - 1]; } st->past_qua_en_MR122[0] = qua_ener_MR122; /* log2 (quaErr), Q10 */ st->past_qua_en[0] = qua_ener; /* 20*log10(quaErr), Q10 */ } /* * Dec_gain * * * Parameters: * pred_state->past_qua_en B: MA predictor * pred_state->past_qua_en_MR122 B: MA predictor MR122 * mode I: AMR mode * index I: index of quantization * code I: Innovative vector * evenSubfr I: Flag for even subframes * gain_pit O: Pitch gain * gain_cod O: Code gain * * Function: * Decode the pitch and codebook gains * * Returns: * void */ __device__ static void Dec_gain(dec_gc_predState *pred_state, enum Mode mode, Word32 index, Word32 code[], Word32 evenSubfr, Word32 *gain_pit, Word32 *gain_cod) { Word32 frac, gcode0, exp, qua_ener, qua_ener_MR122, g_code, tmp; const Word32 *p; /* Read the quantized gains (table depends on mode) */ index = index << 2; if ((mode == MR102) || (mode == MR74) || (mode == MR67)) { p = &table_gain_highrates[index]; *gain_pit = *p++; g_code = *p++; qua_ener_MR122 = *p++; qua_ener = *p; } else { if (mode == MR475) { index = index + ((1 - evenSubfr) << 1); p = &table_gain_MR475[index]; *gain_pit = *p++; g_code = *p++; /* * calculate predictor update values (not stored in 4.75 * quantizer table to save space): * qua_ener = log2(g) * qua_ener_MR122 = 20*log10(g) */ /* Log2(x Q12) = log2(x) + 12 */ Log2(g_code, &exp, &frac); exp = exp - 12; tmp = frac >> 5; if ((frac & ((Word16)1 << 4)) != 0) { tmp++; } qua_ener_MR122 = tmp + (exp << 10); /* 24660 Q12 ~= 6.0206 = 20*log10(2) */ tmp = exp * 49320; tmp += (((frac * 24660) >> 15) << 1); /* Q12 * Q0 = Q13 -> Q10 */ qua_ener = ((tmp << 13) + 0x00008000L) >> 16; } else { p = &table_gain_lowrates[index]; *gain_pit = *p++; g_code = *p++; qua_ener_MR122 = *p++; qua_ener = *p; } } /* * predict codebook gain * gc0 = Pow2(int(d)+frac(d)) * = 2^exp + 2^frac * gcode0 (Q14) = 2^14*2^frac = gc0 * 2^(14-exp) */ gc_pred(pred_state, mode, code, &exp, &frac, NULL, NULL); gcode0 = Pow2(14, frac); /* * read quantized gains, update table of past quantized energies * st->past_qua_en(Q10) = 20 * Log10(gFac) / constant * = Log2(gFac) * = qua_ener * constant = 20*Log10(2) */ if (exp < 11) { *gain_cod = (g_code * gcode0) >> (25 - exp); } else { tmp = ((g_code * gcode0) << (exp - 9)); if ((tmp >> (exp - 9)) != (g_code * gcode0)) { *gain_cod = 0x7FFF; } else { *gain_cod = tmp >> 16; } } /* update table of past quantized energies */ gc_pred_update(pred_state, qua_ener_MR122, qua_ener); return; } /* * gc_pred_average_limited * * * Parameters: * st->past_qua_en I: MA predictor * st->past_qua_en_MR122 I: MA predictor MR122 * ener_avg_MR122 O: everaged quantized energy (log2(quaErr)) * ener_avg O: averaged quantized energy (20*log10(quaErr)) * * Function: * Compute average limited quantized energy * Returns: * void */ __device__ static void gc_pred_average_limited(dec_gc_predState *st, Word32 *ener_avg_MR122, Word32 *ener_avg) { Word32 av_pred_en, i; /* do average in MR122 mode (log2() 
domain) */ av_pred_en = 0; for (i = 0; i < NPRED; i++) { av_pred_en = (av_pred_en + st->past_qua_en_MR122[i]); } /* av_pred_en = 0.25*av_pred_en */ av_pred_en = (av_pred_en * 8192) >> 15; /* if (av_pred_en < -14/(20Log10(2))) av_pred_en = .. */ if (av_pred_en < MIN_ENERGY_MR122) { av_pred_en = MIN_ENERGY_MR122; } *ener_avg_MR122 = (Word16)av_pred_en; /* do average for other modes (20*log10() domain) */ av_pred_en = 0; for (i = 0; i < NPRED; i++) { av_pred_en = (av_pred_en + st->past_qua_en[i]); if (av_pred_en < -32768) av_pred_en = -32768; else if (av_pred_en > 32767) av_pred_en = 32767; } /* av_pred_en = 0.25*av_pred_en */ av_pred_en = (av_pred_en * 8192) >> 15; *ener_avg = av_pred_en; } /* * ec_gain_code * * * Parameters: * st->gbuf I: last five gains * st->past_gain_code I: past gain * pred_state B: MA predictor state * state I: state of the state machine * gain_code O: decoded innovation gain * * Function: * Conceal the codebook gain * * Returns: * void */ __device__ static void ec_gain_code(ec_gain_codeState *st, dec_gc_predState *pred_state, Word16 state, Word32 *gain_code) { Word32 tmp, qua_ener_MR122, qua_ener; /* calculate median of last five gain values */ tmp = gmed_n(st->gbuf, 5); /* new gain = minimum(median, past_gain) * cdown[state] */ if (tmp > st->past_gain_code) { tmp = st->past_gain_code; } tmp = (tmp * cdown[state]) >> 15; *gain_code = tmp; /* * update table of past quantized energies with average of * current values */ gc_pred_average_limited(pred_state, &qua_ener_MR122, &qua_ener); gc_pred_update(pred_state, qua_ener_MR122, qua_ener); } /* * ec_gain_code_update * * * Parameters: * st->gbuf B: last five gains * st->past_gain_code O: past gain * st->prev_gc B previous gain * bfi I: bad indicator * prev_bf I: previous frame bad indicator * gain_code O: decoded innovation gain * * Function: * Update the codebook gain concealment state * * Returns: * void */ __device__ static void ec_gain_code_update(ec_gain_codeState *st, Word16 bfi, Word16 prev_bf, Word32 *gain_code) { /* limit gain_code by previous good gain if previous frame was bad */ if (bfi == 0) { if (prev_bf != 0) { if (*gain_code > st->prev_gc) { *gain_code = st->prev_gc; } } st->prev_gc = *gain_code; } /* update EC states: previous gain, gain buffer */ st->past_gain_code = *gain_code; st->gbuf[0] = st->gbuf[1]; st->gbuf[1] = st->gbuf[2]; st->gbuf[2] = st->gbuf[3]; st->gbuf[3] = st->gbuf[4]; st->gbuf[4] = *gain_code; return; } /* * d_gain_code * * * Parameters: * pred_state B: MA predictor state * mode I: AMR mode (MR795 or MR122) * index I: received quantization index * code I: innovation codevector * gain_code O: decoded innovation gain * * Function: * Decode the fixed codebook gain using the received index * * Returns: * void */ __device__ static void d_gain_code(dec_gc_predState *pred_state, enum Mode mode, Word32 index, Word32 code[], Word32 *gain_code) { Word32 g_code0, exp, frac, qua_ener_MR122, qua_ener; Word32 exp_inn_en, frac_inn_en, tmp, tmp2, i; const Word32 *p; /* * Decode codebook gain */ gc_pred(pred_state, mode, code, &exp, &frac, &exp_inn_en, &frac_inn_en); p = &qua_gain_code[((index + index) + index)]; /* Different scalings between MR122 and the other modes */ if (mode == MR122) { /* predicted gain */ g_code0 = Pow2(exp, frac); if (g_code0 <= 2047) g_code0 = g_code0 << 4; else g_code0 = 32767; *gain_code = ((g_code0 * *p++) >> 15) << 1; if (*gain_code & 0xFFFF8000) *gain_code = 32767; } else { g_code0 = Pow2(14, frac); tmp = (*p++ * g_code0) << 1; exp = 9 - exp; if (exp > 0) { tmp = tmp >> exp; 
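         /*
          * (The else-branch below handles exp <= 0: tmp is shifted left one
          * bit at a time and saturates to 0x7FFFFFFF / 0x80000000 as soon as
          * the sign bit would change.)
          */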
} else { for (i = exp; i < 0; i++) { tmp2 = tmp << 1; if ((tmp ^ tmp2) & 0x80000000) { tmp = (tmp & 0x80000000) ? 0x80000000 : 0x7FFFFFFF; break; } else { tmp = tmp2; } } } *gain_code = tmp >> 16; if (*gain_code & 0xFFFF8000) *gain_code = 32767; } /* * update table of past quantized energies */ qua_ener_MR122 = *p++; qua_ener = *p++; gc_pred_update(pred_state, qua_ener_MR122, qua_ener); return; } /* * Int_lsf * * * Parameters: * lsf_old I: LSF vector at the 4th subframe of past frame * lsf_new I: LSF vector at the 4th subframe of present frame * i_subfr I: current subframe * lsf_out O: interpolated LSF parameters for current subframe * * Function: * Interpolates the LSFs for selected subframe * * The LSFs are interpolated at the 1st, 2nd and 3rd * ubframe and only forwarded at the 4th subframe. * * sf1: 3/4 F0 + 1/4 F1 * sf2: 1/2 F0 + 1/2 F1 * sf3: 1/4 F0 + 3/4 F1 * sf4: F1 * * Returns: * void */ __device__ static void Int_lsf(Word32 lsf_old[], Word32 lsf_new[], int i_subfr, Word32 lsf_out[]) { Word32 i; switch (i_subfr) { case 0: for (i = 0; i < 10; i++) { lsf_out[i] = lsf_old[i] - (lsf_old[i] >> 2) + (lsf_new[i] >> 2); } break; case 40: for (i = 0; i < 10; i++) { lsf_out[i] = (lsf_old[i] >> 1) + (lsf_new[i] >> 1); } break; case 80: for (i = 0; i < 10; i++) { lsf_out[i] = (lsf_old[i] >> 2) - (lsf_new[i] >> 2) + lsf_new[i]; } break; case 120: memcpy(lsf_out, lsf_new, M << 2); break; } } /* * Cb_gain_average * * * Parameters: * st->cbGainHistory B: codebook gain history * st->hangCount B: hangover counter * mode I: AMR mode * gain_code I: codebook gain * lsp I: The LSP for the current frame * lspAver I: The average of LSP for 8 frames * bfi I: bad frame indication * prev_bf I: previous bad frame indication * pdfi I: potential degraded bad frame indication * prev_pdf I: previous potential degraded bad frame indication * inBackgroundNoise I: background noise decision * voicedHangover I: number of frames after last voiced frame * * Function: * The mixed codebook gain, used to make codebook gain more smooth in background * * * Returns: * void */ __device__ static Word32 Cb_gain_average(Cb_gain_averageState *st, enum Mode mode, Word32 gain_code, Word32 lsp[], Word32 lspAver[], Word16 bfi, Word16 prev_bf, Word16 pdfi, Word16 prev_pdf, Word32 inBackgroundNoise, Word32 voicedHangover) { Word32 tmp[M]; Word32 i, cbGainMix, tmp_diff, bgMix, cbGainMean, sum, diff, tmp1, tmp2; int shift1, shift2, shift; /* set correct cbGainMix for MR74, MR795, MR122 */ cbGainMix = gain_code; /* * Store list of CB gain needed in the CB gain averaging * */ st->cbGainHistory[0] = st->cbGainHistory[1]; st->cbGainHistory[1] = st->cbGainHistory[2]; st->cbGainHistory[2] = st->cbGainHistory[3]; st->cbGainHistory[3] = st->cbGainHistory[4]; st->cbGainHistory[4] = st->cbGainHistory[5]; st->cbGainHistory[5] = st->cbGainHistory[6]; st->cbGainHistory[6] = gain_code; /* compute lsp difference */ for (i = 0; i < M; i++) { tmp1 = labs(lspAver[i] - lsp[i]); shift1 = 0; if (tmp1 != 0) { while (!(tmp1 & 0x2000)) { shift1++; tmp1 = tmp1 << 1; } } tmp2 = lspAver[i]; shift2 = 0; if (tmp2 != 0) { while (!(tmp2 & 0x4000)) { shift2++; tmp2 = tmp2 << 1; } } tmp[i] = (tmp1 << 15) / tmp2; shift = 2 + shift1 - shift2; if (shift >= 0) { tmp[i] = tmp[i] >> shift; } else { tmp[i] = tmp[i] << -(shift); } } diff = *tmp + tmp[1] + tmp[2] + tmp[3] + tmp[4] + tmp[5] + tmp[6] + tmp[7] + tmp[8] + tmp[9]; /* saturate */ if (diff > 32767) { diff = 32767; } /* Compute hangover */ st->hangVar += 1; if (diff <= 5325) { st->hangVar = 0; } if (st->hangVar > 10) { 
/* Speech period, reset hangover variable */ st->hangCount = 0; } /* Compute mix constant (bgMix) */ bgMix = 8192; /* MR475, MR515, MR59, MR67, MR102 */ if ((mode <= MR67) | (mode == MR102)) { /* disable mix if too short time since */ if ((st->hangCount >= 40) & (diff <= 5325)) /* 0.65 in Q13 */ { /* if errors and presumed noise make smoothing probability stronger */ if (((((pdfi != 0) & (prev_pdf != 0)) | (bfi != 0) | ( prev_bf != 0)) & ((voicedHangover > 1)) & ( inBackgroundNoise != 0) & (mode < MR67))) { /* bgMix = min(0.25, max(0.0, diff-0.55)) / 0.25; */ tmp_diff = diff - 4506; /* 0.55 in Q13 */ /* max(0.0, diff-0.55) */ tmp1 = 0; if (tmp_diff > 0) { tmp1 = tmp_diff; } /* min(0.25, tmp1) */ if (2048 >= tmp1) { bgMix = tmp1 << 2; } } else { /* bgMix = min(0.25, max(0.0, diff-0.40)) / 0.25; */ tmp_diff = diff - 3277; /* 0.4 in Q13 */ /* max(0.0, diff-0.40) */ tmp1 = 0; if (tmp_diff > 0) { tmp1 = tmp_diff; } /* min(0.25, tmp1) */ if (2048 >= tmp1) { bgMix = tmp1 << 2; } } } /* * Smoothen the cb gain trajectory * smoothing depends on mix constant bgMix */ sum = st->cbGainHistory[2] + st->cbGainHistory[3] + st->cbGainHistory[4] + st->cbGainHistory[5] + st->cbGainHistory[6]; if (sum > 163822) { cbGainMean = 32767; } else { cbGainMean = (3277 * sum + 0x00002000L) >> 14; /* Q1 */ } /* more smoothing in error and bg noise (NB no DFI used here) */ if (((bfi != 0) | (prev_bf != 0)) & (inBackgroundNoise != 0) & ( mode < MR67)) { sum = 9362 * (st->cbGainHistory[0] + st->cbGainHistory[1] + st-> cbGainHistory[2] + st->cbGainHistory[3] + st->cbGainHistory[4] + st->cbGainHistory[5] + st->cbGainHistory[6]); cbGainMean = (sum + 0x00008000L) >> 16; /* Q1 */ } /* cbGainMix = bgMix*cbGainMix + (1-bgMix)*cbGainMean; */ sum = bgMix * cbGainMix; /* sum in Q14 */ sum += cbGainMean << 13; sum -= bgMix * cbGainMean; cbGainMix = (sum + 0x00001000L) >> 13; /* Q1 */ } st->hangCount += 1; if (st->hangCount & 0x80000000) st->hangCount = 40; return cbGainMix; } /* * ph_disp * * * Parameters: * state->gainMem B: LTP gain memory * state->prevCbGain B: Codebook gain memory * mode I: AMR mode * x B: LTP excitation signal -> total excitation signal * cbGain I: Codebook gain * ltpGain I: LTP gain * inno B: Innovation vector * pitch_fac I: pitch factor used to scale the LTP excitation * tmp_shift I: shift factor applied to sum of scaled LTP ex & innov. 
* before rounding * * Function: * Adaptive phase dispersion; forming of total excitation * * * Returns: * void */ __device__ static void ph_disp(ph_dispState *state, enum Mode mode, Word32 x[], Word32 cbGain, Word32 ltpGain, Word32 inno[], Word32 pitch_fac, Word32 tmp_shift) { Word32 inno_sav[L_SUBFR], ps_poss[L_SUBFR]; Word32 i, i1, impNr, temp1, temp2, j, nze, nPulse, ppos; const Word32 *ph_imp; /* Pointer to phase dispersion filter */ /* Update LTP gain memory */ state->gainMem[4] = state->gainMem[3]; state->gainMem[3] = state->gainMem[2]; state->gainMem[2] = state->gainMem[1]; state->gainMem[1] = state->gainMem[0]; state->gainMem[0] = ltpGain; /* basic adaption of phase dispersion */ /* no dispersion */ impNr = 2; /* if (ltpGain < 0.9) */ if (ltpGain < PHDTHR2LTP) { /* maximum dispersion */ impNr = 0; /* if (ltpGain > 0.6 */ if (ltpGain > PHDTHR1LTP) { /* medium dispersion */ impNr = 1; } } /* onset indicator */ /* onset = (cbGain > onFact * cbGainMem[0]) */ temp1 = ((state->prevCbGain * ONFACTPLUS1) + 0x1000) >> 13; if (cbGain > temp1) { state->onset = ONLENGTH; } else { if (state->onset > 0) { state->onset--; } } /* * if not onset, check ltpGain buffer and use max phase dispersion if * half or more of the ltpGain-parameters say so */ if (state->onset == 0) { /* Check LTP gain memory and set filter accordingly */ i1 = 0; for (i = 0; i < PHDGAINMEMSIZE; i++) { if (state->gainMem[i] < PHDTHR1LTP) { i1++; } } if (i1 > 2) { impNr = 0; } } /* Restrict decrease in phase dispersion to one step if not onset */ if ((impNr > (state->prevState + 1)) & (state->onset == 0)) { impNr--; } /* if onset, use one step less phase dispersion */ if ((impNr<2)&(state->onset>0)) { impNr++; } /* disable for very low levels */ if (cbGain < 10) { impNr = 2; } if (state->lockFull == 1) { impNr = 0; } /* update static memory */ state->prevState = impNr; state->prevCbGain = cbGain; /* * do phase dispersion for all modes but 12.2 and 7.4; * don't modify the innovation if impNr >=2 (= no phase disp) */ if ((mode != MR122) & (mode != MR102) & (mode != MR74) & (impNr < 2) ) { /* * track pulse positions, save innovation, * and initialize new innovation */ nze = 0; for (i = 0; i < L_SUBFR; i++) { if (inno[i] != 0) { ps_poss[nze] = i; nze++; } } memcpy(inno_sav, inno, L_SUBFR << 2); memset(inno, 0, L_SUBFR << 2); /* Choose filter corresponding to codec mode and dispersion criterium */ ph_imp = ph_imp_mid; if (impNr == 0) { ph_imp = ph_imp_low; } if (mode == MR795) { ph_imp = ph_imp_mid_MR795; if (impNr == 0) { ph_imp = ph_imp_low_MR795; } } /* Do phase dispersion of innovation */ for (nPulse = 0; nPulse < nze; nPulse++) { ppos = ps_poss[nPulse]; /* circular convolution with impulse response */ j = 0; for (i = ppos; i < L_SUBFR; i++) { /* inno[i1] += inno_sav[ppos] * ph_imp[i1-ppos] */ temp1 = (inno_sav[ppos] * ph_imp[j++]) >> 15; inno[i] = inno[i] + temp1; } for (i = 0; i < ppos; i++) { /* inno[i] += inno_sav[ppos] * ph_imp[L_SUBFR-ppos+i] */ temp1 = (inno_sav[ppos] * ph_imp[j++]) >> 15; inno[i] = inno[i] + temp1; } } } /* * compute total excitation for synthesis part of decoder * (using modified innovation if phase dispersion is active) */ for (i = 0; i < L_SUBFR; i++) { /* x[i] = gain_pit*x[i] + cbGain*code[i]; */ temp1 = x[i] * pitch_fac + inno[i] * cbGain; temp2 = temp1 << tmp_shift; x[i] = (temp2 + 0x4000) >> 15; if (labs(x[i]) > 32767) { if ((temp1 ^ temp2) & 0x80000000) { x[i] = (temp1 & 0x80000000) ? -32768 : 32767; } else { x[i] = (temp2 & 0x80000000) ? 
-32768 : 32767; } } } return; } /* * sqrt_l_exp * * * Parameters: * x I: input value * exp O: right shift to be applied to result * * Function: * Sqrt with exponent value. * * y = sqrt(x) * x = f * 2^-e, 0.5 <= f < 1 (normalization) * y = sqrt(f) * 2^(-e/2) * * a) e = 2k --> y = sqrt(f) * 2^-k * (k = e div 2, 0.707 <= sqrt(f) < 1) * b) e = 2k+1 --> y = sqrt(f/2) * 2^-k * (k = e div 2, 0.5 <= sqrt(f/2) < 0.707) * * * Returns: * y output value */ __device__ static Word32 sqrt_l_exp(Word32 x, Word32 *exp) { Word32 y, a, i, tmp; int e; if (x <= (Word32)0) { *exp = 0; return(Word32)0; } e = 0; if (x != 0) { tmp = x; while (!(tmp & 0x40000000)) { e++; tmp = tmp << 1; } } e = e & 0xFFFE; x = (x << e); *exp = (Word16)e; x = (x >> 9); i = (Word16)(x >> 16); x = (x >> 1); a = x & (Word16)0x7fff; i = (i - 16); y = (sqrt_table[i] << 16); tmp = (sqrt_table[i] - sqrt_table[i + 1]); y -= (tmp * a) << 1; return(y); } /* * Ex_ctrl * * * Parameters: * excitation B: Current subframe excitation * excEnergy I: Exc. Energy, sqrt(totEx*totEx) * exEnergyHist I: History of subframe energies * voicedHangover I: number of frames after last voiced frame * prevBFI I: Set i previous bad frame indicators * carefulFlag I: Restrict dymamic in scaling * * Function: * Charaterice synthesis speech and detect background noise * * Returns: * background noise decision; 0 = no bgn, 1 = bgn */ __device__ static Word16 Ex_ctrl(Word32 excitation[], Word32 excEnergy, Word32 exEnergyHist[], Word32 voicedHangover, Word16 prevBFI, Word16 carefulFlag ) { Word32 i, testEnergy, scaleFactor, avgEnergy, prevEnergy, T0; int exp; /* get target level */ avgEnergy = gmed_n(exEnergyHist, 9); prevEnergy = (exEnergyHist[7] + exEnergyHist[8]) >> 1; if (exEnergyHist[8] < prevEnergy) { prevEnergy = exEnergyHist[8]; } /* upscaling to avoid too rapid energy rises for some cases */ if ((excEnergy<avgEnergy)&(excEnergy>5)) { /* testEnergy = 4*prevEnergy; */ testEnergy = prevEnergy << 2; if ((voicedHangover < 7) || prevBFI != 0) { /* testEnergy = 3*prevEnergy */ testEnergy = testEnergy - prevEnergy; } if (avgEnergy > testEnergy) { avgEnergy = testEnergy; } /* scaleFactor=avgEnergy/excEnergy in Q0 */ exp = 0; if (excEnergy != 0) { while (!(excEnergy & 0x4000)) { exp++; excEnergy = excEnergy << 1; } } excEnergy = 536838144 / excEnergy; T0 = (avgEnergy * excEnergy) << 1; T0 = (T0 >> (20 - exp)); if (T0 > 32767) { /* saturate */ T0 = 32767; } scaleFactor = T0; /* test if scaleFactor > 3.0 */ if ((carefulFlag != 0) & (scaleFactor > 3072)) { scaleFactor = 3072; } /* scale the excitation by scaleFactor */ for (i = 0; i < L_SUBFR; i++) { T0 = (scaleFactor * excitation[i]) << 1; T0 = (T0 >> 11); excitation[i] = T0; } } return 0; } /* * Inv_sqrt * * * Parameters: * x I: input value * * Function: * 1/sqrt(x) * * Returns: * y 1/sqrt(x) */ __device__ static Word32 Inv_sqrt(Word32 x) { int i, a, tmp, exp; Word32 y; if (x <= (Word32)0) return((Word32)0x3fffffffL); exp = 0; while (!(x & 0x40000000)) { exp++; x = x << 1; } /* x is normalized */ exp = (30 - exp); /* If exponent even -> shift right */ if ((exp & 1) == 0) { x = (x >> 1); } exp = (exp >> 1); exp = (exp + 1); x = (x >> 9); /* Extract b25-b31 */ i = (Word16)(x >> 16); /* Extract b10-b24 */ x = (x >> 1); a = x & (Word16)0x7fff; i = (i - 16); /* table[i] << 16 */ y = inv_sqrt_table[i] << 16; /* table[i] - table[i+1]) */ tmp = (inv_sqrt_table[i] - inv_sqrt_table[i + 1]); /* y -= tmp*a*2 */ y -= (tmp * a) << 1; /* denormalization */ y = (y >> exp); return(y); } /* * energy_old * * * Parameters: * in I: input value 
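 *
 *    Note: each sample is pre-scaled by >> 2 before squaring to reduce the
 *    risk of overflow in the 40-sample accumulation; a sum that reaches the
 *    top two bits is saturated to 0x7FFFFFFF, otherwise the result is
 *    rescaled by << 1 on return.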
* * Function: * Energy of signal * * Returns: * Energy */ __device__ static Word32 energy_old(Word32 in[]) { Word32 temp, i, sum = 0; for (i = 0; i < L_SUBFR; i += 8) { temp = in[i] >> 2; sum += temp * temp; temp = in[i + 1] >> 2; sum += temp * temp; temp = in[i + 2] >> 2; sum += temp * temp; temp = in[i + 3] >> 2; sum += temp * temp; temp = in[i + 4] >> 2; sum += temp * temp; temp = in[i + 5] >> 2; sum += temp * temp; temp = in[i + 6] >> 2; sum += temp * temp; temp = in[i + 7] >> 2; sum += temp * temp; } if (sum & 0xC0000000) { return 0x7FFFFFFF; } return(sum << 1); } /* * energy_new * * * Parameters: * in I: input value * * Function: * Energy of signal * * Returns: * Energy */ __device__ static Word32 energy_new(Word32 in[]) { Word32 i, s = 0, overflow = 0; s += in[0] * in[0]; for (i = 1; i < L_SUBFR; i += 3) { s += in[i] * in[i]; s += in[i + 1] * in[i + 1]; s += in[i + 2] * in[i + 2]; if (s & 0xC0000000) { overflow = 1; break; } } /* check for overflow */ if (overflow) { s = energy_old(in); } else { s = (s >> 3); } return s; } /* * agc2 * * * Parameters: * sig_in I: Post_Filter input signal * sig_out B: Post_Filter output signal * * Function: * Scales the excitation on a subframe basis * * Returns: * Energy */ __device__ static void agc2(Word32 *sig_in, Word32 *sig_out) { Word32 s; int i, exp; Word16 gain_in, gain_out, g0; /* calculate gain_out with exponent */ s = energy_new(sig_out); if (s == 0) { return; } exp = 0; while (!(s & 0x20000000)) { exp++; s = s << 1; } gain_out = (Word16)((s + 0x00008000L) >> 16); /* calculate gain_in with exponent */ s = energy_new(sig_in); if (s == 0) { g0 = 0; } else { i = 0; while (!(s & 0x40000000)) { i++; s = s << 1; } if (s < 0x7fff7fff) gain_in = (Word16)((s + 0x00008000L) >> 16); else gain_in = 32767; exp = (exp - i); /* * g0 = sqrt(gain_in/gain_out); */ /* s = gain_out / gain_in */ s = (gain_out << 15) / gain_in; s = (s << 7); if (exp > 0) s = (s >> exp); else s = (s << (-exp)); s = Inv_sqrt(s); g0 = (Word16)(((s << 9) + 0x00008000L) >> 16); } /* sig_out(n) = gain(n) * sig_out(n) */ for (i = 0; i < L_SUBFR; i++) { sig_out[i] = (sig_out[i] * g0) >> 12; } return; } /* * Bgn_scd * * * Parameters: * st->frameEnergyHist B: Frame Energy memory * st->bgHangover B: Background hangover counter * ltpGainHist I: LTP gain history * speech I: synthesis speech frame * voicedHangover O: number of frames after last voiced frame * * Function: * Charaterice synthesis speech and detect background noise * * Returns: * inbgNoise background noise decision; 0 = no bgn, 1 = bgn */ __device__ static Word16 Bgn_scd(Bgn_scdState *st, Word32 ltpGainHist[], Word32 speech[], Word32 *voicedHangover) { Word32 temp, ltpLimit, frame_energyMin, currEnergy, noiseFloor, maxEnergy, maxEnergyLastPart, s, i; Word16 prevVoiced, inbgNoise; /* * Update the inBackgroundNoise flag (valid for use in next frame if BFI) * it now works as a energy detector floating on top * not as good as a VAD. 
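 * The decision below compares the current frame energy with a noise floor
 * derived from the minimum of the energy history (margin of 16) and only
 * marks background noise after the energy conditions have held for more
 * than one frame (bgHangover > 1).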
*/ s = 0; for (i = 0; i < L_FRAME; i++) { s += speech[i] * speech[i]; } if ((s < 0xFFFFFFF) & (s >= 0)) currEnergy = s >> 13; else currEnergy = 32767; frame_energyMin = 32767; for (i = 0; i < L_ENERGYHIST; i++) { if (st->frameEnergyHist[i] < frame_energyMin) frame_energyMin = st->frameEnergyHist[i]; } /* Frame Energy Margin of 16 */ noiseFloor = frame_energyMin << 4; maxEnergy = st->frameEnergyHist[0]; for (i = 1; i < L_ENERGYHIST - 4; i++) { if (maxEnergy < st->frameEnergyHist[i]) { maxEnergy = st->frameEnergyHist[i]; } } maxEnergyLastPart = st->frameEnergyHist[2 * L_ENERGYHIST / 3]; for (i = 2 * L_ENERGYHIST / 3 + 1; i < L_ENERGYHIST; i++) { if (maxEnergyLastPart < st->frameEnergyHist[i]) { maxEnergyLastPart = st->frameEnergyHist[i]; } } /* false */ inbgNoise = 0; /* * Do not consider silence as noise * Do not consider continuous high volume as noise * Or if the current noise level is very low * Mark as noise if under current noise limit * OR if the maximum energy is below the upper limit */ if ((maxEnergy> LOWERNOISELIMIT)&(currEnergy<FRAMEENERGYLIMIT)&( currEnergy>LOWERNOISELIMIT) & ((currEnergy < noiseFloor) || ( maxEnergyLastPart < UPPERNOISELIMIT))) { if ((st->bgHangover + 1) > 30) { st->bgHangover = 30; } else { st->bgHangover += 1; } } else { st->bgHangover = 0; } /* make final decision about frame state, act somewhat cautiosly */ if (st->bgHangover > 1) inbgNoise = 1; /* true */ for (i = 0; i < L_ENERGYHIST - 1; i++) { st->frameEnergyHist[i] = st->frameEnergyHist[i + 1]; } st->frameEnergyHist[L_ENERGYHIST - 1] = currEnergy; /* * prepare for voicing decision; * tighten the threshold after some time in noise */ ltpLimit = 13926; /* 0.85 Q14 */ if (st->bgHangover > 8) { ltpLimit = 15565; /* 0.95 Q14 */ } if (st->bgHangover > 15) { ltpLimit = 16383; /* 1.00 Q14 */ } /* weak sort of voicing indication. */ prevVoiced = 0; /* false */ if (gmed_n(&ltpGainHist[4], 5) > ltpLimit) { prevVoiced = 1; /* true */ } if (st->bgHangover > 20) { if (gmed_n(ltpGainHist, 9) > ltpLimit) { prevVoiced = 1; /* true */ } else { prevVoiced = 0; /* false */ } } if (prevVoiced) { *voicedHangover = 0; } else { temp = *voicedHangover + 1; if (temp > 10) { *voicedHangover = 10; } else { *voicedHangover = temp; } } return inbgNoise; } /* * dtx_dec_activity_update * * * Parameters: * st->lsf_hist_ptr B: LSF history pointer * st->lsf_hist B: LSF history * lsf I: lsf * frame I: noise frame * * Function: * Update lsp history and compute log energy. * * Returns: * void */ __device__ static void dtx_dec_activity_update(dtx_decState *st, Word32 lsf[], Word32 frame[]) { Word32 frame_en; Word32 log_en_e, log_en_m, log_en, i; /* update lsp history */ st->lsf_hist_ptr += M; if (st->lsf_hist_ptr == 80) { st->lsf_hist_ptr = 0; } memcpy(&st->lsf_hist[st->lsf_hist_ptr], lsf, M << 2); /* compute log energy based on frame energy */ frame_en = 0; /* Q0 */ for (i = 0; (i < L_FRAME); i++) { frame_en += frame[i] * frame[i]; if (frame_en & 0x80000000) break; } log_en = (frame_en & 0xC0000000) ? 
0x7FFFFFFE : (Word32)frame_en << 1; Log2(log_en, &log_en_e, &log_en_m); /* convert exponent and mantissa to Word16 Q10 */ log_en = log_en_e << 10; /* Q10 */ log_en = log_en + (log_en_m >> 5); /* divide with L_FRAME i.e subtract with log2(L_FRAME) = 7.32193 */ log_en = log_en - 8521; /* * insert into log energy buffer, no division by two as * log_en in decoder is Q11 */ st->log_en_hist_ptr += 1; if (st->log_en_hist_ptr == DTX_HIST_SIZE) { st->log_en_hist_ptr = 0; } st->log_en_hist[st->log_en_hist_ptr] = log_en; /* Q11 */ } /* * Decoder_amr * * * Parameters: * st B: State variables * mode I: AMR mode * parm I: vector of synthesis parameters * frame_type I: received frame type * synth O: synthesis speech * A_t O: decoded LP filter in 4 subframes * * Function: * Speech decoder routine * * Returns: * void */ __device__ static void Decoder_amr(Decoder_amrState *st, enum Mode mode, Word16 parm[], enum RXFrameType frame_type, Word32 synth[], Word32 A_t[]) { /* LSPs */ Word32 lsp_new[M]; Word32 lsp_mid[M]; /* LSFs */ Word32 prev_lsf[M]; Word32 lsf_i[M]; /* Algebraic codevector */ Word32 code[L_SUBFR]; /* excitation */ Word32 excp[L_SUBFR]; Word32 exc_enhanced[L_SUBFR]; /* Scalars */ Word32 i, i_subfr, overflow, T0_frac, index, temp, temp2, subfrNr, excEnergy; Word32 gain_code, gain_code_mix, pit_sharp, pit_flag, pitch_fac, t0_min, t0_max; Word32 gain_pit = 0, evenSubfr = 0, T0 = 0, index_mr475 = 0; Word32 *Az; /* Pointer on A_t */ Word16 flag4, carefulFlag; Word16 delta_frc_low, delta_frc_range, tmp_shift; Word16 bfi = 0, pdfi = 0; /* bad frame indication flag, potential degraded bad frame flag */ enum DTXStateType newDTXState; /* SPEECH , DTX, DTX_MUTE */ /* find the new DTX state SPEECH OR DTX */ newDTXState = rx_dtx_handler(&st->dtxDecoderState, frame_type); /* DTX actions */ if (newDTXState != SPEECH) { Decoder_amr_reset(st, MRDTX); dtx_dec(&st->dtxDecoderState, st->mem_syn, &st->lsfState, &st->pred_state, &st->Cb_gain_averState, newDTXState, mode, parm, synth, A_t); /* update average lsp */ Lsf_lsp(st->lsfState.past_lsf_q, st->lsp_old); lsp_avg(&st->lsp_avg_st, st->lsfState.past_lsf_q); goto theEnd; } /* SPEECH action state machine */ if (table_speech_bad[frame_type]) { bfi = 1; if (frame_type != RX_SPEECH_BAD) { Build_CN_param(&st->nodataSeed, mode, parm); } } else if (frame_type == RX_SPEECH_DEGRADED) { pdfi = 1; } if (bfi != 0) { st->state += 1; } else if (st->state == 6) { st->state = 5; } else { st->state = 0; } if (st->state > 6) { st->state = 6; } /* * If this frame is the first speech frame after CNI period, * set the BFH state machine to an appropriate state depending * on whether there was DTX muting before start of speech or not * If there was DTX muting, the first speech frame is muted. * If there was no DTX muting, the first speech frame is not * muted. 
The BFH state machine starts from state 5, however, to * keep the audible noise resulting from a SID frame which is * erroneously interpreted as a good speech frame as small as * possible (the decoder output in this case is quickly muted) */ if (st->dtxDecoderState.dtxGlobalState == DTX) { st->state = 5; st->prev_bf = 0; } else if (st->dtxDecoderState.dtxGlobalState == DTX_MUTE) { st->state = 5; st->prev_bf = 1; } /* save old LSFs for CB gain smoothing */ memcpy(prev_lsf, st->lsfState.past_lsf_q, M << 2); /* * decode LSF parameters and generate interpolated lpc coefficients * for the 4 subframes */ if (mode != MR122) { D_plsf_3(&st->lsfState, mode, bfi, parm, lsp_new); /* Advance synthesis parameters pointer */ parm += 3; Int_lpc_1to3(st->lsp_old, lsp_new, A_t); } else { D_plsf_5(&st->lsfState, bfi, parm, lsp_mid, lsp_new); /* Advance synthesis parameters pointer */ parm += 5; Int_lpc_1and3(st->lsp_old, lsp_mid, lsp_new, A_t); } /* update the LSPs for the next frame */ memcpy(st->lsp_old, lsp_new, M << 2); /* * Loop for every subframe in the analysis frame * * The subframe size is L_SUBFR and the loop is repeated * L_FRAME/L_SUBFR times * * - decode the pitch delay * - decode algebraic code * - decode pitch and codebook gains * - find the excitation and compute synthesis speech */ /* pointer to interpolated LPC parameters */ Az = A_t; evenSubfr = 0; subfrNr = -1; for (i_subfr = 0; i_subfr < L_FRAME; i_subfr += L_SUBFR) { subfrNr += 1; evenSubfr = 1 - evenSubfr; /* flag for first and 3th subframe */ pit_flag = i_subfr; if (i_subfr == L_FRAME_BY2) { if ((mode != MR475) & (mode != MR515)) { pit_flag = 0; } } /* pitch index */ index = *parm++; /* * decode pitch lag and find adaptive codebook vector. */ if (mode != MR122) { /* * flag4 indicates encoding with 4 bit resolution; * this is needed for mode MR475, MR515, MR59 and MR67 */ flag4 = 0; if ((mode == MR475) || (mode == MR515) || (mode == MR59) || ( mode == MR67)) { flag4 = 1; } /* * get ranges for the t0_min and t0_max * only needed in delta decoding */ delta_frc_low = 5; delta_frc_range = 9; if (mode == MR795) { delta_frc_low = 10; delta_frc_range = 19; } t0_min = st->old_T0 - delta_frc_low; if (t0_min < PIT_MIN) { t0_min = PIT_MIN; } t0_max = t0_min + delta_frc_range; if (t0_max > PIT_MAX) { t0_max = PIT_MAX; t0_min = t0_max - delta_frc_range; } Dec_lag3(index, t0_min, t0_max, pit_flag, st->old_T0, &T0, &T0_frac, flag4); st->T0_lagBuff = T0; if (bfi != 0) { if (st->old_T0 < PIT_MAX) { /* Graceful pitch degradation */ st->old_T0 += 1; } T0 = st->old_T0; T0_frac = 0; if ((st->inBackgroundNoise != 0) & (st->voicedHangover > 4) & ( (mode == MR475) || (mode == MR515) || (mode == MR59))) { T0 = st->T0_lagBuff; } } Pred_lt_3or6_40(&st->old_exc[st->exc], T0, T0_frac, 1); } else { Dec_lag6(index, PIT_MIN_MR122, PIT_MAX, pit_flag, &T0, &T0_frac); if ((bfi != 0) || ((pit_flag != 0) & (index > 60))) { st->T0_lagBuff = T0; T0 = st->old_T0; T0_frac = 0; } Pred_lt_3or6_40(&st->old_exc[st->exc], T0, T0_frac, 0); } /* * (MR122 only: Decode pitch gain.) * Decode innovative codebook. 
* set pitch sharpening factor */ /* MR475, MR515 */ if ((mode == MR475) || (mode == MR515)) { /* index of position */ index = *parm++; /* signs */ i = *parm++; decode_2i40_9bits(subfrNr, i, index, code); pit_sharp = st->sharp << 1; } /* MR59 */ else if (mode == MR59) { /* index of position */ index = *parm++; /* signs */ i = *parm++; decode_2i40_11bits(i, index, code); pit_sharp = st->sharp << 1; } /* MR67 */ else if (mode == MR67) { /* index of position */ index = *parm++; /* signs */ i = *parm++; decode_3i40_14bits(i, index, code); pit_sharp = st->sharp << 1; } /* MR74, MR795 */ else if (mode <= MR795) { /* index of position */ index = *parm++; /* signs */ i = *parm++; decode_4i40_17bits(i, index, code); pit_sharp = st->sharp << 1; } /* MR102 */ else if (mode == MR102) { decode_8i40_31bits(parm, code); parm += 7; pit_sharp = st->sharp << 1; } /* MR122 */ else { index = *parm++; if (bfi != 0) { ec_gain_pitch(&st->ec_gain_p_st, st->state, &gain_pit); } else { gain_pit = d_gain_pitch(mode, index); } ec_gain_pitch_update(&st->ec_gain_p_st, bfi, st->prev_bf, &gain_pit); decode_10i40_35bits(parm, code); parm += 10; /* * pit_sharp = gain_pit; * if (pit_sharp > 1.0) pit_sharp = 1.0; */ pit_sharp = gain_pit; if (pit_sharp > 16383) pit_sharp = 32767; else pit_sharp *= 2; } /* * Add the pitch contribution to code[]. */ for (i = T0; i < L_SUBFR; i++) { temp = (code[i - T0] * pit_sharp) >> 15; code[i] = code[i] + temp; } /* * Decode codebook gain (MR122) or both pitch * gain and codebook gain (all others) * Update pitch sharpening "sharp" with quantized gain_pit */ if (mode == MR475) { /* read and decode pitch and code gain */ if (evenSubfr != 0) { /* index of gain(s) */ index_mr475 = *parm++; } if (bfi == 0) { Dec_gain(&st->pred_state, mode, index_mr475, code, evenSubfr, & gain_pit, &gain_code); } else { ec_gain_pitch(&st->ec_gain_p_st, st->state, &gain_pit); ec_gain_code(&st->ec_gain_c_st, &st->pred_state, st->state, & gain_code); } ec_gain_pitch_update(&st->ec_gain_p_st, bfi, st->prev_bf, &gain_pit); ec_gain_code_update(&st->ec_gain_c_st, bfi, st->prev_bf, &gain_code); pit_sharp = gain_pit; if (pit_sharp > SHARPMAX) { pit_sharp = SHARPMAX; } } else if ((mode <= MR74) || (mode == MR102)) { /* read and decode pitch and code gain */ /* index of gain(s) */ index = *parm++; if (bfi == 0) { Dec_gain(&st->pred_state, mode, index, code, evenSubfr, &gain_pit, & gain_code); } else { ec_gain_pitch(&st->ec_gain_p_st, st->state, &gain_pit); ec_gain_code(&st->ec_gain_c_st, &st->pred_state, st->state, & gain_code); } ec_gain_pitch_update(&st->ec_gain_p_st, bfi, st->prev_bf, &gain_pit); ec_gain_code_update(&st->ec_gain_c_st, bfi, st->prev_bf, &gain_code); pit_sharp = gain_pit; if (pit_sharp > SHARPMAX) { pit_sharp = SHARPMAX; } if (mode == MR102) { if (st->old_T0 > (L_SUBFR + 5)) { pit_sharp = pit_sharp >> 2; } } } else { /* read and decode pitch gain */ /* index of gain(s) */ index = *parm++; if (mode == MR795) { /* decode pitch gain */ if (bfi != 0) { ec_gain_pitch(&st->ec_gain_p_st, st->state, &gain_pit); } else { gain_pit = d_gain_pitch(mode, index); } ec_gain_pitch_update(&st->ec_gain_p_st, bfi, st->prev_bf, &gain_pit ); /* read and decode code gain */ index = *parm++; if (bfi == 0) { d_gain_code(&st->pred_state, mode, index, code, &gain_code); } else { ec_gain_code(&st->ec_gain_c_st, &st->pred_state, st->state, & gain_code); } ec_gain_code_update(&st->ec_gain_c_st, bfi, st->prev_bf, &gain_code ); pit_sharp = gain_pit; if (pit_sharp > SHARPMAX) { pit_sharp = SHARPMAX; } } else { /* MR122 */ if (bfi == 0) { 
d_gain_code(&st->pred_state, mode, index, code, &gain_code); } else { ec_gain_code(&st->ec_gain_c_st, &st->pred_state, st->state, & gain_code); } ec_gain_code_update(&st->ec_gain_c_st, bfi, st->prev_bf, &gain_code ); pit_sharp = gain_pit; } } /* * store pitch sharpening for next subframe * (for modes which use the previous pitch gain for * pitch sharpening in the search phase) * do not update sharpening in even subframes for MR475 */ if ((mode != MR475) || evenSubfr == 0) { st->sharp = gain_pit; if (st->sharp > SHARPMAX) { st->sharp = SHARPMAX; } } if (pit_sharp > 16383) pit_sharp = 32767; else pit_sharp *= 2; if (pit_sharp > 16384) { for (i = 0; i < L_SUBFR; i++) { temp = (st->old_exc[st->exc + i] * pit_sharp) >> 15; temp2 = (temp * gain_pit) << 1; if (mode == MR122) { temp2 = (temp2 >> 1); } excp[i] = (temp2 + 0x00008000L) >> 16; } } /* * Store list of LTP gains needed in the source * characteristic detector (SCD) */ if (bfi == 0) { for (i = 0; i < 8; i++) { st->ltpGainHistory[i] = st->ltpGainHistory[i + 1]; } st->ltpGainHistory[8] = gain_pit; } /* * Limit gain_pit if in background noise and BFI * for MR475, MR515, MR59 */ if ((st->prev_bf != 0 || bfi != 0) & (st->inBackgroundNoise != 0) & ( (mode == MR475) || (mode == MR515) || (mode == MR59))) { /* if (gain_pit > 0.75) in Q14*/ if (gain_pit > 12288) /* gain_pit = (gain_pit-0.75)/2.0 + 0.75; */ gain_pit = ((gain_pit - 12288) >> 1) + 12288; /* if (gain_pit > 0.90) in Q14*/ if (gain_pit > 14745) { gain_pit = 14745; } } /* * Calculate CB mixed gain */ Int_lsf(prev_lsf, st->lsfState.past_lsf_q, i_subfr, lsf_i); gain_code_mix = Cb_gain_average(&st->Cb_gain_averState, mode, gain_code, lsf_i, st->lsp_avg_st.lsp_meanSave, bfi, st->prev_bf, pdfi, st-> prev_pdf, st->inBackgroundNoise, st->voicedHangover); /* make sure that MR74, MR795, MR122 have original codeGain*/ /* MR74, MR795, MR122 */ if ((mode > MR67) & (mode != MR102)) { gain_code_mix = gain_code; } /* * Find the total excitation. * Find synthesis speech corresponding to st->exc[]. */ /* MR475, MR515, MR59, MR67, MR74, MR795, MR102*/ if (mode <= MR102) { pitch_fac = gain_pit; tmp_shift = 1; } /* MR122 */ else { pitch_fac = gain_pit >> 1; tmp_shift = 2; } /* * copy unscaled LTP excitation to exc_enhanced (used in phase * dispersion below) and compute total excitation for LTP feedback */ memcpy(exc_enhanced, &st->old_exc[st->exc], L_SUBFR << 2); for (i = 0; i < L_SUBFR; i++) { /* st->exc[i] = gain_pit*st->exc[i] + gain_code*code[i]; */ temp = (st->old_exc[st->exc + i] * pitch_fac) + (code[i] * gain_code); temp2 = (temp << tmp_shift); if (((temp2 >> 1) ^ temp2) & 0x40000000) { if ((temp ^ temp2) & 0x80000000) { temp2 = (temp & 0x80000000) ? (-1073741824L) : 1073725439; } else { temp2 = (temp2 & 0x80000000) ? (-1073741824L) : 1073725439; } } st->old_exc[st->exc + i] = (temp2 + 0x00004000L) >> 15; } /* * Adaptive phase dispersion */ /* free phase dispersion adaption */ st->ph_disp_st.lockFull = 0; if (((mode == MR475) || (mode == MR515) || (mode == MR59)) & (st ->voicedHangover > 3) & (st->inBackgroundNoise != 0) & (bfi != 0 )) { /* * Always Use full Phase Disp. * if error in bg noise */ st->ph_disp_st.lockFull = 1; } /* * apply phase dispersion to innovation (if enabled) and * compute total excitation for synthesis part */ ph_disp(&st->ph_disp_st, mode, exc_enhanced, gain_code_mix, gain_pit, code , pitch_fac, tmp_shift); /* * The Excitation control module are active during BFI. * Conceal drops in signal energy if in bg noise. 
*/ temp2 = 0; for (i = 0; i < L_SUBFR; i++) { temp2 += (exc_enhanced[i] * exc_enhanced[i]); } if (temp2 > 0x3FFFFFFF) { excEnergy = 11584; } else { temp2 = sqrt_l_exp(temp2, &temp); temp2 = (temp2 >> ((temp >> 1) + 15)); excEnergy = temp2 >> 2; } if (((mode == MR475) || (mode == MR515) || (mode == MR59)) & (st ->voicedHangover > 5) & (st->inBackgroundNoise != 0) & (st-> state < 4) & (((pdfi != 0) & (st->prev_pdf != 0)) || bfi != 0 || st->prev_bf != 0)) { carefulFlag = 0; if ((pdfi != 0) & (bfi == 0)) { carefulFlag = 1; } Ex_ctrl(exc_enhanced, excEnergy, st->excEnergyHist, st->voicedHangover , st->prev_bf, carefulFlag); } if ((st->inBackgroundNoise != 0) & (bfi != 0 || st->prev_bf != 0) & ( st->state < 4)) { ; /* do nothing! */ } else { /* Update energy history for all modes */ for (i = 0; i < 8; i++) { st->excEnergyHist[i] = st->excEnergyHist[i + 1]; } st->excEnergyHist[8] = excEnergy; } /* * Excitation control module end. */ if (pit_sharp > 16384) { for (i = 0; i < L_SUBFR; i++) { excp[i] = excp[i] + exc_enhanced[i]; if (labs(excp[i]) > 32767) excp[i] = (excp[i] & 0x80000000) ? -32768 : 32767; } agc2(exc_enhanced, excp); overflow = Syn_filt(Az, excp, &synth[i_subfr], L_SUBFR, st->mem_syn, 0 ); } else { overflow = Syn_filt(Az, exc_enhanced, &synth[i_subfr], L_SUBFR, st-> mem_syn, 0); } if (overflow) { for (i = 0; i < PIT_MAX + L_INTERPOL + L_SUBFR; i++) { st->old_exc[i] = st->old_exc[i] >> 2; } for (i = 0; i < L_SUBFR; i++) { exc_enhanced[i] = exc_enhanced[i] >> 2; } Syn_filt_overflow(Az, exc_enhanced, &synth[i_subfr], L_SUBFR, st->mem_syn, 1); } else { memcpy(st->mem_syn, &synth[i_subfr + 30], 40); } /* * Update signal for next frame. * -> shift to the left by L_SUBFR st->exc[] */ memcpy(&st->old_exc[0], &st->old_exc[L_SUBFR], (PIT_MAX + L_INTERPOL) << 2); /* interpolated LPC parameters for next subframe */ Az += MP1; /* store T0 for next subframe */ st->old_T0 = T0; } /* * Call the Source Characteristic Detector which updates * st->inBackgroundNoise and st->voicedHangover. */ st->inBackgroundNoise = Bgn_scd(&st->background_state, &(st->ltpGainHistory[ 0]), &(synth[0]), &(st->voicedHangover)); dtx_dec_activity_update(&st->dtxDecoderState, st->lsfState.past_lsf_q, synth); /* store bfi for next subframe */ st->prev_bf = bfi; st->prev_pdf = pdfi; /* * Calculate the LSF averages on the eight * previous frames */ lsp_avg(&st->lsp_avg_st, st->lsfState.past_lsf_q); theEnd: st->dtxDecoderState.dtxGlobalState = newDTXState; return; } /* * Residu40 * * * Parameters: * a I: prediction coefficients * x I: speech signal * y O: residual signal * * Function: * The LP residual is computed by filtering the input * speech through the LP inverse filter a(z) * * Returns: * void */ __device__ static void Residu40(Word32 a[], Word32 x[], Word32 y[]) { Word32 s, i, j; for (i = 0; i < 40; i++) { s = a[0] * x[i] + a[1] * x[i - 1] + a[2] * x[i - 2] + a[3] * x[i - 3]; s += a[4] * x[i - 4] + a[5] * x[i - 5] + a[6] * x[i - 6] + a[7] * x[i - 7] ; s += a[8] * x[i - 8] + a[9] * x[i - 9] + a[10] * x[i - 10]; y[i] = (s + 0x800) >> 12; if (abs(y[i]) > 32767) { /* go to safe mode */ for (i = 0; i < 40; i++) { s = a[0] * x[i]; for (j = 1; j <= 10; j++) { s += a[j] * x[i - j]; if (s > 1073741823) { s = 1073741823; } else if (s < -1073741824) { s = -1073741824; } } y[i] = (s + 0x800) >> 12; if (abs(y[i]) > 32767) y[i] = (y[i] & 0x80000000) ? 
-32768 : 32767; } return; } } return; } /* * agc * * * Parameters: * st->past_gain B: gain memory * sig_in I: Post_Filter input signal * sig_out B: Post_Filter output signal * agc_fac I: AGC factor * * Function: * Scales the Post_Filter output on a subframe basis * * Returns: * void */ __device__ static void agc(agcState *st, Word32 *sig_in, Word32 *sig_out, Word16 agc_fac) { Word32 s, gain_in, gain_out, g0, gain; int exp, i; /* calculate gain_out with exponent */ s = energy_new(sig_out); if (s == 0) { st->past_gain = 0; return; } exp = 0; i = s; while (!(i & 0x40000000)) { exp++; i = i << 1; } exp -= 1; if (exp & 0x80000000) { s >>= 1; } else { s <<= exp; } gain_out = (s + 0x00008000L) >> 16; /* calculate gain_in with exponent */ s = energy_new(sig_in); if (s == 0) { g0 = 0; } else { i = 0; while (!(s & 0x40000000)) { i++; s = s << 1; } s = s + 0x00008000L; if (s >= 0) gain_in = s >> 16; else gain_in = 32767; exp = (exp - i); /* * g0 = (1-agc_fac) * sqrt(gain_in/gain_out); */ /* s = gain_out / gain_in */ s = (gain_out << 15) / gain_in; exp = 7 - exp; if (exp > 0) { if (exp > 31) { if (s) { s = 2147483647; } } else { s = s << exp; } } else s = (s >> (-exp)); if (s < 0) s = 2147483647; s = Inv_sqrt(s); i = ((s << 9) + 0x00008000L) >> 16; if (i & 0xFFFF8000) i = 32767; /* g0 = i * (1-agc_fac) */ g0 = (i * (32767 - agc_fac)) >> 15; } /* * compute gain[n] = agc_fac * gain[n-1] + (1-agc_fac) * sqrt(gain_in/gain_out) * sig_out[n] = gain[n] * sig_out[n] */ gain = st->past_gain; for (i = 0; i < L_SUBFR; i++) { gain = (gain * agc_fac) >> 15; gain = gain + g0; sig_out[i] = (sig_out[i] * gain) >> 12; if (labs(sig_out[i]) > 32767) sig_out[i] = (sig_out[i] & 0x8000000) ? -32768 : 32767; } st->past_gain = gain; return; } /* * Post_Filter * * * Parameters: * st B: post filter states * mode I: AMR mode * syn B: synthesis speech * Az_4 I: interpolated LPC parameters in all subfr. * * Function: * Post_Filtering of synthesis speech. 
* * inverse filtering of syn[] through A(z/0.7) to get res2[] * tilt compensation filtering; 1 - MU*k*z^-1 * synthesis filtering through 1/A(z/0.75) * adaptive gain control * * Returns: * void */ __device__ static void Post_Filter(Post_FilterState *st, enum Mode mode, Word32 *syn, Word32 *Az_4) { Word32 h[22], Ap3[MP1], Ap4[MP1]; /* bandwidth expanded LP parameters */ Word32 tmp, i_subfr, i, temp1, temp2, overflow = 0; Word32 *Az, *p1, *p2, *syn_work = &st->synth_buf[M]; const Word32 *pgamma3 = &gamma3[0]; const Word32 *pgamma4 = &gamma4_gamma3_MR122[0]; /* * Post filtering */ memcpy(syn_work, syn, L_FRAME << 2); Az = Az_4; if ((mode == MR122) || (mode == MR102)) { pgamma3 = &gamma4_gamma3_MR122[0]; pgamma4 = &gamma4_MR122[0]; } for (i_subfr = 0; i_subfr < L_FRAME; i_subfr += L_SUBFR) { /* Find weighted filter coefficients Ap3[] and Ap[4] */ Ap3[0] = Az[0]; Ap4[0] = Az[0]; for (i = 1; i <= 10; i++) { Ap3[i] = (Az[i] * pgamma3[i - 1] + 0x4000) >> 15; Ap4[i] = (Az[i] * pgamma4[i - 1] + 0x4000) >> 15; } /* filtering of synthesis speech by A(z/0.7) to find res2[] */ Residu40(Ap3, &syn_work[i_subfr], st->res2); /* tilt compensation filter */ /* impulse response of A(z/0.7)/A(z/0.75) */ memcpy(h, Ap3, MP1 << 2); memset(&h[M + 1], 0, (22 - M - 1) << 2); Syn_filt(Ap4, h, h, 22, &h[M + 1], 0); /* 1st correlation of h[] */ tmp = 16777216 + h[1] * h[1]; for (i = 2; i < 22; i++) { tmp += h[i] * h[i]; if (tmp > 0x3FFF8000) break; } temp1 = tmp >> 15; if (temp1 & 0xFFFF8000) temp1 = 32767; tmp = h[0] * h[1]; for (i = 1; i < 21; i++) { tmp += h[i] * h[i + 1]; if (abs(tmp) > 1073741823) tmp = 1073741823; } temp2 = tmp >> 15; if (temp2 <= 0) { temp2 = 0; } else { tmp = temp2 * 26214; temp2 = (tmp & 0xffff8000) / temp1; } /* preemphasis */ p1 = st->res2 + 39; p2 = p1 - 1; tmp = *p1; do { *p1 = *p1 - ((temp2 * *p2--) >> 15); if (abs(*p1) > 32767) { *p1 = (*p1 & 0x80000000) ? -32768 : 32767; } p1--; *p1 = *p1 - ((temp2 * *p2--) >> 15); if (abs(*p1) > 32767) { *p1 = (*p1 & 0x80000000) ? -32768 : 32767; } p1--; *p1 = *p1 - ((temp2 * *p2--) >> 15); if (abs(*p1) > 32767) { *p1 = (*p1 & 0x80000000) ? -32768 : 32767; } p1--; } while (p1 > st->res2); *p1 = *p1 - ((temp2 * st->preemph_state_mem_pre) >> 15); if (abs(*p1) > 32767) { *p1 = (*p1 & 0x80000000) ? -32768 : 32767; } st->preemph_state_mem_pre = tmp; /* filtering through 1/A(z/0.75) */ overflow = Syn_filt(Ap4, st->res2, &syn[i_subfr], L_SUBFR, st->mem_syn_pst, 0); if (overflow) { Syn_filt_overflow(Ap4, st->res2, &syn[i_subfr], L_SUBFR, st->mem_syn_pst, 1); overflow = 0; } else { memcpy(st->mem_syn_pst, &syn[i_subfr + 30], 40); } /* scale output to input */ agc(&st->agc_state, &syn_work[i_subfr], &syn[i_subfr], AGC_FAC); Az += MP1; } /* update syn_work[] buffer */ memcpy(&syn_work[-M], &syn_work[L_FRAME - M], M << 2); return; } /* * Post_Process * * * Parameters: * st B: post filter states * signal B: signal * * Function: * Postprocessing of input speech. * * 2nd order high pass filtering with cut off frequency at 60 Hz. * Multiplication of output by two. 
* * * Returns: * void */ __device__ static void Post_Process(Post_ProcessState *st, Word32 signal[]) { Word32 x2, tmp, i = 0; Word32 mask = 0x40000000; do { x2 = st->x1; st->x1 = st->x0; st->x0 = signal[i]; /* * y[i] = b[0]*x[i]*2 + b[1]*x[i-1]*2 + b140[2]*x[i-2]/2 * + a[1]*y[i-1] + a[2] * y[i-2]; */ tmp = (st->y1_hi * 15836) + (((st->y1_lo * 15836) & (Word32)0xffff8000) >> 15); tmp += (st->y2_hi * -7667) + (((st->y2_lo * (-7667)) & (Word32)0xffff8000) >> 15); tmp += st->x0 * 7699; tmp += st->x1 * -15398; if (((tmp >> 1) ^ tmp) & mask) tmp = (tmp & 0x80000000) ? -1073741824 : 1073741823; tmp += x2 * 7699; if (((tmp >> 1) ^ tmp) & mask) tmp = (tmp & 0x80000000) ? -1073741824 : 1073741823; tmp = tmp << 1; if (((tmp >> 1) ^ tmp) & mask) tmp = (tmp & 0x80000000) ? -1073741824 : 1073741823; tmp = tmp << 1; if (((tmp >> 1) ^ tmp) & mask) tmp = (tmp & 0x80000000) ? -1073741824 : 1073741823; if (labs(tmp) < 536862720) { signal[i++] = (tmp + 0x00002000L) >> 14; } else if (tmp > 0) { signal[i++] = 32767; } else { signal[i++] = -32768; } st->y2_hi = st->y1_hi; st->y2_lo = st->y1_lo; st->y1_hi = tmp >> 15; st->y1_lo = ((tmp << 1) - (st->y1_hi << 16)) >> 1; } while (i < 160); return; } /* * Speech_Decode_Frame * * * Parameters: * st B: decoder memory * mode I: AMR mode * parm I: speech parameters * frame_type I: Frame type * synth O: synthesis speech * Function: * Decode one frame * * Returns: * void */ __device__ void Speech_Decode_Frame(Speech_Decode_FrameState* state, enum Mode mode, Word16 *parm, enum RXFrameType frame_type, Word16 *synth) { Word32 Az_dec[AZ_SIZE]; /* Decoded Az for post-filter in 4 subframes*/ Word32 synth_speech[L_FRAME]; Word32 i; /* Synthesis */ Decoder_amr(&state->decoder_amrState, mode, parm, frame_type, synth_speech, Az_dec); Post_Filter(&state->post_state, mode, synth_speech, Az_dec); /* post HP filter, and 15->16 bits */ Post_Process(&state->postHP_state, synth_speech); for (i = 0; i < L_FRAME; i++) { #ifndef NO13BIT /* Truncate to 13 bits */ synth[i] = (Word16)(synth_speech[i] & 0xfff8); #else synth[i] = (Word16)(synth_speech[i]); #endif } return; } /* * Post_Process_reset * * * Parameters: * state B: state structure * * Function: * Resets state memory * * Returns: * -1 failure */ __device__ static int Post_Process_reset(Post_ProcessState *state) { state->y2_hi = 0; state->y2_lo = 0; state->y1_hi = 0; state->y1_lo = 0; state->x0 = 0; state->x1 = 0; return 0; } /* * Decoder_amr_init * * * Parameters: * state O: state structure * * Function: * Allocates state memory and initializes state memory * * Returns: * success = 0 */ __device__ static int Decoder_amr_init(Decoder_amrState *state) { Decoder_amr_reset(state, MR475); return 0; } /* * Post_Filter_reset * * * Parameters: * state B: state structure * * Function: * Resets state memory * * Returns: * -1 failure */ __device__ static int Post_Filter_reset(Post_FilterState *state) { state->preemph_state_mem_pre = 0; state->agc_state.past_gain = 4096; memset(state->mem_syn_pst, 0, M << 2); memset(state->res2, 0, L_SUBFR << 2); memset(state->synth_buf, 0, (L_FRAME + M) << 2); return 0; } /* * Post_Filter_init * * * Parameters: * state O: state structure * * Function: * Allocates state memory and initializes state memory * * Returns: * success = 0 */ __device__ static int Post_Filter_init(Post_FilterState* state) { Post_Filter_reset(state); return 0; } /* * Post_Process_init * * * Parameters: * state O: state structure * * Function: * Allocates state memory and initializes state memory * * Returns: * success = 0 */ __device__ 
static int Post_Process_init(Post_ProcessState *state) { Post_Process_reset(state); return 0; } /* * Speech_Decode_Frame_reset * * * Parameters: * state B: state structure * * Function: * Resets state memory * * Returns: * -1 = failure */ __device__ int Speech_Decode_Frame_reset(Speech_Decode_FrameState* state) { Decoder_amr_reset(&state->decoder_amrState, (enum Mode)0); Post_Filter_reset(&state->post_state); Post_Process_reset(&state->postHP_state); return 0; } /* * Speech_Decode_Frame_init * * * Parameters: * state O: state structure * * Function: * Allocates state memory and initializes state memory * * Returns: * success = 0 */ __device__ void Speech_Decode_Frame_init(Speech_Decode_FrameState* state) { Decoder_amr_init(&state->decoder_amrState); Post_Filter_init(&state->post_state); Post_Process_init(&state->postHP_state); return; } __device__ enum Mode DecoderMMS(Word16 *param, UWord8 *stream, enum RXFrameType *frame_type, enum Mode *speech_mode, Word16 *q_bit) { enum Mode mode; Word32 j; Word16 *mask; memset(param, 0, PRMNO_MR122 << 1); *q_bit = 0x01 & (*stream >> 2); mode = Mode(0x0F & (*stream >> 3)); stream++; if (mode == MRDTX) { mask = order_MRDTX; for (j = 1; j < 36; j++) { if (*stream & 0x80) param[*mask] = (short)(param[*mask] + *(mask + 1)); mask += 2; if (j % 8) *stream <<= 1; else stream++; } /* get SID type bit */ *frame_type = RX_SID_FIRST; if (*stream & 0x80) *frame_type = RX_SID_UPDATE; /* since there is update, use it */ /* *frame_type = RX_SID_UPDATE; */ /* speech mode indicator */ *speech_mode = Mode((*stream >> 4) & 0x07); *speech_mode = Mode(((*speech_mode & 0x0001) << 2) | (*speech_mode & 0x0002) | ((*speech_mode & 0x0004) >> 2)); } else if (mode == 15) { *frame_type = RX_NO_DATA; } else if (mode == MR475) { mask = order_MR475; for (j = 1; j < 96; j++) { if (*stream & 0x80) param[*mask] = (short)(param[*mask] + *(mask + 1)); mask += 2; if (j % 8) *stream <<= 1; else stream++; } *frame_type = RX_SPEECH_GOOD; } else if (mode == MR515) { mask = order_MR515; for (j = 1; j < 104; j++) { if (*stream & 0x80) param[*mask] = (short)(param[*mask] + *(mask + 1)); mask += 2; if (j % 8) *stream <<= 1; else stream++; } *frame_type = RX_SPEECH_GOOD; } else if (mode == MR59) { mask = order_MR59; for (j = 1; j < 119; j++) { if (*stream & 0x80) param[*mask] = (short)(param[*mask] + *(mask + 1)); mask += 2; if (j % 8) *stream <<= 1; else stream++; } *frame_type = RX_SPEECH_GOOD; } else if (mode == MR67) { mask = order_MR67; for (j = 1; j < 135; j++) { if (*stream & 0x80) param[*mask] = (short)(param[*mask] + *(mask + 1)); mask += 2; if (j % 8) *stream <<= 1; else stream++; } *frame_type = RX_SPEECH_GOOD; } else if (mode == MR74) { mask = order_MR74; for (j = 1; j < 149; j++) { if (*stream & 0x80) param[*mask] = (short)(param[*mask] + *(mask + 1)); mask += 2; if (j % 8) *stream <<= 1; else stream++; } *frame_type = RX_SPEECH_GOOD; } else if (mode == MR795) { mask = order_MR795; for (j = 1; j < 160; j++) { if (*stream & 0x80) param[*mask] = (short)(param[*mask] + *(mask + 1)); mask += 2; if (j % 8) *stream <<= 1; else stream++; } *frame_type = RX_SPEECH_GOOD; } else if (mode == MR102) { mask = order_MR102; for (j = 1; j < 205; j++) { if (*stream & 0x80) param[*mask] = (short)(param[*mask] + *(mask + 1)); mask += 2; if (j % 8) *stream <<= 1; else stream++; } *frame_type = RX_SPEECH_GOOD; } else if (mode == MR122) { mask = order_MR122; for (j = 1; j < 245; j++) { if (*stream & 0x80) param[*mask] = (short)(param[*mask] + *(mask + 1)); mask += 2; if (j % 8) *stream <<= 1; else stream++; 
} *frame_type = RX_SPEECH_GOOD; } else *frame_type = RX_SPEECH_BAD; return mode; } /* * Decoder_Interface_reset * * * Parameters: * st O: state struct * * Function: * Reset homing frame counter * * Returns: * void */ __device__ void Decoder_Interface_reset(dec_interface_State *st) { st->reset_flag_old = 1; st->prev_ft = RX_SPEECH_GOOD; st->prev_mode = MR475; /* minimum bitrate */ } /* * Decoder_Interface_init * * * Parameters: * void * * Function: * Allocates state memory and initializes state memory * * Returns: * success : pointer to structure * failure : NULL */ __device__ void Decoder_Interface_init(dec_interface_State* state) { Speech_Decode_Frame_init(&state->decoder_State); Decoder_Interface_reset(state); } /* * Decoder_Interface_Decode * * * Parameters: * st B: state structure * bits I: bit stream * synth O: synthesized speech * bfi I: bad frame indicator * * Function: * Decode bit stream to synthesized speech * * Returns: * Void */ __device__ void Decoder_Interface_Decode(dec_interface_State* state, UWord8 *bits, Word16 *synth, int bfi) { enum Mode mode; /* AMR mode */ enum Mode speech_mode = MR475; /* speech mode */ Word16 prm[PRMNO_MR122]; /* AMR parameters */ enum RXFrameType frame_type; /* frame type */ const Word16 *homing; /* pointer to homing frame */ Word16 homingSize; /* frame size for homing frame */ Word32 i; /* counter */ Word32 resetFlag = 1; /* homing frame */ Word16 q_bit; /* * extract mode information and frametype, * octets to parameters */ mode = DecoderMMS(prm, bits, &frame_type, &speech_mode, &q_bit); if (!bfi) bfi = 1 - q_bit; if (bfi == 1) { if (mode <= MR122) { frame_type = RX_SPEECH_BAD; } else if (frame_type != RX_NO_DATA) { frame_type = RX_SID_BAD; mode = state->prev_mode; } } else { if (frame_type == RX_SID_FIRST || frame_type == RX_SID_UPDATE) { mode = speech_mode; } else if (frame_type == RX_NO_DATA) { mode = state->prev_mode; } /* * if no mode information * guess one from the previous frame */ if (frame_type == RX_SPEECH_BAD) { mode = state->prev_mode; if (state->prev_ft >= RX_SID_FIRST) { frame_type = RX_SID_BAD; } } } /* test for homing frame */ if (state->reset_flag_old == 1) { switch (mode) { case MR122: homing = dhf_MR122; homingSize = 18; break; case MR102: homing = dhf_MR102; homingSize = 12; break; case MR795: homing = dhf_MR795; homingSize = 8; break; case MR74: homing = dhf_MR74; homingSize = 7; break; case MR67: homing = dhf_MR67; homingSize = 7; break; case MR59: homing = dhf_MR59; homingSize = 7; break; case MR515: homing = dhf_MR515; homingSize = 7; break; case MR475: homing = dhf_MR475; homingSize = 7; break; default: homing = NULL; homingSize = 0; break; } for (i = 0; i < homingSize; i++) { resetFlag = prm[i] ^ homing[i]; if (resetFlag) break; } } if ((resetFlag == 0) && (state->reset_flag_old != 0)) { for (i = 0; i < 160; i++) { synth[i] = EHF_MASK; } } else Speech_Decode_Frame(&state->decoder_State, mode, prm, frame_type, synth); if (state->reset_flag_old == 0) { /* check whole frame */ switch (mode) { case MR122: homing = dhf_MR122; homingSize = PRMNO_MR122; break; case MR102: homing = dhf_MR102; homingSize = PRMNO_MR102; break; case MR795: homing = dhf_MR795; homingSize = PRMNO_MR795; break; case MR74: homing = dhf_MR74; homingSize = PRMNO_MR74; break; case MR67: homing = dhf_MR67; homingSize = PRMNO_MR67; break; case MR59: homing = dhf_MR59; homingSize = PRMNO_MR59; break; case MR515: homing = dhf_MR515; homingSize = PRMNO_MR515; break; case MR475: homing = dhf_MR475; homingSize = PRMNO_MR475; break; default: homing = NULL; 
homingSize = 0; } for (i = 0; i < homingSize; i++) { resetFlag = prm[i] ^ homing[i]; if (resetFlag) break; } } /* reset decoder if current frame is a homing frame */ if (resetFlag == 0) { Speech_Decode_Frame_reset(&state->decoder_State); } state->reset_flag_old = !resetFlag; state->prev_ft = frame_type; state->prev_mode = mode; }
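/*
 * A usage sketch added for illustration; it is not part of the original source.
 * It shows one possible way to drive the device-side decoder above from a kernel,
 * with one independent decoder state and one packed frame per thread. The kernel
 * name, the 32-byte stride assumed for each packed MMS frame and the flat frame
 * layout are assumptions; only Decoder_Interface_init, Decoder_Interface_Decode
 * and the 160-sample output frame come from the code above. A real AMR stream
 * carries decoder state across consecutive frames, which this per-frame sketch
 * deliberately ignores.
 */
__global__ void decode_frames_sketch(dec_interface_State *states, UWord8 *bits,
                                     Word16 *synth, int n_frames)
{
    int f = blockIdx.x * blockDim.x + threadIdx.x;
    if (f >= n_frames)
        return;
    /* independent state per thread; the frame stride of 32 bytes is an assumption */
    Decoder_Interface_init(&states[f]);
    Decoder_Interface_Decode(&states[f], &bits[f * 32], &synth[f * 160], 0);
}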
d793d98078f022ed160d9bc05bdcf80e1a9cb985.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdlib.h>
#include <stdio.h>
#include <math.h>

__global__ void reduce_kernel(float *in, float *out, int ntot)
{
    int i = blockIdx.x;
    int j = i * blockDim.x + threadIdx.x;
    if(j < ntot) out[i] += in[j];
}

__host__ void init_vec(float *h_in, int ntot)
{
    for(int i = 0 ; i < ntot ; i++)
    {
        h_in[i] = sinf(float(i));
    }
}

__host__ void verif(float sum, float *h_in, int ntot)
{
    float sum_res = 0.;
    for(int i = 0 ; i < ntot ; i++)
    {
        sum_res += h_in[i];
    }
    float err = fabsf((sum - sum_res)/sum);
    printf("GPU sum : %.4e\n", sum);
    printf("CPU sum : %.4e\n", sum_res);
    if (err < 1.e-4)
    {
        printf("TEST PASSED (err %.4e < 1.e-4).\n", err);
    }
    else
    {
        printf("TEST FAILED (err %.4e > 1.e-4).\n", err);
    }
}

int main(int argc, char **argv)
{
    float sum;
    int nthreads, nblocks, ntot;
    int i, j;

    nthreads = 128;
    ntot = atoi(argv[1]);
    nblocks = (ntot + nthreads - 1) / nthreads;

    printf("Ntot : %d\n", ntot);
    printf("nthreads : %d\n", nthreads);
    printf("nblocks : %d\n", nblocks);

    float *d_sum, *d_bl, *d_in, *h_in, *h_bl;

    h_in = (float*)malloc(ntot*sizeof(float));
    h_bl = (float*)malloc(nblocks*sizeof(float));
    hipMalloc((void**)&d_sum, sizeof(float));
    hipMalloc((void**)&d_bl, nblocks*sizeof(float));
    hipMalloc((void**)&d_in, ntot*sizeof(float));

    init_vec(h_in, ntot);
    for(j = 0; j < ntot; j++) printf("|%f",h_in[j]);
    hipMemcpy(d_in, h_in, ntot*sizeof(float), hipMemcpyHostToDevice);

    // TODO : la réduction de d_in a lieu ici, le resultat est obtenu dans *d_sum
    dim3 dimGrid(nblocks,1,1);
    dim3 dimBlock(nthreads,1,1);
    hipLaunchKernelGGL(( reduce_kernel), dim3(dimGrid), dim3(dimBlock), 0, 0, d_in, d_bl, ntot);

    hipMemcpy(h_bl, d_bl, sizeof(float), hipMemcpyDeviceToHost);
    for(i = 0; i < nblocks; i++) printf("Block %d : ",i);
    for(j = 0; j < ntot / nblocks; j++) printf("|%f",h_in[i*nblocks+j]);
    printf(" - result %f\n",h_bl[i]);

    //hipMemcpy(&sum, d_sum, sizeof(float), hipMemcpyDeviceToHost);
    //verif(sum, h_in, ntot);

    hipFree(d_sum);
    hipFree(d_bl);
    hipFree(d_in);
    free(h_in);
    return 0;
}
d793d98078f022ed160d9bc05bdcf80e1a9cb985.cu
#include <stdlib.h> #include <stdio.h> #include <math.h> __global__ void reduce_kernel(float *in, float *out, int ntot) { int i = blockIdx.x; int j = i * blockDim.x + threadIdx.x; if(j < ntot) out[i] += in[j]; } __host__ void init_vec(float *h_in, int ntot) { for(int i = 0 ; i < ntot ; i++) { h_in[i] = sinf(float(i)); } } __host__ void verif(float sum, float *h_in, int ntot) { float sum_res = 0.; for(int i = 0 ; i < ntot ; i++) { sum_res += h_in[i]; } float err = fabsf((sum - sum_res)/sum); printf("GPU sum : %.4e\n", sum); printf("CPU sum : %.4e\n", sum_res); if (err < 1.e-4) { printf("TEST PASSED (err %.4e < 1.e-4).\n", err); } else { printf("TEST FAILED (err %.4e > 1.e-4).\n", err); } } int main(int argc, char **argv) { float sum; int nthreads, nblocks, ntot; int i, j; nthreads = 128; ntot = atoi(argv[1]); nblocks = (ntot + nthreads - 1) / nthreads; printf("Ntot : %d\n", ntot); printf("nthreads : %d\n", nthreads); printf("nblocks : %d\n", nblocks); float *d_sum, *d_bl, *d_in, *h_in, *h_bl; h_in = (float*)malloc(ntot*sizeof(float)); h_bl = (float*)malloc(nblocks*sizeof(float)); cudaMalloc((void**)&d_sum, sizeof(float)); cudaMalloc((void**)&d_bl, nblocks*sizeof(float)); cudaMalloc((void**)&d_in, ntot*sizeof(float)); init_vec(h_in, ntot); for(j = 0; j < ntot; j++) printf("|%f",h_in[j]); cudaMemcpy(d_in, h_in, ntot*sizeof(float), cudaMemcpyHostToDevice); // TODO : la réduction de d_in a lieu ici, le resultat est obtenu dans *d_sum dim3 dimGrid(nblocks,1,1); dim3 dimBlock(nthreads,1,1); reduce_kernel<<<dimGrid, dimBlock>>>(d_in, d_bl, ntot); cudaMemcpy(h_bl, d_bl, sizeof(float), cudaMemcpyDeviceToHost); for(i = 0; i < nblocks; i++) printf("Block %d : ",i); for(j = 0; j < ntot / nblocks; j++) printf("|%f",h_in[i*nblocks+j]); printf(" - result %f\n",h_bl[i]); //cudaMemcpy(&sum, d_sum, sizeof(float), cudaMemcpyDeviceToHost); //verif(sum, h_in, ntot); cudaFree(d_sum); cudaFree(d_bl); cudaFree(d_in); free(h_in); return 0; }
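/*
 * A possible completion of the TODO above; this is an illustrative sketch, not the
 * original author's solution. reduce_kernel accumulates into out[i] from every
 * thread of block i without any synchronization, so the per-block sums it writes
 * are unreliable. The kernel below keeps the same launch shape (nblocks x nthreads,
 * with nthreads a power of two such as the 128 used in main) but reduces each block
 * in shared memory and writes one partial sum per block; the per-block results
 * still have to be added up (on the host, or with a second kernel pass) to obtain
 * the value verif() expects in *d_sum.
 */
__global__ void reduce_shared_kernel(const float *in, float *out, int ntot)
{
    extern __shared__ float sdata[];
    int tid = threadIdx.x;
    int j = blockIdx.x * blockDim.x + tid;

    /* load one element per thread, padding the last block with zeros */
    sdata[tid] = (j < ntot) ? in[j] : 0.0f;
    __syncthreads();

    /* tree reduction within the block (assumes blockDim.x is a power of two) */
    for (int s = blockDim.x / 2; s > 0; s >>= 1) {
        if (tid < s)
            sdata[tid] += sdata[tid + s];
        __syncthreads();
    }

    if (tid == 0)
        out[blockIdx.x] = sdata[0];
}
/*
 * Possible launch, mirroring the existing one but with dynamic shared memory:
 * reduce_shared_kernel<<<dimGrid, dimBlock, nthreads * sizeof(float)>>>(d_in, d_bl, ntot);
 */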
a9b6ad02de59061fca23d454672a01d9cfdde856.hip
// !!! This is a file automatically generated by hipify!!! // This file is part of OpenCV project. // It is subject to the license terms in the LICENSE file found in the top-level directory // of this distribution and at http://opencv.org/license.html. #include <hip/hip_runtime.h> #include <hip/hip_fp16.h> #include "grid_stride_range.hpp" #include "execution.hpp" #include "vector_traits.hpp" #include "../cuda4dnn/csl/stream.hpp" #include "../cuda4dnn/csl/span.hpp" using namespace cv::dnn::cuda4dnn::csl; using namespace cv::dnn::cuda4dnn::csl::device; namespace cv { namespace dnn { namespace cuda4dnn { namespace kernels { namespace raw { template <class T, std::size_t N> __global__ void fill_vec(Span<T> output, T value) { using vector_type = get_vector_type_t<T, N>; auto output_vPtr = vector_type::get_pointer(output.data()); for (auto i : grid_stride_range(output.size() / vector_type::size())) { vector_type vec; for (int j = 0; j < vector_type::size(); j++) vec.data[j] = value; v_store(output_vPtr[i], vec); } } template <class T, std::size_t N> __global__ void copy_vec(Span<T> output, View<T> input) { using vector_type = get_vector_type_t<T, N>; auto input_vPtr = vector_type::get_pointer(input.data()); auto output_vPtr = vector_type::get_pointer(output.data()); for (auto i : grid_stride_range(output.size() / vector_type::size())) { vector_type vec; v_load(vec, input_vPtr[i]); v_store(output_vPtr[i], vec); } } } template <class T, std::size_t N> static void launch_vectorized_fill(const Stream& stream, Span<T> output, T value) { CV_Assert(is_fully_aligned<T>(output, N)); auto kernel = raw::fill_vec<T, N>; auto policy = make_policy(kernel, output.size() / N, 0, stream); launch_kernel(kernel, policy, output, value); } template <class T> void fill(const Stream& stream, Span<T> output, T value) { if (is_fully_aligned<T>(output, 4)) { launch_vectorized_fill<T, 4>(stream, output, value); } else if (is_fully_aligned<T>(output, 2)) { launch_vectorized_fill<T, 2>(stream, output, value); } else { launch_vectorized_fill<T, 1>(stream, output, value); } } template void fill(const Stream&, Span<__half>, __half); template void fill(const Stream&, Span<float>, float); template <class T, std::size_t N> static void launch_vectorized_copy(const Stream& stream, Span<T> output, View<T> input) { CV_Assert(is_fully_aligned<T>(output, N)); CV_Assert(is_fully_aligned<T>(input, N)); auto kernel = raw::copy_vec<T, N>; auto policy = make_policy(kernel, output.size() / N, 0, stream); launch_kernel(kernel, policy, output, input); } template <class T> void copy(const Stream& stream, Span<T> output, View<T> input) { if (is_fully_aligned<T>(output, 4) && is_fully_aligned<T>(input, 4)) { launch_vectorized_copy<T, 4>(stream, output, input); } else if (is_fully_aligned<T>(output, 2) && is_fully_aligned<T>(input, 2)) { launch_vectorized_copy<T, 2>(stream, output, input); } else { launch_vectorized_copy<T, 1>(stream, output, input); } } template void copy(const Stream&, Span<__half>, View<__half>); template void copy(const Stream&, Span<float>, View<float>); }}}} /* namespace cv::dnn::cuda4dnn::kernels */
a9b6ad02de59061fca23d454672a01d9cfdde856.cu
// This file is part of OpenCV project. // It is subject to the license terms in the LICENSE file found in the top-level directory // of this distribution and at http://opencv.org/license.html. #include <cuda_runtime.h> #include <cuda_fp16.h> #include "grid_stride_range.hpp" #include "execution.hpp" #include "vector_traits.hpp" #include "../cuda4dnn/csl/stream.hpp" #include "../cuda4dnn/csl/span.hpp" using namespace cv::dnn::cuda4dnn::csl; using namespace cv::dnn::cuda4dnn::csl::device; namespace cv { namespace dnn { namespace cuda4dnn { namespace kernels { namespace raw { template <class T, std::size_t N> __global__ void fill_vec(Span<T> output, T value) { using vector_type = get_vector_type_t<T, N>; auto output_vPtr = vector_type::get_pointer(output.data()); for (auto i : grid_stride_range(output.size() / vector_type::size())) { vector_type vec; for (int j = 0; j < vector_type::size(); j++) vec.data[j] = value; v_store(output_vPtr[i], vec); } } template <class T, std::size_t N> __global__ void copy_vec(Span<T> output, View<T> input) { using vector_type = get_vector_type_t<T, N>; auto input_vPtr = vector_type::get_pointer(input.data()); auto output_vPtr = vector_type::get_pointer(output.data()); for (auto i : grid_stride_range(output.size() / vector_type::size())) { vector_type vec; v_load(vec, input_vPtr[i]); v_store(output_vPtr[i], vec); } } } template <class T, std::size_t N> static void launch_vectorized_fill(const Stream& stream, Span<T> output, T value) { CV_Assert(is_fully_aligned<T>(output, N)); auto kernel = raw::fill_vec<T, N>; auto policy = make_policy(kernel, output.size() / N, 0, stream); launch_kernel(kernel, policy, output, value); } template <class T> void fill(const Stream& stream, Span<T> output, T value) { if (is_fully_aligned<T>(output, 4)) { launch_vectorized_fill<T, 4>(stream, output, value); } else if (is_fully_aligned<T>(output, 2)) { launch_vectorized_fill<T, 2>(stream, output, value); } else { launch_vectorized_fill<T, 1>(stream, output, value); } } template void fill(const Stream&, Span<__half>, __half); template void fill(const Stream&, Span<float>, float); template <class T, std::size_t N> static void launch_vectorized_copy(const Stream& stream, Span<T> output, View<T> input) { CV_Assert(is_fully_aligned<T>(output, N)); CV_Assert(is_fully_aligned<T>(input, N)); auto kernel = raw::copy_vec<T, N>; auto policy = make_policy(kernel, output.size() / N, 0, stream); launch_kernel(kernel, policy, output, input); } template <class T> void copy(const Stream& stream, Span<T> output, View<T> input) { if (is_fully_aligned<T>(output, 4) && is_fully_aligned<T>(input, 4)) { launch_vectorized_copy<T, 4>(stream, output, input); } else if (is_fully_aligned<T>(output, 2) && is_fully_aligned<T>(input, 2)) { launch_vectorized_copy<T, 2>(stream, output, input); } else { launch_vectorized_copy<T, 1>(stream, output, input); } } template void copy(const Stream&, Span<__half>, View<__half>); template void copy(const Stream&, Span<float>, View<float>); }}}} /* namespace cv::dnn::cuda4dnn::kernels */
30f04f6d5ff77b6b8454b07a5f86e523a0b8a3e5.hip
// !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <stdio.h>
#include <sys/time.h>
#include <string.h>
#include "hip/hip_runtime.h"
#include "rocblas.h"
using namespace std;

#define IDX2F(i,j,ld) ((((j)-1)*(ld))+((i)-1))
#define IDX2C(i,j,ld) (((j)*(ld))+(i))

void CPU_MatMul(double * A, double * B ,double * C,int m,int n,int k){
    for(int i = 0;i < m;i++){
        for(int j = 0;j < k;j++){
            for(int x = 0;x < n;x++){
                C[IDX2C(i,j,k)] += A[i * n + x] * B[x * k + j];
            }
        }
    }
}

int main()
{
    int m,n,k;
    timeval t1, t2;
    cout << "Input problem size:";
    cin >> m;
    n = m;
    k = m;
    hipblasHandle_t handle;
    hipblasCreate(&handle);
    double *A,*B,*C;
    A = (double*)malloc(sizeof(double) * m * n);
    B = (double*)malloc(sizeof(double) * k * n);
    C = (double*)malloc(sizeof(double) * m * k);
    for(int i = 0;i < m;i++){
        for(int j = 0;j < n;j++){
            A[i * n + j] = rand() % 10;
        }
    }
    for(int i = 0;i < n;i++){
        for(int j = 0;j < k;j++){
            B[i * k + j] = rand() % 10;
        }
    }
    memset(C,0,sizeof(C));
    double * d_A,*d_B,*d_C;
    gettimeofday(&t1, NULL);
    hipMalloc(&d_A, sizeof(double) * m * n);
    hipMalloc(&d_B,sizeof(double) * n * k);
    hipMalloc(&d_C,sizeof(double) * m * k);
    hipMemcpy(d_A, A, sizeof(double) * m * n, hipMemcpyHostToDevice);
    hipMemcpy(d_B, B, sizeof(double) * n * k, hipMemcpyHostToDevice);
    gettimeofday(&t1, NULL);
    double a = 1,b = 0;
    hipblasDgemm(
        handle,
        HIPBLAS_OP_T,
        HIPBLAS_OP_T,
        m,
        n,
        k,
        &a, //alpha
        d_A,
        n,
        d_B,
        k,
        &b, //beta
        d_C,
        m
    );
    hipMemcpy(C, d_C, sizeof(double) * m * k, hipMemcpyDeviceToHost);
    gettimeofday(&t2, NULL);
    printf("GPU time is:%ldμs\n",t2.tv_sec*1000000 + t2.tv_usec - t1.tv_sec*1000000 - t1.tv_usec);
    hipFree(d_A);
    hipFree(d_B);
    hipFree(d_C);
    free(A);
    free(B);
    free(C);
    hipblasDestroy(handle);
}
30f04f6d5ff77b6b8454b07a5f86e523a0b8a3e5.cu
#include <iostream> #include <stdio.h> #include <sys/time.h> #include <string.h> #include "cuda_runtime.h" #include "cublas_v2.h" using namespace std; #define IDX2F(i,j,ld) ((((j)-1)*(ld))+((i)-1)) #define IDX2C(i,j,ld) (((j)*(ld))+(i)) void CPU_MatMul(double * A, double * B ,double * C,int m,int n,int k){ for(int i = 0;i < m;i++){ for(int j = 0;j < k;j++){ for(int x = 0;x < n;x++){ C[IDX2C(i,j,k)] += A[i * n + x] * B[x * k + j]; } } } } int main() { int m,n,k; timeval t1, t2; cout << "Input problem size:"; cin >> m; n = m; k = m; cublasHandle_t handle; cublasCreate(&handle); double *A,*B,*C; A = (double*)malloc(sizeof(double) * m * n); B = (double*)malloc(sizeof(double) * k * n); C = (double*)malloc(sizeof(double) * m * k); for(int i = 0;i < m;i++){ for(int j = 0;j < n;j++){ A[i * n + j] = rand() % 10; } } for(int i = 0;i < n;i++){ for(int j = 0;j < k;j++){ B[i * k + j] = rand() % 10; } } memset(C,0,sizeof(C)); double * d_A,*d_B,*d_C; gettimeofday(&t1, NULL); cudaMalloc(&d_A, sizeof(double) * m * n); cudaMalloc(&d_B,sizeof(double) * n * k); cudaMalloc(&d_C,sizeof(double) * m * k); cudaMemcpy(d_A, A, sizeof(double) * m * n, cudaMemcpyHostToDevice); cudaMemcpy(d_B, B, sizeof(double) * n * k, cudaMemcpyHostToDevice); gettimeofday(&t1, NULL); double a = 1,b = 0; cublasDgemm( handle, CUBLAS_OP_T, CUBLAS_OP_T, m, n, k, &a, //alpha d_A, n, d_B, k, &b, //beta d_C, m ); cudaMemcpy(C, d_C, sizeof(double) * m * k, cudaMemcpyDeviceToHost); gettimeofday(&t2, NULL); printf("GPU time is:%ldμs\n",t2.tv_sec*1000000 + t2.tv_usec - t1.tv_sec*1000000 - t1.tv_usec); cudaFree(d_A); cudaFree(d_B); cudaFree(d_C); free(A); free(B); free(C); cublasDestroy(handle); }
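/*
 * A hedged addition for illustration; it is not part of the original program.
 * cublasDgemm assumes column-major storage, while A, B and C above are filled in
 * row-major order. Because m == n == k the call above is dimensionally valid, but
 * the product it returns is stored column-major, i.e. transposed from the
 * row-major point of view used on the host. A common way to get C = A * B with
 * all three matrices row-major is to compute C^T = B^T * A^T by swapping the
 * operands, as in this helper (the function name is an assumption; only the
 * cuBLAS entry point already used above is required).
 */
void dgemm_row_major(cublasHandle_t handle, int m, int n, int k,
                     const double *d_A, const double *d_B, double *d_C)
{
    const double one = 1.0, zero = 0.0;
    /* column-major view: C^T (k x m) = B^T (k x n) * A^T (n x m) */
    cublasDgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N,
                k, m, n,
                &one,
                d_B, k,   /* n x k row-major == k x n column-major */
                d_A, n,   /* m x n row-major == n x m column-major */
                &zero,
                d_C, k);  /* m x k row-major == k x m column-major */
}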
42e857c1bac023f54d8905ad30f8cd0748d9c901.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <iostream> #include <cstdlib> #include <string> //#include <time.h> #include "img_utils.hpp" /*typedef unsigned long long ttype; ttype gettime(void) { struct timespec ts; clock_gettime(CLOCK_MONOTONIC, &ts); return (((ttype)ts.tv_sec*1e9) + ts.tv_nsec); }*/ //#define maskCols 5 //#define maskRows 5 //#define filter_width 5 // #define Mask_radius Mask_width/2 #define TILE_WIDTH 32 // #define w (TILE_WIDTH + filter_width - 1) #define clamp(x) (min(max((x), 0.0), 1.0)) using namespace std; //@TODO@ : Write the kernel here __global__ void convolution_2D_tiled_kernel(float *I, const float* __restrict__ M, float *P, int channels, int width, int height, int filter_width) { int k; int Mask_radius = filter_width/2; int w= (TILE_WIDTH + filter_width - 1); extern __shared__ float N_ds[]; for (k = 0; k < channels; k++) { // First batch loading int dest = threadIdx.y * TILE_WIDTH + threadIdx.x, destY = dest / w, destX = dest % w, srcY = blockIdx.y * TILE_WIDTH + destY - Mask_radius, srcX = blockIdx.x * TILE_WIDTH + destX - Mask_radius, src = (srcY * width + srcX) * channels + k; if (srcY >= 0 && srcY < height && srcX >= 0 && srcX < width) N_ds[destY*w+destX] = I[src]; else N_ds[destY*w+destX] = 0; // Second batch loading int dest1 = dest + TILE_WIDTH * TILE_WIDTH; destY = dest1 / w, destX = dest1 % w, srcY = blockIdx.y * TILE_WIDTH + destY - Mask_radius, srcX = blockIdx.x * TILE_WIDTH + destX - Mask_radius, src = (srcY * width + srcX) * channels + k; if (destY < w) { if (srcY >= 0 && srcY < height && srcX >= 0 && srcX < width) N_ds[destY*w+destX] = I[src]; else N_ds[destY*w+destX] = 0; } __syncthreads(); float accum = 0; int y, x; for (y = 0; y < filter_width; y++) for (x = 0; x < filter_width; x++) accum += N_ds[(threadIdx.y + y)*w+(threadIdx.x + x)] * M[y * filter_width + x]; y = blockIdx.y * TILE_WIDTH + threadIdx.y; x = blockIdx.x * TILE_WIDTH + threadIdx.x; if (y < height && x < width) P[(y * width + x) * channels + k] = accum; __syncthreads(); } } int main(int argc, char **argv) { if(argc!=3) { cout<<"Program takes two image filenames as parameters"<<endl; exit(3); } float *imgIn, *imgOut; int nCols, nRows, channels; // Allocate images and initialize from file imgIn = read_image_asfloat(argv[1],&nCols, &nRows, &channels); int imgSize = nCols * nRows * channels; //imgOut = (float *)calloc(nCols * nRows * channels, sizeof(float)); // Allocations on host //@TODO@ : hipHostMalloc((void **) &imgOut, imgSize * sizeof(int), hipHostMallocDefault); // blur mask int filter_width = 7; float maskData[filter_width * filter_width]; for(int i=0; i<filter_width * filter_width; i++) maskData[i] = 1.0/((float)filter_width * filter_width); // Allocates device images //float *d_imgIn, *d_imgOut, *d_MaskData; const int blockSize = 256, nStreams = 4; // const int n = imgSize * blockSize * nStreams; int n = blockSize * nStreams; const int streamSize = imgSize * blockSize * nStreams; /* const int streamBytes = streamSize * sizeof(float);*/ int img_out_start; int img_out_end; int img_in_start; int img_in_end; int Mask_radius = filter_width/2; //const int bytes = n * sizeof(float); hipStream_t streams[nStreams]; float *d_imgIn[nStreams]; float *d_imgOut[nStreams]; float *d_MaskData; hipMalloc(&d_MaskData, filter_width * filter_width * sizeof(float)); hipMemcpy(d_MaskData, maskData, filter_width * filter_width * sizeof(float), hipMemcpyHostToDevice ); for(int i=0;i<nStreams;i++) { hipStreamCreate(&streams[i]); //@TODO@ : 
Complete for device allocations hipMalloc(&d_imgIn[i], nCols * nRows * channels * sizeof(float)); hipMalloc(&d_imgOut[i], nCols * nRows * channels * sizeof(float)); } //int dim=32; dim3 DimGrid(1 + (nCols-1)/TILE_WIDTH, 1 + (nRows-1)/TILE_WIDTH); //int w = TILE_WIDTH - filter_width + 1; dim3 DimBlock(TILE_WIDTH, TILE_WIDTH); int size = (TILE_WIDTH * TILE_WIDTH)*channels*sizeof (float); /*dim3 DimGrid((nCols-1)/dim+ 1, (nRows-1)/dim+1, 1); dim3 DimBlock(dim, dim, 1);*/ /*size_t nbytes = DimBlock.x*DimBlock.y*sizeof(float);*/ for (int i=0; i<n; i+= streamSize) { for(int j=0;j<nStreams;j++) { if (img_out_start<nRows){ if(img_out_start+n <= nRows) img_out_end=img_out_start+n; else img_out_end=nRows; if(img_out_start - Mask_radius >= 0) img_in_start = img_out_start - Mask_radius; else img_in_start = 0; if(img_out_end + Mask_radius <= nRows) img_in_end = img_out_end + Mask_radius; else img_in_end=nRows; hipMemcpyAsync(d_imgIn[j], imgIn +img_in_start*nCols*channels , (img_in_end - img_in_start)*channels*nCols*sizeof(float), hipMemcpyHostToDevice, streams[j]); hipLaunchKernelGGL(( convolution_2D_tiled_kernel), dim3(DimGrid),dim3(DimBlock),size,streams[j], d_imgIn[j], d_MaskData, d_imgOut[j],channels, nCols, nRows, filter_width); hipMemcpyAsync(imgOut+img_out_start*nCols*channels, d_imgOut[j] , (img_out_end - img_out_start)*channels*nCols*sizeof(float), hipMemcpyDeviceToHost, streams[j]); } img_out_start=img_out_start+n+1; } } hipDeviceSynchronize(); // Write gray image to file write_image_fromfloat(argv[2], imgOut, nCols, nRows, channels); // Free device memory for(int i=0;i<nStreams;i++) { hipStreamDestroy(streams[i]); hipFree(d_imgIn[i]); hipFree(d_imgOut[i]); } hipFree(d_MaskData); // Free host memory hipHostFree(imgIn); hipHostFree(imgOut); return 0; }
42e857c1bac023f54d8905ad30f8cd0748d9c901.cu
#include <iostream> #include <cstdlib> #include <string> //#include <time.h> #include "img_utils.hpp" /*typedef unsigned long long ttype; ttype gettime(void) { struct timespec ts; clock_gettime(CLOCK_MONOTONIC, &ts); return (((ttype)ts.tv_sec*1e9) + ts.tv_nsec); }*/ //#define maskCols 5 //#define maskRows 5 //#define filter_width 5 // #define Mask_radius Mask_width/2 #define TILE_WIDTH 32 // #define w (TILE_WIDTH + filter_width - 1) #define clamp(x) (min(max((x), 0.0), 1.0)) using namespace std; //@TODO@ : Write the kernel here __global__ void convolution_2D_tiled_kernel(float *I, const float* __restrict__ M, float *P, int channels, int width, int height, int filter_width) { int k; int Mask_radius = filter_width/2; int w= (TILE_WIDTH + filter_width - 1); extern __shared__ float N_ds[]; for (k = 0; k < channels; k++) { // First batch loading int dest = threadIdx.y * TILE_WIDTH + threadIdx.x, destY = dest / w, destX = dest % w, srcY = blockIdx.y * TILE_WIDTH + destY - Mask_radius, srcX = blockIdx.x * TILE_WIDTH + destX - Mask_radius, src = (srcY * width + srcX) * channels + k; if (srcY >= 0 && srcY < height && srcX >= 0 && srcX < width) N_ds[destY*w+destX] = I[src]; else N_ds[destY*w+destX] = 0; // Second batch loading int dest1 = dest + TILE_WIDTH * TILE_WIDTH; destY = dest1 / w, destX = dest1 % w, srcY = blockIdx.y * TILE_WIDTH + destY - Mask_radius, srcX = blockIdx.x * TILE_WIDTH + destX - Mask_radius, src = (srcY * width + srcX) * channels + k; if (destY < w) { if (srcY >= 0 && srcY < height && srcX >= 0 && srcX < width) N_ds[destY*w+destX] = I[src]; else N_ds[destY*w+destX] = 0; } __syncthreads(); float accum = 0; int y, x; for (y = 0; y < filter_width; y++) for (x = 0; x < filter_width; x++) accum += N_ds[(threadIdx.y + y)*w+(threadIdx.x + x)] * M[y * filter_width + x]; y = blockIdx.y * TILE_WIDTH + threadIdx.y; x = blockIdx.x * TILE_WIDTH + threadIdx.x; if (y < height && x < width) P[(y * width + x) * channels + k] = accum; __syncthreads(); } } int main(int argc, char **argv) { if(argc!=3) { cout<<"Program takes two image filenames as parameters"<<endl; exit(3); } float *imgIn, *imgOut; int nCols, nRows, channels; // Allocate images and initialize from file imgIn = read_image_asfloat(argv[1],&nCols, &nRows, &channels); int imgSize = nCols * nRows * channels; //imgOut = (float *)calloc(nCols * nRows * channels, sizeof(float)); // Allocations on host //@TODO@ : cudaHostAlloc((void **) &imgOut, imgSize * sizeof(int), cudaHostAllocDefault); // blur mask int filter_width = 7; float maskData[filter_width * filter_width]; for(int i=0; i<filter_width * filter_width; i++) maskData[i] = 1.0/((float)filter_width * filter_width); // Allocates device images //float *d_imgIn, *d_imgOut, *d_MaskData; const int blockSize = 256, nStreams = 4; // const int n = imgSize * blockSize * nStreams; int n = blockSize * nStreams; const int streamSize = imgSize * blockSize * nStreams; /* const int streamBytes = streamSize * sizeof(float);*/ int img_out_start; int img_out_end; int img_in_start; int img_in_end; int Mask_radius = filter_width/2; //const int bytes = n * sizeof(float); cudaStream_t streams[nStreams]; float *d_imgIn[nStreams]; float *d_imgOut[nStreams]; float *d_MaskData; cudaMalloc(&d_MaskData, filter_width * filter_width * sizeof(float)); cudaMemcpy(d_MaskData, maskData, filter_width * filter_width * sizeof(float), cudaMemcpyHostToDevice ); for(int i=0;i<nStreams;i++) { cudaStreamCreate(&streams[i]); //@TODO@ : Complete for device allocations cudaMalloc(&d_imgIn[i], nCols * nRows * channels * 
sizeof(float)); cudaMalloc(&d_imgOut[i], nCols * nRows * channels * sizeof(float)); } //int dim=32; dim3 DimGrid(1 + (nCols-1)/TILE_WIDTH, 1 + (nRows-1)/TILE_WIDTH); //int w = TILE_WIDTH - filter_width + 1; dim3 DimBlock(TILE_WIDTH, TILE_WIDTH); int size = (TILE_WIDTH * TILE_WIDTH)*channels*sizeof (float); /*dim3 DimGrid((nCols-1)/dim+ 1, (nRows-1)/dim+1, 1); dim3 DimBlock(dim, dim, 1);*/ /*size_t nbytes = DimBlock.x*DimBlock.y*sizeof(float);*/ for (int i=0; i<n; i+= streamSize) { for(int j=0;j<nStreams;j++) { if (img_out_start<nRows){ if(img_out_start+n <= nRows) img_out_end=img_out_start+n; else img_out_end=nRows; if(img_out_start - Mask_radius >= 0) img_in_start = img_out_start - Mask_radius; else img_in_start = 0; if(img_out_end + Mask_radius <= nRows) img_in_end = img_out_end + Mask_radius; else img_in_end=nRows; cudaMemcpyAsync(d_imgIn[j], imgIn +img_in_start*nCols*channels , (img_in_end - img_in_start)*channels*nCols*sizeof(float), cudaMemcpyHostToDevice, streams[j]); convolution_2D_tiled_kernel<<<DimGrid,DimBlock,size,streams[j]>>>( d_imgIn[j], d_MaskData, d_imgOut[j],channels, nCols, nRows, filter_width); cudaMemcpyAsync(imgOut+img_out_start*nCols*channels, d_imgOut[j] , (img_out_end - img_out_start)*channels*nCols*sizeof(float), cudaMemcpyDeviceToHost, streams[j]); } img_out_start=img_out_start+n+1; } } cudaDeviceSynchronize(); // Write gray image to file write_image_fromfloat(argv[2], imgOut, nCols, nRows, channels); // Free device memory for(int i=0;i<nStreams;i++) { cudaStreamDestroy(streams[i]); cudaFree(d_imgIn[i]); cudaFree(d_imgOut[i]); } cudaFree(d_MaskData); // Free host memory cudaFreeHost(imgIn); cudaFreeHost(imgOut); return 0; }
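/*
 * A possible completion of the host-allocation @TODO@ above; an illustrative
 * sketch rather than the original author's code, and the helper name is an
 * assumption. imgOut is a float buffer and the destination of cudaMemcpyAsync,
 * so it needs a real pinned allocation sized in floats (the commented-out call
 * above uses sizeof(int)); pinned memory is what lets the asynchronous copies
 * overlap with the kernels launched in the per-stream loop. That loop also reads
 * img_out_start before it is ever assigned, so initializing it to 0 before the
 * loop appears to be the other missing piece.
 */
float *alloc_pinned_image(int nCols, int nRows, int channels)
{
    float *p = NULL;
    /* page-locked host memory, required for cudaMemcpyAsync to be truly asynchronous */
    cudaHostAlloc((void **)&p, (size_t)nCols * nRows * channels * sizeof(float),
                  cudaHostAllocDefault);
    return p;
}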
642239eb838c15c7cfd6244616fb3bd671ffb179.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "gtest/gtest.h" #include "cudex/memory.cu.h" #include "cudex/launcher.cu.h" #include "cudex/device_utils.cu.h" using namespace cudex; namespace { __global__ void setData(DeviceSpan<int> span) { const size_t index = threadLinearIndex(); if (index >= span.size()) { return; } span[index] += 2; } } TEST(launcher, run_1d) { constexpr size_t SIZE = 1e6; HostDeviceMemory<int> mem(SIZE); EXPECT_EQ(mem.size(), SIZE); int cnt = 0; for (int& v : mem.host()) { v = cnt ++; } EXPECT_EQ(cnt, SIZE); mem.copyHostToDeviceAsync(); auto launcher = Launcher().async().size1D(SIZE); EXPECT_LT(SIZE, launcher.threadCount()); EXPECT_EQ(launcher.getSizeBlock().x, Launcher::N_BLOCK_THREADS); EXPECT_EQ(launcher.getSizeBlock().y, 1); EXPECT_EQ(launcher.getSizeBlock().z, 1); static_assert(SIZE % Launcher::N_BLOCK_THREADS != 0); constexpr size_t nBlocks = SIZE / Launcher::N_BLOCK_THREADS + 1; EXPECT_EQ(nBlocks, launcher.blockCount()); EXPECT_EQ(launcher.getSizeGrid().x, nBlocks); EXPECT_EQ(launcher.getSizeGrid().y, 1); EXPECT_EQ(launcher.getSizeGrid().z, 1); launcher.run(setData, mem.device()); mem.copyDeviceToHost(); cnt = 0; for (const auto& v: mem.host()) { EXPECT_EQ(v, cnt++ + 2); } EXPECT_EQ(cnt, SIZE); }
642239eb838c15c7cfd6244616fb3bd671ffb179.cu
#include "gtest/gtest.h" #include "cudex/memory.cu.h" #include "cudex/launcher.cu.h" #include "cudex/device_utils.cu.h" using namespace cudex; namespace { __global__ void setData(DeviceSpan<int> span) { const size_t index = threadLinearIndex(); if (index >= span.size()) { return; } span[index] += 2; } } TEST(launcher, run_1d) { constexpr size_t SIZE = 1e6; HostDeviceMemory<int> mem(SIZE); EXPECT_EQ(mem.size(), SIZE); int cnt = 0; for (int& v : mem.host()) { v = cnt ++; } EXPECT_EQ(cnt, SIZE); mem.copyHostToDeviceAsync(); auto launcher = Launcher().async().size1D(SIZE); EXPECT_LT(SIZE, launcher.threadCount()); EXPECT_EQ(launcher.getSizeBlock().x, Launcher::N_BLOCK_THREADS); EXPECT_EQ(launcher.getSizeBlock().y, 1); EXPECT_EQ(launcher.getSizeBlock().z, 1); static_assert(SIZE % Launcher::N_BLOCK_THREADS != 0); constexpr size_t nBlocks = SIZE / Launcher::N_BLOCK_THREADS + 1; EXPECT_EQ(nBlocks, launcher.blockCount()); EXPECT_EQ(launcher.getSizeGrid().x, nBlocks); EXPECT_EQ(launcher.getSizeGrid().y, 1); EXPECT_EQ(launcher.getSizeGrid().z, 1); launcher.run(setData, mem.device()); mem.copyDeviceToHost(); cnt = 0; for (const auto& v: mem.host()) { EXPECT_EQ(v, cnt++ + 2); } EXPECT_EQ(cnt, SIZE); }
20d3cc4400eeeeabceaca717ffe6e7081d27fcad.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * This sample implements a separable convolution * of a 2D image with an arbitrary filter. */ #include <stdio.h> #include <stdlib.h> #include <time.h> unsigned int filter_radius; #define FILTER_LENGTH (2 * filter_radius + 1) #define ABS(val) ((val)<0.0 ? (-(val)) : (val)) //#define accuracy 0.05 #define accuracy 0.05 //////////////////////////////////////////////////////////////////////////////// // Row convolution kernel //////////////////////////////////////////////////////////////////////////////// __global__ void ConvolutionRowGPU(float *d_Dst,float *d_Src,float *d_Filter,int filterR){ int x =threadIdx.x; int y =threadIdx.y; int k; float sum=0; for (k = -filterR; k <= filterR; k++) { int d = x + k; if (d >= 0 && d < blockDim.x) { sum += d_Src[y*blockDim.x+d] * d_Filter[filterR- k]; } d_Dst[y*blockDim.x+x] = sum; } } //////////////////////////////////////////////////////////////////////////////// // Column convolution kernel //////////////////////////////////////////////////////////////////////////////// __global__ void ConvolutionColGPU(float *d_Dst,float *d_Src,float *d_Filter,int filterR){ int x =threadIdx.x; int y =threadIdx.y; float sum=0; for (int k = -filterR; k <= filterR; k++) { int d = y + k; if (d >= 0 && d < blockDim.y) { sum += d_Src[d * blockDim.x + x] * d_Filter[filterR - k]; } d_Dst[y * blockDim.x + x] = sum; } } //////////////////////////////////////////////////////////////////////////////// // Reference row convolution filter //////////////////////////////////////////////////////////////////////////////// void convolutionRowCPU(float *h_Dst, float *h_Src, float *h_Filter, int imageW, int imageH, int filterR) { int x, y, k; for (y = 0; y < imageH; y++) { for (x = 0; x < imageW; x++) { float sum = 0; for (k = -filterR; k <= filterR; k++) { int d = x + k; if (d >= 0 && d < imageW) { sum += h_Src[y * imageW + d] * h_Filter[filterR - k]; } h_Dst[y * imageW + x] = sum; } } } } //////////////////////////////////////////////////////////////////////////////// // Reference column convolution filter //////////////////////////////////////////////////////////////////////////////// void convolutionColumnCPU(float *h_Dst, float *h_Src, float *h_Filter, int imageW, int imageH, int filterR) { int x, y, k; for (y = 0; y < imageH; y++) { for (x = 0; x < imageW; x++) { float sum = 0; for (k = -filterR; k <= filterR; k++) { int d = y + k; if (d >= 0 && d < imageH) { sum += h_Src[d * imageW + x] * h_Filter[filterR - k]; } h_Dst[y * imageW + x] = sum; } } } } //////////////////////////////////////////////////////////////////////////////// // Main program //////////////////////////////////////////////////////////////////////////////// int main(int argc, char **argv) { float *h_Filter, *h_Input, *h_Buffer, *h_OutputCPU, *h_OutputGPU; float *d_Filter, *d_Input, *d_Buffer, *d_OutputGPU; int imageW; int imageH; unsigned int i; hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); struct timespec tv1, tv2; printf("Enter filter radius : "); scanf("%d", &filter_radius); // Ta imageW, imageH ta dinei o xrhsths kai thewroume oti einai isa, // dhladh imageW = imageH = N, opou to N to dinei o xrhsths. // Gia aplothta thewroume tetragwnikes eikones. printf("Enter image size. 
Should be a power of two and greater than %d : ", FILTER_LENGTH); scanf("%d", &imageW); imageH = imageW; dim3 blockSize(imageW,imageH); printf("Image Width x Height = %i x %i\n\n", imageW, imageH); printf("Allocating and initializing host arrays...\n"); // Tha htan kalh idea na elegxete kai to apotelesma twn malloc... h_Filter = (float *)malloc(FILTER_LENGTH * sizeof(float)); h_Input = (float *)malloc(imageW * imageH * sizeof(float)); h_Buffer = (float *)malloc(imageW * imageH * sizeof(float)); h_OutputCPU = (float *)malloc(imageW * imageH * sizeof(float)); h_OutputGPU = (float *)malloc(imageW * imageH * sizeof(float)); // to 'h_Filter' apotelei to filtro me to opoio ginetai to convolution kai // arxikopoieitai tuxaia. To 'h_Input' einai h eikona panw sthn opoia ginetai // to convolution kai arxikopoieitai kai auth tuxaia. srand(200); for (i = 0; i < FILTER_LENGTH; i++) { h_Filter[i] = (float)(rand() % 16); } for (i = 0; i < imageW * imageH; i++) { h_Input[i] = (float)rand() / ((float)RAND_MAX / 16); } // To parakatw einai to kommati pou ekteleitai sthn CPU kai me vash auto prepei na ginei h sugrish me thn GPU. printf("CPU computation...\n"); clock_gettime(CLOCK_MONOTONIC_RAW, &tv1); convolutionRowCPU(h_Buffer, h_Input, h_Filter, imageW, imageH, filter_radius); // convolution kata grammes convolutionColumnCPU(h_OutputCPU, h_Buffer, h_Filter, imageW, imageH, filter_radius); // convolution kata sthles clock_gettime(CLOCK_MONOTONIC_RAW, &tv2); printf ("CPU TIME = %g seconds\n",(double) (tv2.tv_nsec - tv1.tv_nsec) / 1000000000.0 +(double) (tv2.tv_sec - tv1.tv_sec)); // Kanete h sugrish anamesa se GPU kai CPU kai an estw kai kapoio apotelesma xeperna thn akriveia // pou exoume orisei, tote exoume sfalma kai mporoume endexomenws na termatisoume to programma mas //orizw to block ws imageW * imageH //desmeusi mnimis stin GPU hipMalloc((void**)&d_Filter,FILTER_LENGTH * sizeof(float)); hipMalloc((void**)&d_Input,imageW * imageH * sizeof(float)); hipMalloc((void**)&d_Buffer,imageW * imageH * sizeof(float)); hipMalloc((void**)&d_OutputGPU,imageW * imageH * sizeof(float)); //elegxos an desmeutike i mnimi stin GPU if(d_Filter==NULL||d_Input==NULL||d_Buffer==NULL||d_OutputGPU==NULL){ printf("couldn't allocate memory in GPU\n"); return 1; } hipEventRecord(start,0); hipMemcpy(d_Filter,h_Filter,FILTER_LENGTH * sizeof(float),hipMemcpyHostToDevice); hipMemcpy(d_Input,h_Input,imageW * imageH * sizeof(float),hipMemcpyHostToDevice); hipEventRecord(start,0); //kernel launch hipLaunchKernelGGL(( ConvolutionRowGPU), dim3(1),dim3(blockSize), 0, 0, d_Buffer, d_Input, d_Filter, filter_radius); // convolution kata grammes hipDeviceSynchronize(); hipError_t error = hipGetLastError(); if(error != hipSuccess){ printf("CUDA Error: %s\n", hipGetErrorString(error)); return 1; } //kernel launch hipLaunchKernelGGL(( ConvolutionColGPU), dim3(1),dim3(blockSize), 0, 0, d_OutputGPU, d_Buffer, d_Filter, filter_radius); // convolution kata sthles hipEventRecord(stop,0); hipEventSynchronize(stop); //metafora dedomenwn apo tin GPU hipMemcpy(h_OutputGPU,d_OutputGPU,imageW * imageH * sizeof(float),hipMemcpyDeviceToHost); //elegxos gia sfalmata hipDeviceSynchronize(); error = hipGetLastError(); if(error != hipSuccess){ printf("CUDA Error: %s\n", hipGetErrorString(error)); return 1; } float milliseconds = 0; hipEventElapsedTime(&milliseconds, start, stop); printf("GPU TIME = %f\n",milliseconds/1000); //elegxos apotelesmatos i=0; while (i<imageW*imageH){ if(ABS(h_OutputGPU[i]-h_OutputCPU[i])>accuracy){ printf("Accuracy Error, at element 
%d\n GPU result - CPU result = %f\n Aborting...\n",i,h_OutputGPU[i]-h_OutputCPU[i]); break; } i++; } // free all the allocated memory free(h_OutputCPU); free(h_Buffer); free(h_Input); free(h_Filter); free(h_OutputGPU); hipFree(d_Input); hipFree(d_Buffer); hipFree(d_OutputGPU); hipFree(d_Filter); // Do a device reset just in case... Bgalte to sxolio otan ylopoihsete CUDA hipDeviceReset(); return 0; }
20d3cc4400eeeeabceaca717ffe6e7081d27fcad.cu
/* * This sample implements a separable convolution * of a 2D image with an arbitrary filter. */ #include <stdio.h> #include <stdlib.h> #include <time.h> unsigned int filter_radius; #define FILTER_LENGTH (2 * filter_radius + 1) #define ABS(val) ((val)<0.0 ? (-(val)) : (val)) //#define accuracy 0.05 #define accuracy 0.05 //////////////////////////////////////////////////////////////////////////////// // Row convolution kernel //////////////////////////////////////////////////////////////////////////////// __global__ void ConvolutionRowGPU(float *d_Dst,float *d_Src,float *d_Filter,int filterR){ int x =threadIdx.x; int y =threadIdx.y; int k; float sum=0; for (k = -filterR; k <= filterR; k++) { int d = x + k; if (d >= 0 && d < blockDim.x) { sum += d_Src[y*blockDim.x+d] * d_Filter[filterR- k]; } d_Dst[y*blockDim.x+x] = sum; } } //////////////////////////////////////////////////////////////////////////////// // Column convolution kernel //////////////////////////////////////////////////////////////////////////////// __global__ void ConvolutionColGPU(float *d_Dst,float *d_Src,float *d_Filter,int filterR){ int x =threadIdx.x; int y =threadIdx.y; float sum=0; for (int k = -filterR; k <= filterR; k++) { int d = y + k; if (d >= 0 && d < blockDim.y) { sum += d_Src[d * blockDim.x + x] * d_Filter[filterR - k]; } d_Dst[y * blockDim.x + x] = sum; } } //////////////////////////////////////////////////////////////////////////////// // Reference row convolution filter //////////////////////////////////////////////////////////////////////////////// void convolutionRowCPU(float *h_Dst, float *h_Src, float *h_Filter, int imageW, int imageH, int filterR) { int x, y, k; for (y = 0; y < imageH; y++) { for (x = 0; x < imageW; x++) { float sum = 0; for (k = -filterR; k <= filterR; k++) { int d = x + k; if (d >= 0 && d < imageW) { sum += h_Src[y * imageW + d] * h_Filter[filterR - k]; } h_Dst[y * imageW + x] = sum; } } } } //////////////////////////////////////////////////////////////////////////////// // Reference column convolution filter //////////////////////////////////////////////////////////////////////////////// void convolutionColumnCPU(float *h_Dst, float *h_Src, float *h_Filter, int imageW, int imageH, int filterR) { int x, y, k; for (y = 0; y < imageH; y++) { for (x = 0; x < imageW; x++) { float sum = 0; for (k = -filterR; k <= filterR; k++) { int d = y + k; if (d >= 0 && d < imageH) { sum += h_Src[d * imageW + x] * h_Filter[filterR - k]; } h_Dst[y * imageW + x] = sum; } } } } //////////////////////////////////////////////////////////////////////////////// // Main program //////////////////////////////////////////////////////////////////////////////// int main(int argc, char **argv) { float *h_Filter, *h_Input, *h_Buffer, *h_OutputCPU, *h_OutputGPU; float *d_Filter, *d_Input, *d_Buffer, *d_OutputGPU; int imageW; int imageH; unsigned int i; cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); struct timespec tv1, tv2; printf("Enter filter radius : "); scanf("%d", &filter_radius); // Ta imageW, imageH ta dinei o xrhsths kai thewroume oti einai isa, // dhladh imageW = imageH = N, opou to N to dinei o xrhsths. // Gia aplothta thewroume tetragwnikes eikones. printf("Enter image size. 
Should be a power of two and greater than %d : ", FILTER_LENGTH); scanf("%d", &imageW); imageH = imageW; dim3 blockSize(imageW,imageH); printf("Image Width x Height = %i x %i\n\n", imageW, imageH); printf("Allocating and initializing host arrays...\n"); // Tha htan kalh idea na elegxete kai to apotelesma twn malloc... h_Filter = (float *)malloc(FILTER_LENGTH * sizeof(float)); h_Input = (float *)malloc(imageW * imageH * sizeof(float)); h_Buffer = (float *)malloc(imageW * imageH * sizeof(float)); h_OutputCPU = (float *)malloc(imageW * imageH * sizeof(float)); h_OutputGPU = (float *)malloc(imageW * imageH * sizeof(float)); // to 'h_Filter' apotelei to filtro me to opoio ginetai to convolution kai // arxikopoieitai tuxaia. To 'h_Input' einai h eikona panw sthn opoia ginetai // to convolution kai arxikopoieitai kai auth tuxaia. srand(200); for (i = 0; i < FILTER_LENGTH; i++) { h_Filter[i] = (float)(rand() % 16); } for (i = 0; i < imageW * imageH; i++) { h_Input[i] = (float)rand() / ((float)RAND_MAX / 16); } // To parakatw einai to kommati pou ekteleitai sthn CPU kai me vash auto prepei na ginei h sugrish me thn GPU. printf("CPU computation...\n"); clock_gettime(CLOCK_MONOTONIC_RAW, &tv1); convolutionRowCPU(h_Buffer, h_Input, h_Filter, imageW, imageH, filter_radius); // convolution kata grammes convolutionColumnCPU(h_OutputCPU, h_Buffer, h_Filter, imageW, imageH, filter_radius); // convolution kata sthles clock_gettime(CLOCK_MONOTONIC_RAW, &tv2); printf ("CPU TIME = %g seconds\n",(double) (tv2.tv_nsec - tv1.tv_nsec) / 1000000000.0 +(double) (tv2.tv_sec - tv1.tv_sec)); // Kanete h sugrish anamesa se GPU kai CPU kai an estw kai kapoio apotelesma xeperna thn akriveia // pou exoume orisei, tote exoume sfalma kai mporoume endexomenws na termatisoume to programma mas //orizw to block ws imageW * imageH //desmeusi mnimis stin GPU cudaMalloc((void**)&d_Filter,FILTER_LENGTH * sizeof(float)); cudaMalloc((void**)&d_Input,imageW * imageH * sizeof(float)); cudaMalloc((void**)&d_Buffer,imageW * imageH * sizeof(float)); cudaMalloc((void**)&d_OutputGPU,imageW * imageH * sizeof(float)); //elegxos an desmeutike i mnimi stin GPU if(d_Filter==NULL||d_Input==NULL||d_Buffer==NULL||d_OutputGPU==NULL){ printf("couldn't allocate memory in GPU\n"); return 1; } cudaEventRecord(start,0); cudaMemcpy(d_Filter,h_Filter,FILTER_LENGTH * sizeof(float),cudaMemcpyHostToDevice); cudaMemcpy(d_Input,h_Input,imageW * imageH * sizeof(float),cudaMemcpyHostToDevice); cudaEventRecord(start,0); //kernel launch ConvolutionRowGPU<<<1,blockSize>>>(d_Buffer, d_Input, d_Filter, filter_radius); // convolution kata grammes cudaThreadSynchronize(); cudaError_t error = cudaGetLastError(); if(error != cudaSuccess){ printf("CUDA Error: %s\n", cudaGetErrorString(error)); return 1; } //kernel launch ConvolutionColGPU<<<1,blockSize>>>(d_OutputGPU, d_Buffer, d_Filter, filter_radius); // convolution kata sthles cudaEventRecord(stop,0); cudaEventSynchronize(stop); //metafora dedomenwn apo tin GPU cudaMemcpy(h_OutputGPU,d_OutputGPU,imageW * imageH * sizeof(float),cudaMemcpyDeviceToHost); //elegxos gia sfalmata cudaThreadSynchronize(); error = cudaGetLastError(); if(error != cudaSuccess){ printf("CUDA Error: %s\n", cudaGetErrorString(error)); return 1; } float milliseconds = 0; cudaEventElapsedTime(&milliseconds, start, stop); printf("GPU TIME = %f\n",milliseconds/1000); //elegxos apotelesmatos i=0; while (i<imageW*imageH){ if(ABS(h_OutputGPU[i]-h_OutputCPU[i])>accuracy){ printf("Accuracy Error, at element %d\n GPU result - CPU result = %f\n 
Aborting...\n",i,h_OutputGPU[i]-h_OutputCPU[i]); break; } i++; } // free all the allocated memory free(h_OutputCPU); free(h_Buffer); free(h_Input); free(h_Filter); free(h_OutputGPU); cudaFree(d_Input); cudaFree(d_Buffer); cudaFree(d_OutputGPU); cudaFree(d_Filter); // Do a device reset just in case... Bgalte to sxolio otan ylopoihsete CUDA cudaDeviceReset(); return 0; }
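The sample above tests the device pointers against NULL after cudaMalloc to detect allocation failure; the cudaError_t returned by each runtime call is the more direct signal. A small sketch of a status-checking helper follows; the CHECK macro name and the demo allocation are illustrative, not part of the sample.

// Minimal runtime-API error-checking sketch (illustrative names).
#include <cstdio>
#include <cstdlib>
#include <cuda_runtime.h>

#define CHECK(call)                                                         \
    do {                                                                    \
        cudaError_t err_ = (call);                                          \
        if (err_ != cudaSuccess) {                                          \
            fprintf(stderr, "CUDA error %s at %s:%d\n",                     \
                    cudaGetErrorString(err_), __FILE__, __LINE__);          \
            exit(EXIT_FAILURE);                                             \
        }                                                                   \
    } while (0)

int main()
{
    float *d_buf = nullptr;
    CHECK(cudaMalloc(&d_buf, 1024 * sizeof(float)));  // the status, not the pointer, reports failure
    CHECK(cudaMemset(d_buf, 0, 1024 * sizeof(float)));
    CHECK(cudaFree(d_buf));
    printf("ok\n");
    return 0;
}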
2f0d4253220b794f5b07abaa2e986d8c5fcde957.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. * Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ namespace fastertransformer { template <typename T, bool ALIVE = false> __global__ void init_kernel(bool* finished, int* sequence_length, int* word_ids, T* cum_log_probs, const int sentence_id, const int beam_width, const int batch_size) { const bool IS_FP16 = std::is_same<T, half>::value; const T MAX_T_VAL = (IS_FP16) ? HALF_FLT_MAX : 1e20f; for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < batch_size * beam_width; index += blockDim.x * gridDim.x) { finished[index] = false; sequence_length[index] = 0; if (ALIVE) { if (index < batch_size * beam_width / 2) word_ids[index] = sentence_id; cum_log_probs[index] = (index % beam_width == beam_width / 2) ? (T)0.0f : -MAX_T_VAL; } else { word_ids[index] = sentence_id; cum_log_probs[index] = (index % beam_width == 0) ? (T)0.0f : -MAX_T_VAL; } } } template <typename T> void init_kernelLauncher_v2(bool* finished, int* sequence_length, int* word_ids, T* cum_log_probs, const int sentence_id, const int batch_size, const int beam_width, hipStream_t stream) { dim3 grid((int)ceil(batch_size * beam_width * 1.0 / 256)); dim3 block(256); hipLaunchKernelGGL(( init_kernel<T, true>), dim3(grid), dim3(block), 0, stream, finished, sequence_length, word_ids, cum_log_probs, sentence_id, beam_width, batch_size); } template <typename T> __global__ void embedding_position_lookups_bart_kernel( T* from_tensor, const T* embedding_table, const T* position_encoding, const int* word_ids, const int batch_size, const int hidden_units) { // 1. lookup from embedding table // 2. 
add the position encoding for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < batch_size * hidden_units; index += blockDim.x * gridDim.x) { const int row_index = index / hidden_units; const int col_index = index % hidden_units; from_tensor[index] = embedding_table[word_ids[row_index] * hidden_units + col_index] + position_encoding[col_index]; } } template <typename T> void embedding_position_lookups_bart_kernel_launcher(T* from_tensor, const T* embedding_table, const T* position_encoding, const int* word_ids, const int batch_size, const int hidden_units, hipStream_t stream) { dim3 grid(min(batch_size, 65536)); dim3 block(min(hidden_units, 1024)); hipLaunchKernelGGL(( embedding_position_lookups_bart_kernel<T>), dim3(grid), dim3(block), 0, stream, from_tensor, embedding_table, position_encoding, word_ids, batch_size, hidden_units); } template void init_kernelLauncher_v2(bool* finished, int* sequence_length, int* word_ids, float* cum_log_probs, const int sentence_id, const int batch_size, const int beam_width, hipStream_t stream); template void init_kernelLauncher_v2(bool* finished, int* sequence_length, int* word_ids, half* cum_log_probs, const int sentence_id, const int batch_size, const int beam_width, hipStream_t stream); template void embedding_position_lookups_bart_kernel_launcher( float* from_tensor, const float* embedding_table, const float* position_encoding, const int* word_ids, const int batch_size, const int hidden_units, hipStream_t stream); template void embedding_position_lookups_bart_kernel_launcher( half* from_tensor, const half* embedding_table, const half* position_encoding, const int* word_ids, const int batch_size, const int hidden_units, hipStream_t stream); } // end of name space fastertransformer
2f0d4253220b794f5b07abaa2e986d8c5fcde957.cu
/* * Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. * Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ namespace fastertransformer { template <typename T, bool ALIVE = false> __global__ void init_kernel(bool* finished, int* sequence_length, int* word_ids, T* cum_log_probs, const int sentence_id, const int beam_width, const int batch_size) { const bool IS_FP16 = std::is_same<T, half>::value; const T MAX_T_VAL = (IS_FP16) ? HALF_FLT_MAX : 1e20f; for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < batch_size * beam_width; index += blockDim.x * gridDim.x) { finished[index] = false; sequence_length[index] = 0; if (ALIVE) { if (index < batch_size * beam_width / 2) word_ids[index] = sentence_id; cum_log_probs[index] = (index % beam_width == beam_width / 2) ? (T)0.0f : -MAX_T_VAL; } else { word_ids[index] = sentence_id; cum_log_probs[index] = (index % beam_width == 0) ? (T)0.0f : -MAX_T_VAL; } } } template <typename T> void init_kernelLauncher_v2(bool* finished, int* sequence_length, int* word_ids, T* cum_log_probs, const int sentence_id, const int batch_size, const int beam_width, cudaStream_t stream) { dim3 grid((int)ceil(batch_size * beam_width * 1.0 / 256)); dim3 block(256); init_kernel<T, true><<<grid, block, 0, stream>>>(finished, sequence_length, word_ids, cum_log_probs, sentence_id, beam_width, batch_size); } template <typename T> __global__ void embedding_position_lookups_bart_kernel( T* from_tensor, const T* embedding_table, const T* position_encoding, const int* word_ids, const int batch_size, const int hidden_units) { // 1. lookup from embedding table // 2. 
add the position encoding for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < batch_size * hidden_units; index += blockDim.x * gridDim.x) { const int row_index = index / hidden_units; const int col_index = index % hidden_units; from_tensor[index] = embedding_table[word_ids[row_index] * hidden_units + col_index] + position_encoding[col_index]; } } template <typename T> void embedding_position_lookups_bart_kernel_launcher(T* from_tensor, const T* embedding_table, const T* position_encoding, const int* word_ids, const int batch_size, const int hidden_units, cudaStream_t stream) { dim3 grid(min(batch_size, 65536)); dim3 block(min(hidden_units, 1024)); embedding_position_lookups_bart_kernel<T><<<grid, block, 0, stream>>>( from_tensor, embedding_table, position_encoding, word_ids, batch_size, hidden_units); } template void init_kernelLauncher_v2(bool* finished, int* sequence_length, int* word_ids, float* cum_log_probs, const int sentence_id, const int batch_size, const int beam_width, cudaStream_t stream); template void init_kernelLauncher_v2(bool* finished, int* sequence_length, int* word_ids, half* cum_log_probs, const int sentence_id, const int batch_size, const int beam_width, cudaStream_t stream); template void embedding_position_lookups_bart_kernel_launcher( float* from_tensor, const float* embedding_table, const float* position_encoding, const int* word_ids, const int batch_size, const int hidden_units, cudaStream_t stream); template void embedding_position_lookups_bart_kernel_launcher( half* from_tensor, const half* embedding_table, const half* position_encoding, const int* word_ids, const int batch_size, const int hidden_units, cudaStream_t stream); } // end of name space fastertransformer
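Both kernels above use a grid-stride loop, so one launch configuration can cover any batch size. The standalone sketch below shows the same pattern on a plain float array; the fill kernel, grid size, and element count are illustrative.

// Minimal grid-stride loop sketch (illustrative names and sizes).
#include <cstdio>
#include <cuda_runtime.h>

__global__ void fill(float *out, int n, float value)
{
    // Each thread starts at its global index and strides by the total number of
    // launched threads, so n may exceed gridDim.x * blockDim.x.
    for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n;
         i += blockDim.x * gridDim.x) {
        out[i] = value;
    }
}

int main()
{
    const int n = 1 << 20;
    float *d_out;
    cudaMalloc(&d_out, n * sizeof(float));

    // Deliberately launch fewer threads than elements; the loop covers the rest.
    fill<<<64, 256>>>(d_out, n, 3.0f);
    cudaDeviceSynchronize();

    float h;
    cudaMemcpy(&h, d_out + n - 1, sizeof(float), cudaMemcpyDeviceToHost);
    printf("last element = %f\n", h);  // expect 3.0
    cudaFree(d_out);
    return 0;
}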
e5887af5560c876a173c8ee824ecd73b6834ead9.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <accelerate_cuda.h>

extern "C" __global__ void generate(const Int64 shOut_2, const Int64 shOut_1, const Int64 shOut_0, double* __restrict__ arrOut_0)
{
    const int shapeSize = shOut_2 * (shOut_1 * shOut_0);
    const int gridSize = blockDim.x * gridDim.x;
    int ix;

    for (ix = blockDim.x * blockIdx.x + threadIdx.x; ix < shapeSize; ix += gridSize) {
        const Int64 tmp_0 = ix;
        const Int64 tmp_1 = tmp_0 / shOut_0;
        const Int64 tmp_2 = tmp_1 / shOut_1;

        arrOut_0[ix] = 0.0;
    }
}
e5887af5560c876a173c8ee824ecd73b6834ead9.cu
#include <accelerate_cuda.h>

extern "C" __global__ void generate(const Int64 shOut_2, const Int64 shOut_1, const Int64 shOut_0, double* __restrict__ arrOut_0)
{
    const int shapeSize = shOut_2 * (shOut_1 * shOut_0);
    const int gridSize = blockDim.x * gridDim.x;
    int ix;

    for (ix = blockDim.x * blockIdx.x + threadIdx.x; ix < shapeSize; ix += gridSize) {
        const Int64 tmp_0 = ix;
        const Int64 tmp_1 = tmp_0 / shOut_0;
        const Int64 tmp_2 = tmp_1 / shOut_1;

        arrOut_0[ix] = 0.0;
    }
}
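The generated kernel flattens the three-dimensional output shape into a single index ix and derives the outer coordinates by integer division, even though this particular kernel only writes 0.0. The host-side sketch below shows the same row-major delinearization on an illustrative 2x3x4 shape.

// Row-major delinearization sketch: ix -> (i2, i1, i0) for a shape
// (shOut_2, shOut_1, shOut_0), matching the divisions in the kernel above.
#include <cstdio>

int main()
{
    const long long shOut_2 = 2, shOut_1 = 3, shOut_0 = 4;   // illustrative shape

    for (long long ix = 0; ix < shOut_2 * shOut_1 * shOut_0; ix++) {
        long long i0 = ix % shOut_0;              // fastest-varying axis
        long long i1 = (ix / shOut_0) % shOut_1;  // tmp_1 in the kernel, reduced mod shOut_1
        long long i2 = ix / (shOut_0 * shOut_1);  // tmp_2 in the kernel
        if (ix < 6)
            printf("ix=%lld -> (%lld, %lld, %lld)\n", ix, i2, i1, i0);
    }
    return 0;
}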
8968f6c70968ac4238f8a6cae112d702561208c3.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /************************************************ * 2D Convolution * 2D convolution of an arbitrary image matrix and an arbitrary filter. * * Usage: * Compile using nvcc -lcudart random_gens.cu convolution.cu -o problem4 * Run using ./problem4 <Size of the image> <Size of the filter> * * Notes: * Uncomment line number 157, if you try to run it in CSSC's computation * server * * Example: * ./problem4 150 5 * ************************************************/ #include <stdio.h> #include <stdlib.h> #include <time.h> #include "random_gens.h" /** * Safety macro * Convenience macro which checks the output of all CUDA calls, and prints the verbose error incase of any */ #ifndef __CUDA_SAFE_CALL hipError_t __cuda_error; #define __CUDA_SAFE_CALL(call) do { __cuda_error = call; if(__cuda_error != hipSuccess) { fprintf(stderr,"CUDA Error: %s,%s, line %d\n",hipGetErrorString(__cuda_error), __FILE__, __LINE__ ); exit(-1);} } while(0) #endif #define KERNEL_BLOCK_SIZE 64 /** * Tuple for maintaining coordinates */ struct tuple{ int first; int second; }; /** * Copies a section of the window of image corresponding to the filter values which were copied. * * The filter is flipped for convolution. Instead of that, the image window is flipped, and the * filter is kept constant. * * To maintain a constant shared memory, blocks of KERNEL_BLOCK_SIZE are copied at once. As blocks * of 64 filter elements are copied at one go, the corresponding image elements are also copied * through this function. * * Parameters: * s_img - The shared memory space allocated for the image inside the block * img - The global memory copy of the image * img_size - Image dimensions * filter_size - Filter dimensions * k - Block number to be copied. */ __device__ void copyflippedwindow( float* s_img, float* img, struct tuple img_size, struct tuple filter_size, int k ) { int i = blockIdx.x; int j = blockIdx.y; // This may look weird. // n is the location of the filter element corresponding to the element corresponding to this // thread, when the filter is flattened to a 1D array. // i.e. Assume that this thread is trying to access the image element corresponding to (2,2) of the // filter( Assume a 5x5 filter ) when it is centered in (i,j) of the image. n now is 2*5 + 2 = 12, since // (2,2) will come at 12th position when flattened. int n = k*KERNEL_BLOCK_SIZE + threadIdx.x; // From n, we are calculating the coordinate i.e. (2,2) in the example in the above comment. // Why go through all this? // This implementation attempts to split a single convolution into a set of seperate vector // products and vector sums. But as the filter size grows, the number of values that has to be // copied grows. To ensure it works for scalable filter sizes, this splits any sized filter // into blocks of 64 elements each step. But 64 necessarily wont be aligned with the filter size, // i.e. 64 may not end nicely at the end of row. So simulating a flattening operation, and then // taking a chunk of 64 elements, looks like the only way out of this. int I = n / filter_size.second; int J = n % filter_size.second; // I,J( after this computation ) will be the actual image coordinates, which will get multiplied with // ( I-filtersize/2, J-filtersize/2 ) of the filter. // filtersize/2 is subtracted from J, because 0,0 of the filter lies at the center. But 0,0 of filter // array corresponds to the top left element of the filter. 
I = i - ( I - filter_size.first / 2 ); J = j - ( J - filter_size.second / 2 ); if( I < img_size.first && J < img_size.second && I >= 0 && J >= 0 ) s_img[ threadIdx.x ] = img[ I*img_size.second + J ]; else // Zero Padding s_img[ threadIdx.x ] = 0; __syncthreads(); } /** * Copies a section of the filter. * * For detailed explanation look at copyflippedimage function * * Parameters: * s_filter - The shared memory space allocated for the filter inside the block * img - The global memory copy of the filter * img_size - Image dimensions * filter_size - Filter dimensions * k - Block number to be copied. */ __device__ void copyfilter( float *s_filter, float *filter, struct tuple img_size, struct tuple filter_size, int k ) { int n = k*KERNEL_BLOCK_SIZE + threadIdx.x; int I = n / filter_size.second; int J = n % filter_size.second; //I = ( I - ( filter_size.first / 2 ) ); //J = ( J - ( filter_size.second / 2 ) ); if( I < filter_size.first && J < filter_size.second ) s_filter[ threadIdx.x ] = filter[ n ]; else s_filter[ threadIdx.x ] = 0; __syncthreads(); } /** * Element-wise multiplication of two arrays */ __device__ void outerproduct( float *img, float *flt, float *out ) { int i = threadIdx.x; out[ i ] = img[ i ]*flt[i]; __syncthreads(); } /** * Calculates the sum of an array */ __device__ void reducesum( float *in ) { int alive= KERNEL_BLOCK_SIZE >> 1; while( alive >= 1 ){ if( threadIdx.x < alive ){ in[ threadIdx.x ] += in[ threadIdx.x + alive ]; } else break; alive = alive >> 1; } __syncthreads(); } //J = threadIdx.y; /*** * Parallel convolution kernel. * * This kernel splits the job of the convolution into multiple parts by allocating seperate blocks for each * and every element of the output. * For each block, This kernel again splits the job into multiple loops of computing the elementwise product * and the sum, for blocks of KERNEL_BLOCK_SIZE elements. Detailed explanation available in copyflippedwindow * function. * * Parameters: * image - Global memory copy of the image * filter - Global memory copy of the filter * output - Global memory space for the output * img_size - Image size * filter_size - Filter size */ __global__ void convolve2d( float *image, float *filter, float *output, struct tuple img_size, struct tuple filter_size ) { int i,j; __shared__ float s_filter[KERNEL_BLOCK_SIZE]; __shared__ float s_image[KERNEL_BLOCK_SIZE]; __shared__ float temp[KERNEL_BLOCK_SIZE]; i = blockIdx.x; j = blockIdx.y; // Total no. of elements in the filter. int fsize = filter_size.first * filter_size.second; // No. of blocks required to complete one computation for one window. 
int blocks = ( fsize - 1 )/KERNEL_BLOCK_SIZE + 1; float accum = 0; for( int k=0; k<blocks; k++ ){ copyflippedwindow( (float *)&s_image, image, img_size, filter_size, k ); copyfilter( (float *)&s_filter, filter, img_size, filter_size, k ); outerproduct( (float *)&s_image, (float *)&s_filter, (float *)&temp ); reducesum( (float *)&temp ); //if( threadIdx.x == 0 ) accum += temp[0]; __syncthreads(); } if( threadIdx.x == 0 && i< img_size.first && j < img_size.second ) output[ i*img_size.second + j ] = accum; } void pconvolve2d( float *in, float *filter, float *__restrict__ out, int in_side, int filter_side ) { float *ga, *gf, *gb; int matrix_size = in_side * in_side * sizeof(float); int filter_size = filter_side * filter_side * sizeof(float); int dim_thread = KERNEL_BLOCK_SIZE; int num_blocks = in_side; dim3 block(dim_thread); dim3 grid(num_blocks,num_blocks); struct tuple fsize = {filter_side, filter_side }, isize= {in_side, in_side}; __CUDA_SAFE_CALL( hipMalloc( &ga, matrix_size ) ); __CUDA_SAFE_CALL( hipMalloc( &gf, filter_size ) ); __CUDA_SAFE_CALL( hipMalloc( &gb, matrix_size ) ); __CUDA_SAFE_CALL( hipMemcpy( ga, in, matrix_size, hipMemcpyHostToDevice ) ); __CUDA_SAFE_CALL( hipMemcpy( gf, filter, filter_size, hipMemcpyHostToDevice ) ); hipLaunchKernelGGL(( convolve2d), dim3(grid),dim3(block), 0, 0, ga, gf, gb, isize, fsize ); __CUDA_SAFE_CALL( hipMemcpy( out, gb, matrix_size, hipMemcpyDeviceToHost ) ); hipFree( ga ); hipFree( gf ); hipFree( gb ); } void sconvolve2d( float *in, float *filter, float *__restrict__ out, int in_side, int filter_side ) { float accum; int index[2]; // Lot to unpack here.. // The first set of loops, loops through each index in the output image. ( Since input and output // has the same size. ) for( int i=0; i<in_side; i++ ){ for( int j=0; j<in_side; j++ ){ out[ i*in_side + j ] = 0; accum = 0; // Since this is a convolution, the window is flipped. So the (-x/2,-x/2) of the filter // will be multiplied with ( x/2,x/2 ) of the image. (-x/2,-x/2) of the filter corresponds // to (0,0) in the filter array used here. index[0] = i + filter_side/2; // This loop, loops through the filter's X axis. for( int fi=0; fi<filter_side; fi++ ){ // Skip the entire inner loop if index is outside the bounds of the image. Since it // will be zero padded, the output is anyway a 0, and doesn't affect the computation. if( index[0] >= 0 && index[0] < in_side ){ // Second index calculation for window. index[1] = j + filter_side/2; // This is the inner filter loop, which loops through the filter's Y axis for( int fj=0; fj<filter_side; fj++ ){ // Skip computation, if index out of bounds. if( index[1] >= 0 && index[1] < in_side ) accum += in[ index[0]*in_side + index[1] ] * filter[ fi*filter_side + fj ]; // Index is getting decremented due to the fact that we have inverted the // window. index[1]--; } } index[0]--; } out[ i*in_side + j ] = accum; } } } void print_matrix( float *A, int side ) { //printf("A=\n"); for( int i=0; i<side; i++ ){ for( int j=0; j<side; j++ ){ printf("% 5.2f ",A[i*side + j]); } printf("\n"); } } int main( int argc, char* argv[] ) { /* Matrix container pointers */ float *A,*out; //float filter[]={ // 2,0,0, // 0,0,0, // 0,0,1 //}; float *filter; int size; /* Size of the matrix */ int filter_size; hipEvent_t start,stop; bool do_print=false; /* Debug flag to print matrices in case of small matrices */ float pms = 0, sms = 0; /* Parallel and sequential times */ if( argc != 3 ){ fprintf(stderr, "Atleast one argument required. 
Usage: %s <Side of the matrix> <filter size>",argv[0]); return -1; } /* Get size of the matrix from command line */ size = atoi( argv[1] ); filter_size = atoi( argv[2] ); if( size <= 12 ) do_print= true; A = (float *) malloc( sizeof(float)* size * size ); filter = (float *) malloc( sizeof(float) * filter_size * filter_size ); out = (float *) malloc( sizeof(float)* size * size ); generate_notso_random_matrix( A, size ); generate_notso_random_matrix( filter, filter_size ); if( do_print ){ printf("A=\n"); print_matrix( A, size ); printf("filter=\n"); print_matrix( filter, filter_size ); } /* Uncomment the below line to run this code in CSSC Computation server. CSSC's 0th device is always occupied and fails to allocate any size of memory consistently. */ __CUDA_SAFE_CALL( hipSetDevice(2) ); /* Timers to time the parallel process */ __CUDA_SAFE_CALL( hipEventCreate(&start) ); __CUDA_SAFE_CALL( hipEventCreate(&stop) ); /********************* * Start of RHS GPU run *******************/ __CUDA_SAFE_CALL( hipEventRecord(start) ); pconvolve2d( A, filter, out, size, filter_size ); /***************** * End of RHS run ****************/ __CUDA_SAFE_CALL( hipEventRecord(stop) ); __CUDA_SAFE_CALL( hipEventSynchronize(stop) ); __CUDA_SAFE_CALL( hipEventElapsedTime( &pms, start, stop ) ); if( do_print ){ printf("Out=\n"); print_matrix( out, size ); } struct timespec seq_start,seq_end; /* clock_gettime gets the process specific time spent, as opposed to the system time expended */ clock_gettime( CLOCK_PROCESS_CPUTIME_ID, &seq_start ); sconvolve2d( A, filter, out, size, filter_size ); clock_gettime( CLOCK_PROCESS_CPUTIME_ID, &seq_end ); /************************* * End of Sequential Stuff ************************/ if( do_print ){ printf("Out=\n"); print_matrix( out, size ); } /* Getting time in milliseconds for comparability */ sms = ( (float)seq_end.tv_sec - seq_start.tv_sec )*1000 + ( (float)seq_end.tv_nsec - seq_start.tv_nsec ) / 1000000; printf("%12s %12s %12s %12s\n","N","Parallel","Sequential","Speedup"); printf("%12d % 12f % 12f % 12f\n",size,pms,sms,sms/pms); free(A); free(filter); free(out); }
8968f6c70968ac4238f8a6cae112d702561208c3.cu
/************************************************ * 2D Convolution * 2D convolution of an arbitrary image matrix and an arbitrary filter. * * Usage: * Compile using nvcc -lcudart random_gens.cu convolution.cu -o problem4 * Run using ./problem4 <Size of the image> <Size of the filter> * * Notes: * Uncomment line number 157, if you try to run it in CSSC's computation * server * * Example: * ./problem4 150 5 * ************************************************/ #include <stdio.h> #include <stdlib.h> #include <time.h> #include "random_gens.h" /** * Safety macro * Convenience macro which checks the output of all CUDA calls, and prints the verbose error incase of any */ #ifndef __CUDA_SAFE_CALL cudaError_t __cuda_error; #define __CUDA_SAFE_CALL(call) do { __cuda_error = call; if(__cuda_error != cudaSuccess) { fprintf(stderr,"CUDA Error: %s,%s, line %d\n",cudaGetErrorString(__cuda_error), __FILE__, __LINE__ ); exit(-1);} } while(0) #endif #define KERNEL_BLOCK_SIZE 64 /** * Tuple for maintaining coordinates */ struct tuple{ int first; int second; }; /** * Copies a section of the window of image corresponding to the filter values which were copied. * * The filter is flipped for convolution. Instead of that, the image window is flipped, and the * filter is kept constant. * * To maintain a constant shared memory, blocks of KERNEL_BLOCK_SIZE are copied at once. As blocks * of 64 filter elements are copied at one go, the corresponding image elements are also copied * through this function. * * Parameters: * s_img - The shared memory space allocated for the image inside the block * img - The global memory copy of the image * img_size - Image dimensions * filter_size - Filter dimensions * k - Block number to be copied. */ __device__ void copyflippedwindow( float* s_img, float* img, struct tuple img_size, struct tuple filter_size, int k ) { int i = blockIdx.x; int j = blockIdx.y; // This may look weird. // n is the location of the filter element corresponding to the element corresponding to this // thread, when the filter is flattened to a 1D array. // i.e. Assume that this thread is trying to access the image element corresponding to (2,2) of the // filter( Assume a 5x5 filter ) when it is centered in (i,j) of the image. n now is 2*5 + 2 = 12, since // (2,2) will come at 12th position when flattened. int n = k*KERNEL_BLOCK_SIZE + threadIdx.x; // From n, we are calculating the coordinate i.e. (2,2) in the example in the above comment. // Why go through all this? // This implementation attempts to split a single convolution into a set of seperate vector // products and vector sums. But as the filter size grows, the number of values that has to be // copied grows. To ensure it works for scalable filter sizes, this splits any sized filter // into blocks of 64 elements each step. But 64 necessarily wont be aligned with the filter size, // i.e. 64 may not end nicely at the end of row. So simulating a flattening operation, and then // taking a chunk of 64 elements, looks like the only way out of this. int I = n / filter_size.second; int J = n % filter_size.second; // I,J( after this computation ) will be the actual image coordinates, which will get multiplied with // ( I-filtersize/2, J-filtersize/2 ) of the filter. // filtersize/2 is subtracted from J, because 0,0 of the filter lies at the center. But 0,0 of filter // array corresponds to the top left element of the filter. 
I = i - ( I - filter_size.first / 2 ); J = j - ( J - filter_size.second / 2 ); if( I < img_size.first && J < img_size.second && I >= 0 && J >= 0 ) s_img[ threadIdx.x ] = img[ I*img_size.second + J ]; else // Zero Padding s_img[ threadIdx.x ] = 0; __syncthreads(); } /** * Copies a section of the filter. * * For detailed explanation look at copyflippedimage function * * Parameters: * s_filter - The shared memory space allocated for the filter inside the block * img - The global memory copy of the filter * img_size - Image dimensions * filter_size - Filter dimensions * k - Block number to be copied. */ __device__ void copyfilter( float *s_filter, float *filter, struct tuple img_size, struct tuple filter_size, int k ) { int n = k*KERNEL_BLOCK_SIZE + threadIdx.x; int I = n / filter_size.second; int J = n % filter_size.second; //I = ( I - ( filter_size.first / 2 ) ); //J = ( J - ( filter_size.second / 2 ) ); if( I < filter_size.first && J < filter_size.second ) s_filter[ threadIdx.x ] = filter[ n ]; else s_filter[ threadIdx.x ] = 0; __syncthreads(); } /** * Element-wise multiplication of two arrays */ __device__ void outerproduct( float *img, float *flt, float *out ) { int i = threadIdx.x; out[ i ] = img[ i ]*flt[i]; __syncthreads(); } /** * Calculates the sum of an array */ __device__ void reducesum( float *in ) { int alive= KERNEL_BLOCK_SIZE >> 1; while( alive >= 1 ){ if( threadIdx.x < alive ){ in[ threadIdx.x ] += in[ threadIdx.x + alive ]; } else break; alive = alive >> 1; } __syncthreads(); } //J = threadIdx.y; /*** * Parallel convolution kernel. * * This kernel splits the job of the convolution into multiple parts by allocating seperate blocks for each * and every element of the output. * For each block, This kernel again splits the job into multiple loops of computing the elementwise product * and the sum, for blocks of KERNEL_BLOCK_SIZE elements. Detailed explanation available in copyflippedwindow * function. * * Parameters: * image - Global memory copy of the image * filter - Global memory copy of the filter * output - Global memory space for the output * img_size - Image size * filter_size - Filter size */ __global__ void convolve2d( float *image, float *filter, float *output, struct tuple img_size, struct tuple filter_size ) { int i,j; __shared__ float s_filter[KERNEL_BLOCK_SIZE]; __shared__ float s_image[KERNEL_BLOCK_SIZE]; __shared__ float temp[KERNEL_BLOCK_SIZE]; i = blockIdx.x; j = blockIdx.y; // Total no. of elements in the filter. int fsize = filter_size.first * filter_size.second; // No. of blocks required to complete one computation for one window. 
int blocks = ( fsize - 1 )/KERNEL_BLOCK_SIZE + 1; float accum = 0; for( int k=0; k<blocks; k++ ){ copyflippedwindow( (float *)&s_image, image, img_size, filter_size, k ); copyfilter( (float *)&s_filter, filter, img_size, filter_size, k ); outerproduct( (float *)&s_image, (float *)&s_filter, (float *)&temp ); reducesum( (float *)&temp ); //if( threadIdx.x == 0 ) accum += temp[0]; __syncthreads(); } if( threadIdx.x == 0 && i< img_size.first && j < img_size.second ) output[ i*img_size.second + j ] = accum; } void pconvolve2d( float *in, float *filter, float *__restrict__ out, int in_side, int filter_side ) { float *ga, *gf, *gb; int matrix_size = in_side * in_side * sizeof(float); int filter_size = filter_side * filter_side * sizeof(float); int dim_thread = KERNEL_BLOCK_SIZE; int num_blocks = in_side; dim3 block(dim_thread); dim3 grid(num_blocks,num_blocks); struct tuple fsize = {filter_side, filter_side }, isize= {in_side, in_side}; __CUDA_SAFE_CALL( cudaMalloc( &ga, matrix_size ) ); __CUDA_SAFE_CALL( cudaMalloc( &gf, filter_size ) ); __CUDA_SAFE_CALL( cudaMalloc( &gb, matrix_size ) ); __CUDA_SAFE_CALL( cudaMemcpy( ga, in, matrix_size, cudaMemcpyHostToDevice ) ); __CUDA_SAFE_CALL( cudaMemcpy( gf, filter, filter_size, cudaMemcpyHostToDevice ) ); convolve2d<<<grid,block>>>( ga, gf, gb, isize, fsize ); __CUDA_SAFE_CALL( cudaMemcpy( out, gb, matrix_size, cudaMemcpyDeviceToHost ) ); cudaFree( ga ); cudaFree( gf ); cudaFree( gb ); } void sconvolve2d( float *in, float *filter, float *__restrict__ out, int in_side, int filter_side ) { float accum; int index[2]; // Lot to unpack here.. // The first set of loops, loops through each index in the output image. ( Since input and output // has the same size. ) for( int i=0; i<in_side; i++ ){ for( int j=0; j<in_side; j++ ){ out[ i*in_side + j ] = 0; accum = 0; // Since this is a convolution, the window is flipped. So the (-x/2,-x/2) of the filter // will be multiplied with ( x/2,x/2 ) of the image. (-x/2,-x/2) of the filter corresponds // to (0,0) in the filter array used here. index[0] = i + filter_side/2; // This loop, loops through the filter's X axis. for( int fi=0; fi<filter_side; fi++ ){ // Skip the entire inner loop if index is outside the bounds of the image. Since it // will be zero padded, the output is anyway a 0, and doesn't affect the computation. if( index[0] >= 0 && index[0] < in_side ){ // Second index calculation for window. index[1] = j + filter_side/2; // This is the inner filter loop, which loops through the filter's Y axis for( int fj=0; fj<filter_side; fj++ ){ // Skip computation, if index out of bounds. if( index[1] >= 0 && index[1] < in_side ) accum += in[ index[0]*in_side + index[1] ] * filter[ fi*filter_side + fj ]; // Index is getting decremented due to the fact that we have inverted the // window. index[1]--; } } index[0]--; } out[ i*in_side + j ] = accum; } } } void print_matrix( float *A, int side ) { //printf("A=\n"); for( int i=0; i<side; i++ ){ for( int j=0; j<side; j++ ){ printf("% 5.2f ",A[i*side + j]); } printf("\n"); } } int main( int argc, char* argv[] ) { /* Matrix container pointers */ float *A,*out; //float filter[]={ // 2,0,0, // 0,0,0, // 0,0,1 //}; float *filter; int size; /* Size of the matrix */ int filter_size; cudaEvent_t start,stop; bool do_print=false; /* Debug flag to print matrices in case of small matrices */ float pms = 0, sms = 0; /* Parallel and sequential times */ if( argc != 3 ){ fprintf(stderr, "Atleast one argument required. 
Usage: %s <Side of the matrix> <filter size>",argv[0]); return -1; } /* Get size of the matrix from command line */ size = atoi( argv[1] ); filter_size = atoi( argv[2] ); if( size <= 12 ) do_print= true; A = (float *) malloc( sizeof(float)* size * size ); filter = (float *) malloc( sizeof(float) * filter_size * filter_size ); out = (float *) malloc( sizeof(float)* size * size ); generate_notso_random_matrix( A, size ); generate_notso_random_matrix( filter, filter_size ); if( do_print ){ printf("A=\n"); print_matrix( A, size ); printf("filter=\n"); print_matrix( filter, filter_size ); } /* Uncomment the below line to run this code in CSSC Computation server. CSSC's 0th device is always occupied and fails to allocate any size of memory consistently. */ __CUDA_SAFE_CALL( cudaSetDevice(2) ); /* Timers to time the parallel process */ __CUDA_SAFE_CALL( cudaEventCreate(&start) ); __CUDA_SAFE_CALL( cudaEventCreate(&stop) ); /********************* * Start of RHS GPU run *******************/ __CUDA_SAFE_CALL( cudaEventRecord(start) ); pconvolve2d( A, filter, out, size, filter_size ); /***************** * End of RHS run ****************/ __CUDA_SAFE_CALL( cudaEventRecord(stop) ); __CUDA_SAFE_CALL( cudaEventSynchronize(stop) ); __CUDA_SAFE_CALL( cudaEventElapsedTime( &pms, start, stop ) ); if( do_print ){ printf("Out=\n"); print_matrix( out, size ); } struct timespec seq_start,seq_end; /* clock_gettime gets the process specific time spent, as opposed to the system time expended */ clock_gettime( CLOCK_PROCESS_CPUTIME_ID, &seq_start ); sconvolve2d( A, filter, out, size, filter_size ); clock_gettime( CLOCK_PROCESS_CPUTIME_ID, &seq_end ); /************************* * End of Sequential Stuff ************************/ if( do_print ){ printf("Out=\n"); print_matrix( out, size ); } /* Getting time in milliseconds for comparability */ sms = ( (float)seq_end.tv_sec - seq_start.tv_sec )*1000 + ( (float)seq_end.tv_nsec - seq_start.tv_nsec ) / 1000000; printf("%12s %12s %12s %12s\n","N","Parallel","Sequential","Speedup"); printf("%12d % 12f % 12f % 12f\n",size,pms,sms,sms/pms); free(A); free(filter); free(out); }
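The reducesum device function above halves the number of active threads each step, but it synchronizes only once after the loop and lets retired threads break out early, relying on implicit warp-synchronous execution for the later steps, which newer architectures no longer guarantee. The standalone sketch below shows the common shared-memory tree reduction with a barrier inside each step; the BLOCK size and test data are illustrative, and this is a separate example rather than a drop-in replacement for the file's function.

// Shared-memory tree reduction sketch: every thread reaches every __syncthreads(),
// and only the set of threads doing work shrinks each step.
#include <cstdio>
#include <cuda_runtime.h>

#define BLOCK 64   // illustrative; must be a power of two here

__global__ void blockSum(const float *in, float *out)
{
    __shared__ float buf[BLOCK];
    const int t = threadIdx.x;

    buf[t] = in[blockIdx.x * BLOCK + t];
    __syncthreads();

    for (int alive = BLOCK / 2; alive >= 1; alive >>= 1) {
        if (t < alive)
            buf[t] += buf[t + alive];
        __syncthreads();   // all threads of the block participate in the barrier
    }

    if (t == 0)
        out[blockIdx.x] = buf[0];
}

int main()
{
    const int n = BLOCK;
    float h_in[BLOCK], h_out = 0.0f;
    for (int i = 0; i < n; i++) h_in[i] = 1.0f;

    float *d_in, *d_out;
    cudaMalloc(&d_in, n * sizeof(float));
    cudaMalloc(&d_out, sizeof(float));
    cudaMemcpy(d_in, h_in, n * sizeof(float), cudaMemcpyHostToDevice);

    blockSum<<<1, BLOCK>>>(d_in, d_out);
    cudaMemcpy(&h_out, d_out, sizeof(float), cudaMemcpyDeviceToHost);
    printf("sum = %f\n", h_out);   // expect 64.0

    cudaFree(d_in);
    cudaFree(d_out);
    return 0;
}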
3f0212c344d4114134623f0ca06f94646e0f545f.hip
// !!! This is a file automatically generated by hipify!!! /****************************************************************************** * * (C) Copyright 2010 The Board of Trustees of the * University of Illinois * All Rights Reserved * ******************************************************************************/ #include <stdio.h> #include <stdlib.h> #include "kernel.hip" #include "support.h" #define trainNum 100000 // number of train data #define testNum 1000 // number of test data #define inLayout 10 // number of input layer's neurons #define hideLayout 8 // number of hidden layer's neurons #define outLayout 1 // number of output layer's neurons #define initWeightMax 0.5 // max value of initial weight #define eta (0.1f) // learn rate #define iterMax 10000 // max iteration times #define batchNum 32 // number of batches #define BLOCKSIZE 16 #define BLOCKSIZE_32 32 int main (int argc, char *argv[]) { Timer timer; hipError_t cuda_ret; // Initialize host variables ---------------------------------------------- printf("\nSetting up the problem..."); fflush(stdout); startTime(&timer); float *inputTrain, *inputTest, *outputTrain, *outputTest; inputTrain = (float*)malloc(100000 * 10 * sizeof(float)); inputTest = (float*)malloc(1000 * 10 * sizeof(float)); outputTrain = (float*)malloc(100000 * 1 * sizeof(float)); outputTest = (float*)malloc(1000 * 1 * sizeof(float)); dim3 dim_grid, dim_block; int sumTrain = 0, sumTest = 0; for (unsigned int i=0; i < 1000000; i++) { inputTrain[i] = rand()%2; sumTrain += inputTrain[i]; if(i % 10 == 9){ outputTrain[i / 10] = sumTrain % 2; sumTrain = 0; } } for (unsigned int i=0; i < 10000; i++) { inputTest[i] = rand()%2; sumTest += inputTest[i]; if(i % 10 == 9){ outputTest[i / 10] = sumTest % 2; sumTest = 0; } } stopTime(&timer); printf("%f s\n", elapsedTime(timer)); // Launch kernel using standard sgemm interface --------------------------- printf("Launching kernel..."); fflush(stdout); startTime(&timer); BpMain(inputTrain, inputTest, outputTrain, outputTest); cuda_ret = hipDeviceSynchronize(); if(cuda_ret != hipSuccess) FATAL("Unable to launch kernel"); stopTime(&timer); printf("%f s\n", elapsedTime(timer)); // Free memory ------------------------------------------------------------ free(inputTrain); free(inputTest); free(outputTrain); free(outputTest); return 0; }
3f0212c344d4114134623f0ca06f94646e0f545f.cu
/****************************************************************************** * * (C) Copyright 2010 The Board of Trustees of the * University of Illinois * All Rights Reserved * ******************************************************************************/ #include <stdio.h> #include <stdlib.h> #include "kernel.cu" #include "support.h" #define trainNum 100000 // number of train data #define testNum 1000 // number of test data #define inLayout 10 // number of input layer's neurons #define hideLayout 8 // number of hidden layer's neurons #define outLayout 1 // number of output layer's neurons #define initWeightMax 0.5 // max value of initial weight #define eta (0.1f) // learn rate #define iterMax 10000 // max iteration times #define batchNum 32 // number of batches #define BLOCKSIZE 16 #define BLOCKSIZE_32 32 int main (int argc, char *argv[]) { Timer timer; cudaError_t cuda_ret; // Initialize host variables ---------------------------------------------- printf("\nSetting up the problem..."); fflush(stdout); startTime(&timer); float *inputTrain, *inputTest, *outputTrain, *outputTest; inputTrain = (float*)malloc(100000 * 10 * sizeof(float)); inputTest = (float*)malloc(1000 * 10 * sizeof(float)); outputTrain = (float*)malloc(100000 * 1 * sizeof(float)); outputTest = (float*)malloc(1000 * 1 * sizeof(float)); dim3 dim_grid, dim_block; int sumTrain = 0, sumTest = 0; for (unsigned int i=0; i < 1000000; i++) { inputTrain[i] = rand()%2; sumTrain += inputTrain[i]; if(i % 10 == 9){ outputTrain[i / 10] = sumTrain % 2; sumTrain = 0; } } for (unsigned int i=0; i < 10000; i++) { inputTest[i] = rand()%2; sumTest += inputTest[i]; if(i % 10 == 9){ outputTest[i / 10] = sumTest % 2; sumTest = 0; } } stopTime(&timer); printf("%f s\n", elapsedTime(timer)); // Launch kernel using standard sgemm interface --------------------------- printf("Launching kernel..."); fflush(stdout); startTime(&timer); BpMain(inputTrain, inputTest, outputTrain, outputTest); cuda_ret = cudaDeviceSynchronize(); if(cuda_ret != cudaSuccess) FATAL("Unable to launch kernel"); stopTime(&timer); printf("%f s\n", elapsedTime(timer)); // Free memory ------------------------------------------------------------ free(inputTrain); free(inputTest); free(outputTrain); free(outputTest); return 0; }
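The driver above wraps the whole BpMain call, including setup, in a host-side Timer bracketed by cudaDeviceSynchronize. When only the device portion is of interest, CUDA events can bracket the launch directly, as several other files in this collection do; below is a minimal sketch with an illustrative busyWork kernel standing in for the real workload.

// Minimal CUDA event timing sketch (busyWork is an illustrative stand-in).
#include <cstdio>
#include <cuda_runtime.h>

__global__ void busyWork(float *x, int n)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n)
        x[i] = x[i] * 0.5f + 1.0f;
}

int main()
{
    const int n = 1 << 22;
    float *d_x;
    cudaMalloc(&d_x, n * sizeof(float));
    cudaMemset(d_x, 0, n * sizeof(float));

    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    cudaEventRecord(start);
    busyWork<<<(n + 255) / 256, 256>>>(d_x, n);
    cudaEventRecord(stop);
    cudaEventSynchronize(stop);          // wait for the stop event before reading the time

    float ms = 0.0f;
    cudaEventElapsedTime(&ms, start, stop);
    printf("kernel time: %f ms\n", ms);

    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cudaFree(d_x);
    return 0;
}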
aab790935ffff650a1a3da9079a2831b2fb77f8b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "hiprand/hiprand.h" #include "rocblas.h" #include <assert.h> extern "C" { #include "blas.h" #include "hip/hip_runtime.h" #include "utils.h" } __global__ void scale_bias_kernel(float *output, float *biases, int n, int size) { int offset = blockIdx.x * blockDim.x + threadIdx.x; int filter = blockIdx.y; int batch = blockIdx.z; if(offset < size) output[(batch*n+filter)*size + offset] *= biases[filter]; } void scale_bias_gpu(float *output, float *biases, int batch, int n, int size) { dim3 dimGrid((size-1)/BLOCK + 1, n, batch); dim3 dimBlock(BLOCK, 1, 1); hipLaunchKernelGGL(( scale_bias_kernel), dim3(dimGrid), dim3(dimBlock), 0, 0, output, biases, n, size); check_error(hipPeekAtLastError()); } __global__ void backward_scale_kernel(float *x_norm, float *delta, int batch, int n, int size, float *scale_updates) { __shared__ float part[BLOCK]; int i,b; int filter = blockIdx.x; int p = threadIdx.x; float sum = 0; for(b = 0; b < batch; ++b){ for(i = 0; i < size; i += BLOCK){ int index = p + i + size*(filter + n*b); sum += (p+i < size) ? delta[index]*x_norm[index] : 0; } } part[p] = sum; __syncthreads(); if (p == 0) { for(i = 0; i < BLOCK; ++i) scale_updates[filter] += part[i]; } } void backward_scale_gpu(float *x_norm, float *delta, int batch, int n, int size, float *scale_updates) { hipLaunchKernelGGL(( backward_scale_kernel), dim3(n), dim3(BLOCK), 0, 0, x_norm, delta, batch, n, size, scale_updates); check_error(hipPeekAtLastError()); } __global__ void add_bias_kernel(float *output, float *biases, int batch, int n, int size) { int index = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if (index >= n*size*batch) return; int i = index % size; index /= size; int j = index % n; index /= n; int k = index; output[(k*n+j)*size + i] += biases[j]; } void add_bias_gpu(float *output, float *biases, int batch, int n, int size) { int num = n*size*batch; hipLaunchKernelGGL(( add_bias_kernel), dim3(cuda_gridsize(num)), dim3(BLOCK), 0, 0, output, biases, batch, n, size); check_error(hipPeekAtLastError()); } __global__ void backward_bias_conn_kernel(float *bias_updates, float *delta, int batch, int n) { int index = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if (index >= n) return; int b; float sum = 0; for(b = 0; b < batch; ++b){ int i = b*n + index; sum += delta[i]; } bias_updates[index] += sum; } __global__ void backward_bias_kernel(float *bias_updates, float *delta, int batch, int n, int size) { __shared__ float part[BLOCK]; int i,b; int filter = blockIdx.x; int p = threadIdx.x; float sum = 0; for(b = 0; b < batch; ++b){ for(i = 0; i < size; i += BLOCK){ int index = p + i + size*(filter + n*b); sum += (p+i < size) ? 
delta[index] : 0; } } part[p] = sum; __syncthreads(); if (p == 0) { for(i = 0; i < BLOCK; ++i) bias_updates[filter] += part[i]; } } void backward_bias_gpu(float *bias_updates, float *delta, int batch, int n, int size) { if(size == 1){ hipLaunchKernelGGL(( backward_bias_conn_kernel), dim3(cuda_gridsize(n)), dim3(BLOCK), 0, 0, bias_updates, delta, batch, n); }else{ hipLaunchKernelGGL(( backward_bias_kernel), dim3(n), dim3(BLOCK), 0, 0, bias_updates, delta, batch, n, size); } check_error(hipPeekAtLastError()); } /* __global__ void dot_kernel(float *output, float scale, int batch, int n, int size, float *delta) { int index = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; int f1 = index / n; int f2 = index % n; if (f2 <= f1) return; float sum = 0; float norm1 = 0; float norm2 = 0; int b, i; for(b = 0; b < batch; ++b){ for(i = 0; i < size; ++i){ int i1 = b * size * n + f1 * size + i; int i2 = b * size * n + f2 * size + i; sum += output[i1] * output[i2]; norm1 += output[i1] * output[i1]; norm2 += output[i2] * output[i2]; } } norm1 = sqrt(norm1); norm2 = sqrt(norm2); float norm = norm1 * norm2; sum = sum / norm; for(b = 0; b < batch; ++b){ for(i = 0; i < size; ++i){ int i1 = b * size * n + f1 * size + i; int i2 = b * size * n + f2 * size + i; delta[i1] += - scale * sum * output[i2] / norm; delta[i2] += - scale * sum * output[i1] / norm; } } } void dot_error_gpu(layer l) { dot_kernel<<<cuda_gridsize(l.n*l.n), BLOCK>>>(l.output_gpu, l.dot, l.batch, l.n, l.out_w * l.out_h, l.delta_gpu); check_error(hipPeekAtLastError()); } */ __global__ void adam_kernel(int N, float *x, float *m, float *v, float B1, float B2, float rate, float eps, int t) { int index = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if (index >= N) return; float mhat = m[index] / (1.f - powf(B1, t)); float vhat = v[index] / (1.f - powf(B2, t)); x[index] = x[index] + rate * mhat / (sqrtf(vhat) + eps); } extern "C" void adam_gpu(int n, float *x, float *m, float *v, float B1, float B2, float rate, float eps, int t) { hipLaunchKernelGGL(( adam_kernel), dim3(cuda_gridsize(n)), dim3(BLOCK), 0, 0, n, x, m, v, B1, B2, rate, eps, t); check_error(hipPeekAtLastError()); } extern "C" void adam_update_gpu(float *w, float *d, float *m, float *v, float B1, float B2, float eps, float decay, float rate, int n, int batch, int t) { scal_gpu(n, B1, m, 1); scal_gpu(n, B2, v, 1); axpy_gpu(n, -decay*batch, w, 1, d, 1); axpy_gpu(n, (1-B1), d, 1, m, 1); mul_gpu(n, d, 1, d, 1); axpy_gpu(n, (1-B2), d, 1, v, 1); adam_gpu(n, w, m, v, B1, B2, rate, eps, t); fill_gpu(n, 0, d, 1); } __global__ void normalize_kernel(int N, float *x, float *mean, float *variance, int batch, int filters, int spatial) { int index = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if (index >= N) return; int f = (index/spatial)%filters; x[index] = (x[index] - mean[f])/(sqrtf(variance[f] + .00001f)); } __global__ void normalize_delta_kernel(int N, float *x, float *mean, float *variance, float *mean_delta, float *variance_delta, int batch, int filters, int spatial, float *delta) { int index = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if (index >= N) return; int f = (index/spatial)%filters; delta[index] = delta[index] * 1.f/(sqrtf(variance[f] + .00001f)) + variance_delta[f] * 2.f * (x[index] - mean[f]) / (spatial * batch) + mean_delta[f]/(spatial*batch); } extern "C" void normalize_delta_gpu(float *x, float *mean, float *variance, float *mean_delta, float *variance_delta, int batch, int filters, int spatial, float 
*delta) { size_t N = batch*filters*spatial; hipLaunchKernelGGL(( normalize_delta_kernel), dim3(cuda_gridsize(N)), dim3(BLOCK), 0, 0, N, x, mean, variance, mean_delta, variance_delta, batch, filters, spatial, delta); check_error(hipPeekAtLastError()); } __global__ void variance_delta_kernel(float *x, float *delta, float *mean, float *variance, int batch, int filters, int spatial, float *variance_delta) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if (i >= filters) return; int j,k; variance_delta[i] = 0; for(j = 0; j < batch; ++j){ for(k = 0; k < spatial; ++k){ int index = j*filters*spatial + i*spatial + k; variance_delta[i] += delta[index]*(x[index] - mean[i]); } } variance_delta[i] *= -.5f * powf(variance[i] + .00001f, (float)(-3.f/2.f)); } __global__ void accumulate_kernel(float *x, int n, int groups, float *sum) { int k; int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if (i >= groups) return; sum[i] = 0; for(k = 0; k < n; ++k){ sum[i] += x[k*groups + i]; } } __global__ void fast_mean_delta_kernel(float *delta, float *variance, int batch, int filters, int spatial, float *mean_delta) { const int threads = BLOCK; __shared__ float local[threads]; int id = threadIdx.x; local[id] = 0; int filter = blockIdx.x; int i, j; for(j = 0; j < batch; ++j){ for(i = 0; i < spatial; i += threads){ int index = j*spatial*filters + filter*spatial + i + id; local[id] += (i+id < spatial) ? delta[index] : 0; } } __syncthreads(); if(id == 0){ mean_delta[filter] = 0; for(i = 0; i < threads; ++i){ mean_delta[filter] += local[i]; } mean_delta[filter] *= (-1.f/sqrtf(variance[filter] + .00001f)); } } __global__ void fast_variance_delta_kernel(float *x, float *delta, float *mean, float *variance, int batch, int filters, int spatial, float *variance_delta) { const int threads = BLOCK; __shared__ float local[threads]; int id = threadIdx.x; local[id] = 0; int filter = blockIdx.x; int i, j; for(j = 0; j < batch; ++j){ for(i = 0; i < spatial; i += threads){ int index = j*spatial*filters + filter*spatial + i + id; local[id] += (i+id < spatial) ? 
delta[index]*(x[index] - mean[filter]) : 0; } } __syncthreads(); if(id == 0){ variance_delta[filter] = 0; for(i = 0; i < threads; ++i){ variance_delta[filter] += local[i]; } variance_delta[filter] *= -.5f * powf(variance[filter] + .00001f, (float)(-3.f/2.f)); } } __global__ void mean_delta_kernel(float *delta, float *variance, int batch, int filters, int spatial, float *mean_delta) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if (i >= filters) return; int j,k; mean_delta[i] = 0; for (j = 0; j < batch; ++j) { for (k = 0; k < spatial; ++k) { int index = j*filters*spatial + i*spatial + k; mean_delta[i] += delta[index]; } } mean_delta[i] *= (-1.f/sqrtf(variance[i] + .00001f)); } extern "C" void mean_delta_gpu(float *delta, float *variance, int batch, int filters, int spatial, float *mean_delta) { hipLaunchKernelGGL(( mean_delta_kernel), dim3(cuda_gridsize(filters)), dim3(BLOCK), 0, 0, delta, variance, batch, filters, spatial, mean_delta); check_error(hipPeekAtLastError()); } extern "C" void fast_mean_delta_gpu(float *delta, float *variance, int batch, int filters, int spatial, float *mean_delta) { hipLaunchKernelGGL(( fast_mean_delta_kernel), dim3(filters), dim3(BLOCK), 0, 0, delta, variance, batch, filters, spatial, mean_delta); check_error(hipPeekAtLastError()); } extern "C" void fast_variance_delta_gpu(float *x, float *delta, float *mean, float *variance, int batch, int filters, int spatial, float *variance_delta) { hipLaunchKernelGGL(( fast_variance_delta_kernel), dim3(filters), dim3(BLOCK), 0, 0, x, delta, mean, variance, batch, filters, spatial, variance_delta); check_error(hipPeekAtLastError()); } __global__ void mean_kernel(float *x, int batch, int filters, int spatial, float *mean) { float scale = 1.f/(batch * spatial); int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if (i >= filters) return; int j,k; mean[i] = 0; for(j = 0; j < batch; ++j){ for(k = 0; k < spatial; ++k){ int index = j*filters*spatial + i*spatial + k; mean[i] += x[index]; } } mean[i] *= scale; } __global__ void variance_kernel(float *x, float *mean, int batch, int filters, int spatial, float *variance) { float scale = 1.f/(batch * spatial - 1); int j,k; int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if (i >= filters) return; variance[i] = 0; for(j = 0; j < batch; ++j){ for(k = 0; k < spatial; ++k){ int index = j*filters*spatial + i*spatial + k; variance[i] += powf((x[index] - mean[i]), 2); } } variance[i] *= scale; } __global__ void reorg_kernel(int N, float *x, int w, int h, int c, int batch, int stride, int forward, float *out) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i >= N) return; int in_index = i; int in_w = i%w; i = i/w; int in_h = i%h; i = i/h; int in_c = i%c; i = i/c; int b = i%batch; int out_c = c/(stride*stride); int c2 = in_c % out_c; int offset = in_c / out_c; int w2 = in_w*stride + offset % stride; int h2 = in_h*stride + offset / stride; //printf("%d\n", offset); int out_index = w2 + w*stride*(h2 + h*stride*(c2 + out_c*b)); // printf("%d %d %d\n", w2, h2, c2); //printf("%d %d\n", in_index, out_index); //if(out_index >= N || out_index < 0) printf("bad bad bad \n"); if(forward) out[out_index] = x[in_index]; else out[in_index] = x[out_index]; //if(forward) out[1] = x[1]; //else out[0] = x[0]; } __global__ void axpy_kernel(int N, float ALPHA, float *X, int OFFX, int INCX, float *Y, int OFFY, int INCY) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i < N) 
Y[OFFY+i*INCY] += ALPHA*X[OFFX+i*INCX]; } __global__ void pow_kernel(int N, float ALPHA, float *X, int INCX, float *Y, int INCY) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i < N) Y[i*INCY] = pow(X[i*INCX], ALPHA); } __global__ void const_kernel(int N, float ALPHA, float *X, int INCX) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i < N) X[i*INCX] = ALPHA; } __global__ void constrain_kernel(int N, float ALPHA, float *X, int INCX) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i < N) X[i*INCX] = fminf(ALPHA, fmaxf(-ALPHA, X[i*INCX])); } __global__ void supp_kernel(int N, float ALPHA, float *X, int INCX) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i < N) { if((X[i*INCX] * X[i*INCX]) < (ALPHA * ALPHA)) X[i*INCX] = 0; } } __global__ void add_kernel(int N, float ALPHA, float *X, int INCX) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i < N) X[i*INCX] += ALPHA; } __global__ void scal_kernel(int N, float ALPHA, float *X, int INCX) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i < N) X[i*INCX] *= ALPHA; } __global__ void fill_kernel(int N, float ALPHA, float *X, int INCX) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i < N) X[i*INCX] = ALPHA; } __global__ void copy_kernel(int N, float *X, int OFFX, int INCX, float *Y, int OFFY, int INCY) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i < N) Y[i*INCY + OFFY] = X[i*INCX + OFFX]; } __global__ void mul_kernel(int N, float *X, int INCX, float *Y, int INCY) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i < N) Y[i*INCY] *= X[i*INCX]; } extern "C" void normalize_gpu(float *x, float *mean, float *variance, int batch, int filters, int spatial) { size_t N = batch*filters*spatial; hipLaunchKernelGGL(( normalize_kernel), dim3(cuda_gridsize(N)), dim3(BLOCK), 0, 0, N, x, mean, variance, batch, filters, spatial); check_error(hipPeekAtLastError()); } __global__ void l2norm_kernel(int N, float *x, float *dx, int batch, int filters, int spatial) { int index = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if (index >= N) return; int b = index / spatial; int i = index % spatial; int f; float sum = 0; for(f = 0; f < filters; ++f){ int index = b*filters*spatial + f*spatial + i; sum += powf(x[index], 2); } sum = sqrtf(sum); if(sum == 0) sum = 1; //printf("%f\n", sum); for(f = 0; f < filters; ++f){ int index = b*filters*spatial + f*spatial + i; x[index] /= sum; dx[index] = (1 - x[index]) / sum; } } extern "C" void l2normalize_gpu(float *x, float *dx, int batch, int filters, int spatial) { size_t N = batch*spatial; hipLaunchKernelGGL(( l2norm_kernel), dim3(cuda_gridsize(N)), dim3(BLOCK), 0, 0, N, x, dx, batch, filters, spatial); check_error(hipPeekAtLastError()); } __global__ void fast_mean_kernel(float *x, int batch, int filters, int spatial, float *mean) { const int threads = BLOCK; __shared__ float local[threads]; int id = threadIdx.x; local[id] = 0; int filter = blockIdx.x; int i, j; for(j = 0; j < batch; ++j){ for(i = 0; i < spatial; i += threads){ int index = j*spatial*filters + filter*spatial + i + id; local[id] += (i+id < spatial) ? 
x[index] : 0; } } __syncthreads(); if(id == 0){ mean[filter] = 0; for(i = 0; i < threads; ++i){ mean[filter] += local[i]; } mean[filter] /= spatial * batch; } } __global__ void fast_variance_kernel(float *x, float *mean, int batch, int filters, int spatial, float *variance) { const int threads = BLOCK; __shared__ float local[threads]; int id = threadIdx.x; local[id] = 0; int filter = blockIdx.x; int i, j; for(j = 0; j < batch; ++j){ for(i = 0; i < spatial; i += threads){ int index = j*spatial*filters + filter*spatial + i + id; local[id] += (i+id < spatial) ? powf((x[index] - mean[filter]), 2) : 0; } } __syncthreads(); if(id == 0){ variance[filter] = 0; for(i = 0; i < threads; ++i){ variance[filter] += local[i]; } variance[filter] /= (spatial * batch - 1); } } extern "C" void fast_mean_gpu(float *x, int batch, int filters, int spatial, float *mean) { hipLaunchKernelGGL(( fast_mean_kernel), dim3(filters), dim3(BLOCK), 0, 0, x, batch, filters, spatial, mean); check_error(hipPeekAtLastError()); } extern "C" void fast_variance_gpu(float *x, float *mean, int batch, int filters, int spatial, float *variance) { hipLaunchKernelGGL(( fast_variance_kernel), dim3(filters), dim3(BLOCK), 0, 0, x, mean, batch, filters, spatial, variance); check_error(hipPeekAtLastError()); } extern "C" void mean_gpu(float *x, int batch, int filters, int spatial, float *mean) { hipLaunchKernelGGL(( mean_kernel), dim3(cuda_gridsize(filters)), dim3(BLOCK), 0, 0, x, batch, filters, spatial, mean); check_error(hipPeekAtLastError()); } extern "C" void variance_gpu(float *x, float *mean, int batch, int filters, int spatial, float *variance) { hipLaunchKernelGGL(( variance_kernel), dim3(cuda_gridsize(filters)), dim3(BLOCK), 0, 0, x, mean, batch, filters, spatial, variance); check_error(hipPeekAtLastError()); } extern "C" void axpy_gpu(int N, float ALPHA, float * X, int INCX, float * Y, int INCY) { axpy_gpu_offset(N, ALPHA, X, 0, INCX, Y, 0, INCY); } extern "C" void pow_gpu(int N, float ALPHA, float * X, int INCX, float * Y, int INCY) { hipLaunchKernelGGL(( pow_kernel), dim3(cuda_gridsize(N)), dim3(BLOCK), 0, 0, N, ALPHA, X, INCX, Y, INCY); check_error(hipPeekAtLastError()); } extern "C" void axpy_gpu_offset(int N, float ALPHA, float * X, int OFFX, int INCX, float * Y, int OFFY, int INCY) { hipLaunchKernelGGL(( axpy_kernel), dim3(cuda_gridsize(N)), dim3(BLOCK), 0, 0, N, ALPHA, X, OFFX, INCX, Y, OFFY, INCY); check_error(hipPeekAtLastError()); } extern "C" void copy_gpu(int N, float * X, int INCX, float * Y, int INCY) { copy_gpu_offset(N, X, 0, INCX, Y, 0, INCY); } extern "C" void mul_gpu(int N, float * X, int INCX, float * Y, int INCY) { hipLaunchKernelGGL(( mul_kernel), dim3(cuda_gridsize(N)), dim3(BLOCK), 0, 0, N, X, INCX, Y, INCY); check_error(hipPeekAtLastError()); } extern "C" void copy_gpu_offset(int N, float * X, int OFFX, int INCX, float * Y, int OFFY, int INCY) { hipLaunchKernelGGL(( copy_kernel), dim3(cuda_gridsize(N)), dim3(BLOCK), 0, 0, N, X, OFFX, INCX, Y, OFFY, INCY); check_error(hipPeekAtLastError()); } __global__ void flatten_kernel(int N, float *x, int spatial, int layers, int batch, int forward, float *out) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i >= N) return; int in_s = i%spatial; i = i/spatial; int in_c = i%layers; i = i/layers; int b = i; int i1 = b*layers*spatial + in_c*spatial + in_s; int i2 = b*layers*spatial + in_s*layers + in_c; if (forward) out[i2] = x[i1]; else out[i1] = x[i2]; } extern "C" void flatten_gpu(float *x, int spatial, int layers, int batch, int forward, 
float *out) { int size = spatial*batch*layers; hipLaunchKernelGGL(( flatten_kernel), dim3(cuda_gridsize(size)), dim3(BLOCK), 0, 0, size, x, spatial, layers, batch, forward, out); check_error(hipPeekAtLastError()); } extern "C" void reorg_gpu(float *x, int w, int h, int c, int batch, int stride, int forward, float *out) { int size = w*h*c*batch; hipLaunchKernelGGL(( reorg_kernel), dim3(cuda_gridsize(size)), dim3(BLOCK), 0, 0, size, x, w, h, c, batch, stride, forward, out); check_error(hipPeekAtLastError()); } __global__ void mask_kernel(int n, float *x, float mask_num, float *mask, float val) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i < n && mask[i] == mask_num) x[i] = val; } extern "C" void mask_gpu(int N, float * X, float mask_num, float * mask, float val) { hipLaunchKernelGGL(( mask_kernel), dim3(cuda_gridsize(N)), dim3(BLOCK), 0, 0, N, X, mask_num, mask, val); check_error(hipPeekAtLastError()); } __global__ void scale_mask_kernel(int n, float *x, float mask_num, float *mask, float scale) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i < n && mask[i] == mask_num) x[i] *= scale; } extern "C" void scale_mask_gpu(int N, float * X, float mask_num, float * mask, float scale) { hipLaunchKernelGGL(( scale_mask_kernel), dim3(cuda_gridsize(N)), dim3(BLOCK), 0, 0, N, X, mask_num, mask, scale); check_error(hipPeekAtLastError()); } extern "C" void const_gpu(int N, float ALPHA, float * X, int INCX) { hipLaunchKernelGGL(( const_kernel), dim3(cuda_gridsize(N)), dim3(BLOCK), 0, 0, N, ALPHA, X, INCX); check_error(hipPeekAtLastError()); } extern "C" void constrain_gpu(int N, float ALPHA, float * X, int INCX) { hipLaunchKernelGGL(( constrain_kernel), dim3(cuda_gridsize(N)), dim3(BLOCK), 0, 0, N, ALPHA, X, INCX); check_error(hipPeekAtLastError()); } extern "C" void add_gpu(int N, float ALPHA, float * X, int INCX) { hipLaunchKernelGGL(( add_kernel), dim3(cuda_gridsize(N)), dim3(BLOCK), 0, 0, N, ALPHA, X, INCX); check_error(hipPeekAtLastError()); } extern "C" void scal_gpu(int N, float ALPHA, float * X, int INCX) { hipLaunchKernelGGL(( scal_kernel), dim3(cuda_gridsize(N)), dim3(BLOCK), 0, 0, N, ALPHA, X, INCX); check_error(hipPeekAtLastError()); } extern "C" void supp_gpu(int N, float ALPHA, float * X, int INCX) { hipLaunchKernelGGL(( supp_kernel), dim3(cuda_gridsize(N)), dim3(BLOCK), 0, 0, N, ALPHA, X, INCX); check_error(hipPeekAtLastError()); } extern "C" void fill_gpu(int N, float ALPHA, float * X, int INCX) { hipLaunchKernelGGL(( fill_kernel), dim3(cuda_gridsize(N)), dim3(BLOCK), 0, 0, N, ALPHA, X, INCX); check_error(hipPeekAtLastError()); } __global__ void shortcut_kernel(int size, int minw, int minh, int minc, int stride, int sample, int batch, int w1, int h1, int c1, float *add, int w2, int h2, int c2, float s1, float s2, float *out) { int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if (id >= size) return; int i = id % minw; id /= minw; int j = id % minh; id /= minh; int k = id % minc; id /= minc; int b = id % batch; int out_index = i*sample + w2*(j*sample + h2*(k + c2*b)); int add_index = i*stride + w1*(j*stride + h1*(k + c1*b)); out[out_index] = s1*out[out_index] + s2*add[add_index]; //out[out_index] += add[add_index]; } extern "C" void shortcut_gpu(int batch, int w1, int h1, int c1, float *add, int w2, int h2, int c2, float s1, float s2, float *out) { int minw = (w1 < w2) ? w1 : w2; int minh = (h1 < h2) ? h1 : h2; int minc = (c1 < c2) ? 
c1 : c2; int stride = w1/w2; int sample = w2/w1; assert(stride == h1/h2); assert(sample == h2/h1); if(stride < 1) stride = 1; if(sample < 1) sample = 1; int size = batch * minw * minh * minc; hipLaunchKernelGGL(( shortcut_kernel), dim3(cuda_gridsize(size)), dim3(BLOCK), 0, 0, size, minw, minh, minc, stride, sample, batch, w1, h1, c1, add, w2, h2, c2, s1, s2, out); check_error(hipPeekAtLastError()); } __global__ void smooth_l1_kernel(int n, float *pred, float *truth, float *delta, float *error) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i < n){ float diff = truth[i] - pred[i]; float abs_val = fabsf(diff); if(abs_val < 1) { error[i] = diff * diff; delta[i] = diff; } else { error[i] = 2*abs_val - 1; delta[i] = (diff > 0) ? 1 : -1; } } } extern "C" void smooth_l1_gpu(int n, float *pred, float *truth, float *delta, float *error) { hipLaunchKernelGGL(( smooth_l1_kernel), dim3(cuda_gridsize(n)), dim3(BLOCK), 0, 0, n, pred, truth, delta, error); check_error(hipPeekAtLastError()); } __global__ void softmax_x_ent_kernel(int n, float *pred, float *truth, float *delta, float *error) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i < n){ float t = truth[i]; float p = pred[i]; error[i] = (t) ? -log(p) : 0; delta[i] = t-p; } } extern "C" void softmax_x_ent_gpu(int n, float *pred, float *truth, float *delta, float *error) { hipLaunchKernelGGL(( softmax_x_ent_kernel), dim3(cuda_gridsize(n)), dim3(BLOCK), 0, 0, n, pred, truth, delta, error); check_error(hipPeekAtLastError()); } __global__ void logistic_x_ent_kernel(int n, float *pred, float *truth, float *delta, float *error) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i < n){ float t = truth[i]; float p = pred[i]; error[i] = -t*log(p+.0000001) - (1-t)*log(1-p+.0000001); delta[i] = t-p; } } extern "C" void logistic_x_ent_gpu(int n, float *pred, float *truth, float *delta, float *error) { hipLaunchKernelGGL(( logistic_x_ent_kernel), dim3(cuda_gridsize(n)), dim3(BLOCK), 0, 0, n, pred, truth, delta, error); check_error(hipPeekAtLastError()); } __global__ void l2_kernel(int n, float *pred, float *truth, float *delta, float *error) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i < n){ float diff = truth[i] - pred[i]; error[i] = diff * diff; //I know this is technically wrong, deal with it. delta[i] = diff; } } extern "C" void l2_gpu(int n, float *pred, float *truth, float *delta, float *error) { hipLaunchKernelGGL(( l2_kernel), dim3(cuda_gridsize(n)), dim3(BLOCK), 0, 0, n, pred, truth, delta, error); check_error(hipPeekAtLastError()); } __global__ void l1_kernel(int n, float *pred, float *truth, float *delta, float *error) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i < n){ float diff = truth[i] - pred[i]; error[i] = abs(diff); delta[i] = (diff > 0) ? 1 : -1; } } extern "C" void l1_gpu(int n, float *pred, float *truth, float *delta, float *error) { hipLaunchKernelGGL(( l1_kernel), dim3(cuda_gridsize(n)), dim3(BLOCK), 0, 0, n, pred, truth, delta, error); check_error(hipPeekAtLastError()); } __global__ void wgan_kernel(int n, float *pred, float *truth, float *delta, float *error) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i < n){ error[i] = truth[i] ? -pred[i] : pred[i]; delta[i] = (truth[i] > 0) ? 
1 : -1; } } extern "C" void wgan_gpu(int n, float *pred, float *truth, float *delta, float *error) { hipLaunchKernelGGL(( wgan_kernel), dim3(cuda_gridsize(n)), dim3(BLOCK), 0, 0, n, pred, truth, delta, error); check_error(hipPeekAtLastError()); } __global__ void weighted_sum_kernel(int n, float *a, float *b, float *s, float *c) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i < n){ c[i] = s[i]*a[i] + (1-s[i])*(b ? b[i] : 0); } } __global__ void deinter_kernel(int NX, float *X, int NY, float *Y, int B, float *OUT) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i < (NX+NY)*B){ int b = i / (NX+NY); int j = i % (NX+NY); if (j < NX){ if(X) X[b*NX + j] += OUT[i]; } else { if(Y) Y[b*NY + j - NX] += OUT[i]; } } } extern "C" void deinter_gpu(int NX, float *X, int NY, float *Y, int B, float *OUT) { hipLaunchKernelGGL(( deinter_kernel), dim3(cuda_gridsize((NX+NY)*B)), dim3(BLOCK), 0, 0, NX, X, NY, Y, B, OUT); check_error(hipPeekAtLastError()); } __global__ void inter_kernel(int NX, float *X, int NY, float *Y, int B, float *OUT) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i < (NX+NY)*B){ int b = i / (NX+NY); int j = i % (NX+NY); if (j < NX){ OUT[i] = X[b*NX + j]; } else { OUT[i] = Y[b*NY + j - NX]; } } } extern "C" void inter_gpu(int NX, float *X, int NY, float *Y, int B, float *OUT) { hipLaunchKernelGGL(( inter_kernel), dim3(cuda_gridsize((NX+NY)*B)), dim3(BLOCK), 0, 0, NX, X, NY, Y, B, OUT); check_error(hipPeekAtLastError()); } extern "C" void weighted_sum_gpu(float *a, float *b, float *s, int num, float *c) { hipLaunchKernelGGL(( weighted_sum_kernel), dim3(cuda_gridsize(num)), dim3(BLOCK), 0, 0, num, a, b, s, c); check_error(hipPeekAtLastError()); } __global__ void weighted_delta_kernel(int n, float *a, float *b, float *s, float *da, float *db, float *ds, float *dc) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i < n){ if(da) da[i] += dc[i] * s[i]; if(db) db[i] += dc[i] * (1-s[i]); ds[i] += dc[i] * (a[i] - b[i]); } } extern "C" void weighted_delta_gpu(float *a, float *b, float *s, float *da, float *db, float *ds, int num, float *dc) { hipLaunchKernelGGL(( weighted_delta_kernel), dim3(cuda_gridsize(num)), dim3(BLOCK), 0, 0, num, a, b, s, da, db, ds, dc); check_error(hipPeekAtLastError()); } __global__ void mult_add_into_kernel(int n, float *a, float *b, float *c) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i < n){ c[i] += a[i]*b[i]; } } extern "C" void mult_add_into_gpu(int num, float *a, float *b, float *c) { hipLaunchKernelGGL(( mult_add_into_kernel), dim3(cuda_gridsize(num)), dim3(BLOCK), 0, 0, num, a, b, c); check_error(hipPeekAtLastError()); } __device__ void softmax_device(float *input, int n, float temp, int stride, float *output) { int i; float sum = 0; float largest = -INFINITY; for(i = 0; i < n; ++i){ int val = input[i*stride]; largest = (val>largest) ? 
val : largest; } for(i = 0; i < n; ++i){ float e = expf(input[i*stride]/temp - largest/temp); sum += e; output[i*stride] = e; } for(i = 0; i < n; ++i){ output[i*stride] /= sum; } } __global__ void softmax_tree_kernel(float *input, int spatial, int batch, int stride, float temp, float *output, int groups, int *group_size, int *group_offset) { int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if (id >= spatial*batch*groups) return; int s = id % spatial; id = id / spatial; int g = id % groups; int b = id / groups; int goff = group_offset[g]*spatial; int boff = b*stride; softmax_device(input + goff + boff + s, group_size[g], temp, spatial, output + goff + boff + s); } extern "C" void softmax_tree(float *input, int spatial, int batch, int stride, float temp, float *output, tree hier) { int *tree_groups_size = cuda_make_int_array(hier.group_size, hier.groups); int *tree_groups_offset = cuda_make_int_array(hier.group_offset, hier.groups); /* static int *tree_groups_size = 0; static int *tree_groups_offset = 0; if(!tree_groups_size){ tree_groups_size = cuda_make_int_array(hier.group_size, hier.groups); tree_groups_offset = cuda_make_int_array(hier.group_offset, hier.groups); } */ int num = spatial*batch*hier.groups; hipLaunchKernelGGL(( softmax_tree_kernel), dim3(cuda_gridsize(num)), dim3(BLOCK), 0, 0, input, spatial, batch, stride, temp, output, hier.groups, tree_groups_size, tree_groups_offset); check_error(hipPeekAtLastError()); cuda_free((float *)tree_groups_size); cuda_free((float *)tree_groups_offset); } __global__ void softmax_kernel(float *input, int n, int batch, int batch_offset, int groups, int group_offset, int stride, float temp, float *output) { int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if (id >= batch*groups) return; int b = id / groups; int g = id % groups; softmax_device(input + b*batch_offset + g*group_offset, n, temp, stride, output + b*batch_offset + g*group_offset); } extern "C" void softmax_gpu(float *input, int n, int batch, int batch_offset, int groups, int group_offset, int stride, float temp, float *output) { hipLaunchKernelGGL(( softmax_kernel), dim3(cuda_gridsize(batch*groups)), dim3(BLOCK), 0, 0, input, n, batch, batch_offset, groups, group_offset, stride, temp, output); check_error(hipPeekAtLastError()); } /* __global__ void upsample_kernel(size_t N, float *x, int w, int h, int c, int batch, int stride, int forward, float scale, float *out) { size_t i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i >= N) return; int out_index = i; int out_w = i%(w*stride); i = i/(w*stride); int out_h = i%(h*stride); i = i/(h*stride); int out_c = i%c; i = i/c; int b = i%batch; int in_w = out_w / stride; int in_h = out_h / stride; int in_c = out_c; int in_index = b*w*h*c + in_c*w*h + in_h*w + in_w; if(forward) out[out_index] += scale * x[in_index]; else atomicAdd(x+in_index, scale * out[out_index]); } extern "C" void upsample_gpu(float *in, int w, int h, int c, int batch, int stride, int forward, float scale, float *out) { size_t size = w*h*c*batch*stride*stride; upsample_kernel<<<cuda_gridsize(size), BLOCK>>>(size, in, w, h, c, batch, stride, forward, scale, out); check_error(hipPeekAtLastError()); } */ __global__ void upsample_kernel(size_t N, int batch, int forward, int c, float ratio_w, float ratio_h, float *in, int in_w, int in_h, float *out, int out_w, int out_h) { size_t i = (blockIdx.x + blockIdx.y*gridDim.x)*blockDim.x + threadIdx.x; if (i >= N) return; int out_idx = i; int ow = i % out_w; i /= out_w; 
int oh = i % out_h; i /= out_h; int oc = i % c; i /= c; int b = i % batch; int iw = ow*ratio_w; int ih = oh*ratio_h; int ic = oc; int in_idx = b*in_w*in_h*c + ic*in_w*in_h + ih*in_w + iw; if (forward) out[out_idx] += in[in_idx]; else atomicAdd(in + in_idx, out[out_idx]); } extern "C" void upsample_gpu(int batch, int c, int forward, float *in, int in_w, int in_h, float *out, int out_w, int out_h) { size_t size = out_w*out_h*c*batch; const float ratio_w = (float)in_w / out_w; const float ratio_h = (float)in_h / out_h; hipLaunchKernelGGL(( upsample_kernel), dim3(cuda_gridsize(size)), dim3(BLOCK), 0, 0, size, batch, forward, c, ratio_w, ratio_h, in, in_w, in_h, out, out_w, out_h); check_error(hipPeekAtLastError()); }
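/*
 * Hedged usage sketch: the element-wise helpers defined above (fill_gpu,
 * axpy_gpu, copy_gpu) operate on raw device pointers, so a host driver only
 * needs the HIP runtime to allocate buffers and read results back. The
 * function name blas_gpu_usage_example, the buffer length and the scalar
 * values are illustrative assumptions, not part of the original file.
 */
extern "C" void blas_gpu_usage_example(void)
{
    const int n = 1024;
    float host[1024];
    float *x, *y;
    hipMalloc((void**)&x, n * sizeof(float));
    hipMalloc((void**)&y, n * sizeof(float));

    fill_gpu(n, 1.0f, x, 1);        /* x[i] = 1 */
    fill_gpu(n, 2.0f, y, 1);        /* y[i] = 2 */
    axpy_gpu(n, 0.5f, x, 1, y, 1);  /* y[i] += 0.5f * x[i]  -> 2.5 */
    copy_gpu(n, y, 1, x, 1);        /* copy y into x */

    hipMemcpy(host, x, n * sizeof(float), hipMemcpyDeviceToHost);

    hipFree(x);
    hipFree(y);
}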
aab790935ffff650a1a3da9079a2831b2fb77f8b.cu
#include "cuda_runtime.h" #include "curand.h" #include "cublas_v2.h" #include <assert.h> extern "C" { #include "blas.h" #include "cuda.h" #include "utils.h" } __global__ void scale_bias_kernel(float *output, float *biases, int n, int size) { int offset = blockIdx.x * blockDim.x + threadIdx.x; int filter = blockIdx.y; int batch = blockIdx.z; if(offset < size) output[(batch*n+filter)*size + offset] *= biases[filter]; } void scale_bias_gpu(float *output, float *biases, int batch, int n, int size) { dim3 dimGrid((size-1)/BLOCK + 1, n, batch); dim3 dimBlock(BLOCK, 1, 1); scale_bias_kernel<<<dimGrid, dimBlock>>>(output, biases, n, size); check_error(cudaPeekAtLastError()); } __global__ void backward_scale_kernel(float *x_norm, float *delta, int batch, int n, int size, float *scale_updates) { __shared__ float part[BLOCK]; int i,b; int filter = blockIdx.x; int p = threadIdx.x; float sum = 0; for(b = 0; b < batch; ++b){ for(i = 0; i < size; i += BLOCK){ int index = p + i + size*(filter + n*b); sum += (p+i < size) ? delta[index]*x_norm[index] : 0; } } part[p] = sum; __syncthreads(); if (p == 0) { for(i = 0; i < BLOCK; ++i) scale_updates[filter] += part[i]; } } void backward_scale_gpu(float *x_norm, float *delta, int batch, int n, int size, float *scale_updates) { backward_scale_kernel<<<n, BLOCK>>>(x_norm, delta, batch, n, size, scale_updates); check_error(cudaPeekAtLastError()); } __global__ void add_bias_kernel(float *output, float *biases, int batch, int n, int size) { int index = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if (index >= n*size*batch) return; int i = index % size; index /= size; int j = index % n; index /= n; int k = index; output[(k*n+j)*size + i] += biases[j]; } void add_bias_gpu(float *output, float *biases, int batch, int n, int size) { int num = n*size*batch; add_bias_kernel<<<cuda_gridsize(num), BLOCK>>>(output, biases, batch, n, size); check_error(cudaPeekAtLastError()); } __global__ void backward_bias_conn_kernel(float *bias_updates, float *delta, int batch, int n) { int index = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if (index >= n) return; int b; float sum = 0; for(b = 0; b < batch; ++b){ int i = b*n + index; sum += delta[i]; } bias_updates[index] += sum; } __global__ void backward_bias_kernel(float *bias_updates, float *delta, int batch, int n, int size) { __shared__ float part[BLOCK]; int i,b; int filter = blockIdx.x; int p = threadIdx.x; float sum = 0; for(b = 0; b < batch; ++b){ for(i = 0; i < size; i += BLOCK){ int index = p + i + size*(filter + n*b); sum += (p+i < size) ? 
delta[index] : 0; } } part[p] = sum; __syncthreads(); if (p == 0) { for(i = 0; i < BLOCK; ++i) bias_updates[filter] += part[i]; } } void backward_bias_gpu(float *bias_updates, float *delta, int batch, int n, int size) { if(size == 1){ backward_bias_conn_kernel<<<cuda_gridsize(n), BLOCK>>>(bias_updates, delta, batch, n); }else{ backward_bias_kernel<<<n, BLOCK>>>(bias_updates, delta, batch, n, size); } check_error(cudaPeekAtLastError()); } /* __global__ void dot_kernel(float *output, float scale, int batch, int n, int size, float *delta) { int index = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; int f1 = index / n; int f2 = index % n; if (f2 <= f1) return; float sum = 0; float norm1 = 0; float norm2 = 0; int b, i; for(b = 0; b < batch; ++b){ for(i = 0; i < size; ++i){ int i1 = b * size * n + f1 * size + i; int i2 = b * size * n + f2 * size + i; sum += output[i1] * output[i2]; norm1 += output[i1] * output[i1]; norm2 += output[i2] * output[i2]; } } norm1 = sqrt(norm1); norm2 = sqrt(norm2); float norm = norm1 * norm2; sum = sum / norm; for(b = 0; b < batch; ++b){ for(i = 0; i < size; ++i){ int i1 = b * size * n + f1 * size + i; int i2 = b * size * n + f2 * size + i; delta[i1] += - scale * sum * output[i2] / norm; delta[i2] += - scale * sum * output[i1] / norm; } } } void dot_error_gpu(layer l) { dot_kernel<<<cuda_gridsize(l.n*l.n), BLOCK>>>(l.output_gpu, l.dot, l.batch, l.n, l.out_w * l.out_h, l.delta_gpu); check_error(cudaPeekAtLastError()); } */ __global__ void adam_kernel(int N, float *x, float *m, float *v, float B1, float B2, float rate, float eps, int t) { int index = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if (index >= N) return; float mhat = m[index] / (1.f - powf(B1, t)); float vhat = v[index] / (1.f - powf(B2, t)); x[index] = x[index] + rate * mhat / (sqrtf(vhat) + eps); } extern "C" void adam_gpu(int n, float *x, float *m, float *v, float B1, float B2, float rate, float eps, int t) { adam_kernel<<<cuda_gridsize(n), BLOCK>>>(n, x, m, v, B1, B2, rate, eps, t); check_error(cudaPeekAtLastError()); } extern "C" void adam_update_gpu(float *w, float *d, float *m, float *v, float B1, float B2, float eps, float decay, float rate, int n, int batch, int t) { scal_gpu(n, B1, m, 1); scal_gpu(n, B2, v, 1); axpy_gpu(n, -decay*batch, w, 1, d, 1); axpy_gpu(n, (1-B1), d, 1, m, 1); mul_gpu(n, d, 1, d, 1); axpy_gpu(n, (1-B2), d, 1, v, 1); adam_gpu(n, w, m, v, B1, B2, rate, eps, t); fill_gpu(n, 0, d, 1); } __global__ void normalize_kernel(int N, float *x, float *mean, float *variance, int batch, int filters, int spatial) { int index = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if (index >= N) return; int f = (index/spatial)%filters; x[index] = (x[index] - mean[f])/(sqrtf(variance[f] + .00001f)); } __global__ void normalize_delta_kernel(int N, float *x, float *mean, float *variance, float *mean_delta, float *variance_delta, int batch, int filters, int spatial, float *delta) { int index = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if (index >= N) return; int f = (index/spatial)%filters; delta[index] = delta[index] * 1.f/(sqrtf(variance[f] + .00001f)) + variance_delta[f] * 2.f * (x[index] - mean[f]) / (spatial * batch) + mean_delta[f]/(spatial*batch); } extern "C" void normalize_delta_gpu(float *x, float *mean, float *variance, float *mean_delta, float *variance_delta, int batch, int filters, int spatial, float *delta) { size_t N = batch*filters*spatial; normalize_delta_kernel<<<cuda_gridsize(N), BLOCK>>>(N, x, mean, 
variance, mean_delta, variance_delta, batch, filters, spatial, delta); check_error(cudaPeekAtLastError()); } __global__ void variance_delta_kernel(float *x, float *delta, float *mean, float *variance, int batch, int filters, int spatial, float *variance_delta) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if (i >= filters) return; int j,k; variance_delta[i] = 0; for(j = 0; j < batch; ++j){ for(k = 0; k < spatial; ++k){ int index = j*filters*spatial + i*spatial + k; variance_delta[i] += delta[index]*(x[index] - mean[i]); } } variance_delta[i] *= -.5f * powf(variance[i] + .00001f, (float)(-3.f/2.f)); } __global__ void accumulate_kernel(float *x, int n, int groups, float *sum) { int k; int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if (i >= groups) return; sum[i] = 0; for(k = 0; k < n; ++k){ sum[i] += x[k*groups + i]; } } __global__ void fast_mean_delta_kernel(float *delta, float *variance, int batch, int filters, int spatial, float *mean_delta) { const int threads = BLOCK; __shared__ float local[threads]; int id = threadIdx.x; local[id] = 0; int filter = blockIdx.x; int i, j; for(j = 0; j < batch; ++j){ for(i = 0; i < spatial; i += threads){ int index = j*spatial*filters + filter*spatial + i + id; local[id] += (i+id < spatial) ? delta[index] : 0; } } __syncthreads(); if(id == 0){ mean_delta[filter] = 0; for(i = 0; i < threads; ++i){ mean_delta[filter] += local[i]; } mean_delta[filter] *= (-1.f/sqrtf(variance[filter] + .00001f)); } } __global__ void fast_variance_delta_kernel(float *x, float *delta, float *mean, float *variance, int batch, int filters, int spatial, float *variance_delta) { const int threads = BLOCK; __shared__ float local[threads]; int id = threadIdx.x; local[id] = 0; int filter = blockIdx.x; int i, j; for(j = 0; j < batch; ++j){ for(i = 0; i < spatial; i += threads){ int index = j*spatial*filters + filter*spatial + i + id; local[id] += (i+id < spatial) ? 
delta[index]*(x[index] - mean[filter]) : 0; } } __syncthreads(); if(id == 0){ variance_delta[filter] = 0; for(i = 0; i < threads; ++i){ variance_delta[filter] += local[i]; } variance_delta[filter] *= -.5f * powf(variance[filter] + .00001f, (float)(-3.f/2.f)); } } __global__ void mean_delta_kernel(float *delta, float *variance, int batch, int filters, int spatial, float *mean_delta) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if (i >= filters) return; int j,k; mean_delta[i] = 0; for (j = 0; j < batch; ++j) { for (k = 0; k < spatial; ++k) { int index = j*filters*spatial + i*spatial + k; mean_delta[i] += delta[index]; } } mean_delta[i] *= (-1.f/sqrtf(variance[i] + .00001f)); } extern "C" void mean_delta_gpu(float *delta, float *variance, int batch, int filters, int spatial, float *mean_delta) { mean_delta_kernel<<<cuda_gridsize(filters), BLOCK>>>(delta, variance, batch, filters, spatial, mean_delta); check_error(cudaPeekAtLastError()); } extern "C" void fast_mean_delta_gpu(float *delta, float *variance, int batch, int filters, int spatial, float *mean_delta) { fast_mean_delta_kernel<<<filters, BLOCK>>>(delta, variance, batch, filters, spatial, mean_delta); check_error(cudaPeekAtLastError()); } extern "C" void fast_variance_delta_gpu(float *x, float *delta, float *mean, float *variance, int batch, int filters, int spatial, float *variance_delta) { fast_variance_delta_kernel<<<filters, BLOCK>>>(x, delta, mean, variance, batch, filters, spatial, variance_delta); check_error(cudaPeekAtLastError()); } __global__ void mean_kernel(float *x, int batch, int filters, int spatial, float *mean) { float scale = 1.f/(batch * spatial); int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if (i >= filters) return; int j,k; mean[i] = 0; for(j = 0; j < batch; ++j){ for(k = 0; k < spatial; ++k){ int index = j*filters*spatial + i*spatial + k; mean[i] += x[index]; } } mean[i] *= scale; } __global__ void variance_kernel(float *x, float *mean, int batch, int filters, int spatial, float *variance) { float scale = 1.f/(batch * spatial - 1); int j,k; int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if (i >= filters) return; variance[i] = 0; for(j = 0; j < batch; ++j){ for(k = 0; k < spatial; ++k){ int index = j*filters*spatial + i*spatial + k; variance[i] += powf((x[index] - mean[i]), 2); } } variance[i] *= scale; } __global__ void reorg_kernel(int N, float *x, int w, int h, int c, int batch, int stride, int forward, float *out) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i >= N) return; int in_index = i; int in_w = i%w; i = i/w; int in_h = i%h; i = i/h; int in_c = i%c; i = i/c; int b = i%batch; int out_c = c/(stride*stride); int c2 = in_c % out_c; int offset = in_c / out_c; int w2 = in_w*stride + offset % stride; int h2 = in_h*stride + offset / stride; //printf("%d\n", offset); int out_index = w2 + w*stride*(h2 + h*stride*(c2 + out_c*b)); // printf("%d %d %d\n", w2, h2, c2); //printf("%d %d\n", in_index, out_index); //if(out_index >= N || out_index < 0) printf("bad bad bad \n"); if(forward) out[out_index] = x[in_index]; else out[in_index] = x[out_index]; //if(forward) out[1] = x[1]; //else out[0] = x[0]; } __global__ void axpy_kernel(int N, float ALPHA, float *X, int OFFX, int INCX, float *Y, int OFFY, int INCY) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i < N) Y[OFFY+i*INCY] += ALPHA*X[OFFX+i*INCX]; } __global__ void pow_kernel(int N, float ALPHA, float *X, int INCX, float *Y, 
int INCY) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i < N) Y[i*INCY] = pow(X[i*INCX], ALPHA); } __global__ void const_kernel(int N, float ALPHA, float *X, int INCX) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i < N) X[i*INCX] = ALPHA; } __global__ void constrain_kernel(int N, float ALPHA, float *X, int INCX) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i < N) X[i*INCX] = fminf(ALPHA, fmaxf(-ALPHA, X[i*INCX])); } __global__ void supp_kernel(int N, float ALPHA, float *X, int INCX) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i < N) { if((X[i*INCX] * X[i*INCX]) < (ALPHA * ALPHA)) X[i*INCX] = 0; } } __global__ void add_kernel(int N, float ALPHA, float *X, int INCX) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i < N) X[i*INCX] += ALPHA; } __global__ void scal_kernel(int N, float ALPHA, float *X, int INCX) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i < N) X[i*INCX] *= ALPHA; } __global__ void fill_kernel(int N, float ALPHA, float *X, int INCX) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i < N) X[i*INCX] = ALPHA; } __global__ void copy_kernel(int N, float *X, int OFFX, int INCX, float *Y, int OFFY, int INCY) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i < N) Y[i*INCY + OFFY] = X[i*INCX + OFFX]; } __global__ void mul_kernel(int N, float *X, int INCX, float *Y, int INCY) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i < N) Y[i*INCY] *= X[i*INCX]; } extern "C" void normalize_gpu(float *x, float *mean, float *variance, int batch, int filters, int spatial) { size_t N = batch*filters*spatial; normalize_kernel<<<cuda_gridsize(N), BLOCK>>>(N, x, mean, variance, batch, filters, spatial); check_error(cudaPeekAtLastError()); } __global__ void l2norm_kernel(int N, float *x, float *dx, int batch, int filters, int spatial) { int index = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if (index >= N) return; int b = index / spatial; int i = index % spatial; int f; float sum = 0; for(f = 0; f < filters; ++f){ int index = b*filters*spatial + f*spatial + i; sum += powf(x[index], 2); } sum = sqrtf(sum); if(sum == 0) sum = 1; //printf("%f\n", sum); for(f = 0; f < filters; ++f){ int index = b*filters*spatial + f*spatial + i; x[index] /= sum; dx[index] = (1 - x[index]) / sum; } } extern "C" void l2normalize_gpu(float *x, float *dx, int batch, int filters, int spatial) { size_t N = batch*spatial; l2norm_kernel<<<cuda_gridsize(N), BLOCK>>>(N, x, dx, batch, filters, spatial); check_error(cudaPeekAtLastError()); } __global__ void fast_mean_kernel(float *x, int batch, int filters, int spatial, float *mean) { const int threads = BLOCK; __shared__ float local[threads]; int id = threadIdx.x; local[id] = 0; int filter = blockIdx.x; int i, j; for(j = 0; j < batch; ++j){ for(i = 0; i < spatial; i += threads){ int index = j*spatial*filters + filter*spatial + i + id; local[id] += (i+id < spatial) ? 
x[index] : 0; } } __syncthreads(); if(id == 0){ mean[filter] = 0; for(i = 0; i < threads; ++i){ mean[filter] += local[i]; } mean[filter] /= spatial * batch; } } __global__ void fast_variance_kernel(float *x, float *mean, int batch, int filters, int spatial, float *variance) { const int threads = BLOCK; __shared__ float local[threads]; int id = threadIdx.x; local[id] = 0; int filter = blockIdx.x; int i, j; for(j = 0; j < batch; ++j){ for(i = 0; i < spatial; i += threads){ int index = j*spatial*filters + filter*spatial + i + id; local[id] += (i+id < spatial) ? powf((x[index] - mean[filter]), 2) : 0; } } __syncthreads(); if(id == 0){ variance[filter] = 0; for(i = 0; i < threads; ++i){ variance[filter] += local[i]; } variance[filter] /= (spatial * batch - 1); } } extern "C" void fast_mean_gpu(float *x, int batch, int filters, int spatial, float *mean) { fast_mean_kernel<<<filters, BLOCK>>>(x, batch, filters, spatial, mean); check_error(cudaPeekAtLastError()); } extern "C" void fast_variance_gpu(float *x, float *mean, int batch, int filters, int spatial, float *variance) { fast_variance_kernel<<<filters, BLOCK>>>(x, mean, batch, filters, spatial, variance); check_error(cudaPeekAtLastError()); } extern "C" void mean_gpu(float *x, int batch, int filters, int spatial, float *mean) { mean_kernel<<<cuda_gridsize(filters), BLOCK>>>(x, batch, filters, spatial, mean); check_error(cudaPeekAtLastError()); } extern "C" void variance_gpu(float *x, float *mean, int batch, int filters, int spatial, float *variance) { variance_kernel<<<cuda_gridsize(filters), BLOCK>>>(x, mean, batch, filters, spatial, variance); check_error(cudaPeekAtLastError()); } extern "C" void axpy_gpu(int N, float ALPHA, float * X, int INCX, float * Y, int INCY) { axpy_gpu_offset(N, ALPHA, X, 0, INCX, Y, 0, INCY); } extern "C" void pow_gpu(int N, float ALPHA, float * X, int INCX, float * Y, int INCY) { pow_kernel<<<cuda_gridsize(N), BLOCK>>>(N, ALPHA, X, INCX, Y, INCY); check_error(cudaPeekAtLastError()); } extern "C" void axpy_gpu_offset(int N, float ALPHA, float * X, int OFFX, int INCX, float * Y, int OFFY, int INCY) { axpy_kernel<<<cuda_gridsize(N), BLOCK>>>(N, ALPHA, X, OFFX, INCX, Y, OFFY, INCY); check_error(cudaPeekAtLastError()); } extern "C" void copy_gpu(int N, float * X, int INCX, float * Y, int INCY) { copy_gpu_offset(N, X, 0, INCX, Y, 0, INCY); } extern "C" void mul_gpu(int N, float * X, int INCX, float * Y, int INCY) { mul_kernel<<<cuda_gridsize(N), BLOCK>>>(N, X, INCX, Y, INCY); check_error(cudaPeekAtLastError()); } extern "C" void copy_gpu_offset(int N, float * X, int OFFX, int INCX, float * Y, int OFFY, int INCY) { copy_kernel<<<cuda_gridsize(N), BLOCK>>>(N, X, OFFX, INCX, Y, OFFY, INCY); check_error(cudaPeekAtLastError()); } __global__ void flatten_kernel(int N, float *x, int spatial, int layers, int batch, int forward, float *out) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i >= N) return; int in_s = i%spatial; i = i/spatial; int in_c = i%layers; i = i/layers; int b = i; int i1 = b*layers*spatial + in_c*spatial + in_s; int i2 = b*layers*spatial + in_s*layers + in_c; if (forward) out[i2] = x[i1]; else out[i1] = x[i2]; } extern "C" void flatten_gpu(float *x, int spatial, int layers, int batch, int forward, float *out) { int size = spatial*batch*layers; flatten_kernel<<<cuda_gridsize(size), BLOCK>>>(size, x, spatial, layers, batch, forward, out); check_error(cudaPeekAtLastError()); } extern "C" void reorg_gpu(float *x, int w, int h, int c, int batch, int stride, int forward, float *out) { 
int size = w*h*c*batch; reorg_kernel<<<cuda_gridsize(size), BLOCK>>>(size, x, w, h, c, batch, stride, forward, out); check_error(cudaPeekAtLastError()); } __global__ void mask_kernel(int n, float *x, float mask_num, float *mask, float val) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i < n && mask[i] == mask_num) x[i] = val; } extern "C" void mask_gpu(int N, float * X, float mask_num, float * mask, float val) { mask_kernel<<<cuda_gridsize(N), BLOCK>>>(N, X, mask_num, mask, val); check_error(cudaPeekAtLastError()); } __global__ void scale_mask_kernel(int n, float *x, float mask_num, float *mask, float scale) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i < n && mask[i] == mask_num) x[i] *= scale; } extern "C" void scale_mask_gpu(int N, float * X, float mask_num, float * mask, float scale) { scale_mask_kernel<<<cuda_gridsize(N), BLOCK>>>(N, X, mask_num, mask, scale); check_error(cudaPeekAtLastError()); } extern "C" void const_gpu(int N, float ALPHA, float * X, int INCX) { const_kernel<<<cuda_gridsize(N), BLOCK>>>(N, ALPHA, X, INCX); check_error(cudaPeekAtLastError()); } extern "C" void constrain_gpu(int N, float ALPHA, float * X, int INCX) { constrain_kernel<<<cuda_gridsize(N), BLOCK>>>(N, ALPHA, X, INCX); check_error(cudaPeekAtLastError()); } extern "C" void add_gpu(int N, float ALPHA, float * X, int INCX) { add_kernel<<<cuda_gridsize(N), BLOCK>>>(N, ALPHA, X, INCX); check_error(cudaPeekAtLastError()); } extern "C" void scal_gpu(int N, float ALPHA, float * X, int INCX) { scal_kernel<<<cuda_gridsize(N), BLOCK>>>(N, ALPHA, X, INCX); check_error(cudaPeekAtLastError()); } extern "C" void supp_gpu(int N, float ALPHA, float * X, int INCX) { supp_kernel<<<cuda_gridsize(N), BLOCK>>>(N, ALPHA, X, INCX); check_error(cudaPeekAtLastError()); } extern "C" void fill_gpu(int N, float ALPHA, float * X, int INCX) { fill_kernel<<<cuda_gridsize(N), BLOCK>>>(N, ALPHA, X, INCX); check_error(cudaPeekAtLastError()); } __global__ void shortcut_kernel(int size, int minw, int minh, int minc, int stride, int sample, int batch, int w1, int h1, int c1, float *add, int w2, int h2, int c2, float s1, float s2, float *out) { int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if (id >= size) return; int i = id % minw; id /= minw; int j = id % minh; id /= minh; int k = id % minc; id /= minc; int b = id % batch; int out_index = i*sample + w2*(j*sample + h2*(k + c2*b)); int add_index = i*stride + w1*(j*stride + h1*(k + c1*b)); out[out_index] = s1*out[out_index] + s2*add[add_index]; //out[out_index] += add[add_index]; } extern "C" void shortcut_gpu(int batch, int w1, int h1, int c1, float *add, int w2, int h2, int c2, float s1, float s2, float *out) { int minw = (w1 < w2) ? w1 : w2; int minh = (h1 < h2) ? h1 : h2; int minc = (c1 < c2) ? 
c1 : c2; int stride = w1/w2; int sample = w2/w1; assert(stride == h1/h2); assert(sample == h2/h1); if(stride < 1) stride = 1; if(sample < 1) sample = 1; int size = batch * minw * minh * minc; shortcut_kernel<<<cuda_gridsize(size), BLOCK>>>(size, minw, minh, minc, stride, sample, batch, w1, h1, c1, add, w2, h2, c2, s1, s2, out); check_error(cudaPeekAtLastError()); } __global__ void smooth_l1_kernel(int n, float *pred, float *truth, float *delta, float *error) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i < n){ float diff = truth[i] - pred[i]; float abs_val = fabsf(diff); if(abs_val < 1) { error[i] = diff * diff; delta[i] = diff; } else { error[i] = 2*abs_val - 1; delta[i] = (diff > 0) ? 1 : -1; } } } extern "C" void smooth_l1_gpu(int n, float *pred, float *truth, float *delta, float *error) { smooth_l1_kernel<<<cuda_gridsize(n), BLOCK>>>(n, pred, truth, delta, error); check_error(cudaPeekAtLastError()); } __global__ void softmax_x_ent_kernel(int n, float *pred, float *truth, float *delta, float *error) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i < n){ float t = truth[i]; float p = pred[i]; error[i] = (t) ? -log(p) : 0; delta[i] = t-p; } } extern "C" void softmax_x_ent_gpu(int n, float *pred, float *truth, float *delta, float *error) { softmax_x_ent_kernel<<<cuda_gridsize(n), BLOCK>>>(n, pred, truth, delta, error); check_error(cudaPeekAtLastError()); } __global__ void logistic_x_ent_kernel(int n, float *pred, float *truth, float *delta, float *error) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i < n){ float t = truth[i]; float p = pred[i]; error[i] = -t*log(p+.0000001) - (1-t)*log(1-p+.0000001); delta[i] = t-p; } } extern "C" void logistic_x_ent_gpu(int n, float *pred, float *truth, float *delta, float *error) { logistic_x_ent_kernel<<<cuda_gridsize(n), BLOCK>>>(n, pred, truth, delta, error); check_error(cudaPeekAtLastError()); } __global__ void l2_kernel(int n, float *pred, float *truth, float *delta, float *error) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i < n){ float diff = truth[i] - pred[i]; error[i] = diff * diff; //I know this is technically wrong, deal with it. delta[i] = diff; } } extern "C" void l2_gpu(int n, float *pred, float *truth, float *delta, float *error) { l2_kernel<<<cuda_gridsize(n), BLOCK>>>(n, pred, truth, delta, error); check_error(cudaPeekAtLastError()); } __global__ void l1_kernel(int n, float *pred, float *truth, float *delta, float *error) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i < n){ float diff = truth[i] - pred[i]; error[i] = abs(diff); delta[i] = (diff > 0) ? 1 : -1; } } extern "C" void l1_gpu(int n, float *pred, float *truth, float *delta, float *error) { l1_kernel<<<cuda_gridsize(n), BLOCK>>>(n, pred, truth, delta, error); check_error(cudaPeekAtLastError()); } __global__ void wgan_kernel(int n, float *pred, float *truth, float *delta, float *error) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i < n){ error[i] = truth[i] ? -pred[i] : pred[i]; delta[i] = (truth[i] > 0) ? 
1 : -1; } } extern "C" void wgan_gpu(int n, float *pred, float *truth, float *delta, float *error) { wgan_kernel<<<cuda_gridsize(n), BLOCK>>>(n, pred, truth, delta, error); check_error(cudaPeekAtLastError()); } __global__ void weighted_sum_kernel(int n, float *a, float *b, float *s, float *c) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i < n){ c[i] = s[i]*a[i] + (1-s[i])*(b ? b[i] : 0); } } __global__ void deinter_kernel(int NX, float *X, int NY, float *Y, int B, float *OUT) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i < (NX+NY)*B){ int b = i / (NX+NY); int j = i % (NX+NY); if (j < NX){ if(X) X[b*NX + j] += OUT[i]; } else { if(Y) Y[b*NY + j - NX] += OUT[i]; } } } extern "C" void deinter_gpu(int NX, float *X, int NY, float *Y, int B, float *OUT) { deinter_kernel<<<cuda_gridsize((NX+NY)*B), BLOCK>>>(NX, X, NY, Y, B, OUT); check_error(cudaPeekAtLastError()); } __global__ void inter_kernel(int NX, float *X, int NY, float *Y, int B, float *OUT) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i < (NX+NY)*B){ int b = i / (NX+NY); int j = i % (NX+NY); if (j < NX){ OUT[i] = X[b*NX + j]; } else { OUT[i] = Y[b*NY + j - NX]; } } } extern "C" void inter_gpu(int NX, float *X, int NY, float *Y, int B, float *OUT) { inter_kernel<<<cuda_gridsize((NX+NY)*B), BLOCK>>>(NX, X, NY, Y, B, OUT); check_error(cudaPeekAtLastError()); } extern "C" void weighted_sum_gpu(float *a, float *b, float *s, int num, float *c) { weighted_sum_kernel<<<cuda_gridsize(num), BLOCK>>>(num, a, b, s, c); check_error(cudaPeekAtLastError()); } __global__ void weighted_delta_kernel(int n, float *a, float *b, float *s, float *da, float *db, float *ds, float *dc) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i < n){ if(da) da[i] += dc[i] * s[i]; if(db) db[i] += dc[i] * (1-s[i]); ds[i] += dc[i] * (a[i] - b[i]); } } extern "C" void weighted_delta_gpu(float *a, float *b, float *s, float *da, float *db, float *ds, int num, float *dc) { weighted_delta_kernel<<<cuda_gridsize(num), BLOCK>>>(num, a, b, s, da, db, ds, dc); check_error(cudaPeekAtLastError()); } __global__ void mult_add_into_kernel(int n, float *a, float *b, float *c) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i < n){ c[i] += a[i]*b[i]; } } extern "C" void mult_add_into_gpu(int num, float *a, float *b, float *c) { mult_add_into_kernel<<<cuda_gridsize(num), BLOCK>>>(num, a, b, c); check_error(cudaPeekAtLastError()); } __device__ void softmax_device(float *input, int n, float temp, int stride, float *output) { int i; float sum = 0; float largest = -INFINITY; for(i = 0; i < n; ++i){ int val = input[i*stride]; largest = (val>largest) ? 
val : largest; } for(i = 0; i < n; ++i){ float e = expf(input[i*stride]/temp - largest/temp); sum += e; output[i*stride] = e; } for(i = 0; i < n; ++i){ output[i*stride] /= sum; } } __global__ void softmax_tree_kernel(float *input, int spatial, int batch, int stride, float temp, float *output, int groups, int *group_size, int *group_offset) { int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if (id >= spatial*batch*groups) return; int s = id % spatial; id = id / spatial; int g = id % groups; int b = id / groups; int goff = group_offset[g]*spatial; int boff = b*stride; softmax_device(input + goff + boff + s, group_size[g], temp, spatial, output + goff + boff + s); } extern "C" void softmax_tree(float *input, int spatial, int batch, int stride, float temp, float *output, tree hier) { int *tree_groups_size = cuda_make_int_array(hier.group_size, hier.groups); int *tree_groups_offset = cuda_make_int_array(hier.group_offset, hier.groups); /* static int *tree_groups_size = 0; static int *tree_groups_offset = 0; if(!tree_groups_size){ tree_groups_size = cuda_make_int_array(hier.group_size, hier.groups); tree_groups_offset = cuda_make_int_array(hier.group_offset, hier.groups); } */ int num = spatial*batch*hier.groups; softmax_tree_kernel<<<cuda_gridsize(num), BLOCK>>>(input, spatial, batch, stride, temp, output, hier.groups, tree_groups_size, tree_groups_offset); check_error(cudaPeekAtLastError()); cuda_free((float *)tree_groups_size); cuda_free((float *)tree_groups_offset); } __global__ void softmax_kernel(float *input, int n, int batch, int batch_offset, int groups, int group_offset, int stride, float temp, float *output) { int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if (id >= batch*groups) return; int b = id / groups; int g = id % groups; softmax_device(input + b*batch_offset + g*group_offset, n, temp, stride, output + b*batch_offset + g*group_offset); } extern "C" void softmax_gpu(float *input, int n, int batch, int batch_offset, int groups, int group_offset, int stride, float temp, float *output) { softmax_kernel<<<cuda_gridsize(batch*groups), BLOCK>>>(input, n, batch, batch_offset, groups, group_offset, stride, temp, output); check_error(cudaPeekAtLastError()); } /* __global__ void upsample_kernel(size_t N, float *x, int w, int h, int c, int batch, int stride, int forward, float scale, float *out) { size_t i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i >= N) return; int out_index = i; int out_w = i%(w*stride); i = i/(w*stride); int out_h = i%(h*stride); i = i/(h*stride); int out_c = i%c; i = i/c; int b = i%batch; int in_w = out_w / stride; int in_h = out_h / stride; int in_c = out_c; int in_index = b*w*h*c + in_c*w*h + in_h*w + in_w; if(forward) out[out_index] += scale * x[in_index]; else atomicAdd(x+in_index, scale * out[out_index]); } extern "C" void upsample_gpu(float *in, int w, int h, int c, int batch, int stride, int forward, float scale, float *out) { size_t size = w*h*c*batch*stride*stride; upsample_kernel<<<cuda_gridsize(size), BLOCK>>>(size, in, w, h, c, batch, stride, forward, scale, out); check_error(cudaPeekAtLastError()); } */ __global__ void upsample_kernel(size_t N, int batch, int forward, int c, float ratio_w, float ratio_h, float *in, int in_w, int in_h, float *out, int out_w, int out_h) { size_t i = (blockIdx.x + blockIdx.y*gridDim.x)*blockDim.x + threadIdx.x; if (i >= N) return; int out_idx = i; int ow = i % out_w; i /= out_w; int oh = i % out_h; i /= out_h; int oc = i % c; i /= c; int b = i % 
batch; int iw = ow*ratio_w; int ih = oh*ratio_h; int ic = oc; int in_idx = b*in_w*in_h*c + ic*in_w*in_h + ih*in_w + iw; if (forward) out[out_idx] += in[in_idx]; else atomicAdd(in + in_idx, out[out_idx]); } extern "C" void upsample_gpu(int batch, int c, int forward, float *in, int in_w, int in_h, float *out, int out_w, int out_h) { size_t size = out_w*out_h*c*batch; const float ratio_w = (float)in_w / out_w; const float ratio_h = (float)in_h / out_h; upsample_kernel<<<cuda_gridsize(size), BLOCK>>>(size, batch, forward, c, ratio_w, ratio_h, in, in_w, in_h, out, out_w, out_h); check_error(cudaPeekAtLastError()); }
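
The kernels in the file above are launched with cuda_gridsize(n) blocks of BLOCK threads and recover a flat element index as (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x. Neither helper is defined in this file; the following is only a sketch of a grid-size helper consistent with that indexing, and the project's actual definition may differ.

#include <math.h>
#include <cuda_runtime.h>

// Sketch of a grid-size helper matching the flattened 2D-grid indexing used above.
// BLOCK is assumed to be the per-block thread count used at the launch sites.
dim3 cuda_gridsize(size_t n)
{
    size_t k = (n - 1) / BLOCK + 1;        // total blocks needed
    size_t x = k;
    size_t y = 1;
    if (x > 65535) {                       // fold the excess into gridDim.y
        x = (size_t)ceil(sqrt((double)k));
        y = (n - 1) / (x * BLOCK) + 1;
    }
    return dim3(x, y, 1);
}
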
75f13b783f7ff2f6954d7568e2f237b263560e7a.hip
// !!! This is a file automatically generated by hipify!!! // Includes #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <stdlib.h> #include <stdio.h> #include <iostream> #include <iomanip> #include <fstream> #include <ctime> // Definitions #define M_PI 3.14276 #define c 299792458 #define mu0 M_PI*4e-7 #define eta0 c*mu0 // CPU function for source calculation void stageSource(double* V1, double* V2, double* V3, double* V4, int x, int y, double E0, int NY) { /* Stage 1: Source */ // Adapted to be 1D V1[x * NY + y] = V1[x * NY + y] + E0; V2[x * NY + y] = V2[x * NY + y] - E0; V3[x * NY + y] = V3[x * NY + y] - E0; V4[x * NY + y] = V4[x * NY + y] + E0; // Using 1 dimensional arrays is more obvious to work with when porting to GPU } // end func // CPU function void stageScatter(double* V1, double* V2, double* V3, double* V4, int NX, int NY, double Z) { /* Stage 2: Scatter */ // Variables double I = 0, V = 0; // Parallelisable code // for int i = 0; i < NX*NY; i++ for (int x = 0; x < NX; x++) { for (int y = 0; y < NY; y++) { I = (V1[(x * NY) + y] + V4[(x * NY) + y] - V2[(x * NY) + y] - V3[(x * NY) + y]) / (2); // factorized by 2 for unnecessary mathematics V = 2 * V1[x * NY + y] - I; //port1 V1[x * NY + y] = V - V1[x * NY + y]; V = 2 * V2[x * NY + y] + I; //port2 V2[x * NY + y] = V - V2[x * NY + y]; V = 2 * V3[x * NY + y] + I; //port3 V3[x * NY + y] = V - V3[x * NY + y]; V = 2 * V4[x * NY + y] - I; //port4 V4[x * NY + y] = V - V4[x * NY + y]; } } } // end func // CPU Function void stageConnect(double* V1, double* V2, double* V3, double* V4, // Arrays int NX, int NY, // Array arguments double rXmin, double rXmax, double rYmin, double rYmax) { // Boundary conditions /* Stage 3: Connect */ // Variables double tempV = 0; // Connect internals for (int x = 1; x < NX; x++) { for (int y = 0; y < NY; y++) { tempV = V2[x * NY + y]; V2[x * NY + y] = V4[(x - 1) * NY + y]; V4[(x - 1) * NY + y] = tempV; } } for (int x = 0; x < NX; x++) { for (int y = 1; y < NY; y++) { tempV = V1[x * NY + y]; V1[x * NY + y] = V3[x * NY + y - 1]; V3[x * NY + y - 1] = tempV; } } // Connect boundaries for (int x = 0; x < NX; x++) { V3[x * NY + NY - 1] = rYmax * V3[x * NY + NY - 1]; V1[x * NY] = rYmin * V1[x * NY]; // V1[x * NY + 0] = rYmin * V1[x * NY + 0]; } for (int y = 0; y < NY; y++) { V4[(NX - 1) * NY + y] = rXmax * V4[(NX - 1) * NY + y]; V2[y] = rXmin * V2[y]; // V2[0 * NY + y] = rXmin * V2[0 * NY + y]; } } // end func int main() { // Start timer std::clock_t start = std::clock(); /* Variables */ // Changable variables int NX = 200; // number of X int NY = 200; // number of Y int NT = 8192; // number of Times/Iterations double dl = 1; // Retrieval from GPU //*/ double* V1 = new double[int(NX * NY)](); // new double[int(NX*NY)](); // Sets all values to 0 double* V2 = new double[int(NX * NY)](); double* V3 = new double[int(NX * NY)](); double* V4 = new double[int(NX * NY)](); //*/ // Variables and coefficients // Scatter coefficient double Z = eta0 / sqrt(2.); // Boundary connect Coefficiants double rXmin = -1; double rXmax = -1; double rYmin = -1; double rYmax = -1; // impulse parameters double dt = dl / (sqrt(2.) 
* c); double width = 20 * dt * sqrt(2.); double delay = 100 * dt * sqrt(2.); // input position int Ein[] = { 10,10 }; // output/reading position int Eout[] = { 15,15 }; // file output std::ofstream output("CPU.csv"); for (int n = 0; n < NT; n++) { // Variables dependant on n double E0 = (1 / sqrt(2.)) * exp(-(n * dt - delay) * (n * dt - delay) / (width * width)); /* Stage 1: Source */ stageSource(V1, V2, V3, V4, Ein[0], Ein[1], E0, NY); /* Stage 2: Scatter */ stageScatter(V1, V2, V3, V4, NX, NY, Z); /* Stage 3: Connect */ stageConnect(V1, V2, V3, V4, NX, NY, rXmin, rXmax, rYmin, rYmax); output << n * dt << "," << V2[Eout[0] * NY + Eout[1]] + V4[Eout[0] * NY + Eout[1]] << std::endl; if (n % 100 == 0) std::cout << n << std::endl; } // End of loop output.close(); std::cout << "Done: " << ((std::clock() - start) / (double)CLOCKS_PER_SEC) << std::endl; std::cin.get(); } // end main // EOF
75f13b783f7ff2f6954d7568e2f237b263560e7a.cu
// Includes #include "cuda_runtime.h" #include "device_launch_parameters.h" #include <stdlib.h> #include <stdio.h> #include <iostream> #include <iomanip> #include <fstream> #include <ctime> // Definitions #define M_PI 3.14276 #define c 299792458 #define mu0 M_PI*4e-7 #define eta0 c*mu0 // CPU function for source calculation void stageSource(double* V1, double* V2, double* V3, double* V4, int x, int y, double E0, int NY) { /* Stage 1: Source */ // Adapted to be 1D V1[x * NY + y] = V1[x * NY + y] + E0; V2[x * NY + y] = V2[x * NY + y] - E0; V3[x * NY + y] = V3[x * NY + y] - E0; V4[x * NY + y] = V4[x * NY + y] + E0; // Using 1 dimensional arrays is more obvious to work with when porting to GPU } // end func // CPU function void stageScatter(double* V1, double* V2, double* V3, double* V4, int NX, int NY, double Z) { /* Stage 2: Scatter */ // Variables double I = 0, V = 0; // Parallelisable code // for int i = 0; i < NX*NY; i++ for (int x = 0; x < NX; x++) { for (int y = 0; y < NY; y++) { I = (V1[(x * NY) + y] + V4[(x * NY) + y] - V2[(x * NY) + y] - V3[(x * NY) + y]) / (2); // factorized by 2 for unnecessary mathematics V = 2 * V1[x * NY + y] - I; //port1 V1[x * NY + y] = V - V1[x * NY + y]; V = 2 * V2[x * NY + y] + I; //port2 V2[x * NY + y] = V - V2[x * NY + y]; V = 2 * V3[x * NY + y] + I; //port3 V3[x * NY + y] = V - V3[x * NY + y]; V = 2 * V4[x * NY + y] - I; //port4 V4[x * NY + y] = V - V4[x * NY + y]; } } } // end func // CPU Function void stageConnect(double* V1, double* V2, double* V3, double* V4, // Arrays int NX, int NY, // Array arguments double rXmin, double rXmax, double rYmin, double rYmax) { // Boundary conditions /* Stage 3: Connect */ // Variables double tempV = 0; // Connect internals for (int x = 1; x < NX; x++) { for (int y = 0; y < NY; y++) { tempV = V2[x * NY + y]; V2[x * NY + y] = V4[(x - 1) * NY + y]; V4[(x - 1) * NY + y] = tempV; } } for (int x = 0; x < NX; x++) { for (int y = 1; y < NY; y++) { tempV = V1[x * NY + y]; V1[x * NY + y] = V3[x * NY + y - 1]; V3[x * NY + y - 1] = tempV; } } // Connect boundaries for (int x = 0; x < NX; x++) { V3[x * NY + NY - 1] = rYmax * V3[x * NY + NY - 1]; V1[x * NY] = rYmin * V1[x * NY]; // V1[x * NY + 0] = rYmin * V1[x * NY + 0]; } for (int y = 0; y < NY; y++) { V4[(NX - 1) * NY + y] = rXmax * V4[(NX - 1) * NY + y]; V2[y] = rXmin * V2[y]; // V2[0 * NY + y] = rXmin * V2[0 * NY + y]; } } // end func int main() { // Start timer std::clock_t start = std::clock(); /* Variables */ // Changable variables int NX = 200; // number of X int NY = 200; // number of Y int NT = 8192; // number of Times/Iterations double dl = 1; // Retrieval from GPU //*/ double* V1 = new double[int(NX * NY)](); // new double[int(NX*NY)](); // Sets all values to 0 double* V2 = new double[int(NX * NY)](); double* V3 = new double[int(NX * NY)](); double* V4 = new double[int(NX * NY)](); //*/ // Variables and coefficients // Scatter coefficient double Z = eta0 / sqrt(2.); // Boundary connect Coefficiants double rXmin = -1; double rXmax = -1; double rYmin = -1; double rYmax = -1; // impulse parameters double dt = dl / (sqrt(2.) 
* c); double width = 20 * dt * sqrt(2.); double delay = 100 * dt * sqrt(2.); // input position int Ein[] = { 10,10 }; // output/reading position int Eout[] = { 15,15 }; // file output std::ofstream output("CPU.csv"); for (int n = 0; n < NT; n++) { // Variables dependant on n double E0 = (1 / sqrt(2.)) * exp(-(n * dt - delay) * (n * dt - delay) / (width * width)); /* Stage 1: Source */ stageSource(V1, V2, V3, V4, Ein[0], Ein[1], E0, NY); /* Stage 2: Scatter */ stageScatter(V1, V2, V3, V4, NX, NY, Z); /* Stage 3: Connect */ stageConnect(V1, V2, V3, V4, NX, NY, rXmin, rXmax, rYmin, rYmax); output << n * dt << "," << V2[Eout[0] * NY + Eout[1]] + V4[Eout[0] * NY + Eout[1]] << std::endl; if (n % 100 == 0) std::cout << n << std::endl; } // End of loop output.close(); std::cout << "Done: " << ((std::clock() - start) / (double)CLOCKS_PER_SEC) << std::endl; std::cin.get(); } // end main // EOF
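
Both files in the pair above are still pure CPU implementations of the TLM update; the scatter loop is explicitly marked "Parallelisable code" but runs serially. Below is a minimal sketch of how that loop could map onto one CUDA thread per mesh node. The kernel name and launch configuration are illustrative and are not part of the original files.

// Hypothetical one-thread-per-node port of stageScatter. V1..V4 would be device
// buffers of NX*NY doubles, launched e.g. with <<<(NX*NY + 255)/256, 256>>>.
__global__ void stageScatterKernel(double* V1, double* V2, double* V3, double* V4,
                                   int NX, int NY)
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= NX * NY) return;

    double I = (V1[idx] + V4[idx] - V2[idx] - V3[idx]) / 2;
    double V;
    V = 2 * V1[idx] - I; V1[idx] = V - V1[idx];   // port 1
    V = 2 * V2[idx] + I; V2[idx] = V - V2[idx];   // port 2
    V = 2 * V3[idx] + I; V3[idx] = V - V3[idx];   // port 3
    V = 2 * V4[idx] - I; V4[idx] = V - V4[idx];   // port 4
}
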
b3ce6b30e93ec50b63624964e2838347b4bb0e0f.hip
// !!! This is a file automatically generated by hipify!!! /* compile with: nvcc -O3 hw1.cu -o hw1 */ #include <stdio.h> #include <sys/time.h> #include <hip/hip_runtime_api.h> #define IMG_DIMENSION 32 #define N_IMG_PAIRS 10000 #define IMAGE_SIZE 1024 typedef unsigned char uchar; #define OUT #define CUDA_CHECK(f) do { \ hipError_t e = f; \ if (e != hipSuccess) { \ printf("Cuda failure %s:%d: '%s'\n", __FILE__, __LINE__, hipGetErrorString(e)); \ exit(1); \ } \ } while (0) #define SQR(a) ((a) * (a)) double static inline get_time_msec(void) { struct timeval t; gettimeofday(&t, NULL); return t.tv_sec * 1e+3 + t.tv_usec * 1e-3; } /* we won't load actual files. just fill the images with random bytes */ void load_image_pairs(uchar *images1, uchar *images2) { srand(0); for (int i = 0; i < N_IMG_PAIRS * IMG_DIMENSION * IMG_DIMENSION; i++) { images1[i] = rand() % 256; images2[i] = rand() % 256; } } __host__ __device__ bool is_in_image_bounds(int i, int j) { return (i >= 0) && (i < IMG_DIMENSION) && (j >= 0) && (j < IMG_DIMENSION); } __host__ __device__ uchar local_binary_pattern(uchar *image, int i, int j) { uchar center = image[i * IMG_DIMENSION + j]; uchar pattern = 0; if (is_in_image_bounds(i - 1, j - 1)) pattern |= (image[(i - 1) * IMG_DIMENSION + (j - 1)] >= center) << 7; if (is_in_image_bounds(i - 1, j )) pattern |= (image[(i - 1) * IMG_DIMENSION + (j )] >= center) << 6; if (is_in_image_bounds(i - 1, j + 1)) pattern |= (image[(i - 1) * IMG_DIMENSION + (j + 1)] >= center) << 5; if (is_in_image_bounds(i , j + 1)) pattern |= (image[(i ) * IMG_DIMENSION + (j + 1)] >= center) << 4; if (is_in_image_bounds(i + 1, j + 1)) pattern |= (image[(i + 1) * IMG_DIMENSION + (j + 1)] >= center) << 3; if (is_in_image_bounds(i + 1, j )) pattern |= (image[(i + 1) * IMG_DIMENSION + (j )] >= center) << 2; if (is_in_image_bounds(i + 1, j - 1)) pattern |= (image[(i + 1) * IMG_DIMENSION + (j - 1)] >= center) << 1; if (is_in_image_bounds(i , j - 1)) pattern |= (image[(i ) * IMG_DIMENSION + (j - 1)] >= center) << 0; return pattern; } //__device__ void zero void image_to_histogram(uchar *image, int *histogram) { memset(histogram, 0, sizeof(int) * 256); for (int i = 0; i < IMG_DIMENSION; i++) { for (int j = 0; j < IMG_DIMENSION; j++) { uchar pattern = local_binary_pattern(image, i, j); histogram[pattern]++; } } } double histogram_distance(int *h1, int *h2) { /* we'll use the chi-square distance */ double distance = 0; for (int i = 0; i < 256; i++) { if (h1[i] + h2[i] != 0) { distance += ((double)SQR(h1[i] - h2[i])) / (h1[i] + h2[i]); } } return distance; } /* Your __device__ functions and __global__ kernels here */ /* ... 
*/ __global__ void image_to_hisogram_simple(uchar *image1, OUT int *hist1) { int i = threadIdx.x; int j = threadIdx.y; uchar pattern = local_binary_pattern(image1, i, j); atomicAdd(hist1+pattern,1); // __threadfence(); } __global__ void histogram_distance(int *hist1, int *hist2, OUT double *distance) { *distance=0; //__threadfence(); int i = threadIdx.x; if (hist1[i] + hist2[i] != 0){ double temp = (double)((double)SQR(hist1[i] - hist2[i])) / (hist1[i] + hist2[i]); atomicAdd((float*)distance,(float)temp); } } __global__ void image_to_hisogram_shared(uchar *image1, OUT int *hist1) { int i = threadIdx.x; int j = threadIdx.y; __shared__ uchar im[IMAGE_SIZE]; __shared__ int sharedHist[256]; if (i*32+j <256){ sharedHist[i*32+j] = 0; }; im[i*32+j]=image1[i*32+j]; threadfence(); uchar pattern = local_binary_pattern(im, i, j); atomicAdd(sharedHist+pattern,1); threadfence(); if (i*32+j <256){ hist1[i*32+j] = sharedHist[i*32+j]; }; } __global__ void image_to_hisogram_batched(uchar *images, OUT int *hist1) { int i = threadIdx.x; int j = threadIdx.y; int k = blockIdx.x; __shared__ uchar im[IMAGE_SIZE]; im[j+32*i] = images[k*IMAGE_SIZE+j+32*i]; __shared__ int sharedHist[256]; if (i*32+j <256){ sharedHist[i*32+j] = 0; }; threadfence(); uchar pattern = local_binary_pattern(im, i, j); atomicAdd(sharedHist+pattern,1); if (i*32+j <256){ hist1[k*256+i*32+j] = sharedHist[i*32+j]; }; syncthreads(); } __global__ void histogram_distance_batched(int *hist1, int *hist2, OUT double *distance) { *distance=0; //__threadfence(); int i = threadIdx.x; int k = blockIdx.x; if (hist1[256*k+i] + hist2[256*k+i] != 0){ double temp = (double)((double)SQR(hist1[256*k+i] - hist2[256*k+i])) / (hist1[256*k+i] + hist2[256*k+i]); atomicAdd((float*)distance,(float)temp); }; } int main() { uchar *images1; /* we concatenate all images in one huge array */ uchar *images2; CUDA_CHECK( hipHostMalloc(&images1, N_IMG_PAIRS * IMG_DIMENSION * IMG_DIMENSION, 0) ); CUDA_CHECK( hipHostMalloc(&images2, N_IMG_PAIRS * IMG_DIMENSION * IMG_DIMENSION, 0) ); load_image_pairs(images1, images2); double t_start, t_finish; double total_distance; /* using CPU */ printf("\n=== CPU ===\n"); int histogram1[256]; int histogram2[256]; t_start = get_time_msec(); for (int i = 0; i < N_IMG_PAIRS; i++) { image_to_histogram(&images1[i * IMG_DIMENSION * IMG_DIMENSION], histogram1); image_to_histogram(&images2[i * IMG_DIMENSION * IMG_DIMENSION], histogram2); total_distance += histogram_distance(histogram1, histogram2); } t_finish = get_time_msec(); printf("average distance between images %f\n", total_distance / N_IMG_PAIRS); printf("total time %f [msec]\n", t_finish - t_start); /* using GPU task-serial */ printf("\n=== GPU Task Serial ===\n"); do { //* do {} while (0): to keep variables inside this block in their own scope. 
remove if you prefer otherwise *//* //* Your Code Here *//* uchar *gpu_image1, *gpu_image2; // TODO: allocate with hipMalloc CUDA_CHECK(hipMalloc(&gpu_image1,1024*sizeof(uchar))); CUDA_CHECK(hipMalloc(&gpu_image2,1024*sizeof(uchar))); int *gpu_hist1, *gpu_hist2; // TODO: allocate with hipMalloc CUDA_CHECK(hipMalloc(&gpu_hist1,256*sizeof(int))); CUDA_CHECK(hipMalloc(&gpu_hist2,256*sizeof(int))); CUDA_CHECK(hipMemset(gpu_hist1,0,256*sizeof(int))); CUDA_CHECK(hipMemset(gpu_hist2,0,256*sizeof(int))); double *gpu_hist_distance; //TODO: allocate with hipMalloc CUDA_CHECK(hipMalloc(&gpu_hist_distance,sizeof(double))); double cpu_hist_distance; t_start = get_time_msec(); hipProfilerStart(); for (int i = 0; i < N_IMG_PAIRS; i++) { dim3 threadsPerBlock(32,32); // TODO: copy relevant images from images1 and images2 to gpu_image1 and gpu_image2 CUDA_CHECK(hipMemcpy(gpu_image1, images1+i*1024, 1024 * sizeof(uchar), hipMemcpyHostToDevice)); CUDA_CHECK(hipMemcpy(gpu_image2, images2+ i*1024, 1024 * sizeof(uchar), hipMemcpyHostToDevice)); hipLaunchKernelGGL(( image_to_hisogram_simple), dim3(1), dim3(threadsPerBlock), 0, 0, gpu_image2, gpu_hist2); hipLaunchKernelGGL(( image_to_hisogram_simple), dim3(1), dim3(threadsPerBlock), 0, 0, gpu_image1, gpu_hist1); hipLaunchKernelGGL(( histogram_distance), dim3(1), dim3(256), 0, 0, gpu_hist1, gpu_hist2, gpu_hist_distance); //TODO: copy gpu_hist_distance to cpu_hist_distance CUDA_CHECK(hipMemcpy(&cpu_hist_distance, gpu_hist_distance, sizeof(double), hipMemcpyDeviceToHost)); total_distance += cpu_hist_distance; } hipProfilerStop(); CUDA_CHECK(hipFree(gpu_hist1)); CUDA_CHECK(hipFree(gpu_hist2)); CUDA_CHECK(hipFree(gpu_image1)); CUDA_CHECK(hipFree(gpu_image2)); CUDA_CHECK(hipDeviceSynchronize()); t_finish = get_time_msec(); printf("average distance between images %f\n", total_distance / N_IMG_PAIRS); printf("total time %f [msec]\n", t_finish - t_start); } while (0); /* using GPU task-serial + images and histograms in shared memory */ printf("\n=== GPU Task Serial with shared memory ===\n"); do { /* do {} while (0): to keep variables inside this block in their own scope. 
remove if you prefer otherwise */ /* Your Code Here */ uchar *gpu_image1_shared; uchar *gpu_image2_shared; // TODO: allocate with hipMalloc CUDA_CHECK(hipMalloc(&gpu_image1_shared,1024*sizeof(uchar))); CUDA_CHECK(hipMalloc(&gpu_image2_shared,1024*sizeof(uchar))); int *gpu_hist1; int *gpu_hist2; // TODO: allocate with hipMalloc CUDA_CHECK(hipMalloc(&gpu_hist1,256*sizeof(int))); CUDA_CHECK(hipMalloc(&gpu_hist2,256*sizeof(int))); //hipMemset(&gpu_hist1,0,256*sizeof(int)); //hipMemset(&gpu_hist2,0,256*sizeof(int)); double *gpu_hist_distance; //TODO: allocate with hipMalloc CUDA_CHECK(hipMalloc(&gpu_hist_distance,sizeof(double))); double cpu_hist_distance; t_start = get_time_msec(); for (int i = 0; i < N_IMG_PAIRS; i++) { dim3 threadsPerBlock(32,32); // TODO: copy relevant images from images1 and images2 to gpu_image1 and gpu_image2 CUDA_CHECK(hipMemcpy(gpu_image1_shared, images1+i*1024, 1024 * sizeof(uchar), hipMemcpyHostToDevice)); CUDA_CHECK(hipMemcpy(gpu_image2_shared, images2+i*1024, 1024 * sizeof(uchar), hipMemcpyHostToDevice)); hipLaunchKernelGGL(( image_to_hisogram_shared), dim3(1), dim3(threadsPerBlock), 0, 0, gpu_image1_shared, gpu_hist1); hipLaunchKernelGGL(( image_to_hisogram_shared), dim3(1), dim3(threadsPerBlock), 0, 0, gpu_image2_shared, gpu_hist2); //->move to global hiat hipLaunchKernelGGL(( histogram_distance), dim3(1), dim3(256), 0, 0, gpu_hist1, gpu_hist2, gpu_hist_distance); //TODO: copy gpu_hist_distance to cpu_hist_distance CUDA_CHECK(hipMemcpy(&cpu_hist_distance, gpu_hist_distance, sizeof(double), hipMemcpyDeviceToHost)); total_distance += cpu_hist_distance; } CUDA_CHECK(hipDeviceSynchronize()); CUDA_CHECK(hipFree(gpu_hist1)); CUDA_CHECK(hipFree(gpu_hist2)); CUDA_CHECK(hipFree(gpu_image1_shared)); CUDA_CHECK(hipFree(gpu_image2_shared)); t_finish = get_time_msec(); } while (0); printf("average distance between images %f\n", total_distance / N_IMG_PAIRS); printf("total time %f [msec]\n", t_finish - t_start); /* using GPU + batching */ printf("\n=== GPU Batching ===\n"); do { //* do {} while (0): to keep variables inside this block in their own scope. 
remove if you prefer otherwise *//* //* Your Code Here *//* uchar *gpu_image1, *gpu_image2; // TODO: allocate with hipMalloc CUDA_CHECK(hipMalloc(&gpu_image1,N_IMG_PAIRS*1024*sizeof(uchar))); CUDA_CHECK(hipMalloc(&gpu_image2,N_IMG_PAIRS*1024*sizeof(uchar))); int *gpu_hist1, *gpu_hist2; // TODO: allocate with hipMalloc CUDA_CHECK(hipMalloc(&gpu_hist1,N_IMG_PAIRS*256*sizeof(int))); CUDA_CHECK(hipMalloc(&gpu_hist2,N_IMG_PAIRS*256*sizeof(int))); CUDA_CHECK(hipMemset(gpu_hist1,0,N_IMG_PAIRS*256*sizeof(int))); CUDA_CHECK(hipMemset(gpu_hist2,0,N_IMG_PAIRS*256*sizeof(int))); double *gpu_hist_distance; //TODO: allocate with hipMalloc CUDA_CHECK(hipMalloc(&gpu_hist_distance,sizeof(double))); double cpu_hist_distance; t_start = get_time_msec(); dim3 threadsPerBlock(32,32); // TODO: copy relevant images from images1 and images2 to gpu_image1 and gpu_image2 CUDA_CHECK(hipMemcpy(gpu_image1, images1, 1024 *N_IMG_PAIRS* sizeof(uchar), hipMemcpyHostToDevice)); CUDA_CHECK(hipMemcpy(gpu_image2, images2, 1024 *N_IMG_PAIRS* sizeof(uchar), hipMemcpyHostToDevice)); hipLaunchKernelGGL(( image_to_hisogram_batched), dim3(N_IMG_PAIRS), dim3(threadsPerBlock), 0, 0, gpu_image2, gpu_hist2); hipLaunchKernelGGL(( image_to_hisogram_batched), dim3(N_IMG_PAIRS), dim3(threadsPerBlock), 0, 0, gpu_image1, gpu_hist1); hipLaunchKernelGGL(( histogram_distance_batched), dim3(N_IMG_PAIRS), dim3(256), 0, 0, gpu_hist1, gpu_hist2, gpu_hist_distance); //TODO: copy gpu_hist_distance to cpu_hist_distance CUDA_CHECK(hipMemcpy(&cpu_hist_distance, gpu_hist_distance, sizeof(double), hipMemcpyDeviceToHost)); total_distance += cpu_hist_distance; CUDA_CHECK(hipDeviceSynchronize()); CUDA_CHECK(hipFree(gpu_hist1)); CUDA_CHECK(hipFree(gpu_hist2)); CUDA_CHECK(hipFree(gpu_image1)); CUDA_CHECK(hipFree(gpu_image2)); t_finish = get_time_msec(); } while (0); /* Your Code Here */ printf("average distance between images %f\n", total_distance / N_IMG_PAIRS); printf("total time %f [msec]\n", t_finish - t_start); return 0; } //bla
b3ce6b30e93ec50b63624964e2838347b4bb0e0f.cu
/* compile with: nvcc -O3 hw1.cu -o hw1 */ #include <stdio.h> #include <sys/time.h> #include <cuda_profiler_api.h> #define IMG_DIMENSION 32 #define N_IMG_PAIRS 10000 #define IMAGE_SIZE 1024 typedef unsigned char uchar; #define OUT #define CUDA_CHECK(f) do { \ cudaError_t e = f; \ if (e != cudaSuccess) { \ printf("Cuda failure %s:%d: '%s'\n", __FILE__, __LINE__, cudaGetErrorString(e)); \ exit(1); \ } \ } while (0) #define SQR(a) ((a) * (a)) double static inline get_time_msec(void) { struct timeval t; gettimeofday(&t, NULL); return t.tv_sec * 1e+3 + t.tv_usec * 1e-3; } /* we won't load actual files. just fill the images with random bytes */ void load_image_pairs(uchar *images1, uchar *images2) { srand(0); for (int i = 0; i < N_IMG_PAIRS * IMG_DIMENSION * IMG_DIMENSION; i++) { images1[i] = rand() % 256; images2[i] = rand() % 256; } } __host__ __device__ bool is_in_image_bounds(int i, int j) { return (i >= 0) && (i < IMG_DIMENSION) && (j >= 0) && (j < IMG_DIMENSION); } __host__ __device__ uchar local_binary_pattern(uchar *image, int i, int j) { uchar center = image[i * IMG_DIMENSION + j]; uchar pattern = 0; if (is_in_image_bounds(i - 1, j - 1)) pattern |= (image[(i - 1) * IMG_DIMENSION + (j - 1)] >= center) << 7; if (is_in_image_bounds(i - 1, j )) pattern |= (image[(i - 1) * IMG_DIMENSION + (j )] >= center) << 6; if (is_in_image_bounds(i - 1, j + 1)) pattern |= (image[(i - 1) * IMG_DIMENSION + (j + 1)] >= center) << 5; if (is_in_image_bounds(i , j + 1)) pattern |= (image[(i ) * IMG_DIMENSION + (j + 1)] >= center) << 4; if (is_in_image_bounds(i + 1, j + 1)) pattern |= (image[(i + 1) * IMG_DIMENSION + (j + 1)] >= center) << 3; if (is_in_image_bounds(i + 1, j )) pattern |= (image[(i + 1) * IMG_DIMENSION + (j )] >= center) << 2; if (is_in_image_bounds(i + 1, j - 1)) pattern |= (image[(i + 1) * IMG_DIMENSION + (j - 1)] >= center) << 1; if (is_in_image_bounds(i , j - 1)) pattern |= (image[(i ) * IMG_DIMENSION + (j - 1)] >= center) << 0; return pattern; } //__device__ void zero void image_to_histogram(uchar *image, int *histogram) { memset(histogram, 0, sizeof(int) * 256); for (int i = 0; i < IMG_DIMENSION; i++) { for (int j = 0; j < IMG_DIMENSION; j++) { uchar pattern = local_binary_pattern(image, i, j); histogram[pattern]++; } } } double histogram_distance(int *h1, int *h2) { /* we'll use the chi-square distance */ double distance = 0; for (int i = 0; i < 256; i++) { if (h1[i] + h2[i] != 0) { distance += ((double)SQR(h1[i] - h2[i])) / (h1[i] + h2[i]); } } return distance; } /* Your __device__ functions and __global__ kernels here */ /* ... 
*/ __global__ void image_to_hisogram_simple(uchar *image1, OUT int *hist1) { int i = threadIdx.x; int j = threadIdx.y; uchar pattern = local_binary_pattern(image1, i, j); atomicAdd(hist1+pattern,1); // __threadfence(); } __global__ void histogram_distance(int *hist1, int *hist2, OUT double *distance) { *distance=0; //__threadfence(); int i = threadIdx.x; if (hist1[i] + hist2[i] != 0){ double temp = (double)((double)SQR(hist1[i] - hist2[i])) / (hist1[i] + hist2[i]); atomicAdd((float*)distance,(float)temp); } } __global__ void image_to_hisogram_shared(uchar *image1, OUT int *hist1) { int i = threadIdx.x; int j = threadIdx.y; __shared__ uchar im[IMAGE_SIZE]; __shared__ int sharedHist[256]; if (i*32+j <256){ sharedHist[i*32+j] = 0; }; im[i*32+j]=image1[i*32+j]; threadfence(); uchar pattern = local_binary_pattern(im, i, j); atomicAdd(sharedHist+pattern,1); threadfence(); if (i*32+j <256){ hist1[i*32+j] = sharedHist[i*32+j]; }; } __global__ void image_to_hisogram_batched(uchar *images, OUT int *hist1) { int i = threadIdx.x; int j = threadIdx.y; int k = blockIdx.x; __shared__ uchar im[IMAGE_SIZE]; im[j+32*i] = images[k*IMAGE_SIZE+j+32*i]; __shared__ int sharedHist[256]; if (i*32+j <256){ sharedHist[i*32+j] = 0; }; threadfence(); uchar pattern = local_binary_pattern(im, i, j); atomicAdd(sharedHist+pattern,1); if (i*32+j <256){ hist1[k*256+i*32+j] = sharedHist[i*32+j]; }; syncthreads(); } __global__ void histogram_distance_batched(int *hist1, int *hist2, OUT double *distance) { *distance=0; //__threadfence(); int i = threadIdx.x; int k = blockIdx.x; if (hist1[256*k+i] + hist2[256*k+i] != 0){ double temp = (double)((double)SQR(hist1[256*k+i] - hist2[256*k+i])) / (hist1[256*k+i] + hist2[256*k+i]); atomicAdd((float*)distance,(float)temp); }; } int main() { uchar *images1; /* we concatenate all images in one huge array */ uchar *images2; CUDA_CHECK( cudaHostAlloc(&images1, N_IMG_PAIRS * IMG_DIMENSION * IMG_DIMENSION, 0) ); CUDA_CHECK( cudaHostAlloc(&images2, N_IMG_PAIRS * IMG_DIMENSION * IMG_DIMENSION, 0) ); load_image_pairs(images1, images2); double t_start, t_finish; double total_distance; /* using CPU */ printf("\n=== CPU ===\n"); int histogram1[256]; int histogram2[256]; t_start = get_time_msec(); for (int i = 0; i < N_IMG_PAIRS; i++) { image_to_histogram(&images1[i * IMG_DIMENSION * IMG_DIMENSION], histogram1); image_to_histogram(&images2[i * IMG_DIMENSION * IMG_DIMENSION], histogram2); total_distance += histogram_distance(histogram1, histogram2); } t_finish = get_time_msec(); printf("average distance between images %f\n", total_distance / N_IMG_PAIRS); printf("total time %f [msec]\n", t_finish - t_start); /* using GPU task-serial */ printf("\n=== GPU Task Serial ===\n"); do { //* do {} while (0): to keep variables inside this block in their own scope. 
remove if you prefer otherwise *//* //* Your Code Here *//* uchar *gpu_image1, *gpu_image2; // TODO: allocate with cudaMalloc CUDA_CHECK(cudaMalloc(&gpu_image1,1024*sizeof(uchar))); CUDA_CHECK(cudaMalloc(&gpu_image2,1024*sizeof(uchar))); int *gpu_hist1, *gpu_hist2; // TODO: allocate with cudaMalloc CUDA_CHECK(cudaMalloc(&gpu_hist1,256*sizeof(int))); CUDA_CHECK(cudaMalloc(&gpu_hist2,256*sizeof(int))); CUDA_CHECK(cudaMemset(gpu_hist1,0,256*sizeof(int))); CUDA_CHECK(cudaMemset(gpu_hist2,0,256*sizeof(int))); double *gpu_hist_distance; //TODO: allocate with cudaMalloc CUDA_CHECK(cudaMalloc(&gpu_hist_distance,sizeof(double))); double cpu_hist_distance; t_start = get_time_msec(); cudaProfilerStart(); for (int i = 0; i < N_IMG_PAIRS; i++) { dim3 threadsPerBlock(32,32); // TODO: copy relevant images from images1 and images2 to gpu_image1 and gpu_image2 CUDA_CHECK(cudaMemcpy(gpu_image1, images1+i*1024, 1024 * sizeof(uchar), cudaMemcpyHostToDevice)); CUDA_CHECK(cudaMemcpy(gpu_image2, images2+ i*1024, 1024 * sizeof(uchar), cudaMemcpyHostToDevice)); image_to_hisogram_simple<<<1, threadsPerBlock>>>(gpu_image2, gpu_hist2); image_to_hisogram_simple<<<1, threadsPerBlock>>>(gpu_image1, gpu_hist1); histogram_distance<<<1, 256>>>(gpu_hist1, gpu_hist2, gpu_hist_distance); //TODO: copy gpu_hist_distance to cpu_hist_distance CUDA_CHECK(cudaMemcpy(&cpu_hist_distance, gpu_hist_distance, sizeof(double), cudaMemcpyDeviceToHost)); total_distance += cpu_hist_distance; } cudaProfilerStop(); CUDA_CHECK(cudaFree(gpu_hist1)); CUDA_CHECK(cudaFree(gpu_hist2)); CUDA_CHECK(cudaFree(gpu_image1)); CUDA_CHECK(cudaFree(gpu_image2)); CUDA_CHECK(cudaDeviceSynchronize()); t_finish = get_time_msec(); printf("average distance between images %f\n", total_distance / N_IMG_PAIRS); printf("total time %f [msec]\n", t_finish - t_start); } while (0); /* using GPU task-serial + images and histograms in shared memory */ printf("\n=== GPU Task Serial with shared memory ===\n"); do { /* do {} while (0): to keep variables inside this block in their own scope. 
remove if you prefer otherwise */ /* Your Code Here */ uchar *gpu_image1_shared; uchar *gpu_image2_shared; // TODO: allocate with cudaMalloc CUDA_CHECK(cudaMalloc(&gpu_image1_shared,1024*sizeof(uchar))); CUDA_CHECK(cudaMalloc(&gpu_image2_shared,1024*sizeof(uchar))); int *gpu_hist1; int *gpu_hist2; // TODO: allocate with cudaMalloc CUDA_CHECK(cudaMalloc(&gpu_hist1,256*sizeof(int))); CUDA_CHECK(cudaMalloc(&gpu_hist2,256*sizeof(int))); //cudaMemset(&gpu_hist1,0,256*sizeof(int)); //cudaMemset(&gpu_hist2,0,256*sizeof(int)); double *gpu_hist_distance; //TODO: allocate with cudaMalloc CUDA_CHECK(cudaMalloc(&gpu_hist_distance,sizeof(double))); double cpu_hist_distance; t_start = get_time_msec(); for (int i = 0; i < N_IMG_PAIRS; i++) { dim3 threadsPerBlock(32,32); // TODO: copy relevant images from images1 and images2 to gpu_image1 and gpu_image2 CUDA_CHECK(cudaMemcpy(gpu_image1_shared, images1+i*1024, 1024 * sizeof(uchar), cudaMemcpyHostToDevice)); CUDA_CHECK(cudaMemcpy(gpu_image2_shared, images2+i*1024, 1024 * sizeof(uchar), cudaMemcpyHostToDevice)); image_to_hisogram_shared<<<1, threadsPerBlock>>>(gpu_image1_shared, gpu_hist1); image_to_hisogram_shared<<<1, threadsPerBlock>>>(gpu_image2_shared, gpu_hist2); //->move to global hiat histogram_distance<<<1, 256>>>(gpu_hist1, gpu_hist2, gpu_hist_distance); //TODO: copy gpu_hist_distance to cpu_hist_distance CUDA_CHECK(cudaMemcpy(&cpu_hist_distance, gpu_hist_distance, sizeof(double), cudaMemcpyDeviceToHost)); total_distance += cpu_hist_distance; } CUDA_CHECK(cudaDeviceSynchronize()); CUDA_CHECK(cudaFree(gpu_hist1)); CUDA_CHECK(cudaFree(gpu_hist2)); CUDA_CHECK(cudaFree(gpu_image1_shared)); CUDA_CHECK(cudaFree(gpu_image2_shared)); t_finish = get_time_msec(); } while (0); printf("average distance between images %f\n", total_distance / N_IMG_PAIRS); printf("total time %f [msec]\n", t_finish - t_start); /* using GPU + batching */ printf("\n=== GPU Batching ===\n"); do { //* do {} while (0): to keep variables inside this block in their own scope. 
remove if you prefer otherwise *//* //* Your Code Here *//* uchar *gpu_image1, *gpu_image2; // TODO: allocate with cudaMalloc CUDA_CHECK(cudaMalloc(&gpu_image1,N_IMG_PAIRS*1024*sizeof(uchar))); CUDA_CHECK(cudaMalloc(&gpu_image2,N_IMG_PAIRS*1024*sizeof(uchar))); int *gpu_hist1, *gpu_hist2; // TODO: allocate with cudaMalloc CUDA_CHECK(cudaMalloc(&gpu_hist1,N_IMG_PAIRS*256*sizeof(int))); CUDA_CHECK(cudaMalloc(&gpu_hist2,N_IMG_PAIRS*256*sizeof(int))); CUDA_CHECK(cudaMemset(gpu_hist1,0,N_IMG_PAIRS*256*sizeof(int))); CUDA_CHECK(cudaMemset(gpu_hist2,0,N_IMG_PAIRS*256*sizeof(int))); double *gpu_hist_distance; //TODO: allocate with cudaMalloc CUDA_CHECK(cudaMalloc(&gpu_hist_distance,sizeof(double))); double cpu_hist_distance; t_start = get_time_msec(); dim3 threadsPerBlock(32,32); // TODO: copy relevant images from images1 and images2 to gpu_image1 and gpu_image2 CUDA_CHECK(cudaMemcpy(gpu_image1, images1, 1024 *N_IMG_PAIRS* sizeof(uchar), cudaMemcpyHostToDevice)); CUDA_CHECK(cudaMemcpy(gpu_image2, images2, 1024 *N_IMG_PAIRS* sizeof(uchar), cudaMemcpyHostToDevice)); image_to_hisogram_batched<<<N_IMG_PAIRS, threadsPerBlock>>>(gpu_image2, gpu_hist2); image_to_hisogram_batched<<<N_IMG_PAIRS, threadsPerBlock>>>(gpu_image1, gpu_hist1); histogram_distance_batched<<<N_IMG_PAIRS, 256>>>(gpu_hist1, gpu_hist2, gpu_hist_distance); //TODO: copy gpu_hist_distance to cpu_hist_distance CUDA_CHECK(cudaMemcpy(&cpu_hist_distance, gpu_hist_distance, sizeof(double), cudaMemcpyDeviceToHost)); total_distance += cpu_hist_distance; CUDA_CHECK(cudaDeviceSynchronize()); CUDA_CHECK(cudaFree(gpu_hist1)); CUDA_CHECK(cudaFree(gpu_hist2)); CUDA_CHECK(cudaFree(gpu_image1)); CUDA_CHECK(cudaFree(gpu_image2)); t_finish = get_time_msec(); } while (0); /* Your Code Here */ printf("average distance between images %f\n", total_distance / N_IMG_PAIRS); printf("total time %f [msec]\n", t_finish - t_start); return 0; } //bla
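
In the pair above, the distance kernels accumulate a double chi-square sum through atomicAdd((float*)distance, (float)temp), which reinterprets the double's storage as a float rather than converting the value, and the shared-memory kernels spell the fence and barrier intrinsics as threadfence()/syncthreads() instead of __threadfence()/__syncthreads(). On devices without a native double-precision atomicAdd (compute capability below 6.0), the usual alternative is the compare-and-swap loop from the CUDA C Programming Guide, reproduced here as a reference sketch rather than as part of the original files.

// CAS-based double-precision atomicAdd (CUDA C Programming Guide pattern),
// usable where atomicAdd(double*, double) is not available in hardware.
__device__ double atomicAddDouble(double* address, double val)
{
    unsigned long long int* address_as_ull = (unsigned long long int*)address;
    unsigned long long int old = *address_as_ull, assumed;
    do {
        assumed = old;
        old = atomicCAS(address_as_ull, assumed,
                        __double_as_longlong(val + __longlong_as_double(assumed)));
    } while (assumed != old);   // retry until no other thread changed the value
    return __longlong_as_double(old);
}
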
af148548eb5e020bd4767bae32cd93813c6dd771.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "../inc/parser.h" #include "../inc/helper.cuh" #include <errno.h> template <int blockSize> __global__ void shared_memory_approach_aos(Tuple* R, LLONG rSize, Tuple* S, LLONG sSize, LLONG* partialSums) { extern __shared__ float sdata[]; int row = blockIdx.x * blockDim.x + threadIdx.x; int col = blockIdx.y * blockDim.y + threadIdx.y; int blockID = gridDim.x * blockIdx.y + blockIdx.x; int threadID = (threadIdx.y * blockDim.x) + threadIdx.x; //local thread id, i.e. within block if(row > rSize) return; int rA = R[row].a; LLONG partialSum = 0; while (row < rSize && col < sSize) { if (rA > S[col].a) { partialSum += S[col].x; } col += blockDim.y; } sdata[threadID] = partialSum; __syncthreads(); if ((blockSize >= 1024) && (threadID < 512)) { sdata[threadID] = partialSum = partialSum + sdata[threadID + 512]; } __syncthreads(); if ((blockSize >= 512) && (threadID < 256)) { sdata[threadID] = partialSum = partialSum + sdata[threadID + 256]; } __syncthreads(); if ((blockSize >= 256) && (threadID < 128)) { sdata[threadID] = partialSum = partialSum + sdata[threadID + 128]; } __syncthreads(); if ((blockSize >= 128) && (threadID < 64)) { sdata[threadID] = partialSum = partialSum + sdata[threadID + 64]; } __syncthreads(); if (threadID < 32) { // Fetch final intermediate sum from 2nd warp if (blockSize >= 64) partialSum += sdata[threadID + 32]; // Reduce final warp using shuffle for (int offset = warpSize / 2; offset > 0; offset /= 2) { partialSum += __shfl_down(partialSum, offset); } } if (threadID == 0) partialSums[blockID] = partialSum; } float callKernel(Tuple* R, LLONG rSize, Tuple* S, LLONG sSize, dim3 threadBlock) { float millis = 0.0; hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); Tuple* deviceR; Tuple* deviceS; LLONG* hostPartialSums; LLONG* devicePartialSums; // allocate memory for relations checkErrors(hipMalloc((void**)&(deviceR), sizeof(Tuple) * rSize)); checkErrors(hipMalloc((void**)&(deviceS), sizeof(Tuple) * sSize)); // copy relations to gpu checkErrors(hipMemcpy(deviceR, R, sizeof(Tuple) * rSize, hipMemcpyHostToDevice)); checkErrors(hipMemcpy(deviceS, S, sizeof(Tuple) * sSize, hipMemcpyHostToDevice)); dim3 grid((rSize / threadBlock.x) + 1); // allocate memory for partialSums checkErrors(hipMalloc((void**)&(devicePartialSums), sizeof(LLONG) * grid.x)); hipEventRecord(start); hipLaunchKernelGGL(( shared_memory_approach_aos<1024>) , dim3(grid), dim3(threadBlock), 1024 * sizeof(LLONG), 0, deviceR, rSize, deviceS, sSize, devicePartialSums); hipEventRecord(stop); checkErrors(hipPeekAtLastError()); checkErrors(hipDeviceSynchronize()); hostPartialSums = (LLONG*)malloc(sizeof(LLONG) * grid.x); checkErrors(hipMemcpy(hostPartialSums, devicePartialSums, sizeof(LLONG) * grid.x, hipMemcpyDeviceToHost)); LLONG sum = 0; for(int i = 0; i < grid.x; ++i) { sum += hostPartialSums[i]; } printf("Sum: %llu\n", sum); hipEventSynchronize(stop); hipEventElapsedTime(&millis, start, stop); hipEventDestroy(start); hipEventDestroy(stop); hipFree(deviceR); hipFree(deviceS); hipFree(devicePartialSums); hipDeviceReset(); free(hostPartialSums); return millis; } int main(int argc, char** argv) { char* rPath; LLONG rSize; char* sPath; LLONG sSize; int blockSideX; int blockSideY; int repeats; char* eptr; // read input arguments if (argc != 8) { printf("Not enough arguments\n---------------\n"); printf("1st:\t R path\n"); printf("2nd:\t |R| (R size)\n"); printf("3rd:\t S path\n"); printf("4th:\t |S| (S 
size)\n"); printf("5th:\t Thread block side x \n"); printf("6th:\t Thread block side y \n"); printf("7th:\t Number of repeats\n"); return 1; } rPath = argv[1]; sPath = argv[3]; rSize = strtoll(argv[2], &eptr, 10); sSize = strtoll(argv[4], &eptr, 10); blockSideX = strtol(argv[5], &eptr, 10); blockSideY = strtol(argv[6], &eptr, 10); repeats = strtol(argv[7], &eptr, 10); if(rSize == 0 || sSize == 0 || blockSideX == 0 || blockSideY == 0 || repeats == 0) { printf("Wrong input arguments (error: %d)", errno); return 1; } // allocate memory Tuple* R; Tuple* S; R = (Tuple*)malloc(sizeof(Tuple) * rSize); S = (Tuple*)malloc(sizeof(Tuple) * sSize); readRelationAoS(rPath, R); readRelationAoS(sPath, S); printf("Shared Memory Approach (AoS)\n"); dim3 threadBlock(blockSideX, blockSideY); // call kernel multiple times float time_aggregate = 0.0; for(int i = 0; i < repeats; ++i) { time_aggregate += callKernel(R, rSize, S, sSize, threadBlock); } // calculate and print average time printf("Execution time: %f\n", (time_aggregate / (float) repeats)); free(R); free(S); return 0; }
af148548eb5e020bd4767bae32cd93813c6dd771.cu
#include "../inc/parser.h" #include "../inc/helper.cuh" #include <errno.h> template <int blockSize> __global__ void shared_memory_approach_aos(Tuple* R, LLONG rSize, Tuple* S, LLONG sSize, LLONG* partialSums) { extern __shared__ float sdata[]; int row = blockIdx.x * blockDim.x + threadIdx.x; int col = blockIdx.y * blockDim.y + threadIdx.y; int blockID = gridDim.x * blockIdx.y + blockIdx.x; int threadID = (threadIdx.y * blockDim.x) + threadIdx.x; //local thread id, i.e. within block if(row > rSize) return; int rA = R[row].a; LLONG partialSum = 0; while (row < rSize && col < sSize) { if (rA > S[col].a) { partialSum += S[col].x; } col += blockDim.y; } sdata[threadID] = partialSum; __syncthreads(); if ((blockSize >= 1024) && (threadID < 512)) { sdata[threadID] = partialSum = partialSum + sdata[threadID + 512]; } __syncthreads(); if ((blockSize >= 512) && (threadID < 256)) { sdata[threadID] = partialSum = partialSum + sdata[threadID + 256]; } __syncthreads(); if ((blockSize >= 256) && (threadID < 128)) { sdata[threadID] = partialSum = partialSum + sdata[threadID + 128]; } __syncthreads(); if ((blockSize >= 128) && (threadID < 64)) { sdata[threadID] = partialSum = partialSum + sdata[threadID + 64]; } __syncthreads(); if (threadID < 32) { // Fetch final intermediate sum from 2nd warp if (blockSize >= 64) partialSum += sdata[threadID + 32]; // Reduce final warp using shuffle for (int offset = warpSize / 2; offset > 0; offset /= 2) { partialSum += __shfl_down(partialSum, offset); } } if (threadID == 0) partialSums[blockID] = partialSum; } float callKernel(Tuple* R, LLONG rSize, Tuple* S, LLONG sSize, dim3 threadBlock) { float millis = 0.0; cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); Tuple* deviceR; Tuple* deviceS; LLONG* hostPartialSums; LLONG* devicePartialSums; // allocate memory for relations checkErrors(cudaMalloc((void**)&(deviceR), sizeof(Tuple) * rSize)); checkErrors(cudaMalloc((void**)&(deviceS), sizeof(Tuple) * sSize)); // copy relations to gpu checkErrors(cudaMemcpy(deviceR, R, sizeof(Tuple) * rSize, cudaMemcpyHostToDevice)); checkErrors(cudaMemcpy(deviceS, S, sizeof(Tuple) * sSize, cudaMemcpyHostToDevice)); dim3 grid((rSize / threadBlock.x) + 1); // allocate memory for partialSums checkErrors(cudaMalloc((void**)&(devicePartialSums), sizeof(LLONG) * grid.x)); cudaEventRecord(start); shared_memory_approach_aos<1024> <<<grid, threadBlock, 1024 * sizeof(LLONG)>>>(deviceR, rSize, deviceS, sSize, devicePartialSums); cudaEventRecord(stop); checkErrors(cudaPeekAtLastError()); checkErrors(cudaDeviceSynchronize()); hostPartialSums = (LLONG*)malloc(sizeof(LLONG) * grid.x); checkErrors(cudaMemcpy(hostPartialSums, devicePartialSums, sizeof(LLONG) * grid.x, cudaMemcpyDeviceToHost)); LLONG sum = 0; for(int i = 0; i < grid.x; ++i) { sum += hostPartialSums[i]; } printf("Sum: %llu\n", sum); cudaEventSynchronize(stop); cudaEventElapsedTime(&millis, start, stop); cudaEventDestroy(start); cudaEventDestroy(stop); cudaFree(deviceR); cudaFree(deviceS); cudaFree(devicePartialSums); cudaDeviceReset(); free(hostPartialSums); return millis; } int main(int argc, char** argv) { char* rPath; LLONG rSize; char* sPath; LLONG sSize; int blockSideX; int blockSideY; int repeats; char* eptr; // read input arguments if (argc != 8) { printf("Not enough arguments\n---------------\n"); printf("1st:\t R path\n"); printf("2nd:\t |R| (R size)\n"); printf("3rd:\t S path\n"); printf("4th:\t |S| (S size)\n"); printf("5th:\t Thread block side x \n"); printf("6th:\t Thread block side y \n"); printf("7th:\t 
Number of repeats\n"); return 1; } rPath = argv[1]; sPath = argv[3]; rSize = strtoll(argv[2], &eptr, 10); sSize = strtoll(argv[4], &eptr, 10); blockSideX = strtol(argv[5], &eptr, 10); blockSideY = strtol(argv[6], &eptr, 10); repeats = strtol(argv[7], &eptr, 10); if(rSize == 0 || sSize == 0 || blockSideX == 0 || blockSideY == 0 || repeats == 0) { printf("Wrong input arguments (error: %d)", errno); return 1; } // allocate memory Tuple* R; Tuple* S; R = (Tuple*)malloc(sizeof(Tuple) * rSize); S = (Tuple*)malloc(sizeof(Tuple) * sSize); readRelationAoS(rPath, R); readRelationAoS(sPath, S); printf("Shared Memory Approach (AoS)\n"); dim3 threadBlock(blockSideX, blockSideY); // call kernel multiple times float time_aggregate = 0.0; for(int i = 0; i < repeats; ++i) { time_aggregate += callKernel(R, rSize, S, sSize, threadBlock); } // calculate and print average time printf("Execution time: %f\n", (time_aggregate / (float) repeats)); free(R); free(S); return 0; }
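
The warp-level reduction in the pair above uses __shfl_down, which CUDA 9 deprecated in favour of the *_sync variants that take an explicit participation mask (note also that the shared buffer is declared as float sdata[] even though it stores LLONG partial sums, so intermediate values are rounded to float precision). A sketch of the final-warp step written with the synchronized shuffle:

#include <cuda_runtime.h>

// Warp-level sum using the CUDA 9+ synchronized shuffle; equivalent to the
// final-warp step in the kernel above, with an explicit full-warp mask.
__device__ long long warpReduceSum(long long partialSum)
{
    for (int offset = warpSize / 2; offset > 0; offset /= 2) {
        partialSum += __shfl_down_sync(0xffffffffu, partialSum, offset);
    }
    return partialSum;   // lane 0 holds the warp total
}
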
ee2cb991df17f6242d81e666f23019573b611978.hip
// !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "cudaKernel.cu"
#include <chrono>
#include <iostream>
using namespace std;
using namespace std::chrono;

int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};

int main(int argc, char **argv) {
    hipSetDevice(0);
    char* p;
    int matrix_len = strtol(argv[1], &p, 10);
    for (int matrix_looper = 0; matrix_looper < matrix_len; matrix_looper++) {
        for (int block_looper = 0; block_looper < 20; block_looper++) {
            int XSIZE = matrices_[matrix_looper][0], YSIZE = matrices_[matrix_looper][1],
                BLOCKX = blocks_[block_looper][0], BLOCKY = blocks_[block_looper][1];
            int *n = NULL;
            hipMalloc(&n, XSIZE*YSIZE);
            int limit = 1;
            int iXSIZE = XSIZE;
            int iYSIZE = YSIZE;
            while (iXSIZE % BLOCKX != 0) { iXSIZE++; }
            while (iYSIZE % BLOCKY != 0) { iYSIZE++; }
            dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
            dim3 threadBlock(BLOCKX, BLOCKY);
            hipFree(0);
            hipLaunchKernelGGL(( cudaKernel), dim3(gridBlock), dim3(threadBlock), 0, 0, n, limit);
            hipDeviceSynchronize();
            for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
                hipLaunchKernelGGL(( cudaKernel), dim3(gridBlock), dim3(threadBlock), 0, 0, n, limit);
            }
            auto start = steady_clock::now();
            for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
                hipLaunchKernelGGL(( cudaKernel), dim3(gridBlock), dim3(threadBlock), 0, 0, n, limit);
            }
            auto end = steady_clock::now();
            auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
            cout << '[' << usecs.count() << ',' << '(' << BLOCKX << ',' << BLOCKY << ')' << ',' << '(' << XSIZE << ',' << YSIZE << ')' << ']' << endl;
        }
    }
}
ee2cb991df17f6242d81e666f23019573b611978.cu
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "cudaKernel.cu"
#include <chrono>
#include <iostream>
using namespace std;
using namespace std::chrono;

int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};

int main(int argc, char **argv) {
    cudaSetDevice(0);
    char* p;
    int matrix_len = strtol(argv[1], &p, 10);
    for (int matrix_looper = 0; matrix_looper < matrix_len; matrix_looper++) {
        for (int block_looper = 0; block_looper < 20; block_looper++) {
            int XSIZE = matrices_[matrix_looper][0], YSIZE = matrices_[matrix_looper][1],
                BLOCKX = blocks_[block_looper][0], BLOCKY = blocks_[block_looper][1];
            int *n = NULL;
            cudaMalloc(&n, XSIZE*YSIZE);
            int limit = 1;
            int iXSIZE = XSIZE;
            int iYSIZE = YSIZE;
            while (iXSIZE % BLOCKX != 0) { iXSIZE++; }
            while (iYSIZE % BLOCKY != 0) { iYSIZE++; }
            dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
            dim3 threadBlock(BLOCKX, BLOCKY);
            cudaFree(0);
            cudaKernel<<<gridBlock, threadBlock>>>(n, limit);
            cudaDeviceSynchronize();
            for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
                cudaKernel<<<gridBlock, threadBlock>>>(n, limit);
            }
            auto start = steady_clock::now();
            for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
                cudaKernel<<<gridBlock, threadBlock>>>(n, limit);
            }
            auto end = steady_clock::now();
            auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
            cout << '[' << usecs.count() << ',' << '(' << BLOCKX << ',' << BLOCKY << ')' << ',' << '(' << XSIZE << ',' << YSIZE << ')' << ']' << endl;
        }
    }
}
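
The harness above times 1000 back-to-back launches with std::chrono but reads the end timestamp without synchronizing, so the host clock largely measures launch and queueing overhead rather than kernel execution. A sketch of the same loop timed with CUDA events follows; it is written against the .cu variant and reuses the harness's own cudaKernel, gridBlock, threadBlock, n and limit.

// Event-based timing sketch for the benchmark loop above.
cudaEvent_t ev_start, ev_stop;
cudaEventCreate(&ev_start);
cudaEventCreate(&ev_stop);
cudaEventRecord(ev_start);
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
    cudaKernel<<<gridBlock, threadBlock>>>(n, limit);
}
cudaEventRecord(ev_stop);
cudaEventSynchronize(ev_stop);          // wait for all queued kernels to finish
float msecs = 0.0f;
cudaEventElapsedTime(&msecs, ev_start, ev_stop);
cudaEventDestroy(ev_start);
cudaEventDestroy(ev_stop);
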
1e66daf8654c3fa7caad2ce96e01853a72b20258.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>

__global__ void add(int* d_a, int* d_b, int* d_c){
    int tid = threadIdx.x + blockIdx.x*blockDim.x;
    if(tid < 2000){
        d_c[tid] = d_a[tid] + d_b[tid];
    }
}

int main(int argc, char* argv[]){
    hipSetDevice(1);
    return 0;
}
1e66daf8654c3fa7caad2ce96e01853a72b20258.cu
#include <stdio.h>

__global__ void add(int* d_a, int* d_b, int* d_c){
    int tid = threadIdx.x + blockIdx.x*blockDim.x;
    if(tid < 2000){
        d_c[tid] = d_a[tid] + d_b[tid];
    }
}

int main(int argc, char* argv[]){
    cudaSetDevice(1);
    return 0;
}
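
In the pair above the add kernel is defined but never launched; main only selects a device and returns. The following is a hypothetical host-side driver that would exercise the kernel (every name below other than add and the CUDA runtime calls is illustrative and not part of the original files).

// Hypothetical driver for the add kernel above (.cu variant).
int run_add_example()
{
    const int N = 2000;                      // matches the bound checked in the kernel
    int *h_a = new int[N], *h_b = new int[N], *h_c = new int[N];
    for (int i = 0; i < N; ++i) { h_a[i] = i; h_b[i] = 2 * i; }

    int *d_a, *d_b, *d_c;
    cudaMalloc(&d_a, N * sizeof(int));
    cudaMalloc(&d_b, N * sizeof(int));
    cudaMalloc(&d_c, N * sizeof(int));
    cudaMemcpy(d_a, h_a, N * sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, h_b, N * sizeof(int), cudaMemcpyHostToDevice);

    add<<<(N + 255) / 256, 256>>>(d_a, d_b, d_c);
    cudaMemcpy(h_c, d_c, N * sizeof(int), cudaMemcpyDeviceToHost);

    cudaFree(d_a); cudaFree(d_b); cudaFree(d_c);
    delete[] h_a; delete[] h_b; delete[] h_c;
    return 0;
}
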
0023155f79675fa66715c1dca7ce066ebe1cac56.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <algorithm>
#include <cfloat>
#include <vector>

#include "caffe/layers/overlap_accuracy_layer.hpp"
#include "caffe/util/math_functions.hpp"

namespace caffe {

template <typename Dtype>
__global__ void OverlapAccuracyForwardGPU(const int nthreads, const int dim,
    const Dtype* prediction, const Dtype* label, Dtype* pre, Dtype* recall) {
  CUDA_KERNEL_LOOP(index, nthreads) {
    const Dtype* cur_prediction = prediction + index * dim;
    const Dtype* cur_label = label + index * dim;
    Dtype count_pre, count_gt, count_it;
    count_pre = 0;
    count_gt = 0;
    count_it = 0;
    for (int i = 0; i < dim; i++) {
      if (cur_label[i] > 0.5 && cur_prediction[i] >= 0.5) {
        count_pre++;
        count_gt++;
        count_it++;
      } else if (cur_label[i] > 0.5) {
        count_gt++;
      } else if (cur_prediction[i] >= 0.5) {
        count_pre++;
      }
    }
    pre[index] = count_it / (count_pre + Dtype(FLT_MIN));
    recall[index] = count_it / (count_gt + Dtype(FLT_MIN));
  }
}

template <typename Dtype>
void OverlapAccuracyLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  int num = bottom[0]->num();
  int dim = bottom[0]->count(1);
  Dtype* pre_data = pre_.mutable_gpu_data();
  Dtype* recall_data = recall_.mutable_gpu_data();
  const Dtype* prediction = bottom[0]->gpu_data();
  const Dtype* label = bottom[1]->gpu_data();
  hipLaunchKernelGGL(( OverlapAccuracyForwardGPU<Dtype>), dim3(CAFFE_GET_BLOCKS(num)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
      num, dim, prediction, label, pre_data, recall_data);
  Dtype P, R;
  caffe_gpu_asum(num, pre_data, &P);
  caffe_gpu_asum(num, recall_data, &R);
  top[0]->mutable_cpu_data()[0] = P / num;
  if (top.size() >= 2) {
    top[1]->mutable_cpu_data()[0] = R / num;
  }
}

INSTANTIATE_LAYER_GPU_FUNCS(OverlapAccuracyLayer);

}  // namespace caffe
0023155f79675fa66715c1dca7ce066ebe1cac56.cu
#include <algorithm>
#include <cfloat>
#include <vector>

#include "caffe/layers/overlap_accuracy_layer.hpp"
#include "caffe/util/math_functions.hpp"

namespace caffe {

template <typename Dtype>
__global__ void OverlapAccuracyForwardGPU(const int nthreads, const int dim,
    const Dtype* prediction, const Dtype* label, Dtype* pre, Dtype* recall) {
  CUDA_KERNEL_LOOP(index, nthreads) {
    const Dtype* cur_prediction = prediction + index * dim;
    const Dtype* cur_label = label + index * dim;
    Dtype count_pre, count_gt, count_it;
    count_pre = 0;
    count_gt = 0;
    count_it = 0;
    for (int i = 0; i < dim; i++) {
      if (cur_label[i] > 0.5 && cur_prediction[i] >= 0.5) {
        count_pre++;
        count_gt++;
        count_it++;
      } else if (cur_label[i] > 0.5) {
        count_gt++;
      } else if (cur_prediction[i] >= 0.5) {
        count_pre++;
      }
    }
    pre[index] = count_it / (count_pre + Dtype(FLT_MIN));
    recall[index] = count_it / (count_gt + Dtype(FLT_MIN));
  }
}

template <typename Dtype>
void OverlapAccuracyLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  int num = bottom[0]->num();
  int dim = bottom[0]->count(1);
  Dtype* pre_data = pre_.mutable_gpu_data();
  Dtype* recall_data = recall_.mutable_gpu_data();
  const Dtype* prediction = bottom[0]->gpu_data();
  const Dtype* label = bottom[1]->gpu_data();
  OverlapAccuracyForwardGPU<Dtype><<<CAFFE_GET_BLOCKS(num), CAFFE_CUDA_NUM_THREADS>>>(
      num, dim, prediction, label, pre_data, recall_data);
  Dtype P, R;
  caffe_gpu_asum(num, pre_data, &P);
  caffe_gpu_asum(num, recall_data, &R);
  top[0]->mutable_cpu_data()[0] = P / num;
  if (top.size() >= 2) {
    top[1]->mutable_cpu_data()[0] = R / num;
  }
}

INSTANTIATE_LAYER_GPU_FUNCS(OverlapAccuracyLayer);

}  // namespace caffe
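
The layer above leans on Caffe's CUDA_KERNEL_LOOP, CAFFE_GET_BLOCKS and CAFFE_CUDA_NUM_THREADS helpers, which are not defined in this file. For orientation, the stock Caffe definitions (device_alternate.hpp) are approximately as follows; the copy pulled in by this layer's headers may differ.

// Approximate stock-Caffe launch helpers: a grid-stride loop plus a block-count
// computation, so CAFFE_GET_BLOCKS(num) blocks of CAFFE_CUDA_NUM_THREADS threads
// cover all `num` samples.
#define CUDA_KERNEL_LOOP(i, n) \
  for (int i = blockIdx.x * blockDim.x + threadIdx.x; \
       i < (n); \
       i += blockDim.x * gridDim.x)

#if __CUDA_ARCH__ >= 200
const int CAFFE_CUDA_NUM_THREADS = 1024;
#else
const int CAFFE_CUDA_NUM_THREADS = 512;
#endif

inline int CAFFE_GET_BLOCKS(const int N) {
  return (N + CAFFE_CUDA_NUM_THREADS - 1) / CAFFE_CUDA_NUM_THREADS;
}
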
4b308b6134d5b4517cc761980e92eedfff05a7ba.hip
// !!! This is a file automatically generated by hipify!!! #include <stdlib.h> #include "memory_hip.cuh" // Handle errors raised by the GPU // From http://stackoverflow.com/questions/14038589/what-is-the-canonical-way-to-check-for-errors-using-the-cuda-runtime-api #define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); } inline void gpuAssert(hipError_t code, const char *file, int line, bool abort=true) { if (code != hipSuccess) { fprintf(stderr,"GPUassert: %s %s %d\n", hipGetErrorString(code), file, line); if (abort) exit(code); } } // Memory management extern void init_cuda() { double *dummy; gpuErrchk( hipMalloc((void**) &dummy, sizeof(double)) ); gpuErrchk( hipFree(dummy) ); } extern void init_memory(struct memory_cuda *mem, struct res_cuda *res, int sx, int sy, int sz, double* u) { double *xie, *xio; // xie -> even coordinates of xi, xio -> odd coordinates double *xiobar; // xiobar -> over-relaxation of xio double *gle, *glo; // gap arrays // GPU versions double *dev_xie, *dev_xio, *dev_u; // gpu versions double *dev_xiobar, *dev_xioswp; // gpu versions double *dev_gle, *dev_glo; // gap array res->it = 0; res->msec = 0; res->gap = 0; res->rmse = 0; int sxyz = sx * sy * sz; int Ke = sx/2, Le=sy/2; int Me = sz > 1 ? sz/2 : 1; int Ko = (sx-2)/2, Lo = (sy-2)/2; int Mo = sz > 1 ? (sz-2) : 1; int factor = sz > 1 ? 12 : 4; // Memory management xie = (double *) malloc(Ke*Le*Me*factor*sizeof(double)); xio = (double *) malloc(Ko*Lo*Mo*factor*sizeof(double)); xiobar = (double *) malloc(Ko*Lo*Mo*factor*sizeof(double)); gle = (double *) malloc(Ke*Le*Me*sizeof(double)); glo = (double *) malloc(Ko*Lo*Mo*sizeof(double)); memset(xie, 0, Ke*Le*Me*factor*sizeof(double)); memset(xio, 0, Ko*Lo*Mo*factor*sizeof(double)); gpuErrchk( hipMalloc((void**)&dev_xie, Ke*Le*Me*factor*sizeof(double)) ); gpuErrchk( hipMalloc((void**)&dev_xio, Ko*Lo*Mo*factor*sizeof(double)) ); gpuErrchk( hipMalloc((void**)&dev_xiobar, Ko*Lo*Mo*factor*sizeof(double)) ); gpuErrchk( hipMalloc((void**)&dev_xioswp, Ko*Lo*Mo*factor*sizeof(double)) ); gpuErrchk( hipMalloc((void**)&dev_u, sxyz*sizeof(double)) ); gpuErrchk( hipMalloc((void**)&dev_gle, Ke*Le*Me*sizeof(double)) ); gpuErrchk( hipMalloc((void**)&dev_glo, Ko*Lo*Mo*sizeof(double)) ); gpuErrchk( hipMemcpy(dev_xie, xie, Ke*Le*Me*factor*sizeof(double), hipMemcpyHostToDevice) ); gpuErrchk( hipMemcpy(dev_xio, xio, Ko*Lo*Mo*factor*sizeof(double), hipMemcpyHostToDevice) ); gpuErrchk( hipMemcpy(dev_xiobar, xiobar, Ko*Lo*Mo*factor*sizeof(double), hipMemcpyHostToDevice) ); gpuErrchk( hipMemcpy(dev_u, u, sxyz*sizeof(double), hipMemcpyHostToDevice) ); gpuErrchk( hipMemcpy(dev_gle, gle, Ke*Le*Me*sizeof(double), hipMemcpyHostToDevice) ); gpuErrchk( hipMemcpy(dev_glo, glo, Ko*Lo*Mo*sizeof(double), hipMemcpyHostToDevice) ); mem->dev_xie = dev_xie; mem->dev_xio = dev_xio; mem->dev_u = dev_u; mem->dev_xiobar = dev_xiobar; mem->dev_xioswp = dev_xioswp; mem->dev_gle = dev_gle; mem->dev_glo = dev_glo; return; } extern void free_memory(struct memory_cuda *mem) { gpuErrchk( hipFree(mem->dev_xie) ); gpuErrchk( hipFree(mem->dev_xio) ); gpuErrchk( hipFree(mem->dev_xiobar) ); gpuErrchk( hipFree(mem->dev_u) ); gpuErrchk( hipFree(mem->dev_gle) ); gpuErrchk( hipFree(mem->dev_glo) ); free(mem); return; }
4b308b6134d5b4517cc761980e92eedfff05a7ba.cu
#include <stdlib.h> #include "memory.cuh" // Handle errors raised by the GPU // From http://stackoverflow.com/questions/14038589/what-is-the-canonical-way-to-check-for-errors-using-the-cuda-runtime-api #define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); } inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true) { if (code != cudaSuccess) { fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line); if (abort) exit(code); } } // Memory management extern void init_cuda() { double *dummy; gpuErrchk( cudaMalloc((void**) &dummy, sizeof(double)) ); gpuErrchk( cudaFree(dummy) ); } extern void init_memory(struct memory_cuda *mem, struct res_cuda *res, int sx, int sy, int sz, double* u) { double *xie, *xio; // xie -> even coordinates of xi, xio -> odd coordinates double *xiobar; // xiobar -> over-relaxation of xio double *gle, *glo; // gap arrays // GPU versions double *dev_xie, *dev_xio, *dev_u; // gpu versions double *dev_xiobar, *dev_xioswp; // gpu versions double *dev_gle, *dev_glo; // gap array res->it = 0; res->msec = 0; res->gap = 0; res->rmse = 0; int sxyz = sx * sy * sz; int Ke = sx/2, Le=sy/2; int Me = sz > 1 ? sz/2 : 1; int Ko = (sx-2)/2, Lo = (sy-2)/2; int Mo = sz > 1 ? (sz-2) : 1; int factor = sz > 1 ? 12 : 4; // Memory management xie = (double *) malloc(Ke*Le*Me*factor*sizeof(double)); xio = (double *) malloc(Ko*Lo*Mo*factor*sizeof(double)); xiobar = (double *) malloc(Ko*Lo*Mo*factor*sizeof(double)); gle = (double *) malloc(Ke*Le*Me*sizeof(double)); glo = (double *) malloc(Ko*Lo*Mo*sizeof(double)); memset(xie, 0, Ke*Le*Me*factor*sizeof(double)); memset(xio, 0, Ko*Lo*Mo*factor*sizeof(double)); gpuErrchk( cudaMalloc((void**)&dev_xie, Ke*Le*Me*factor*sizeof(double)) ); gpuErrchk( cudaMalloc((void**)&dev_xio, Ko*Lo*Mo*factor*sizeof(double)) ); gpuErrchk( cudaMalloc((void**)&dev_xiobar, Ko*Lo*Mo*factor*sizeof(double)) ); gpuErrchk( cudaMalloc((void**)&dev_xioswp, Ko*Lo*Mo*factor*sizeof(double)) ); gpuErrchk( cudaMalloc((void**)&dev_u, sxyz*sizeof(double)) ); gpuErrchk( cudaMalloc((void**)&dev_gle, Ke*Le*Me*sizeof(double)) ); gpuErrchk( cudaMalloc((void**)&dev_glo, Ko*Lo*Mo*sizeof(double)) ); gpuErrchk( cudaMemcpy(dev_xie, xie, Ke*Le*Me*factor*sizeof(double), cudaMemcpyHostToDevice) ); gpuErrchk( cudaMemcpy(dev_xio, xio, Ko*Lo*Mo*factor*sizeof(double), cudaMemcpyHostToDevice) ); gpuErrchk( cudaMemcpy(dev_xiobar, xiobar, Ko*Lo*Mo*factor*sizeof(double), cudaMemcpyHostToDevice) ); gpuErrchk( cudaMemcpy(dev_u, u, sxyz*sizeof(double), cudaMemcpyHostToDevice) ); gpuErrchk( cudaMemcpy(dev_gle, gle, Ke*Le*Me*sizeof(double), cudaMemcpyHostToDevice) ); gpuErrchk( cudaMemcpy(dev_glo, glo, Ko*Lo*Mo*sizeof(double), cudaMemcpyHostToDevice) ); mem->dev_xie = dev_xie; mem->dev_xio = dev_xio; mem->dev_u = dev_u; mem->dev_xiobar = dev_xiobar; mem->dev_xioswp = dev_xioswp; mem->dev_gle = dev_gle; mem->dev_glo = dev_glo; return; } extern void free_memory(struct memory_cuda *mem) { gpuErrchk( cudaFree(mem->dev_xie) ); gpuErrchk( cudaFree(mem->dev_xio) ); gpuErrchk( cudaFree(mem->dev_xiobar) ); gpuErrchk( cudaFree(mem->dev_u) ); gpuErrchk( cudaFree(mem->dev_gle) ); gpuErrchk( cudaFree(mem->dev_glo) ); free(mem); return; }
aa327ee04812b64b300bd1588beb2648280d03da.hip
// !!! This is a file automatically generated by hipify!!!
// Copyright (c) 2022, Tri Dao.
// Splitting the different head dimensions to different files to speed up compilation.

#include <ATen/native/transformers/hip/flash_attn/fmha_fwd_launch_template.h>

void run_fmha_fwd_hdim32(Launch_params<FMHA_fprop_params> &launch_params) {
    FP16_SWITCH(launch_params.params.is_bf16, ([&] {
        if (launch_params.params.seqlen_k == 128) {
            using Kernel_traits = FMHA_kernel_traits<128, 32, 16, 1, 4, 0x08u, elem_type>;
            run_fmha_fwd_loop<Kernel_traits>(launch_params);
        } else if (launch_params.params.seqlen_k >= 256) {
            using Kernel_traits = FMHA_kernel_traits<256, 32, 16, 1, 4, 0x08u, elem_type>;
            run_fmha_fwd_loop<Kernel_traits>(launch_params);
        }
    }));
}
aa327ee04812b64b300bd1588beb2648280d03da.cu
// Copyright (c) 2022, Tri Dao.
// Splitting the different head dimensions to different files to speed up compilation.

#include <ATen/native/transformers/cuda/flash_attn/fmha_fwd_launch_template.h>

void run_fmha_fwd_hdim32(Launch_params<FMHA_fprop_params> &launch_params) {
    FP16_SWITCH(launch_params.params.is_bf16, ([&] {
        if (launch_params.params.seqlen_k == 128) {
            using Kernel_traits = FMHA_kernel_traits<128, 32, 16, 1, 4, 0x08u, elem_type>;
            run_fmha_fwd_loop<Kernel_traits>(launch_params);
        } else if (launch_params.params.seqlen_k >= 256) {
            using Kernel_traits = FMHA_kernel_traits<256, 32, 16, 1, 4, 0x08u, elem_type>;
            run_fmha_fwd_loop<Kernel_traits>(launch_params);
        }
    }));
}
c4aba6d3229e4054a21a490b662cb5fbd6ab51d6.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>

__global__ void addBlockThread(int* data)
{
    int ind = blockDim.x * blockIdx.x + threadIdx.x;
    int sum = blockIdx.x + threadIdx.x;
    data[ind] = sum;
    printf("%6d %6d %6d\n", blockIdx.x, threadIdx.x, sum);
}

int main()
{
    int num_blocks = 2;
    int num_threads = 8;
    dim3 dimGrid(num_blocks);
    dim3 dimBlock(num_threads);

    int num_ints = num_blocks * num_threads;
    int hostArray[num_ints];
    int* devArray;

    // Allocate memory on the device. devArray is a pointer to the allocated
    // memory.
    hipMalloc((void**)&devArray, sizeof(int) * num_ints);

    // Invoke the device kernel which adds the block and thread indices
    printf("\nValues written to the device array:\n");
    printf("%6s %6s %6s\n", "Block", "Thread", "Sum");
    hipLaunchKernelGGL(( addBlockThread) , dim3(dimGrid), dim3(dimBlock), 0, 0, devArray);

    // Bring the results pointed to by devArray back to hostArray
    hipMemcpy(&hostArray, devArray, sizeof(int) * num_ints, hipMemcpyDeviceToHost);

    // Print the results
    printf("\nValues stored in the host array:\n");
    for (int i = 0; i < num_ints; i++)
        printf("%d ", hostArray[i]);
    printf("\n");

    // Free the device memory
    hipFree(devArray);

    return 0;
}
c4aba6d3229e4054a21a490b662cb5fbd6ab51d6.cu
#include <stdio.h>

__global__ void addBlockThread(int* data)
{
    int ind = blockDim.x * blockIdx.x + threadIdx.x;
    int sum = blockIdx.x + threadIdx.x;
    data[ind] = sum;
    printf("%6d %6d %6d\n", blockIdx.x, threadIdx.x, sum);
}

int main()
{
    int num_blocks = 2;
    int num_threads = 8;
    dim3 dimGrid(num_blocks);
    dim3 dimBlock(num_threads);

    int num_ints = num_blocks * num_threads;
    int hostArray[num_ints];
    int* devArray;

    // Allocate memory on the device. devArray is a pointer to the allocated
    // memory.
    cudaMalloc((void**)&devArray, sizeof(int) * num_ints);

    // Invoke the device kernel which adds the block and thread indices
    printf("\nValues written to the device array:\n");
    printf("%6s %6s %6s\n", "Block", "Thread", "Sum");
    addBlockThread <<<dimGrid, dimBlock>>> (devArray);

    // Bring the results pointed to by devArray back to hostArray
    cudaMemcpy(&hostArray, devArray, sizeof(int) * num_ints, cudaMemcpyDeviceToHost);

    // Print the results
    printf("\nValues stored in the host array:\n");
    for (int i = 0; i < num_ints; i++)
        printf("%d ", hostArray[i]);
    printf("\n");

    // Free the device memory
    cudaFree(devArray);

    return 0;
}
3436cafa0cf8e6ccfb21b11c5ac9f353676b3e47.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Please choose a data type to compile #define DATATYPE 0 #include "../../marvin.hpp" #include <opencv2/core/core.hpp> #include <opencv2/highgui/highgui.hpp> #include <opencv2/imgproc/imgproc.hpp> #include <fstream> int main(int argc, char **argv){ if (argc != 5){ std::cout<<"Usage:"<<std::endl; std::cout<<argv[0]<<" network.json model1.marvin[,model2.marvin,...] categories.txt webcamID"<<std::endl; return 0; } // initialize the network marvin::Net net(argv[1]); net.Malloc(marvin::Testing); std::vector<std::string> models = marvin::getStringVector(argv[2]); for (int m=0;m<models.size();++m) net.loadWeights(models[m]); // where the data is marvin::Response* rData = net.responses[0]; marvin::Response* rResult = net.responses[net.responses.size()-1]; // image net list std::vector<std::string> objectCategories; objectCategories.resize(1000); std::ifstream fclass(argv[3]); for(int i=0;i<1000;++i){ std::getline(fclass,objectCategories[i]); } fclass.close(); // initialize video data feed from webcam cv::VideoCapture capture; capture.open(atoi(argv[4])); int width_webcam = 640; int height_webcam = 480; capture.set(CV_CAP_PROP_FRAME_WIDTH,width_webcam); capture.set(CV_CAP_PROP_FRAME_HEIGHT,height_webcam); if (!capture.isOpened()) { std::cerr << "Failed to open the video device, video file or image sequence!\n" << std::endl; return 1; } marvin::PlaceHolderDataLayer* pDataLayer = (marvin::PlaceHolderDataLayer*)net.layers[0]; int height_network = pDataLayer->dim[2]; int width_network = pDataLayer->dim[3]; int numel_network = width_network*height_network*3; //uint8_t* imageGPU_OCV; marvin::checkCUDA(__LINE__, hipMalloc(&imageGPU_OCV, width_network*height_network*3)); uint8_t* image_resize_memCPU; marvin::checkCUDA(__LINE__, hipHostMalloc( (void**)&image_resize_memCPU, width_network*height_network*3*sizeof(uint8_t), hipHostMallocWriteCombined | hipHostMallocMapped )); uint8_t* image_resize_memGPU; hipHostGetDevicePointer( &image_resize_memGPU, image_resize_memCPU, 0 ); cv::Mat image_original; cv::Mat image_resize(height_network,width_network,CV_8UC3,image_resize_memCPU); uint8_t* imageGPU; marvin::checkCUDA(__LINE__, hipMalloc(&imageGPU, width_network*height_network*3)); // allocate CPU for the host StorageT* rResult_CPU; marvin::checkCUDA(__LINE__, hipHostMalloc( (void**)&rResult_CPU, rResult->numBytes(), hipHostMallocWriteCombined | hipHostMallocMapped )); marvin::Tensor<StorageT>* cpuResult = new marvin::Tensor<StorageT>(rResult->dim, rResult_CPU); // replace the original GPU memory marvin::checkCUDA(__LINE__, hipFree(rResult->dataGPU)); hipHostGetDevicePointer( &(rResult->dataGPU), rResult_CPU, 0 ); std::cout<<"====================================================================================================================================="<<std::endl; // while it is running for(;;){ // read image data, e.g. 
using OpenCV to get an image from webcam marvin::tic(); capture >> image_original; std::cout<<"capture image: "; marvin::toc(); if (image_original.empty()) break; // resize image for the network marvin::tic(); cv::resize(image_original, image_resize, cv::Size(height_network,width_network)); std::cout<<"resize image: "; marvin::toc(); // copy the image from CPU to GPU //hipMemcpy(imageGPU_OCV, image_resize.data, height_network*width_network*3*sizeof(uint8_t), hipMemcpyHostToDevice); // convert the color image from OpenCV format (BGR with channel first) to Marvin format (CHW with RGB) marvin::tic(); marvin::OpenCV_BGR_image_to_Marvin(3, height_network, width_network, image_resize_memGPU, imageGPU); std::cout<<"OpenCV_BGR_image_to_Marvin: "; marvin::toc(); // convert image from uint8_t to StorageT on GPU marvin::tic(); hipLaunchKernelGGL(( marvin::Kernel_convert_to_StorageT_subtract), dim3(marvin::CUDA_GET_BLOCKS(numel_network)), dim3(CUDA_NUM_THREADS) , 0, 0, marvin::CUDA_GET_LOOPS(numel_network), numel_network, numel_network, imageGPU, pDataLayer->meanGPU, rData->dataGPU); std::cout<<"Kernel_convert_to_StorageT_subtract: "; marvin::toc(); // test the network marvin::tic(); net.forward(); std::cout<<"net.forward(): "; marvin::toc(); // read the result from GPU to CPU //marvin::tic(); //cpuResult->readGPU(rResult->dataGPU); //std::cout<<"cpuResult->readGPU: "; //marvin::toc(); // visualize the result or use the result imshow("Marvin webcam demo", image_resize); int iMax = 0; ComputeT vMax = 0; for(int i=0;i<1000;i++){ ComputeT v = CPUStorage2ComputeT(cpuResult->CPUmem[i]); if (v>vMax){ vMax = v; iMax = i; } } std::cout<<objectCategories[iMax]<<std::endl; //std::vector<int> display_dim = {10}; //cpuResult->print(display_dim); // any keyboard input char key = (char)cv::waitKey(30); if (key=='q' || key=='Q' || key==27) break; } marvin::checkCUDA(__LINE__, hipFree(image_resize_memGPU)); free(image_resize_memCPU); marvin::checkCUDA(__LINE__, hipFree(imageGPU)); delete cpuResult; return 0; }
3436cafa0cf8e6ccfb21b11c5ac9f353676b3e47.cu
// Please choose a data type to compile #define DATATYPE 0 #include "../../marvin.hpp" #include <opencv2/core/core.hpp> #include <opencv2/highgui/highgui.hpp> #include <opencv2/imgproc/imgproc.hpp> #include <fstream> int main(int argc, char **argv){ if (argc != 5){ std::cout<<"Usage:"<<std::endl; std::cout<<argv[0]<<" network.json model1.marvin[,model2.marvin,...] categories.txt webcamID"<<std::endl; return 0; } // initialize the network marvin::Net net(argv[1]); net.Malloc(marvin::Testing); std::vector<std::string> models = marvin::getStringVector(argv[2]); for (int m=0;m<models.size();++m) net.loadWeights(models[m]); // where the data is marvin::Response* rData = net.responses[0]; marvin::Response* rResult = net.responses[net.responses.size()-1]; // image net list std::vector<std::string> objectCategories; objectCategories.resize(1000); std::ifstream fclass(argv[3]); for(int i=0;i<1000;++i){ std::getline(fclass,objectCategories[i]); } fclass.close(); // initialize video data feed from webcam cv::VideoCapture capture; capture.open(atoi(argv[4])); int width_webcam = 640; int height_webcam = 480; capture.set(CV_CAP_PROP_FRAME_WIDTH,width_webcam); capture.set(CV_CAP_PROP_FRAME_HEIGHT,height_webcam); if (!capture.isOpened()) { std::cerr << "Failed to open the video device, video file or image sequence!\n" << std::endl; return 1; } marvin::PlaceHolderDataLayer* pDataLayer = (marvin::PlaceHolderDataLayer*)net.layers[0]; int height_network = pDataLayer->dim[2]; int width_network = pDataLayer->dim[3]; int numel_network = width_network*height_network*3; //uint8_t* imageGPU_OCV; marvin::checkCUDA(__LINE__, cudaMalloc(&imageGPU_OCV, width_network*height_network*3)); uint8_t* image_resize_memCPU; marvin::checkCUDA(__LINE__, cudaHostAlloc( (void**)&image_resize_memCPU, width_network*height_network*3*sizeof(uint8_t), cudaHostAllocWriteCombined | cudaHostAllocMapped )); uint8_t* image_resize_memGPU; cudaHostGetDevicePointer( &image_resize_memGPU, image_resize_memCPU, 0 ); cv::Mat image_original; cv::Mat image_resize(height_network,width_network,CV_8UC3,image_resize_memCPU); uint8_t* imageGPU; marvin::checkCUDA(__LINE__, cudaMalloc(&imageGPU, width_network*height_network*3)); // allocate CPU for the host StorageT* rResult_CPU; marvin::checkCUDA(__LINE__, cudaHostAlloc( (void**)&rResult_CPU, rResult->numBytes(), cudaHostAllocWriteCombined | cudaHostAllocMapped )); marvin::Tensor<StorageT>* cpuResult = new marvin::Tensor<StorageT>(rResult->dim, rResult_CPU); // replace the original GPU memory marvin::checkCUDA(__LINE__, cudaFree(rResult->dataGPU)); cudaHostGetDevicePointer( &(rResult->dataGPU), rResult_CPU, 0 ); std::cout<<"====================================================================================================================================="<<std::endl; // while it is running for(;;){ // read image data, e.g. 
using OpenCV to get an image from webcam marvin::tic(); capture >> image_original; std::cout<<"capture image: "; marvin::toc(); if (image_original.empty()) break; // resize image for the network marvin::tic(); cv::resize(image_original, image_resize, cv::Size(height_network,width_network)); std::cout<<"resize image: "; marvin::toc(); // copy the image from CPU to GPU //cudaMemcpy(imageGPU_OCV, image_resize.data, height_network*width_network*3*sizeof(uint8_t), cudaMemcpyHostToDevice); // convert the color image from OpenCV format (BGR with channel first) to Marvin format (CHW with RGB) marvin::tic(); marvin::OpenCV_BGR_image_to_Marvin(3, height_network, width_network, image_resize_memGPU, imageGPU); std::cout<<"OpenCV_BGR_image_to_Marvin: "; marvin::toc(); // convert image from uint8_t to StorageT on GPU marvin::tic(); marvin::Kernel_convert_to_StorageT_subtract<<<marvin::CUDA_GET_BLOCKS(numel_network), CUDA_NUM_THREADS >>>(marvin::CUDA_GET_LOOPS(numel_network), numel_network, numel_network, imageGPU, pDataLayer->meanGPU, rData->dataGPU); std::cout<<"Kernel_convert_to_StorageT_subtract: "; marvin::toc(); // test the network marvin::tic(); net.forward(); std::cout<<"net.forward(): "; marvin::toc(); // read the result from GPU to CPU //marvin::tic(); //cpuResult->readGPU(rResult->dataGPU); //std::cout<<"cpuResult->readGPU: "; //marvin::toc(); // visualize the result or use the result imshow("Marvin webcam demo", image_resize); int iMax = 0; ComputeT vMax = 0; for(int i=0;i<1000;i++){ ComputeT v = CPUStorage2ComputeT(cpuResult->CPUmem[i]); if (v>vMax){ vMax = v; iMax = i; } } std::cout<<objectCategories[iMax]<<std::endl; //std::vector<int> display_dim = {10}; //cpuResult->print(display_dim); // any keyboard input char key = (char)cv::waitKey(30); if (key=='q' || key=='Q' || key==27) break; } marvin::checkCUDA(__LINE__, cudaFree(image_resize_memGPU)); free(image_resize_memCPU); marvin::checkCUDA(__LINE__, cudaFree(imageGPU)); delete cpuResult; return 0; }
4d09d542fa9f5f1269dab5f286859551b3c35893.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
 * This code is part of the hw1 of multicore programming in SYSU
 * Copyright (c) 2020 Hongzheng Chen
 * Email: [email protected]
 *
 * This file is the kernel part of CUDA implementation
 * that calculates the central entropy of each point in a matrix.
 *
 * This program is an optimized implementation using shared memory.
 */

#include "core.h"

#define blockW 16
#define blockH 16
#define RADIUS 2
#define padW (blockW + 2 * RADIUS)
#define padH (blockH + 2 * RADIUS)

/*!
 * Core execution part of CUDA
 * that calculates the central entropy of each point.
 * \param size The size of the input matrix.
 * \param width The width of the input matrix.
 * \param height The height of the input matrix.
 * \param input The input matrix.
 * \param output The output matrix.
 * \return void. Results will be put in output.
 */
__global__ void kernel(int size, int width, int height, float *input, float *output)
{
    // true index (x,y)
    const int x = blockIdx.x * blockW + threadIdx.x - RADIUS;
    const int y = blockIdx.y * blockH + threadIdx.y - RADIUS;
    const int idx = y * width + x;
    // thread index (tx,ty) (with padding)
    const int tx = threadIdx.x;
    const int ty = threadIdx.y;

    // copy data from global memory to shared memory (with padding)
    __shared__ float smem[padH][padW];
    if (x >= 0 && x < width && y >= 0 && y < height)
    {
        smem[ty][tx] = input[idx];
    }
    __syncthreads();

    // only those threads in the window need to be calculated
    if (x >= 0 && x < width && y >= 0 && y < height &&
        tx >= RADIUS && tx < padW - RADIUS && ty >= RADIUS && ty < padH - RADIUS)
    {
        // each thread first counts the histogram of idx
        int cnt[16] = {0}; // histogram
        int valid = 0;
        for (int i = -2; i < 3; ++i)
            for (int j = -2; j < 3; ++j)
            {
                if (y + i >= 0 && y + i < height && x + j >= 0 && x + j < width)
                {
                    int in = smem[ty + i][tx + j];
                    cnt[in]++;
                    valid++;
                }
            }
        // calculate entropy
        float sum = 0;
        for (int i = 0; i < 16; ++i)
        {
            int ni = cnt[i];
            if (ni != 0)
            {
#ifdef LOOKUP
                sum += ni * log_table[ni];
#else
                sum += ni * logf(ni);
#endif
            }
        }
#ifdef LOOKUP
        output[idx] = -sum / valid + log_table[valid];
#else
        output[idx] = -sum / valid + logf(valid);
#endif
    }
}

/*!
 * Wrapper of the CUDA kernel
 * used to be called in the main function
 * \param width The width of the input matrix.
 * \param height The height of the input matrix.
 * \param sample The input matrix.
 * \param result The output matrix.
 * \return void. Results will be put in result.
 */
void cudaCallback(int width, int height, float *sample, float **result)
{
    int size = width * height;
    float *input_d, *output_d;

    // Allocate device memory and copy data from host to device
    CHECK(hipMalloc((void **)&input_d, sizeof(float)*size));
    CHECK(hipMalloc((void **)&output_d, sizeof(float)*size));
    CHECK(hipMemcpy(input_d, sample, sizeof(float)*size, hipMemcpyHostToDevice));

    printf("grid: %d %d\n",divup(width, blockW),divup(height, blockH));
    printf("block size: %d %d\n",blockW,blockH);
    printf("pad block size (thread): %d %d\n",padW,padH);

    // Invoke the device function
    const dim3 grid(divup(width, blockW), divup(height, blockH));
    const dim3 threadBlock(padW, padH);
    hipLaunchKernelGGL(( kernel), dim3(grid), dim3(threadBlock) , 0, 0, size, width, height, input_d, output_d);
    hipDeviceSynchronize();

    // Copy back the results and de-allocate the device memory
    *result = (float *)malloc(sizeof(float)*size);
    CHECK(hipMemcpy(*result, output_d, sizeof(float)*size, hipMemcpyDeviceToHost));
    CHECK(hipFree(input_d));
    CHECK(hipFree(output_d));

    // Note that you don't have to free sample and *result by yourself
}
4d09d542fa9f5f1269dab5f286859551b3c35893.cu
/*
 * This code is part of the hw1 of multicore programming in SYSU
 * Copyright (c) 2020 Hongzheng Chen
 * Email: [email protected]
 *
 * This file is the kernel part of CUDA implementation
 * that calculates the central entropy of each point in a matrix.
 *
 * This program is an optimized implementation using shared memory.
 */

#include "core.h"

#define blockW 16
#define blockH 16
#define RADIUS 2
#define padW (blockW + 2 * RADIUS)
#define padH (blockH + 2 * RADIUS)

/*!
 * Core execution part of CUDA
 * that calculates the central entropy of each point.
 * \param size The size of the input matrix.
 * \param width The width of the input matrix.
 * \param height The height of the input matrix.
 * \param input The input matrix.
 * \param output The output matrix.
 * \return void. Results will be put in output.
 */
__global__ void kernel(int size, int width, int height, float *input, float *output)
{
    // true index (x,y)
    const int x = blockIdx.x * blockW + threadIdx.x - RADIUS;
    const int y = blockIdx.y * blockH + threadIdx.y - RADIUS;
    const int idx = y * width + x;
    // thread index (tx,ty) (with padding)
    const int tx = threadIdx.x;
    const int ty = threadIdx.y;

    // copy data from global memory to shared memory (with padding)
    __shared__ float smem[padH][padW];
    if (x >= 0 && x < width && y >= 0 && y < height)
    {
        smem[ty][tx] = input[idx];
    }
    __syncthreads();

    // only those threads in the window need to be calculated
    if (x >= 0 && x < width && y >= 0 && y < height &&
        tx >= RADIUS && tx < padW - RADIUS && ty >= RADIUS && ty < padH - RADIUS)
    {
        // each thread first counts the histogram of idx
        int cnt[16] = {0}; // histogram
        int valid = 0;
        for (int i = -2; i < 3; ++i)
            for (int j = -2; j < 3; ++j)
            {
                if (y + i >= 0 && y + i < height && x + j >= 0 && x + j < width)
                {
                    int in = smem[ty + i][tx + j];
                    cnt[in]++;
                    valid++;
                }
            }
        // calculate entropy
        float sum = 0;
        for (int i = 0; i < 16; ++i)
        {
            int ni = cnt[i];
            if (ni != 0)
            {
#ifdef LOOKUP
                sum += ni * log_table[ni];
#else
                sum += ni * logf(ni);
#endif
            }
        }
#ifdef LOOKUP
        output[idx] = -sum / valid + log_table[valid];
#else
        output[idx] = -sum / valid + logf(valid);
#endif
    }
}

/*!
 * Wrapper of the CUDA kernel
 * used to be called in the main function
 * \param width The width of the input matrix.
 * \param height The height of the input matrix.
 * \param sample The input matrix.
 * \param result The output matrix.
 * \return void. Results will be put in result.
 */
void cudaCallback(int width, int height, float *sample, float **result)
{
    int size = width * height;
    float *input_d, *output_d;

    // Allocate device memory and copy data from host to device
    CHECK(cudaMalloc((void **)&input_d, sizeof(float)*size));
    CHECK(cudaMalloc((void **)&output_d, sizeof(float)*size));
    CHECK(cudaMemcpy(input_d, sample, sizeof(float)*size, cudaMemcpyHostToDevice));

    printf("grid: %d %d\n",divup(width, blockW),divup(height, blockH));
    printf("block size: %d %d\n",blockW,blockH);
    printf("pad block size (thread): %d %d\n",padW,padH);

    // Invoke the device function
    const dim3 grid(divup(width, blockW), divup(height, blockH));
    const dim3 threadBlock(padW, padH);
    kernel<<< grid, threadBlock >>>(size, width, height, input_d, output_d);
    cudaDeviceSynchronize();

    // Copy back the results and de-allocate the device memory
    *result = (float *)malloc(sizeof(float)*size);
    CHECK(cudaMemcpy(*result, output_d, sizeof(float)*size, cudaMemcpyDeviceToHost));
    CHECK(cudaFree(input_d));
    CHECK(cudaFree(output_d));

    // Note that you don't have to free sample and *result by yourself
}
1329a53e53eb0f15f115ae1f48025a182620658f.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
 * JCuda - Java bindings for NVIDIA CUDA driver and runtime API
 * http://www.jcuda.org
 *
 *
 * This code is based on the NVIDIA 'reduction' CUDA sample,
 * Copyright 1993-2010 NVIDIA Corporation.
 */

extern "C"
__global__ void hello(float *g_idata,unsigned int n)
{
    // perform first level of reduction,
    // reading from global memory, writing to shared memory
    unsigned int tid = threadIdx.x;
    unsigned int i = blockIdx.x*blockDim.x + threadIdx.x;
    unsigned int gridSize = blockDim.x*2*gridDim.x;

    printf("Hello world %d - %d \n",tid,i);
}
1329a53e53eb0f15f115ae1f48025a182620658f.cu
/*
 * JCuda - Java bindings for NVIDIA CUDA driver and runtime API
 * http://www.jcuda.org
 *
 *
 * This code is based on the NVIDIA 'reduction' CUDA sample,
 * Copyright 1993-2010 NVIDIA Corporation.
 */

extern "C"
__global__ void hello(float *g_idata,unsigned int n)
{
    // perform first level of reduction,
    // reading from global memory, writing to shared memory
    unsigned int tid = threadIdx.x;
    unsigned int i = blockIdx.x*blockDim.x + threadIdx.x;
    unsigned int gridSize = blockDim.x*2*gridDim.x;

    printf("Hello world %d - %d \n",tid,i);
}
3e5ccddabaa7697751f2cee88b9b3517fa9b972f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "operators/misc/proposal_op.h" #include "utils/cuda_device.h" namespace dragon { template <typename Dtype> __device__ static int transform_box(Dtype box[], const Dtype dx, const Dtype dy, const Dtype d_log_w, const Dtype d_log_h, const Dtype img_W, const Dtype img_H, const Dtype min_box_W, const Dtype min_box_H) { // width & height of box const Dtype w = box[2] - box[0] + (Dtype)1; const Dtype h = box[3] - box[1] + (Dtype)1; // center location of box const Dtype ctr_x = box[0] + (Dtype)0.5 * w; const Dtype ctr_y = box[1] + (Dtype)0.5 * h; // new center location according to gradient (dx, dy) const Dtype pred_ctr_x = dx * w + ctr_x; const Dtype pred_ctr_y = dy * h + ctr_y; // new width & height according to gradient d(log w), d(log h) const Dtype pred_w = exp(d_log_w) * w; const Dtype pred_h = exp(d_log_h) * h; // update upper-left corner location box[0] = pred_ctr_x - (Dtype)0.5 * pred_w; box[1] = pred_ctr_y - (Dtype)0.5 * pred_h; // update lower-right corner location box[2] = pred_ctr_x + (Dtype)0.5 * pred_w; box[3] = pred_ctr_y + (Dtype)0.5 * pred_h; // adjust new corner locations to be within the image region, box[0] = max((Dtype)0, min(box[0], img_W - (Dtype)1)); box[1] = max((Dtype)0, min(box[1], img_H - (Dtype)1)); box[2] = max((Dtype)0, min(box[2], img_W - (Dtype)1)); box[3] = max((Dtype)0, min(box[3], img_H - (Dtype)1)); // recompute new width & height const Dtype box_w = box[2] - box[0] + (Dtype)1; const Dtype box_h = box[3] - box[1] + (Dtype)1; // check if new box's size >= threshold return (box_w >= min_box_W) * (box_h >= min_box_H); } template <typename Dtype> static void sort_box(Dtype* list_cpu, const int start, const int end, const int num_top) { const Dtype pivot_score = list_cpu[start * 5 + 4]; int left = start + 1, right = end; Dtype temp[5]; while (left <= right) { while (left <= end && list_cpu[left * 5 + 4] >= pivot_score) ++left; while (right > start && list_cpu[right * 5 + 4] <= pivot_score) --right; if (left <= right) { for (int i = 0; i < 5; ++i) { temp[i] = list_cpu[left * 5 + i]; } for (int i = 0; i < 5; ++i) { list_cpu[left * 5 + i] = list_cpu[right * 5 + i]; } for (int i = 0; i < 5; ++i) { list_cpu[right * 5 + i] = temp[i]; } ++left; --right; } } if (right > start) { for (int i = 0; i < 5; ++i) { temp[i] = list_cpu[start * 5 + i]; } for (int i = 0; i < 5; ++i) { list_cpu[start * 5 + i] = list_cpu[right * 5 + i]; } for (int i = 0; i < 5; ++i) { list_cpu[right * 5 + i] = temp[i]; } } if (start < right - 1) { sort_box(list_cpu, start, right - 1, num_top); } if (right + 1 < num_top && right + 1 < end) { sort_box(list_cpu, right + 1, end, num_top); } } template <typename Dtype> __global__ static void enumerate_proposals_gpu(const int nthreads, const Dtype bottom4d[], const Dtype d_anchor4d[], const Dtype anchors[], Dtype proposals[], const int num_anchors, const int bottom_H, const int bottom_W, const Dtype img_H, const Dtype img_W, const Dtype min_box_H, const Dtype min_box_W, const int feat_stride) { CUDA_KERNEL_LOOP(index, nthreads) { const int h = index / num_anchors / bottom_W; const int w = (index / num_anchors) % bottom_W; const int k = index % num_anchors; const Dtype x = w * feat_stride; const Dtype y = h * feat_stride; const Dtype* p_box = d_anchor4d + h * bottom_W + w; const Dtype* p_score = bottom4d + h * bottom_W + w; const int bottom_area = bottom_H * bottom_W; const Dtype dx = p_box[(k * 4 + 0) * bottom_area]; const Dtype dy = p_box[(k * 4 + 1) * 
bottom_area]; const Dtype d_log_w = p_box[(k * 4 + 2) * bottom_area]; const Dtype d_log_h = p_box[(k * 4 + 3) * bottom_area]; Dtype* const p_proposal = proposals + index * 5; p_proposal[0] = x + anchors[k * 4 + 0]; p_proposal[1] = y + anchors[k * 4 + 1]; p_proposal[2] = x + anchors[k * 4 + 2]; p_proposal[3] = y + anchors[k * 4 + 3]; p_proposal[4] = transform_box(p_proposal, dx, dy, d_log_w, d_log_h, img_W, img_H, min_box_W, min_box_H) * p_score[k * bottom_area]; } } template <typename Dtype> __global__ static void retrieve_rois_gpu(const int nthreads, const int item_index, const Dtype proposals[], const int roi_indices[], Dtype rois[], Dtype roi_scores[]) { CUDA_KERNEL_LOOP(index, nthreads) { const Dtype* const proposals_index = proposals + roi_indices[index] * 5; rois[index * 5 + 0] = item_index; rois[index * 5 + 1] = proposals_index[0]; rois[index * 5 + 2] = proposals_index[1]; rois[index * 5 + 3] = proposals_index[2]; rois[index * 5 + 4] = proposals_index[3]; if (roi_scores) { roi_scores[index] = proposals_index[4]; } } } template <typename Dtype> __device__ static Dtype iou(const Dtype A[], const Dtype B[]) { // overlapped region (= box) const Dtype x1 = max(A[0], B[0]); const Dtype y1 = max(A[1], B[1]); const Dtype x2 = min(A[2], B[2]); const Dtype y2 = min(A[3], B[3]); // intersection area const Dtype width = max((Dtype)0, x2 - x1 + (Dtype)1); const Dtype height = max((Dtype)0, y2 - y1 + (Dtype)1); const Dtype area = width * height; // area of A, B const Dtype A_area = (A[2] - A[0] + (Dtype)1) * (A[3] - A[1] + (Dtype)1); const Dtype B_area = (B[2] - B[0] + (Dtype)1) * (B[3] - B[1] + (Dtype)1); // IoU return area / (A_area + B_area - area); } #define DIV_THEN_CEIL(x, y) (((x) + (y) - 1) / (y)) static const int nms_block_size = 64; template <typename Dtype> __global__ static void nms_mask(const Dtype boxes[], unsigned long long mask[], const int num_boxes, const Dtype nms_thresh) { // block region // j = j_start + { 0, ..., dj_end - 1 } // i = i_start + { 0, ..., di_end - 1 } const int i_start = blockIdx.x * nms_block_size; const int di_end = min(num_boxes - i_start, nms_block_size); const int j_start = blockIdx.y * nms_block_size; const int dj_end = min(num_boxes - j_start, nms_block_size); // copy all i-th boxes to GPU cache // i = i_start + { 0, ..., di_end - 1 } __shared__ Dtype boxes_i[nms_block_size * 4]; { const int di = threadIdx.x; if (di < di_end) { boxes_i[di * 4 + 0] = boxes[(i_start + di) * 5 + 0]; boxes_i[di * 4 + 1] = boxes[(i_start + di) * 5 + 1]; boxes_i[di * 4 + 2] = boxes[(i_start + di) * 5 + 2]; boxes_i[di * 4 + 3] = boxes[(i_start + di) * 5 + 3]; } } __syncthreads(); // given j = j_start + dj, // check whether box i is significantly overlapped with box j // (i.e., IoU(box j, box i) > threshold) // for all i = i_start + { 0, ..., di_end - 1 } except for i == j { const int dj = threadIdx.x; if (dj < dj_end) { // box j const Dtype* const box_j = boxes + (j_start + dj) * 5; // mask for significant overlap // if IoU(box j, box i) > threshold, di-th bit = 1 unsigned long long mask_j = 0; // check for all i = i_start + { 0, ..., di_end - 1 } // except for i == j const int di_start = (i_start == j_start) ? 
(dj + 1) : 0; for (int di = di_start; di < di_end; ++di) { // box i const Dtype* const box_i = boxes_i + di * 4; // if IoU(box j, box i) > threshold, di-th bit = 1 if (iou(box_j, box_i) > nms_thresh) { mask_j |= 1ULL << di; } } // mask: "num_boxes x num_blocks" array // for mask[j][bi], "di-th bit = 1" means: // box j is significantly overlapped with box i = i_start + di, // where i_start = bi * block_size { const int num_blocks = DIV_THEN_CEIL(num_boxes, nms_block_size); const int bi = blockIdx.x; mask[(j_start + dj) * num_blocks + bi] = mask_j; } } // endif dj < dj_end } } template <typename Dtype> void nms_gpu(const int num_boxes, const Dtype boxes_gpu[], Tensor* p_mask, int index_out_cpu[], int* const num_out, const int base_index, const Dtype nms_thresh, const int max_num_out) { const int num_blocks = DIV_THEN_CEIL(num_boxes, nms_block_size); { const dim3 blocks(num_blocks, num_blocks); vector<TIndex> mask_shape(2); mask_shape[0] = num_boxes; mask_shape[1] = num_blocks * sizeof(unsigned long long) / sizeof(int); p_mask->Reshape(mask_shape); // find all significantly-overlapped pairs of boxes nms_mask << <blocks, nms_block_size >> >( boxes_gpu, (unsigned long long*)p_mask->template mutable_data<int, CUDAContext>(), num_boxes, nms_thresh); CUDA_POST_KERNEL_CHECK; } // discard i-th box if it is significantly overlapped with // one or more previous (= scored higher) boxes { const unsigned long long* p_mask_cpu = (unsigned long long*)p_mask->mutable_data<int, CPUContext>(); int num_selected = 0; vector<unsigned long long> dead_bit(num_blocks); for (int i = 0; i < num_blocks; ++i) { dead_bit[i] = 0; } for (int i = 0; i < num_boxes; ++i) { const int nblock = i / nms_block_size; const int inblock = i % nms_block_size; if (!(dead_bit[nblock] & (1ULL << inblock))) { index_out_cpu[num_selected++] = base_index + i; const unsigned long long* const mask_i = p_mask_cpu + i * num_blocks; for (int j = nblock; j < num_blocks; ++j) { dead_bit[j] |= mask_i[j]; } if (num_selected == max_num_out) { break; } } } *num_out = num_selected; } } template void nms_gpu(const int num_boxes, const float boxes_gpu[], Tensor* p_mask, int index_out_cpu[], int* const num_out, const int base_index, const float nms_thresh, const int max_num_out); template void nms_gpu(const int num_boxes, const double boxes_gpu[], Tensor* p_mask, int index_out_cpu[], int* const num_out, const int base_index, const double nms_thresh, const int max_num_out); template <class Context> template <typename T> void ProposalOp<Context>::RunWithType() { auto* p_bottom_item = this->input(0).template data<T, CUDAContext>(); auto* p_d_anchor_item = this->input(1).template data<T, CUDAContext>(); auto* p_img_info_cpu = this->input(2).template data<T, CPUContext>(); auto* p_roi_item = this->output(0)->template mutable_data<T, CUDAContext>(); auto* p_score_item = (this->OutputSize() > 1) ? 
this->output(1)->template mutable_data<T, CUDAContext>() : NULL; vector<TIndex> proposals_shape(2), top_shape(2); proposals_shape[0] = 0; proposals_shape[1] = 5; top_shape[0] = 0; top_shape[1] = 5; for (int n = 0; n < this->input(0).dim(0); ++n) { // bottom shape: (2 x num_anchors) x H x W const int bottom_H = this->input(0).dim(2); const int bottom_W = this->input(0).dim(3); // input image height & width const T img_H = p_img_info_cpu[0]; const T img_W = p_img_info_cpu[1]; // scale factor for height & width const T scale_H = p_img_info_cpu[2]; const T scale_W = p_img_info_cpu[3]; // minimum box width & height const T min_box_H = min_size_ * scale_H; const T min_box_W = min_size_ * scale_W; // number of all proposals = num_anchors * H * W const int num_proposals = anchors_.dim(0) * bottom_H * bottom_W; // number of top-n proposals before NMS const int pre_nms_topn = ::min(num_proposals, pre_nms_topn_); // number of final RoIs int num_rois = 0; // enumerate all proposals // num_proposals = num_anchors * H * W // (x1, y1, x2, y2, score) for each proposal // NOTE: for bottom, only foreground scores are passed proposals_shape[0] = num_proposals; proposals_.Reshape(proposals_shape); enumerate_proposals_gpu<T> << <GET_BLOCKS(num_proposals), CUDA_NUM_THREADS >> >(num_proposals, p_bottom_item + num_proposals, p_d_anchor_item, anchors_.template data<T, CUDAContext>(), proposals_.template mutable_data<T, CUDAContext>(), anchors_.dim(0), bottom_H, bottom_W, img_H, img_W, min_box_H, min_box_W, feat_stride_); CUDA_POST_KERNEL_CHECK; sort_box<T>(proposals_.template mutable_data<T, CPUContext>(), 0, num_proposals - 1, pre_nms_topn_); nms_gpu<T>(pre_nms_topn, proposals_.template data<T, CUDAContext>(), &nms_mask_, roi_indices_.template mutable_data<int, CPUContext>(), &num_rois, 0, nms_thresh_, post_nms_topn_); retrieve_rois_gpu<T> << <GET_BLOCKS(num_rois), CUDA_NUM_THREADS >> >(num_rois, n, proposals_.template data<T, CUDAContext>(), roi_indices_.template data<int, CUDAContext>(), p_roi_item, p_score_item); CUDA_POST_KERNEL_CHECK; top_shape[0] += num_rois; } this->output(0)->Reshape(top_shape); if (this->OutputSize() > 1) { top_shape.pop_back(); this->output(1)->Reshape(top_shape); } } template void ProposalOp<CUDAContext>::RunWithType<float>(); }
3e5ccddabaa7697751f2cee88b9b3517fa9b972f.cu
#include "operators/misc/proposal_op.h" #include "utils/cuda_device.h" namespace dragon { template <typename Dtype> __device__ static int transform_box(Dtype box[], const Dtype dx, const Dtype dy, const Dtype d_log_w, const Dtype d_log_h, const Dtype img_W, const Dtype img_H, const Dtype min_box_W, const Dtype min_box_H) { // width & height of box const Dtype w = box[2] - box[0] + (Dtype)1; const Dtype h = box[3] - box[1] + (Dtype)1; // center location of box const Dtype ctr_x = box[0] + (Dtype)0.5 * w; const Dtype ctr_y = box[1] + (Dtype)0.5 * h; // new center location according to gradient (dx, dy) const Dtype pred_ctr_x = dx * w + ctr_x; const Dtype pred_ctr_y = dy * h + ctr_y; // new width & height according to gradient d(log w), d(log h) const Dtype pred_w = exp(d_log_w) * w; const Dtype pred_h = exp(d_log_h) * h; // update upper-left corner location box[0] = pred_ctr_x - (Dtype)0.5 * pred_w; box[1] = pred_ctr_y - (Dtype)0.5 * pred_h; // update lower-right corner location box[2] = pred_ctr_x + (Dtype)0.5 * pred_w; box[3] = pred_ctr_y + (Dtype)0.5 * pred_h; // adjust new corner locations to be within the image region, box[0] = max((Dtype)0, min(box[0], img_W - (Dtype)1)); box[1] = max((Dtype)0, min(box[1], img_H - (Dtype)1)); box[2] = max((Dtype)0, min(box[2], img_W - (Dtype)1)); box[3] = max((Dtype)0, min(box[3], img_H - (Dtype)1)); // recompute new width & height const Dtype box_w = box[2] - box[0] + (Dtype)1; const Dtype box_h = box[3] - box[1] + (Dtype)1; // check if new box's size >= threshold return (box_w >= min_box_W) * (box_h >= min_box_H); } template <typename Dtype> static void sort_box(Dtype* list_cpu, const int start, const int end, const int num_top) { const Dtype pivot_score = list_cpu[start * 5 + 4]; int left = start + 1, right = end; Dtype temp[5]; while (left <= right) { while (left <= end && list_cpu[left * 5 + 4] >= pivot_score) ++left; while (right > start && list_cpu[right * 5 + 4] <= pivot_score) --right; if (left <= right) { for (int i = 0; i < 5; ++i) { temp[i] = list_cpu[left * 5 + i]; } for (int i = 0; i < 5; ++i) { list_cpu[left * 5 + i] = list_cpu[right * 5 + i]; } for (int i = 0; i < 5; ++i) { list_cpu[right * 5 + i] = temp[i]; } ++left; --right; } } if (right > start) { for (int i = 0; i < 5; ++i) { temp[i] = list_cpu[start * 5 + i]; } for (int i = 0; i < 5; ++i) { list_cpu[start * 5 + i] = list_cpu[right * 5 + i]; } for (int i = 0; i < 5; ++i) { list_cpu[right * 5 + i] = temp[i]; } } if (start < right - 1) { sort_box(list_cpu, start, right - 1, num_top); } if (right + 1 < num_top && right + 1 < end) { sort_box(list_cpu, right + 1, end, num_top); } } template <typename Dtype> __global__ static void enumerate_proposals_gpu(const int nthreads, const Dtype bottom4d[], const Dtype d_anchor4d[], const Dtype anchors[], Dtype proposals[], const int num_anchors, const int bottom_H, const int bottom_W, const Dtype img_H, const Dtype img_W, const Dtype min_box_H, const Dtype min_box_W, const int feat_stride) { CUDA_KERNEL_LOOP(index, nthreads) { const int h = index / num_anchors / bottom_W; const int w = (index / num_anchors) % bottom_W; const int k = index % num_anchors; const Dtype x = w * feat_stride; const Dtype y = h * feat_stride; const Dtype* p_box = d_anchor4d + h * bottom_W + w; const Dtype* p_score = bottom4d + h * bottom_W + w; const int bottom_area = bottom_H * bottom_W; const Dtype dx = p_box[(k * 4 + 0) * bottom_area]; const Dtype dy = p_box[(k * 4 + 1) * bottom_area]; const Dtype d_log_w = p_box[(k * 4 + 2) * bottom_area]; const Dtype d_log_h = 
p_box[(k * 4 + 3) * bottom_area]; Dtype* const p_proposal = proposals + index * 5; p_proposal[0] = x + anchors[k * 4 + 0]; p_proposal[1] = y + anchors[k * 4 + 1]; p_proposal[2] = x + anchors[k * 4 + 2]; p_proposal[3] = y + anchors[k * 4 + 3]; p_proposal[4] = transform_box(p_proposal, dx, dy, d_log_w, d_log_h, img_W, img_H, min_box_W, min_box_H) * p_score[k * bottom_area]; } } template <typename Dtype> __global__ static void retrieve_rois_gpu(const int nthreads, const int item_index, const Dtype proposals[], const int roi_indices[], Dtype rois[], Dtype roi_scores[]) { CUDA_KERNEL_LOOP(index, nthreads) { const Dtype* const proposals_index = proposals + roi_indices[index] * 5; rois[index * 5 + 0] = item_index; rois[index * 5 + 1] = proposals_index[0]; rois[index * 5 + 2] = proposals_index[1]; rois[index * 5 + 3] = proposals_index[2]; rois[index * 5 + 4] = proposals_index[3]; if (roi_scores) { roi_scores[index] = proposals_index[4]; } } } template <typename Dtype> __device__ static Dtype iou(const Dtype A[], const Dtype B[]) { // overlapped region (= box) const Dtype x1 = max(A[0], B[0]); const Dtype y1 = max(A[1], B[1]); const Dtype x2 = min(A[2], B[2]); const Dtype y2 = min(A[3], B[3]); // intersection area const Dtype width = max((Dtype)0, x2 - x1 + (Dtype)1); const Dtype height = max((Dtype)0, y2 - y1 + (Dtype)1); const Dtype area = width * height; // area of A, B const Dtype A_area = (A[2] - A[0] + (Dtype)1) * (A[3] - A[1] + (Dtype)1); const Dtype B_area = (B[2] - B[0] + (Dtype)1) * (B[3] - B[1] + (Dtype)1); // IoU return area / (A_area + B_area - area); } #define DIV_THEN_CEIL(x, y) (((x) + (y) - 1) / (y)) static const int nms_block_size = 64; template <typename Dtype> __global__ static void nms_mask(const Dtype boxes[], unsigned long long mask[], const int num_boxes, const Dtype nms_thresh) { // block region // j = j_start + { 0, ..., dj_end - 1 } // i = i_start + { 0, ..., di_end - 1 } const int i_start = blockIdx.x * nms_block_size; const int di_end = min(num_boxes - i_start, nms_block_size); const int j_start = blockIdx.y * nms_block_size; const int dj_end = min(num_boxes - j_start, nms_block_size); // copy all i-th boxes to GPU cache // i = i_start + { 0, ..., di_end - 1 } __shared__ Dtype boxes_i[nms_block_size * 4]; { const int di = threadIdx.x; if (di < di_end) { boxes_i[di * 4 + 0] = boxes[(i_start + di) * 5 + 0]; boxes_i[di * 4 + 1] = boxes[(i_start + di) * 5 + 1]; boxes_i[di * 4 + 2] = boxes[(i_start + di) * 5 + 2]; boxes_i[di * 4 + 3] = boxes[(i_start + di) * 5 + 3]; } } __syncthreads(); // given j = j_start + dj, // check whether box i is significantly overlapped with box j // (i.e., IoU(box j, box i) > threshold) // for all i = i_start + { 0, ..., di_end - 1 } except for i == j { const int dj = threadIdx.x; if (dj < dj_end) { // box j const Dtype* const box_j = boxes + (j_start + dj) * 5; // mask for significant overlap // if IoU(box j, box i) > threshold, di-th bit = 1 unsigned long long mask_j = 0; // check for all i = i_start + { 0, ..., di_end - 1 } // except for i == j const int di_start = (i_start == j_start) ? 
(dj + 1) : 0; for (int di = di_start; di < di_end; ++di) { // box i const Dtype* const box_i = boxes_i + di * 4; // if IoU(box j, box i) > threshold, di-th bit = 1 if (iou(box_j, box_i) > nms_thresh) { mask_j |= 1ULL << di; } } // mask: "num_boxes x num_blocks" array // for mask[j][bi], "di-th bit = 1" means: // box j is significantly overlapped with box i = i_start + di, // where i_start = bi * block_size { const int num_blocks = DIV_THEN_CEIL(num_boxes, nms_block_size); const int bi = blockIdx.x; mask[(j_start + dj) * num_blocks + bi] = mask_j; } } // endif dj < dj_end } } template <typename Dtype> void nms_gpu(const int num_boxes, const Dtype boxes_gpu[], Tensor* p_mask, int index_out_cpu[], int* const num_out, const int base_index, const Dtype nms_thresh, const int max_num_out) { const int num_blocks = DIV_THEN_CEIL(num_boxes, nms_block_size); { const dim3 blocks(num_blocks, num_blocks); vector<TIndex> mask_shape(2); mask_shape[0] = num_boxes; mask_shape[1] = num_blocks * sizeof(unsigned long long) / sizeof(int); p_mask->Reshape(mask_shape); // find all significantly-overlapped pairs of boxes nms_mask << <blocks, nms_block_size >> >( boxes_gpu, (unsigned long long*)p_mask->template mutable_data<int, CUDAContext>(), num_boxes, nms_thresh); CUDA_POST_KERNEL_CHECK; } // discard i-th box if it is significantly overlapped with // one or more previous (= scored higher) boxes { const unsigned long long* p_mask_cpu = (unsigned long long*)p_mask->mutable_data<int, CPUContext>(); int num_selected = 0; vector<unsigned long long> dead_bit(num_blocks); for (int i = 0; i < num_blocks; ++i) { dead_bit[i] = 0; } for (int i = 0; i < num_boxes; ++i) { const int nblock = i / nms_block_size; const int inblock = i % nms_block_size; if (!(dead_bit[nblock] & (1ULL << inblock))) { index_out_cpu[num_selected++] = base_index + i; const unsigned long long* const mask_i = p_mask_cpu + i * num_blocks; for (int j = nblock; j < num_blocks; ++j) { dead_bit[j] |= mask_i[j]; } if (num_selected == max_num_out) { break; } } } *num_out = num_selected; } } template void nms_gpu(const int num_boxes, const float boxes_gpu[], Tensor* p_mask, int index_out_cpu[], int* const num_out, const int base_index, const float nms_thresh, const int max_num_out); template void nms_gpu(const int num_boxes, const double boxes_gpu[], Tensor* p_mask, int index_out_cpu[], int* const num_out, const int base_index, const double nms_thresh, const int max_num_out); template <class Context> template <typename T> void ProposalOp<Context>::RunWithType() { auto* p_bottom_item = this->input(0).template data<T, CUDAContext>(); auto* p_d_anchor_item = this->input(1).template data<T, CUDAContext>(); auto* p_img_info_cpu = this->input(2).template data<T, CPUContext>(); auto* p_roi_item = this->output(0)->template mutable_data<T, CUDAContext>(); auto* p_score_item = (this->OutputSize() > 1) ? 
this->output(1)->template mutable_data<T, CUDAContext>() : NULL; vector<TIndex> proposals_shape(2), top_shape(2); proposals_shape[0] = 0; proposals_shape[1] = 5; top_shape[0] = 0; top_shape[1] = 5; for (int n = 0; n < this->input(0).dim(0); ++n) { // bottom shape: (2 x num_anchors) x H x W const int bottom_H = this->input(0).dim(2); const int bottom_W = this->input(0).dim(3); // input image height & width const T img_H = p_img_info_cpu[0]; const T img_W = p_img_info_cpu[1]; // scale factor for height & width const T scale_H = p_img_info_cpu[2]; const T scale_W = p_img_info_cpu[3]; // minimum box width & height const T min_box_H = min_size_ * scale_H; const T min_box_W = min_size_ * scale_W; // number of all proposals = num_anchors * H * W const int num_proposals = anchors_.dim(0) * bottom_H * bottom_W; // number of top-n proposals before NMS const int pre_nms_topn = std::min(num_proposals, pre_nms_topn_); // number of final RoIs int num_rois = 0; // enumerate all proposals // num_proposals = num_anchors * H * W // (x1, y1, x2, y2, score) for each proposal // NOTE: for bottom, only foreground scores are passed proposals_shape[0] = num_proposals; proposals_.Reshape(proposals_shape); enumerate_proposals_gpu<T> << <GET_BLOCKS(num_proposals), CUDA_NUM_THREADS >> >(num_proposals, p_bottom_item + num_proposals, p_d_anchor_item, anchors_.template data<T, CUDAContext>(), proposals_.template mutable_data<T, CUDAContext>(), anchors_.dim(0), bottom_H, bottom_W, img_H, img_W, min_box_H, min_box_W, feat_stride_); CUDA_POST_KERNEL_CHECK; sort_box<T>(proposals_.template mutable_data<T, CPUContext>(), 0, num_proposals - 1, pre_nms_topn_); nms_gpu<T>(pre_nms_topn, proposals_.template data<T, CUDAContext>(), &nms_mask_, roi_indices_.template mutable_data<int, CPUContext>(), &num_rois, 0, nms_thresh_, post_nms_topn_); retrieve_rois_gpu<T> << <GET_BLOCKS(num_rois), CUDA_NUM_THREADS >> >(num_rois, n, proposals_.template data<T, CUDAContext>(), roi_indices_.template data<int, CUDAContext>(), p_roi_item, p_score_item); CUDA_POST_KERNEL_CHECK; top_shape[0] += num_rois; } this->output(0)->Reshape(top_shape); if (this->OutputSize() > 1) { top_shape.pop_back(); this->output(1)->Reshape(top_shape); } } template void ProposalOp<CUDAContext>::RunWithType<float>(); }
08f79ad5e69f133c0b6d88cd52a5ab560beaf907.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /********************************************************************************** * Numerical Solution for the Cubic Nonlinear Schrodinger Equation in (1+1)D * * using explicit FDTD with second order splitting. * * Coded by: Omar Ashour, Texas A&M University at Qatar, February 2015. * * ********************************************************************************/ #include "../lib/cu_helpers.h" // Grid Parameters #define XN nodes // number of spatial nodes #define TN 100 // number of temporal nodes #define L 10.0 // Spatial Period #define TT 10.0 // Max time #define DX (2*L / XN) // spatial step size #define DT (TT / TN) // temporal step size // Gaussian Pulse Parameters #define A 1.0 #define R 2.0 // Timing parameters #define IRVL 100 // Timing interval. Take a reading every N iterations. // Output files #define PLOT_F "gpu_fdtd_plot.m" #define TIME_F argv[2] // Function Prototypes __global__ void Re_lin_kernel(double *Re, double *Im, double dt, int xn, double dx); __global__ void Im_lin_kernel(double *Re, double *Im, double dt, int xn, double dx); __global__ void nonlin_kernel(double *Re, double *Im, double dt, int xn); int main(int argc, char *argv[]) { // Timing info hipEvent_t begin_event, end_event; hipEventCreate(&begin_event); hipEventCreate(&end_event); // Print basic info about simulation const int nodes = atoi(argv[1]); printf("XN: %d. DX: %f, DT: %f, dt/dx^2: %f\n", XN, DX, DT, DT/(DX*DX)); // Allocate host arrays double *h_x = (double*)malloc(sizeof(double) * XN); double *h_Re = (double*)malloc(sizeof(double) * XN); double *h_Im = (double*)malloc(sizeof(double) * XN); double *h_Re_0 = (double*)malloc(sizeof(double) * XN); double *h_Im_0 = (double*)malloc(sizeof(double) * XN); // Initial conditions on host for(int i = 0; i < XN ; i++) { h_x[i] = (i-XN/2)*DX; h_Re[i] = sqrt(2.0)/(cosh(h_x[i])); h_Im[i] = 0; //h_Re[i] = 2*exp(-(h_x[i]*h_x[i])/2.0/2.0); h_Im_0[i] = h_Im[i]; h_Re_0[i] = h_Re[i]; } // Allocate device arrays and copy from host double *d_Re, *d_Im; CUDAR_SAFE_CALL(hipMalloc(&d_Re, sizeof(double) * XN)); CUDAR_SAFE_CALL(hipMalloc(&d_Im, sizeof(double) * XN)); CUDAR_SAFE_CALL(hipMemcpy(d_Re, h_Re, sizeof(double) * XN, hipMemcpyHostToDevice)); CUDAR_SAFE_CALL(hipMemcpy(d_Im, h_Im, sizeof(double) * XN, hipMemcpyHostToDevice)); // Initialize the grid dim3 threadsPerBlock(128,1,1); dim3 blocksPerGrid((XN + 127)/128,1,1); // Timing starts here hipEventRecord(begin_event, 0); // Start time evolution for (int i = 1; i <= TN; i++) { // Solve linear part hipLaunchKernelGGL(( Re_lin_kernel), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_Re, d_Im, DT*0.5, XN, DX); #if CUDAR_ERROR_CHECKING CUDAR_SAFE_CALL(hipPeekAtLastError()); #endif // CUDAR_ERROR_CHECKING hipLaunchKernelGGL(( Im_lin_kernel), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_Re, d_Im, DT*0.5, XN, DX); #if CUDAR_ERROR_CHECKING CUDAR_SAFE_CALL(hipPeekAtLastError()); #endif // CUDAR_ERROR_CHECKING // Solve nonlinear part hipLaunchKernelGGL(( nonlin_kernel), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_Re, d_Im, DT, XN); #if CUDAR_ERROR_CHECKING CUDAR_SAFE_CALL(hipPeekAtLastError()); #endif // CUDAR_ERROR_CHECKING // Solve linear part hipLaunchKernelGGL(( Re_lin_kernel), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_Re, d_Im, DT*0.5, XN, DX); #if CUDAR_ERROR_CHECKING CUDAR_SAFE_CALL(hipPeekAtLastError()); #endif // CUDAR_ERROR_CHECKING hipLaunchKernelGGL(( Im_lin_kernel), dim3(blocksPerGrid), 
dim3(threadsPerBlock), 0, 0, d_Re, d_Im, DT*0.5, XN, DX); #if CUDAR_ERROR_CHECKING CUDAR_SAFE_CALL(hipPeekAtLastError()); #endif // CUDAR_ERROR_CHECKING } float time_value; hipEventRecord(end_event, 0); hipEventSynchronize(end_event); hipEventElapsedTime(&time_value, begin_event, end_event); // Print time to file FILE *fp = fopen(TIME_F, "a"); fprintf(fp, "%f, ", time_value); fclose(fp); // Copy results to device CUDAR_SAFE_CALL(hipMemcpy(h_Re, d_Re, sizeof(double)*XN, hipMemcpyDeviceToHost)); CUDAR_SAFE_CALL(hipMemcpy(h_Im, d_Im, sizeof(double)*XN, hipMemcpyDeviceToHost)); // Plot results m_plot_1d(h_Re_0, h_Im_0, h_Re, h_Im, L, XN, PLOT_F); // Clean up free(h_Re); free(h_Im); free(h_Re_0); free(h_Im_0); free(h_x); CUDAR_SAFE_CALL(hipFree(d_Re)); CUDAR_SAFE_CALL(hipFree(d_Im)); return 0; } __global__ void Re_lin_kernel(double *Re, double *Im, double dt, int xn, double dx) { int i = threadIdx.x + blockIdx.x * blockDim.x; // Avoid first and last point (boundary conditions) if (i >= xn - 1 || i == 0) return; Re[i] = Re[i] - dt/(dx*dx)*(Im[i+1] - 2*Im[i] + Im[i-1]); } __global__ void Im_lin_kernel(double *Re, double *Im, double dt, int xn, double dx) { int i = threadIdx.x + blockIdx.x * blockDim.x; // Avoid first and last point (boundary conditions) if (i >= xn - 1 || i == 0) return; Im[i] = Im[i] + dt/(dx*dx)*(Re[i+1] - 2*Re[i] + Re[i-1]); } __global__ void nonlin_kernel(double *Re, double *Im, double dt, int xn) { int i = threadIdx.x + blockIdx.x * blockDim.x; // Avoid first and last point (boundary conditions) if (i >= xn - 1 || i == 0) return; double Rp = Re[i]; double Ip = Im[i]; double A2 = Rp*Rp+Ip*Ip; Re[i] = Rp*cos(A2*dt) - Ip*sin(A2*dt); Im[i] = Rp*sin(A2*dt) + Ip*cos(A2*dt); }
08f79ad5e69f133c0b6d88cd52a5ab560beaf907.cu
/********************************************************************************** * Numerical Solution for the Cubic Nonlinear Schrodinger Equation in (1+1)D * * using explicit FDTD with second order splitting. * * Coded by: Omar Ashour, Texas A&M University at Qatar, February 2015. * * ********************************************************************************/ #include "../lib/cu_helpers.h" // Grid Parameters #define XN nodes // number of spatial nodes #define TN 100 // number of temporal nodes #define L 10.0 // Spatial Period #define TT 10.0 // Max time #define DX (2*L / XN) // spatial step size #define DT (TT / TN) // temporal step size // Gaussian Pulse Parameters #define A 1.0 #define R 2.0 // Timing parameters #define IRVL 100 // Timing interval. Take a reading every N iterations. // Output files #define PLOT_F "gpu_fdtd_plot.m" #define TIME_F argv[2] // Function Prototypes __global__ void Re_lin_kernel(double *Re, double *Im, double dt, int xn, double dx); __global__ void Im_lin_kernel(double *Re, double *Im, double dt, int xn, double dx); __global__ void nonlin_kernel(double *Re, double *Im, double dt, int xn); int main(int argc, char *argv[]) { // Timing info cudaEvent_t begin_event, end_event; cudaEventCreate(&begin_event); cudaEventCreate(&end_event); // Print basic info about simulation const int nodes = atoi(argv[1]); printf("XN: %d. DX: %f, DT: %f, dt/dx^2: %f\n", XN, DX, DT, DT/(DX*DX)); // Allocate host arrays double *h_x = (double*)malloc(sizeof(double) * XN); double *h_Re = (double*)malloc(sizeof(double) * XN); double *h_Im = (double*)malloc(sizeof(double) * XN); double *h_Re_0 = (double*)malloc(sizeof(double) * XN); double *h_Im_0 = (double*)malloc(sizeof(double) * XN); // Initial conditions on host for(int i = 0; i < XN ; i++) { h_x[i] = (i-XN/2)*DX; h_Re[i] = sqrt(2.0)/(cosh(h_x[i])); h_Im[i] = 0; //h_Re[i] = 2*exp(-(h_x[i]*h_x[i])/2.0/2.0); h_Im_0[i] = h_Im[i]; h_Re_0[i] = h_Re[i]; } // Allocate device arrays and copy from host double *d_Re, *d_Im; CUDAR_SAFE_CALL(cudaMalloc(&d_Re, sizeof(double) * XN)); CUDAR_SAFE_CALL(cudaMalloc(&d_Im, sizeof(double) * XN)); CUDAR_SAFE_CALL(cudaMemcpy(d_Re, h_Re, sizeof(double) * XN, cudaMemcpyHostToDevice)); CUDAR_SAFE_CALL(cudaMemcpy(d_Im, h_Im, sizeof(double) * XN, cudaMemcpyHostToDevice)); // Initialize the grid dim3 threadsPerBlock(128,1,1); dim3 blocksPerGrid((XN + 127)/128,1,1); // Timing starts here cudaEventRecord(begin_event, 0); // Start time evolution for (int i = 1; i <= TN; i++) { // Solve linear part Re_lin_kernel<<<blocksPerGrid, threadsPerBlock>>>(d_Re, d_Im, DT*0.5, XN, DX); #if CUDAR_ERROR_CHECKING CUDAR_SAFE_CALL(cudaPeekAtLastError()); #endif // CUDAR_ERROR_CHECKING Im_lin_kernel<<<blocksPerGrid, threadsPerBlock>>>(d_Re, d_Im, DT*0.5, XN, DX); #if CUDAR_ERROR_CHECKING CUDAR_SAFE_CALL(cudaPeekAtLastError()); #endif // CUDAR_ERROR_CHECKING // Solve nonlinear part nonlin_kernel<<<blocksPerGrid, threadsPerBlock>>>(d_Re, d_Im, DT, XN); #if CUDAR_ERROR_CHECKING CUDAR_SAFE_CALL(cudaPeekAtLastError()); #endif // CUDAR_ERROR_CHECKING // Solve linear part Re_lin_kernel<<<blocksPerGrid, threadsPerBlock>>>(d_Re, d_Im, DT*0.5, XN, DX); #if CUDAR_ERROR_CHECKING CUDAR_SAFE_CALL(cudaPeekAtLastError()); #endif // CUDAR_ERROR_CHECKING Im_lin_kernel<<<blocksPerGrid, threadsPerBlock>>>(d_Re, d_Im, DT*0.5, XN, DX); #if CUDAR_ERROR_CHECKING CUDAR_SAFE_CALL(cudaPeekAtLastError()); #endif // CUDAR_ERROR_CHECKING } float time_value; cudaEventRecord(end_event, 0); cudaEventSynchronize(end_event); cudaEventElapsedTime(&time_value, 
begin_event, end_event); // Print time to file FILE *fp = fopen(TIME_F, "a"); fprintf(fp, "%f, ", time_value); fclose(fp); // Copy results to device CUDAR_SAFE_CALL(cudaMemcpy(h_Re, d_Re, sizeof(double)*XN, cudaMemcpyDeviceToHost)); CUDAR_SAFE_CALL(cudaMemcpy(h_Im, d_Im, sizeof(double)*XN, cudaMemcpyDeviceToHost)); // Plot results m_plot_1d(h_Re_0, h_Im_0, h_Re, h_Im, L, XN, PLOT_F); // Clean up free(h_Re); free(h_Im); free(h_Re_0); free(h_Im_0); free(h_x); CUDAR_SAFE_CALL(cudaFree(d_Re)); CUDAR_SAFE_CALL(cudaFree(d_Im)); return 0; } __global__ void Re_lin_kernel(double *Re, double *Im, double dt, int xn, double dx) { int i = threadIdx.x + blockIdx.x * blockDim.x; // Avoid first and last point (boundary conditions) if (i >= xn - 1 || i == 0) return; Re[i] = Re[i] - dt/(dx*dx)*(Im[i+1] - 2*Im[i] + Im[i-1]); } __global__ void Im_lin_kernel(double *Re, double *Im, double dt, int xn, double dx) { int i = threadIdx.x + blockIdx.x * blockDim.x; // Avoid first and last point (boundary conditions) if (i >= xn - 1 || i == 0) return; Im[i] = Im[i] + dt/(dx*dx)*(Re[i+1] - 2*Re[i] + Re[i-1]); } __global__ void nonlin_kernel(double *Re, double *Im, double dt, int xn) { int i = threadIdx.x + blockIdx.x * blockDim.x; // Avoid first and last point (boundary conditions) if (i >= xn - 1 || i == 0) return; double Rp = Re[i]; double Ip = Im[i]; double A2 = Rp*Rp+Ip*Ip; Re[i] = Rp*cos(A2*dt) - Ip*sin(A2*dt); Im[i] = Rp*sin(A2*dt) + Ip*cos(A2*dt); }
284e3100e29fcfd53e984dfc38caa6982f348d8e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // SPDX-FileCopyrightText: 2020 CERN // SPDX-License-Identifier: Apache-2.0 #include <iostream> #include <CopCore/Ranluxpp.h> #include <AdePT/SparseVector.h> #include <VecGeom/base/Stopwatch.h> /** The test fills a sparse vector with tracks having random energy. It demonstrates allocation, concurrent distribution of elements, selection based on a lambda predicate function, gathering of used slots in a selection vector, compacting elements by copy-constructing in a second sparse vector. */ /// A simple track struct Track_t { using Rng_t = RanluxppDouble; Rng_t rng; float energy{0.}; bool alive{true}; // a default constructor is not necessarily needed // constructor parameters (or copy constructor) can be passed via SparseVectorImplementation::next_free() __host__ __device__ Track_t(unsigned itr) { rng.SetSeed(itr); energy = (float)rng.Rndm(); } }; // some utility kernels for filling the vector concurrently and printing info (vector resides on device) __global__ void fill_tracks(adept::SparseVectorInterface<Track_t> *vect1_ptr, int num_elem) { int tid = blockIdx.x * blockDim.x + threadIdx.x; if (tid >= num_elem) return; // parameters of next_free are passed to the matching constructor called in place Track_t *track = vect1_ptr->next_free(tid); if (!track) COPCORE_EXCEPTION("Out of vector space"); } __global__ void print_tracks(adept::SparseVectorInterface<Track_t> *tracks, int start, int num) { const int nshared = tracks->size(); printf(" data: "); for (int i = start; i < start + num && i < nshared; ++i) { printf(" %.2f", (*tracks)[i].energy); if (!tracks->is_used(i)) printf("x"); } printf("...\n"); } __global__ void print_selected_tracks(adept::SparseVectorInterface<Track_t> *tracks, const unsigned *selection, const unsigned *n_selected, int start, int num) { printf("selected %d tracks:\n > ", *n_selected); int limit = min(*n_selected, start + num); for (int i = start; i < limit; ++i) { printf("%.2f ", (*tracks)[selection[i]].energy); } printf("...\n"); } __global__ void reset_selection(unsigned *nselected) { *nselected = 0; } template <typename Vector_t> __global__ void print_vector(int iarr, Vector_t *vect) { printf("=== vect %d: fNshared=%lu/%lu fNused=%lu fNbooked=%lu - shared=%.1f%% sparsity=%.1f%%\n", iarr, vect->size(), vect->capacity(), vect->size_used(), vect->size_booked(), 100. * vect->get_shared_fraction(), 100. * vect->get_sparsity()); } template <typename Vector_t, typename Function> __global__ void get_vector_data(const Vector_t *vect, Function vect_func, int *data) { // data should be allocated in managed memory, vect_func should call a getter of Vector_t *data = vect_func(vect); } /// Test performance-critical SparseVector operations, executing as kernels. The syncronization /// operations exposed are only for timing purposes, the operations are valid also without. //____________________________________________________________________________________________________ int main(void) { constexpr int VectorSize = 1 << 20; int ntracks = 1000000; using Vector_t = adept::SparseVector<Track_t, VectorSize>; // 1<<16 is the default vector size if parameter omitted using VectorInterface = adept::SparseVectorInterface<Track_t>; vecgeom::Stopwatch timer; Vector_t *vect1_ptr_d, *vect2_ptr_d; unsigned *sel_vector_d; unsigned *nselected_hd; printf("Running on %d tracks. 
Size of adept::SparseVector<Track_t, %d> = %lu\n", ntracks, VectorSize, sizeof(Vector_t)); // allocation can be done on device or managed memory COPCORE_CUDA_CHECK(hipMalloc(&vect1_ptr_d, sizeof(Vector_t))); COPCORE_CUDA_CHECK(hipMalloc(&vect2_ptr_d, sizeof(Vector_t))); COPCORE_CUDA_CHECK(hipMalloc(&sel_vector_d, VectorSize * sizeof(unsigned))); COPCORE_CUDA_CHECK(hipMallocManaged(&nselected_hd, sizeof(unsigned))); // managed variables to read state from device int *nshared, *nused, *nselected; COPCORE_CUDA_CHECK(hipMallocManaged(&nshared, 2 * sizeof(int))); COPCORE_CUDA_CHECK(hipMallocManaged(&nused, 2 * sizeof(int))); COPCORE_CUDA_CHECK(hipMallocManaged(&nselected, 2 * sizeof(int))); // static allocator for convenience Vector_t::MakeInstanceAt<copcore::BackendType::CUDA>(vect1_ptr_d); Vector_t::MakeInstanceAt<copcore::BackendType::CUDA>(vect2_ptr_d); hipLaunchKernelGGL(( reset_selection), dim3(1), dim3(1), 0, 0, nselected_hd); // Construct and distribute tracks concurrently COPCORE_CUDA_CHECK(hipDeviceSynchronize()); timer.Start(); hipLaunchKernelGGL(( fill_tracks), dim3((ntracks + 127) / 128), dim3(128), 0, 0, vect1_ptr_d, ntracks); hipLaunchKernelGGL(( get_vector_data), dim3(1), dim3(1), 0, 0, vect1_ptr_d, [] __device__(const VectorInterface *arr) { return arr->size(); }, nshared); COPCORE_CUDA_CHECK(hipDeviceSynchronize()); auto time_fill = timer.Stop(); std::cout << "time_construct_and_share = " << time_fill << std::endl; hipLaunchKernelGGL(( print_vector), dim3(1), dim3(1), 0, 0, 1, vect1_ptr_d); hipLaunchKernelGGL(( print_tracks), dim3(1), dim3(1), 0, 0, vect1_ptr_d, 0, 32); // print just first 32 tracks int nfilled = *nshared; if (nfilled != ntracks) { std::cerr << "Error in next_free.\n"; return 1; } // Select tracks with energy < 0.2 // *** note that we can use any device predicate function with the prototype: // __device__ bool func(int, const Vector_t*) // index in the vector and const vector pointer COPCORE_CUDA_CHECK(hipDeviceSynchronize()); timer.Start(); auto select_func = [] __device__(int i, const VectorInterface *arr) { return ((*arr)[i].energy < 0.2); }; VectorInterface::select(vect1_ptr_d, select_func, sel_vector_d, nselected_hd); COPCORE_CUDA_CHECK(hipDeviceSynchronize()); auto time_select = timer.Stop(); int nselected1 = *nselected_hd; std::cout << "\ntime_select for " << nselected1 << " tracks with (energy < 0.2) = " << time_select << std::endl; hipLaunchKernelGGL(( print_vector), dim3(1), dim3(1), 0, 0, 1, vect1_ptr_d); hipLaunchKernelGGL(( print_selected_tracks), dim3(1), dim3(1), 0, 0, vect1_ptr_d, sel_vector_d, nselected_hd, 0, 32); if (nselected1 == 0) { std::cerr << "Error in select: 0 tracks.\n"; return 2; } // Release the tracks we just selected, creating holes in the vector COPCORE_CUDA_CHECK(hipDeviceSynchronize()); timer.Start(); VectorInterface::release_selected(vect1_ptr_d, sel_vector_d, nselected_hd); hipLaunchKernelGGL(( get_vector_data), dim3(1), dim3(1), 0, 0, vect1_ptr_d, [] __device__(const VectorInterface *arr) { return arr->size_used(); }, nused); COPCORE_CUDA_CHECK(hipDeviceSynchronize()); auto time_release = timer.Stop(); std::cout << "\ntime_release_selected = " << time_release << " nused = " << *nused << std::endl; hipLaunchKernelGGL(( print_vector), dim3(1), dim3(1), 0, 0, 1, vect1_ptr_d); hipLaunchKernelGGL(( print_tracks), dim3(1), dim3(1), 0, 0, vect1_ptr_d, 0, 32); int nused_after_release = *nused; if ((nselected1 + nused_after_release) != ntracks) { std::cerr << "Error in release_selected.\n"; return 3; } // Demonstrate 
select_and_move functionality COPCORE_CUDA_CHECK(hipDeviceSynchronize()); timer.Start(); // a fuction selecting tracks having energy > 0.8. We move these tracks in a second vector auto select2_func = [] __device__(int i, const VectorInterface *arr) { return ((*arr)[i].energy > 0.8); }; //=== VectorInterface::select_and_move(vect1_ptr_d, select2_func, vect2_ptr_d, nselected_hd); //=== auto time_select_and_move = timer.Stop(); COPCORE_CUDA_CHECK(hipDeviceSynchronize()); hipLaunchKernelGGL(( get_vector_data), dim3(1), dim3(1), 0, 0, vect1_ptr_d, [] __device__(const VectorInterface *arr) { return arr->size_used(); }, &nused[0]); hipLaunchKernelGGL(( get_vector_data), dim3(1), dim3(1), 0, 0, vect2_ptr_d, [] __device__(const VectorInterface *arr) { return arr->size_used(); }, &nused[1]); COPCORE_CUDA_CHECK(hipDeviceSynchronize()); std::cout << "\ntime_select_and_move (energy > 0.8) = " << time_select_and_move << std::endl; hipLaunchKernelGGL(( print_vector), dim3(1), dim3(1), 0, 0, 1, vect1_ptr_d); hipLaunchKernelGGL(( print_tracks), dim3(1), dim3(1), 0, 0, vect1_ptr_d, 0, 32); hipLaunchKernelGGL(( print_vector), dim3(1), dim3(1), 0, 0, 2, vect2_ptr_d); hipLaunchKernelGGL(( print_tracks), dim3(1), dim3(1), 0, 0, vect2_ptr_d, 0, 32); // Check the moved tracks int nused_after_move = nused[0]; int nused_after_move2 = nused[1]; if ((nused_after_release - nused_after_move) != nused_after_move2) { std::cerr << "Error in select_and_move.\n"; return 4; } // Demonstrate a common selection method that should be used when the vector is fragmented. COPCORE_CUDA_CHECK(hipDeviceSynchronize()); timer.Start(); VectorInterface::select_used(vect1_ptr_d, sel_vector_d, nselected_hd); COPCORE_CUDA_CHECK(hipDeviceSynchronize()); auto time_select_used = timer.Stop(); std::cout << "\ntime_select_used = " << time_select_used << std::endl; hipLaunchKernelGGL(( print_selected_tracks), dim3(1), dim3(1), 0, 0, vect1_ptr_d, sel_vector_d, nselected_hd, 0, 32); if (*nselected_hd != nused_after_move) { std::cerr << "Error in select_used.\n"; return 5; } // Compact used elements by copying them into a destination vector. The stage above should be preferred // if the sparsity is small, while this one is preffered for high sparsity. See SparseVector header // for the definition of sparsity, shared and selected fractions. COPCORE_CUDA_CHECK(hipDeviceSynchronize()); timer.Start(); VectorInterface::compact(vect1_ptr_d, vect2_ptr_d, nselected_hd); COPCORE_CUDA_CHECK(hipDeviceSynchronize()); auto time_compact = timer.Stop(); hipLaunchKernelGGL(( get_vector_data), dim3(1), dim3(1), 0, 0, vect1_ptr_d, [] __device__(const VectorInterface *arr) { return arr->size_used(); }, &nused[0]); hipLaunchKernelGGL(( get_vector_data), dim3(1), dim3(1), 0, 0, vect2_ptr_d, [] __device__(const VectorInterface *arr) { return arr->size_used(); }, &nused[1]); COPCORE_CUDA_CHECK(hipDeviceSynchronize()); std::cout << "\ntime_compact = " << time_compact << std::endl; hipLaunchKernelGGL(( print_vector), dim3(1), dim3(1), 0, 0, 1, vect1_ptr_d); hipLaunchKernelGGL(( print_vector), dim3(1), dim3(1), 0, 0, 2, vect2_ptr_d); hipLaunchKernelGGL(( print_tracks), dim3(1), dim3(1), 0, 0, vect2_ptr_d, 0, 32); COPCORE_CUDA_CHECK(hipDeviceSynchronize()); if ((nused[0] != 0) || (nused[1] != nused_after_move2 + nused_after_move)) { std::cerr << "Error in compact.\n"; return 6; } COPCORE_CUDA_CHECK(hipFree(vect1_ptr_d)); COPCORE_CUDA_CHECK(hipFree(vect2_ptr_d)); COPCORE_CUDA_CHECK(hipFree(sel_vector_d)); return 0; }
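Any device lambda with the (int, const VectorInterface*) signature can be passed to VectorInterface::select in the same way as select_func above. A small illustrative variant, not part of the test, that would gather the slots of tracks flagged as not alive:

auto select_dead = [] __device__(int i, const VectorInterface *arr) {
    return !(*arr)[i].alive;   // Track_t::alive is a member of the struct defined above
};
VectorInterface::select(vect1_ptr_d, select_dead, sel_vector_d, nselected_hd);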
284e3100e29fcfd53e984dfc38caa6982f348d8e.cu
// SPDX-FileCopyrightText: 2020 CERN // SPDX-License-Identifier: Apache-2.0 #include <iostream> #include <CopCore/Ranluxpp.h> #include <AdePT/SparseVector.h> #include <VecGeom/base/Stopwatch.h> /** The test fills a sparse vector with tracks having random energy. It demonstrates allocation, concurrent distribution of elements, selection based on a lambda predicate function, gathering of used slots in a selection vector, compacting elements by copy-constructing in a second sparse vector. */ /// A simple track struct Track_t { using Rng_t = RanluxppDouble; Rng_t rng; float energy{0.}; bool alive{true}; // a default constructor is not necessarily needed // constructor parameters (or copy constructor) can be passed via SparseVectorImplementation::next_free() __host__ __device__ Track_t(unsigned itr) { rng.SetSeed(itr); energy = (float)rng.Rndm(); } }; // some utility kernels for filling the vector concurrently and printing info (vector resides on device) __global__ void fill_tracks(adept::SparseVectorInterface<Track_t> *vect1_ptr, int num_elem) { int tid = blockIdx.x * blockDim.x + threadIdx.x; if (tid >= num_elem) return; // parameters of next_free are passed to the matching constructor called in place Track_t *track = vect1_ptr->next_free(tid); if (!track) COPCORE_EXCEPTION("Out of vector space"); } __global__ void print_tracks(adept::SparseVectorInterface<Track_t> *tracks, int start, int num) { const int nshared = tracks->size(); printf(" data: "); for (int i = start; i < start + num && i < nshared; ++i) { printf(" %.2f", (*tracks)[i].energy); if (!tracks->is_used(i)) printf("x"); } printf("...\n"); } __global__ void print_selected_tracks(adept::SparseVectorInterface<Track_t> *tracks, const unsigned *selection, const unsigned *n_selected, int start, int num) { printf("selected %d tracks:\n > ", *n_selected); int limit = min(*n_selected, start + num); for (int i = start; i < limit; ++i) { printf("%.2f ", (*tracks)[selection[i]].energy); } printf("...\n"); } __global__ void reset_selection(unsigned *nselected) { *nselected = 0; } template <typename Vector_t> __global__ void print_vector(int iarr, Vector_t *vect) { printf("=== vect %d: fNshared=%lu/%lu fNused=%lu fNbooked=%lu - shared=%.1f%% sparsity=%.1f%%\n", iarr, vect->size(), vect->capacity(), vect->size_used(), vect->size_booked(), 100. * vect->get_shared_fraction(), 100. * vect->get_sparsity()); } template <typename Vector_t, typename Function> __global__ void get_vector_data(const Vector_t *vect, Function vect_func, int *data) { // data should be allocated in managed memory, vect_func should call a getter of Vector_t *data = vect_func(vect); } /// Test performance-critical SparseVector operations, executing as kernels. The syncronization /// operations exposed are only for timing purposes, the operations are valid also without. //____________________________________________________________________________________________________ int main(void) { constexpr int VectorSize = 1 << 20; int ntracks = 1000000; using Vector_t = adept::SparseVector<Track_t, VectorSize>; // 1<<16 is the default vector size if parameter omitted using VectorInterface = adept::SparseVectorInterface<Track_t>; vecgeom::Stopwatch timer; Vector_t *vect1_ptr_d, *vect2_ptr_d; unsigned *sel_vector_d; unsigned *nselected_hd; printf("Running on %d tracks. 
Size of adept::SparseVector<Track_t, %d> = %lu\n", ntracks, VectorSize, sizeof(Vector_t)); // allocation can be done on device or managed memory COPCORE_CUDA_CHECK(cudaMalloc(&vect1_ptr_d, sizeof(Vector_t))); COPCORE_CUDA_CHECK(cudaMalloc(&vect2_ptr_d, sizeof(Vector_t))); COPCORE_CUDA_CHECK(cudaMalloc(&sel_vector_d, VectorSize * sizeof(unsigned))); COPCORE_CUDA_CHECK(cudaMallocManaged(&nselected_hd, sizeof(unsigned))); // managed variables to read state from device int *nshared, *nused, *nselected; COPCORE_CUDA_CHECK(cudaMallocManaged(&nshared, 2 * sizeof(int))); COPCORE_CUDA_CHECK(cudaMallocManaged(&nused, 2 * sizeof(int))); COPCORE_CUDA_CHECK(cudaMallocManaged(&nselected, 2 * sizeof(int))); // static allocator for convenience Vector_t::MakeInstanceAt<copcore::BackendType::CUDA>(vect1_ptr_d); Vector_t::MakeInstanceAt<copcore::BackendType::CUDA>(vect2_ptr_d); reset_selection<<<1, 1>>>(nselected_hd); // Construct and distribute tracks concurrently COPCORE_CUDA_CHECK(cudaDeviceSynchronize()); timer.Start(); fill_tracks<<<(ntracks + 127) / 128, 128>>>(vect1_ptr_d, ntracks); get_vector_data<<<1, 1>>>(vect1_ptr_d, [] __device__(const VectorInterface *arr) { return arr->size(); }, nshared); COPCORE_CUDA_CHECK(cudaDeviceSynchronize()); auto time_fill = timer.Stop(); std::cout << "time_construct_and_share = " << time_fill << std::endl; print_vector<<<1, 1>>>(1, vect1_ptr_d); print_tracks<<<1, 1>>>(vect1_ptr_d, 0, 32); // print just first 32 tracks int nfilled = *nshared; if (nfilled != ntracks) { std::cerr << "Error in next_free.\n"; return 1; } // Select tracks with energy < 0.2 // *** note that we can use any device predicate function with the prototype: // __device__ bool func(int, const Vector_t*) // index in the vector and const vector pointer COPCORE_CUDA_CHECK(cudaDeviceSynchronize()); timer.Start(); auto select_func = [] __device__(int i, const VectorInterface *arr) { return ((*arr)[i].energy < 0.2); }; VectorInterface::select(vect1_ptr_d, select_func, sel_vector_d, nselected_hd); COPCORE_CUDA_CHECK(cudaDeviceSynchronize()); auto time_select = timer.Stop(); int nselected1 = *nselected_hd; std::cout << "\ntime_select for " << nselected1 << " tracks with (energy < 0.2) = " << time_select << std::endl; print_vector<<<1, 1>>>(1, vect1_ptr_d); print_selected_tracks<<<1, 1>>>(vect1_ptr_d, sel_vector_d, nselected_hd, 0, 32); if (nselected1 == 0) { std::cerr << "Error in select: 0 tracks.\n"; return 2; } // Release the tracks we just selected, creating holes in the vector COPCORE_CUDA_CHECK(cudaDeviceSynchronize()); timer.Start(); VectorInterface::release_selected(vect1_ptr_d, sel_vector_d, nselected_hd); get_vector_data<<<1, 1>>>(vect1_ptr_d, [] __device__(const VectorInterface *arr) { return arr->size_used(); }, nused); COPCORE_CUDA_CHECK(cudaDeviceSynchronize()); auto time_release = timer.Stop(); std::cout << "\ntime_release_selected = " << time_release << " nused = " << *nused << std::endl; print_vector<<<1, 1>>>(1, vect1_ptr_d); print_tracks<<<1, 1>>>(vect1_ptr_d, 0, 32); int nused_after_release = *nused; if ((nselected1 + nused_after_release) != ntracks) { std::cerr << "Error in release_selected.\n"; return 3; } // Demonstrate select_and_move functionality COPCORE_CUDA_CHECK(cudaDeviceSynchronize()); timer.Start(); // a fuction selecting tracks having energy > 0.8. 
We move these tracks in a second vector auto select2_func = [] __device__(int i, const VectorInterface *arr) { return ((*arr)[i].energy > 0.8); }; //=== VectorInterface::select_and_move(vect1_ptr_d, select2_func, vect2_ptr_d, nselected_hd); //=== auto time_select_and_move = timer.Stop(); COPCORE_CUDA_CHECK(cudaDeviceSynchronize()); get_vector_data<<<1, 1>>>(vect1_ptr_d, [] __device__(const VectorInterface *arr) { return arr->size_used(); }, &nused[0]); get_vector_data<<<1, 1>>>(vect2_ptr_d, [] __device__(const VectorInterface *arr) { return arr->size_used(); }, &nused[1]); COPCORE_CUDA_CHECK(cudaDeviceSynchronize()); std::cout << "\ntime_select_and_move (energy > 0.8) = " << time_select_and_move << std::endl; print_vector<<<1, 1>>>(1, vect1_ptr_d); print_tracks<<<1, 1>>>(vect1_ptr_d, 0, 32); print_vector<<<1, 1>>>(2, vect2_ptr_d); print_tracks<<<1, 1>>>(vect2_ptr_d, 0, 32); // Check the moved tracks int nused_after_move = nused[0]; int nused_after_move2 = nused[1]; if ((nused_after_release - nused_after_move) != nused_after_move2) { std::cerr << "Error in select_and_move.\n"; return 4; } // Demonstrate a common selection method that should be used when the vector is fragmented. COPCORE_CUDA_CHECK(cudaDeviceSynchronize()); timer.Start(); VectorInterface::select_used(vect1_ptr_d, sel_vector_d, nselected_hd); COPCORE_CUDA_CHECK(cudaDeviceSynchronize()); auto time_select_used = timer.Stop(); std::cout << "\ntime_select_used = " << time_select_used << std::endl; print_selected_tracks<<<1, 1>>>(vect1_ptr_d, sel_vector_d, nselected_hd, 0, 32); if (*nselected_hd != nused_after_move) { std::cerr << "Error in select_used.\n"; return 5; } // Compact used elements by copying them into a destination vector. The stage above should be preferred // if the sparsity is small, while this one is preffered for high sparsity. See SparseVector header // for the definition of sparsity, shared and selected fractions. COPCORE_CUDA_CHECK(cudaDeviceSynchronize()); timer.Start(); VectorInterface::compact(vect1_ptr_d, vect2_ptr_d, nselected_hd); COPCORE_CUDA_CHECK(cudaDeviceSynchronize()); auto time_compact = timer.Stop(); get_vector_data<<<1, 1>>>(vect1_ptr_d, [] __device__(const VectorInterface *arr) { return arr->size_used(); }, &nused[0]); get_vector_data<<<1, 1>>>(vect2_ptr_d, [] __device__(const VectorInterface *arr) { return arr->size_used(); }, &nused[1]); COPCORE_CUDA_CHECK(cudaDeviceSynchronize()); std::cout << "\ntime_compact = " << time_compact << std::endl; print_vector<<<1, 1>>>(1, vect1_ptr_d); print_vector<<<1, 1>>>(2, vect2_ptr_d); print_tracks<<<1, 1>>>(vect2_ptr_d, 0, 32); COPCORE_CUDA_CHECK(cudaDeviceSynchronize()); if ((nused[0] != 0) || (nused[1] != nused_after_move2 + nused_after_move)) { std::cerr << "Error in compact.\n"; return 6; } COPCORE_CUDA_CHECK(cudaFree(vect1_ptr_d)); COPCORE_CUDA_CHECK(cudaFree(vect2_ptr_d)); COPCORE_CUDA_CHECK(cudaFree(sel_vector_d)); return 0; }
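The .hip/.cu pairs in this collection differ only in the launch syntax and API prefixes that hipify rewrites. A minimal sketch of that mapping, using a hypothetical kernel named scale (not one of the kernels above):

__global__ void scale(float *x, float a, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) x[i] *= a;
}
// CUDA launch:
//   scale<<<(n + 255) / 256, 256>>>(d_x, 2.0f, n);
// hipify rewrites it as:
//   hipLaunchKernelGGL(scale, dim3((n + 255) / 256), dim3(256), 0, 0, d_x, 2.0f, n);
// (grid, block, shared-memory bytes, stream, then the kernel arguments),
// and cudaMalloc / cudaMemcpy / cudaFree become hipMalloc / hipMemcpy / hipFree.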
9dfbd829f5d9ca77cb8980b5a3f81c6997a7f87e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include<stdio.h> #define CHECK_FOR_CORRECTNESS 1 #define MIN(a,b) (( (a) < (b) )?(a):(b)) /* Kernel Function1 - Initialize the array */ __global__ void initializeArray(int* A, int* B, int N) { int i = threadIdx.x; if(i<N) B[i] = A[i]; printf("Setting B[%d] = %d , from A[%d] = %d\n", i, B[i], i, A[i]); } /* Kernel Function2 - PrefixOperations on B */ __global__ void prefixOnB(int* B, int t, int s) { int i = threadIdx.x; B[t + i] = MIN(B[s + 2*i - 1] , B[s + 2*i]); } /* kernel Function3 - PrefixOperations on C */ __global__ void prefixOnC(int* B, int* C,int t, int s) { int i = threadIdx.x; if (1 == i) C[t + i] = B[t + i]; else if((i%2) == 0) { C[t + i] = C[s + (i>>1)]; } else { C[t + i] = MIN(C[s +((i-1)>>1)] , B[t + i]); } } /* Kernel Function4 - Copy the results */ __global__ void copyArray(int* S, int* C, int N) { int i = threadIdx.x; S[i] = C[i]; printf("Setting S[%d] = %d , from C[%d] = %d\n", i, S[i], i, C[i]); } /* Just a somple function to get log to base 2*/ int log2(int x) { int k = 0; while(x>>=1) k++; return k; } /* Main function - All of the implementation is in main */ int main() { int N = 64; /* Declare and Initialize host arrays A, B, S */ int* h_A, *h_S, *h_B; size_t arrSize = N*sizeof(int); h_A = (int*)malloc(arrSize); h_B = (int*)malloc(2*arrSize); h_S = (int*)malloc(arrSize); /* Declare and Initialize device arrays A, B, C, S */ int *d_A,*d_B, *d_C, *d_S; hipMalloc(&d_A, arrSize); hipMalloc(&d_B, 2*arrSize); hipMalloc(&d_C, 2*arrSize); hipMalloc(&d_S, arrSize); int seed = 1078989; int mod = 32768, step = 7986721; for(int i =0;i<N;i++) { h_A[i] = seed%mod; seed+=step; } /* Copy vectors from host memory to device memory */ hipMemcpy(d_A, h_A, arrSize, hipMemcpyHostToDevice); /* First call to Kernel Function to Initialize B */ int threadsPerBlock = N; int blocksPerGrid = 1;hipLaunchKernelGGL(( initializeArray), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_A, d_B, N); #ifdef CHECK_FOR_CORRECTNESS for(int i1=0;i1<N;i1++) printf("DBUG_0 %d \n", h_A[i1]); /* Checking for correctness */ hipMemcpy(h_B, d_B, 2*arrSize, hipMemcpyDeviceToHost); for(int i=0;i<N;i++) printf("DBUG_1 %d %d\n", h_A[i], h_B[i]); #endif int m = N, t = 0, h=1; int k = log2(N); int s = 0; for(h =1; h<=k; h++) { s = t; t += m; m >>=1; /* Second call to CUDA Kernel Function - This time logN calls. Every call has m parallel instances */ blocksPerGrid = 1; threadsPerBlock = m; hipLaunchKernelGGL(( prefixOnB), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_B, t , s); } for(h=k;h>=0;h--) { blocksPerGrid = 1; threadsPerBlock = m; /* Third call to kernel function - Again logN times m of them */ hipLaunchKernelGGL(( prefixOnC), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_B, d_C, t , s); m<<=1; s= t; t-=m; } /* Copy the results from C */ threadsPerBlock = N; blocksPerGrid = 1;hipLaunchKernelGGL(( copyArray), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_S, d_C, N); hipMemcpy(h_S, d_S, arrSize, hipMemcpyDeviceToHost); for(int i=0;i<N;i++) printf("DBUG_2 %d %d\n", h_A[i], h_S[i]); /*Done with calculations - Free Device memory */ hipFree(d_A); hipFree(d_B); hipFree(d_C); hipFree(d_S); /* Free host memory */ free(h_A); free(h_B); free(h_S); return 0; }
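The levels of the min-tree built by prefixOnB are packed back-to-back in B (and mirrored in C by prefixOnC), which is why both arrays are allocated with 2*arrSize. A short trace of the offset arithmetic in main for N = 8, added here only for illustration:

// Up-sweep (prefixOnB):  h = 1: s = 0,  t = 8,  m = 4  -> 4 pairwise minima written to B[8..11]
//                        h = 2: s = 8,  t = 12, m = 2  -> 2 minima written to B[12..13]
//                        h = 3: s = 12, t = 14, m = 1  -> overall minimum written to B[14]
// The down-sweep (prefixOnC) then walks t back down (14, 12, 8, 0), filling C at the
// same offsets, and the leaf level C[0..N-1] is copied out to S by copyArray.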
9dfbd829f5d9ca77cb8980b5a3f81c6997a7f87e.cu
#include<stdio.h> #define CHECK_FOR_CORRECTNESS 1 #define MIN(a,b) (( (a) < (b) )?(a):(b)) /* Kernel Function1 - Initialize the array */ __global__ void initializeArray(int* A, int* B, int N) { int i = threadIdx.x; if(i<N) B[i] = A[i]; printf("Setting B[%d] = %d , from A[%d] = %d\n", i, B[i], i, A[i]); } /* Kernel Function2 - PrefixOperations on B */ __global__ void prefixOnB(int* B, int t, int s) { int i = threadIdx.x; B[t + i] = MIN(B[s + 2*i - 1] , B[s + 2*i]); } /* kernel Function3 - PrefixOperations on C */ __global__ void prefixOnC(int* B, int* C,int t, int s) { int i = threadIdx.x; if (1 == i) C[t + i] = B[t + i]; else if((i%2) == 0) { C[t + i] = C[s + (i>>1)]; } else { C[t + i] = MIN(C[s +((i-1)>>1)] , B[t + i]); } } /* Kernel Function4 - Copy the results */ __global__ void copyArray(int* S, int* C, int N) { int i = threadIdx.x; S[i] = C[i]; printf("Setting S[%d] = %d , from C[%d] = %d\n", i, S[i], i, C[i]); } /* Just a somple function to get log to base 2*/ int log2(int x) { int k = 0; while(x>>=1) k++; return k; } /* Main function - All of the implementation is in main */ int main() { int N = 64; /* Declare and Initialize host arrays A, B, S */ int* h_A, *h_S, *h_B; size_t arrSize = N*sizeof(int); h_A = (int*)malloc(arrSize); h_B = (int*)malloc(2*arrSize); h_S = (int*)malloc(arrSize); /* Declare and Initialize device arrays A, B, C, S */ int *d_A,*d_B, *d_C, *d_S; cudaMalloc(&d_A, arrSize); cudaMalloc(&d_B, 2*arrSize); cudaMalloc(&d_C, 2*arrSize); cudaMalloc(&d_S, arrSize); int seed = 1078989; int mod = 32768, step = 7986721; for(int i =0;i<N;i++) { h_A[i] = seed%mod; seed+=step; } /* Copy vectors from host memory to device memory */ cudaMemcpy(d_A, h_A, arrSize, cudaMemcpyHostToDevice); /* First call to Kernel Function to Initialize B */ int threadsPerBlock = N; int blocksPerGrid = 1; initializeArray<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_B, N); #ifdef CHECK_FOR_CORRECTNESS for(int i1=0;i1<N;i1++) printf("DBUG_0 %d \n", h_A[i1]); /* Checking for correctness */ cudaMemcpy(h_B, d_B, 2*arrSize, cudaMemcpyDeviceToHost); for(int i=0;i<N;i++) printf("DBUG_1 %d %d\n", h_A[i], h_B[i]); #endif int m = N, t = 0, h=1; int k = log2(N); int s = 0; for(h =1; h<=k; h++) { s = t; t += m; m >>=1; /* Second call to CUDA Kernel Function - This time logN calls. Every call has m parallel instances */ blocksPerGrid = 1; threadsPerBlock = m; prefixOnB<<<blocksPerGrid, threadsPerBlock>>>(d_B, t , s); } for(h=k;h>=0;h--) { blocksPerGrid = 1; threadsPerBlock = m; /* Third call to kernel function - Again logN times m of them */ prefixOnC<<<blocksPerGrid, threadsPerBlock>>>(d_B, d_C, t , s); m<<=1; s= t; t-=m; } /* Copy the results from C */ threadsPerBlock = N; blocksPerGrid = 1; copyArray<<<blocksPerGrid, threadsPerBlock>>>(d_S, d_C, N); cudaMemcpy(h_S, d_S, arrSize, cudaMemcpyDeviceToHost); for(int i=0;i<N;i++) printf("DBUG_2 %d %d\n", h_A[i], h_S[i]); /*Done with calculations - Free Device memory */ cudaFree(d_A); cudaFree(d_B); cudaFree(d_C); cudaFree(d_S); /* Free host memory */ free(h_A); free(h_B); free(h_S); return 0; }
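Given the DBUG_2 output above, a simple sequential reference makes the intended result explicit: S[i] should hold the running minimum of A[0..i]. A possible host-side check, added here as a sketch and not part of the original program (the exact indexing convention of the tree kernels is assumed):

// Sequential prefix-minimum reference for comparison with the GPU result.
int running = h_A[0];
for (int i = 0; i < N; i++) {
    running = MIN(running, h_A[i]);
    if (h_S[i] != running)
        printf("Mismatch at %d: expected %d, got %d\n", i, running, h_S[i]);
}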
e226dfa129732fceb0b0d4b273e8585b8fe00c1c.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /*************************************************************************************** * Author - Rutuja * Date - 08/05/2017 *****************************************************************************************/ #include "kernel_dilate.h" __global__ void kernel_dilate(const unsigned char* const input_binary_map, bool* output_dilated_map, /*const unsigned char* const mask,*/ const npy_int32 num_input, const npy_int32 num_output, const npy_int num_mask, const int* const grid_shape, const int blockdim){ extern __shared__ unsigned char curr_svox[]; unsigned int i = blockIdx.z*blockDim.z + threadIdx.z; unsigned int j = blockIdx.y*blockDim.y + threadIdx.y; unsigned int k = blockIdx.x*blockDim.x + threadIdx.x; unsigned int idx = i*grid_shape[1]*grid_shape[2] + j*grid_shape[2] + k; unsigned int shared_idx = threadIdx.z*blockdim*blockdim + threadIdx.y*blockdim + threadIdx.x; if(idx < num_input){ curr_svox[shared_idx] = input_binary_map[idx]; } else{ curr_svox[shared_idx] = 0; } __syncthreads(); int mask_idx; if(idx < num_input){ if(curr_svox[shared_idx] == 1){ output_dilated_map[idx] = true; for(int l = -1;l < 2; l++){ for(int m = -1;m < 2;m++){ for(int n = -1;n < 2;n++){ int ii = (int)i + l; int jj = (int)j + m; int kk = (int)k + n; /* skip neighbors outside the volume to avoid out-of-bounds writes */ if(ii < 0 || ii >= grid_shape[0] || jj < 0 || jj >= grid_shape[1] || kk < 0 || kk >= grid_shape[2]) continue; mask_idx = ii*grid_shape[1]*grid_shape[2] + jj*grid_shape[2] + kk; output_dilated_map[mask_idx] = true; } } } } } }
e226dfa129732fceb0b0d4b273e8585b8fe00c1c.cu
/*************************************************************************************** * Author - Rutuja * Date - 08/05/2017 *****************************************************************************************/ #include "kernel_dilate.h" __global__ void kernel_dilate(const unsigned char* const input_binary_map, bool* output_dilated_map, /*const unsigned char* const mask,*/ const npy_int32 num_input, const npy_int32 num_output, const npy_int num_mask, const int* const grid_shape, const int blockdim){ extern __shared__ unsigned char curr_svox[]; unsigned int i = blockIdx.z*blockDim.z + threadIdx.z; unsigned int j = blockIdx.y*blockDim.y + threadIdx.y; unsigned int k = blockIdx.x*blockDim.x + threadIdx.x; unsigned int idx = i*grid_shape[1]*grid_shape[2] + j*grid_shape[2] + k; unsigned int shared_idx = threadIdx.z*blockdim*blockdim + threadIdx.y*blockdim + threadIdx.x; if(idx < num_input){ curr_svox[shared_idx] = input_binary_map[idx]; } else{ curr_svox[shared_idx] = 0; } __syncthreads(); int mask_idx; if(idx < num_input){ if(curr_svox[shared_idx] == 1){ output_dilated_map[idx] = true; for(int l = -1;l < 2; l++){ for(int m = -1;m < 2;m++){ for(int n = -1;n < 2;n++){ int ii = (int)i + l; int jj = (int)j + m; int kk = (int)k + n; /* skip neighbors outside the volume to avoid out-of-bounds writes */ if(ii < 0 || ii >= grid_shape[0] || jj < 0 || jj >= grid_shape[1] || kk < 0 || kk >= grid_shape[2]) continue; mask_idx = ii*grid_shape[1]*grid_shape[2] + jj*grid_shape[2] + kk; output_dilated_map[mask_idx] = true; } } } } } }
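Neither file shows the launch, so here is a hypothetical host-side configuration consistent with the kernel's indexing: a 3D block of blockdim^3 threads, a grid covering grid_shape (k along dimension 2 is the fastest-varying index), and blockdim^3 bytes of dynamic shared memory for curr_svox. The names d_in, d_out, d_grid_shape, shape and the choice blockdim = 8 are assumptions.

const int blockdim = 8;
dim3 threads(blockdim, blockdim, blockdim);
dim3 blocks((shape[2] + blockdim - 1) / blockdim,   // x covers k (fastest index)
            (shape[1] + blockdim - 1) / blockdim,   // y covers j
            (shape[0] + blockdim - 1) / blockdim);  // z covers i
size_t shmem = (size_t)blockdim * blockdim * blockdim * sizeof(unsigned char);
kernel_dilate<<<blocks, threads, shmem>>>(d_in, d_out, num_input, num_output,
                                          num_mask, d_grid_shape, blockdim);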
5c60bba4d9d932846da9fcb3082d992282944530.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "octree_nn.h" #include "device_alternate.h" #include <thrust/transform_scan.h> #include <thrust/binary_search.h> #include <thrust/execution_policy.h> #include <thrust/functional.h> #include <thrust/replace.h> #include <thrust/sequence.h> template <typename Dtype> inline __device__ Dtype caffe_gpu_atomic_add(const Dtype val, Dtype* address); template <> inline __device__ float caffe_gpu_atomic_add(const float val, float* address) { return atomicAdd(address, val); } // double atomicAdd implementation taken from: // http://docs.nvidia.com/cuda/cuda-c-programming-guide/#axzz3PVCpVsEG template <> inline __device__ double caffe_gpu_atomic_add(const double val, double* address) { unsigned long long int* address_as_ull = reinterpret_cast<unsigned long long int*>(address); unsigned long long int old = *address_as_ull; unsigned long long int assumed; do { assumed = old; old = atomicCAS(address_as_ull, assumed, __double_as_longlong(val + __longlong_as_double(assumed))); } while (assumed != old); return __longlong_as_double(old); } template <typename Dtype> __global__ void memset_kernel(const int n, const Dtype alpha, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = alpha; } } template <typename Dtype> void memset_gpu(const int N, const Dtype alpha, Dtype* Y) { if (alpha == 0) { CUDA_CHECK(hipMemset(Y, 0, sizeof(Dtype) * N)); return; } hipLaunchKernelGGL(( memset_kernel<Dtype>) , dim3(CudaGetBlocks(N)), dim3(kCudaThreadsNum) , 0, 0, N, alpha, Y); } template <typename Dtype> void memcpy_gpu(const int N, const Dtype* X, Dtype* Y) { if (X != Y) { CUDA_CHECK(hipMemcpy(Y, X, sizeof(Dtype) * N, hipMemcpyDefault)); } } template <typename Dtype> __global__ void pad_forward_kernel(Dtype* Y, const int Hy, const Dtype* X, const int Hx, const int* label, const int n) { CUDA_KERNEL_LOOP(i, n) { int h = i % Hy; int c = i / Hy; int idx = label[h]; Y[i] = idx == -1 ? Dtype(0) : X[c * Hx + idx]; } } template <typename Dtype> __global__ void pad_backward_kernel(Dtype* X, const int Hx, const Dtype* Y, const int Hy, const int* label, const int n) { CUDA_KERNEL_LOOP(i, n) { int h = i % Hy; int c = i / Hy; int idx = label[h]; if (idx != -1) { X[c * Hx + idx] = Y[i]; } } } template<typename Dtype> void pad_forward_gpu(Dtype* Y, const int Hy, const int Cy, const Dtype* X, const int Hx, const int* label) { int n = Hy * Cy; // Note: Cx == Cy hipLaunchKernelGGL(( pad_forward_kernel<Dtype>) , dim3(CudaGetBlocks(n)), dim3(kCudaThreadsNum) , 0, 0, Y, Hy, X, Hx, label, n); CUDA_POST_KERNEL_CHECK; } template<typename Dtype> void pad_backward_gpu(Dtype* X, const int Hx, const int Cx, const Dtype* Y, const int Hy, const int* label) { int n = Hy * Cx; // Note: Cx == Cy hipLaunchKernelGGL(( pad_backward_kernel<Dtype>) , dim3(CudaGetBlocks(n)), dim3(kCudaThreadsNum) , 0, 0, X, Hx, Y, Hy, label, n); CUDA_POST_KERNEL_CHECK; } template <typename Dtype> __global__ void octree2col_kernel(Dtype* data_col, const Dtype* data_octree, const int height, const int kernel_dim, const int stride, const int* neigh, const int* ni, const int height_col, const int n, const int thread_num) { CUDA_KERNEL_LOOP(i, thread_num) { int h = i % height_col; int h1 = h + n * height_col; if (h1 >= height) { data_col[i] = 0; continue; } int t = i / height_col; int k = t % kernel_dim; int c = t / kernel_dim; int octree_h = height << 3 * (stride - 1); int index = stride == 2 ? 
(h1 << 6) + ni[k] : (h1 >> 3 << 6) + ni[(h1 % 8) * kernel_dim + k]; int p = neigh[index]; data_col[i] = p == -1 ? Dtype(0) : data_octree[c * octree_h + p]; } } template <typename Dtype> __global__ void col2octree_kernel(const Dtype* data_col, Dtype* data_octree, const int height, const int kernel_dim, const int stride, const int* neigh, const int* ni, const int height_col, const int n, const int thread_num) { CUDA_KERNEL_LOOP(i, thread_num) { int h = i % height_col; int h1 = h + n * height_col; if (h1 >= height) continue; int t = i / height_col; int k = t % kernel_dim; int c = t / kernel_dim; int octree_h = height << 3 * (stride - 1); int index = stride == 2 ? (h1 << 6) + ni[k] : (h1 >> 3 << 6) + ni[(h1 % 8) * kernel_dim + k]; int p = neigh[index]; if (p != -1) caffe_gpu_atomic_add(data_col[i], data_octree + c * octree_h + p); } } template <typename Dtype> void octree2col_gpu(Dtype* data_col, const Dtype* data_octree, const int channel, const int height, const int kernel_sdim, const int stride, const int* neigh, const int* ni, const int height_col, const int n) { const int kernel = kernel_sdim; const int thread_num = channel * kernel * height_col; hipLaunchKernelGGL(( octree2col_kernel<Dtype>) , dim3(CudaGetBlocks(thread_num)), dim3(kCudaThreadsNum) , 0, 0, data_col, data_octree, height, kernel, stride, neigh, ni, height_col, n, thread_num); CUDA_POST_KERNEL_CHECK; } template <typename Dtype> void col2octree_gpu(const Dtype* data_col, Dtype* data_octree, const int channel, const int height, const int kernel_sdim, const int stride, const int* neigh, const int* ni, const int height_col, const int n) { const int kernel = kernel_sdim; // kernel size: 3*3*3 const int thread_num = channel * kernel * height_col; int octree_h = height << 3 * (stride - 1); // set data_octree to zero ONCE when n ==0 if (n == 0) memset_gpu(channel * octree_h, Dtype(0), data_octree); hipLaunchKernelGGL(( col2octree_kernel<Dtype>) , dim3(CudaGetBlocks(thread_num)), dim3(kCudaThreadsNum) , 0, 0, data_col, data_octree, height, kernel, stride, neigh, ni, height_col, n, thread_num); CUDA_POST_KERNEL_CHECK; } template <typename Dtype> __global__ void octree_max_pool_kernel(Dtype* top_data, const int top_h, int* mask, const Dtype* btm_data, const int btm_h, const int nthreads) { CUDA_KERNEL_LOOP(i, nthreads) { int h = i % top_h; int c = i / top_h; int hb = 8 * h; int max_idx = hb; btm_data += c * btm_h; Dtype max_val = btm_data[hb]; #pragma unroll 7 for (int idx = hb + 1; idx < hb + 8; ++idx) { Dtype value = btm_data[idx]; if (value > max_val) { max_idx = idx; max_val = value; } } top_data[i] = max_val; mask[i] = max_idx; } } template<typename Dtype> void octree_max_pool_gpu(Dtype* top_data, int top_h, int* mask, const Dtype* btm_data, int btm_h, int channel) { int num = top_h * channel; hipLaunchKernelGGL(( octree_max_pool_kernel<Dtype>) , dim3(CudaGetBlocks(num)), dim3(kCudaThreadsNum) , 0, 0, top_data, top_h, mask, btm_data, btm_h, num); CUDA_POST_KERNEL_CHECK; } template <typename Dtype> __global__ void octree_max_unpool_kernel(const Dtype* top_data, const int top_h, const int* mask, Dtype* btm_data, const int btm_h, const int nthreads) { CUDA_KERNEL_LOOP(i, nthreads) { int c = i / top_h; btm_data[c * btm_h + mask[i]] = top_data[i]; } } template<typename Dtype> void octree_max_unpool_gpu(const Dtype* top_data, int top_h, const int* mask, Dtype* btm_data, int btm_h, int channel) { int num = top_h * channel; memset_gpu(btm_h * channel, Dtype(0), btm_data); hipLaunchKernelGGL(( octree_max_unpool_kernel<Dtype>) , 
dim3(CudaGetBlocks(num)), dim3(kCudaThreadsNum) , 0, 0, top_data, top_h, mask, btm_data, btm_h, num); CUDA_POST_KERNEL_CHECK; } template <typename Dtype> __global__ void octree_mask_pool_kernel(Dtype* top_data, const int top_h, const int* mask, const Dtype* btm_data, const int btm_h, const int nthreads) { CUDA_KERNEL_LOOP(i, nthreads) { int c = i / top_h; top_data[i] = btm_data[c * btm_h + mask[i]]; } } template<typename Dtype> void octree_mask_pool_gpu(Dtype* top_data, int top_h, const int* mask, const Dtype* btm_data, int btm_h, int channel) { int num = top_h * channel; hipLaunchKernelGGL(( octree_mask_pool_kernel<Dtype>) , dim3(CudaGetBlocks(num)), dim3(kCudaThreadsNum) , 0, 0, top_data, top_h, mask, btm_data, btm_h, num); CUDA_POST_KERNEL_CHECK; } __global__ void calc_neigh_kernel(int* neigh_split, const int* neigh, const int* children, const int* parent, const int* dis, const int thread_num) { CUDA_KERNEL_LOOP(id, thread_num) { int i = id >> 6; int j = id % 64; int l0 = children[i]; if (l0 != -1) { const int* ngh0 = neigh + (i >> 3 << 6); const int* pi0 = parent + (i % 8) * 64; int* ngh1 = neigh_split + (l0 << 6); int t = -1; int k = ngh0[pi0[j]]; if (k != -1) { int l1 = children[k]; if (l1 != -1) { t = (l1 << 3) + dis[j]; } } ngh1[j] = t; } } } void calc_neigh_gpu(int* neigh_split, const int* neigh, const int* children, const int node_num, const int* parent, const int* dis) { int n = node_num << 6; // node_num: the non_empty node number of parent layer hipLaunchKernelGGL(( calc_neigh_kernel) , dim3(CudaGetBlocks(n)), dim3(kCudaThreadsNum) , 0, 0, neigh_split, neigh, children, parent, dis, n); } __global__ void calc_full_neigh_kernel(int* neigh, const int depth, const int batch_size, const int thread_num) { CUDA_KERNEL_LOOP(id, thread_num) { const unsigned bound = 1 << depth; unsigned node_num = 1 << 3 * depth; unsigned num = node_num >> 3; unsigned tm = id; unsigned z = tm % 4; tm /= 4; unsigned y = tm % 4; tm /= 4; unsigned x = tm % 4; tm /= 4; unsigned i = (tm % num) * 8; unsigned n = tm / num; unsigned x0 = 0, y0 = 0, z0 = 0; #pragma unroll 4 for (unsigned d = 0; d < depth; d++) { x0 |= (i & (1 << 3 * d + 2)) >> (2 * d + 2); y0 |= (i & (1 << 3 * d + 1)) >> (2 * d + 1); z0 |= (i & (1 << 3 * d + 0)) >> (2 * d + 0); } unsigned x1 = x0 + x - 1; unsigned y1 = y0 + y - 1; unsigned z1 = z0 + z - 1; int v = -1; if ((x1 & bound) == 0 && (y1 & bound) == 0 && (z1 & bound) == 0) { unsigned key1 = 0; #pragma unroll 4 for (int d = 0; d < depth; d++) { unsigned mask = 1u << d; key1 |= ((x1 & mask) << (2 * d + 2)) | ((y1 & mask) << (2 * d + 1)) | ((z1 & mask) << (2 * d)); } v = key1 + n * node_num; } neigh[id] = v; } } void calc_neigh_gpu(int* neigh, const int depth, const int batch_size) { int thread_num = batch_size * (1 << 3 * depth + 3); hipLaunchKernelGGL(( calc_full_neigh_kernel) , dim3(CudaGetBlocks(thread_num)), dim3(kCudaThreadsNum) , 0, 0, neigh, depth, batch_size, thread_num); CUDA_POST_KERNEL_CHECK; } __global__ void gen_key_kernel(uint32* key_child, const uint32* key, const int* child, const int thread_num) { typedef unsigned char ubyte; CUDA_KERNEL_LOOP(id, thread_num) { int i = id >> 3; int j = id % 8; int label = child[i]; if (label != -1) { const ubyte* k0 = (const ubyte*)(key + i); ubyte* k1 = (ubyte*)(key_child + 8 * label + j); k1[0] = (k0[0] << 1) | ((j & 4) >> 2); k1[1] = (k0[1] << 1) | ((j & 2) >> 1); k1[2] = (k0[2] << 1) | (j & 1); k1[3] = k0[3]; } } } // use the information from parent layer to calculate the key of current layer void generate_key_gpu(uint32* key_child, 
const uint32* key, const int* child, const int node_num) { int n = node_num << 3; // node_num: the node number of parent layer hipLaunchKernelGGL(( gen_key_kernel) , dim3(CudaGetBlocks(n)), dim3(kCudaThreadsNum) , 0, 0, key_child, key, child, n); CUDA_POST_KERNEL_CHECK; } __global__ void gen_full_key_kernel(uint32* key, const int depth, const int batch_size, const int thread_num) { CUDA_KERNEL_LOOP(i, thread_num) { unsigned node_num = 1 << 3 * depth; unsigned k = i % node_num; unsigned xyz = 0; unsigned char* ptr = (unsigned char*)(&xyz); #pragma unroll 8 for (int d = 0; d < depth; d++) { ptr[0] |= (k & (1 << 3 * d + 2)) >> (2 * d + 2); ptr[1] |= (k & (1 << 3 * d + 1)) >> (2 * d + 1); ptr[2] |= (k & (1 << 3 * d + 0)) >> (2 * d + 0); } ptr[3] = i / node_num; key[i] = xyz; } } void generate_key_gpu(uint32* key, const int depth, const int batch_size) { int thread_num = batch_size * (1 << 3 * depth); hipLaunchKernelGGL(( gen_full_key_kernel) , dim3(CudaGetBlocks(thread_num)), dim3(kCudaThreadsNum) , 0, 0, key, depth, batch_size, thread_num); CUDA_POST_KERNEL_CHECK; } template <typename Dtype> void generate_label_gpu(int* label_data, int& top_h, const Dtype* btm_data, const int btm_h, const int mask) { top_h = 0; thrust::transform_exclusive_scan(thrust::device, btm_data, btm_data + btm_h, label_data, mask == thrust::placeholders::_1, 0, thrust::plus<int>()); hipMemcpy(&top_h, label_data + btm_h - 1, sizeof(int), hipMemcpyDeviceToHost); Dtype flag = -1; hipMemcpy(&flag, btm_data + btm_h - 1, sizeof(Dtype), hipMemcpyDeviceToHost); if (mask == flag) top_h++; thrust::replace_if(thrust::device, label_data, label_data + btm_h, btm_data, mask != thrust::placeholders::_1, -1); } template <typename Dtype> void sequence_gpu(Dtype* ptr, const int num) { thrust::sequence(thrust::device, ptr, ptr + num); } __global__ void validate_search_kernel(int* idx, const unsigned* key, const int n_key, const unsigned* query, const int n_query) { CUDA_KERNEL_LOOP(i, n_query) { int j = idx[i]; if (j >= n_key || key[j] != query[i]) idx[i] = -1; } } void search_key_gpu(int* idx, const uint32* key, const int n_key, const uint32* query, const int n_query) { thrust::lower_bound(thrust::device, key, key + n_key, query, query + n_query, idx); hipLaunchKernelGGL(( validate_search_kernel) , dim3(CudaGetBlocks(n_query)), dim3(kCudaThreadsNum) , 0, 0, idx, key, n_key, query, n_query); CUDA_POST_KERNEL_CHECK; } // NOTE: !!! currently the depth should be less than 8 __global__ void xyz2key_kernel(uint32* key, const uint32* xyz, const int num, const int depth) { CUDA_KERNEL_LOOP(i, num) { uint32 xyz_in = xyz[i]; uint32 key_out = 0; unsigned char* ptr = (unsigned char*)(&xyz_in); unsigned char* ptr_out = (unsigned char*)(&key_out); #pragma unroll 8 for (int d = 0; d < depth; ++d) { unsigned char mask = 1 << d; key_out |= (ptr[0] & mask) << (2 * d + 2) | (ptr[1] & mask) << (2 * d + 1) | (ptr[2] & mask) << (2 * d + 0); } ptr_out[3] = ptr[3]; key[i] = key_out; } } void xyz2key_gpu(uint32* key, const uint32* xyz, const int num, const int depth) { hipLaunchKernelGGL(( xyz2key_kernel) , dim3(CudaGetBlocks(num)), dim3(kCudaThreadsNum) , 0, 0, key, xyz, num, depth); CUDA_POST_KERNEL_CHECK; } // NOTE: !!! 
currently the depth should be less than 8 __global__ void key2xyz_kernel(uint32* xyz, const uint32* key, const int num, const int depth) { CUDA_KERNEL_LOOP(i, num) { uint32 key_in = key[i], xyz_out = 0; unsigned char* pt = (unsigned char*)(&xyz_out); unsigned char* ptr = (unsigned char*)(&key_in); pt[3] = ptr[3]; #pragma unroll 8 for (int d = 0; d < depth; d++) { pt[0] |= (key_in & (1u << (3 * d + 2))) >> (2 * d + 2); pt[1] |= (key_in & (1u << (3 * d + 1))) >> (2 * d + 1); pt[2] |= (key_in & (1u << (3 * d))) >> (2 * d); } xyz[i] = xyz_out; } } void key2xyz_gpu(uint32* xyz, const uint32* key, const int num, const int depth) { hipLaunchKernelGGL(( key2xyz_kernel) , dim3(CudaGetBlocks(num)), dim3(kCudaThreadsNum) , 0, 0, xyz, key, num, depth); CUDA_POST_KERNEL_CHECK; } template <typename Dtype> __global__ void align_forward_kernel(Dtype* top_data, const int Htop, const Dtype* btm_data, const int Hbtm, const int* index_data, const int num) { CUDA_KERNEL_LOOP(i, num) { int h = i % Hbtm; int c = i / Hbtm; int j = index_data[h]; if (j != -1) { top_data[c * Htop + j] = btm_data[i]; } } } template <typename Dtype> void align_forward_gpu(Dtype* top_data, const int top_h, const int channel, const Dtype* btm_data, const int btm_h, const int* idx, const int num) { memset_gpu(num, Dtype(0), top_data); hipLaunchKernelGGL(( align_forward_kernel) , dim3(CudaGetBlocks(num)), dim3(kCudaThreadsNum) , 0, 0, top_data, top_h, btm_data, btm_h, idx, num); CUDA_POST_KERNEL_CHECK; } template <typename Dtype> __global__ void align_backward_kernel(const Dtype* top_data, const int Htop, Dtype* btm_data, const int Hbtm, const int* index_data, const int num) { CUDA_KERNEL_LOOP(i, num) { int h = i % Hbtm; int c = i / Hbtm; int j = index_data[h]; btm_data[i] = j == -1 ? 0 : top_data[c * Htop + j]; } } template <typename Dtype> void align_backward_gpu(const Dtype* top_data, const int top_h, const int channel, Dtype* btm_data, const int btm_h, const int* idx, const int num) { hipLaunchKernelGGL(( align_backward_kernel) , dim3(CudaGetBlocks(num)), dim3(kCudaThreadsNum) , 0, 0, top_data, top_h, btm_data, btm_h, idx, num); CUDA_POST_KERNEL_CHECK; } __global__ void octree_mask_kernel(float* des, const float* src, const int* label_data, const int height, const int mask, const int n) { CUDA_KERNEL_LOOP(i, n) { int h = i % height; des[i] = label_data[h] == mask ? 
float(0) : src[i]; } } void octree_mask_gpu(float* out_data, const float* in_data, const int* label, int height, int mask, int num) { hipLaunchKernelGGL(( octree_mask_kernel) , dim3(CudaGetBlocks(num)), dim3(kCudaThreadsNum) , 0, 0, out_data, in_data, label, height, mask, num); CUDA_POST_KERNEL_CHECK; } // Explicit instantiation template void memset_gpu<int>(const int N, const int alpha, int* Y); template void memset_gpu<float>(const int N, const float alpha, float* Y); template void memset_gpu<double>(const int N, const double alpha, double* Y); template void memset_gpu<char>(const int N, const char alpha, char* Y); template void memset_gpu<int8_t>(const int N, const int8_t alpha, int8_t* Y); template void memset_gpu<uint8_t>(const int N, const uint8_t alpha, uint8_t* Y); template void memcpy_gpu<int>(const int N, const int* X, int* Y); template void memcpy_gpu<unsigned>(const int N, const unsigned* X, unsigned* Y); template void memcpy_gpu<float>(const int N, const float* X, float* Y); template void memcpy_gpu<double>(const int N, const double* X, double* Y); template void sequence_gpu<int>(int* ptr, const int num); template void sequence_gpu<unsigned int>(unsigned int* ptr, const int num); template void pad_forward_gpu<float>(float* Y, const int Hy, const int Cy, const float* X, const int Hx, const int* label); template void pad_forward_gpu<double>(double* Y, const int Hy, const int Cy, const double* X, const int Hx, const int* label); template void pad_backward_gpu<float>(float* X, const int Hx, const int Cx, const float* Y, const int Hy, const int* label); template void pad_backward_gpu<double>(double* X, const int Hx, const int Cx, const double* Y, const int Hy, const int* label); template void octree2col_gpu<float>(float* data_col, const float* data_octree, const int channel, const int height, const int kernel_sdim, const int stride, const int* neigh, const int* ni, const int height_col, const int n); template void octree2col_gpu<double>(double* data_col, const double* data_octree, const int channel, const int height, const int kernel_sdim, const int stride, const int* neigh, const int* ni, const int height_col, const int n); template void col2octree_gpu<float>(const float* data_col, float* data_octree, const int channel, const int height, const int kernel_sdim, const int stride, const int* neigh, const int* ni, const int height_col, const int n); template void col2octree_gpu<double>(const double* data_col, double* data_octree, const int channel, const int height, const int kernel_sdim, const int stride, const int* neigh, const int* ni, const int height_col, const int n); template void generate_label_gpu<float>(int* label_data, int& top_h, const float* bottom_data, const int bottom_h, const int mask); template void generate_label_gpu<double>(int* label_data, int& top_h, const double* bottom_data, const int bottom_h, const int mask); template void generate_label_gpu<int>(int* label_data, int& top_h, const int* bottom_data, const int bottom_h, const int mask); template void octree_max_pool_gpu<float>(float* top_data, int top_h, int* mask, const float* btm_data, int bottom_h, int channel); template void octree_max_pool_gpu<double>(double* top_data, int top_h, int* mask, const double* btm_data, int bottom_h, int channel); template void octree_max_unpool_gpu<float>(const float* top_data, int top_h, const int* mask, float* btm_data, int bottom_h, int channel); template void octree_max_unpool_gpu<double>(const double* top_data, int top_h, const int* mask, double* btm_data, int bottom_h, 
int channel); template void octree_mask_pool_gpu<float>(float* top_data, int top_h, const int* mask, const float* btm_data, int bottom_h, int channel); template void octree_mask_pool_gpu<double>(double* top_data, int top_h, const int* mask, const double* btm_data, int bottom_h, int channel); template void align_forward_gpu(float* top_data, const int top_h, const int c, const float* btm_data, const int btm_h, const int* idx, const int n); template void align_forward_gpu(double* top_data, const int top_h, const int c, const double* btm_data, const int btm_h, const int* idx, const int n); template void align_backward_gpu(const float* top_data, const int top_h, const int c, float* btm_data, const int btm_h, const int* idx, const int n); template void align_backward_gpu(const double* top_data, const int top_h, const int c, double* btm_data, const int btm_h, const int* idx, const int n);
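xyz2key_kernel and key2xyz_kernel above use a shuffled-key (Morton) layout: key bit 3d+2 holds bit d of x, bit 3d+1 holds bit d of y, bit 3d holds bit d of z, and the top byte carries the batch index. A small host-side sketch of the same bit layout with a worked value; the helper name is hypothetical, and it takes x, y, z directly rather than the packed bytes used by the kernel:

unsigned xyz_to_key_host(unsigned x, unsigned y, unsigned z, int depth) {
    unsigned key = 0;
    for (int d = 0; d < depth; ++d) {
        unsigned mask = 1u << d;
        key |= ((x & mask) << (2 * d + 2)) |  // key bit 3d+2 <- x bit d
               ((y & mask) << (2 * d + 1)) |  // key bit 3d+1 <- y bit d
               ((z & mask) << (2 * d));       // key bit 3d   <- z bit d
    }
    return key;
}
// Example: depth = 2, (x, y, z) = (2, 1, 3) interleaves to 0b101011 = 43.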
5c60bba4d9d932846da9fcb3082d992282944530.cu
#include "octree_nn.h" #include "device_alternate.h" #include <thrust/transform_scan.h> #include <thrust/binary_search.h> #include <thrust/execution_policy.h> #include <thrust/functional.h> #include <thrust/replace.h> #include <thrust/sequence.h> template <typename Dtype> inline __device__ Dtype caffe_gpu_atomic_add(const Dtype val, Dtype* address); template <> inline __device__ float caffe_gpu_atomic_add(const float val, float* address) { return atomicAdd(address, val); } // double atomicAdd implementation taken from: // http://docs.nvidia.com/cuda/cuda-c-programming-guide/#axzz3PVCpVsEG template <> inline __device__ double caffe_gpu_atomic_add(const double val, double* address) { unsigned long long int* address_as_ull = reinterpret_cast<unsigned long long int*>(address); unsigned long long int old = *address_as_ull; unsigned long long int assumed; do { assumed = old; old = atomicCAS(address_as_ull, assumed, __double_as_longlong(val + __longlong_as_double(assumed))); } while (assumed != old); return __longlong_as_double(old); } template <typename Dtype> __global__ void memset_kernel(const int n, const Dtype alpha, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = alpha; } } template <typename Dtype> void memset_gpu(const int N, const Dtype alpha, Dtype* Y) { if (alpha == 0) { CUDA_CHECK(cudaMemset(Y, 0, sizeof(Dtype) * N)); return; } memset_kernel<Dtype> <<< CudaGetBlocks(N), kCudaThreadsNum >>> ( N, alpha, Y); } template <typename Dtype> void memcpy_gpu(const int N, const Dtype* X, Dtype* Y) { if (X != Y) { CUDA_CHECK(cudaMemcpy(Y, X, sizeof(Dtype) * N, cudaMemcpyDefault)); } } template <typename Dtype> __global__ void pad_forward_kernel(Dtype* Y, const int Hy, const Dtype* X, const int Hx, const int* label, const int n) { CUDA_KERNEL_LOOP(i, n) { int h = i % Hy; int c = i / Hy; int idx = label[h]; Y[i] = idx == -1 ? Dtype(0) : X[c * Hx + idx]; } } template <typename Dtype> __global__ void pad_backward_kernel(Dtype* X, const int Hx, const Dtype* Y, const int Hy, const int* label, const int n) { CUDA_KERNEL_LOOP(i, n) { int h = i % Hy; int c = i / Hy; int idx = label[h]; if (idx != -1) { X[c * Hx + idx] = Y[i]; } } } template<typename Dtype> void pad_forward_gpu(Dtype* Y, const int Hy, const int Cy, const Dtype* X, const int Hx, const int* label) { int n = Hy * Cy; // Note: Cx == Cy pad_forward_kernel<Dtype> <<< CudaGetBlocks(n), kCudaThreadsNum >>> ( Y, Hy, X, Hx, label, n); CUDA_POST_KERNEL_CHECK; } template<typename Dtype> void pad_backward_gpu(Dtype* X, const int Hx, const int Cx, const Dtype* Y, const int Hy, const int* label) { int n = Hy * Cx; // Note: Cx == Cy pad_backward_kernel<Dtype> <<< CudaGetBlocks(n), kCudaThreadsNum >>> ( X, Hx, Y, Hy, label, n); CUDA_POST_KERNEL_CHECK; } template <typename Dtype> __global__ void octree2col_kernel(Dtype* data_col, const Dtype* data_octree, const int height, const int kernel_dim, const int stride, const int* neigh, const int* ni, const int height_col, const int n, const int thread_num) { CUDA_KERNEL_LOOP(i, thread_num) { int h = i % height_col; int h1 = h + n * height_col; if (h1 >= height) { data_col[i] = 0; continue; } int t = i / height_col; int k = t % kernel_dim; int c = t / kernel_dim; int octree_h = height << 3 * (stride - 1); int index = stride == 2 ? (h1 << 6) + ni[k] : (h1 >> 3 << 6) + ni[(h1 % 8) * kernel_dim + k]; int p = neigh[index]; data_col[i] = p == -1 ? 
Dtype(0) : data_octree[c * octree_h + p]; } } template <typename Dtype> __global__ void col2octree_kernel(const Dtype* data_col, Dtype* data_octree, const int height, const int kernel_dim, const int stride, const int* neigh, const int* ni, const int height_col, const int n, const int thread_num) { CUDA_KERNEL_LOOP(i, thread_num) { int h = i % height_col; int h1 = h + n * height_col; if (h1 >= height) continue; int t = i / height_col; int k = t % kernel_dim; int c = t / kernel_dim; int octree_h = height << 3 * (stride - 1); int index = stride == 2 ? (h1 << 6) + ni[k] : (h1 >> 3 << 6) + ni[(h1 % 8) * kernel_dim + k]; int p = neigh[index]; if (p != -1) caffe_gpu_atomic_add(data_col[i], data_octree + c * octree_h + p); } } template <typename Dtype> void octree2col_gpu(Dtype* data_col, const Dtype* data_octree, const int channel, const int height, const int kernel_sdim, const int stride, const int* neigh, const int* ni, const int height_col, const int n) { const int kernel = kernel_sdim; const int thread_num = channel * kernel * height_col; octree2col_kernel<Dtype> <<< CudaGetBlocks(thread_num), kCudaThreadsNum >>> ( data_col, data_octree, height, kernel, stride, neigh, ni, height_col, n, thread_num); CUDA_POST_KERNEL_CHECK; } template <typename Dtype> void col2octree_gpu(const Dtype* data_col, Dtype* data_octree, const int channel, const int height, const int kernel_sdim, const int stride, const int* neigh, const int* ni, const int height_col, const int n) { const int kernel = kernel_sdim; // kernel size: 3*3*3 const int thread_num = channel * kernel * height_col; int octree_h = height << 3 * (stride - 1); // set data_octree to zero ONCE when n ==0 if (n == 0) memset_gpu(channel * octree_h, Dtype(0), data_octree); col2octree_kernel<Dtype> <<< CudaGetBlocks(thread_num), kCudaThreadsNum >>> ( data_col, data_octree, height, kernel, stride, neigh, ni, height_col, n, thread_num); CUDA_POST_KERNEL_CHECK; } template <typename Dtype> __global__ void octree_max_pool_kernel(Dtype* top_data, const int top_h, int* mask, const Dtype* btm_data, const int btm_h, const int nthreads) { CUDA_KERNEL_LOOP(i, nthreads) { int h = i % top_h; int c = i / top_h; int hb = 8 * h; int max_idx = hb; btm_data += c * btm_h; Dtype max_val = btm_data[hb]; #pragma unroll 7 for (int idx = hb + 1; idx < hb + 8; ++idx) { Dtype value = btm_data[idx]; if (value > max_val) { max_idx = idx; max_val = value; } } top_data[i] = max_val; mask[i] = max_idx; } } template<typename Dtype> void octree_max_pool_gpu(Dtype* top_data, int top_h, int* mask, const Dtype* btm_data, int btm_h, int channel) { int num = top_h * channel; octree_max_pool_kernel<Dtype> <<< CudaGetBlocks(num), kCudaThreadsNum >>> ( top_data, top_h, mask, btm_data, btm_h, num); CUDA_POST_KERNEL_CHECK; } template <typename Dtype> __global__ void octree_max_unpool_kernel(const Dtype* top_data, const int top_h, const int* mask, Dtype* btm_data, const int btm_h, const int nthreads) { CUDA_KERNEL_LOOP(i, nthreads) { int c = i / top_h; btm_data[c * btm_h + mask[i]] = top_data[i]; } } template<typename Dtype> void octree_max_unpool_gpu(const Dtype* top_data, int top_h, const int* mask, Dtype* btm_data, int btm_h, int channel) { int num = top_h * channel; memset_gpu(btm_h * channel, Dtype(0), btm_data); octree_max_unpool_kernel<Dtype> <<< CudaGetBlocks(num), kCudaThreadsNum >>> ( top_data, top_h, mask, btm_data, btm_h, num); CUDA_POST_KERNEL_CHECK; } template <typename Dtype> __global__ void octree_mask_pool_kernel(Dtype* top_data, const int top_h, const int* mask, const Dtype* 
btm_data, const int btm_h, const int nthreads) { CUDA_KERNEL_LOOP(i, nthreads) { int c = i / top_h; top_data[i] = btm_data[c * btm_h + mask[i]]; } } template<typename Dtype> void octree_mask_pool_gpu(Dtype* top_data, int top_h, const int* mask, const Dtype* btm_data, int btm_h, int channel) { int num = top_h * channel; octree_mask_pool_kernel<Dtype> <<< CudaGetBlocks(num), kCudaThreadsNum >>> ( top_data, top_h, mask, btm_data, btm_h, num); CUDA_POST_KERNEL_CHECK; } __global__ void calc_neigh_kernel(int* neigh_split, const int* neigh, const int* children, const int* parent, const int* dis, const int thread_num) { CUDA_KERNEL_LOOP(id, thread_num) { int i = id >> 6; int j = id % 64; int l0 = children[i]; if (l0 != -1) { const int* ngh0 = neigh + (i >> 3 << 6); const int* pi0 = parent + (i % 8) * 64; int* ngh1 = neigh_split + (l0 << 6); int t = -1; int k = ngh0[pi0[j]]; if (k != -1) { int l1 = children[k]; if (l1 != -1) { t = (l1 << 3) + dis[j]; } } ngh1[j] = t; } } } void calc_neigh_gpu(int* neigh_split, const int* neigh, const int* children, const int node_num, const int* parent, const int* dis) { int n = node_num << 6; // node_num: the non_empty node number of parent layer calc_neigh_kernel <<< CudaGetBlocks(n), kCudaThreadsNum >>> ( neigh_split, neigh, children, parent, dis, n); } __global__ void calc_full_neigh_kernel(int* neigh, const int depth, const int batch_size, const int thread_num) { CUDA_KERNEL_LOOP(id, thread_num) { const unsigned bound = 1 << depth; unsigned node_num = 1 << 3 * depth; unsigned num = node_num >> 3; unsigned tm = id; unsigned z = tm % 4; tm /= 4; unsigned y = tm % 4; tm /= 4; unsigned x = tm % 4; tm /= 4; unsigned i = (tm % num) * 8; unsigned n = tm / num; unsigned x0 = 0, y0 = 0, z0 = 0; #pragma unroll 4 for (unsigned d = 0; d < depth; d++) { x0 |= (i & (1 << 3 * d + 2)) >> (2 * d + 2); y0 |= (i & (1 << 3 * d + 1)) >> (2 * d + 1); z0 |= (i & (1 << 3 * d + 0)) >> (2 * d + 0); } unsigned x1 = x0 + x - 1; unsigned y1 = y0 + y - 1; unsigned z1 = z0 + z - 1; int v = -1; if ((x1 & bound) == 0 && (y1 & bound) == 0 && (z1 & bound) == 0) { unsigned key1 = 0; #pragma unroll 4 for (int d = 0; d < depth; d++) { unsigned mask = 1u << d; key1 |= ((x1 & mask) << (2 * d + 2)) | ((y1 & mask) << (2 * d + 1)) | ((z1 & mask) << (2 * d)); } v = key1 + n * node_num; } neigh[id] = v; } } void calc_neigh_gpu(int* neigh, const int depth, const int batch_size) { int thread_num = batch_size * (1 << 3 * depth + 3); calc_full_neigh_kernel <<< CudaGetBlocks(thread_num), kCudaThreadsNum >>> ( neigh, depth, batch_size, thread_num); CUDA_POST_KERNEL_CHECK; } __global__ void gen_key_kernel(uint32* key_child, const uint32* key, const int* child, const int thread_num) { typedef unsigned char ubyte; CUDA_KERNEL_LOOP(id, thread_num) { int i = id >> 3; int j = id % 8; int label = child[i]; if (label != -1) { const ubyte* k0 = (const ubyte*)(key + i); ubyte* k1 = (ubyte*)(key_child + 8 * label + j); k1[0] = (k0[0] << 1) | ((j & 4) >> 2); k1[1] = (k0[1] << 1) | ((j & 2) >> 1); k1[2] = (k0[2] << 1) | (j & 1); k1[3] = k0[3]; } } } // use the information from parent layer to calculate the key of current layer void generate_key_gpu(uint32* key_child, const uint32* key, const int* child, const int node_num) { int n = node_num << 3; // node_num: the node number of parent layer gen_key_kernel <<< CudaGetBlocks(n), kCudaThreadsNum >>> ( key_child, key, child, n); CUDA_POST_KERNEL_CHECK; } __global__ void gen_full_key_kernel(uint32* key, const int depth, const int batch_size, const int thread_num) { 
CUDA_KERNEL_LOOP(i, thread_num) { unsigned node_num = 1 << 3 * depth; unsigned k = i % node_num; unsigned xyz = 0; unsigned char* ptr = (unsigned char*)(&xyz); #pragma unroll 8 for (int d = 0; d < depth; d++) { ptr[0] |= (k & (1 << 3 * d + 2)) >> (2 * d + 2); ptr[1] |= (k & (1 << 3 * d + 1)) >> (2 * d + 1); ptr[2] |= (k & (1 << 3 * d + 0)) >> (2 * d + 0); } ptr[3] = i / node_num; key[i] = xyz; } } void generate_key_gpu(uint32* key, const int depth, const int batch_size) { int thread_num = batch_size * (1 << 3 * depth); gen_full_key_kernel <<< CudaGetBlocks(thread_num), kCudaThreadsNum >>> ( key, depth, batch_size, thread_num); CUDA_POST_KERNEL_CHECK; } template <typename Dtype> void generate_label_gpu(int* label_data, int& top_h, const Dtype* btm_data, const int btm_h, const int mask) { top_h = 0; thrust::transform_exclusive_scan(thrust::device, btm_data, btm_data + btm_h, label_data, mask == thrust::placeholders::_1, 0, thrust::plus<int>()); cudaMemcpy(&top_h, label_data + btm_h - 1, sizeof(int), cudaMemcpyDeviceToHost); Dtype flag = -1; cudaMemcpy(&flag, btm_data + btm_h - 1, sizeof(Dtype), cudaMemcpyDeviceToHost); if (mask == flag) top_h++; thrust::replace_if(thrust::device, label_data, label_data + btm_h, btm_data, mask != thrust::placeholders::_1, -1); } template <typename Dtype> void sequence_gpu(Dtype* ptr, const int num) { thrust::sequence(thrust::device, ptr, ptr + num); } __global__ void validate_search_kernel(int* idx, const unsigned* key, const int n_key, const unsigned* query, const int n_query) { CUDA_KERNEL_LOOP(i, n_query) { int j = idx[i]; if (j >= n_key || key[j] != query[i]) idx[i] = -1; } } void search_key_gpu(int* idx, const uint32* key, const int n_key, const uint32* query, const int n_query) { thrust::lower_bound(thrust::device, key, key + n_key, query, query + n_query, idx); validate_search_kernel <<< CudaGetBlocks(n_query), kCudaThreadsNum >>> ( idx, key, n_key, query, n_query); CUDA_POST_KERNEL_CHECK; } // NOTE: !!! currently the depth should be less than 8 __global__ void xyz2key_kernel(uint32* key, const uint32* xyz, const int num, const int depth) { CUDA_KERNEL_LOOP(i, num) { uint32 xyz_in = xyz[i]; uint32 key_out = 0; unsigned char* ptr = (unsigned char*)(&xyz_in); unsigned char* ptr_out = (unsigned char*)(&key_out); #pragma unroll 8 for (int d = 0; d < depth; ++d) { unsigned char mask = 1 << d; key_out |= (ptr[0] & mask) << (2 * d + 2) | (ptr[1] & mask) << (2 * d + 1) | (ptr[2] & mask) << (2 * d + 0); } ptr_out[3] = ptr[3]; key[i] = key_out; } } void xyz2key_gpu(uint32* key, const uint32* xyz, const int num, const int depth) { xyz2key_kernel <<< CudaGetBlocks(num), kCudaThreadsNum >>> ( key, xyz, num, depth); CUDA_POST_KERNEL_CHECK; } // NOTE: !!! 
currently the depth should be less than 8 __global__ void key2xyz_kernel(uint32* xyz, const uint32* key, const int num, const int depth) { CUDA_KERNEL_LOOP(i, num) { uint32 key_in = key[i], xyz_out = 0; unsigned char* pt = (unsigned char*)(&xyz_out); unsigned char* ptr = (unsigned char*)(&key_in); pt[3] = ptr[3]; #pragma unroll 8 for (int d = 0; d < depth; d++) { pt[0] |= (key_in & (1u << (3 * d + 2))) >> (2 * d + 2); pt[1] |= (key_in & (1u << (3 * d + 1))) >> (2 * d + 1); pt[2] |= (key_in & (1u << (3 * d))) >> (2 * d); } xyz[i] = xyz_out; } } void key2xyz_gpu(uint32* xyz, const uint32* key, const int num, const int depth) { key2xyz_kernel <<< CudaGetBlocks(num), kCudaThreadsNum >>> ( xyz, key, num, depth); CUDA_POST_KERNEL_CHECK; } template <typename Dtype> __global__ void align_forward_kernel(Dtype* top_data, const int Htop, const Dtype* btm_data, const int Hbtm, const int* index_data, const int num) { CUDA_KERNEL_LOOP(i, num) { int h = i % Hbtm; int c = i / Hbtm; int j = index_data[h]; if (j != -1) { top_data[c * Htop + j] = btm_data[i]; } } } template <typename Dtype> void align_forward_gpu(Dtype* top_data, const int top_h, const int channel, const Dtype* btm_data, const int btm_h, const int* idx, const int num) { memset_gpu(num, Dtype(0), top_data); align_forward_kernel <<< CudaGetBlocks(num), kCudaThreadsNum >>> ( top_data, top_h, btm_data, btm_h, idx, num); CUDA_POST_KERNEL_CHECK; } template <typename Dtype> __global__ void align_backward_kernel(const Dtype* top_data, const int Htop, Dtype* btm_data, const int Hbtm, const int* index_data, const int num) { CUDA_KERNEL_LOOP(i, num) { int h = i % Hbtm; int c = i / Hbtm; int j = index_data[h]; btm_data[i] = j == -1 ? 0 : top_data[c * Htop + j]; } } template <typename Dtype> void align_backward_gpu(const Dtype* top_data, const int top_h, const int channel, Dtype* btm_data, const int btm_h, const int* idx, const int num) { align_backward_kernel <<< CudaGetBlocks(num), kCudaThreadsNum >>> ( top_data, top_h, btm_data, btm_h, idx, num); CUDA_POST_KERNEL_CHECK; } __global__ void octree_mask_kernel(float* des, const float* src, const int* label_data, const int height, const int mask, const int n) { CUDA_KERNEL_LOOP(i, n) { int h = i % height; des[i] = label_data[h] == mask ? 
float(0) : src[i]; } } void octree_mask_gpu(float* out_data, const float* in_data, const int* label, int height, int mask, int num) { octree_mask_kernel <<< CudaGetBlocks(num), kCudaThreadsNum >>> ( out_data, in_data, label, height, mask, num); CUDA_POST_KERNEL_CHECK; } // Explicit instantiation template void memset_gpu<int>(const int N, const int alpha, int* Y); template void memset_gpu<float>(const int N, const float alpha, float* Y); template void memset_gpu<double>(const int N, const double alpha, double* Y); template void memset_gpu<char>(const int N, const char alpha, char* Y); template void memset_gpu<int8_t>(const int N, const int8_t alpha, int8_t* Y); template void memset_gpu<uint8_t>(const int N, const uint8_t alpha, uint8_t* Y); template void memcpy_gpu<int>(const int N, const int* X, int* Y); template void memcpy_gpu<unsigned>(const int N, const unsigned* X, unsigned* Y); template void memcpy_gpu<float>(const int N, const float* X, float* Y); template void memcpy_gpu<double>(const int N, const double* X, double* Y); template void sequence_gpu<int>(int* ptr, const int num); template void sequence_gpu<unsigned int>(unsigned int* ptr, const int num); template void pad_forward_gpu<float>(float* Y, const int Hy, const int Cy, const float* X, const int Hx, const int* label); template void pad_forward_gpu<double>(double* Y, const int Hy, const int Cy, const double* X, const int Hx, const int* label); template void pad_backward_gpu<float>(float* X, const int Hx, const int Cx, const float* Y, const int Hy, const int* label); template void pad_backward_gpu<double>(double* X, const int Hx, const int Cx, const double* Y, const int Hy, const int* label); template void octree2col_gpu<float>(float* data_col, const float* data_octree, const int channel, const int height, const int kernel_sdim, const int stride, const int* neigh, const int* ni, const int height_col, const int n); template void octree2col_gpu<double>(double* data_col, const double* data_octree, const int channel, const int height, const int kernel_sdim, const int stride, const int* neigh, const int* ni, const int height_col, const int n); template void col2octree_gpu<float>(const float* data_col, float* data_octree, const int channel, const int height, const int kernel_sdim, const int stride, const int* neigh, const int* ni, const int height_col, const int n); template void col2octree_gpu<double>(const double* data_col, double* data_octree, const int channel, const int height, const int kernel_sdim, const int stride, const int* neigh, const int* ni, const int height_col, const int n); template void generate_label_gpu<float>(int* label_data, int& top_h, const float* bottom_data, const int bottom_h, const int mask); template void generate_label_gpu<double>(int* label_data, int& top_h, const double* bottom_data, const int bottom_h, const int mask); template void generate_label_gpu<int>(int* label_data, int& top_h, const int* bottom_data, const int bottom_h, const int mask); template void octree_max_pool_gpu<float>(float* top_data, int top_h, int* mask, const float* btm_data, int bottom_h, int channel); template void octree_max_pool_gpu<double>(double* top_data, int top_h, int* mask, const double* btm_data, int bottom_h, int channel); template void octree_max_unpool_gpu<float>(const float* top_data, int top_h, const int* mask, float* btm_data, int bottom_h, int channel); template void octree_max_unpool_gpu<double>(const double* top_data, int top_h, const int* mask, double* btm_data, int bottom_h, int channel); template void 
octree_mask_pool_gpu<float>(float* top_data, int top_h, const int* mask, const float* btm_data, int bottom_h, int channel); template void octree_mask_pool_gpu<double>(double* top_data, int top_h, const int* mask, const double* btm_data, int bottom_h, int channel); template void align_forward_gpu(float* top_data, const int top_h, const int c, const float* btm_data, const int btm_h, const int* idx, const int n); template void align_forward_gpu(double* top_data, const int top_h, const int c, const double* btm_data, const int btm_h, const int* idx, const int n); template void align_backward_gpu(const float* top_data, const int top_h, const int c, float* btm_data, const int btm_h, const int* idx, const int n); template void align_backward_gpu(const double* top_data, const int top_h, const int c, double* btm_data, const int btm_h, const int* idx, const int n);
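// ---------------------------------------------------------------------------
// Illustrative usage sketch (added for clarity; not part of the original file).
// A minimal host-side example of the 8-to-1 octree max pooling defined above.
// Only the octree_max_pool_gpu / octree_max_unpool_gpu signatures come from
// this file; the names and sizes below (C, top_h, d_btm, d_top, d_mask) are
// hypothetical. Each pooled node covers its 8 children, so the bottom height
// is 8 * top_h, and the int mask records one argmax index per pooled element
// so that unpooling can scatter values back to the winning child.
static void example_octree_max_pool_usage() {
  const int C = 16;               // channel count (assumption)
  const int top_h = 1024;         // number of pooled (parent) nodes (assumption)
  const int btm_h = 8 * top_h;    // eight children per parent node
  float *d_btm = NULL, *d_top = NULL;
  int *d_mask = NULL;
  cudaMalloc((void**)&d_btm, sizeof(float) * C * btm_h);
  cudaMalloc((void**)&d_top, sizeof(float) * C * top_h);
  cudaMalloc((void**)&d_mask, sizeof(int) * C * top_h);
  // ... fill d_btm with per-channel feature data ...
  octree_max_pool_gpu<float>(d_top, top_h, d_mask, d_btm, btm_h, C);
  // The saved mask routes values (e.g. gradients) back to the argmax child.
  octree_max_unpool_gpu<float>(d_top, top_h, d_mask, d_btm, btm_h, C);
  cudaFree(d_btm); cudaFree(d_top); cudaFree(d_mask);
}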
e3405e0a106f1e901300b09f0be1ce794b0f7d99.hip
// !!! This is a file automatically generated by hipify!!! #include <float.h> #include <stdlib.h> #include <stdio.h> #include <hip/hip_runtime.h> #include <cutil.h> #include "utils.h" //PASSES MAKE CHECK __global__ void flipKernel(float*arr, float *out, int w){ int x = blockIdx.x*blockDim.x+threadIdx.x; int y = blockIdx.y*blockDim.y+threadIdx.y; // int x = blockIdx.x*16+threadIdx.x; // int y = blockIdx.y*16+threadIdx.y; out[y*w+w-x-1]=arr[y*w+x]; } void flip(float *arr, float *out, int w) { dim3 dim_blocks(w/16,w/16); dim3 dim_threads(16,16,1); hipLaunchKernelGGL(( flipKernel), dim3(dim_blocks), dim3(dim_threads), 0, 0, arr, out, w); hipDeviceSynchronize(); CUT_CHECK_ERROR(""); } __global__ void transposeKernel(float*arr, float *out, int w){ int x = blockIdx.x*blockDim.x+threadIdx.x; int y = blockIdx.y*blockDim.y+threadIdx.y; //int x = blockIdx.x*16+threadIdx.x; //int y = blockIdx.y*16+threadIdx.y; //out[y*w+x]=arr[y+x*w]; out[y+x*w]=arr[y*w+x]; } void transpose(float *arr, float *out, int w) { dim3 dim_blocks(w/16,w/16); dim3 dim_threads(16,16,1); hipLaunchKernelGGL(( transposeKernel), dim3(dim_blocks), dim3(dim_threads), 0, 0, arr, out, w); hipDeviceSynchronize(); CUT_CHECK_ERROR(""); } __global__ void reductionKernel(float* A, int len, int stride) { int index = blockIdx.x*blockDim.x+threadIdx.x; if (index+stride<len){ index+=blockIdx.y*len; A[index] += A[index+stride]; } } void reductionGPU(float* A, int len) { int threads_per_block = 512; int blocks_per_grid = (len/1024); if (blocks_per_grid==0){ blocks_per_grid=1; } int stride = len/2; while (stride>=1) { dim3 dim_blocks(blocks_per_grid,1); dim3 dim_threads(threads_per_block,1,1); hipLaunchKernelGGL(( reductionKernel), dim3(dim_blocks), dim3(dim_threads), 0, 0, A,len,stride); hipDeviceSynchronize(); CUT_CHECK_ERROR(""); stride /=2; if (blocks_per_grid!=1){ blocks_per_grid /=2; } else if(threads_per_block != 1){ threads_per_block /= 2; } } } __global__ void calcdistKernel(float* out, float* img, int i_w, int i_h, float* t, int t_w, int x, int y) { int index0 = blockIdx.x*16 + threadIdx.x + (blockIdx.y)*t_w + t_w*t_w*threadIdx.z; int index1 = blockIdx.x*16 + threadIdx.x + x + (blockIdx.y+y)*i_w; //int index2 = index0 + t_w*t_w*threadIDx.z; float diff = img[index1]-t[index0]; out[index0] = diff*diff; } float calc_min_dist(float *img, int i_w, int i_h, float *t, int t_w) { float min_dist = FLT_MAX; float* dist = (float*)malloc(sizeof(float)); int x = i_w-t_w; int y = i_h-t_w; float* t2,* t3,* t4; int t_len = t_w*t_w; int t_len2 = t_len*sizeof(float); //int len = 8*t_w*t_w*sizeof(float); CUDA_SAFE_CALL(hipMalloc(&t2, t_len2)); CUDA_SAFE_CALL(hipMalloc(&t3, t_len2)); float* out; CUDA_SAFE_CALL(hipMalloc(&out, t_len2)); CUDA_SAFE_CALL(hipMemcpy(t2, t,t_len2,hipMemcpyDeviceToDevice)); // //t_len *=4; // flip(t, t2+t_len2, t_w); // //flip(t2+t_len2, t2, t_w); // transpose(t2+t_len2, t2+t_len2*2, t_w); // flip(t2+t_len2*2, t2+t_len2*3, t_w); // transpose(t2+t_len2*3, t2+t_len2*4, t_w); // flip(t2+t_len2*4, t2+t_len2*5, t_w); // transpose(t2+t_len2*5, t2+t_len2*6, t_w); // flip(t2+t_len2*6, t2+t_len2*7, t_w); // //t_len/=4; for (int k = 1; k<9;k++){ if (k%2==0){ flip(t2, t3, t_w); t4=t2; t2=t3; t3=t4; }else if(k!=1){ transpose(t2, t3, t_w); t4=t2; t2=t3; t3=t4; } for (int i=0; i<=x;i++){ for (int j=0; j<=y;j++){ //printf("k:%d/n",k); dim3 dim_blocks(t_w/16,t_w); dim3 dim_threads(16, 1, 1); hipLaunchKernelGGL(( calcdistKernel), dim3(dim_blocks), dim3(dim_threads), 0, 0, out, img, i_w, i_h, t2, t_w, i, j); hipDeviceSynchronize(); 
CUT_CHECK_ERROR(""); reductionGPU(out, t_len); CUDA_SAFE_CALL(hipMemcpy(dist, out,sizeof(float),hipMemcpyDeviceToHost)); if(*dist<min_dist) min_dist=*dist; //int offset=t_len; // CUDA_SAFE_CALL(hipMemcpy(dist, out+offset,sizeof(float),hipMemcpyDeviceToHost)); // if(*dist<min_dist) min_dist=*dist; // offset+=t_len; // CUDA_SAFE_CALL(hipMemcpy(dist, out+offset,sizeof(float),hipMemcpyDeviceToHost)); // if(*dist<min_dist) min_dist=*dist; // offset+=t_len; // CUDA_SAFE_CALL(hipMemcpy(dist, out+offset,sizeof(float),hipMemcpyDeviceToHost)); // if(*dist<min_dist) min_dist=*dist; // offset+=t_len; // CUDA_SAFE_CALL(hipMemcpy(dist, out+offset,sizeof(float),hipMemcpyDeviceToHost)); // if(*dist<min_dist) min_dist=*dist; // offset+=t_len; // CUDA_SAFE_CALL(hipMemcpy(dist, out+offset,sizeof(float),hipMemcpyDeviceToHost)); // if(*dist<min_dist) min_dist=*dist; // offset+=t_len; // CUDA_SAFE_CALL(hipMemcpy(dist, out+offset,sizeof(float),hipMemcpyDeviceToHost)); // if(*dist<min_dist) min_dist=*dist; // offset+=t_len; // CUDA_SAFE_CALL(hipMemcpy(dist, out+offset,sizeof(float),hipMemcpyDeviceToHost)); // if(*dist<min_dist) min_dist=*dist; } } } CUDA_SAFE_CALL(hipFree(t2)); CUDA_SAFE_CALL(hipFree(out)); return min_dist; }
e3405e0a106f1e901300b09f0be1ce794b0f7d99.cu
#include <float.h> #include <stdlib.h> #include <stdio.h> #include <cuda_runtime.h> #include <cutil.h> #include "utils.h" //PASSES MAKE CHECK __global__ void flipKernel(float*arr, float *out, int w){ int x = blockIdx.x*blockDim.x+threadIdx.x; int y = blockIdx.y*blockDim.y+threadIdx.y; // int x = blockIdx.x*16+threadIdx.x; // int y = blockIdx.y*16+threadIdx.y; out[y*w+w-x-1]=arr[y*w+x]; } void flip(float *arr, float *out, int w) { dim3 dim_blocks(w/16,w/16); dim3 dim_threads(16,16,1); flipKernel<<<dim_blocks, dim_threads>>>(arr, out, w); cudaThreadSynchronize(); CUT_CHECK_ERROR(""); } __global__ void transposeKernel(float*arr, float *out, int w){ int x = blockIdx.x*blockDim.x+threadIdx.x; int y = blockIdx.y*blockDim.y+threadIdx.y; //int x = blockIdx.x*16+threadIdx.x; //int y = blockIdx.y*16+threadIdx.y; //out[y*w+x]=arr[y+x*w]; out[y+x*w]=arr[y*w+x]; } void transpose(float *arr, float *out, int w) { dim3 dim_blocks(w/16,w/16); dim3 dim_threads(16,16,1); transposeKernel<<<dim_blocks, dim_threads>>>(arr, out, w); cudaThreadSynchronize(); CUT_CHECK_ERROR(""); } __global__ void reductionKernel(float* A, int len, int stride) { int index = blockIdx.x*blockDim.x+threadIdx.x; if (index+stride<len){ index+=blockIdx.y*len; A[index] += A[index+stride]; } } void reductionGPU(float* A, int len) { int threads_per_block = 512; int blocks_per_grid = (len/1024); if (blocks_per_grid==0){ blocks_per_grid=1; } int stride = len/2; while (stride>=1) { dim3 dim_blocks(blocks_per_grid,1); dim3 dim_threads(threads_per_block,1,1); reductionKernel<<<dim_blocks, dim_threads>>>(A,len,stride); cudaThreadSynchronize(); CUT_CHECK_ERROR(""); stride /=2; if (blocks_per_grid!=1){ blocks_per_grid /=2; } else if(threads_per_block != 1){ threads_per_block /= 2; } } } __global__ void calcdistKernel(float* out, float* img, int i_w, int i_h, float* t, int t_w, int x, int y) { int index0 = blockIdx.x*16 + threadIdx.x + (blockIdx.y)*t_w + t_w*t_w*threadIdx.z; int index1 = blockIdx.x*16 + threadIdx.x + x + (blockIdx.y+y)*i_w; //int index2 = index0 + t_w*t_w*threadIDx.z; float diff = img[index1]-t[index0]; out[index0] = diff*diff; } float calc_min_dist(float *img, int i_w, int i_h, float *t, int t_w) { float min_dist = FLT_MAX; float* dist = (float*)malloc(sizeof(float)); int x = i_w-t_w; int y = i_h-t_w; float* t2,* t3,* t4; int t_len = t_w*t_w; int t_len2 = t_len*sizeof(float); //int len = 8*t_w*t_w*sizeof(float); CUDA_SAFE_CALL(cudaMalloc(&t2, t_len2)); CUDA_SAFE_CALL(cudaMalloc(&t3, t_len2)); float* out; CUDA_SAFE_CALL(cudaMalloc(&out, t_len2)); CUDA_SAFE_CALL(cudaMemcpy(t2, t,t_len2,cudaMemcpyDeviceToDevice)); // //t_len *=4; // flip(t, t2+t_len2, t_w); // //flip(t2+t_len2, t2, t_w); // transpose(t2+t_len2, t2+t_len2*2, t_w); // flip(t2+t_len2*2, t2+t_len2*3, t_w); // transpose(t2+t_len2*3, t2+t_len2*4, t_w); // flip(t2+t_len2*4, t2+t_len2*5, t_w); // transpose(t2+t_len2*5, t2+t_len2*6, t_w); // flip(t2+t_len2*6, t2+t_len2*7, t_w); // //t_len/=4; for (int k = 1; k<9;k++){ if (k%2==0){ flip(t2, t3, t_w); t4=t2; t2=t3; t3=t4; }else if(k!=1){ transpose(t2, t3, t_w); t4=t2; t2=t3; t3=t4; } for (int i=0; i<=x;i++){ for (int j=0; j<=y;j++){ //printf("k:%d/n",k); dim3 dim_blocks(t_w/16,t_w); dim3 dim_threads(16, 1, 1); calcdistKernel<<<dim_blocks, dim_threads>>>(out, img, i_w, i_h, t2, t_w, i, j); cudaThreadSynchronize(); CUT_CHECK_ERROR(""); reductionGPU(out, t_len); CUDA_SAFE_CALL(cudaMemcpy(dist, out,sizeof(float),cudaMemcpyDeviceToHost)); if(*dist<min_dist) min_dist=*dist; //int offset=t_len; // CUDA_SAFE_CALL(cudaMemcpy(dist, 
out+offset,sizeof(float),cudaMemcpyDeviceToHost)); // if(*dist<min_dist) min_dist=*dist; // offset+=t_len; // CUDA_SAFE_CALL(cudaMemcpy(dist, out+offset,sizeof(float),cudaMemcpyDeviceToHost)); // if(*dist<min_dist) min_dist=*dist; // offset+=t_len; // CUDA_SAFE_CALL(cudaMemcpy(dist, out+offset,sizeof(float),cudaMemcpyDeviceToHost)); // if(*dist<min_dist) min_dist=*dist; // offset+=t_len; // CUDA_SAFE_CALL(cudaMemcpy(dist, out+offset,sizeof(float),cudaMemcpyDeviceToHost)); // if(*dist<min_dist) min_dist=*dist; // offset+=t_len; // CUDA_SAFE_CALL(cudaMemcpy(dist, out+offset,sizeof(float),cudaMemcpyDeviceToHost)); // if(*dist<min_dist) min_dist=*dist; // offset+=t_len; // CUDA_SAFE_CALL(cudaMemcpy(dist, out+offset,sizeof(float),cudaMemcpyDeviceToHost)); // if(*dist<min_dist) min_dist=*dist; // offset+=t_len; // CUDA_SAFE_CALL(cudaMemcpy(dist, out+offset,sizeof(float),cudaMemcpyDeviceToHost)); // if(*dist<min_dist) min_dist=*dist; } } } CUDA_SAFE_CALL(cudaFree(t2)); CUDA_SAFE_CALL(cudaFree(out)); return min_dist; }
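// ---------------------------------------------------------------------------
// Illustrative usage sketch (added for clarity; not part of the original
// file). calc_min_dist expects *device* pointers: it copies the template with
// cudaMemcpyDeviceToDevice and launches kernels directly on img. The flip,
// transpose and distance kernels tile the template in blocks of 16, so this
// sketch assumes t_w is a multiple of 16. The host buffer names (h_img, h_t)
// are hypothetical.
static float example_calc_min_dist(const float *h_img, int i_w, int i_h,
                                   const float *h_t, int t_w) {
  float *d_img = NULL, *d_t = NULL;
  CUDA_SAFE_CALL(cudaMalloc((void**)&d_img, sizeof(float) * i_w * i_h));
  CUDA_SAFE_CALL(cudaMalloc((void**)&d_t, sizeof(float) * t_w * t_w));
  CUDA_SAFE_CALL(cudaMemcpy(d_img, h_img, sizeof(float) * i_w * i_h,
                            cudaMemcpyHostToDevice));
  CUDA_SAFE_CALL(cudaMemcpy(d_t, h_t, sizeof(float) * t_w * t_w,
                            cudaMemcpyHostToDevice));
  // Scans every placement of the template over the image in all eight
  // flip/transpose orientations and returns the smallest sum of squared
  // differences found.
  float min_dist = calc_min_dist(d_img, i_w, i_h, d_t, t_w);
  CUDA_SAFE_CALL(cudaFree(d_img));
  CUDA_SAFE_CALL(cudaFree(d_t));
  return min_dist;
}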
8ab424b41b7cff2007fdd1e9ebaf52c99fbcc2b2.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "math_functions.h" #include "device_launch_parameters.h" #include <iostream> #include <vector> #include <sstream> #include <cstdio> #include <cstdlib> #include <stdio.h> #include <hiprand/hiprand.h> #include <cusolverSp.h> #include <helper_cuda.h> #include <hip/hip_runtime.h> #include <hiprand/hiprand_kernel.h> #include "rocblas.h" #include "cusolverDn.h" #include "helper_cusolver.h" using namespace std; void PrintDeviceProperties() { // Initialize Variables int device = 0; hipDeviceProp_t prop; // Get Properties of this device hipGetDeviceProperties(&prop, device); // Print Properties of this device printf("\n"); printf("Device Number: %d\n", device); printf(" Device name: %s\n", prop.name); printf(" Warp Size: %i\n", prop.warpSize); printf(" Max Threads Per Block: %i\n", prop.maxThreadsPerBlock); } // Function to retrieving time __host__ hipEvent_t get_time(void) { hipEvent_t time; hipEventCreate(&time); hipEventRecord(time); return time; } __global__ void init(unsigned int seed, hiprandState_t* states, const int numberOfElements) { // Initialize Variables const int currentThreadIndex = (blockIdx.x * blockDim.x) + threadIdx.x; // Exit if Thread out of bounds if (currentThreadIndex > numberOfElements) { return; } // Initialize Random Value hiprand_init(seed, currentThreadIndex, 0, &states[currentThreadIndex]); } __global__ void randoms(hiprandState_t* states, double* matrix, const int numberOfElements) { // Initialize Variables const int currentThreadIndex = (blockIdx.x * blockDim.x) + threadIdx.x; // Exit if Thread out of bounds if (currentThreadIndex > numberOfElements) { return; } // Set Random Number in Thread Index - make sure non-zero to prevent singular matrix for testing matrix[currentThreadIndex] = max(hiprand(&states[currentThreadIndex]) % 100, 1); } void GetRandomNumbersForMatrix(double *cpuMatrix, const int numberOfElements) { // Initialize Variables int device = 0; int numberOfBlocks; hipDeviceProp_t prop; int numberOfThreads; double *gpuMatrix = 0; hiprandState_t* states; int maxThreadsPerBlock; const int numberOfBytesInMatrix = numberOfElements * sizeof(double); // Get Properties of this device hipGetDeviceProperties(&prop, device); // Get Max Threads Per Block maxThreadsPerBlock = prop.maxThreadsPerBlock; // Verify that Machine has GPU Installed by // selecting first GPU available. 
hipSetDevice(0); // Get Number of Blocks Required and Number of Threads numberOfBlocks = (int)(numberOfElements / maxThreadsPerBlock) + 1; numberOfThreads = (int)(numberOfElements % maxThreadsPerBlock); // Allocate GPU Memory for States hipMalloc((void**)&states, numberOfElements * sizeof(hiprandState_t)); // Run Initialization init << <numberOfBlocks, numberOfThreads >> >((unsigned int)time(0), states, numberOfElements); // Allocate GPU Memory for input matrix hipMalloc((void**)&gpuMatrix, numberOfBytesInMatrix); // Add Random Numbers to Matrix hipLaunchKernelGGL(( randoms), dim3(numberOfBlocks), dim3(numberOfThreads), 0, 0, states, gpuMatrix, numberOfElements); // Copy Matrix Data From CPU Memory to GPU Memory hipMemcpy(cpuMatrix, gpuMatrix, numberOfBytesInMatrix, hipMemcpyDeviceToHost); // Free Allocated Memory hipFree(gpuMatrix); } std::string GetMatrixAsString(double *matrixElementsPntr, int squareMatrixDimension) { // Initialize Variable std::ostringstream matrixAsStringStream; matrixAsStringStream << "{" << std::endl; // Step through each row in matrix for (int i = 0; i < squareMatrixDimension; i++) { // Spacing for initial elements matrixAsStringStream << " "; // Step through each column in this row for (int j = 0; j < squareMatrixDimension; j++) { matrixAsStringStream << matrixElementsPntr[((i * squareMatrixDimension) + j)] << " "; } matrixAsStringStream << std::endl; } matrixAsStringStream << "};" << std::endl; // Return Matrix as String return matrixAsStringStream.str(); } float GetInvertedMatrixCPU(double *cpuInvertedMatrix, const double *cpuLUMatrix, const int *cpuPivotMatrix, const int squareMatrixDimension) { // Initialize Variables hipEvent_t stop; hipEvent_t start; double sumLowerTriangle = 0; double sumUpperTriangle = 0; float timeToCompleteInMs = 0; vector<double> solveArray(squareMatrixDimension, 0.0); vector<double> fowardSubstitutionArray(squareMatrixDimension, 0.0); vector<double> backwardSubtitutionArray(squareMatrixDimension, 0.0); // Keep Track of Start Time start = get_time(); // Solve for the Identity Matrix using resuls of LU Decomposition // Step through each row and solve for (int overallRowIndex = 0; overallRowIndex < squareMatrixDimension; overallRowIndex++) { // Initialize solveArray = vector<double>(squareMatrixDimension, 0.0); // Set to Identity solveArray[overallRowIndex] = 1; // Solve by doing foward substition for (int rowIndex = 0; rowIndex < squareMatrixDimension; rowIndex++) { // Set Accumulating sum to 0 sumLowerTriangle = 0; // Step through Each Column for (int columnIndex = 0; columnIndex < rowIndex; columnIndex++) { // Accumulate Lower Triangle Sum sumLowerTriangle += (cpuLUMatrix[(rowIndex * squareMatrixDimension) + columnIndex] * fowardSubstitutionArray[columnIndex]); } // Perform Foward Substituition using Pivot Array and Accumulating Lower Triangle Sum fowardSubstitutionArray[rowIndex] = solveArray[cpuPivotMatrix[rowIndex]] - sumLowerTriangle; } // Solve by doing backward substition for (int rowIndex = squareMatrixDimension - 1; rowIndex >= 0; rowIndex--) { // Set Accumulating sum to 0 sumUpperTriangle = 0; // Step through Each Column for (int columnIndex = rowIndex + 1; columnIndex < squareMatrixDimension; columnIndex++) { sumUpperTriangle += (cpuLUMatrix[(rowIndex * squareMatrixDimension) + columnIndex] * backwardSubtitutionArray[columnIndex]); } backwardSubtitutionArray[rowIndex] = ((fowardSubstitutionArray[rowIndex] - sumUpperTriangle) / cpuLUMatrix[(rowIndex * squareMatrixDimension) + rowIndex]); } // Perform final update to get 
Inverted Matrix for (int overallColumnIndex = 0; overallColumnIndex < squareMatrixDimension; overallColumnIndex++) { // Update Inverse Matrix cpuInvertedMatrix[(overallColumnIndex * squareMatrixDimension) + overallRowIndex] = backwardSubtitutionArray[overallColumnIndex]; } } // Keep Track of Stop Time stop = get_time(); // Synchronize Events timeToCompleteInMs = 0; hipEventSynchronize(stop); hipEventElapsedTime(&timeToCompleteInMs, start, stop); // Return time required to complete return timeToCompleteInMs; } float GetLUDecompositionMatrixCPU(double *cpuInvertedMatrix, int *cpuPivotMatrix, const double *cpuMatrix, const int numberOfElements, const int squareMatrixDimension) { // Initialize Variables hipEvent_t stop; hipEvent_t start; int maxValueIndex = 0; double largestValue = 0.0; float timeToCompleteInMs = 0; double matrixCurrentColumnValue = 0; double matrixLargestColumnValue = 0; int pivotMatrixCurrentColumnValue = 0; int pivotMatrixLargestColumnValue = 0; int *cpuMaxValueIndex = (int *)malloc(sizeof(int)); // Copy Initial Matrix into Inverted Matrix hipMemcpy(cpuInvertedMatrix, cpuMatrix, numberOfElements * sizeof(double), hipMemcpyHostToHost); // Keep Track of Start Time start = get_time(); // for each column in matrix for (int columnIndexInMatrix = 0; columnIndexInMatrix < squareMatrixDimension; columnIndexInMatrix++) { // Get all row elements in current column largestValue = 0; for (int rowIndex = columnIndexInMatrix; rowIndex < squareMatrixDimension; rowIndex++) { // Get Max Row Value in Column if (abs(cpuInvertedMatrix[(rowIndex * squareMatrixDimension) + columnIndexInMatrix]) > largestValue) { largestValue = abs(cpuInvertedMatrix[(rowIndex * squareMatrixDimension) + columnIndexInMatrix]); maxValueIndex = rowIndex; } } // Update Pivot Matrix Indices and Values pivotMatrixCurrentColumnValue = cpuPivotMatrix[columnIndexInMatrix]; pivotMatrixLargestColumnValue = cpuPivotMatrix[maxValueIndex]; cpuPivotMatrix[columnIndexInMatrix] = pivotMatrixLargestColumnValue; cpuPivotMatrix[maxValueIndex] = pivotMatrixCurrentColumnValue; // Each Column Will Exchange Current Column Row Element with Pivot Element for (int columnIndex = 0; columnIndex < squareMatrixDimension; columnIndex++) { matrixCurrentColumnValue = cpuInvertedMatrix[(columnIndexInMatrix * squareMatrixDimension) + columnIndex]; matrixLargestColumnValue = cpuInvertedMatrix[(maxValueIndex * squareMatrixDimension) + columnIndex]; cpuInvertedMatrix[(columnIndexInMatrix * squareMatrixDimension) + columnIndex] = matrixLargestColumnValue; cpuInvertedMatrix[(maxValueIndex * squareMatrixDimension) + columnIndex] = matrixCurrentColumnValue; } // Perform Shurs Complement for (int rowIndex = columnIndexInMatrix + 1; rowIndex < squareMatrixDimension; rowIndex++) { cpuInvertedMatrix[(rowIndex * squareMatrixDimension) + columnIndexInMatrix] /= cpuInvertedMatrix[(columnIndexInMatrix * squareMatrixDimension) + columnIndexInMatrix]; for (int columnIndex = columnIndexInMatrix + 1; columnIndex < squareMatrixDimension; columnIndex++) { cpuInvertedMatrix[(rowIndex * squareMatrixDimension) + columnIndex] -= (cpuInvertedMatrix[(rowIndex * squareMatrixDimension) + columnIndexInMatrix] * cpuInvertedMatrix[(columnIndexInMatrix * squareMatrixDimension) + columnIndex]); } } } // Keep Track of Stop Time stop = get_time(); // Synchronize Events timeToCompleteInMs = 0; hipEventSynchronize(stop); hipEventElapsedTime(&timeToCompleteInMs, start, stop); // Return time required to complete return timeToCompleteInMs; } float GetCuSparseInvertedMatrixGPU(double 
*cpuInvertedMatrix, const double *cpuMatrix, const int squareMatrixDimension) { // Initialize Variables int batch = 1; int *info = NULL; hipEvent_t stop; hipEvent_t start; hipblasHandle_t handle; double *gpuLUDecompositionMatrix = NULL; int *gpuPivotMatrix = NULL; double *gpuInvertedMatrix = NULL; float timeToCompleteInMs = 0; // Allocate Device Memory hipMalloc((void **)&gpuPivotMatrix, sizeof(int)*squareMatrixDimension); hipMalloc((void **)&gpuLUDecompositionMatrix, sizeof(double)*squareMatrixDimension*squareMatrixDimension); hipMalloc((void **)&gpuInvertedMatrix, sizeof(double)*squareMatrixDimension*squareMatrixDimension); // Copy Data from CPU to GPU hipMemcpy(gpuLUDecompositionMatrix, cpuMatrix, sizeof(double)*squareMatrixDimension*squareMatrixDimension, hipMemcpyHostToDevice); hipMemcpy(gpuInvertedMatrix, cpuInvertedMatrix, sizeof(double)*squareMatrixDimension*squareMatrixDimension, hipMemcpyHostToDevice); // Initialize More Variables double **gpuInvertedMatrixArrayOfPointers = NULL; double **gpuLUDecompositionMatrixArrayOfPointers = NULL; double *cpuInvertedMatrixArray[] = { gpuInvertedMatrix }; double *cpuLUDecompositionMatrixArray[] = { gpuLUDecompositionMatrix }; // Create Handle hipblasCreate(&handle); // Allocate Memory to device arrays hipMalloc((void **)&gpuInvertedMatrixArrayOfPointers, sizeof(cpuInvertedMatrixArray)); hipMalloc((void **)&gpuLUDecompositionMatrixArrayOfPointers, sizeof(cpuLUDecompositionMatrixArray)); // Copy Data from CPU to GPU hipMemcpy(gpuInvertedMatrixArrayOfPointers, cpuInvertedMatrixArray, sizeof(cpuInvertedMatrixArray), hipMemcpyHostToDevice); hipMemcpy(gpuLUDecompositionMatrixArrayOfPointers, cpuLUDecompositionMatrixArray, sizeof(cpuLUDecompositionMatrixArray), hipMemcpyHostToDevice); // Keep Track of Start Time start = get_time(); // Create Buffer hipMalloc((void **)&info, sizeof(int)); // Initiailize Memory for Info hipMemset(info, 0, sizeof(int)); // Perform LU Decomposition hipblasDgetrfBatched(handle, squareMatrixDimension, gpuLUDecompositionMatrixArrayOfPointers, squareMatrixDimension, gpuPivotMatrix, info, batch); // Compute Matrix Inverse hipblasDgetriBatched(handle, squareMatrixDimension, (const double **)gpuLUDecompositionMatrixArrayOfPointers, squareMatrixDimension, gpuPivotMatrix, gpuInvertedMatrixArrayOfPointers, squareMatrixDimension, info, batch); hipDeviceSynchronize(); // Copy results from GPU Memory to Host Memory hipMemcpy(cpuInvertedMatrix, gpuInvertedMatrix, sizeof(double)*squareMatrixDimension*squareMatrixDimension, hipMemcpyDeviceToHost); // Keep Track of Stop Time stop = get_time(); // Synchronize Events timeToCompleteInMs = 0; hipEventSynchronize(stop); hipEventElapsedTime(&timeToCompleteInMs, start, stop); // Free up allocated memory if (handle) { hipblasDestroy(handle); } if (gpuPivotMatrix) { hipFree(gpuPivotMatrix); } if (gpuInvertedMatrix) { hipFree(gpuInvertedMatrix); } if (gpuInvertedMatrixArrayOfPointers) { hipFree(gpuInvertedMatrixArrayOfPointers); } if (gpuLUDecompositionMatrixArrayOfPointers) { hipFree(gpuLUDecompositionMatrixArrayOfPointers); } // return time required to complete matrix inversion return timeToCompleteInMs; } float InvertCPU(double *cpuInvertedMatrix, const double *cpuMatrix, const int squareMatrixDimension) { // Initialize Variables float timeToGetLUDecompositionMatrix; float timeToInvertMatrixFromLUDecompositionAndPivotMatrix; const int numberOfElements = squareMatrixDimension * squareMatrixDimension; int *cpuPivotMatrixElementsPntr = (int *)malloc(squareMatrixDimension * sizeof(int)); double 
*cpuLUMatrixElementsPntr = (double *)malloc(squareMatrixDimension * squareMatrixDimension * sizeof(double)); // Initialize Pivot Matrix for (int i = 0; i < squareMatrixDimension; i++) { cpuPivotMatrixElementsPntr[i] = i; } // Add elements to matrix for (int i = 0; i < numberOfElements; i++) { cpuInvertedMatrix[i] = cpuMatrix[i]; cpuLUMatrixElementsPntr[i] = cpuMatrix[i]; } // On the CPU - Perform LU Decomposition to get LU Matrix and Pivot Matrix - returns time required to complete in ms timeToGetLUDecompositionMatrix = GetLUDecompositionMatrixCPU(cpuLUMatrixElementsPntr, cpuPivotMatrixElementsPntr, cpuMatrix, numberOfElements, squareMatrixDimension); // On the CPU - Use the LU Matrix and Pivot Matrix to get Inverte Matrix - returns time required to complete in ms timeToInvertMatrixFromLUDecompositionAndPivotMatrix = GetInvertedMatrixCPU(cpuInvertedMatrix, cpuLUMatrixElementsPntr, cpuPivotMatrixElementsPntr, squareMatrixDimension); // Accumulate all Time Required to invert Matrix on cpu return (timeToGetLUDecompositionMatrix + timeToInvertMatrixFromLUDecompositionAndPivotMatrix); } float InvertGPU(double *cpuInvertedMatrix, const double *cpuMatrix, const int squareMatrixDimension) { return GetCuSparseInvertedMatrixGPU(cpuInvertedMatrix, cpuMatrix, squareMatrixDimension); } // Main Function int main(int argc, char *argv[]) { // Print Arguments for Debugging std::cout << "Number of Arguments: " << argc << endl; std::cout << endl; // First Arg is Binary Name std::cout << "Binary Name: " << argv[0] << endl; std::cout << endl; // Second Arg is # of Threads std::cout << "Matrix Dimension: " << argv[1] << endl; std::cout << endl; // Initialize Variables std::string userInput{ "" }; bool invertSuccess = false; float cpuTimeToCompleteInMs = 0; float gpuTimeToCompleteInMs = 0; int numberOfRows = atoi(argv[1]); int numberOfColumns = atoi(argv[1]); double *cpuMatrixElementsPntr = 0; std::string cpuMatrixInversionResult{ "" }; std::string gpuMatrixInversionResult{ "" }; int numberOfElements = numberOfRows * numberOfColumns; double *cpuInvertedMatrixElementsPntrFromCPUComputation = 0; double *cpuInvertedMatrixElementsPntrFromGPUComputation = 0; int squareMatrixDimension = min(numberOfRows, numberOfColumns); // Allocate Memory cpuMatrixElementsPntr = (double *)malloc(numberOfElements * sizeof(double)); cpuInvertedMatrixElementsPntrFromGPUComputation = (double *)malloc(numberOfElements * sizeof(double)); cpuInvertedMatrixElementsPntrFromCPUComputation = (double *)malloc(numberOfElements * sizeof(double)); // Get Random Values for Elements GetRandomNumbersForMatrix(cpuMatrixElementsPntr, numberOfElements); // Print Matrix as String std::cout << "Original Matrix:" << endl; std::cout << GetMatrixAsString(cpuMatrixElementsPntr, squareMatrixDimension) << endl; std::cout << endl; // Perform GPU Matrix Inversion gpuTimeToCompleteInMs = InvertGPU(cpuInvertedMatrixElementsPntrFromGPUComputation, cpuMatrixElementsPntr, squareMatrixDimension); // Get GPU Computed Matrix Inversion as String gpuMatrixInversionResult = GetMatrixAsString(cpuInvertedMatrixElementsPntrFromGPUComputation, squareMatrixDimension); // Print Inverted Matrix (GPU) as String std::cout << "Inverted Matrix (GPU):" << endl; std::cout << gpuMatrixInversionResult << endl; std::cout << endl; // Perform CPU Matrix Inversion cpuTimeToCompleteInMs = InvertCPU(cpuInvertedMatrixElementsPntrFromCPUComputation, cpuMatrixElementsPntr, squareMatrixDimension); // Get CPU Computed Matrix Inversion as String cpuMatrixInversionResult = 
GetMatrixAsString(cpuInvertedMatrixElementsPntrFromCPUComputation, squareMatrixDimension); // Print Inverted Matrix (CPU) as String std::cout << "Inverted Matrix (CPU):" << endl; std::cout << cpuMatrixInversionResult << endl; std::cout << endl; // Check Results for success invertSuccess = (cpuMatrixInversionResult == gpuMatrixInversionResult); // Print out Results std::cout << "Results for Dimension " << squareMatrixDimension << ":" << endl; std::cout << " Invert Equivalent: " << ((invertSuccess == 1) ? "Success" : "Failed") << endl; std::cout << " CPU Time (ms): " << cpuTimeToCompleteInMs << endl; std::cout << " GPU Time (ms): " << gpuTimeToCompleteInMs << endl; std::cout << " Fastest: " << ((cpuTimeToCompleteInMs < gpuTimeToCompleteInMs) ? "CPU" : "GPU") << endl; std::cout << endl; // Wait for user to close application std::cout << "Press Any Button to Exit..." << endl; // Get User Input getline(cin, userInput); // return return EXIT_SUCCESS; }
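// ---------------------------------------------------------------------------
// Note on the CPU inversion path above (added for clarity; not part of the
// original program). GetLUDecompositionMatrixCPU produces a pivoted
// factorization P*A = L*U, with L unit lower triangular, U upper triangular,
// and the row permutation P recorded in cpuPivotMatrix. GetInvertedMatrixCPU
// then recovers A^-1 one column at a time: for each identity column e_j it
// solves
//   L * y = P * e_j   (forward substitution)
//   U * x = y         (backward substitution)
// and writes x into column j of cpuInvertedMatrix, since A * x = e_j.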
8ab424b41b7cff2007fdd1e9ebaf52c99fbcc2b2.cu
#include "cuda_runtime.h" #include "math_functions.h" #include "device_launch_parameters.h" #include <iostream> #include <vector> #include <sstream> #include <cstdio> #include <cstdlib> #include <stdio.h> #include <curand.h> #include <cusolverSp.h> #include <helper_cuda.h> #include <cuda_runtime.h> #include <curand_kernel.h> #include "cublas_v2.h" #include "cusolverDn.h" #include "helper_cusolver.h" using namespace std; void PrintDeviceProperties() { // Initialize Variables int device = 0; cudaDeviceProp prop; // Get Properties of this device cudaGetDeviceProperties(&prop, device); // Print Properties of this device printf("\n"); printf("Device Number: %d\n", device); printf(" Device name: %s\n", prop.name); printf(" Warp Size: %i\n", prop.warpSize); printf(" Max Threads Per Block: %i\n", prop.maxThreadsPerBlock); } // Function to retrieving time __host__ cudaEvent_t get_time(void) { cudaEvent_t time; cudaEventCreate(&time); cudaEventRecord(time); return time; } __global__ void init(unsigned int seed, curandState_t* states, const int numberOfElements) { // Initialize Variables const int currentThreadIndex = (blockIdx.x * blockDim.x) + threadIdx.x; // Exit if Thread out of bounds if (currentThreadIndex > numberOfElements) { return; } // Initialize Random Value curand_init(seed, currentThreadIndex, 0, &states[currentThreadIndex]); } __global__ void randoms(curandState_t* states, double* matrix, const int numberOfElements) { // Initialize Variables const int currentThreadIndex = (blockIdx.x * blockDim.x) + threadIdx.x; // Exit if Thread out of bounds if (currentThreadIndex > numberOfElements) { return; } // Set Random Number in Thread Index - make sure non-zero to prevent singular matrix for testing matrix[currentThreadIndex] = max(curand(&states[currentThreadIndex]) % 100, 1); } void GetRandomNumbersForMatrix(double *cpuMatrix, const int numberOfElements) { // Initialize Variables int device = 0; int numberOfBlocks; cudaDeviceProp prop; int numberOfThreads; double *gpuMatrix = 0; curandState_t* states; int maxThreadsPerBlock; const int numberOfBytesInMatrix = numberOfElements * sizeof(double); // Get Properties of this device cudaGetDeviceProperties(&prop, device); // Get Max Threads Per Block maxThreadsPerBlock = prop.maxThreadsPerBlock; // Verify that Machine has GPU Installed by // selecting first GPU available. 
cudaSetDevice(0); // Get Number of Blocks Required and Number of Threads numberOfBlocks = (int)(numberOfElements / maxThreadsPerBlock) + 1; numberOfThreads = (int)(numberOfElements % maxThreadsPerBlock); // Allocate GPU Memory for States cudaMalloc((void**)&states, numberOfElements * sizeof(curandState_t)); // Run Initialization init << <numberOfBlocks, numberOfThreads >> >((unsigned int)time(0), states, numberOfElements); // Allocate GPU Memory for input matrix cudaMalloc((void**)&gpuMatrix, numberOfBytesInMatrix); // Add Random Numbers to Matrix randoms<<<numberOfBlocks, numberOfThreads>>>(states, gpuMatrix, numberOfElements); // Copy Matrix Data From CPU Memory to GPU Memory cudaMemcpy(cpuMatrix, gpuMatrix, numberOfBytesInMatrix, cudaMemcpyDeviceToHost); // Free Allocated Memory cudaFree(gpuMatrix); } std::string GetMatrixAsString(double *matrixElementsPntr, int squareMatrixDimension) { // Initialize Variable std::ostringstream matrixAsStringStream; matrixAsStringStream << "{" << std::endl; // Step through each row in matrix for (int i = 0; i < squareMatrixDimension; i++) { // Spacing for initial elements matrixAsStringStream << " "; // Step through each column in this row for (int j = 0; j < squareMatrixDimension; j++) { matrixAsStringStream << matrixElementsPntr[((i * squareMatrixDimension) + j)] << " "; } matrixAsStringStream << std::endl; } matrixAsStringStream << "};" << std::endl; // Return Matrix as String return matrixAsStringStream.str(); } float GetInvertedMatrixCPU(double *cpuInvertedMatrix, const double *cpuLUMatrix, const int *cpuPivotMatrix, const int squareMatrixDimension) { // Initialize Variables cudaEvent_t stop; cudaEvent_t start; double sumLowerTriangle = 0; double sumUpperTriangle = 0; float timeToCompleteInMs = 0; vector<double> solveArray(squareMatrixDimension, 0.0); vector<double> fowardSubstitutionArray(squareMatrixDimension, 0.0); vector<double> backwardSubtitutionArray(squareMatrixDimension, 0.0); // Keep Track of Start Time start = get_time(); // Solve for the Identity Matrix using resuls of LU Decomposition // Step through each row and solve for (int overallRowIndex = 0; overallRowIndex < squareMatrixDimension; overallRowIndex++) { // Initialize solveArray = vector<double>(squareMatrixDimension, 0.0); // Set to Identity solveArray[overallRowIndex] = 1; // Solve by doing foward substition for (int rowIndex = 0; rowIndex < squareMatrixDimension; rowIndex++) { // Set Accumulating sum to 0 sumLowerTriangle = 0; // Step through Each Column for (int columnIndex = 0; columnIndex < rowIndex; columnIndex++) { // Accumulate Lower Triangle Sum sumLowerTriangle += (cpuLUMatrix[(rowIndex * squareMatrixDimension) + columnIndex] * fowardSubstitutionArray[columnIndex]); } // Perform Foward Substituition using Pivot Array and Accumulating Lower Triangle Sum fowardSubstitutionArray[rowIndex] = solveArray[cpuPivotMatrix[rowIndex]] - sumLowerTriangle; } // Solve by doing backward substition for (int rowIndex = squareMatrixDimension - 1; rowIndex >= 0; rowIndex--) { // Set Accumulating sum to 0 sumUpperTriangle = 0; // Step through Each Column for (int columnIndex = rowIndex + 1; columnIndex < squareMatrixDimension; columnIndex++) { sumUpperTriangle += (cpuLUMatrix[(rowIndex * squareMatrixDimension) + columnIndex] * backwardSubtitutionArray[columnIndex]); } backwardSubtitutionArray[rowIndex] = ((fowardSubstitutionArray[rowIndex] - sumUpperTriangle) / cpuLUMatrix[(rowIndex * squareMatrixDimension) + rowIndex]); } // Perform final update to get Inverted Matrix for (int 
overallColumnIndex = 0; overallColumnIndex < squareMatrixDimension; overallColumnIndex++) { // Update Inverse Matrix cpuInvertedMatrix[(overallColumnIndex * squareMatrixDimension) + overallRowIndex] = backwardSubtitutionArray[overallColumnIndex]; } } // Keep Track of Stop Time stop = get_time(); // Synchronize Events timeToCompleteInMs = 0; cudaEventSynchronize(stop); cudaEventElapsedTime(&timeToCompleteInMs, start, stop); // Return time required to complete return timeToCompleteInMs; } float GetLUDecompositionMatrixCPU(double *cpuInvertedMatrix, int *cpuPivotMatrix, const double *cpuMatrix, const int numberOfElements, const int squareMatrixDimension) { // Initialize Variables cudaEvent_t stop; cudaEvent_t start; int maxValueIndex = 0; double largestValue = 0.0; float timeToCompleteInMs = 0; double matrixCurrentColumnValue = 0; double matrixLargestColumnValue = 0; int pivotMatrixCurrentColumnValue = 0; int pivotMatrixLargestColumnValue = 0; int *cpuMaxValueIndex = (int *)malloc(sizeof(int)); // Copy Initial Matrix into Inverted Matrix cudaMemcpy(cpuInvertedMatrix, cpuMatrix, numberOfElements * sizeof(double), cudaMemcpyHostToHost); // Keep Track of Start Time start = get_time(); // for each column in matrix for (int columnIndexInMatrix = 0; columnIndexInMatrix < squareMatrixDimension; columnIndexInMatrix++) { // Get all row elements in current column largestValue = 0; for (int rowIndex = columnIndexInMatrix; rowIndex < squareMatrixDimension; rowIndex++) { // Get Max Row Value in Column if (abs(cpuInvertedMatrix[(rowIndex * squareMatrixDimension) + columnIndexInMatrix]) > largestValue) { largestValue = abs(cpuInvertedMatrix[(rowIndex * squareMatrixDimension) + columnIndexInMatrix]); maxValueIndex = rowIndex; } } // Update Pivot Matrix Indices and Values pivotMatrixCurrentColumnValue = cpuPivotMatrix[columnIndexInMatrix]; pivotMatrixLargestColumnValue = cpuPivotMatrix[maxValueIndex]; cpuPivotMatrix[columnIndexInMatrix] = pivotMatrixLargestColumnValue; cpuPivotMatrix[maxValueIndex] = pivotMatrixCurrentColumnValue; // Each Column Will Exchange Current Column Row Element with Pivot Element for (int columnIndex = 0; columnIndex < squareMatrixDimension; columnIndex++) { matrixCurrentColumnValue = cpuInvertedMatrix[(columnIndexInMatrix * squareMatrixDimension) + columnIndex]; matrixLargestColumnValue = cpuInvertedMatrix[(maxValueIndex * squareMatrixDimension) + columnIndex]; cpuInvertedMatrix[(columnIndexInMatrix * squareMatrixDimension) + columnIndex] = matrixLargestColumnValue; cpuInvertedMatrix[(maxValueIndex * squareMatrixDimension) + columnIndex] = matrixCurrentColumnValue; } // Perform Shurs Complement for (int rowIndex = columnIndexInMatrix + 1; rowIndex < squareMatrixDimension; rowIndex++) { cpuInvertedMatrix[(rowIndex * squareMatrixDimension) + columnIndexInMatrix] /= cpuInvertedMatrix[(columnIndexInMatrix * squareMatrixDimension) + columnIndexInMatrix]; for (int columnIndex = columnIndexInMatrix + 1; columnIndex < squareMatrixDimension; columnIndex++) { cpuInvertedMatrix[(rowIndex * squareMatrixDimension) + columnIndex] -= (cpuInvertedMatrix[(rowIndex * squareMatrixDimension) + columnIndexInMatrix] * cpuInvertedMatrix[(columnIndexInMatrix * squareMatrixDimension) + columnIndex]); } } } // Keep Track of Stop Time stop = get_time(); // Synchronize Events timeToCompleteInMs = 0; cudaEventSynchronize(stop); cudaEventElapsedTime(&timeToCompleteInMs, start, stop); // Return time required to complete return timeToCompleteInMs; } float GetCuSparseInvertedMatrixGPU(double *cpuInvertedMatrix, 
const double *cpuMatrix, const int squareMatrixDimension) { // Initialize Variables int batch = 1; int *info = NULL; cudaEvent_t stop; cudaEvent_t start; cublasHandle_t handle; double *gpuLUDecompositionMatrix = NULL; int *gpuPivotMatrix = NULL; double *gpuInvertedMatrix = NULL; float timeToCompleteInMs = 0; // Allocate Device Memory cudaMalloc((void **)&gpuPivotMatrix, sizeof(int)*squareMatrixDimension); cudaMalloc((void **)&gpuLUDecompositionMatrix, sizeof(double)*squareMatrixDimension*squareMatrixDimension); cudaMalloc((void **)&gpuInvertedMatrix, sizeof(double)*squareMatrixDimension*squareMatrixDimension); // Copy Data from CPU to GPU cudaMemcpy(gpuLUDecompositionMatrix, cpuMatrix, sizeof(double)*squareMatrixDimension*squareMatrixDimension, cudaMemcpyHostToDevice); cudaMemcpy(gpuInvertedMatrix, cpuInvertedMatrix, sizeof(double)*squareMatrixDimension*squareMatrixDimension, cudaMemcpyHostToDevice); // Initialize More Variables double **gpuInvertedMatrixArrayOfPointers = NULL; double **gpuLUDecompositionMatrixArrayOfPointers = NULL; double *cpuInvertedMatrixArray[] = { gpuInvertedMatrix }; double *cpuLUDecompositionMatrixArray[] = { gpuLUDecompositionMatrix }; // Create Handle cublasCreate_v2(&handle); // Allocate Memory to device arrays cudaMalloc((void **)&gpuInvertedMatrixArrayOfPointers, sizeof(cpuInvertedMatrixArray)); cudaMalloc((void **)&gpuLUDecompositionMatrixArrayOfPointers, sizeof(cpuLUDecompositionMatrixArray)); // Copy Data from CPU to GPU cudaMemcpy(gpuInvertedMatrixArrayOfPointers, cpuInvertedMatrixArray, sizeof(cpuInvertedMatrixArray), cudaMemcpyHostToDevice); cudaMemcpy(gpuLUDecompositionMatrixArrayOfPointers, cpuLUDecompositionMatrixArray, sizeof(cpuLUDecompositionMatrixArray), cudaMemcpyHostToDevice); // Keep Track of Start Time start = get_time(); // Create Buffer cudaMalloc((void **)&info, sizeof(int)); // Initiailize Memory for Info cudaMemset(info, 0, sizeof(int)); // Perform LU Decomposition cublasDgetrfBatched(handle, squareMatrixDimension, gpuLUDecompositionMatrixArrayOfPointers, squareMatrixDimension, gpuPivotMatrix, info, batch); // Compute Matrix Inverse cublasDgetriBatched(handle, squareMatrixDimension, (const double **)gpuLUDecompositionMatrixArrayOfPointers, squareMatrixDimension, gpuPivotMatrix, gpuInvertedMatrixArrayOfPointers, squareMatrixDimension, info, batch); cudaDeviceSynchronize(); // Copy results from GPU Memory to Host Memory cudaMemcpy(cpuInvertedMatrix, gpuInvertedMatrix, sizeof(double)*squareMatrixDimension*squareMatrixDimension, cudaMemcpyDeviceToHost); // Keep Track of Stop Time stop = get_time(); // Synchronize Events timeToCompleteInMs = 0; cudaEventSynchronize(stop); cudaEventElapsedTime(&timeToCompleteInMs, start, stop); // Free up allocated memory if (handle) { cublasDestroy_v2(handle); } if (gpuPivotMatrix) { cudaFree(gpuPivotMatrix); } if (gpuInvertedMatrix) { cudaFree(gpuInvertedMatrix); } if (gpuInvertedMatrixArrayOfPointers) { cudaFree(gpuInvertedMatrixArrayOfPointers); } if (gpuLUDecompositionMatrixArrayOfPointers) { cudaFree(gpuLUDecompositionMatrixArrayOfPointers); } // return time required to complete matrix inversion return timeToCompleteInMs; } float InvertCPU(double *cpuInvertedMatrix, const double *cpuMatrix, const int squareMatrixDimension) { // Initialize Variables float timeToGetLUDecompositionMatrix; float timeToInvertMatrixFromLUDecompositionAndPivotMatrix; const int numberOfElements = squareMatrixDimension * squareMatrixDimension; int *cpuPivotMatrixElementsPntr = (int *)malloc(squareMatrixDimension * sizeof(int)); 
double *cpuLUMatrixElementsPntr = (double *)malloc(squareMatrixDimension * squareMatrixDimension * sizeof(double)); // Initialize Pivot Matrix for (int i = 0; i < squareMatrixDimension; i++) { cpuPivotMatrixElementsPntr[i] = i; } // Add elements to matrix for (int i = 0; i < numberOfElements; i++) { cpuInvertedMatrix[i] = cpuMatrix[i]; cpuLUMatrixElementsPntr[i] = cpuMatrix[i]; } // On the CPU - Perform LU Decomposition to get LU Matrix and Pivot Matrix - returns time required to complete in ms timeToGetLUDecompositionMatrix = GetLUDecompositionMatrixCPU(cpuLUMatrixElementsPntr, cpuPivotMatrixElementsPntr, cpuMatrix, numberOfElements, squareMatrixDimension); // On the CPU - Use the LU Matrix and Pivot Matrix to get Inverte Matrix - returns time required to complete in ms timeToInvertMatrixFromLUDecompositionAndPivotMatrix = GetInvertedMatrixCPU(cpuInvertedMatrix, cpuLUMatrixElementsPntr, cpuPivotMatrixElementsPntr, squareMatrixDimension); // Accumulate all Time Required to invert Matrix on cpu return (timeToGetLUDecompositionMatrix + timeToInvertMatrixFromLUDecompositionAndPivotMatrix); } float InvertGPU(double *cpuInvertedMatrix, const double *cpuMatrix, const int squareMatrixDimension) { return GetCuSparseInvertedMatrixGPU(cpuInvertedMatrix, cpuMatrix, squareMatrixDimension); } // Main Function int main(int argc, char *argv[]) { // Print Arguments for Debugging std::cout << "Number of Arguments: " << argc << endl; std::cout << endl; // First Arg is Binary Name std::cout << "Binary Name: " << argv[0] << endl; std::cout << endl; // Second Arg is # of Threads std::cout << "Matrix Dimension: " << argv[1] << endl; std::cout << endl; // Initialize Variables std::string userInput{ "" }; bool invertSuccess = false; float cpuTimeToCompleteInMs = 0; float gpuTimeToCompleteInMs = 0; int numberOfRows = atoi(argv[1]); int numberOfColumns = atoi(argv[1]); double *cpuMatrixElementsPntr = 0; std::string cpuMatrixInversionResult{ "" }; std::string gpuMatrixInversionResult{ "" }; int numberOfElements = numberOfRows * numberOfColumns; double *cpuInvertedMatrixElementsPntrFromCPUComputation = 0; double *cpuInvertedMatrixElementsPntrFromGPUComputation = 0; int squareMatrixDimension = min(numberOfRows, numberOfColumns); // Allocate Memory cpuMatrixElementsPntr = (double *)malloc(numberOfElements * sizeof(double)); cpuInvertedMatrixElementsPntrFromGPUComputation = (double *)malloc(numberOfElements * sizeof(double)); cpuInvertedMatrixElementsPntrFromCPUComputation = (double *)malloc(numberOfElements * sizeof(double)); // Get Random Values for Elements GetRandomNumbersForMatrix(cpuMatrixElementsPntr, numberOfElements); // Print Matrix as String std::cout << "Original Matrix:" << endl; std::cout << GetMatrixAsString(cpuMatrixElementsPntr, squareMatrixDimension) << endl; std::cout << endl; // Perform GPU Matrix Inversion gpuTimeToCompleteInMs = InvertGPU(cpuInvertedMatrixElementsPntrFromGPUComputation, cpuMatrixElementsPntr, squareMatrixDimension); // Get GPU Computed Matrix Inversion as String gpuMatrixInversionResult = GetMatrixAsString(cpuInvertedMatrixElementsPntrFromGPUComputation, squareMatrixDimension); // Print Inverted Matrix (GPU) as String std::cout << "Inverted Matrix (GPU):" << endl; std::cout << gpuMatrixInversionResult << endl; std::cout << endl; // Perform CPU Matrix Inversion cpuTimeToCompleteInMs = InvertCPU(cpuInvertedMatrixElementsPntrFromCPUComputation, cpuMatrixElementsPntr, squareMatrixDimension); // Get CPU Computed Matrix Inversion as String cpuMatrixInversionResult = 
GetMatrixAsString(cpuInvertedMatrixElementsPntrFromCPUComputation, squareMatrixDimension); // Print Inverted Matrix (CPU) as String std::cout << "Inverted Matrix (CPU):" << endl; std::cout << cpuMatrixInversionResult << endl; std::cout << endl; // Check Results for success invertSuccess = (cpuMatrixInversionResult == gpuMatrixInversionResult); // Print out Results std::cout << "Results for Dimension " << squareMatrixDimension << ":" << endl; std::cout << " Invert Equivalent: " << ((invertSuccess == 1) ? "Success" : "Failed") << endl; std::cout << " CPU Time (ms): " << cpuTimeToCompleteInMs << endl; std::cout << " GPU Time (ms): " << gpuTimeToCompleteInMs << endl; std::cout << " Fastest: " << ((cpuTimeToCompleteInMs < gpuTimeToCompleteInMs) ? "CPU" : "GPU") << endl; std::cout << endl; // Wait for user to close application std::cout << "Press Any Button to Exit..." << endl; // Get User Input getline(cin, userInput); // return return EXIT_SUCCESS; }
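// ---------------------------------------------------------------------------
// Illustrative verification sketch (added for clarity; not part of the
// original program). main() above compares the CPU and GPU inverses by their
// printed strings; a tolerance-based check such as this one is a common
// alternative, shown here only as a sketch. It multiplies the original matrix
// by a computed inverse (both row-major, as used throughout this file) and
// checks the product against the identity, e.g.
//   example_check_inverse(cpuMatrixElementsPntr,
//                         cpuInvertedMatrixElementsPntrFromGPUComputation,
//                         squareMatrixDimension, 1e-6);
static bool example_check_inverse(const double *matrix, const double *inverse,
                                  int dim, double tolerance) {
  for (int row = 0; row < dim; row++) {
    for (int col = 0; col < dim; col++) {
      double sum = 0.0;
      for (int k = 0; k < dim; k++) {
        sum += matrix[(row * dim) + k] * inverse[(k * dim) + col];
      }
      double expected = (row == col) ? 1.0 : 0.0;
      double diff = sum - expected;
      if (diff < 0.0) diff = -diff;   // manual abs to avoid extra headers
      if (diff > tolerance) return false;
    }
  }
  return true;
}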
cffa09069cb3c33d92eaf0d5a9f8a8b13959838f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "gradient2d-512-8-128_kernel.hu" __device__ double __sbref_wrap(double *sb, size_t index) { return sb[index]; } __global__ void kernel0_8(double *A, int dimsize, int timestep, int c0) { #ifndef AN5D_TYPE #define AN5D_TYPE unsigned #endif const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0); #define __c0 c0 const AN5D_TYPE __c1Len = (dimsize - 1 - 1); const AN5D_TYPE __c1Pad = (1); #define __c1 c1 const AN5D_TYPE __c2Len = (dimsize - 1 - 1); const AN5D_TYPE __c2Pad = (1); #define __c2 c2 const AN5D_TYPE __halo1 = 1; const AN5D_TYPE __halo2 = 1; const AN5D_TYPE __side0Len = 8; const AN5D_TYPE __side1Len = 128; const AN5D_TYPE __side2Len = 496; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len; const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len; const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x; const AN5D_TYPE __local_c2 = __tid; const AN5D_TYPE __c1Id = blockIdx.x / __side2Num; const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2; double __reg_0_0; double __reg_0_1; double __reg_0_2; double __reg_1_0; double __reg_1_1; double __reg_1_2; double __reg_2_0; double __reg_2_1; double __reg_2_2; double __reg_3_0; double __reg_3_1; double __reg_3_2; double __reg_4_0; double __reg_4_1; double __reg_4_2; double __reg_5_0; double __reg_5_1; double __reg_5_2; double __reg_6_0; double __reg_6_1; double __reg_6_2; double __reg_7_0; double __reg_7_1; double __reg_7_2; __shared__ double __b_sb_double[__blockSize * 2]; double *__b_sb = __b_sb_double; const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2; const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len; const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1); const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2); const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3); const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4); const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5); const AN5D_TYPE __writeValid6 = __updateValid && __local_c2 >= (__halo2 * 6) && __local_c2 < __side2LenOl - (__halo2 * 6); const AN5D_TYPE __writeValid7 = __updateValid && __local_c2 >= (__halo2 * 7) && __local_c2 < __side2LenOl - (__halo2 * 7); const AN5D_TYPE __writeValid8 = __updateValid && __local_c2 >= (__halo2 * 8) && __local_c2 < __side2LenOl - (__halo2 * 8); const AN5D_TYPE __storeValid = __writeValid8; AN5D_TYPE __c1; AN5D_TYPE __h; const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id; #define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0) #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define 
__CALCEXPR(__rn0, __a, __b, __c) do { __rn0 = ((__REGREF(__b, 0)) + (1.0f / sqrt((((0.0001f + (((__REGREF(__b, 0)) - (__REGREF(__a, 0))) * ((__REGREF(__b, 0)) - (__REGREF(__a, 0))))) + (((__REGREF(__b, 0)) - (__REGREF(__c, 0))) * ((__REGREF(__b, 0)) - (__REGREF(__c, 0))))) + (((__REGREF(__b, 0)) - (__SBREF(__b_sb, 1))) * ((__REGREF(__b, 0)) - (__SBREF(__b_sb, 1))))) + (((__REGREF(__b, 0)) - (__SBREF(__b_sb, -1))) * ((__REGREF(__b, 0)) - (__SBREF(__b_sb, -1))))))); } while (0) #define __DB_SWITCH() do { __b_sb = &__b_sb_double[(__b_sb == __b_sb_double) ? __blockSize : 0]; } while (0) #define __CALCSETUP(a, b, c) do { __DB_SWITCH(); __b_sb[__tid] = b; __syncthreads(); } while (0) #define __CALC1(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC2(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC3(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC4(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid4) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC5(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid5) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC6(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid6) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC7(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid7) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __STORE(h, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2); } } while (0) if (__c1Id == 0) { __LOAD(__reg_7_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_7_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_7_0, __reg_1_1, __reg_1_2); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_7_0, __reg_2_1, __reg_2_2); __LOAD(__reg_0_2, 5); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_7_0, __reg_3_1, __reg_3_2); __LOAD(__reg_0_0, 6); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_7_0, __reg_4_1, __reg_4_2); __LOAD(__reg_0_1, 7); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_7_0, __reg_5_1, __reg_5_2); __LOAD(__reg_0_2, 8); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, 
__reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_7_0, __reg_6_1, __reg_6_2); __LOAD(__reg_0_0, 9); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __STORE(1, __reg_7_0, __reg_7_1, __reg_7_2); __LOAD(__reg_0_1, 10); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __STORE(2, __reg_7_1, __reg_7_2, __reg_7_0); __LOAD(__reg_0_2, 11); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __STORE(3, __reg_7_2, __reg_7_0, __reg_7_1); __LOAD(__reg_0_0, 12); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __STORE(4, __reg_7_0, __reg_7_1, __reg_7_2); __LOAD(__reg_0_1, 13); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __STORE(5, __reg_7_1, __reg_7_2, __reg_7_0); __LOAD(__reg_0_2, 14); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __STORE(6, __reg_7_2, __reg_7_0, __reg_7_1); __LOAD(__reg_0_0, 15); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __STORE(7, __reg_7_0, __reg_7_1, __reg_7_2); __LOAD(__reg_0_1, 16); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, 
__reg_6_2, __reg_6_0, __reg_6_1); __STORE(8, __reg_7_1, __reg_7_2, __reg_7_0); } else { __LOAD(__reg_0_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __LOAD(__reg_0_2, 5); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __LOAD(__reg_0_0, 6); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __LOAD(__reg_0_1, 7); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __LOAD(__reg_0_2, 8); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __LOAD(__reg_0_0, 9); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __LOAD(__reg_0_1, 10); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __LOAD(__reg_0_2, 11); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __LOAD(__reg_0_0, 12); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __LOAD(__reg_0_1, 13); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __LOAD(__reg_0_2, 14); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __LOAD(__reg_0_0, 15); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __LOAD(__reg_0_1, 16); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); 
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __STORE(8, __reg_7_1, __reg_7_2, __reg_7_0); } __b_sb = __b_sb_double + __blockSize * 0; if (__c1Id == __side1Num - 1) { for (__h = 17; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 3;) { __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __STORE(__h - 8, __reg_7_2, __reg_7_0, __reg_7_1); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __STORE(__h - 8, __reg_7_0, __reg_7_1, __reg_7_2); __h++; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __STORE(__h - 8, __reg_7_1, __reg_7_2, __reg_7_0); __h++; } if (0) {} else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_0_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __STORE(__h - 8, __reg_7_2, __reg_7_0, __reg_7_1); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_0_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __STORE(__h - 7, __reg_7_0, __reg_7_1, __reg_7_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_0_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __STORE(__h - 6, __reg_7_1, __reg_7_2, __reg_7_0); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_0_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __STORE(__h - 5, __reg_7_2, __reg_7_0, __reg_7_1); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_0_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __STORE(__h - 4, __reg_7_0, __reg_7_1, __reg_7_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_0_1); __STORE(__h - 3, __reg_7_1, __reg_7_2, __reg_7_0); __STORE(__h - 2, __reg_7_2, __reg_7_0, __reg_0_1); } else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_2, __h + 0); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, 
__reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __STORE(__h - 8, __reg_7_2, __reg_7_0, __reg_7_1); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_0_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __STORE(__h - 7, __reg_7_0, __reg_7_1, __reg_7_2); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_0_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __STORE(__h - 6, __reg_7_1, __reg_7_2, __reg_7_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_0_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __STORE(__h - 5, __reg_7_2, __reg_7_0, __reg_7_1); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_0_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __STORE(__h - 4, __reg_7_0, __reg_7_1, __reg_7_2); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_0_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __STORE(__h - 3, __reg_7_1, __reg_7_2, __reg_7_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_0_2); __STORE(__h - 2, __reg_7_2, __reg_7_0, __reg_7_1); __STORE(__h - 1, __reg_7_0, __reg_7_1, __reg_0_2); } else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_2, __h + 0); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __STORE(__h - 8, __reg_7_2, __reg_7_0, __reg_7_1); __LOAD(__reg_0_0, __h + 1); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __STORE(__h - 7, __reg_7_0, __reg_7_1, __reg_7_2); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_0_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __STORE(__h - 6, __reg_7_1, __reg_7_2, __reg_7_0); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_0_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __STORE(__h - 5, __reg_7_2, __reg_7_0, __reg_7_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_0_0); __CALC5(__reg_5_1, __reg_4_0, 
__reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __STORE(__h - 4, __reg_7_0, __reg_7_1, __reg_7_2); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_0_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __STORE(__h - 3, __reg_7_1, __reg_7_2, __reg_7_0); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_0_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __STORE(__h - 2, __reg_7_2, __reg_7_0, __reg_7_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_0_0); __STORE(__h - 1, __reg_7_0, __reg_7_1, __reg_7_2); __STORE(__h + 0, __reg_7_1, __reg_7_2, __reg_0_0); } } else { for (__h = 17; __h <= __side1LenOl - 3;) { __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __STORE(__h - 8, __reg_7_2, __reg_7_0, __reg_7_1); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __STORE(__h - 8, __reg_7_0, __reg_7_1, __reg_7_2); __h++; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __STORE(__h - 8, __reg_7_1, __reg_7_2, __reg_7_0); __h++; } if (__h == __side1LenOl) return; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __STORE(__h - 8, __reg_7_2, __reg_7_0, __reg_7_1); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __STORE(__h - 8, __reg_7_0, __reg_7_1, __reg_7_2); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __STORE(__h - 8, __reg_7_1, 
__reg_7_2, __reg_7_0); __h++; } } __global__ void kernel0_7(double *A, int dimsize, int timestep, int c0) { #ifndef AN5D_TYPE #define AN5D_TYPE unsigned #endif const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0); #define __c0 c0 const AN5D_TYPE __c1Len = (dimsize - 1 - 1); const AN5D_TYPE __c1Pad = (1); #define __c1 c1 const AN5D_TYPE __c2Len = (dimsize - 1 - 1); const AN5D_TYPE __c2Pad = (1); #define __c2 c2 const AN5D_TYPE __halo1 = 1; const AN5D_TYPE __halo2 = 1; const AN5D_TYPE __side0Len = 7; const AN5D_TYPE __side1Len = 128; const AN5D_TYPE __side2Len = 498; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len; const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len; const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x; const AN5D_TYPE __local_c2 = __tid; const AN5D_TYPE __c1Id = blockIdx.x / __side2Num; const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2; double __reg_0_0; double __reg_0_1; double __reg_0_2; double __reg_1_0; double __reg_1_1; double __reg_1_2; double __reg_2_0; double __reg_2_1; double __reg_2_2; double __reg_3_0; double __reg_3_1; double __reg_3_2; double __reg_4_0; double __reg_4_1; double __reg_4_2; double __reg_5_0; double __reg_5_1; double __reg_5_2; double __reg_6_0; double __reg_6_1; double __reg_6_2; __shared__ double __b_sb_double[__blockSize * 2]; double *__b_sb = __b_sb_double; const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2; const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len; const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1); const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2); const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3); const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4); const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5); const AN5D_TYPE __writeValid6 = __updateValid && __local_c2 >= (__halo2 * 6) && __local_c2 < __side2LenOl - (__halo2 * 6); const AN5D_TYPE __writeValid7 = __updateValid && __local_c2 >= (__halo2 * 7) && __local_c2 < __side2LenOl - (__halo2 * 7); const AN5D_TYPE __storeValid = __writeValid7; AN5D_TYPE __c1; AN5D_TYPE __h; const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id; #define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0) #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR(__rn0, __a, __b, __c) do { __rn0 = ((__REGREF(__b, 0)) + (1.0f / sqrt((((0.0001f + (((__REGREF(__b, 0)) - (__REGREF(__a, 0))) * ((__REGREF(__b, 0)) - (__REGREF(__a, 0))))) + (((__REGREF(__b, 0)) - (__REGREF(__c, 0))) * ((__REGREF(__b, 0)) - (__REGREF(__c, 0))))) + (((__REGREF(__b, 0)) - (__SBREF(__b_sb, 1))) * ((__REGREF(__b, 0)) - 
(__SBREF(__b_sb, 1))))) + (((__REGREF(__b, 0)) - (__SBREF(__b_sb, -1))) * ((__REGREF(__b, 0)) - (__SBREF(__b_sb, -1))))))); } while (0) #define __DB_SWITCH() do { __b_sb = &__b_sb_double[(__b_sb == __b_sb_double) ? __blockSize : 0]; } while (0) #define __CALCSETUP(a, b, c) do { __DB_SWITCH(); __b_sb[__tid] = b; __syncthreads(); } while (0) #define __CALC1(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC2(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC3(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC4(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid4) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC5(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid5) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC6(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid6) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __STORE(h, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2); } } while (0) if (__c1Id == 0) { __LOAD(__reg_6_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_6_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_6_0, __reg_1_1, __reg_1_2); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_6_0, __reg_2_1, __reg_2_2); __LOAD(__reg_0_2, 5); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_6_0, __reg_3_1, __reg_3_2); __LOAD(__reg_0_0, 6); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_6_0, __reg_4_1, __reg_4_2); __LOAD(__reg_0_1, 7); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_6_0, __reg_5_1, __reg_5_2); __LOAD(__reg_0_2, 8); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __STORE(1, __reg_6_0, __reg_6_1, __reg_6_2); __LOAD(__reg_0_0, 9); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __STORE(2, __reg_6_1, __reg_6_2, __reg_6_0); 
__LOAD(__reg_0_1, 10); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __STORE(3, __reg_6_2, __reg_6_0, __reg_6_1); __LOAD(__reg_0_2, 11); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __STORE(4, __reg_6_0, __reg_6_1, __reg_6_2); __LOAD(__reg_0_0, 12); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __STORE(5, __reg_6_1, __reg_6_2, __reg_6_0); __LOAD(__reg_0_1, 13); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __STORE(6, __reg_6_2, __reg_6_0, __reg_6_1); __LOAD(__reg_0_2, 14); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __STORE(7, __reg_6_0, __reg_6_1, __reg_6_2); } else { __LOAD(__reg_0_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __LOAD(__reg_0_2, 5); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __LOAD(__reg_0_0, 6); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __LOAD(__reg_0_1, 7); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __LOAD(__reg_0_2, 8); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __LOAD(__reg_0_0, 9); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __LOAD(__reg_0_1, 10); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __LOAD(__reg_0_2, 11); __CALC1(__reg_1_1, 
__reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __LOAD(__reg_0_0, 12); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __LOAD(__reg_0_1, 13); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __LOAD(__reg_0_2, 14); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __STORE(7, __reg_6_0, __reg_6_1, __reg_6_2); __DB_SWITCH(); __syncthreads(); } __b_sb = __b_sb_double + __blockSize * 0; if (__c1Id == __side1Num - 1) { for (__h = 15; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 3;) { __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __STORE(__h - 7, __reg_6_1, __reg_6_2, __reg_6_0); __h++; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __STORE(__h - 7, __reg_6_2, __reg_6_0, __reg_6_1); __h++; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __STORE(__h - 7, __reg_6_0, __reg_6_1, __reg_6_2); __h++; __DB_SWITCH(); __syncthreads(); } if (0) {} else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_0_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __STORE(__h - 7, __reg_6_1, __reg_6_2, __reg_6_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_0_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __STORE(__h - 6, __reg_6_2, __reg_6_0, __reg_6_1); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_0_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); 
__STORE(__h - 5, __reg_6_0, __reg_6_1, __reg_6_2); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_0_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __STORE(__h - 4, __reg_6_1, __reg_6_2, __reg_6_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_0_2); __STORE(__h - 3, __reg_6_2, __reg_6_0, __reg_6_1); __STORE(__h - 2, __reg_6_0, __reg_6_1, __reg_0_2); } else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_0, __h + 0); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __STORE(__h - 7, __reg_6_1, __reg_6_2, __reg_6_0); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_0_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __STORE(__h - 6, __reg_6_2, __reg_6_0, __reg_6_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_0_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __STORE(__h - 5, __reg_6_0, __reg_6_1, __reg_6_2); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_0_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __STORE(__h - 4, __reg_6_1, __reg_6_2, __reg_6_0); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_0_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __STORE(__h - 3, __reg_6_2, __reg_6_0, __reg_6_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_0_0); __STORE(__h - 2, __reg_6_0, __reg_6_1, __reg_6_2); __STORE(__h - 1, __reg_6_1, __reg_6_2, __reg_0_0); } else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_0, __h + 0); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __STORE(__h - 7, __reg_6_1, __reg_6_2, __reg_6_0); __LOAD(__reg_0_1, __h + 1); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __STORE(__h - 6, __reg_6_2, __reg_6_0, __reg_6_1); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_0_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __STORE(__h - 5, __reg_6_0, __reg_6_1, __reg_6_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_0_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __STORE(__h - 4, __reg_6_1, __reg_6_2, __reg_6_0); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_0_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __STORE(__h - 3, 
__reg_6_2, __reg_6_0, __reg_6_1); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_0_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __STORE(__h - 2, __reg_6_0, __reg_6_1, __reg_6_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_0_1); __STORE(__h - 1, __reg_6_1, __reg_6_2, __reg_6_0); __STORE(__h + 0, __reg_6_2, __reg_6_0, __reg_0_1); } } else { for (__h = 15; __h <= __side1LenOl - 3;) { __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __STORE(__h - 7, __reg_6_1, __reg_6_2, __reg_6_0); __h++; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __STORE(__h - 7, __reg_6_2, __reg_6_0, __reg_6_1); __h++; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __STORE(__h - 7, __reg_6_0, __reg_6_1, __reg_6_2); __h++; __DB_SWITCH(); __syncthreads(); } if (__h == __side1LenOl) return; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __STORE(__h - 7, __reg_6_1, __reg_6_2, __reg_6_0); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __STORE(__h - 7, __reg_6_2, __reg_6_0, __reg_6_1); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __STORE(__h - 7, __reg_6_0, __reg_6_1, __reg_6_2); __h++; } } __global__ void kernel0_6(double *A, int dimsize, int timestep, int c0) { #ifndef AN5D_TYPE #define AN5D_TYPE unsigned #endif const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0); #define __c0 c0 const AN5D_TYPE __c1Len = (dimsize - 1 - 1); const AN5D_TYPE __c1Pad = (1); #define __c1 c1 const AN5D_TYPE __c2Len = (dimsize - 1 - 1); const AN5D_TYPE __c2Pad = (1); #define __c2 c2 const AN5D_TYPE __halo1 = 1; const AN5D_TYPE __halo2 = 1; const AN5D_TYPE __side0Len = 6; const AN5D_TYPE __side1Len = 128; const AN5D_TYPE __side2Len = 500; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const 
AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len; const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len; const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x; const AN5D_TYPE __local_c2 = __tid; const AN5D_TYPE __c1Id = blockIdx.x / __side2Num; const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2; double __reg_0_0; double __reg_0_1; double __reg_0_2; double __reg_1_0; double __reg_1_1; double __reg_1_2; double __reg_2_0; double __reg_2_1; double __reg_2_2; double __reg_3_0; double __reg_3_1; double __reg_3_2; double __reg_4_0; double __reg_4_1; double __reg_4_2; double __reg_5_0; double __reg_5_1; double __reg_5_2; __shared__ double __b_sb_double[__blockSize * 2]; double *__b_sb = __b_sb_double; const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2; const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len; const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1); const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2); const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3); const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4); const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5); const AN5D_TYPE __writeValid6 = __updateValid && __local_c2 >= (__halo2 * 6) && __local_c2 < __side2LenOl - (__halo2 * 6); const AN5D_TYPE __storeValid = __writeValid6; AN5D_TYPE __c1; AN5D_TYPE __h; const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id; #define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0) #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR(__rn0, __a, __b, __c) do { __rn0 = ((__REGREF(__b, 0)) + (1.0f / sqrt((((0.0001f + (((__REGREF(__b, 0)) - (__REGREF(__a, 0))) * ((__REGREF(__b, 0)) - (__REGREF(__a, 0))))) + (((__REGREF(__b, 0)) - (__REGREF(__c, 0))) * ((__REGREF(__b, 0)) - (__REGREF(__c, 0))))) + (((__REGREF(__b, 0)) - (__SBREF(__b_sb, 1))) * ((__REGREF(__b, 0)) - (__SBREF(__b_sb, 1))))) + (((__REGREF(__b, 0)) - (__SBREF(__b_sb, -1))) * ((__REGREF(__b, 0)) - (__SBREF(__b_sb, -1))))))); } while (0) #define __DB_SWITCH() do { __b_sb = &__b_sb_double[(__b_sb == __b_sb_double) ? 
__blockSize : 0]; } while (0) #define __CALCSETUP(a, b, c) do { __DB_SWITCH(); __b_sb[__tid] = b; __syncthreads(); } while (0) #define __CALC1(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC2(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC3(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC4(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid4) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC5(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid5) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __STORE(h, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2); } } while (0) if (__c1Id == 0) { __LOAD(__reg_5_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_5_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_5_0, __reg_1_1, __reg_1_2); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_5_0, __reg_2_1, __reg_2_2); __LOAD(__reg_0_2, 5); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_5_0, __reg_3_1, __reg_3_2); __LOAD(__reg_0_0, 6); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_5_0, __reg_4_1, __reg_4_2); __LOAD(__reg_0_1, 7); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __STORE(1, __reg_5_0, __reg_5_1, __reg_5_2); __LOAD(__reg_0_2, 8); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __STORE(2, __reg_5_1, __reg_5_2, __reg_5_0); __LOAD(__reg_0_0, 9); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __STORE(3, __reg_5_2, __reg_5_0, __reg_5_1); __LOAD(__reg_0_1, 10); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __STORE(4, __reg_5_0, __reg_5_1, __reg_5_2); __LOAD(__reg_0_2, 11); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, 
__reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __STORE(5, __reg_5_1, __reg_5_2, __reg_5_0); __LOAD(__reg_0_0, 12); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __STORE(6, __reg_5_2, __reg_5_0, __reg_5_1); } else { __LOAD(__reg_0_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __LOAD(__reg_0_2, 5); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __LOAD(__reg_0_0, 6); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __LOAD(__reg_0_1, 7); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __LOAD(__reg_0_2, 8); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __LOAD(__reg_0_0, 9); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __LOAD(__reg_0_1, 10); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __LOAD(__reg_0_2, 11); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __LOAD(__reg_0_0, 12); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __STORE(6, __reg_5_2, __reg_5_0, __reg_5_1); __DB_SWITCH(); __syncthreads(); } __b_sb = __b_sb_double + __blockSize * 1; if (__c1Id == __side1Num - 1) { for (__h = 13; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 3;) { __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __STORE(__h - 6, __reg_5_0, __reg_5_1, __reg_5_2); __h++; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __STORE(__h - 6, __reg_5_1, 
__reg_5_2, __reg_5_0); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __STORE(__h - 6, __reg_5_2, __reg_5_0, __reg_5_1); __h++; } if (0) {} else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_0_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __STORE(__h - 6, __reg_5_0, __reg_5_1, __reg_5_2); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_0_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __STORE(__h - 5, __reg_5_1, __reg_5_2, __reg_5_0); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_0_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __STORE(__h - 4, __reg_5_2, __reg_5_0, __reg_5_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_0_0); __STORE(__h - 3, __reg_5_0, __reg_5_1, __reg_5_2); __STORE(__h - 2, __reg_5_1, __reg_5_2, __reg_0_0); } else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_1, __h + 0); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __STORE(__h - 6, __reg_5_0, __reg_5_1, __reg_5_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_0_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __STORE(__h - 5, __reg_5_1, __reg_5_2, __reg_5_0); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_0_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __STORE(__h - 4, __reg_5_2, __reg_5_0, __reg_5_1); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_0_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __STORE(__h - 3, __reg_5_0, __reg_5_1, __reg_5_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_0_1); __STORE(__h - 2, __reg_5_1, __reg_5_2, __reg_5_0); __STORE(__h - 1, __reg_5_2, __reg_5_0, __reg_0_1); } else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_1, __h + 0); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __STORE(__h - 6, __reg_5_0, __reg_5_1, __reg_5_2); __LOAD(__reg_0_2, __h + 1); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __STORE(__h - 5, __reg_5_1, __reg_5_2, __reg_5_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_0_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __STORE(__h - 4, __reg_5_2, __reg_5_0, __reg_5_1); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_0_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, 
__reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __STORE(__h - 3, __reg_5_0, __reg_5_1, __reg_5_2); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_0_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __STORE(__h - 2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_0_2); __STORE(__h - 1, __reg_5_2, __reg_5_0, __reg_5_1); __STORE(__h + 0, __reg_5_0, __reg_5_1, __reg_0_2); } } else { for (__h = 13; __h <= __side1LenOl - 3;) { __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __STORE(__h - 6, __reg_5_0, __reg_5_1, __reg_5_2); __h++; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __STORE(__h - 6, __reg_5_1, __reg_5_2, __reg_5_0); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __STORE(__h - 6, __reg_5_2, __reg_5_0, __reg_5_1); __h++; } if (__h == __side1LenOl) return; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __STORE(__h - 6, __reg_5_0, __reg_5_1, __reg_5_2); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __STORE(__h - 6, __reg_5_1, __reg_5_2, __reg_5_0); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __STORE(__h - 6, __reg_5_2, __reg_5_0, __reg_5_1); __h++; } } __global__ void kernel0_5(double *A, int dimsize, int timestep, int c0) { #ifndef AN5D_TYPE #define AN5D_TYPE unsigned #endif const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0); #define __c0 c0 const AN5D_TYPE __c1Len = (dimsize - 1 - 1); const AN5D_TYPE __c1Pad = (1); #define __c1 c1 const AN5D_TYPE __c2Len = (dimsize - 1 - 1); const AN5D_TYPE __c2Pad = (1); #define __c2 c2 const AN5D_TYPE __halo1 = 1; const AN5D_TYPE __halo2 = 1; const AN5D_TYPE __side0Len = 5; const AN5D_TYPE __side1Len = 128; const AN5D_TYPE __side2Len = 502; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / 
__side1Len; const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len; const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x; const AN5D_TYPE __local_c2 = __tid; const AN5D_TYPE __c1Id = blockIdx.x / __side2Num; const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2; double __reg_0_0; double __reg_0_1; double __reg_0_2; double __reg_1_0; double __reg_1_1; double __reg_1_2; double __reg_2_0; double __reg_2_1; double __reg_2_2; double __reg_3_0; double __reg_3_1; double __reg_3_2; double __reg_4_0; double __reg_4_1; double __reg_4_2; __shared__ double __b_sb_double[__blockSize * 2]; double *__b_sb = __b_sb_double; const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2; const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len; const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1); const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2); const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3); const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4); const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5); const AN5D_TYPE __storeValid = __writeValid5; AN5D_TYPE __c1; AN5D_TYPE __h; const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id; #define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0) #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR(__rn0, __a, __b, __c) do { __rn0 = ((__REGREF(__b, 0)) + (1.0f / sqrt((((0.0001f + (((__REGREF(__b, 0)) - (__REGREF(__a, 0))) * ((__REGREF(__b, 0)) - (__REGREF(__a, 0))))) + (((__REGREF(__b, 0)) - (__REGREF(__c, 0))) * ((__REGREF(__b, 0)) - (__REGREF(__c, 0))))) + (((__REGREF(__b, 0)) - (__SBREF(__b_sb, 1))) * ((__REGREF(__b, 0)) - (__SBREF(__b_sb, 1))))) + (((__REGREF(__b, 0)) - (__SBREF(__b_sb, -1))) * ((__REGREF(__b, 0)) - (__SBREF(__b_sb, -1))))))); } while (0) #define __DB_SWITCH() do { __b_sb = &__b_sb_double[(__b_sb == __b_sb_double) ? 
__blockSize : 0]; } while (0) #define __CALCSETUP(a, b, c) do { __DB_SWITCH(); __b_sb[__tid] = b; __syncthreads(); } while (0) #define __CALC1(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC2(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC3(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC4(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid4) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __STORE(h, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2); } } while (0) if (__c1Id == 0) { __LOAD(__reg_4_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_4_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_4_0, __reg_1_1, __reg_1_2); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_4_0, __reg_2_1, __reg_2_2); __LOAD(__reg_0_2, 5); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_4_0, __reg_3_1, __reg_3_2); __LOAD(__reg_0_0, 6); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __STORE(1, __reg_4_0, __reg_4_1, __reg_4_2); __LOAD(__reg_0_1, 7); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __STORE(2, __reg_4_1, __reg_4_2, __reg_4_0); __LOAD(__reg_0_2, 8); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __STORE(3, __reg_4_2, __reg_4_0, __reg_4_1); __LOAD(__reg_0_0, 9); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __STORE(4, __reg_4_0, __reg_4_1, __reg_4_2); __LOAD(__reg_0_1, 10); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __STORE(5, __reg_4_1, __reg_4_2, __reg_4_0); } else { __LOAD(__reg_0_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __LOAD(__reg_0_2, 5); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __LOAD(__reg_0_0, 6); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); 
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __LOAD(__reg_0_1, 7); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __LOAD(__reg_0_2, 8); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __LOAD(__reg_0_0, 9); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __LOAD(__reg_0_1, 10); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __STORE(5, __reg_4_1, __reg_4_2, __reg_4_0); } __b_sb = __b_sb_double + __blockSize * 1; if (__c1Id == __side1Num - 1) { for (__h = 11; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 3;) { __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __STORE(__h - 5, __reg_4_2, __reg_4_0, __reg_4_1); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __STORE(__h - 5, __reg_4_0, __reg_4_1, __reg_4_2); __h++; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __STORE(__h - 5, __reg_4_1, __reg_4_2, __reg_4_0); __h++; __DB_SWITCH(); __syncthreads(); } if (0) {} else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_0_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __STORE(__h - 5, __reg_4_2, __reg_4_0, __reg_4_1); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_0_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __STORE(__h - 4, __reg_4_0, __reg_4_1, __reg_4_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_0_1); __STORE(__h - 3, __reg_4_1, __reg_4_2, __reg_4_0); __STORE(__h - 2, __reg_4_2, __reg_4_0, __reg_0_1); } else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_2, __h + 0); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __STORE(__h - 5, __reg_4_2, __reg_4_0, __reg_4_1); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_0_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __STORE(__h - 4, __reg_4_0, __reg_4_1, __reg_4_2); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_0_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __STORE(__h - 3, __reg_4_1, __reg_4_2, __reg_4_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_0_2); __STORE(__h - 2, __reg_4_2, __reg_4_0, __reg_4_1); __STORE(__h - 1, 
__reg_4_0, __reg_4_1, __reg_0_2); } else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_2, __h + 0); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __STORE(__h - 5, __reg_4_2, __reg_4_0, __reg_4_1); __LOAD(__reg_0_0, __h + 1); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __STORE(__h - 4, __reg_4_0, __reg_4_1, __reg_4_2); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_0_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __STORE(__h - 3, __reg_4_1, __reg_4_2, __reg_4_0); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_0_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __STORE(__h - 2, __reg_4_2, __reg_4_0, __reg_4_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_0_0); __STORE(__h - 1, __reg_4_0, __reg_4_1, __reg_4_2); __STORE(__h + 0, __reg_4_1, __reg_4_2, __reg_0_0); } } else { for (__h = 11; __h <= __side1LenOl - 3;) { __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __STORE(__h - 5, __reg_4_2, __reg_4_0, __reg_4_1); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __STORE(__h - 5, __reg_4_0, __reg_4_1, __reg_4_2); __h++; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __STORE(__h - 5, __reg_4_1, __reg_4_2, __reg_4_0); __h++; __DB_SWITCH(); __syncthreads(); } if (__h == __side1LenOl) return; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __STORE(__h - 5, __reg_4_2, __reg_4_0, __reg_4_1); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __STORE(__h - 5, __reg_4_0, __reg_4_1, __reg_4_2); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __STORE(__h - 5, __reg_4_1, __reg_4_2, __reg_4_0); __h++; } } __global__ void kernel0_4(double *A, int dimsize, int timestep, int c0) { #ifndef AN5D_TYPE #define AN5D_TYPE unsigned #endif const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0); #define __c0 c0 const AN5D_TYPE __c1Len = (dimsize - 1 - 1); const AN5D_TYPE __c1Pad = (1); #define __c1 c1 const AN5D_TYPE __c2Len = (dimsize - 1 - 1); const AN5D_TYPE __c2Pad = (1); #define 
__c2 c2 const AN5D_TYPE __halo1 = 1; const AN5D_TYPE __halo2 = 1; const AN5D_TYPE __side0Len = 4; const AN5D_TYPE __side1Len = 128; const AN5D_TYPE __side2Len = 504; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len; const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len; const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x; const AN5D_TYPE __local_c2 = __tid; const AN5D_TYPE __c1Id = blockIdx.x / __side2Num; const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2; double __reg_0_0; double __reg_0_1; double __reg_0_2; double __reg_1_0; double __reg_1_1; double __reg_1_2; double __reg_2_0; double __reg_2_1; double __reg_2_2; double __reg_3_0; double __reg_3_1; double __reg_3_2; __shared__ double __b_sb_double[__blockSize * 2]; double *__b_sb = __b_sb_double; const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2; const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len; const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1); const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2); const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3); const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4); const AN5D_TYPE __storeValid = __writeValid4; AN5D_TYPE __c1; AN5D_TYPE __h; const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id; #define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0) #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR(__rn0, __a, __b, __c) do { __rn0 = ((__REGREF(__b, 0)) + (1.0f / sqrt((((0.0001f + (((__REGREF(__b, 0)) - (__REGREF(__a, 0))) * ((__REGREF(__b, 0)) - (__REGREF(__a, 0))))) + (((__REGREF(__b, 0)) - (__REGREF(__c, 0))) * ((__REGREF(__b, 0)) - (__REGREF(__c, 0))))) + (((__REGREF(__b, 0)) - (__SBREF(__b_sb, 1))) * ((__REGREF(__b, 0)) - (__SBREF(__b_sb, 1))))) + (((__REGREF(__b, 0)) - (__SBREF(__b_sb, -1))) * ((__REGREF(__b, 0)) - (__SBREF(__b_sb, -1))))))); } while (0) #define __DB_SWITCH() do { __b_sb = &__b_sb_double[(__b_sb == __b_sb_double) ? 
__blockSize : 0]; } while (0) #define __CALCSETUP(a, b, c) do { __DB_SWITCH(); __b_sb[__tid] = b; __syncthreads(); } while (0) #define __CALC1(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC2(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC3(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __STORE(h, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2); } } while (0) if (__c1Id == 0) { __LOAD(__reg_3_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_3_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_3_0, __reg_1_1, __reg_1_2); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_3_0, __reg_2_1, __reg_2_2); __LOAD(__reg_0_2, 5); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __STORE(1, __reg_3_0, __reg_3_1, __reg_3_2); __LOAD(__reg_0_0, 6); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __STORE(2, __reg_3_1, __reg_3_2, __reg_3_0); __LOAD(__reg_0_1, 7); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __STORE(3, __reg_3_2, __reg_3_0, __reg_3_1); __LOAD(__reg_0_2, 8); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __STORE(4, __reg_3_0, __reg_3_1, __reg_3_2); } else { __LOAD(__reg_0_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __LOAD(__reg_0_2, 5); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __LOAD(__reg_0_0, 6); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __LOAD(__reg_0_1, 7); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __LOAD(__reg_0_2, 8); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __STORE(4, __reg_3_0, __reg_3_1, __reg_3_2); } __b_sb = __b_sb_double + __blockSize * 0; if (__c1Id == __side1Num - 1) { for (__h = 9; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 3;) { __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __STORE(__h - 4, __reg_3_1, __reg_3_2, 
__reg_3_0); __h++; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __STORE(__h - 4, __reg_3_2, __reg_3_0, __reg_3_1); __h++; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __STORE(__h - 4, __reg_3_0, __reg_3_1, __reg_3_2); __h++; } if (0) {} else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_0_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __STORE(__h - 4, __reg_3_1, __reg_3_2, __reg_3_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_0_2); __STORE(__h - 3, __reg_3_2, __reg_3_0, __reg_3_1); __STORE(__h - 2, __reg_3_0, __reg_3_1, __reg_0_2); } else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_0, __h + 0); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __STORE(__h - 4, __reg_3_1, __reg_3_2, __reg_3_0); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_0_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __STORE(__h - 3, __reg_3_2, __reg_3_0, __reg_3_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_0_0); __STORE(__h - 2, __reg_3_0, __reg_3_1, __reg_3_2); __STORE(__h - 1, __reg_3_1, __reg_3_2, __reg_0_0); } else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_0, __h + 0); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __STORE(__h - 4, __reg_3_1, __reg_3_2, __reg_3_0); __LOAD(__reg_0_1, __h + 1); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __STORE(__h - 3, __reg_3_2, __reg_3_0, __reg_3_1); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_0_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __STORE(__h - 2, __reg_3_0, __reg_3_1, __reg_3_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_0_1); __STORE(__h - 1, __reg_3_1, __reg_3_2, __reg_3_0); __STORE(__h + 0, __reg_3_2, __reg_3_0, __reg_0_1); } } else { for (__h = 9; __h <= __side1LenOl - 3;) { __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __STORE(__h - 4, __reg_3_1, __reg_3_2, __reg_3_0); __h++; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __STORE(__h - 4, __reg_3_2, __reg_3_0, __reg_3_1); __h++; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __STORE(__h - 4, __reg_3_0, __reg_3_1, __reg_3_2); __h++; } if (__h == __side1LenOl) return; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __STORE(__h - 4, __reg_3_1, __reg_3_2, __reg_3_0); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, 
__reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __STORE(__h - 4, __reg_3_2, __reg_3_0, __reg_3_1); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __STORE(__h - 4, __reg_3_0, __reg_3_1, __reg_3_2); __h++; } } __global__ void kernel0_3(double *A, int dimsize, int timestep, int c0) { #ifndef AN5D_TYPE #define AN5D_TYPE unsigned #endif const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0); #define __c0 c0 const AN5D_TYPE __c1Len = (dimsize - 1 - 1); const AN5D_TYPE __c1Pad = (1); #define __c1 c1 const AN5D_TYPE __c2Len = (dimsize - 1 - 1); const AN5D_TYPE __c2Pad = (1); #define __c2 c2 const AN5D_TYPE __halo1 = 1; const AN5D_TYPE __halo2 = 1; const AN5D_TYPE __side0Len = 3; const AN5D_TYPE __side1Len = 128; const AN5D_TYPE __side2Len = 506; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len; const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len; const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x; const AN5D_TYPE __local_c2 = __tid; const AN5D_TYPE __c1Id = blockIdx.x / __side2Num; const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2; double __reg_0_0; double __reg_0_1; double __reg_0_2; double __reg_1_0; double __reg_1_1; double __reg_1_2; double __reg_2_0; double __reg_2_1; double __reg_2_2; __shared__ double __b_sb_double[__blockSize * 2]; double *__b_sb = __b_sb_double; const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2; const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len; const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1); const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2); const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3); const AN5D_TYPE __storeValid = __writeValid3; AN5D_TYPE __c1; AN5D_TYPE __h; const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id; #define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0) #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR(__rn0, __a, __b, __c) do { __rn0 = ((__REGREF(__b, 0)) + (1.0f / sqrt((((0.0001f + (((__REGREF(__b, 0)) - (__REGREF(__a, 0))) * ((__REGREF(__b, 0)) - (__REGREF(__a, 0))))) + (((__REGREF(__b, 0)) - (__REGREF(__c, 0))) * ((__REGREF(__b, 0)) - (__REGREF(__c, 0))))) + (((__REGREF(__b, 0)) - (__SBREF(__b_sb, 1))) * ((__REGREF(__b, 0)) - (__SBREF(__b_sb, 1))))) + (((__REGREF(__b, 0)) - (__SBREF(__b_sb, -1))) * ((__REGREF(__b, 0)) - (__SBREF(__b_sb, -1))))))); } while (0) #define __DB_SWITCH() do { __b_sb = &__b_sb_double[(__b_sb == __b_sb_double) ? 
__blockSize : 0]; } while (0) #define __CALCSETUP(a, b, c) do { __DB_SWITCH(); __b_sb[__tid] = b; __syncthreads(); } while (0) #define __CALC1(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC2(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __STORE(h, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2); } } while (0) if (__c1Id == 0) { __LOAD(__reg_2_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_2_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_2_0, __reg_1_1, __reg_1_2); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __STORE(1, __reg_2_0, __reg_2_1, __reg_2_2); __LOAD(__reg_0_2, 5); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __STORE(2, __reg_2_1, __reg_2_2, __reg_2_0); __LOAD(__reg_0_0, 6); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __STORE(3, __reg_2_2, __reg_2_0, __reg_2_1); } else { __LOAD(__reg_0_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __LOAD(__reg_0_2, 5); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __LOAD(__reg_0_0, 6); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __STORE(3, __reg_2_2, __reg_2_0, __reg_2_1); __DB_SWITCH(); __syncthreads(); } __b_sb = __b_sb_double + __blockSize * 0; if (__c1Id == __side1Num - 1) { for (__h = 7; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 3;) { __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __STORE(__h - 3, __reg_2_0, __reg_2_1, __reg_2_2); __h++; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __STORE(__h - 3, __reg_2_1, __reg_2_2, __reg_2_0); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __STORE(__h - 3, __reg_2_2, __reg_2_0, __reg_2_1); __h++; __DB_SWITCH(); __syncthreads(); } if (0) {} else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_0_0); __STORE(__h - 3, __reg_2_0, __reg_2_1, __reg_2_2); __STORE(__h - 2, __reg_2_1, __reg_2_2, __reg_0_0); } else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_1, __h + 0); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __STORE(__h - 3, __reg_2_0, __reg_2_1, __reg_2_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_0_1); __STORE(__h - 2, __reg_2_1, __reg_2_2, __reg_2_0); __STORE(__h - 1, __reg_2_2, __reg_2_0, __reg_0_1); } else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_1, __h + 0); 
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __STORE(__h - 3, __reg_2_0, __reg_2_1, __reg_2_2); __LOAD(__reg_0_2, __h + 1); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __STORE(__h - 2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_0_2); __STORE(__h - 1, __reg_2_2, __reg_2_0, __reg_2_1); __STORE(__h + 0, __reg_2_0, __reg_2_1, __reg_0_2); } } else { for (__h = 7; __h <= __side1LenOl - 3;) { __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __STORE(__h - 3, __reg_2_0, __reg_2_1, __reg_2_2); __h++; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __STORE(__h - 3, __reg_2_1, __reg_2_2, __reg_2_0); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __STORE(__h - 3, __reg_2_2, __reg_2_0, __reg_2_1); __h++; __DB_SWITCH(); __syncthreads(); } if (__h == __side1LenOl) return; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __STORE(__h - 3, __reg_2_0, __reg_2_1, __reg_2_2); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __STORE(__h - 3, __reg_2_1, __reg_2_2, __reg_2_0); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __STORE(__h - 3, __reg_2_2, __reg_2_0, __reg_2_1); __h++; } } __global__ void kernel0_2(double *A, int dimsize, int timestep, int c0) { #ifndef AN5D_TYPE #define AN5D_TYPE unsigned #endif const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0); #define __c0 c0 const AN5D_TYPE __c1Len = (dimsize - 1 - 1); const AN5D_TYPE __c1Pad = (1); #define __c1 c1 const AN5D_TYPE __c2Len = (dimsize - 1 - 1); const AN5D_TYPE __c2Pad = (1); #define __c2 c2 const AN5D_TYPE __halo1 = 1; const AN5D_TYPE __halo2 = 1; const AN5D_TYPE __side0Len = 2; const AN5D_TYPE __side1Len = 128; const AN5D_TYPE __side2Len = 508; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len; const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len; const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x; const AN5D_TYPE __local_c2 = __tid; const AN5D_TYPE __c1Id = blockIdx.x / __side2Num; const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2; double __reg_0_0; double __reg_0_1; double __reg_0_2; double __reg_1_0; double __reg_1_1; double __reg_1_2; __shared__ double __b_sb_double[__blockSize * 2]; double *__b_sb = __b_sb_double; const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2; const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len; const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1); const AN5D_TYPE __writeValid2 
= __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2); const AN5D_TYPE __storeValid = __writeValid2; AN5D_TYPE __c1; AN5D_TYPE __h; const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id; #define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0) #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR(__rn0, __a, __b, __c) do { __rn0 = ((__REGREF(__b, 0)) + (1.0f / sqrt((((0.0001f + (((__REGREF(__b, 0)) - (__REGREF(__a, 0))) * ((__REGREF(__b, 0)) - (__REGREF(__a, 0))))) + (((__REGREF(__b, 0)) - (__REGREF(__c, 0))) * ((__REGREF(__b, 0)) - (__REGREF(__c, 0))))) + (((__REGREF(__b, 0)) - (__SBREF(__b_sb, 1))) * ((__REGREF(__b, 0)) - (__SBREF(__b_sb, 1))))) + (((__REGREF(__b, 0)) - (__SBREF(__b_sb, -1))) * ((__REGREF(__b, 0)) - (__SBREF(__b_sb, -1))))))); } while (0) #define __DB_SWITCH() do { __b_sb = &__b_sb_double[(__b_sb == __b_sb_double) ? __blockSize : 0]; } while (0) #define __CALCSETUP(a, b, c) do { __DB_SWITCH(); __b_sb[__tid] = b; __syncthreads(); } while (0) #define __CALC1(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __STORE(h, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2); } } while (0) if (__c1Id == 0) { __LOAD(__reg_1_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_1_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __STORE(1, __reg_1_0, __reg_1_1, __reg_1_2); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __STORE(2, __reg_1_1, __reg_1_2, __reg_1_0); } else { __LOAD(__reg_0_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __STORE(2, __reg_1_1, __reg_1_2, __reg_1_0); __DB_SWITCH(); __syncthreads(); } __b_sb = __b_sb_double + __blockSize * 1; if (__c1Id == __side1Num - 1) { for (__h = 5; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 3;) { __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __STORE(__h - 2, __reg_1_2, __reg_1_0, __reg_1_1); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __STORE(__h - 2, __reg_1_0, __reg_1_1, __reg_1_2); __h++; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __STORE(__h - 2, __reg_1_1, __reg_1_2, __reg_1_0); __h++; } if (0) {} else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __STORE(__h - 2, __reg_1_2, __reg_1_0, __reg_0_1); } else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_2, __h + 0); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __STORE(__h - 2, __reg_1_2, __reg_1_0, __reg_1_1); __STORE(__h - 1, __reg_1_0, __reg_1_1, __reg_0_2); } else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_2, __h + 0); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __STORE(__h - 2, __reg_1_2, __reg_1_0, __reg_1_1); __LOAD(__reg_0_0, __h + 1); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __STORE(__h - 1, __reg_1_0, __reg_1_1, 
__reg_1_2); __STORE(__h + 0, __reg_1_1, __reg_1_2, __reg_0_0); } } else { for (__h = 5; __h <= __side1LenOl - 3;) { __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __STORE(__h - 2, __reg_1_2, __reg_1_0, __reg_1_1); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __STORE(__h - 2, __reg_1_0, __reg_1_1, __reg_1_2); __h++; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __STORE(__h - 2, __reg_1_1, __reg_1_2, __reg_1_0); __h++; } if (__h == __side1LenOl) return; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __STORE(__h - 2, __reg_1_2, __reg_1_0, __reg_1_1); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __STORE(__h - 2, __reg_1_0, __reg_1_1, __reg_1_2); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __STORE(__h - 2, __reg_1_1, __reg_1_2, __reg_1_0); __h++; } } __global__ void kernel0_1(double *A, int dimsize, int timestep, int c0) { #ifndef AN5D_TYPE #define AN5D_TYPE unsigned #endif const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0); #define __c0 c0 const AN5D_TYPE __c1Len = (dimsize - 1 - 1); const AN5D_TYPE __c1Pad = (1); #define __c1 c1 const AN5D_TYPE __c2Len = (dimsize - 1 - 1); const AN5D_TYPE __c2Pad = (1); #define __c2 c2 const AN5D_TYPE __halo1 = 1; const AN5D_TYPE __halo2 = 1; const AN5D_TYPE __side0Len = 1; const AN5D_TYPE __side1Len = 128; const AN5D_TYPE __side2Len = 510; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len; const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len; const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x; const AN5D_TYPE __local_c2 = __tid; const AN5D_TYPE __c1Id = blockIdx.x / __side2Num; const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2; double __reg_0_0; double __reg_0_1; double __reg_0_2; __shared__ double __b_sb_double[__blockSize * 2]; double *__b_sb = __b_sb_double; const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2; const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len; const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1); const AN5D_TYPE __storeValid = __writeValid1; AN5D_TYPE __c1; AN5D_TYPE __h; const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id; #define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0) #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR(__rn0, __a, __b, __c) do { __rn0 = ((__REGREF(__b, 0)) + (1.0f / sqrt((((0.0001f + (((__REGREF(__b, 0)) - (__REGREF(__a, 0))) * ((__REGREF(__b, 0)) - (__REGREF(__a, 0))))) + (((__REGREF(__b, 0)) - (__REGREF(__c, 0))) * ((__REGREF(__b, 0)) - (__REGREF(__c, 0))))) + (((__REGREF(__b, 0)) - (__SBREF(__b_sb, 1))) * ((__REGREF(__b, 0)) - (__SBREF(__b_sb, 1))))) + (((__REGREF(__b, 0)) - (__SBREF(__b_sb, -1))) * 
((__REGREF(__b, 0)) - (__SBREF(__b_sb, -1))))))); } while (0) #define __DB_SWITCH() do { __b_sb = &__b_sb_double[(__b_sb == __b_sb_double) ? __blockSize : 0]; } while (0) #define __CALCSETUP(a, b, c) do { __DB_SWITCH(); __b_sb[__tid] = b; __syncthreads(); } while (0) #define __STORE(h, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2); } } while (0) if (__c1Id == 0) { __LOAD(__reg_0_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __STORE(1, __reg_0_0, __reg_0_1, __reg_0_2); } else { __LOAD(__reg_0_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __STORE(1, __reg_0_0, __reg_0_1, __reg_0_2); } __b_sb = __b_sb_double + __blockSize * 1; if (__c1Id == __side1Num - 1) { for (__h = 3; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 3;) { __LOAD(__reg_0_0, __h); __STORE(__h - 1, __reg_0_1, __reg_0_2, __reg_0_0); __h++; __LOAD(__reg_0_1, __h); __STORE(__h - 1, __reg_0_2, __reg_0_0, __reg_0_1); __h++; __LOAD(__reg_0_2, __h); __STORE(__h - 1, __reg_0_0, __reg_0_1, __reg_0_2); __h++; __DB_SWITCH(); __syncthreads(); } if (0) {} else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { } else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_0, __h + 0); __STORE(__h - 1, __reg_0_1, __reg_0_2, __reg_0_0); } else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_0, __h + 0); __STORE(__h - 1, __reg_0_1, __reg_0_2, __reg_0_0); __LOAD(__reg_0_1, __h + 1); __STORE(__h + 0, __reg_0_2, __reg_0_0, __reg_0_1); } } else { for (__h = 3; __h <= __side1LenOl - 3;) { __LOAD(__reg_0_0, __h); __STORE(__h - 1, __reg_0_1, __reg_0_2, __reg_0_0); __h++; __LOAD(__reg_0_1, __h); __STORE(__h - 1, __reg_0_2, __reg_0_0, __reg_0_1); __h++; __LOAD(__reg_0_2, __h); __STORE(__h - 1, __reg_0_0, __reg_0_1, __reg_0_2); __h++; __DB_SWITCH(); __syncthreads(); } if (__h == __side1LenOl) return; __LOAD(__reg_0_0, __h); __STORE(__h - 1, __reg_0_1, __reg_0_2, __reg_0_0); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_1, __h); __STORE(__h - 1, __reg_0_2, __reg_0_0, __reg_0_1); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_2, __h); __STORE(__h - 1, __reg_0_0, __reg_0_1, __reg_0_2); __h++; } }
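// NOTE (hedged summary, inferred from the code above): the kernel0_N variants appear to be
// AN5D/PPCG-style generated versions of one 2D gradient stencil that differ only in the
// degree of temporal blocking: __side0Len = N time steps are fused per launch. Each variant
// keeps N stages of partial results in rotating registers (__reg_k_0/1/2), stages the current
// row in the double-buffered shared array __b_sb_double (swapped by __DB_SWITCH), widens the
// per-block halo to __halo * __side0Len (__OlLen1/__OlLen2), and lets only the innermost
// __storeValid lanes write results back to A via __STORE.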
cffa09069cb3c33d92eaf0d5a9f8a8b13959838f.cu
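// NOTE (assumption): this .cu file appears to be the CUDA counterpart of the HIP source above,
// generated for the same gradient2d-512-8-128 configuration. kernel0_8 below is the most fused
// variant: __side0Len = 8 time steps per launch, eight nested validity rings (__writeValid1..8),
// and __side2Len = 496 interior columns plus an 8-column halo on each side (__side2LenOl = 512).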
#include "gradient2d-512-8-128_kernel.hu" __device__ double __sbref_wrap(double *sb, size_t index) { return sb[index]; } __global__ void kernel0_8(double *A, int dimsize, int timestep, int c0) { #ifndef AN5D_TYPE #define AN5D_TYPE unsigned #endif const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0); #define __c0 c0 const AN5D_TYPE __c1Len = (dimsize - 1 - 1); const AN5D_TYPE __c1Pad = (1); #define __c1 c1 const AN5D_TYPE __c2Len = (dimsize - 1 - 1); const AN5D_TYPE __c2Pad = (1); #define __c2 c2 const AN5D_TYPE __halo1 = 1; const AN5D_TYPE __halo2 = 1; const AN5D_TYPE __side0Len = 8; const AN5D_TYPE __side1Len = 128; const AN5D_TYPE __side2Len = 496; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len; const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len; const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x; const AN5D_TYPE __local_c2 = __tid; const AN5D_TYPE __c1Id = blockIdx.x / __side2Num; const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2; double __reg_0_0; double __reg_0_1; double __reg_0_2; double __reg_1_0; double __reg_1_1; double __reg_1_2; double __reg_2_0; double __reg_2_1; double __reg_2_2; double __reg_3_0; double __reg_3_1; double __reg_3_2; double __reg_4_0; double __reg_4_1; double __reg_4_2; double __reg_5_0; double __reg_5_1; double __reg_5_2; double __reg_6_0; double __reg_6_1; double __reg_6_2; double __reg_7_0; double __reg_7_1; double __reg_7_2; __shared__ double __b_sb_double[__blockSize * 2]; double *__b_sb = __b_sb_double; const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2; const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len; const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1); const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2); const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3); const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4); const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5); const AN5D_TYPE __writeValid6 = __updateValid && __local_c2 >= (__halo2 * 6) && __local_c2 < __side2LenOl - (__halo2 * 6); const AN5D_TYPE __writeValid7 = __updateValid && __local_c2 >= (__halo2 * 7) && __local_c2 < __side2LenOl - (__halo2 * 7); const AN5D_TYPE __writeValid8 = __updateValid && __local_c2 >= (__halo2 * 8) && __local_c2 < __side2LenOl - (__halo2 * 8); const AN5D_TYPE __storeValid = __writeValid8; AN5D_TYPE __c1; AN5D_TYPE __h; const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id; #define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0) #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR(__rn0, __a, __b, __c) do { __rn0 = ((__REGREF(__b, 0)) + (1.0f / sqrt((((0.0001f + 
(((__REGREF(__b, 0)) - (__REGREF(__a, 0))) * ((__REGREF(__b, 0)) - (__REGREF(__a, 0))))) + (((__REGREF(__b, 0)) - (__REGREF(__c, 0))) * ((__REGREF(__b, 0)) - (__REGREF(__c, 0))))) + (((__REGREF(__b, 0)) - (__SBREF(__b_sb, 1))) * ((__REGREF(__b, 0)) - (__SBREF(__b_sb, 1))))) + (((__REGREF(__b, 0)) - (__SBREF(__b_sb, -1))) * ((__REGREF(__b, 0)) - (__SBREF(__b_sb, -1))))))); } while (0) #define __DB_SWITCH() do { __b_sb = &__b_sb_double[(__b_sb == __b_sb_double) ? __blockSize : 0]; } while (0) #define __CALCSETUP(a, b, c) do { __DB_SWITCH(); __b_sb[__tid] = b; __syncthreads(); } while (0) #define __CALC1(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC2(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC3(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC4(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid4) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC5(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid5) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC6(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid6) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC7(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid7) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __STORE(h, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2); } } while (0) if (__c1Id == 0) { __LOAD(__reg_7_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_7_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_7_0, __reg_1_1, __reg_1_2); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_7_0, __reg_2_1, __reg_2_2); __LOAD(__reg_0_2, 5); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_7_0, __reg_3_1, __reg_3_2); __LOAD(__reg_0_0, 6); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_7_0, __reg_4_1, __reg_4_2); __LOAD(__reg_0_1, 7); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_7_0, __reg_5_1, __reg_5_2); __LOAD(__reg_0_2, 8); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_7_0, 
__reg_6_1, __reg_6_2); __LOAD(__reg_0_0, 9); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __STORE(1, __reg_7_0, __reg_7_1, __reg_7_2); __LOAD(__reg_0_1, 10); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __STORE(2, __reg_7_1, __reg_7_2, __reg_7_0); __LOAD(__reg_0_2, 11); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __STORE(3, __reg_7_2, __reg_7_0, __reg_7_1); __LOAD(__reg_0_0, 12); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __STORE(4, __reg_7_0, __reg_7_1, __reg_7_2); __LOAD(__reg_0_1, 13); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __STORE(5, __reg_7_1, __reg_7_2, __reg_7_0); __LOAD(__reg_0_2, 14); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __STORE(6, __reg_7_2, __reg_7_0, __reg_7_1); __LOAD(__reg_0_0, 15); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __STORE(7, __reg_7_0, __reg_7_1, __reg_7_2); __LOAD(__reg_0_1, 16); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __STORE(8, __reg_7_1, __reg_7_2, __reg_7_0); } else { 
__LOAD(__reg_0_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __LOAD(__reg_0_2, 5); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __LOAD(__reg_0_0, 6); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __LOAD(__reg_0_1, 7); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __LOAD(__reg_0_2, 8); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __LOAD(__reg_0_0, 9); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __LOAD(__reg_0_1, 10); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __LOAD(__reg_0_2, 11); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __LOAD(__reg_0_0, 12); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __LOAD(__reg_0_1, 13); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __LOAD(__reg_0_2, 14); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __LOAD(__reg_0_0, 15); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __LOAD(__reg_0_1, 16); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, 
__reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __STORE(8, __reg_7_1, __reg_7_2, __reg_7_0); } __b_sb = __b_sb_double + __blockSize * 0; if (__c1Id == __side1Num - 1) { for (__h = 17; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 3;) { __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __STORE(__h - 8, __reg_7_2, __reg_7_0, __reg_7_1); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __STORE(__h - 8, __reg_7_0, __reg_7_1, __reg_7_2); __h++; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __STORE(__h - 8, __reg_7_1, __reg_7_2, __reg_7_0); __h++; } if (0) {} else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_0_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __STORE(__h - 8, __reg_7_2, __reg_7_0, __reg_7_1); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_0_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __STORE(__h - 7, __reg_7_0, __reg_7_1, __reg_7_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_0_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __STORE(__h - 6, __reg_7_1, __reg_7_2, __reg_7_0); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_0_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __STORE(__h - 5, __reg_7_2, __reg_7_0, __reg_7_1); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_0_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __STORE(__h - 4, __reg_7_0, __reg_7_1, __reg_7_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_0_1); __STORE(__h - 3, __reg_7_1, __reg_7_2, __reg_7_0); __STORE(__h - 2, __reg_7_2, __reg_7_0, __reg_0_1); } else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_2, __h + 0); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, 
__reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __STORE(__h - 8, __reg_7_2, __reg_7_0, __reg_7_1); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_0_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __STORE(__h - 7, __reg_7_0, __reg_7_1, __reg_7_2); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_0_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __STORE(__h - 6, __reg_7_1, __reg_7_2, __reg_7_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_0_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __STORE(__h - 5, __reg_7_2, __reg_7_0, __reg_7_1); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_0_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __STORE(__h - 4, __reg_7_0, __reg_7_1, __reg_7_2); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_0_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __STORE(__h - 3, __reg_7_1, __reg_7_2, __reg_7_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_0_2); __STORE(__h - 2, __reg_7_2, __reg_7_0, __reg_7_1); __STORE(__h - 1, __reg_7_0, __reg_7_1, __reg_0_2); } else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_2, __h + 0); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __STORE(__h - 8, __reg_7_2, __reg_7_0, __reg_7_1); __LOAD(__reg_0_0, __h + 1); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __STORE(__h - 7, __reg_7_0, __reg_7_1, __reg_7_2); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_0_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __STORE(__h - 6, __reg_7_1, __reg_7_2, __reg_7_0); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_0_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __STORE(__h - 5, __reg_7_2, __reg_7_0, __reg_7_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_0_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, 
__reg_6_1, __reg_6_2, __reg_6_0); __STORE(__h - 4, __reg_7_0, __reg_7_1, __reg_7_2); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_0_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __STORE(__h - 3, __reg_7_1, __reg_7_2, __reg_7_0); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_0_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __STORE(__h - 2, __reg_7_2, __reg_7_0, __reg_7_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_0_0); __STORE(__h - 1, __reg_7_0, __reg_7_1, __reg_7_2); __STORE(__h + 0, __reg_7_1, __reg_7_2, __reg_0_0); } } else { for (__h = 17; __h <= __side1LenOl - 3;) { __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __STORE(__h - 8, __reg_7_2, __reg_7_0, __reg_7_1); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __STORE(__h - 8, __reg_7_0, __reg_7_1, __reg_7_2); __h++; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __STORE(__h - 8, __reg_7_1, __reg_7_2, __reg_7_0); __h++; } if (__h == __side1LenOl) return; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __STORE(__h - 8, __reg_7_2, __reg_7_0, __reg_7_1); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __STORE(__h - 8, __reg_7_0, __reg_7_1, __reg_7_2); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __STORE(__h - 8, __reg_7_1, __reg_7_2, __reg_7_0); __h++; } } __global__ void kernel0_7(double *A, int dimsize, int timestep, int 
c0) { #ifndef AN5D_TYPE #define AN5D_TYPE unsigned #endif const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0); #define __c0 c0 const AN5D_TYPE __c1Len = (dimsize - 1 - 1); const AN5D_TYPE __c1Pad = (1); #define __c1 c1 const AN5D_TYPE __c2Len = (dimsize - 1 - 1); const AN5D_TYPE __c2Pad = (1); #define __c2 c2 const AN5D_TYPE __halo1 = 1; const AN5D_TYPE __halo2 = 1; const AN5D_TYPE __side0Len = 7; const AN5D_TYPE __side1Len = 128; const AN5D_TYPE __side2Len = 498; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len; const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len; const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x; const AN5D_TYPE __local_c2 = __tid; const AN5D_TYPE __c1Id = blockIdx.x / __side2Num; const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2; double __reg_0_0; double __reg_0_1; double __reg_0_2; double __reg_1_0; double __reg_1_1; double __reg_1_2; double __reg_2_0; double __reg_2_1; double __reg_2_2; double __reg_3_0; double __reg_3_1; double __reg_3_2; double __reg_4_0; double __reg_4_1; double __reg_4_2; double __reg_5_0; double __reg_5_1; double __reg_5_2; double __reg_6_0; double __reg_6_1; double __reg_6_2; __shared__ double __b_sb_double[__blockSize * 2]; double *__b_sb = __b_sb_double; const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2; const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len; const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1); const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2); const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3); const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4); const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5); const AN5D_TYPE __writeValid6 = __updateValid && __local_c2 >= (__halo2 * 6) && __local_c2 < __side2LenOl - (__halo2 * 6); const AN5D_TYPE __writeValid7 = __updateValid && __local_c2 >= (__halo2 * 7) && __local_c2 < __side2LenOl - (__halo2 * 7); const AN5D_TYPE __storeValid = __writeValid7; AN5D_TYPE __c1; AN5D_TYPE __h; const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id; #define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0) #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR(__rn0, __a, __b, __c) do { __rn0 = ((__REGREF(__b, 0)) + (1.0f / sqrt((((0.0001f + (((__REGREF(__b, 0)) - (__REGREF(__a, 0))) * ((__REGREF(__b, 0)) - (__REGREF(__a, 0))))) + (((__REGREF(__b, 0)) - (__REGREF(__c, 0))) * ((__REGREF(__b, 0)) - (__REGREF(__c, 0))))) + (((__REGREF(__b, 0)) - (__SBREF(__b_sb, 1))) * ((__REGREF(__b, 0)) - (__SBREF(__b_sb, 1))))) + (((__REGREF(__b, 0)) - (__SBREF(__b_sb, -1))) * ((__REGREF(__b, 0)) - (__SBREF(__b_sb, 
-1))))))); } while (0) #define __DB_SWITCH() do { __b_sb = &__b_sb_double[(__b_sb == __b_sb_double) ? __blockSize : 0]; } while (0) #define __CALCSETUP(a, b, c) do { __DB_SWITCH(); __b_sb[__tid] = b; __syncthreads(); } while (0) #define __CALC1(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC2(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC3(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC4(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid4) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC5(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid5) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC6(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid6) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __STORE(h, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2); } } while (0) if (__c1Id == 0) { __LOAD(__reg_6_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_6_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_6_0, __reg_1_1, __reg_1_2); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_6_0, __reg_2_1, __reg_2_2); __LOAD(__reg_0_2, 5); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_6_0, __reg_3_1, __reg_3_2); __LOAD(__reg_0_0, 6); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_6_0, __reg_4_1, __reg_4_2); __LOAD(__reg_0_1, 7); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_6_0, __reg_5_1, __reg_5_2); __LOAD(__reg_0_2, 8); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __STORE(1, __reg_6_0, __reg_6_1, __reg_6_2); __LOAD(__reg_0_0, 9); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __STORE(2, __reg_6_1, __reg_6_2, __reg_6_0); __LOAD(__reg_0_1, 10); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, 
__reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __STORE(3, __reg_6_2, __reg_6_0, __reg_6_1); __LOAD(__reg_0_2, 11); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __STORE(4, __reg_6_0, __reg_6_1, __reg_6_2); __LOAD(__reg_0_0, 12); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __STORE(5, __reg_6_1, __reg_6_2, __reg_6_0); __LOAD(__reg_0_1, 13); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __STORE(6, __reg_6_2, __reg_6_0, __reg_6_1); __LOAD(__reg_0_2, 14); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __STORE(7, __reg_6_0, __reg_6_1, __reg_6_2); } else { __LOAD(__reg_0_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __LOAD(__reg_0_2, 5); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __LOAD(__reg_0_0, 6); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __LOAD(__reg_0_1, 7); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __LOAD(__reg_0_2, 8); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __LOAD(__reg_0_0, 9); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __LOAD(__reg_0_1, 10); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __LOAD(__reg_0_2, 11); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, 
__reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __LOAD(__reg_0_0, 12); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __LOAD(__reg_0_1, 13); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __LOAD(__reg_0_2, 14); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __STORE(7, __reg_6_0, __reg_6_1, __reg_6_2); __DB_SWITCH(); __syncthreads(); } __b_sb = __b_sb_double + __blockSize * 0; if (__c1Id == __side1Num - 1) { for (__h = 15; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 3;) { __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __STORE(__h - 7, __reg_6_1, __reg_6_2, __reg_6_0); __h++; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __STORE(__h - 7, __reg_6_2, __reg_6_0, __reg_6_1); __h++; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __STORE(__h - 7, __reg_6_0, __reg_6_1, __reg_6_2); __h++; __DB_SWITCH(); __syncthreads(); } if (0) {} else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_0_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __STORE(__h - 7, __reg_6_1, __reg_6_2, __reg_6_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_0_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __STORE(__h - 6, __reg_6_2, __reg_6_0, __reg_6_1); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_0_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __STORE(__h - 5, __reg_6_0, __reg_6_1, __reg_6_2); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_0_2); __CALC6(__reg_6_0, 
__reg_5_2, __reg_5_0, __reg_5_1); __STORE(__h - 4, __reg_6_1, __reg_6_2, __reg_6_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_0_2); __STORE(__h - 3, __reg_6_2, __reg_6_0, __reg_6_1); __STORE(__h - 2, __reg_6_0, __reg_6_1, __reg_0_2); } else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_0, __h + 0); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __STORE(__h - 7, __reg_6_1, __reg_6_2, __reg_6_0); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_0_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __STORE(__h - 6, __reg_6_2, __reg_6_0, __reg_6_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_0_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __STORE(__h - 5, __reg_6_0, __reg_6_1, __reg_6_2); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_0_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __STORE(__h - 4, __reg_6_1, __reg_6_2, __reg_6_0); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_0_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __STORE(__h - 3, __reg_6_2, __reg_6_0, __reg_6_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_0_0); __STORE(__h - 2, __reg_6_0, __reg_6_1, __reg_6_2); __STORE(__h - 1, __reg_6_1, __reg_6_2, __reg_0_0); } else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_0, __h + 0); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __STORE(__h - 7, __reg_6_1, __reg_6_2, __reg_6_0); __LOAD(__reg_0_1, __h + 1); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __STORE(__h - 6, __reg_6_2, __reg_6_0, __reg_6_1); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_0_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __STORE(__h - 5, __reg_6_0, __reg_6_1, __reg_6_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_0_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __STORE(__h - 4, __reg_6_1, __reg_6_2, __reg_6_0); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_0_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __STORE(__h - 3, __reg_6_2, __reg_6_0, __reg_6_1); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_0_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, 
__reg_5_0); __STORE(__h - 2, __reg_6_0, __reg_6_1, __reg_6_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_0_1); __STORE(__h - 1, __reg_6_1, __reg_6_2, __reg_6_0); __STORE(__h + 0, __reg_6_2, __reg_6_0, __reg_0_1); } } else { for (__h = 15; __h <= __side1LenOl - 3;) { __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __STORE(__h - 7, __reg_6_1, __reg_6_2, __reg_6_0); __h++; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __STORE(__h - 7, __reg_6_2, __reg_6_0, __reg_6_1); __h++; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __STORE(__h - 7, __reg_6_0, __reg_6_1, __reg_6_2); __h++; __DB_SWITCH(); __syncthreads(); } if (__h == __side1LenOl) return; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __STORE(__h - 7, __reg_6_1, __reg_6_2, __reg_6_0); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __STORE(__h - 7, __reg_6_2, __reg_6_0, __reg_6_1); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __STORE(__h - 7, __reg_6_0, __reg_6_1, __reg_6_2); __h++; } } __global__ void kernel0_6(double *A, int dimsize, int timestep, int c0) { #ifndef AN5D_TYPE #define AN5D_TYPE unsigned #endif const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0); #define __c0 c0 const AN5D_TYPE __c1Len = (dimsize - 1 - 1); const AN5D_TYPE __c1Pad = (1); #define __c1 c1 const AN5D_TYPE __c2Len = (dimsize - 1 - 1); const AN5D_TYPE __c2Pad = (1); #define __c2 c2 const AN5D_TYPE __halo1 = 1; const AN5D_TYPE __halo2 = 1; const AN5D_TYPE __side0Len = 6; const AN5D_TYPE __side1Len = 128; const AN5D_TYPE __side2Len = 500; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE 
__side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len; const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len; const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x; const AN5D_TYPE __local_c2 = __tid; const AN5D_TYPE __c1Id = blockIdx.x / __side2Num; const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2; double __reg_0_0; double __reg_0_1; double __reg_0_2; double __reg_1_0; double __reg_1_1; double __reg_1_2; double __reg_2_0; double __reg_2_1; double __reg_2_2; double __reg_3_0; double __reg_3_1; double __reg_3_2; double __reg_4_0; double __reg_4_1; double __reg_4_2; double __reg_5_0; double __reg_5_1; double __reg_5_2; __shared__ double __b_sb_double[__blockSize * 2]; double *__b_sb = __b_sb_double; const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2; const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len; const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1); const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2); const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3); const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4); const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5); const AN5D_TYPE __writeValid6 = __updateValid && __local_c2 >= (__halo2 * 6) && __local_c2 < __side2LenOl - (__halo2 * 6); const AN5D_TYPE __storeValid = __writeValid6; AN5D_TYPE __c1; AN5D_TYPE __h; const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id; #define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0) #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR(__rn0, __a, __b, __c) do { __rn0 = ((__REGREF(__b, 0)) + (1.0f / sqrt((((0.0001f + (((__REGREF(__b, 0)) - (__REGREF(__a, 0))) * ((__REGREF(__b, 0)) - (__REGREF(__a, 0))))) + (((__REGREF(__b, 0)) - (__REGREF(__c, 0))) * ((__REGREF(__b, 0)) - (__REGREF(__c, 0))))) + (((__REGREF(__b, 0)) - (__SBREF(__b_sb, 1))) * ((__REGREF(__b, 0)) - (__SBREF(__b_sb, 1))))) + (((__REGREF(__b, 0)) - (__SBREF(__b_sb, -1))) * ((__REGREF(__b, 0)) - (__SBREF(__b_sb, -1))))))); } while (0) #define __DB_SWITCH() do { __b_sb = &__b_sb_double[(__b_sb == __b_sb_double) ? 
__blockSize : 0]; } while (0) #define __CALCSETUP(a, b, c) do { __DB_SWITCH(); __b_sb[__tid] = b; __syncthreads(); } while (0) #define __CALC1(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC2(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC3(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC4(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid4) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC5(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid5) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __STORE(h, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2); } } while (0) if (__c1Id == 0) { __LOAD(__reg_5_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_5_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_5_0, __reg_1_1, __reg_1_2); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_5_0, __reg_2_1, __reg_2_2); __LOAD(__reg_0_2, 5); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_5_0, __reg_3_1, __reg_3_2); __LOAD(__reg_0_0, 6); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_5_0, __reg_4_1, __reg_4_2); __LOAD(__reg_0_1, 7); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __STORE(1, __reg_5_0, __reg_5_1, __reg_5_2); __LOAD(__reg_0_2, 8); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __STORE(2, __reg_5_1, __reg_5_2, __reg_5_0); __LOAD(__reg_0_0, 9); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __STORE(3, __reg_5_2, __reg_5_0, __reg_5_1); __LOAD(__reg_0_1, 10); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __STORE(4, __reg_5_0, __reg_5_1, __reg_5_2); __LOAD(__reg_0_2, 11); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, 
__reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __STORE(5, __reg_5_1, __reg_5_2, __reg_5_0); __LOAD(__reg_0_0, 12); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __STORE(6, __reg_5_2, __reg_5_0, __reg_5_1); } else { __LOAD(__reg_0_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __LOAD(__reg_0_2, 5); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __LOAD(__reg_0_0, 6); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __LOAD(__reg_0_1, 7); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __LOAD(__reg_0_2, 8); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __LOAD(__reg_0_0, 9); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __LOAD(__reg_0_1, 10); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __LOAD(__reg_0_2, 11); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __LOAD(__reg_0_0, 12); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __STORE(6, __reg_5_2, __reg_5_0, __reg_5_1); __DB_SWITCH(); __syncthreads(); } __b_sb = __b_sb_double + __blockSize * 1; if (__c1Id == __side1Num - 1) { for (__h = 13; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 3;) { __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __STORE(__h - 6, __reg_5_0, __reg_5_1, __reg_5_2); __h++; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __STORE(__h - 6, __reg_5_1, 
__reg_5_2, __reg_5_0); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __STORE(__h - 6, __reg_5_2, __reg_5_0, __reg_5_1); __h++; } if (0) {} else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_0_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __STORE(__h - 6, __reg_5_0, __reg_5_1, __reg_5_2); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_0_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __STORE(__h - 5, __reg_5_1, __reg_5_2, __reg_5_0); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_0_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __STORE(__h - 4, __reg_5_2, __reg_5_0, __reg_5_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_0_0); __STORE(__h - 3, __reg_5_0, __reg_5_1, __reg_5_2); __STORE(__h - 2, __reg_5_1, __reg_5_2, __reg_0_0); } else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_1, __h + 0); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __STORE(__h - 6, __reg_5_0, __reg_5_1, __reg_5_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_0_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __STORE(__h - 5, __reg_5_1, __reg_5_2, __reg_5_0); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_0_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __STORE(__h - 4, __reg_5_2, __reg_5_0, __reg_5_1); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_0_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __STORE(__h - 3, __reg_5_0, __reg_5_1, __reg_5_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_0_1); __STORE(__h - 2, __reg_5_1, __reg_5_2, __reg_5_0); __STORE(__h - 1, __reg_5_2, __reg_5_0, __reg_0_1); } else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_1, __h + 0); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __STORE(__h - 6, __reg_5_0, __reg_5_1, __reg_5_2); __LOAD(__reg_0_2, __h + 1); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __STORE(__h - 5, __reg_5_1, __reg_5_2, __reg_5_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_0_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __STORE(__h - 4, __reg_5_2, __reg_5_0, __reg_5_1); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_0_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, 
__reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __STORE(__h - 3, __reg_5_0, __reg_5_1, __reg_5_2); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_0_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __STORE(__h - 2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_0_2); __STORE(__h - 1, __reg_5_2, __reg_5_0, __reg_5_1); __STORE(__h + 0, __reg_5_0, __reg_5_1, __reg_0_2); } } else { for (__h = 13; __h <= __side1LenOl - 3;) { __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __STORE(__h - 6, __reg_5_0, __reg_5_1, __reg_5_2); __h++; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __STORE(__h - 6, __reg_5_1, __reg_5_2, __reg_5_0); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __STORE(__h - 6, __reg_5_2, __reg_5_0, __reg_5_1); __h++; } if (__h == __side1LenOl) return; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __STORE(__h - 6, __reg_5_0, __reg_5_1, __reg_5_2); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __STORE(__h - 6, __reg_5_1, __reg_5_2, __reg_5_0); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __STORE(__h - 6, __reg_5_2, __reg_5_0, __reg_5_1); __h++; } } __global__ void kernel0_5(double *A, int dimsize, int timestep, int c0) { #ifndef AN5D_TYPE #define AN5D_TYPE unsigned #endif const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0); #define __c0 c0 const AN5D_TYPE __c1Len = (dimsize - 1 - 1); const AN5D_TYPE __c1Pad = (1); #define __c1 c1 const AN5D_TYPE __c2Len = (dimsize - 1 - 1); const AN5D_TYPE __c2Pad = (1); #define __c2 c2 const AN5D_TYPE __halo1 = 1; const AN5D_TYPE __halo2 = 1; const AN5D_TYPE __side0Len = 5; const AN5D_TYPE __side1Len = 128; const AN5D_TYPE __side2Len = 502; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / 
__side1Len; const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len; const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x; const AN5D_TYPE __local_c2 = __tid; const AN5D_TYPE __c1Id = blockIdx.x / __side2Num; const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2; double __reg_0_0; double __reg_0_1; double __reg_0_2; double __reg_1_0; double __reg_1_1; double __reg_1_2; double __reg_2_0; double __reg_2_1; double __reg_2_2; double __reg_3_0; double __reg_3_1; double __reg_3_2; double __reg_4_0; double __reg_4_1; double __reg_4_2; __shared__ double __b_sb_double[__blockSize * 2]; double *__b_sb = __b_sb_double; const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2; const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len; const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1); const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2); const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3); const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4); const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5); const AN5D_TYPE __storeValid = __writeValid5; AN5D_TYPE __c1; AN5D_TYPE __h; const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id; #define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0) #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR(__rn0, __a, __b, __c) do { __rn0 = ((__REGREF(__b, 0)) + (1.0f / sqrt((((0.0001f + (((__REGREF(__b, 0)) - (__REGREF(__a, 0))) * ((__REGREF(__b, 0)) - (__REGREF(__a, 0))))) + (((__REGREF(__b, 0)) - (__REGREF(__c, 0))) * ((__REGREF(__b, 0)) - (__REGREF(__c, 0))))) + (((__REGREF(__b, 0)) - (__SBREF(__b_sb, 1))) * ((__REGREF(__b, 0)) - (__SBREF(__b_sb, 1))))) + (((__REGREF(__b, 0)) - (__SBREF(__b_sb, -1))) * ((__REGREF(__b, 0)) - (__SBREF(__b_sb, -1))))))); } while (0) #define __DB_SWITCH() do { __b_sb = &__b_sb_double[(__b_sb == __b_sb_double) ? 
__blockSize : 0]; } while (0) #define __CALCSETUP(a, b, c) do { __DB_SWITCH(); __b_sb[__tid] = b; __syncthreads(); } while (0) #define __CALC1(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC2(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC3(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC4(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid4) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __STORE(h, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2); } } while (0) if (__c1Id == 0) { __LOAD(__reg_4_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_4_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_4_0, __reg_1_1, __reg_1_2); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_4_0, __reg_2_1, __reg_2_2); __LOAD(__reg_0_2, 5); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_4_0, __reg_3_1, __reg_3_2); __LOAD(__reg_0_0, 6); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __STORE(1, __reg_4_0, __reg_4_1, __reg_4_2); __LOAD(__reg_0_1, 7); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __STORE(2, __reg_4_1, __reg_4_2, __reg_4_0); __LOAD(__reg_0_2, 8); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __STORE(3, __reg_4_2, __reg_4_0, __reg_4_1); __LOAD(__reg_0_0, 9); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __STORE(4, __reg_4_0, __reg_4_1, __reg_4_2); __LOAD(__reg_0_1, 10); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __STORE(5, __reg_4_1, __reg_4_2, __reg_4_0); } else { __LOAD(__reg_0_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __LOAD(__reg_0_2, 5); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __LOAD(__reg_0_0, 6); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); 
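/* kernel0_5, interior tile: keep priming the pipeline on rows 7..10 before the
   first __STORE(5, ...). Each __CALCn stage only produces a real update when the
   thread sits at least n * __halo2 cells inside the tile along c2 (__writeValidN);
   otherwise it just forwards its middle input, so edge threads carry halo data. */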
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __LOAD(__reg_0_1, 7); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __LOAD(__reg_0_2, 8); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __LOAD(__reg_0_0, 9); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __LOAD(__reg_0_1, 10); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __STORE(5, __reg_4_1, __reg_4_2, __reg_4_0); } __b_sb = __b_sb_double + __blockSize * 1; if (__c1Id == __side1Num - 1) { for (__h = 11; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 3;) { __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __STORE(__h - 5, __reg_4_2, __reg_4_0, __reg_4_1); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __STORE(__h - 5, __reg_4_0, __reg_4_1, __reg_4_2); __h++; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __STORE(__h - 5, __reg_4_1, __reg_4_2, __reg_4_0); __h++; __DB_SWITCH(); __syncthreads(); } if (0) {} else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_0_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __STORE(__h - 5, __reg_4_2, __reg_4_0, __reg_4_1); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_0_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __STORE(__h - 4, __reg_4_0, __reg_4_1, __reg_4_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_0_1); __STORE(__h - 3, __reg_4_1, __reg_4_2, __reg_4_0); __STORE(__h - 2, __reg_4_2, __reg_4_0, __reg_0_1); } else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_2, __h + 0); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __STORE(__h - 5, __reg_4_2, __reg_4_0, __reg_4_1); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_0_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __STORE(__h - 4, __reg_4_0, __reg_4_1, __reg_4_2); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_0_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __STORE(__h - 3, __reg_4_1, __reg_4_2, __reg_4_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_0_2); __STORE(__h - 2, __reg_4_2, __reg_4_0, __reg_4_1); __STORE(__h - 1, 
__reg_4_0, __reg_4_1, __reg_0_2); } else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_2, __h + 0); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __STORE(__h - 5, __reg_4_2, __reg_4_0, __reg_4_1); __LOAD(__reg_0_0, __h + 1); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __STORE(__h - 4, __reg_4_0, __reg_4_1, __reg_4_2); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_0_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __STORE(__h - 3, __reg_4_1, __reg_4_2, __reg_4_0); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_0_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __STORE(__h - 2, __reg_4_2, __reg_4_0, __reg_4_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_0_0); __STORE(__h - 1, __reg_4_0, __reg_4_1, __reg_4_2); __STORE(__h + 0, __reg_4_1, __reg_4_2, __reg_0_0); } } else { for (__h = 11; __h <= __side1LenOl - 3;) { __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __STORE(__h - 5, __reg_4_2, __reg_4_0, __reg_4_1); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __STORE(__h - 5, __reg_4_0, __reg_4_1, __reg_4_2); __h++; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __STORE(__h - 5, __reg_4_1, __reg_4_2, __reg_4_0); __h++; __DB_SWITCH(); __syncthreads(); } if (__h == __side1LenOl) return; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __STORE(__h - 5, __reg_4_2, __reg_4_0, __reg_4_1); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __STORE(__h - 5, __reg_4_0, __reg_4_1, __reg_4_2); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __STORE(__h - 5, __reg_4_1, __reg_4_2, __reg_4_0); __h++; } } __global__ void kernel0_4(double *A, int dimsize, int timestep, int c0) { #ifndef AN5D_TYPE #define AN5D_TYPE unsigned #endif const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0); #define __c0 c0 const AN5D_TYPE __c1Len = (dimsize - 1 - 1); const AN5D_TYPE __c1Pad = (1); #define __c1 c1 const AN5D_TYPE __c2Len = (dimsize - 1 - 1); const AN5D_TYPE __c2Pad = (1); #define 
__c2 c2 const AN5D_TYPE __halo1 = 1; const AN5D_TYPE __halo2 = 1; const AN5D_TYPE __side0Len = 4; const AN5D_TYPE __side1Len = 128; const AN5D_TYPE __side2Len = 504; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len; const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len; const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x; const AN5D_TYPE __local_c2 = __tid; const AN5D_TYPE __c1Id = blockIdx.x / __side2Num; const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2; double __reg_0_0; double __reg_0_1; double __reg_0_2; double __reg_1_0; double __reg_1_1; double __reg_1_2; double __reg_2_0; double __reg_2_1; double __reg_2_2; double __reg_3_0; double __reg_3_1; double __reg_3_2; __shared__ double __b_sb_double[__blockSize * 2]; double *__b_sb = __b_sb_double; const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2; const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len; const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1); const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2); const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3); const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4); const AN5D_TYPE __storeValid = __writeValid4; AN5D_TYPE __c1; AN5D_TYPE __h; const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id; #define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0) #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR(__rn0, __a, __b, __c) do { __rn0 = ((__REGREF(__b, 0)) + (1.0f / sqrt((((0.0001f + (((__REGREF(__b, 0)) - (__REGREF(__a, 0))) * ((__REGREF(__b, 0)) - (__REGREF(__a, 0))))) + (((__REGREF(__b, 0)) - (__REGREF(__c, 0))) * ((__REGREF(__b, 0)) - (__REGREF(__c, 0))))) + (((__REGREF(__b, 0)) - (__SBREF(__b_sb, 1))) * ((__REGREF(__b, 0)) - (__SBREF(__b_sb, 1))))) + (((__REGREF(__b, 0)) - (__SBREF(__b_sb, -1))) * ((__REGREF(__b, 0)) - (__SBREF(__b_sb, -1))))))); } while (0) #define __DB_SWITCH() do { __b_sb = &__b_sb_double[(__b_sb == __b_sb_double) ? 
__blockSize : 0]; } while (0) #define __CALCSETUP(a, b, c) do { __DB_SWITCH(); __b_sb[__tid] = b; __syncthreads(); } while (0) #define __CALC1(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC2(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC3(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __STORE(h, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2); } } while (0) if (__c1Id == 0) { __LOAD(__reg_3_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_3_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_3_0, __reg_1_1, __reg_1_2); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_3_0, __reg_2_1, __reg_2_2); __LOAD(__reg_0_2, 5); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __STORE(1, __reg_3_0, __reg_3_1, __reg_3_2); __LOAD(__reg_0_0, 6); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __STORE(2, __reg_3_1, __reg_3_2, __reg_3_0); __LOAD(__reg_0_1, 7); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __STORE(3, __reg_3_2, __reg_3_0, __reg_3_1); __LOAD(__reg_0_2, 8); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __STORE(4, __reg_3_0, __reg_3_1, __reg_3_2); } else { __LOAD(__reg_0_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __LOAD(__reg_0_2, 5); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __LOAD(__reg_0_0, 6); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __LOAD(__reg_0_1, 7); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __LOAD(__reg_0_2, 8); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __STORE(4, __reg_3_0, __reg_3_1, __reg_3_2); } __b_sb = __b_sb_double + __blockSize * 0; if (__c1Id == __side1Num - 1) { for (__h = 9; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 3;) { __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __STORE(__h - 4, __reg_3_1, __reg_3_2, 
__reg_3_0); __h++; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __STORE(__h - 4, __reg_3_2, __reg_3_0, __reg_3_1); __h++; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __STORE(__h - 4, __reg_3_0, __reg_3_1, __reg_3_2); __h++; } if (0) {} else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_0_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __STORE(__h - 4, __reg_3_1, __reg_3_2, __reg_3_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_0_2); __STORE(__h - 3, __reg_3_2, __reg_3_0, __reg_3_1); __STORE(__h - 2, __reg_3_0, __reg_3_1, __reg_0_2); } else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_0, __h + 0); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __STORE(__h - 4, __reg_3_1, __reg_3_2, __reg_3_0); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_0_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __STORE(__h - 3, __reg_3_2, __reg_3_0, __reg_3_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_0_0); __STORE(__h - 2, __reg_3_0, __reg_3_1, __reg_3_2); __STORE(__h - 1, __reg_3_1, __reg_3_2, __reg_0_0); } else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_0, __h + 0); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __STORE(__h - 4, __reg_3_1, __reg_3_2, __reg_3_0); __LOAD(__reg_0_1, __h + 1); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __STORE(__h - 3, __reg_3_2, __reg_3_0, __reg_3_1); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_0_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __STORE(__h - 2, __reg_3_0, __reg_3_1, __reg_3_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_0_1); __STORE(__h - 1, __reg_3_1, __reg_3_2, __reg_3_0); __STORE(__h + 0, __reg_3_2, __reg_3_0, __reg_0_1); } } else { for (__h = 9; __h <= __side1LenOl - 3;) { __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __STORE(__h - 4, __reg_3_1, __reg_3_2, __reg_3_0); __h++; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __STORE(__h - 4, __reg_3_2, __reg_3_0, __reg_3_1); __h++; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __STORE(__h - 4, __reg_3_0, __reg_3_1, __reg_3_2); __h++; } if (__h == __side1LenOl) return; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __STORE(__h - 4, __reg_3_1, __reg_3_2, __reg_3_0); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, 
__reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __STORE(__h - 4, __reg_3_2, __reg_3_0, __reg_3_1); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __STORE(__h - 4, __reg_3_0, __reg_3_1, __reg_3_2); __h++; } } __global__ void kernel0_3(double *A, int dimsize, int timestep, int c0) { #ifndef AN5D_TYPE #define AN5D_TYPE unsigned #endif const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0); #define __c0 c0 const AN5D_TYPE __c1Len = (dimsize - 1 - 1); const AN5D_TYPE __c1Pad = (1); #define __c1 c1 const AN5D_TYPE __c2Len = (dimsize - 1 - 1); const AN5D_TYPE __c2Pad = (1); #define __c2 c2 const AN5D_TYPE __halo1 = 1; const AN5D_TYPE __halo2 = 1; const AN5D_TYPE __side0Len = 3; const AN5D_TYPE __side1Len = 128; const AN5D_TYPE __side2Len = 506; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len; const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len; const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x; const AN5D_TYPE __local_c2 = __tid; const AN5D_TYPE __c1Id = blockIdx.x / __side2Num; const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2; double __reg_0_0; double __reg_0_1; double __reg_0_2; double __reg_1_0; double __reg_1_1; double __reg_1_2; double __reg_2_0; double __reg_2_1; double __reg_2_2; __shared__ double __b_sb_double[__blockSize * 2]; double *__b_sb = __b_sb_double; const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2; const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len; const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1); const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2); const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3); const AN5D_TYPE __storeValid = __writeValid3; AN5D_TYPE __c1; AN5D_TYPE __h; const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id; #define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0) #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR(__rn0, __a, __b, __c) do { __rn0 = ((__REGREF(__b, 0)) + (1.0f / sqrt((((0.0001f + (((__REGREF(__b, 0)) - (__REGREF(__a, 0))) * ((__REGREF(__b, 0)) - (__REGREF(__a, 0))))) + (((__REGREF(__b, 0)) - (__REGREF(__c, 0))) * ((__REGREF(__b, 0)) - (__REGREF(__c, 0))))) + (((__REGREF(__b, 0)) - (__SBREF(__b_sb, 1))) * ((__REGREF(__b, 0)) - (__SBREF(__b_sb, 1))))) + (((__REGREF(__b, 0)) - (__SBREF(__b_sb, -1))) * ((__REGREF(__b, 0)) - (__SBREF(__b_sb, -1))))))); } while (0) #define __DB_SWITCH() do { __b_sb = &__b_sb_double[(__b_sb == __b_sb_double) ? 
__blockSize : 0]; } while (0) #define __CALCSETUP(a, b, c) do { __DB_SWITCH(); __b_sb[__tid] = b; __syncthreads(); } while (0) #define __CALC1(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC2(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __STORE(h, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2); } } while (0) if (__c1Id == 0) { __LOAD(__reg_2_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_2_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_2_0, __reg_1_1, __reg_1_2); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __STORE(1, __reg_2_0, __reg_2_1, __reg_2_2); __LOAD(__reg_0_2, 5); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __STORE(2, __reg_2_1, __reg_2_2, __reg_2_0); __LOAD(__reg_0_0, 6); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __STORE(3, __reg_2_2, __reg_2_0, __reg_2_1); } else { __LOAD(__reg_0_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __LOAD(__reg_0_2, 5); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __LOAD(__reg_0_0, 6); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __STORE(3, __reg_2_2, __reg_2_0, __reg_2_1); __DB_SWITCH(); __syncthreads(); } __b_sb = __b_sb_double + __blockSize * 0; if (__c1Id == __side1Num - 1) { for (__h = 7; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 3;) { __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __STORE(__h - 3, __reg_2_0, __reg_2_1, __reg_2_2); __h++; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __STORE(__h - 3, __reg_2_1, __reg_2_2, __reg_2_0); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __STORE(__h - 3, __reg_2_2, __reg_2_0, __reg_2_1); __h++; __DB_SWITCH(); __syncthreads(); } if (0) {} else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_0_0); __STORE(__h - 3, __reg_2_0, __reg_2_1, __reg_2_2); __STORE(__h - 2, __reg_2_1, __reg_2_2, __reg_0_0); } else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_1, __h + 0); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __STORE(__h - 3, __reg_2_0, __reg_2_1, __reg_2_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_0_1); __STORE(__h - 2, __reg_2_1, __reg_2_2, __reg_2_0); __STORE(__h - 1, __reg_2_2, __reg_2_0, __reg_0_1); } else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_1, __h + 0); 
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __STORE(__h - 3, __reg_2_0, __reg_2_1, __reg_2_2); __LOAD(__reg_0_2, __h + 1); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __STORE(__h - 2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_0_2); __STORE(__h - 1, __reg_2_2, __reg_2_0, __reg_2_1); __STORE(__h + 0, __reg_2_0, __reg_2_1, __reg_0_2); } } else { for (__h = 7; __h <= __side1LenOl - 3;) { __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __STORE(__h - 3, __reg_2_0, __reg_2_1, __reg_2_2); __h++; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __STORE(__h - 3, __reg_2_1, __reg_2_2, __reg_2_0); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __STORE(__h - 3, __reg_2_2, __reg_2_0, __reg_2_1); __h++; __DB_SWITCH(); __syncthreads(); } if (__h == __side1LenOl) return; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __STORE(__h - 3, __reg_2_0, __reg_2_1, __reg_2_2); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __STORE(__h - 3, __reg_2_1, __reg_2_2, __reg_2_0); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __STORE(__h - 3, __reg_2_2, __reg_2_0, __reg_2_1); __h++; } } __global__ void kernel0_2(double *A, int dimsize, int timestep, int c0) { #ifndef AN5D_TYPE #define AN5D_TYPE unsigned #endif const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0); #define __c0 c0 const AN5D_TYPE __c1Len = (dimsize - 1 - 1); const AN5D_TYPE __c1Pad = (1); #define __c1 c1 const AN5D_TYPE __c2Len = (dimsize - 1 - 1); const AN5D_TYPE __c2Pad = (1); #define __c2 c2 const AN5D_TYPE __halo1 = 1; const AN5D_TYPE __halo2 = 1; const AN5D_TYPE __side0Len = 2; const AN5D_TYPE __side1Len = 128; const AN5D_TYPE __side2Len = 508; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len; const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len; const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x; const AN5D_TYPE __local_c2 = __tid; const AN5D_TYPE __c1Id = blockIdx.x / __side2Num; const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2; double __reg_0_0; double __reg_0_1; double __reg_0_2; double __reg_1_0; double __reg_1_1; double __reg_1_2; __shared__ double __b_sb_double[__blockSize * 2]; double *__b_sb = __b_sb_double; const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2; const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len; const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1); const AN5D_TYPE __writeValid2 
= __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2); const AN5D_TYPE __storeValid = __writeValid2; AN5D_TYPE __c1; AN5D_TYPE __h; const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id; #define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0) #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR(__rn0, __a, __b, __c) do { __rn0 = ((__REGREF(__b, 0)) + (1.0f / sqrt((((0.0001f + (((__REGREF(__b, 0)) - (__REGREF(__a, 0))) * ((__REGREF(__b, 0)) - (__REGREF(__a, 0))))) + (((__REGREF(__b, 0)) - (__REGREF(__c, 0))) * ((__REGREF(__b, 0)) - (__REGREF(__c, 0))))) + (((__REGREF(__b, 0)) - (__SBREF(__b_sb, 1))) * ((__REGREF(__b, 0)) - (__SBREF(__b_sb, 1))))) + (((__REGREF(__b, 0)) - (__SBREF(__b_sb, -1))) * ((__REGREF(__b, 0)) - (__SBREF(__b_sb, -1))))))); } while (0) #define __DB_SWITCH() do { __b_sb = &__b_sb_double[(__b_sb == __b_sb_double) ? __blockSize : 0]; } while (0) #define __CALCSETUP(a, b, c) do { __DB_SWITCH(); __b_sb[__tid] = b; __syncthreads(); } while (0) #define __CALC1(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __STORE(h, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2); } } while (0) if (__c1Id == 0) { __LOAD(__reg_1_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_1_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __STORE(1, __reg_1_0, __reg_1_1, __reg_1_2); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __STORE(2, __reg_1_1, __reg_1_2, __reg_1_0); } else { __LOAD(__reg_0_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __STORE(2, __reg_1_1, __reg_1_2, __reg_1_0); __DB_SWITCH(); __syncthreads(); } __b_sb = __b_sb_double + __blockSize * 1; if (__c1Id == __side1Num - 1) { for (__h = 5; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 3;) { __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __STORE(__h - 2, __reg_1_2, __reg_1_0, __reg_1_1); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __STORE(__h - 2, __reg_1_0, __reg_1_1, __reg_1_2); __h++; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __STORE(__h - 2, __reg_1_1, __reg_1_2, __reg_1_0); __h++; } if (0) {} else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __STORE(__h - 2, __reg_1_2, __reg_1_0, __reg_0_1); } else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_2, __h + 0); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __STORE(__h - 2, __reg_1_2, __reg_1_0, __reg_1_1); __STORE(__h - 1, __reg_1_0, __reg_1_1, __reg_0_2); } else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_2, __h + 0); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __STORE(__h - 2, __reg_1_2, __reg_1_0, __reg_1_1); __LOAD(__reg_0_0, __h + 1); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __STORE(__h - 1, __reg_1_0, __reg_1_1, 
__reg_1_2); __STORE(__h + 0, __reg_1_1, __reg_1_2, __reg_0_0); } } else { for (__h = 5; __h <= __side1LenOl - 3;) { __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __STORE(__h - 2, __reg_1_2, __reg_1_0, __reg_1_1); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __STORE(__h - 2, __reg_1_0, __reg_1_1, __reg_1_2); __h++; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __STORE(__h - 2, __reg_1_1, __reg_1_2, __reg_1_0); __h++; } if (__h == __side1LenOl) return; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __STORE(__h - 2, __reg_1_2, __reg_1_0, __reg_1_1); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __STORE(__h - 2, __reg_1_0, __reg_1_1, __reg_1_2); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __STORE(__h - 2, __reg_1_1, __reg_1_2, __reg_1_0); __h++; } } __global__ void kernel0_1(double *A, int dimsize, int timestep, int c0) { #ifndef AN5D_TYPE #define AN5D_TYPE unsigned #endif const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0); #define __c0 c0 const AN5D_TYPE __c1Len = (dimsize - 1 - 1); const AN5D_TYPE __c1Pad = (1); #define __c1 c1 const AN5D_TYPE __c2Len = (dimsize - 1 - 1); const AN5D_TYPE __c2Pad = (1); #define __c2 c2 const AN5D_TYPE __halo1 = 1; const AN5D_TYPE __halo2 = 1; const AN5D_TYPE __side0Len = 1; const AN5D_TYPE __side1Len = 128; const AN5D_TYPE __side2Len = 510; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len; const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len; const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x; const AN5D_TYPE __local_c2 = __tid; const AN5D_TYPE __c1Id = blockIdx.x / __side2Num; const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2; double __reg_0_0; double __reg_0_1; double __reg_0_2; __shared__ double __b_sb_double[__blockSize * 2]; double *__b_sb = __b_sb_double; const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2; const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len; const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1); const AN5D_TYPE __storeValid = __writeValid1; AN5D_TYPE __c1; AN5D_TYPE __h; const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id; #define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0) #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR(__rn0, __a, __b, __c) do { __rn0 = ((__REGREF(__b, 0)) + (1.0f / sqrt((((0.0001f + (((__REGREF(__b, 0)) - (__REGREF(__a, 0))) * ((__REGREF(__b, 0)) - (__REGREF(__a, 0))))) + (((__REGREF(__b, 0)) - (__REGREF(__c, 0))) * ((__REGREF(__b, 0)) - (__REGREF(__c, 0))))) + (((__REGREF(__b, 0)) - (__SBREF(__b_sb, 1))) * ((__REGREF(__b, 0)) - (__SBREF(__b_sb, 1))))) + (((__REGREF(__b, 0)) - (__SBREF(__b_sb, -1))) * 
((__REGREF(__b, 0)) - (__SBREF(__b_sb, -1))))))); } while (0) #define __DB_SWITCH() do { __b_sb = &__b_sb_double[(__b_sb == __b_sb_double) ? __blockSize : 0]; } while (0) #define __CALCSETUP(a, b, c) do { __DB_SWITCH(); __b_sb[__tid] = b; __syncthreads(); } while (0) #define __STORE(h, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2); } } while (0) if (__c1Id == 0) { __LOAD(__reg_0_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __STORE(1, __reg_0_0, __reg_0_1, __reg_0_2); } else { __LOAD(__reg_0_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __STORE(1, __reg_0_0, __reg_0_1, __reg_0_2); } __b_sb = __b_sb_double + __blockSize * 1; if (__c1Id == __side1Num - 1) { for (__h = 3; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 3;) { __LOAD(__reg_0_0, __h); __STORE(__h - 1, __reg_0_1, __reg_0_2, __reg_0_0); __h++; __LOAD(__reg_0_1, __h); __STORE(__h - 1, __reg_0_2, __reg_0_0, __reg_0_1); __h++; __LOAD(__reg_0_2, __h); __STORE(__h - 1, __reg_0_0, __reg_0_1, __reg_0_2); __h++; __DB_SWITCH(); __syncthreads(); } if (0) {} else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { } else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_0, __h + 0); __STORE(__h - 1, __reg_0_1, __reg_0_2, __reg_0_0); } else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_0, __h + 0); __STORE(__h - 1, __reg_0_1, __reg_0_2, __reg_0_0); __LOAD(__reg_0_1, __h + 1); __STORE(__h + 0, __reg_0_2, __reg_0_0, __reg_0_1); } } else { for (__h = 3; __h <= __side1LenOl - 3;) { __LOAD(__reg_0_0, __h); __STORE(__h - 1, __reg_0_1, __reg_0_2, __reg_0_0); __h++; __LOAD(__reg_0_1, __h); __STORE(__h - 1, __reg_0_2, __reg_0_0, __reg_0_1); __h++; __LOAD(__reg_0_2, __h); __STORE(__h - 1, __reg_0_0, __reg_0_1, __reg_0_2); __h++; __DB_SWITCH(); __syncthreads(); } if (__h == __side1LenOl) return; __LOAD(__reg_0_0, __h); __STORE(__h - 1, __reg_0_1, __reg_0_2, __reg_0_0); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_1, __h); __STORE(__h - 1, __reg_0_2, __reg_0_0, __reg_0_1); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_2, __h); __STORE(__h - 1, __reg_0_0, __reg_0_1, __reg_0_2); __h++; } }
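The four kernels above (kernel0_4 down to kernel0_1) differ only in how many time steps they fuse per launch (__side0Len = 4, 3, 2, 1), so a host driver picks the largest degree that still fits the remaining iteration count. Below is a minimal, hypothetical driver sketch for these kernels; the real AN5D-generated host code handles tail and buffer-parity padding more carefully, and a HIP build would spell the launches with hipLaunchKernelGGL instead of the triple-chevron syntax. The names launch_degree, run_stencil and dev_A are illustrative, not part of the file above.

// Hypothetical driver (sketch only). Each kernel0_N call advances N fused time
// steps, reading plane (c0 % 2) of A and writing plane ((c0 + 1) % 2), so c0
// counts launches rather than time steps.
static void launch_degree(int degree, double *dev_A, int dimsize, int timestep, int c0)
{
    const int halo = 1;
    const int side1Len = 128;
    const int side2Len = 512 - 2 * halo * degree;      /* 504/506/508/510, as in the kernels */
    const int c1Len = dimsize - 2, c2Len = dimsize - 2;
    const int side1Num = (c1Len + side1Len - 1) / side1Len;
    const int side2Num = (c2Len + side2Len - 1) / side2Len;
    dim3 block(side2Len + 2 * halo * degree, 1, 1);    /* 512 threads for every degree */
    dim3 grid(side1Num * side2Num, 1, 1);
    switch (degree)
    {
        case 4: kernel0_4<<<grid, block>>>(dev_A, dimsize, timestep, c0); break;
        case 3: kernel0_3<<<grid, block>>>(dev_A, dimsize, timestep, c0); break;
        case 2: kernel0_2<<<grid, block>>>(dev_A, dimsize, timestep, c0); break;
        default: kernel0_1<<<grid, block>>>(dev_A, dimsize, timestep, c0); break;
    }
}

static void run_stencil(double *dev_A, int dimsize, int timestep)
{
    int c0 = 0, remaining = timestep;
    while (remaining >= 4) { launch_degree(4, dev_A, dimsize, timestep, c0++); remaining -= 4; }
    if (remaining > 0)     { launch_degree(remaining, dev_A, dimsize, timestep, c0++); }
    cudaDeviceSynchronize();
}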
e6e5b7bf3176bcb046fa737eef5186adc6e04e7d.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Software License Agreement (BSD License) * * Point Cloud Library (PCL) - www.pointclouds.org * Copyright (c) 2011, Willow Garage, Inc. * * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials provided * with the distribution. * * Neither the name of Willow Garage, Inc. nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. * */ #include <depth_map_icp/cuda/internal.h> #include <depth_map_icp/cuda/vector_math.hpp> #include <depth_map_icp/cuda/containers/safe_call.hpp> struct Combined { enum { CTA_SIZE_X = 32, CTA_SIZE_Y = 8, CTA_SIZE = CTA_SIZE_X * CTA_SIZE_Y }; template<int CTA_SIZE_, typename T> static __device__ __forceinline__ void reduce(volatile T* buffer) { int tid = threadIdx.z * blockDim.x * blockDim.y + threadIdx.y * blockDim.x + threadIdx.x; T val = buffer[tid]; if (CTA_SIZE_ >= 1024) { if (tid < 512) buffer[tid] = val = val + buffer[tid + 512]; __syncthreads(); } if (CTA_SIZE_ >= 512) { if (tid < 256) buffer[tid] = val = val + buffer[tid + 256]; __syncthreads(); } if (CTA_SIZE_ >= 256) { if (tid < 128) buffer[tid] = val = val + buffer[tid + 128]; __syncthreads(); } if (CTA_SIZE_ >= 128) { if (tid < 64) buffer[tid] = val = val + buffer[tid + 64]; __syncthreads(); } if (tid < 32) { if (CTA_SIZE_ >= 64) { buffer[tid] = val = val + buffer[tid + 32]; } if (CTA_SIZE_ >= 32) { buffer[tid] = val = val + buffer[tid + 16]; } if (CTA_SIZE_ >= 16) { buffer[tid] = val = val + buffer[tid + 8]; } if (CTA_SIZE_ >= 8) { buffer[tid] = val = val + buffer[tid + 4]; } if (CTA_SIZE_ >= 4) { buffer[tid] = val = val + buffer[tid + 2]; } if (CTA_SIZE_ >= 2) { buffer[tid] = val = val + buffer[tid + 1]; } } } Mat33 Rcurr; float3 tcurr; PtrStep<float> vmap_curr; PtrStep<float> nmap_curr; Mat33 Rprev_inv; float3 tprev; Intr intr; PtrStep<float> vmap_g_prev; PtrStep<float> nmap_g_prev; float distThres; float angleThres; int cols; int rows; mutable PtrStep<float> gbuf; mutable PtrStep<float> rbuf; __device__ __forceinline__ bool search (int x, int y, float3& n, float3& d, float3& s) const { float3 ncurr; ncurr.x = nmap_curr.ptr (y)[x]; if (isnan (ncurr.x)) return (false); float3 vcurr; vcurr.x = vmap_curr.ptr (y )[x]; vcurr.y = 
vmap_curr.ptr (y + rows)[x]; vcurr.z = vmap_curr.ptr (y + 2 * rows)[x]; float3 vcurr_g = Rcurr * vcurr + tcurr; float3 vcurr_cp = Rprev_inv * (vcurr_g - tprev); // prev camera coo space int2 ukr; //projection ukr.x = __float2int_rn (vcurr_cp.x * intr.fx / vcurr_cp.z + intr.cx); //4 ukr.y = __float2int_rn (vcurr_cp.y * intr.fy / vcurr_cp.z + intr.cy); //4 if (ukr.x < 0 || ukr.y < 0 || ukr.x >= cols || ukr.y >= rows || vcurr_cp.z < 0) return (false); float3 nprev_g; nprev_g.x = nmap_g_prev.ptr (ukr.y)[ukr.x]; if (isnan (nprev_g.x)) return (false); float3 vprev_g; vprev_g.x = vmap_g_prev.ptr (ukr.y )[ukr.x]; vprev_g.y = vmap_g_prev.ptr (ukr.y + rows)[ukr.x]; vprev_g.z = vmap_g_prev.ptr (ukr.y + 2 * rows)[ukr.x]; float dist = norm (vprev_g - vcurr_g); if (dist > distThres) return (false); ncurr.y = nmap_curr.ptr (y + rows)[x]; ncurr.z = nmap_curr.ptr (y + 2 * rows)[x]; float3 ncurr_g = Rcurr * ncurr; nprev_g.y = nmap_g_prev.ptr (ukr.y + rows)[ukr.x]; nprev_g.z = nmap_g_prev.ptr (ukr.y + 2 * rows)[ukr.x]; float sine = norm (cross (ncurr_g, nprev_g)); if (sine >= angleThres) return (false); n = nprev_g; d = vprev_g; s = vcurr_g; return (true); } __device__ __forceinline__ void operator () () const { int x = threadIdx.x + blockIdx.x * CTA_SIZE_X; int y = threadIdx.y + blockIdx.y * CTA_SIZE_Y; float3 n, d, s; bool found_coresp = false; if (x < cols || y < rows) found_coresp = search (x, y, n, d, s); float row[7]; if(found_coresp) { float3 s_cp = Rprev_inv * (s - tprev); // prev camera coo space float3 d_cp = Rprev_inv * (d - tprev); // prev camera coo space float3 n_cp = Rprev_inv * (n); // prev camera coo space *(float3*)&row[0] = n_cp; *(float3*)&row[3] = cross (s_cp, n_cp); row[6] = dot (n_cp, s_cp - d_cp); if(isnan(row[6])) { found_coresp = false; row[0] = row[1] = row[2] = row[3] = row[4] = row[5] = row[6] = 0.f; } } else row[0] = row[1] = row[2] = row[3] = row[4] = row[5] = row[6] = 0.f; __shared__ float smem[CTA_SIZE]; int tid = threadIdx.z * blockDim.x * blockDim.y + threadIdx.y * blockDim.x + threadIdx.x; int shift = 0; for (int i = 0; i < 6; ++i) //rows { #pragma unroll for (int j = i; j < 7; ++j) // cols + b { __syncthreads (); smem[tid] = row[i] * row[j]; __syncthreads (); reduce<CTA_SIZE>(smem); if (tid == 0) gbuf.ptr (shift++)[blockIdx.x + gridDim.x * blockIdx.y] = smem[0]; } } //Residual summation __shared__ float rsmem[CTA_SIZE]; shift = 0; __syncthreads (); rsmem[tid] = row[6] * row[6]; __syncthreads (); reduce<CTA_SIZE>(rsmem); if(tid == 0) rbuf.ptr(shift++)[blockIdx.x + gridDim.x * blockIdx.y] = rsmem[0]; __syncthreads (); rsmem[tid] = found_coresp; __syncthreads (); reduce<CTA_SIZE>(rsmem); if(tid == 0) rbuf.ptr(shift++)[blockIdx.x + gridDim.x * blockIdx.y] = rsmem[0]; } }; __global__ void combinedKernel(const Combined cs) { cs(); } struct TranformReduction { enum { CTA_SIZE = 512, STRIDE = CTA_SIZE, B = 6, COLS = 6, ROWS = 6, DIAG = 6, UPPER_DIAG_MAT = (COLS * ROWS - DIAG) / 2 + DIAG, TOTAL = UPPER_DIAG_MAT + B, GRID_X = TOTAL }; template<int CTA_SIZE_, typename T> static __device__ __forceinline__ void reduce(volatile T* buffer) { int tid = threadIdx.z * blockDim.x * blockDim.y + threadIdx.y * blockDim.x + threadIdx.x; T val = buffer[tid]; if (CTA_SIZE_ >= 1024) { if (tid < 512) buffer[tid] = val = val + buffer[tid + 512]; __syncthreads(); } if (CTA_SIZE_ >= 512) { if (tid < 256) buffer[tid] = val = val + buffer[tid + 256]; __syncthreads(); } if (CTA_SIZE_ >= 256) { if (tid < 128) buffer[tid] = val = val + buffer[tid + 128]; __syncthreads(); } if (CTA_SIZE_ >= 128) { if (tid 
< 64) buffer[tid] = val = val + buffer[tid + 64]; __syncthreads(); } if (tid < 32) { if (CTA_SIZE_ >= 64) { buffer[tid] = val = val + buffer[tid + 32]; } if (CTA_SIZE_ >= 32) { buffer[tid] = val = val + buffer[tid + 16]; } if (CTA_SIZE_ >= 16) { buffer[tid] = val = val + buffer[tid + 8]; } if (CTA_SIZE_ >= 8) { buffer[tid] = val = val + buffer[tid + 4]; } if (CTA_SIZE_ >= 4) { buffer[tid] = val = val + buffer[tid + 2]; } if (CTA_SIZE_ >= 2) { buffer[tid] = val = val + buffer[tid + 1]; } } } PtrStep<float> gbuf; PtrStep<float> rbuf; int length; mutable float* output; mutable float* routput; __device__ __forceinline__ void operator () () const { const float *beg = gbuf.ptr (blockIdx.x); const float *end = beg + length; int tid = threadIdx.x; float sum = 0.f; for (const float *t = beg + tid; t < end; t += STRIDE) sum += *t; __shared__ float smem[CTA_SIZE]; smem[tid] = sum; __syncthreads (); reduce<CTA_SIZE>(smem); if(tid == 0) output[blockIdx.x] = smem[0]; const float *begr = rbuf.ptr(blockIdx.x); const float *endr = begr + length; sum = 0.f; for (const float *t = begr + tid; t < endr; t += STRIDE) sum += *t; __shared__ float rsmem[CTA_SIZE]; rsmem[tid] = sum; __syncthreads (); reduce<CTA_SIZE>(rsmem); if(tid == 0) routput[blockIdx.x] = rsmem[0]; } }; __global__ void TransformEstimatorKernel2(const TranformReduction tr) { tr(); } void estimateCombined(const Mat33& Rcurr, const float3& tcurr, const DeviceArray2D<float>& vmap_curr, const DeviceArray2D<float>& nmap_curr, const Mat33& Rprev_inv, const float3& tprev, const Intr& intr, const DeviceArray2D<float>& vmap_g_prev, const DeviceArray2D<float>& nmap_g_prev, float distThres, float angleThres, DeviceArray2D<float>& gbuf, DeviceArray<float>& mbuf, float* matrixA_host, float* vectorB_host, float * residual_host) { int cols = vmap_curr.cols (); int rows = vmap_curr.rows () / 3; Combined cs; cs.Rcurr = Rcurr; cs.tcurr = tcurr; cs.vmap_curr = vmap_curr; cs.nmap_curr = nmap_curr; cs.Rprev_inv = Rprev_inv; cs.tprev = tprev; cs.intr = intr; cs.vmap_g_prev = vmap_g_prev; cs.nmap_g_prev = nmap_g_prev; cs.distThres = distThres; cs.angleThres = angleThres; cs.cols = cols; cs.rows = rows; dim3 block (Combined::CTA_SIZE_X, Combined::CTA_SIZE_Y); dim3 grid (1, 1, 1); grid.x = divUp (cols, block.x); grid.y = divUp (rows, block.y); mbuf.create (TranformReduction::TOTAL); if(gbuf.rows () != TranformReduction::TOTAL || gbuf.cols () < (int)(grid.x * grid.y)) { gbuf.create (TranformReduction::TOTAL, grid.x * grid.y); } cs.gbuf = gbuf; static DeviceArray2D<float> rbuf; rbuf.create(2, grid.x * grid.y); static DeviceArray<float> rsbuf; rsbuf.create(2); cs.rbuf = rbuf; hipLaunchKernelGGL(( combinedKernel), dim3(grid), dim3(block), 0, 0, cs); cudaSafeCall ( hipGetLastError () ); TranformReduction tr; tr.gbuf = gbuf; tr.rbuf = rbuf; tr.length = grid.x * grid.y; tr.output = mbuf; tr.routput = rsbuf; hipLaunchKernelGGL(( TransformEstimatorKernel2), dim3(TranformReduction::TOTAL), dim3(TranformReduction::CTA_SIZE), 0, 0, tr); cudaSafeCall (hipGetLastError ()); cudaSafeCall (hipDeviceSynchronize ()); float host_data[TranformReduction::TOTAL]; mbuf.download (host_data); int shift = 0; for (int i = 0; i < 6; ++i) //rows { for (int j = i; j < 7; ++j) // cols + b { float value = host_data[shift++]; if (j == 6) // vector b vectorB_host[i] = value; else matrixA_host[j * 6 + i] = matrixA_host[i * 6 + j] = value; } } rsbuf.download(residual_host); }
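estimateCombined() above leaves its caller with the dense normal equations of one point-to-plane ICP iteration: matrixA_host holds the symmetric 6x6 system matrix, vectorB_host the 6-element right-hand side, and residual_host two floats (the summed squared point-to-plane error and the number of correspondences found). The sketch below is a minimal, hypothetical caller-side solve step, assuming Eigen is available; the meaning and sign of the six unknowns follow the row layout chosen in the kernel above (row[0..2] = n, row[3..5] = s x n), and solveIcpStep is an illustrative name, not part of this file.

#include <Eigen/Dense>
#include <cmath>

// Hypothetical helper: solve the A * x = b system produced by estimateCombined()
// (sketch). Because A is stored symmetrically, its row-/column-major order is
// immaterial.
bool solveIcpStep (const float A_host[36], const float b_host[6], const float residual[2],
                   Eigen::Matrix<float, 6, 1>& x)
{
  Eigen::Matrix<double, 6, 6> A =
      Eigen::Map<const Eigen::Matrix<float, 6, 6, Eigen::RowMajor> > (A_host).cast<double> ();
  Eigen::Matrix<double, 6, 1> b =
      Eigen::Map<const Eigen::Matrix<float, 6, 1> > (b_host).cast<double> ();

  // Reject a degenerate step: no correspondences, or a (near-)singular system.
  double det = A.determinant ();
  if (residual[1] < 1.f || std::fabs (det) < 1e-15 || std::isnan (det))
    return false;

  Eigen::Matrix<double, 6, 1> xd = A.llt ().solve (b);   // A is symmetric
  x = xd.cast<float> ();
  return true;
}

A caller would convert x into an incremental rotation/translation and compose it with (Rcurr, tcurr) before the next iteration; residual_host[0] / residual_host[1] gives the mean squared error per correspondence, which is a convenient convergence monitor.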
e6e5b7bf3176bcb046fa737eef5186adc6e04e7d.cu
/* * Software License Agreement (BSD License) * * Point Cloud Library (PCL) - www.pointclouds.org * Copyright (c) 2011, Willow Garage, Inc. * * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials provided * with the distribution. * * Neither the name of Willow Garage, Inc. nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. * */ #include <depth_map_icp/cuda/internal.h> #include <depth_map_icp/cuda/vector_math.hpp> #include <depth_map_icp/cuda/containers/safe_call.hpp> struct Combined { enum { CTA_SIZE_X = 32, CTA_SIZE_Y = 8, CTA_SIZE = CTA_SIZE_X * CTA_SIZE_Y }; template<int CTA_SIZE_, typename T> static __device__ __forceinline__ void reduce(volatile T* buffer) { int tid = threadIdx.z * blockDim.x * blockDim.y + threadIdx.y * blockDim.x + threadIdx.x; T val = buffer[tid]; if (CTA_SIZE_ >= 1024) { if (tid < 512) buffer[tid] = val = val + buffer[tid + 512]; __syncthreads(); } if (CTA_SIZE_ >= 512) { if (tid < 256) buffer[tid] = val = val + buffer[tid + 256]; __syncthreads(); } if (CTA_SIZE_ >= 256) { if (tid < 128) buffer[tid] = val = val + buffer[tid + 128]; __syncthreads(); } if (CTA_SIZE_ >= 128) { if (tid < 64) buffer[tid] = val = val + buffer[tid + 64]; __syncthreads(); } if (tid < 32) { if (CTA_SIZE_ >= 64) { buffer[tid] = val = val + buffer[tid + 32]; } if (CTA_SIZE_ >= 32) { buffer[tid] = val = val + buffer[tid + 16]; } if (CTA_SIZE_ >= 16) { buffer[tid] = val = val + buffer[tid + 8]; } if (CTA_SIZE_ >= 8) { buffer[tid] = val = val + buffer[tid + 4]; } if (CTA_SIZE_ >= 4) { buffer[tid] = val = val + buffer[tid + 2]; } if (CTA_SIZE_ >= 2) { buffer[tid] = val = val + buffer[tid + 1]; } } } Mat33 Rcurr; float3 tcurr; PtrStep<float> vmap_curr; PtrStep<float> nmap_curr; Mat33 Rprev_inv; float3 tprev; Intr intr; PtrStep<float> vmap_g_prev; PtrStep<float> nmap_g_prev; float distThres; float angleThres; int cols; int rows; mutable PtrStep<float> gbuf; mutable PtrStep<float> rbuf; __device__ __forceinline__ bool search (int x, int y, float3& n, float3& d, float3& s) const { float3 ncurr; ncurr.x = nmap_curr.ptr (y)[x]; if (isnan (ncurr.x)) return (false); float3 vcurr; vcurr.x = vmap_curr.ptr (y )[x]; vcurr.y = vmap_curr.ptr (y + rows)[x]; vcurr.z = vmap_curr.ptr (y + 2 * rows)[x]; float3 vcurr_g = Rcurr 
* vcurr + tcurr; float3 vcurr_cp = Rprev_inv * (vcurr_g - tprev); // prev camera coo space int2 ukr; //projection ukr.x = __float2int_rn (vcurr_cp.x * intr.fx / vcurr_cp.z + intr.cx); //4 ukr.y = __float2int_rn (vcurr_cp.y * intr.fy / vcurr_cp.z + intr.cy); //4 if (ukr.x < 0 || ukr.y < 0 || ukr.x >= cols || ukr.y >= rows || vcurr_cp.z < 0) return (false); float3 nprev_g; nprev_g.x = nmap_g_prev.ptr (ukr.y)[ukr.x]; if (isnan (nprev_g.x)) return (false); float3 vprev_g; vprev_g.x = vmap_g_prev.ptr (ukr.y )[ukr.x]; vprev_g.y = vmap_g_prev.ptr (ukr.y + rows)[ukr.x]; vprev_g.z = vmap_g_prev.ptr (ukr.y + 2 * rows)[ukr.x]; float dist = norm (vprev_g - vcurr_g); if (dist > distThres) return (false); ncurr.y = nmap_curr.ptr (y + rows)[x]; ncurr.z = nmap_curr.ptr (y + 2 * rows)[x]; float3 ncurr_g = Rcurr * ncurr; nprev_g.y = nmap_g_prev.ptr (ukr.y + rows)[ukr.x]; nprev_g.z = nmap_g_prev.ptr (ukr.y + 2 * rows)[ukr.x]; float sine = norm (cross (ncurr_g, nprev_g)); if (sine >= angleThres) return (false); n = nprev_g; d = vprev_g; s = vcurr_g; return (true); } __device__ __forceinline__ void operator () () const { int x = threadIdx.x + blockIdx.x * CTA_SIZE_X; int y = threadIdx.y + blockIdx.y * CTA_SIZE_Y; float3 n, d, s; bool found_coresp = false; if (x < cols || y < rows) found_coresp = search (x, y, n, d, s); float row[7]; if(found_coresp) { float3 s_cp = Rprev_inv * (s - tprev); // prev camera coo space float3 d_cp = Rprev_inv * (d - tprev); // prev camera coo space float3 n_cp = Rprev_inv * (n); // prev camera coo space *(float3*)&row[0] = n_cp; *(float3*)&row[3] = cross (s_cp, n_cp); row[6] = dot (n_cp, s_cp - d_cp); if(isnan(row[6])) { found_coresp = false; row[0] = row[1] = row[2] = row[3] = row[4] = row[5] = row[6] = 0.f; } } else row[0] = row[1] = row[2] = row[3] = row[4] = row[5] = row[6] = 0.f; __shared__ float smem[CTA_SIZE]; int tid = threadIdx.z * blockDim.x * blockDim.y + threadIdx.y * blockDim.x + threadIdx.x; int shift = 0; for (int i = 0; i < 6; ++i) //rows { #pragma unroll for (int j = i; j < 7; ++j) // cols + b { __syncthreads (); smem[tid] = row[i] * row[j]; __syncthreads (); reduce<CTA_SIZE>(smem); if (tid == 0) gbuf.ptr (shift++)[blockIdx.x + gridDim.x * blockIdx.y] = smem[0]; } } //Residual summation __shared__ float rsmem[CTA_SIZE]; shift = 0; __syncthreads (); rsmem[tid] = row[6] * row[6]; __syncthreads (); reduce<CTA_SIZE>(rsmem); if(tid == 0) rbuf.ptr(shift++)[blockIdx.x + gridDim.x * blockIdx.y] = rsmem[0]; __syncthreads (); rsmem[tid] = found_coresp; __syncthreads (); reduce<CTA_SIZE>(rsmem); if(tid == 0) rbuf.ptr(shift++)[blockIdx.x + gridDim.x * blockIdx.y] = rsmem[0]; } }; __global__ void combinedKernel(const Combined cs) { cs(); } struct TranformReduction { enum { CTA_SIZE = 512, STRIDE = CTA_SIZE, B = 6, COLS = 6, ROWS = 6, DIAG = 6, UPPER_DIAG_MAT = (COLS * ROWS - DIAG) / 2 + DIAG, TOTAL = UPPER_DIAG_MAT + B, GRID_X = TOTAL }; template<int CTA_SIZE_, typename T> static __device__ __forceinline__ void reduce(volatile T* buffer) { int tid = threadIdx.z * blockDim.x * blockDim.y + threadIdx.y * blockDim.x + threadIdx.x; T val = buffer[tid]; if (CTA_SIZE_ >= 1024) { if (tid < 512) buffer[tid] = val = val + buffer[tid + 512]; __syncthreads(); } if (CTA_SIZE_ >= 512) { if (tid < 256) buffer[tid] = val = val + buffer[tid + 256]; __syncthreads(); } if (CTA_SIZE_ >= 256) { if (tid < 128) buffer[tid] = val = val + buffer[tid + 128]; __syncthreads(); } if (CTA_SIZE_ >= 128) { if (tid < 64) buffer[tid] = val = val + buffer[tid + 64]; __syncthreads(); } if (tid < 32) { if 
(CTA_SIZE_ >= 64) { buffer[tid] = val = val + buffer[tid + 32]; } if (CTA_SIZE_ >= 32) { buffer[tid] = val = val + buffer[tid + 16]; } if (CTA_SIZE_ >= 16) { buffer[tid] = val = val + buffer[tid + 8]; } if (CTA_SIZE_ >= 8) { buffer[tid] = val = val + buffer[tid + 4]; } if (CTA_SIZE_ >= 4) { buffer[tid] = val = val + buffer[tid + 2]; } if (CTA_SIZE_ >= 2) { buffer[tid] = val = val + buffer[tid + 1]; } } } PtrStep<float> gbuf; PtrStep<float> rbuf; int length; mutable float* output; mutable float* routput; __device__ __forceinline__ void operator () () const { const float *beg = gbuf.ptr (blockIdx.x); const float *end = beg + length; int tid = threadIdx.x; float sum = 0.f; for (const float *t = beg + tid; t < end; t += STRIDE) sum += *t; __shared__ float smem[CTA_SIZE]; smem[tid] = sum; __syncthreads (); reduce<CTA_SIZE>(smem); if(tid == 0) output[blockIdx.x] = smem[0]; const float *begr = rbuf.ptr(blockIdx.x); const float *endr = begr + length; sum = 0.f; for (const float *t = begr + tid; t < endr; t += STRIDE) sum += *t; __shared__ float rsmem[CTA_SIZE]; rsmem[tid] = sum; __syncthreads (); reduce<CTA_SIZE>(rsmem); if(tid == 0) routput[blockIdx.x] = rsmem[0]; } }; __global__ void TransformEstimatorKernel2(const TranformReduction tr) { tr(); } void estimateCombined(const Mat33& Rcurr, const float3& tcurr, const DeviceArray2D<float>& vmap_curr, const DeviceArray2D<float>& nmap_curr, const Mat33& Rprev_inv, const float3& tprev, const Intr& intr, const DeviceArray2D<float>& vmap_g_prev, const DeviceArray2D<float>& nmap_g_prev, float distThres, float angleThres, DeviceArray2D<float>& gbuf, DeviceArray<float>& mbuf, float* matrixA_host, float* vectorB_host, float * residual_host) { int cols = vmap_curr.cols (); int rows = vmap_curr.rows () / 3; Combined cs; cs.Rcurr = Rcurr; cs.tcurr = tcurr; cs.vmap_curr = vmap_curr; cs.nmap_curr = nmap_curr; cs.Rprev_inv = Rprev_inv; cs.tprev = tprev; cs.intr = intr; cs.vmap_g_prev = vmap_g_prev; cs.nmap_g_prev = nmap_g_prev; cs.distThres = distThres; cs.angleThres = angleThres; cs.cols = cols; cs.rows = rows; dim3 block (Combined::CTA_SIZE_X, Combined::CTA_SIZE_Y); dim3 grid (1, 1, 1); grid.x = divUp (cols, block.x); grid.y = divUp (rows, block.y); mbuf.create (TranformReduction::TOTAL); if(gbuf.rows () != TranformReduction::TOTAL || gbuf.cols () < (int)(grid.x * grid.y)) { gbuf.create (TranformReduction::TOTAL, grid.x * grid.y); } cs.gbuf = gbuf; static DeviceArray2D<float> rbuf; rbuf.create(2, grid.x * grid.y); static DeviceArray<float> rsbuf; rsbuf.create(2); cs.rbuf = rbuf; combinedKernel<<<grid, block>>>(cs); cudaSafeCall ( cudaGetLastError () ); TranformReduction tr; tr.gbuf = gbuf; tr.rbuf = rbuf; tr.length = grid.x * grid.y; tr.output = mbuf; tr.routput = rsbuf; TransformEstimatorKernel2<<<TranformReduction::TOTAL, TranformReduction::CTA_SIZE>>>(tr); cudaSafeCall (cudaGetLastError ()); cudaSafeCall (cudaDeviceSynchronize ()); float host_data[TranformReduction::TOTAL]; mbuf.download (host_data); int shift = 0; for (int i = 0; i < 6; ++i) //rows { for (int j = i; j < 7; ++j) // cols + b { float value = host_data[shift++]; if (j == 6) // vector b vectorB_host[i] = value; else matrixA_host[j * 6 + i] = matrixA_host[i * 6 + j] = value; } } rsbuf.download(residual_host); }
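The .cu listing above is the CUDA original of the hipified file earlier in this pair; apart from the added runtime header, the only mechanical differences are the runtime-API and kernel-launch spellings. The comparison below is an illustration, not part of either file:

//   CUDA:  combinedKernel<<<grid, block>>>(cs);
//          cudaSafeCall (cudaGetLastError ());
//          cudaSafeCall (cudaDeviceSynchronize ());
//
//   HIP:   hipLaunchKernelGGL(combinedKernel, dim3(grid), dim3(block), 0, 0, cs);
//          cudaSafeCall (hipGetLastError ());
//          cudaSafeCall (hipDeviceSynchronize ());

The two extra arguments that hipLaunchKernelGGL inserts (0, 0) are the dynamic shared-memory size in bytes and the stream, both of which the triple-chevron launch leaves at their defaults.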
7be1dbb719281c7e5767c4c300c2e9d95d0e8e6d.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #ifndef __HIPCC__ #define __HIPCC__ #endif #include "hip/device_functions.h" #include <stdio.h> #include <stdlib.h> #include <string.h> #include <math.h> #define DEFAULT_THRESHOLD 8000 #define DEFAULT_FILENAME "BWstop-sign.ppm" #define MASK_WIDTH 3 #define HALF_MASK_WIDTH (MASK_WIDTH/2) #define TILE_WIDTH 16 #define INPUT_TILE_WIDTH (TILE_WIDTH+(2*HALF_MASK_WIDTH)) __constant__ int Mx[MASK_WIDTH*MASK_WIDTH]; __constant__ int My[MASK_WIDTH*MASK_WIDTH]; __global__ void filter(int* output, const unsigned int* const input, const int xsize, const int ysize, const int thresh) { __shared__ int S[INPUT_TILE_WIDTH*INPUT_TILE_WIDTH]; int2 global = make_int2(blockIdx.x*blockDim.x + threadIdx.x, blockIdx.y*blockDim.y + threadIdx.y); unsigned int t = global.y*xsize + global.x; int2 tile = make_int2(threadIdx.x + HALF_MASK_WIDTH, threadIdx.y + HALF_MASK_WIDTH); if (global.x >= xsize || global.y >= ysize) { return; } // Offset by half tile width in shared memory becuase of halo, px(0,0) stored at s[1,1] S[(tile.y)*INPUT_TILE_WIDTH + tile.x] = input[t]; // Border threads fetch the halo or ghost pixels // Left halo if (threadIdx.x < HALF_MASK_WIDTH) { S[(tile.y)*INPUT_TILE_WIDTH + threadIdx.x] = global.x < HALF_MASK_WIDTH ? 0 : input[t - HALF_MASK_WIDTH]; // Top Left corner if (threadIdx.y < HALF_MASK_WIDTH) { S[(threadIdx.y)*INPUT_TILE_WIDTH + threadIdx.x] = (global.y < HALF_MASK_WIDTH || global.x < HALF_MASK_WIDTH) ? 0 : input[t - HALF_MASK_WIDTH - xsize]; } } // Top Halo if (threadIdx.y < HALF_MASK_WIDTH) { S[(threadIdx.y)*INPUT_TILE_WIDTH + tile.x] = global.y < HALF_MASK_WIDTH ? 0 : input[t - xsize]; // Top Right corner if (threadIdx.x >= blockDim.x - HALF_MASK_WIDTH) { S[(threadIdx.y)*INPUT_TILE_WIDTH + tile.x + HALF_MASK_WIDTH] = global.y < HALF_MASK_WIDTH || global.x >= xsize - HALF_MASK_WIDTH ? 0 : input[t - xsize + HALF_MASK_WIDTH]; } } // Right Halo if (threadIdx.x >= blockDim.x - HALF_MASK_WIDTH) { S[(tile.y)*INPUT_TILE_WIDTH + tile.x + HALF_MASK_WIDTH] = global.x >= xsize - HALF_MASK_WIDTH ? 0 : input[t + HALF_MASK_WIDTH]; // Bottom Right Corner if (threadIdx.y >= blockDim.y - HALF_MASK_WIDTH) { S[(tile.y + HALF_MASK_WIDTH)*INPUT_TILE_WIDTH + tile.x + HALF_MASK_WIDTH] = global.x >= xsize - HALF_MASK_WIDTH || global.y >= ysize - HALF_MASK_WIDTH ? 0 : input[t + HALF_MASK_WIDTH + xsize]; } } // Bottom Halo if (threadIdx.y >= blockDim.y - HALF_MASK_WIDTH) { S[(tile.y + HALF_MASK_WIDTH)*INPUT_TILE_WIDTH + tile.x] = global.y >= ysize - HALF_MASK_WIDTH ? 0 : input[t + xsize]; // Bottom Left corner if (threadIdx.x < HALF_MASK_WIDTH) { S[(tile.y + HALF_MASK_WIDTH)*INPUT_TILE_WIDTH + threadIdx.x] = global.x < HALF_MASK_WIDTH || global.y >= ysize - HALF_MASK_WIDTH ? 
0 : input[t + xsize - HALF_MASK_WIDTH]; } } __syncthreads(); // HALF_MASK_WIDTH border if (global.x < HALF_MASK_WIDTH || global.x >= xsize - HALF_MASK_WIDTH || global.y <HALF_MASK_WIDTH || global.y >= ysize - HALF_MASK_WIDTH) { return; } int px = 0; int py = 0; int start_x = tile.x - HALF_MASK_WIDTH; int start_y = tile.y - HALF_MASK_WIDTH; for (int r = 0; r < MASK_WIDTH; r++) { int row = start_y + r; for (int c = 0; c < MASK_WIDTH; c++) { int col = start_x + c; px += S[row*INPUT_TILE_WIDTH + col] * Mx[r*MASK_WIDTH + c]; py += S[row*INPUT_TILE_WIDTH + col] * My[r*MASK_WIDTH + c]; } } if ((px*px + py*py) > thresh) { output[t] = 255; } else { output[t] = 0; } } unsigned int *read_ppm( char *filename, int * xsize, int * ysize, int *maxval ){ if ( !filename || filename[0] == '\0') { fprintf(stderr, "read_ppm but no file name\n"); return NULL; // fail } FILE *fp; fprintf(stderr, "read_ppm( %s )\n", filename); fp = fopen( filename, "rb"); if (!fp) { fprintf(stderr, "read_ppm() ERROR file '%s' cannot be opened for reading\n", filename); return NULL; // fail } char chars[1024]; //int num = read(fd, chars, 1000); int num = fread(chars, sizeof(char), 1000, fp); if (chars[0] != 'P' || chars[1] != '6') { fprintf(stderr, "Texture::Texture() ERROR file '%s' does not start with \"P6\" I am expecting a binary PPM file\n", filename); return NULL; } unsigned int width, height, maxvalue; char *ptr = chars+3; // P 6 newline if (*ptr == '#') // comment line! { ptr = 1 + strstr(ptr, "\n"); } num = sscanf(ptr, "%d\n%d\n%d", &width, &height, &maxvalue); fprintf(stderr, "read %d things width %d height %d maxval %d\n", num, width, height, maxvalue); *xsize = width; *ysize = height; *maxval = maxvalue; unsigned int *pic = (unsigned int *)malloc( width * height * sizeof(unsigned int)); if (!pic) { fprintf(stderr, "read_ppm() unable to allocate %d x %d unsigned ints for the picture\n", width, height); return NULL; // fail but return } // allocate buffer to read the rest of the file into int bufsize = 3 * width * height * sizeof(unsigned char); if ((*maxval) > 255) bufsize *= 2; unsigned char *buf = (unsigned char *)malloc( bufsize ); if (!buf) { fprintf(stderr, "read_ppm() unable to allocate %d bytes of read buffer\n", bufsize); return NULL; // fail but return } // really read char duh[80]; char *line = chars; // find the start of the pixel data. 
sprintf(duh, "%d\0", *xsize); line = strstr(line, duh); //fprintf(stderr, "%s found at offset %d\n", duh, line-chars); line += strlen(duh) + 1; sprintf(duh, "%d\0", *ysize); line = strstr(line, duh); //fprintf(stderr, "%s found at offset %d\n", duh, line-chars); line += strlen(duh) + 1; sprintf(duh, "%d\0", *maxval); line = strstr(line, duh); fprintf(stderr, "%s found at offset %d\n", duh, line - chars); line += strlen(duh) + 1; long offset = line - chars; //lseek(fd, offset, SEEK_SET); // move to the correct offset fseek(fp, offset, SEEK_SET); // move to the correct offset //long numread = read(fd, buf, bufsize); long numread = fread(buf, sizeof(char), bufsize, fp); fprintf(stderr, "Texture %s read %ld of %ld bytes\n", filename, numread, bufsize); fclose(fp); int pixels = (*xsize) * (*ysize); for (int i=0; i<pixels; i++) pic[i] = (int) buf[3*i]; // red channel return pic; // success } void write_ppm( char *filename, int xsize, int ysize, int maxval, int *pic) { FILE *fp; int x,y; fp = fopen(filename, "wb"); if (!fp) { fprintf(stderr, "FAILED TO OPEN FILE '%s' for writing\n", filename); exit(-1); } fprintf(fp, "P6\n"); fprintf(fp,"%d %d\n%d\n", xsize, ysize, maxval); int numpix = xsize * ysize; for (int i=0; i<numpix; i++) { unsigned char uc = (unsigned char) pic[i]; fprintf(fp, "%c%c%c", uc, uc, uc); } fclose(fp); } void compareImages(int* expected, int* actual, int xsize, int ysize) { char* result = "PASSED"; for(int i=0;i<ysize;i++) { for(int j=0; j<xsize;j++) { if(expected[i*xsize + j] !=actual[i*xsize+j]) { printf("Mismatch at (%d, %d): Expected/Actual = %d/%d\n", i, j, expected[i*xsize + j], actual[i*xsize + j]); result = "FAILED"; } } } printf("Test %s\n", result); } int main(int argc, char **argv) { int thresh = DEFAULT_THRESHOLD; char *filename; filename = strdup(DEFAULT_FILENAME); if (argc > 1) { if (argc == 3) { // filename AND threshold filename = strdup(argv[1]); thresh = atoi(argv[2]); } if (argc == 2) { // default file but specified threshhold thresh = atoi(argv[1]); } fprintf(stderr, "file %s threshold %d\n", filename, thresh); } int xsize, ysize, maxval; unsigned int *pic = read_ppm(filename, &xsize, &ysize, &maxval); printf("Compute Gold start\n"); int numbytes = xsize * ysize * sizeof(int); int *result = (int *)malloc(numbytes); if (!result) { fprintf(stderr, "sobel() unable to malloc %d bytes\n", numbytes); exit(-1); // fail } int i, j, magnitude, sum1, sum2; for (int row = 0; row < ysize; row++) { for (int col = 0; col < xsize; col++) { result[row*xsize + col] = 0; } } for (i = 1; i < ysize - 1; i++) { for (j = 1; j < xsize - 1; j++) { int offset = i*xsize + j; sum1 = pic[xsize * (i - 1) + j + 1] - pic[xsize*(i - 1) + j - 1] + 2 * pic[xsize * (i)+j + 1] - 2 * pic[xsize*(i)+j - 1] + pic[xsize * (i + 1) + j + 1] - pic[xsize*(i + 1) + j - 1]; sum2 = pic[xsize * (i - 1) + j - 1] + 2 * pic[xsize * (i - 1) + j] + pic[xsize * (i - 1) + j + 1] - pic[xsize * (i + 1) + j - 1] - 2 * pic[xsize * (i + 1) + j] - pic[xsize * (i + 1) + j + 1]; magnitude = sum1*sum1 + sum2*sum2; if (magnitude > thresh) result[offset] = 255; else result[offset] = 0; } } printf("Compute Gold Complete\n"); write_ppm("result8000gold.ppm", xsize, ysize, 255, result); printf("Compute CUDA start\n"); int h_Mx[MASK_WIDTH*MASK_WIDTH] = { -1, 0, 1, -2, 0, 2, -1, 0, 1 }; int h_My[MASK_WIDTH*MASK_WIDTH] = { -1, -2, -1, 0, 0, 0, 1, 2, 1 }; /*int h_Mx[MASK_WIDTH*MASK_WIDTH] = { -2, -1, 0, 1, 2, -3, -2, 0, 2, 3, -4, -3, 0, 3, 4, -3, -2, 0, 2, 3, -2, -1, 0, 1, 2, }; int h_My[MASK_WIDTH*MASK_WIDTH] = { -2, -3, -4, -3, 
-2, -1, -2, -3, -2, -1, 0, 0, 0, 0, 0, 1, 2, 3, 2, 1, 2, 3, 4, 3, 2 };*/ hipMemcpyToSymbol(Mx, h_Mx, MASK_WIDTH*MASK_WIDTH * sizeof(int)); hipMemcpyToSymbol(My, h_My, MASK_WIDTH *MASK_WIDTH * sizeof(int)); int channelBytes = xsize*ysize * sizeof(int); unsigned int *d_pic; int* h_result, *d_result; h_result = (int*)malloc(channelBytes); hipMalloc(&d_pic, xsize*ysize * sizeof(unsigned int)); hipMalloc(&d_result, channelBytes); hipMemcpy(d_pic, pic, xsize*ysize * sizeof(unsigned int), hipMemcpyHostToDevice); dim3 blockSize(TILE_WIDTH, TILE_WIDTH); dim3 gridSize((int)ceil((float)xsize / blockSize.x), (int)ceil((float)ysize / blockSize.y), 1); hipLaunchKernelGGL(( filter), dim3(gridSize), dim3(blockSize), 0, 0, d_result, d_pic, xsize, ysize, thresh); hipMemcpy(h_result, d_result, channelBytes, hipMemcpyDeviceToHost); hipDeviceSynchronize(); printf("Compute CUDA Complete\n"); write_ppm("result_cuda.ppm", xsize, ysize, 255, h_result); compareImages(result, h_result, xsize, ysize); hipFree(d_pic); hipFree(d_result); free(pic); free(result); free(h_result); fprintf(stderr, "sobel done\n"); }
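One thing the main() above omits is error checking around the kernel launch: hipLaunchKernelGGL and the following hipMemcpy are issued back to back with no call to hipGetLastError. A minimal sketch of how the launch could be guarded, reusing the variable names from the file (this is an illustrative addition, not code from the original):

// Illustrative only: guard the launch and surface execution errors before the copy-back.
hipLaunchKernelGGL((filter), dim3(gridSize), dim3(blockSize), 0, 0, d_result, d_pic, xsize, ysize, thresh);
hipError_t err = hipGetLastError();            // launch/configuration errors
if (err != hipSuccess) {
    fprintf(stderr, "filter launch failed: %s\n", hipGetErrorString(err));
    exit(EXIT_FAILURE);
}
err = hipDeviceSynchronize();                  // errors raised while the kernel runs
if (err != hipSuccess) {
    fprintf(stderr, "filter execution failed: %s\n", hipGetErrorString(err));
    exit(EXIT_FAILURE);
}
hipMemcpy(h_result, d_result, channelBytes, hipMemcpyDeviceToHost);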
7be1dbb719281c7e5767c4c300c2e9d95d0e8e6d.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #ifndef __CUDACC__ #define __CUDACC__ #endif #include "device_functions.h" #include <stdio.h> #include <stdlib.h> #include <string.h> #include <math.h> #define DEFAULT_THRESHOLD 8000 #define DEFAULT_FILENAME "BWstop-sign.ppm" #define MASK_WIDTH 3 #define HALF_MASK_WIDTH (MASK_WIDTH/2) #define TILE_WIDTH 16 #define INPUT_TILE_WIDTH (TILE_WIDTH+(2*HALF_MASK_WIDTH)) __constant__ int Mx[MASK_WIDTH*MASK_WIDTH]; __constant__ int My[MASK_WIDTH*MASK_WIDTH]; __global__ void filter(int* output, const unsigned int* const input, const int xsize, const int ysize, const int thresh) { __shared__ int S[INPUT_TILE_WIDTH*INPUT_TILE_WIDTH]; int2 global = make_int2(blockIdx.x*blockDim.x + threadIdx.x, blockIdx.y*blockDim.y + threadIdx.y); unsigned int t = global.y*xsize + global.x; int2 tile = make_int2(threadIdx.x + HALF_MASK_WIDTH, threadIdx.y + HALF_MASK_WIDTH); if (global.x >= xsize || global.y >= ysize) { return; } // Offset by half tile width in shared memory becuase of halo, px(0,0) stored at s[1,1] S[(tile.y)*INPUT_TILE_WIDTH + tile.x] = input[t]; // Border threads fetch the halo or ghost pixels // Left halo if (threadIdx.x < HALF_MASK_WIDTH) { S[(tile.y)*INPUT_TILE_WIDTH + threadIdx.x] = global.x < HALF_MASK_WIDTH ? 0 : input[t - HALF_MASK_WIDTH]; // Top Left corner if (threadIdx.y < HALF_MASK_WIDTH) { S[(threadIdx.y)*INPUT_TILE_WIDTH + threadIdx.x] = (global.y < HALF_MASK_WIDTH || global.x < HALF_MASK_WIDTH) ? 0 : input[t - HALF_MASK_WIDTH - xsize]; } } // Top Halo if (threadIdx.y < HALF_MASK_WIDTH) { S[(threadIdx.y)*INPUT_TILE_WIDTH + tile.x] = global.y < HALF_MASK_WIDTH ? 0 : input[t - xsize]; // Top Right corner if (threadIdx.x >= blockDim.x - HALF_MASK_WIDTH) { S[(threadIdx.y)*INPUT_TILE_WIDTH + tile.x + HALF_MASK_WIDTH] = global.y < HALF_MASK_WIDTH || global.x >= xsize - HALF_MASK_WIDTH ? 0 : input[t - xsize + HALF_MASK_WIDTH]; } } // Right Halo if (threadIdx.x >= blockDim.x - HALF_MASK_WIDTH) { S[(tile.y)*INPUT_TILE_WIDTH + tile.x + HALF_MASK_WIDTH] = global.x >= xsize - HALF_MASK_WIDTH ? 0 : input[t + HALF_MASK_WIDTH]; // Bottom Right Corner if (threadIdx.y >= blockDim.y - HALF_MASK_WIDTH) { S[(tile.y + HALF_MASK_WIDTH)*INPUT_TILE_WIDTH + tile.x + HALF_MASK_WIDTH] = global.x >= xsize - HALF_MASK_WIDTH || global.y >= ysize - HALF_MASK_WIDTH ? 0 : input[t + HALF_MASK_WIDTH + xsize]; } } // Bottom Halo if (threadIdx.y >= blockDim.y - HALF_MASK_WIDTH) { S[(tile.y + HALF_MASK_WIDTH)*INPUT_TILE_WIDTH + tile.x] = global.y >= ysize - HALF_MASK_WIDTH ? 0 : input[t + xsize]; // Bottom Left corner if (threadIdx.x < HALF_MASK_WIDTH) { S[(tile.y + HALF_MASK_WIDTH)*INPUT_TILE_WIDTH + threadIdx.x] = global.x < HALF_MASK_WIDTH || global.y >= ysize - HALF_MASK_WIDTH ? 
0 : input[t + xsize - HALF_MASK_WIDTH]; } } __syncthreads(); // HALF_MASK_WIDTH border if (global.x < HALF_MASK_WIDTH || global.x >= xsize - HALF_MASK_WIDTH || global.y <HALF_MASK_WIDTH || global.y >= ysize - HALF_MASK_WIDTH) { return; } int px = 0; int py = 0; int start_x = tile.x - HALF_MASK_WIDTH; int start_y = tile.y - HALF_MASK_WIDTH; for (int r = 0; r < MASK_WIDTH; r++) { int row = start_y + r; for (int c = 0; c < MASK_WIDTH; c++) { int col = start_x + c; px += S[row*INPUT_TILE_WIDTH + col] * Mx[r*MASK_WIDTH + c]; py += S[row*INPUT_TILE_WIDTH + col] * My[r*MASK_WIDTH + c]; } } if ((px*px + py*py) > thresh) { output[t] = 255; } else { output[t] = 0; } } unsigned int *read_ppm( char *filename, int * xsize, int * ysize, int *maxval ){ if ( !filename || filename[0] == '\0') { fprintf(stderr, "read_ppm but no file name\n"); return NULL; // fail } FILE *fp; fprintf(stderr, "read_ppm( %s )\n", filename); fp = fopen( filename, "rb"); if (!fp) { fprintf(stderr, "read_ppm() ERROR file '%s' cannot be opened for reading\n", filename); return NULL; // fail } char chars[1024]; //int num = read(fd, chars, 1000); int num = fread(chars, sizeof(char), 1000, fp); if (chars[0] != 'P' || chars[1] != '6') { fprintf(stderr, "Texture::Texture() ERROR file '%s' does not start with \"P6\" I am expecting a binary PPM file\n", filename); return NULL; } unsigned int width, height, maxvalue; char *ptr = chars+3; // P 6 newline if (*ptr == '#') // comment line! { ptr = 1 + strstr(ptr, "\n"); } num = sscanf(ptr, "%d\n%d\n%d", &width, &height, &maxvalue); fprintf(stderr, "read %d things width %d height %d maxval %d\n", num, width, height, maxvalue); *xsize = width; *ysize = height; *maxval = maxvalue; unsigned int *pic = (unsigned int *)malloc( width * height * sizeof(unsigned int)); if (!pic) { fprintf(stderr, "read_ppm() unable to allocate %d x %d unsigned ints for the picture\n", width, height); return NULL; // fail but return } // allocate buffer to read the rest of the file into int bufsize = 3 * width * height * sizeof(unsigned char); if ((*maxval) > 255) bufsize *= 2; unsigned char *buf = (unsigned char *)malloc( bufsize ); if (!buf) { fprintf(stderr, "read_ppm() unable to allocate %d bytes of read buffer\n", bufsize); return NULL; // fail but return } // really read char duh[80]; char *line = chars; // find the start of the pixel data. 
sprintf(duh, "%d\0", *xsize); line = strstr(line, duh); //fprintf(stderr, "%s found at offset %d\n", duh, line-chars); line += strlen(duh) + 1; sprintf(duh, "%d\0", *ysize); line = strstr(line, duh); //fprintf(stderr, "%s found at offset %d\n", duh, line-chars); line += strlen(duh) + 1; sprintf(duh, "%d\0", *maxval); line = strstr(line, duh); fprintf(stderr, "%s found at offset %d\n", duh, line - chars); line += strlen(duh) + 1; long offset = line - chars; //lseek(fd, offset, SEEK_SET); // move to the correct offset fseek(fp, offset, SEEK_SET); // move to the correct offset //long numread = read(fd, buf, bufsize); long numread = fread(buf, sizeof(char), bufsize, fp); fprintf(stderr, "Texture %s read %ld of %ld bytes\n", filename, numread, bufsize); fclose(fp); int pixels = (*xsize) * (*ysize); for (int i=0; i<pixels; i++) pic[i] = (int) buf[3*i]; // red channel return pic; // success } void write_ppm( char *filename, int xsize, int ysize, int maxval, int *pic) { FILE *fp; int x,y; fp = fopen(filename, "wb"); if (!fp) { fprintf(stderr, "FAILED TO OPEN FILE '%s' for writing\n", filename); exit(-1); } fprintf(fp, "P6\n"); fprintf(fp,"%d %d\n%d\n", xsize, ysize, maxval); int numpix = xsize * ysize; for (int i=0; i<numpix; i++) { unsigned char uc = (unsigned char) pic[i]; fprintf(fp, "%c%c%c", uc, uc, uc); } fclose(fp); } void compareImages(int* expected, int* actual, int xsize, int ysize) { char* result = "PASSED"; for(int i=0;i<ysize;i++) { for(int j=0; j<xsize;j++) { if(expected[i*xsize + j] !=actual[i*xsize+j]) { printf("Mismatch at (%d, %d): Expected/Actual = %d/%d\n", i, j, expected[i*xsize + j], actual[i*xsize + j]); result = "FAILED"; } } } printf("Test %s\n", result); } int main(int argc, char **argv) { int thresh = DEFAULT_THRESHOLD; char *filename; filename = strdup(DEFAULT_FILENAME); if (argc > 1) { if (argc == 3) { // filename AND threshold filename = strdup(argv[1]); thresh = atoi(argv[2]); } if (argc == 2) { // default file but specified threshhold thresh = atoi(argv[1]); } fprintf(stderr, "file %s threshold %d\n", filename, thresh); } int xsize, ysize, maxval; unsigned int *pic = read_ppm(filename, &xsize, &ysize, &maxval); printf("Compute Gold start\n"); int numbytes = xsize * ysize * sizeof(int); int *result = (int *)malloc(numbytes); if (!result) { fprintf(stderr, "sobel() unable to malloc %d bytes\n", numbytes); exit(-1); // fail } int i, j, magnitude, sum1, sum2; for (int row = 0; row < ysize; row++) { for (int col = 0; col < xsize; col++) { result[row*xsize + col] = 0; } } for (i = 1; i < ysize - 1; i++) { for (j = 1; j < xsize - 1; j++) { int offset = i*xsize + j; sum1 = pic[xsize * (i - 1) + j + 1] - pic[xsize*(i - 1) + j - 1] + 2 * pic[xsize * (i)+j + 1] - 2 * pic[xsize*(i)+j - 1] + pic[xsize * (i + 1) + j + 1] - pic[xsize*(i + 1) + j - 1]; sum2 = pic[xsize * (i - 1) + j - 1] + 2 * pic[xsize * (i - 1) + j] + pic[xsize * (i - 1) + j + 1] - pic[xsize * (i + 1) + j - 1] - 2 * pic[xsize * (i + 1) + j] - pic[xsize * (i + 1) + j + 1]; magnitude = sum1*sum1 + sum2*sum2; if (magnitude > thresh) result[offset] = 255; else result[offset] = 0; } } printf("Compute Gold Complete\n"); write_ppm("result8000gold.ppm", xsize, ysize, 255, result); printf("Compute CUDA start\n"); int h_Mx[MASK_WIDTH*MASK_WIDTH] = { -1, 0, 1, -2, 0, 2, -1, 0, 1 }; int h_My[MASK_WIDTH*MASK_WIDTH] = { -1, -2, -1, 0, 0, 0, 1, 2, 1 }; /*int h_Mx[MASK_WIDTH*MASK_WIDTH] = { -2, -1, 0, 1, 2, -3, -2, 0, 2, 3, -4, -3, 0, 3, 4, -3, -2, 0, 2, 3, -2, -1, 0, 1, 2, }; int h_My[MASK_WIDTH*MASK_WIDTH] = { -2, -3, -4, -3, 
-2, -1, -2, -3, -2, -1, 0, 0, 0, 0, 0, 1, 2, 3, 2, 1, 2, 3, 4, 3, 2 };*/ cudaMemcpyToSymbol(Mx, h_Mx, MASK_WIDTH*MASK_WIDTH * sizeof(int)); cudaMemcpyToSymbol(My, h_My, MASK_WIDTH *MASK_WIDTH * sizeof(int)); int channelBytes = xsize*ysize * sizeof(int); unsigned int *d_pic; int* h_result, *d_result; h_result = (int*)malloc(channelBytes); cudaMalloc(&d_pic, xsize*ysize * sizeof(unsigned int)); cudaMalloc(&d_result, channelBytes); cudaMemcpy(d_pic, pic, xsize*ysize * sizeof(unsigned int), cudaMemcpyHostToDevice); dim3 blockSize(TILE_WIDTH, TILE_WIDTH); dim3 gridSize((int)ceil((float)xsize / blockSize.x), (int)ceil((float)ysize / blockSize.y), 1); filter<<<gridSize, blockSize>>>(d_result, d_pic, xsize, ysize, thresh); cudaMemcpy(h_result, d_result, channelBytes, cudaMemcpyDeviceToHost); cudaDeviceSynchronize(); printf("Compute CUDA Complete\n"); write_ppm("result_cuda.ppm", xsize, ysize, 255, h_result); compareImages(result, h_result, xsize, ysize); cudaFree(d_pic); cudaFree(d_result); free(pic); free(result); free(h_result); fprintf(stderr, "sobel done\n"); }
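A quick sanity check on the tiling constants used by both versions of filter(): each 16x16 thread block stages an (TILE_WIDTH + 2*HALF_MASK_WIDTH)^2 patch of the image in shared memory, i.e. the output tile plus a one-pixel halo for the 3x3 masks, and both the GPU kernel and the gold loop skip the one-pixel image border and compare the squared gradient magnitude (px*px + py*py, resp. sum1*sum1 + sum2*sum2) against thresh, so the two result images should agree exactly since all arithmetic is integer. Worked out from the #defines above (nothing new, just the arithmetic):

/* MASK_WIDTH        = 3
   HALF_MASK_WIDTH   = 3/2                       = 1
   TILE_WIDTH        = 16
   INPUT_TILE_WIDTH  = 16 + 2*1                  = 18
   shared memory     = 18 * 18 * sizeof(int)     = 1296 bytes per block        */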
d5650b820166c61c92e9f4b6823648a60c2dbd73.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "multigrid_kernel.cu" #include <stdio.h> #define N_MALLAS 12 #define BLOCK_SIZE 16 void g_imprime(Grid g); void multigrid(Grid *u, Grid *f, Grid *v, Grid *d,int nivel, float *max,int * iter); void imprime_malla(float * f, int dim, const char * nombre) { FILE * fil; fil=fopen(nombre,"w"); int i,j; float h=1.0/(dim-1); for(i=0;i<dim;i++) { for(j=0;j<=i;j++) { fprintf(fil,"%f %f %f\n",1.0*j*h,1.0-1.0*i*h, f[IDT(i,j)]); } fprintf(fil,"\n"); } fclose(fil); } int main() { int i; int dim; int size; float max=100; float max_ant; int sizetotal=0; /* Definicion de las Mallas */ Grid u[N_MALLAS]; Grid f[N_MALLAS]; Grid v[N_MALLAS]; Grid d[N_MALLAS]; /* Reservamos la memoria */ for(i=2;i<N_MALLAS;i++) { dim=pow(2,i)+1; //Dim es el nmero de elementos de la "diagonal" size=((dim-1)*(dim-1)+3*(dim-1))/2+1; u[i].dim=dim; f[i].dim=dim; v[i].dim=dim; d[i].dim=dim; u[i].size=size; f[i].size=size; v[i].size=size; d[i].size=size; hipMalloc(&u[i].v,size*sizeof(float)); hipMalloc(&f[i].v,size*sizeof(float)); hipMalloc(&v[i].v,size*sizeof(float)); hipMalloc(&d[i].v,size*sizeof(float)); sizetotal=sizetotal+4*size; } /* Para la llamada a CUDA */ int m=N_MALLAS-1; dim=(int)pow(2,m)+1; dim3 dimBlock(BLOCK_SIZE,BLOCK_SIZE); dim3 dimGrid((dim+BLOCK_SIZE-1)/dimBlock.x,(dim+BLOCK_SIZE-1)/dimBlock.y); printf("%d %d %d %d\n",dimBlock.x, dimBlock.y,dimGrid.x,dimGrid.y); printf("Necesitamos %d kb:\n" ,sizetotal*sizeof(float)/1024); /* Inicializamos la malla de la funcion */ hipLaunchKernelGGL(( cero), dim3(dimGrid),dim3(dimBlock), 0, 0, f[m]); /* Initialize u with random values */ hipLaunchKernelGGL(( random), dim3(dimGrid),dim3(dimBlock), 0, 0, u[m]); /* Principal */ int iter=0; for(i=0;i<20;i++) { max_ant=max; max=0.0; multigrid(&u[0],&f[0],&v[0],&d[0],m,&max,&iter); printf("Iteracion %d nd=%f ratio=%f\n",i,max,max/max_ant); iter++; } /* Liberamos la memoria */ for(i=2;i<N_MALLAS;i++) { hipFree(&u[i].v); hipFree(&f[i].v); hipFree(&v[i].v); hipFree(&d[i].v); } return 0; } /* This function prints a grid allocated in GPU */ void g_imprime(Grid g, const char *nombre) { float * dg; FILE * fil; fil=fopen(nombre,"w"); float h=1.0/(g.dim-1); int i,j; size_t size=((g.dim-1)*(g.dim-1)+3*(g.dim-1))/2+1; dg=(float*)malloc(size*sizeof(float)); hipMemcpy(dg,g.v,size*sizeof(float),hipMemcpyDeviceToHost); /* Mostramos por pantalla */ for(i=0;i<g.dim;i++) { for(j=0;j<=i;j++) { fprintf(fil,"%f %f %f\n",1.0*j*h,1.0-1.0*i*h, dg[IDT(i,j)]); } fprintf(fil,"\n"); } free(dg); fclose(fil); } void multigrid(Grid *u, Grid *f, Grid *v, Grid *d,int m,float *max,int *iter) { int dim; int dim_; int i; /* Definimos h^2 */ float h2=pow(u[m].dim-1,2); /* Definimos un operador (copiado de otro sitio) */ float operador[9]={0.0,-1.0*h2,0.0,-1.0*h2,4.0*h2,-1.0*h2,0.0,-1.0*h2,0.0}; float * a_op; hipMalloc(&a_op,9*sizeof(float)); hipMemcpy(a_op,&operador[0],9*sizeof(float),hipMemcpyHostToDevice); /* Para el operador en el device */ if(m==2) { /* Dimension, necesitamos bajar la f al host para resolver el sistema lineal */ float * hf; float * hu; dim=(int)pow(2,m)+1; size_t size=((f[m].dim-1)*(f[m].dim-1)+3*(f[m].dim-1))/2+1; hf=(float*)malloc(size*sizeof(float)); hipMemcpy(hf,f[m].v,size*sizeof(float),hipMemcpyDeviceToHost); hu=(float*)malloc(size*sizeof(float)); for(i=0;i<size;i++) hu[i]=0.0; /* Construimos el sistema a resolver */ double A[3][3]; A[0][0]=operador[4]; A[0][1]=operador[7]; A[0][2]=operador[8]; A[1][0]=operador[2]; A[1][1]=operador[4]; 
A[1][2]=operador[5]; A[2][0]=operador[0]; A[2][1]=operador[3]; A[2][2]=operador[4]; double B[3]; B[0]=hf[IDT(2,1)]; B[1]=hf[IDT(3,1)]; B[2]=hf[IDT(3,2)]; /* Hacemos eliminacin gausiana */ A[1][1]=A[1][1]-A[0][1]*A[1][0]/A[0][0]; A[1][2]=A[1][2]-A[0][2]*A[1][0]/A[0][0]; B[1]=B[1]-B[0]*A[1][0]/A[0][0]; A[2][1]=A[2][1]-A[0][1]*A[2][0]/A[0][0]; A[2][2]=A[2][2]-A[0][2]*A[2][0]/A[0][0]; B[2]=B[2]-B[0]*A[2][0]/A[0][0]; A[2][2]=A[2][2]-A[1][2]*A[2][1]/A[1][1]; B[2]=B[2]-B[1]*A[2][1]/A[1][1]; /* Resolvemos */ hu[IDT(3,2)]=B[2]/A[2][2]; hu[IDT(3,1)]=(B[1]-A[1][2]*hu[IDT(3,2)])/A[1][1]; hu[IDT(2,1)]=(B[0]-A[0][2]*hu[IDT(3,2)]-A[0][1]*hu[IDT(3,1)])/A[0][0]; /* Subimos la solucin a la GPU */ hipMemcpy(u[m].v,hu,size*sizeof(float),hipMemcpyHostToDevice); free(hf); } else { /* Para la llamada a CUDA */ dim=(int)pow(2,m)+1; dim_=(int)pow(2,m-1)+1; dim3 dimBlock(BLOCK_SIZE,BLOCK_SIZE); dim3 dimGrid((dim+BLOCK_SIZE-1)/dimBlock.x,(dim+BLOCK_SIZE-1)/dimBlock.y); dim3 dimGrid_((dim_+BLOCK_SIZE-1)/dimBlock.x,(dim_+BLOCK_SIZE-1)/dimBlock.y); /* Ponemos a 0 las mallas necesarias */ hipLaunchKernelGGL(( cero), dim3(dimGrid),dim3(dimBlock), 0, 0, v[m]); hipLaunchKernelGGL(( cero), dim3(dimGrid),dim3(dimBlock), 0, 0, d[m]); hipLaunchKernelGGL(( cero), dim3(dimGrid),dim3(dimBlock), 0, 0, u[m-1]); hipLaunchKernelGGL(( cero), dim3(dimGrid),dim3(dimBlock), 0, 0, f[m-1]); /* Suavizamos tres colores */ hipLaunchKernelGGL(( suaviza_r), dim3(dimGrid),dim3(dimBlock), 0, 0, u[m],f[m],a_op); hipLaunchKernelGGL(( suaviza_g), dim3(dimGrid),dim3(dimBlock), 0, 0, u[m],f[m],a_op); hipLaunchKernelGGL(( suaviza_b), dim3(dimGrid),dim3(dimBlock), 0, 0, u[m],f[m],a_op); /* Suavizamos tres colores */ hipLaunchKernelGGL(( suaviza_r), dim3(dimGrid),dim3(dimBlock), 0, 0, u[m],f[m],a_op); hipLaunchKernelGGL(( suaviza_g), dim3(dimGrid),dim3(dimBlock), 0, 0, u[m],f[m],a_op); hipLaunchKernelGGL(( suaviza_b), dim3(dimGrid),dim3(dimBlock), 0, 0, u[m],f[m],a_op); /* Calculamos el defecto */ hipLaunchKernelGGL(( defecto), dim3(dimGrid),dim3(dimBlock), 0, 0, u[m],f[m],d[m],a_op); /* Restringimos el defecto (de d[m] a f[m-1]) */ hipLaunchKernelGGL(( restringe), dim3(dimGrid_),dim3(dimBlock), 0, 0, d[m],f[m-1]); /* Rellamamos a multigrid */ for(i=0;i<2;i++) multigrid(&u[0],&f[0],&v[0],&d[0],m-1,max,iter); /* Interpolate from u[m-1] to v[m] */ hipLaunchKernelGGL(( interpola), dim3(dimGrid_),dim3(dimBlock), 0, 0, u[m-1],v[m]); /* Sumamos */ hipLaunchKernelGGL(( suma), dim3(dimGrid),dim3(dimBlock), 0, 0, u[m],v[m]); /* Post-suavizamos tres colores */ hipLaunchKernelGGL(( suaviza_r), dim3(dimGrid),dim3(dimBlock), 0, 0, u[m],f[m],a_op); hipLaunchKernelGGL(( suaviza_g), dim3(dimGrid),dim3(dimBlock), 0, 0, u[m],f[m],a_op); hipLaunchKernelGGL(( suaviza_b), dim3(dimGrid),dim3(dimBlock), 0, 0, u[m],f[m],a_op); /* Post-suavizamos tres colores */ hipLaunchKernelGGL(( suaviza_r), dim3(dimGrid),dim3(dimBlock), 0, 0, u[m],f[m],a_op); hipLaunchKernelGGL(( suaviza_g), dim3(dimGrid),dim3(dimBlock), 0, 0, u[m],f[m],a_op); hipLaunchKernelGGL(( suaviza_b), dim3(dimGrid),dim3(dimBlock), 0, 0, u[m],f[m],a_op); /* Si estamos en la malla superior,comprobamos cmo va el defecto */ if(m==N_MALLAS-1) { char nombre[256]; sprintf(nombre,"defecto_%d",iter[0]); hipLaunchKernelGGL(( defecto), dim3(dimGrid),dim3(dimBlock), 0, 0, u[m],f[m],d[m],a_op); float * def; size_t size=((f[m].dim-1)*(f[m].dim-1)+3*(f[m].dim-1))/2+1; def=(float*)malloc(size*sizeof(float)); hipMemcpy(def,d[m].v,size*sizeof(float),hipMemcpyDeviceToHost); for(i=0;i<size;i++) { if(max[0]<fabs(def[i])) 
max[0]=fabs(def[i]); } free(def); } } hipFree(a_op); }
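For reference, the operador array built inside multigrid() is the standard 5-point Laplacian stencil scaled by h2 = (dim-1)^2 = 1/h^2, so the suaviza_* and defecto kernels are smoothing and forming residuals for a discrete Poisson problem. Written out (reconstructed from the coefficients above, not text taken from the file):

$$ -\Delta u(x_i,y_j) \;\approx\; \frac{4\,u_{i,j} - u_{i-1,j} - u_{i+1,j} - u_{i,j-1} - u_{i,j+1}}{h^2}, \qquad h = \frac{1}{\mathrm{dim}-1}, $$

which matches operador = h2 * {0,-1,0, -1,4,-1, 0,-1,0} with h2 = (dim-1)^2.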
d5650b820166c61c92e9f4b6823648a60c2dbd73.cu
#include "multigrid_kernel.cu" #include <stdio.h> #define N_MALLAS 12 #define BLOCK_SIZE 16 void g_imprime(Grid g); void multigrid(Grid *u, Grid *f, Grid *v, Grid *d,int nivel, float *max,int * iter); void imprime_malla(float * f, int dim, const char * nombre) { FILE * fil; fil=fopen(nombre,"w"); int i,j; float h=1.0/(dim-1); for(i=0;i<dim;i++) { for(j=0;j<=i;j++) { fprintf(fil,"%f %f %f\n",1.0*j*h,1.0-1.0*i*h, f[IDT(i,j)]); } fprintf(fil,"\n"); } fclose(fil); } int main() { int i; int dim; int size; float max=100; float max_ant; int sizetotal=0; /* Definicion de las Mallas */ Grid u[N_MALLAS]; Grid f[N_MALLAS]; Grid v[N_MALLAS]; Grid d[N_MALLAS]; /* Reservamos la memoria */ for(i=2;i<N_MALLAS;i++) { dim=pow(2,i)+1; //Dim es el número de elementos de la "diagonal" size=((dim-1)*(dim-1)+3*(dim-1))/2+1; u[i].dim=dim; f[i].dim=dim; v[i].dim=dim; d[i].dim=dim; u[i].size=size; f[i].size=size; v[i].size=size; d[i].size=size; cudaMalloc(&u[i].v,size*sizeof(float)); cudaMalloc(&f[i].v,size*sizeof(float)); cudaMalloc(&v[i].v,size*sizeof(float)); cudaMalloc(&d[i].v,size*sizeof(float)); sizetotal=sizetotal+4*size; } /* Para la llamada a CUDA */ int m=N_MALLAS-1; dim=(int)pow(2,m)+1; dim3 dimBlock(BLOCK_SIZE,BLOCK_SIZE); dim3 dimGrid((dim+BLOCK_SIZE-1)/dimBlock.x,(dim+BLOCK_SIZE-1)/dimBlock.y); printf("%d %d %d %d\n",dimBlock.x, dimBlock.y,dimGrid.x,dimGrid.y); printf("Necesitamos %d kb:\n" ,sizetotal*sizeof(float)/1024); /* Inicializamos la malla de la funcion */ cero<<<dimGrid,dimBlock>>>(f[m]); /* Initialize u with random values */ random<<<dimGrid,dimBlock>>>(u[m]); /* Principal */ int iter=0; for(i=0;i<20;i++) { max_ant=max; max=0.0; multigrid(&u[0],&f[0],&v[0],&d[0],m,&max,&iter); printf("Iteracion %d nd=%f ratio=%f\n",i,max,max/max_ant); iter++; } /* Liberamos la memoria */ for(i=2;i<N_MALLAS;i++) { cudaFree(&u[i].v); cudaFree(&f[i].v); cudaFree(&v[i].v); cudaFree(&d[i].v); } return 0; } /* This function prints a grid allocated in GPU */ void g_imprime(Grid g, const char *nombre) { float * dg; FILE * fil; fil=fopen(nombre,"w"); float h=1.0/(g.dim-1); int i,j; size_t size=((g.dim-1)*(g.dim-1)+3*(g.dim-1))/2+1; dg=(float*)malloc(size*sizeof(float)); cudaMemcpy(dg,g.v,size*sizeof(float),cudaMemcpyDeviceToHost); /* Mostramos por pantalla */ for(i=0;i<g.dim;i++) { for(j=0;j<=i;j++) { fprintf(fil,"%f %f %f\n",1.0*j*h,1.0-1.0*i*h, dg[IDT(i,j)]); } fprintf(fil,"\n"); } free(dg); fclose(fil); } void multigrid(Grid *u, Grid *f, Grid *v, Grid *d,int m,float *max,int *iter) { int dim; int dim_; int i; /* Definimos h^2 */ float h2=pow(u[m].dim-1,2); /* Definimos un operador (copiado de otro sitio) */ float operador[9]={0.0,-1.0*h2,0.0,-1.0*h2,4.0*h2,-1.0*h2,0.0,-1.0*h2,0.0}; float * a_op; cudaMalloc(&a_op,9*sizeof(float)); cudaMemcpy(a_op,&operador[0],9*sizeof(float),cudaMemcpyHostToDevice); /* Para el operador en el device */ if(m==2) { /* Dimension, necesitamos bajar la f al host para resolver el sistema lineal */ float * hf; float * hu; dim=(int)pow(2,m)+1; size_t size=((f[m].dim-1)*(f[m].dim-1)+3*(f[m].dim-1))/2+1; hf=(float*)malloc(size*sizeof(float)); cudaMemcpy(hf,f[m].v,size*sizeof(float),cudaMemcpyDeviceToHost); hu=(float*)malloc(size*sizeof(float)); for(i=0;i<size;i++) hu[i]=0.0; /* Construimos el sistema a resolver */ double A[3][3]; A[0][0]=operador[4]; A[0][1]=operador[7]; A[0][2]=operador[8]; A[1][0]=operador[2]; A[1][1]=operador[4]; A[1][2]=operador[5]; A[2][0]=operador[0]; A[2][1]=operador[3]; A[2][2]=operador[4]; double B[3]; B[0]=hf[IDT(2,1)]; B[1]=hf[IDT(3,1)]; B[2]=hf[IDT(3,2)]; /* 
Hacemos eliminación gausiana */ A[1][1]=A[1][1]-A[0][1]*A[1][0]/A[0][0]; A[1][2]=A[1][2]-A[0][2]*A[1][0]/A[0][0]; B[1]=B[1]-B[0]*A[1][0]/A[0][0]; A[2][1]=A[2][1]-A[0][1]*A[2][0]/A[0][0]; A[2][2]=A[2][2]-A[0][2]*A[2][0]/A[0][0]; B[2]=B[2]-B[0]*A[2][0]/A[0][0]; A[2][2]=A[2][2]-A[1][2]*A[2][1]/A[1][1]; B[2]=B[2]-B[1]*A[2][1]/A[1][1]; /* Resolvemos */ hu[IDT(3,2)]=B[2]/A[2][2]; hu[IDT(3,1)]=(B[1]-A[1][2]*hu[IDT(3,2)])/A[1][1]; hu[IDT(2,1)]=(B[0]-A[0][2]*hu[IDT(3,2)]-A[0][1]*hu[IDT(3,1)])/A[0][0]; /* Subimos la solución a la GPU */ cudaMemcpy(u[m].v,hu,size*sizeof(float),cudaMemcpyHostToDevice); free(hf); } else { /* Para la llamada a CUDA */ dim=(int)pow(2,m)+1; dim_=(int)pow(2,m-1)+1; dim3 dimBlock(BLOCK_SIZE,BLOCK_SIZE); dim3 dimGrid((dim+BLOCK_SIZE-1)/dimBlock.x,(dim+BLOCK_SIZE-1)/dimBlock.y); dim3 dimGrid_((dim_+BLOCK_SIZE-1)/dimBlock.x,(dim_+BLOCK_SIZE-1)/dimBlock.y); /* Ponemos a 0 las mallas necesarias */ cero<<<dimGrid,dimBlock>>>(v[m]); cero<<<dimGrid,dimBlock>>>(d[m]); cero<<<dimGrid,dimBlock>>>(u[m-1]); cero<<<dimGrid,dimBlock>>>(f[m-1]); /* Suavizamos tres colores */ suaviza_r<<<dimGrid,dimBlock>>>(u[m],f[m],a_op); suaviza_g<<<dimGrid,dimBlock>>>(u[m],f[m],a_op); suaviza_b<<<dimGrid,dimBlock>>>(u[m],f[m],a_op); /* Suavizamos tres colores */ suaviza_r<<<dimGrid,dimBlock>>>(u[m],f[m],a_op); suaviza_g<<<dimGrid,dimBlock>>>(u[m],f[m],a_op); suaviza_b<<<dimGrid,dimBlock>>>(u[m],f[m],a_op); /* Calculamos el defecto */ defecto<<<dimGrid,dimBlock>>>(u[m],f[m],d[m],a_op); /* Restringimos el defecto (de d[m] a f[m-1]) */ restringe<<<dimGrid_,dimBlock>>>(d[m],f[m-1]); /* Rellamamos a multigrid */ for(i=0;i<2;i++) multigrid(&u[0],&f[0],&v[0],&d[0],m-1,max,iter); /* Interpolate from u[m-1] to v[m] */ interpola<<<dimGrid_,dimBlock>>>(u[m-1],v[m]); /* Sumamos */ suma<<<dimGrid,dimBlock>>>(u[m],v[m]); /* Post-suavizamos tres colores */ suaviza_r<<<dimGrid,dimBlock>>>(u[m],f[m],a_op); suaviza_g<<<dimGrid,dimBlock>>>(u[m],f[m],a_op); suaviza_b<<<dimGrid,dimBlock>>>(u[m],f[m],a_op); /* Post-suavizamos tres colores */ suaviza_r<<<dimGrid,dimBlock>>>(u[m],f[m],a_op); suaviza_g<<<dimGrid,dimBlock>>>(u[m],f[m],a_op); suaviza_b<<<dimGrid,dimBlock>>>(u[m],f[m],a_op); /* Si estamos en la malla superior,comprobamos cómo va el defecto */ if(m==N_MALLAS-1) { char nombre[256]; sprintf(nombre,"defecto_%d",iter[0]); defecto<<<dimGrid,dimBlock>>>(u[m],f[m],d[m],a_op); float * def; size_t size=((f[m].dim-1)*(f[m].dim-1)+3*(f[m].dim-1))/2+1; def=(float*)malloc(size*sizeof(float)); cudaMemcpy(def,d[m].v,size*sizeof(float),cudaMemcpyDeviceToHost); for(i=0;i<size;i++) { if(max[0]<fabs(def[i])) max[0]=fabs(def[i]); } free(def); } } cudaFree(a_op); }
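Both versions of the cleanup loop at the end of main() call cudaFree(&u[i].v) / hipFree(&u[i].v), passing the address of the host-side pointer member rather than the device pointer returned by cudaMalloc, so the device buffers are never actually released (the calls fail on a host address). A corrected sketch of the loop, reusing the Grid arrays declared above (illustrative, not code from the original file):

/* cudaFree (and hipFree) expect the device pointer itself, not its address. */
for (i = 2; i < N_MALLAS; i++) {
    cudaFree(u[i].v);
    cudaFree(f[i].v);
    cudaFree(v[i].v);
    cudaFree(d[i].v);
}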
8a6343ce9da4e92ebf26eaacb67a58891fcc8d72.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <hip/hip_runtime_api.h> #include <rocblas.h> #include "rocm_smi/rocm_smi.h" // includes, project //#include "magma.h" #include "cuda_multi_gemm_unif.cu" //#define DEBUGPRINT 0 __global__ void fluxes_full_field_gpu_kernel_fillq(double *vtrans, double *vx, double *vy, double *vz, double *pr, double *t, double *csound, double *phig, double *vdiff, double *fatface,int irho, int iux, int iuy, int iuz, int ipr, int ithm, int isnd, int iph, int icvf, int icpf, int imuf, int ikndf, int ilamf, int iwm, int iwp, int icv, int icp, int imu, int iknd, int ilam,int *iface_flux, int nnel, int nxz2ldim, int lxyz,int lxz, int ivarcoef,int leltlxyz ){ int id = blockIdx.x*blockDim.x+threadIdx.x; if(id<nnel){ int e = id/nxz2ldim; int j = id % nxz2ldim; //fillq vtrans int i = iface_flux[id]-1; // because forgot to -1 in the follows fatface[(iwp-1)+id] = vtrans[e*lxyz+i]; // following works because ndg_face is same as nnel. Talk with Dr. Tania. adeesha fatface[(iwm-1)+(irho-1)*ivarcoef+id] = fatface[(iwp-1)+id]; //fillq vx fatface[(iwp-1)+id] = vx[e*lxyz+i]; fatface[(iwm-1)+(iux-1)*ivarcoef+id] = fatface[(iwp-1)+id]; //fillq vy fatface[(iwp-1)+id] = vy[e*lxyz+i]; fatface[(iwm-1)+(iuy-1)*ivarcoef+id] = fatface[(iwp-1)+id]; //fillq vz fatface[(iwp-1)+id] = vz[e*lxyz+i]; fatface[(iwm-1)+(iuz-1)*ivarcoef+id] = fatface[(iwp-1)+id]; //fillq pr fatface[(iwp-1)+id] = pr[e*lxyz+i]; fatface[(iwm-1)+(ipr-1)*ivarcoef+id] = fatface[(iwp-1)+id]; //fillq t fatface[(iwp-1)+id] = t[e*lxyz+i]; fatface[(iwm-1)+(ithm-1)*ivarcoef+id] = fatface[(iwp-1)+id]; //fillq csound fatface[(iwp-1)+id] = csound[e*lxyz+i]; fatface[(iwm-1)+(isnd-1)*ivarcoef+id] = fatface[(iwp-1)+id]; //fillq phig fatface[(iwp-1)+id] = phig[e*lxyz+i]; fatface[(iwm-1)+(iph-1)*ivarcoef+id] = fatface[(iwp-1)+id]; //fillq vtrans icv fatface[(iwp-1)+id] = vtrans[(icv-1)*leltlxyz+e*lxyz+i]; fatface[(iwm-1)+(icvf-1)*ivarcoef+id] = fatface[(iwp-1)+id]; //fillq vtrans icp fatface[(iwp-1)+id] = vtrans[(icp-1)*leltlxyz+e*lxyz+i]; fatface[(iwm-1)+(icpf-1)*ivarcoef+id] = fatface[(iwp-1)+id]; //fillq vdiff imu fatface[(iwp-1)+id] = vdiff[(imu-1)*leltlxyz+e*lxyz+i]; fatface[(iwm-1)+(imuf-1)*ivarcoef+id] = fatface[(iwp-1)+id]; //fillq vdiff iknd fatface[(iwp-1)+id] = vdiff[(iknd-1)*leltlxyz+e*lxyz+i]; fatface[(iwm-1)+(ikndf-1)*ivarcoef+id] = fatface[(iwp-1)+id]; //fillq vdiff ilam fatface[(iwp-1)+id] = vdiff[(ilam-1)*leltlxyz+e*lxyz+i]; fatface[(iwm-1)+(ilam-1)*ivarcoef+id] = fatface[(iwp-1)+id]; // if(id<10){ // printf("$$$ pfc i=%d, vtrans[%d]=%lf,vx[%d]=%lf,vy[%d]=%lf,vz[%d]=%lf,pr[%d]=%lf,t[%d]=%lf,csound[%d]=%lf,phig[%d]=%lf,vtrans[%d]=%lf,vdiff[%d]=%lf,vtrans[%d]=%lf \n",i,e*lxyz+i,vtrans[e*lxyz+i],e*lxyz+i,vx[e*lxyz+i],e*lxyz+i,vy[e*lxyz+i],e*lxyz+i,vz[e*lxyz+i],e*lxyz+i,pr[e*lxyz+i],e*lxyz+i,t[e*lxyz+i],e*lxyz+i,csound[e*lxyz+i],e*lxyz+i,phig[e*lxyz+i],e*lxyz+i,vtrans[e*lxyz+i],e*lxyz+i,vdiff[e*lxyz+i],(icp-1)*leltlxyz+e*lxyz+i,vtrans[(icp-1)*leltlxyz+e*lxyz+i]); // } } } __global__ void fluxes_full_field_gpu_kernel_faceu(double *fatface, double *u,int i_cvars, int nneltoteq, int nnel, int toteq, int lxyz, int iwm, int iph,int *iface_flux,int nxz2ldim,int ivarcoef){ int id = blockIdx.x*blockDim.x+threadIdx.x; if(id<nneltoteq){ int ivar = id/nnel; int e_n = id%(nnel);; int e = e_n/nxz2ldim; int i = iface_flux[e_n]; //full2face_cmt fatface[(i_cvars-1)+id] =u[e*toteq*lxyz+ivar*lxyz+i-1]; fatface[(i_cvars-1)+id]= fatface[(i_cvars-1)+id]/fatface[iwm-1+ivarcoef*(iph-2)+id];// invcol2 // 
check with Dr.Tania. above functions may not work properly. } } extern "C" void fluxes_full_field_gpu_wrapper_(int *glbblockSize2,double *d_fatface,double *d_vtrans,double *d_u, double *d_vx, double *d_vy, double *d_vz, double *d_pr, double *d_t, double *d_csound, double *d_phig, double *d_vdiff, int *irho, int *iux, int *iuy, int *iuz, int *ipr, int *ithm, int *isnd, int *iph, int *icvf, int *icpf, int *imuf, int *ikndf, int *ilamf, int *iwm, int *iwp, int *icv, int *icp, int *imu, int *iknd, int *ilam,int *d_iface_flux, int *nelt, int *lx1, int *ly1, int *lz1, int *ldim, int *lelt, int *i_cvars,int *toteq, int *nqq){ #ifdef DEBUGPRINT hipDeviceSynchronize(); hipError_t code1 = hipPeekAtLastError(); // if (code1 != hipSuccess){ printf("CUDA: Start fluxes_full_field_gpu_wrapper cuda status: %s\n",hipGetErrorString(code1)); printf("CUDA: Start fluxes_full_field_gpu_wrapper values irho = %d, iux= %d,iuy= %d,iuz= %d,ipr= %d,ithm= %d,isnd= %d,iph= %d, icvf= %d,icpf= %d,imuf= %d,ikndf= %d,ilamf= %d,iwm= %d,iwp= %d,icv= %d, icp= %d,imu= %d,iknd= %d,ilam= %d,nelt= %d,lx1= %d,ly1= %d,lz1= %d,ldim= %d,lelt= %d,i_cvars= %d,toteq= %d,nqq=%d \n",irho[0],iux[0],iuy[0],iuz[0],ipr[0],ithm[0],isnd[0],iph[0],icvf[0],icpf[0],imuf[0],ikndf[0],ilamf[0],iwm[0],iwp[0],icv[0],icp[0],imu[0],iknd[0],ilam[0],nelt[0],lx1[0],ly1[0],lz1[0],ldim[0],lelt[0],i_cvars[0],toteq[0],nqq[0]); // } #endif int nxz2ldim = lx1[0]*lz1[0]*2*ldim[0]; int lxyz = lx1[0]*ly1[0]*lz1[0]; int leltlxyz = lelt[0]*lxyz; int lxz = lx1[0]*lz1[0]; int nnel = nelt[0]*nxz2ldim; int ivarcoef = nxz2ldim*lelt[0]; int nnelnqq = nnel*nqq[0]; int blockSize = glbblockSize2[0], gridSize; gridSize = (int)ceil((float)nnel/blockSize); hipLaunchKernelGGL(( fluxes_full_field_gpu_kernel_fillq), dim3(gridSize), dim3(blockSize), 0, 0, d_vtrans, d_vx, d_vy, d_vz,d_pr, d_t, d_csound,d_phig,d_vdiff,d_fatface,irho[0],iux[0], iuy[0], iuz[0], ipr[0],ithm[0],isnd[0],iph[0],icvf[0],icpf[0],imuf[0],ikndf[0],ilamf[0],iwm[0],iwp[0],icv[0],icp[0],imu[0],iknd[0],ilam[0],d_iface_flux,nnel, nxz2ldim,lxyz,lxz,ivarcoef, leltlxyz); #ifdef DEBUGPRINT hipDeviceSynchronize(); code1 = hipPeekAtLastError(); printf("CUDA: fluxes_full_field_gpu_wrapper after fillq cuda status: %s\n",hipGetErrorString(code1)); #endif gridSize = (int)ceil((float)nnel*toteq[0]/blockSize); hipLaunchKernelGGL(( fluxes_full_field_gpu_kernel_faceu), dim3(gridSize), dim3(blockSize), 0, 0, d_fatface,d_u,i_cvars[0],nnel*toteq[0],nnel, toteq[0], lxyz,iwm[0],iph[0],d_iface_flux,nxz2ldim,ivarcoef); #ifdef DEBUGPRINT hipDeviceSynchronize(); hipError_t code2 = hipPeekAtLastError(); // if (code1 != hipSuccess){ printf("CUDA: End fluxes_full_field_gpu_wrapper cuda status: %s\n",hipGetErrorString(code2)); #endif } __global__ void inviscidFlux_gpu_kernel1(double *jaco_c,double *area,double *wghtc,int ntot){ int id = blockIdx.x*blockDim.x+threadIdx.x; if(id<ntot){ jaco_c[id]= area[id]/wghtc[id]; } } __global__ void inviscidFlux_gpu_kernel2(double *jaco_f,double *wghtf,int ntotd){ int id = blockIdx.x*blockDim.x+threadIdx.x; if(id<ntotd){ jaco_f[id]=jaco_f[id]*wghtf[id]; } } __global__ void inviscidFlux_gpu_kernel3(double *unx,double *uny,double *unz,double *nx,double *ny,double *nz,double *rl,double *ul,double *vl,double *wl,double *pl,double *tl,double *al,double *cpl,double *rr,double *ur,double *vr,double *wr,double *pr,double *tr,double *ar,double *cpr,double *phl,double *jaco_f,double *fatface,double *area,int iwm,int iwp,int irho,int iux,int iuy,int iuz,int ipr,int ithm,int isnd,int icpf,int iph,int lxz2ldimlelt,int 
ntot){ int id = blockIdx.x*blockDim.x+threadIdx.x; if(id<ntot){ nx[id]=unx[id]; ny[id]=uny[id]; nz[id]=unz[id]; rl[id]=fatface[iwm-1+(irho-1)*lxz2ldimlelt+id]; // send the calculate array index for the optimizations later. adeesha. ul[id]=fatface[iwm-1+(iux-1)*lxz2ldimlelt+id]; vl[id]=fatface[iwm-1+(iuy-1)*lxz2ldimlelt+id]; wl[id]=fatface[iwm-1+(iuz-1)*lxz2ldimlelt+id]; pl[id]=fatface[iwm-1+(ipr-1)*lxz2ldimlelt+id]; tl[id]=fatface[iwm-1+(ithm-1)*lxz2ldimlelt+id]; al[id]=fatface[iwm-1+(isnd-1)*lxz2ldimlelt+id]; cpl[id]=fatface[iwm-1+(icpf-1)*lxz2ldimlelt+id]; rr[id]=fatface[iwp-1+(irho-1)*lxz2ldimlelt+id]; ur[id]=fatface[iwp-1+(iux-1)*lxz2ldimlelt+id]; vr[id]=fatface[iwp-1+(iuy-1)*lxz2ldimlelt+id]; wr[id]=fatface[iwp-1+(iuz-1)*lxz2ldimlelt+id]; pr[id]=fatface[iwp-1+(ipr-1)*lxz2ldimlelt+id]; tr[id]=fatface[iwp-1+(ithm-1)*lxz2ldimlelt+id]; ar[id]=fatface[iwp-1+(isnd-1)*lxz2ldimlelt+id]; cpr[id]=fatface[iwp-1+(icpf-1)*lxz2ldimlelt+id]; phl[id]=fatface[iwp-1+(iph-1)*lxz2ldimlelt+id]; jaco_f[id]=area[id]; } } __global__ void inviscidFlux_gpu_kernel4(double *flx,double *phl,int ntotd){ int id = blockIdx.x*blockDim.x+threadIdx.x; if(id<ntotd){ flx[id]=flx[id]*phl[id]; } } __global__ void Ausm_flux(int neq, int ntotd, double *nx, double *ny, double *nz, double *nm, double *fs, double *rl, double *ul, double *vl, double *wl, double *pl, double *al, double *tl, double *rr, double *ur, double *vr, double *wr, double *pr, double *ar, double *tr, double *flx, double *cpl, double *cpr){ int i = blockIdx.x*blockDim.x+threadIdx.x; //ntotd = nel * nfaces * nxzd if(i<ntotd){ cpl[i]=cpl[i]/rl[i];//invcol2 cpr[i]=cpr[i]/rl[i];//invcol2 fs[i] = 0;// it is 0 in cmtbone but can be changed double af,mf,mfa,mfm,mfp,ml,mla,mlp,mr,mra,mrm,pf,ql,qr,wtl,wtr,Hl,Hr; Hl = cpl[i]*tl[i] + 0.5*(ul[i]*ul[i]+vl[i]*vl[i]+wl[i]*wl[i]); Hr = cpr[i]*tr[i] + 0.5*(ur[i]*ur[i]+vr[i]*vr[i]+wr[i]*wr[i]); ql = ul[i]*nx[i] + vl[i]*ny[i] + wl[i]*nz[i] - fs[i]; qr = ur[i]*nx[i] + vr[i]*ny[i] + wr[i]*nz[i] - fs[i]; af = 0.5*(al[i] + ar[i]); ml = ql/af; mla = abs(ml); mr = qr/af; mra = abs(mr); if(mla <= 1.0){ mlp = 0.25*pow((ml+1.0),2) + 0.125*pow((ml*ml-1.0),2); wtl = 0.25*pow(ml+1.0,2)*(2.0-ml) + 0.1875*ml*pow(ml*ml-1.0,2); } else{ mlp = 0.5*(ml+mla); wtl = 0.5*(1.0+ml/mla); } if(mra <= 1.0){ mrm = -0.25*pow((mr-1.0),2) - 0.125*pow((mr*mr-1.0),2); wtr = 0.25*pow(mr-1.0,2)*(2.0+mr) - 0.1875*mr*pow(mr*mr-1.0,2); } else{ mrm = 0.5*(mr-mra); wtr = 0.5*(1.0-mr/mra); } mf = mlp + mrm; mfa = abs(mf); mfp = 0.5*(mf+mfa); mfm = 0.5*(mf-mfa); pf = wtl*pl[i] + wtr*pr[i]; //compute fluxes flx[i] = (af*(mfp*rl[i] + mfm*rr[i])) * nm[i]; flx[1*ntotd+i] = (af*(mfp*rl[i]*ul[i] + mfm*rr[i]*ur[i])+pf*nx[i]) * nm[i]; flx[2*ntotd+i] = (af*(mfp*rl[i]*vl[i] + mfm*rr[i]*vr[i])+pf*ny[i]) * nm[i]; flx[3*ntotd+i] = (af*(mfp*rl[i]*wl[i] + mfm*rr[i]*wr[i])+pf*nz[i]) * nm[i]; flx[4*ntotd+i] = (af*(mfp*rl[i]*Hl + mfm*rr[i]*Hr)+pf*fs[i]) * nm[i]; } } void map_faced(double *d_jgl, double *d_jgt, double *ju, double *u, double *d_w, int nx1, int nxd, int fdim, int nelt, int nfaces, int idir){ #ifdef DEBUGPRINT hipDeviceSynchronize(); hipError_t code1 = hipPeekAtLastError(); printf("CUDA: Start map_faced cuda status: %s\n",hipGetErrorString(code1)); #endif hipStream_t stream; hipStreamCreate( &stream ); const double alpha = 1; const double beta = 0; int nx1_2 = pow(nx1,2); int nxd_2 = pow(nxd,2); int batchSize = nelt*nfaces; int successvalue=-1; if(idir==0){ int blockSize = 1024, gridSize; //calc w(nxd,nx1) = jgl(nxd*nx1) * u(nx1,nx1) in fortran //calc w(nx1,nxd) = 
u(nx1,nx1) * jgl(nx1,nxd) in C gridSize = (int)ceil((float)nelt*nfaces*nx1*nxd/blockSize); successvalue = cuda_multi_gemm_unif(stream, 'N', 'N', nxd, nx1, nx1, &alpha, d_jgl, nxd, 0, u, nx1, nx1_2, &beta, d_w, nxd, nx1*nxd, batchSize, gridSize); #ifdef DEBUGPRINT hipDeviceSynchronize(); code1 = hipPeekAtLastError(); printf("CUDA: map_faced afte 1st multigemm in idir=0 successvalue =%d and cuda status: %s\n",successvalue,hipGetErrorString(code1)); #endif //calc ju(nxd,nxd) = w(nxd,nx1) * jgt(nx1,nxd) in fortran //calc ju(nxd,nxd) = jgt(nxd,nx1) * w(nx1,nxd) gridSize = (int)ceil((float)nelt*nfaces*nxd*nxd/blockSize); successvalue= cuda_multi_gemm_unif(stream, 'N', 'N', nxd, nx1, nxd, &alpha, d_w, nxd, nx1*nxd, d_jgt, nx1, 0, &beta, ju, nxd, nxd_2, batchSize, gridSize); #ifdef DEBUGPRINT hipDeviceSynchronize(); code1 = hipPeekAtLastError(); printf("CUDA: map_faced afte 2nd multigemm in idir=0 successvalue =%d and cuda status: %s\n",successvalue,hipGetErrorString(code1)); #endif } else{ int blockSize = 1024, gridSize; //calc w(nx1,nxd) = jgt(nx1,nxd) * u(nxd,nxd) in fortran //calc w(nxd,nx1) = u(nxd,nxd) * jgt(nxd,nx1) in C successvalue= gridSize = (int)ceil((float)nelt*nfaces*nx1*nxd/blockSize); cuda_multi_gemm_unif(stream, 'N', 'N', nx1, nxd, nxd, &alpha, d_jgt, nx1, 0, u, nxd, nxd_2, &beta, d_w, nx1, nx1*nxd, batchSize, gridSize); #ifdef DEBUGPRINT hipDeviceSynchronize(); code1 = hipPeekAtLastError(); printf("CUDA: map_faced afte 1st multigemm in idir!=0 successvalue =%d and cuda status: %s\n",successvalue,hipGetErrorString(code1)); #endif //calc ju(nx1,nx1) = w(nx1,nxd) * jgl(nxd,nx1) in fortran //calc ju(nx1,nx1) = jgl(nx1,nxd) * w(nxd,nx1) in C gridSize = (int)ceil((float)nelt*nfaces*nx1*nx1/blockSize); successvalue= cuda_multi_gemm_unif(stream, 'N', 'N', nx1, nxd, nx1, &alpha, d_w, nx1, nx1*nxd, d_jgl, nxd, 0, &beta, ju, nx1, nx1_2, batchSize, gridSize); #ifdef DEBUGPRINT hipDeviceSynchronize(); code1 = hipPeekAtLastError(); printf("CUDA: map_faced afte 2nd multigemm in idir!=0 successvalue =%d and cuda status: %s\n",successvalue,hipGetErrorString(code1)); #endif } hipStreamDestroy(stream); } extern "C" void inviscidflux_gpu_wrapper_(int *glbblockSize2,double *jgl,double *jgt,double *d_unx,double *d_uny,double *d_unz,double *d_area,double *d_fatface,double *d_wghtc,double *d_wghtf,int *irho,int *iux,int *iuy,int *iuz,int *ipr,int *ithm,int *isnd,int *icpf,int *iph,int *iwm,int *iwp,int *iflx,int *ldim,int *lxd,int *lzd,int *nelt,int *lelt,int *toteq,int *lx1,int *ly1,int *lz1 ){ #ifdef DEBUGPRINT hipDeviceSynchronize(); hipError_t code1 = hipPeekAtLastError(); printf("CUDA: Start inviscidFlux_gpu_wrapper cuda status: %s\n",hipGetErrorString(code1)); printf("CUDA: Start inviscidFlux_gpu_wrapper values irho = %d, iux= %d,iuy= %d,iuz= %d,ipr= %d,ithm= %d,isnd= %d,iph= %d,icpf= %d,iwm= %d,iwp= %d,nelt= %d,lx1= %d,ly1= %d,lz1= %d,ldim= %d,lelt= %d,toteq= %d \n",irho[0],iux[0],iuy[0],iuz[0],ipr[0],ithm[0],isnd[0],iph[0],icpf[0],iwm[0],iwp[0],nelt[0],lx1[0],ly1[0],lz1[0],ldim[0],lelt[0],toteq[0]); #endif int fdim = ldim[0]-1; int nfaces = 2*ldim[0]; int lxz=lx1[0]*lz1[0]; int ntot= nelt[0]*nfaces*lxz; int ntotd = nelt[0] * nfaces * lxd[0]*lzd[0]; int lxd_3= lxd[0]*lxd[0]*lxd[0]; int nel=nelt[0]; int lxz2ldim=lxz*nfaces; int lxz2ldimlelt=lxz2ldim*lelt[0]; double *d_jgl; double *d_jgt; hipMalloc(&d_jgl, lxd_3*sizeof(double)); hipMalloc(&d_jgt, lxd_3*sizeof(double)); hipMemcpy(d_jgl, jgl,lxd_3*sizeof(double), hipMemcpyHostToDevice); hipMemcpy(d_jgt, jgt,lxd_3*sizeof(double), 
hipMemcpyHostToDevice); double *d_w; double *d_nx; double *d_ny; double *d_nz; double *d_rl; double *d_ul; double *d_wl; double *d_vl; double *d_pl; double *d_tl; double *d_al; double *d_cpl; double *d_rr; double *d_ur; double *d_wr; double *d_vr; double *d_pr; double *d_tr; double *d_ar; double *d_cpr; double *d_jaco_c; double *d_jaco_f; double *d_phl; double *d_fs; double *d_flx; hipMalloc(&d_w, ntotd*sizeof(double)); hipMalloc(&d_nx, ntotd*sizeof(double)); hipMalloc(&d_ny, ntotd*sizeof(double)); hipMalloc(&d_nz, ntotd*sizeof(double)); hipMalloc(&d_rl, ntotd*sizeof(double)); hipMalloc(&d_ul, ntotd*sizeof(double)); hipMalloc(&d_wl, ntotd*sizeof(double)); hipMalloc(&d_vl, ntotd*sizeof(double)); hipMalloc(&d_pl, ntotd*sizeof(double)); hipMalloc(&d_tl, ntotd*sizeof(double)); hipMalloc(&d_al, ntotd*sizeof(double)); hipMalloc(&d_cpl, ntotd*sizeof(double)); hipMalloc(&d_rr, ntotd*sizeof(double)); hipMalloc(&d_ur, ntotd*sizeof(double)); hipMalloc(&d_vr, ntotd*sizeof(double)); hipMalloc(&d_wr, ntotd*sizeof(double)); hipMalloc(&d_pr, ntotd*sizeof(double)); hipMalloc(&d_tr, ntotd*sizeof(double)); hipMalloc(&d_ar, ntotd*sizeof(double)); hipMalloc(&d_cpr, ntotd*sizeof(double)); hipMalloc(&d_jaco_c, ntot*sizeof(double)); hipMalloc(&d_jaco_f, ntotd*sizeof(double)); hipMalloc(&d_phl, ntotd*sizeof(double)); hipMalloc(&d_fs, ntotd*sizeof(double)); hipMalloc(&d_flx, ntotd*toteq[0]*sizeof(double)); int totpts =lxz2ldimlelt; int blockSize = glbblockSize2[0], gridSize; if(lxd[0]>lx1[0]){ map_faced(d_jgl, d_jgt, d_nx, d_unx, d_w, lx1[0], lxd[0], fdim, nel, nfaces, 0); map_faced(d_jgl, d_jgt, d_ny, d_uny, d_w, lx1[0], lxd[0], fdim, nel, nfaces, 0); map_faced(d_jgl, d_jgt, d_nz, d_unz, d_w, lx1[0], lxd[0], fdim, nel, nfaces, 0); map_faced(d_jgl, d_jgt, d_rl, d_fatface+iwm[0]-1+(irho[0]-1)*totpts, d_w, lx1[0], lxd[0], fdim, nel, nfaces, 0); map_faced(d_jgl, d_jgt, d_ul, d_fatface+iwm[0]-1+(iux[0]-1)*totpts, d_w, lx1[0], lxd[0], fdim, nel, nfaces, 0); map_faced(d_jgl, d_jgt, d_vl, d_fatface+iwm[0]-1+(iuy[0]-1)*totpts, d_w, lx1[0], lxd[0], fdim, nel, nfaces, 0); map_faced(d_jgl, d_jgt, d_wl, d_fatface+iwm[0]-1+(iuz[0]-1)*totpts, d_w, lx1[0], lxd[0], fdim, nel, nfaces, 0); map_faced(d_jgl, d_jgt, d_pl, d_fatface+iwm[0]-1+(ipr[0]-1)*totpts, d_w, lx1[0], lxd[0], fdim, nel, nfaces, 0); map_faced(d_jgl, d_jgt, d_tl, d_fatface+iwm[0]-1+(ithm[0]-1)*totpts, d_w, lx1[0], lxd[0], fdim, nel, nfaces, 0); map_faced(d_jgl, d_jgt, d_al, d_fatface+iwm[0]-1+(isnd[0]-1)*totpts, d_w, lx1[0], lxd[0], fdim, nel, nfaces, 0); map_faced(d_jgl, d_jgt, d_cpl, d_fatface+iwm[0]-1+(icpf[0]-1)*totpts, d_w, lx1[0], lxd[0], fdim, nel, nfaces, 0); map_faced(d_jgl, d_jgt, d_rr, d_fatface+iwp[0]-1+(irho[0]-1)*totpts, d_w, lx1[0], lxd[0], fdim, nel, nfaces, 0); map_faced(d_jgl, d_jgt, d_ur, d_fatface+iwp[0]-1+(iux[0]-1)*totpts, d_w, lx1[0], lxd[0], fdim, nel, nfaces, 0); map_faced(d_jgl, d_jgt, d_vr, d_fatface+iwp[0]-1+(iuy[0]-1)*totpts, d_w, lx1[0], lxd[0], fdim, nel, nfaces, 0); map_faced(d_jgl, d_jgt, d_wr, d_fatface+iwp[0]-1+(iuz[0]-1)*totpts, d_w, lx1[0], lxd[0], fdim, nel, nfaces, 0); map_faced(d_jgl, d_jgt, d_pr, d_fatface+iwp[0]-1+(ipr[0]-1)*totpts, d_w, lx1[0], lxd[0], fdim, nel, nfaces, 0); map_faced(d_jgl, d_jgt, d_tr, d_fatface+iwp[0]-1+(ithm[0]-1)*totpts, d_w, lx1[0], lxd[0], fdim, nel, nfaces, 0); map_faced(d_jgl, d_jgt, d_ar, d_fatface+iwp[0]-1+(isnd[0]-1)*totpts, d_w, lx1[0], lxd[0], fdim, nel, nfaces, 0); map_faced(d_jgl, d_jgt, d_cpr, d_fatface+iwp[0]-1+(icpf[0]-1)*totpts, d_w, lx1[0], lxd[0], fdim, nel, nfaces, 0); #ifdef 
DEBUGPRINT hipDeviceSynchronize(); code1 = hipPeekAtLastError(); printf("CUDA:inviscidFlux_gpu_wrapper after map_faced cuda status: %s\n",hipGetErrorString(code1)); #endif gridSize = (int)ceil((float)ntot/blockSize); hipLaunchKernelGGL(( inviscidFlux_gpu_kernel1), dim3(gridSize), dim3(blockSize), 0, 0, d_jaco_c,d_area,d_wghtc,ntot); map_faced(d_jgl, d_jgt, d_jaco_f, d_jaco_c, d_w, lx1[0], lxd[0], fdim, nel, nfaces, 0); gridSize = (int)ceil((float)ntotd/blockSize); hipLaunchKernelGGL(( inviscidFlux_gpu_kernel2), dim3(gridSize), dim3(blockSize), 0, 0, d_jaco_f,d_wghtf,ntotd); #ifdef DEBUGPRINT hipDeviceSynchronize(); code1 = hipPeekAtLastError(); printf("CUDA: inviscidFlux_gpu_wrapper after kernen2 cuda status: %s\n",hipGetErrorString(code1)); #endif } else{ gridSize = (int)ceil((float)ntot/blockSize); hipLaunchKernelGGL(( inviscidFlux_gpu_kernel3), dim3(gridSize), dim3(blockSize), 0, 0, d_unx,d_uny,d_unz,d_nx,d_ny,d_nz,d_rl,d_ul,d_vl,d_wl,d_pl,d_tl,d_al,d_cpl,d_rr,d_ur,d_vr,d_wr,d_pr,d_tr,d_ar,d_cpr,d_phl,d_jaco_f,d_fatface,d_area,iwm[0],iwp[0],irho[0],iux[0],iuy[0],iuz[0],ipr[0],ithm[0],isnd[0],icpf[0],iph[0],lxz2ldimlelt,ntot ); } gridSize = (int)ceil((float)ntotd/blockSize); hipLaunchKernelGGL(( Ausm_flux), dim3(gridSize), dim3(blockSize), 0, 0, toteq[0],ntotd, d_nx, d_ny, d_nz, d_jaco_f, d_fs, d_rl, d_ul, d_vl, d_wl, d_pl, d_al, d_tl, d_rr, d_ur, d_vr, d_wr, d_pr, d_ar, d_tr, d_flx, d_cpl, d_cpr); if(lxd[0]>lx1[0]){ for(int j=0; j<toteq[0];j++){ gridSize = (int)ceil((float)ntotd/blockSize); hipLaunchKernelGGL(( inviscidFlux_gpu_kernel4), dim3(gridSize), dim3(blockSize), 0, 0, d_flx+j*ntotd,d_phl,ntotd); map_faced(d_jgl, d_jgt,d_fatface+iflx[0]-1+j*totpts, d_flx+j*ntotd, d_w, lx1[0], lxd[0], fdim, nel, nfaces, 1); } #ifdef DEBUGPRINT hipDeviceSynchronize(); code1 = hipPeekAtLastError(); printf("CUDA: inviscidFlux_gpu_wrapper after forloop cuda status: %s\n",hipGetErrorString(code1)); #endif } else{ printf("else part yet to implemented \n"); } hipFree(d_jgl); hipFree(d_jgt); hipFree(d_nx); hipFree(d_ny); hipFree(d_nz); hipFree(d_rl); hipFree(d_ul); hipFree(d_wl); hipFree(d_vl); hipFree(d_pl); hipFree(d_tl); hipFree(d_al); hipFree(d_cpl); hipFree(d_rr); hipFree(d_ur); hipFree(d_wr); hipFree(d_vr); hipFree(d_pr); hipFree(d_tr); hipFree(d_ar); hipFree(d_cpr); hipFree(d_jaco_c); hipFree(d_jaco_f); hipFree(d_phl); hipFree(d_fs); hipFree(d_flx); #ifdef DEBUGPRINT hipDeviceSynchronize(); code1 = hipPeekAtLastError(); printf("CUDA: End inviscidFlux_gpu_wrapper cuda status: %s\n",hipGetErrorString(code1)); #endif } __global__ void surface_integral_full_gpu_kernel(double *iface_flux, double *res1, double *flux, int ntot,int lxyz,int lxyzlelt,int nxz2ldim,int eq ){ int id = blockIdx.x*blockDim.x+threadIdx.x; if(id<ntot){ int e = id/nxz2ldim; //add_face2full_cmt(nel,nx,ny,nz,iface,vols,faces) int newi=iface_flux[id]; res1[eq*lxyzlelt+e*lxyz+newi-1]= res1[eq*lxyzlelt+e*lxyz+newi-1]+flux[id]; } } extern "C" void surface_integral_full_gpu_wrapper_(int *glbblockSize2,double *d_res1,double *d_flux,int *eq,int *ieq,int *nelt, int *lelt, int *toteq,int *lx1,int *ly1,int *lz1,int *ldim,double *d_iface_flux ){ #ifdef DEBUGPRINT hipDeviceSynchronize(); hipError_t code1 = hipPeekAtLastError(); printf("CUDA: Start surface_integral_full_gpu_wrapper cuda status: %s\n",hipGetErrorString(code1)); printf("CUDA: Start surface_integral_full_gpu_wrapper values eq = %d,ieq= %d,nelt= %d,lelt=%d,toteq= %d,lx1=%d,ly1= %d,lz1= %d,ldim= %d\n",eq[0],ieq[0],nelt[0],lelt[0],toteq[0],lx1[0],ly1[0],lz1[0],ldim[0]); #endif int 
nxz2ldim = lx1[0]*lz1[0]*2*ldim[0]; int lxyz = lx1[0]*ly1[0]*lz1[0]; int lxyzlelt = lelt[0]*lxyz; int ntot = nelt[0]*nxz2ldim; int ivarcoef = nxz2ldim*lelt[0]; int blockSize = glbblockSize2[0], gridSize; gridSize = (int)ceil((float)ntot/blockSize); hipLaunchKernelGGL(( surface_integral_full_gpu_kernel), dim3(gridSize), dim3(blockSize), 0, 0, d_iface_flux,d_res1,d_flux+ieq[0]-1,ntot,lxyz,lxyzlelt,nxz2ldim,eq[0]); #ifdef DEBUGPRINT hipDeviceSynchronize(); hipError_t code2 = hipPeekAtLastError(); printf("CUDA: End surface_integral_full_gpu_wrapper cuda status: %s\n",hipGetErrorString(code2)); #endif } __global__ void igu_cmt_gpu_wrapper1_gpu_kernel(double *flux, double *graduf,int ntot,int lxz2ldimlelt,int toteq,int iwp ){ int id = blockIdx.x*blockDim.x+threadIdx.x; if(id<ntot){ for(int eq=0;eq<toteq;eq++){ flux[iwp-1+eq*lxz2ldimlelt +id]=graduf[eq*lxz2ldimlelt+id]; graduf[eq*lxz2ldimlelt+id]=graduf[eq*lxz2ldimlelt+id]*0.5; } } } __global__ void igu_cmt_gpu_wrapper2_gpu_kernel(double *flux,double *graduf,char *cbc,double *unx,double *uny,double *unz,int ntot,int lxz2ldim,int lxz2ldimlelt,int lxz2ldimlelttoteq,int toteq,int iwp,int iwm,int ldim,int irho,int icvf,int ilamf,int imuf,int iux,int iuy,int iuz,int iu5,int if3d,int lxz,int a2ldim ){ int id = blockIdx.x*blockDim.x+threadIdx.x; if(id<ntot){ int e = id/lxz2ldim; int iface= ((id/lxz)%a2ldim); for(int eq=0;eq<toteq;eq++){ flux[iwp-1+eq*lxz2ldimlelt +id]=flux[iwp-1+eq*lxz2ldimlelt +id]-graduf[eq*lxz2ldimlelt+id]; } //igu_dirichlet(flxscr,gdudxk) char cb2= cbc[e*18+iface]; if(cb2 !='E' && cb2 !='P'){ for (int eq=0;eq<toteq;eq++){ graduf[eq*lxz2ldimlelt+id] =graduf[eq*lxz2ldimlelt+id]*2.0; flux[iwp-1+eq*lxz2ldimlelt +id]=graduf[eq*lxz2ldimlelt+id]; } } //bcflux(flxscr,gdudxk,wminus) if (cbc[e*18+iface]!='E'&& cbc[e*18+iface]!='P'){ char cb1= cbc[e*18+iface]; if(cb1=='I'){ flux[iwp-1+id]=0; for (int eq=1;eq<ldim+1;eq++){ flux[iwp-1+eq*lxz2ldimlelt +id]=graduf[eq*lxz2ldimlelt+id]; } flux[iwp-1+(toteq-1)*lxz2ldimlelt +id]=0; //a5adiabatic_wall(flux(1,1,1,toteq),f,e,agradu,qminus) //call a51dUadia(flxscr,f,e,dU,wstate) double dU1x=graduf[0*lxz2ldimlelttoteq+0*lxz2ldimlelt+id]; double dU2x=graduf[0*lxz2ldimlelttoteq+1*lxz2ldimlelt+id]; double dU3x=graduf[0*lxz2ldimlelttoteq+2*lxz2ldimlelt+id]; double dU4x=graduf[0*lxz2ldimlelttoteq+3*lxz2ldimlelt+id]; double dU5x=graduf[0*lxz2ldimlelttoteq+4*lxz2ldimlelt+id]; double dU1y=graduf[1*lxz2ldimlelttoteq+0*lxz2ldimlelt+id]; double dU2y=graduf[1*lxz2ldimlelttoteq+1*lxz2ldimlelt+id]; double dU3y=graduf[1*lxz2ldimlelttoteq+2*lxz2ldimlelt+id]; double dU4y=graduf[1*lxz2ldimlelttoteq+3*lxz2ldimlelt+id]; double dU5y=graduf[1*lxz2ldimlelttoteq+4*lxz2ldimlelt+id]; double dU1z=graduf[2*lxz2ldimlelttoteq+0*lxz2ldimlelt+id]; double dU2z=graduf[2*lxz2ldimlelttoteq+1*lxz2ldimlelt+id]; double dU3z=graduf[2*lxz2ldimlelttoteq+2*lxz2ldimlelt+id]; double dU4z=graduf[2*lxz2ldimlelttoteq+3*lxz2ldimlelt+id]; double dU5z=graduf[2*lxz2ldimlelttoteq+4*lxz2ldimlelt+id]; double rho =flux[iwm-1+(irho-1)*lxz2ldimlelt +id]; double cv =flux[iwm-1+(icvf-1)*lxz2ldimlelt +id]/rho; double lambda=flux[iwm-1+(ilamf-1)*lxz2ldimlelt +id]; double mu =flux[iwm-1+(imuf-1)*lxz2ldimlelt +id]; double K =0.0;// ADIABATIC HARDCODING double u1 =flux[iwm-1+(iux-1)*lxz2ldimlelt +id]; double u2 =flux[iwm-1+(iuy-1)*lxz2ldimlelt +id]; double u3 =flux[iwm-1+(iuz-1)*lxz2ldimlelt +id]; double E =flux[iwm-1+(iu5-1)*lxz2ldimlelt +id]/rho; double lambdamu=lambda+mu; double kmcvmu=K-cv*mu; double 
t_flux=(K*dU5x+cv*lambda*u1*dU4z-kmcvmu*u3*dU4x+cv*lambda*u1*dU3y-kmcvmu*u2*dU3x+cv*mu*u3*dU2z+cv*mu*u2*dU2y+(cv*lambda-K+2*cv*mu)*u1*dU2x-cv*lambdamu*u1*u3*dU1z-cv*lambdamu*u1*u2*dU1y+(K*u3*u3-cv*mu*u3*u3+K*u2*u2-cv*mu*u2*u2-cv*lambda*u1*u1+K*u1*u1-2*cv*mu*u1*u1-E*K)*dU1x)/(cv*rho); flux[iwp-1+(toteq-1)*lxz2ldimlelt +id]=flux[iwp-1+(toteq-1)*lxz2ldimlelt +id]+t_flux*unx[id]; //a52dUadia_gpu(flux,f,ie,dU,wstate) t_flux=(K*dU5y+cv*lambda*u2*dU4z-kmcvmu*u3*dU4y+cv*mu*u3*dU3z+(cv*lambda-K+2*cv*mu)*u2*dU3y+cv*mu*u1*dU3x-kmcvmu*u1*dU2y+cv*lambda*u2*dU2x-cv*lambdamu*u2*u3*dU1z+(K*u3*u3-cv*mu*u3*u3-cv*lambda*u2*u2+K*u2*u2-2*cv*mu*u2*u2+K*u1*u1-cv*mu*u1*u1-E*K)*dU1y-cv*lambdamu*u1*u2*dU1x)/(cv*rho); flux[iwp-1+(toteq-1)*lxz2ldimlelt +id]=flux[iwp-1+(toteq-1)*lxz2ldimlelt +id]+t_flux*uny[id]; if(if3d){ t_flux=(K*(dU5z-E*dU1z)+cv*u3*(lambda*dU4z+2*mu*dU4z+lambda*dU3y+lambda*dU2x)-K*u3*dU4z+cv*mu*u2*(dU4y+dU3z)+cv*mu*u1*(dU4x+dU2z)-K*u2*dU3z-K*u1*dU2z-cv*(lambda+2*mu)*u3*u3*dU1z+K*u3*u3*dU1z+K*u2*u2*dU1z-cv*mu*u2*u2*dU1z+K*u1*u1*dU1z-cv*mu*u1*u1*dU1z-cv*(lambda+mu)*u2*u3*dU1y-cv*(lambda+mu)*u1*u3*dU1x)/(cv*rho); flux[iwp-1+(toteq-1)*lxz2ldimlelt +id]=flux[iwp-1+(toteq-1)*lxz2ldimlelt +id]+t_flux*unz[id]; } } else{ for (int eq=0;eq<toteq;eq++){ flux[iwp-1+eq*lxz2ldimlelt+id]=0; } } } //chsign(flxscr,ntot) for (int eq=0;eq<toteq;eq++){ flux[iwp-1+eq*lxz2ldimlelt +id]=-1*flux[iwp-1+eq*lxz2ldimlelt +id]; } } } extern "C" void igu_cmt_gpu_wrapper1_(int *glbblockSize2,double *d_flux,double *d_graduf,int *toteq,int *iwp,int *lx1,int *ly1,int *lz1,int *ldim,int *nelt,int *lelt){ #ifdef DEBUGPRINT hipError_t code1 = hipPeekAtLastError(); printf("CUDA: Start igu_cmt_gpu_wrapper1 cuda status: %s\n",hipGetErrorString(code1)); printf("CUDA: Start igu_cmt_gpu_wrapper1 values toteq=%d ,iwp=%d ,lx1=%d ,ly1=%d ,lz1=%d ,ldim=%d ,nelt=%d ,lelt=%d ,\n",toteq[0],iwp[0],lx1[0],ly1[0],lz1[0],ldim[0],nelt[0],lelt[0]); #endif int lxz2ldim = lx1[0]*lz1[0]*2*ldim[0]; int ntot = nelt[0]*lxz2ldim; int lxz2ldimlelt= lxz2ldim*lelt[0]; int blockSize = glbblockSize2[0], gridSize; gridSize = (int)ceil((float)ntot/blockSize); hipLaunchKernelGGL(( igu_cmt_gpu_wrapper1_gpu_kernel), dim3(gridSize), dim3(blockSize), 0, 0, d_flux,d_graduf,ntot,lxz2ldimlelt,toteq[0],iwp[0]); #ifdef DEBUGPRINT hipDeviceSynchronize(); hipError_t code2 = hipPeekAtLastError(); printf("CUDA: End igu_cmt_gpu_wrapper1 cuda status: %s\n",hipGetErrorString(code2)); #endif } extern "C" void igu_cmt_gpu_wrapper2_(int *glbblockSize2,double *d_flux,double *d_graduf,char *d_cbc,double *d_unx,double *d_uny,double *d_unz,int *toteq,int *iwp,int *iwm,int *ldim,int *irho,int *icvf,int *ilamf,int *imuf,int *iux,int *iuy,int *iuz,int *iu5,int *if3d,int *lx1,int *ly1,int *lz1,int *nelt,int *lelt){ #ifdef DEBUGPRINT hipError_t code1 = hipPeekAtLastError(); printf("CUDA: Start igu_cmt_gpu_wrapper2 cuda status: %s\n",hipGetErrorString(code1)); printf("CUDA: Start igu_cmt_gpu_wrapper2 values toteq =%d ,iwp =%d,iwm =%d,ldim =%d,irho =%d,icvf =%d,ilamf =%d,imuf =%d,iux =%d,iuy =%d,iuz =%d,iu5 =%d,if3d =%d,lx1 =%d,ly1 =%d,lz1 =%d,nelt =%d,lelt =%d \n", toteq[0],iwp[0],iwm[0],ldim[0],irho[0],icvf[0],ilamf[0],imuf[0],iux[0],iuy[0],iuz[0],iu5[0],if3d[0],lx1[0],ly1[0],lz1[0],nelt[0],lelt[0] ); #endif int lxz=lx1[0]*lz1[0]; int a2ldim= 2*ldim[0]; int lxz2ldim = lxz*2*ldim[0]; int ntot = nelt[0]*lxz2ldim; int lxz2ldimlelt =lxz2ldim*lelt[0]; int lxz2ldimlelttoteq= lxz2ldimlelt*toteq[0]; int blockSize = glbblockSize2[0], gridSize; gridSize = (int)ceil((float)ntot/blockSize); 
hipLaunchKernelGGL(( igu_cmt_gpu_wrapper2_gpu_kernel), dim3(gridSize), dim3(blockSize), 0, 0, d_flux,d_graduf,d_cbc,d_unx,d_uny,d_unz,ntot,lxz2ldim,lxz2ldimlelt, lxz2ldimlelttoteq,toteq[0],iwp[0],iwm[0],ldim[0],irho[0],icvf[0],ilamf[0],imuf[0],iux[0],iuy[0],iuz[0],iu5[0],if3d[0],lxz,a2ldim); #ifdef DEBUGPRINT hipDeviceSynchronize(); hipError_t code2 = hipPeekAtLastError(); printf("CUDA: End igu_cmt_gpu_wrapper2 cuda status: %s\n",hipGetErrorString(code2)); #endif }
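/* Illustrative host-side sketch (untested; the sizes, offsets and launch block size below are
 * assumptions, not values taken from the solver): shows how igu_cmt_gpu_wrapper1_ above expects
 * its arguments to be laid out. In the solver the flux pointer appears to be the fatface
 * workspace addressed through the 1-based Fortran offsets iwp/iwm; here it is sized just large
 * enough for the toteq face fields the kernel touches. */
static void example_igu_cmt_wrapper1_call()
{
  int lx1 = 8, ly1 = 8, lz1 = 8, ldim = 3;   /* assumed polynomial order / dimension */
  int nelt = 32, lelt = 32, toteq = 5;       /* assumed element and equation counts  */
  int iwp = 1, glbblockSize2 = 256;          /* assumed workspace offset and block size */

  int lxz2ldim     = lx1 * lz1 * 2 * ldim;   /* face points per element (all 2*ldim faces) */
  int lxz2ldimlelt = lxz2ldim * lelt;

  double *d_flux, *d_graduf;
  hipMalloc((void **)&d_flux,   (size_t)(iwp - 1 + toteq * lxz2ldimlelt) * sizeof(double));
  hipMalloc((void **)&d_graduf, (size_t)(toteq * lxz2ldimlelt) * sizeof(double));
  hipMemset(d_flux,   0, (size_t)(iwp - 1 + toteq * lxz2ldimlelt) * sizeof(double));
  hipMemset(d_graduf, 0, (size_t)(toteq * lxz2ldimlelt) * sizeof(double));

  /* copies graduf into the flux workspace at iwp and halves graduf, as the kernel does */
  igu_cmt_gpu_wrapper1_(&glbblockSize2, d_flux, d_graduf, &toteq, &iwp,
                        &lx1, &ly1, &lz1, &ldim, &nelt, &lelt);
  hipDeviceSynchronize();

  hipFree(d_flux);
  hipFree(d_graduf);
}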
8a6343ce9da4e92ebf26eaacb67a58891fcc8d72.cu
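/*
 * CMT face-flux CUDA kernels: fillq/faceu gathers of primitive and conserved
 * variables onto element faces, AUSM inviscid flux evaluation (optionally on a
 * finer dealiased grid via the map_faced batched GEMMs), accumulation of the
 * face fluxes back into the volume residual (surface_integral_full), and the
 * igu_cmt viscous / boundary-condition flux wrappers.
 */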
#include <stdio.h> #include <cuda_runtime_api.h> #include <cublas.h> #include "nvml.h" // includes, project //#include "magma.h" #include "cuda_multi_gemm_unif.cu" //#define DEBUGPRINT 0 __global__ void fluxes_full_field_gpu_kernel_fillq(double *vtrans, double *vx, double *vy, double *vz, double *pr, double *t, double *csound, double *phig, double *vdiff, double *fatface,int irho, int iux, int iuy, int iuz, int ipr, int ithm, int isnd, int iph, int icvf, int icpf, int imuf, int ikndf, int ilamf, int iwm, int iwp, int icv, int icp, int imu, int iknd, int ilam,int *iface_flux, int nnel, int nxz2ldim, int lxyz,int lxz, int ivarcoef,int leltlxyz ){ int id = blockIdx.x*blockDim.x+threadIdx.x; if(id<nnel){ int e = id/nxz2ldim; int j = id % nxz2ldim; //fillq vtrans int i = iface_flux[id]-1; // because forgot to -1 in the follows fatface[(iwp-1)+id] = vtrans[e*lxyz+i]; // following works because ndg_face is same as nnel. Talk with Dr. Tania. adeesha fatface[(iwm-1)+(irho-1)*ivarcoef+id] = fatface[(iwp-1)+id]; //fillq vx fatface[(iwp-1)+id] = vx[e*lxyz+i]; fatface[(iwm-1)+(iux-1)*ivarcoef+id] = fatface[(iwp-1)+id]; //fillq vy fatface[(iwp-1)+id] = vy[e*lxyz+i]; fatface[(iwm-1)+(iuy-1)*ivarcoef+id] = fatface[(iwp-1)+id]; //fillq vz fatface[(iwp-1)+id] = vz[e*lxyz+i]; fatface[(iwm-1)+(iuz-1)*ivarcoef+id] = fatface[(iwp-1)+id]; //fillq pr fatface[(iwp-1)+id] = pr[e*lxyz+i]; fatface[(iwm-1)+(ipr-1)*ivarcoef+id] = fatface[(iwp-1)+id]; //fillq t fatface[(iwp-1)+id] = t[e*lxyz+i]; fatface[(iwm-1)+(ithm-1)*ivarcoef+id] = fatface[(iwp-1)+id]; //fillq csound fatface[(iwp-1)+id] = csound[e*lxyz+i]; fatface[(iwm-1)+(isnd-1)*ivarcoef+id] = fatface[(iwp-1)+id]; //fillq phig fatface[(iwp-1)+id] = phig[e*lxyz+i]; fatface[(iwm-1)+(iph-1)*ivarcoef+id] = fatface[(iwp-1)+id]; //fillq vtrans icv fatface[(iwp-1)+id] = vtrans[(icv-1)*leltlxyz+e*lxyz+i]; fatface[(iwm-1)+(icvf-1)*ivarcoef+id] = fatface[(iwp-1)+id]; //fillq vtrans icp fatface[(iwp-1)+id] = vtrans[(icp-1)*leltlxyz+e*lxyz+i]; fatface[(iwm-1)+(icpf-1)*ivarcoef+id] = fatface[(iwp-1)+id]; //fillq vdiff imu fatface[(iwp-1)+id] = vdiff[(imu-1)*leltlxyz+e*lxyz+i]; fatface[(iwm-1)+(imuf-1)*ivarcoef+id] = fatface[(iwp-1)+id]; //fillq vdiff iknd fatface[(iwp-1)+id] = vdiff[(iknd-1)*leltlxyz+e*lxyz+i]; fatface[(iwm-1)+(ikndf-1)*ivarcoef+id] = fatface[(iwp-1)+id]; //fillq vdiff ilam fatface[(iwp-1)+id] = vdiff[(ilam-1)*leltlxyz+e*lxyz+i]; fatface[(iwm-1)+(ilam-1)*ivarcoef+id] = fatface[(iwp-1)+id]; // if(id<10){ // printf("$$$ pfc i=%d, vtrans[%d]=%lf,vx[%d]=%lf,vy[%d]=%lf,vz[%d]=%lf,pr[%d]=%lf,t[%d]=%lf,csound[%d]=%lf,phig[%d]=%lf,vtrans[%d]=%lf,vdiff[%d]=%lf,vtrans[%d]=%lf \n",i,e*lxyz+i,vtrans[e*lxyz+i],e*lxyz+i,vx[e*lxyz+i],e*lxyz+i,vy[e*lxyz+i],e*lxyz+i,vz[e*lxyz+i],e*lxyz+i,pr[e*lxyz+i],e*lxyz+i,t[e*lxyz+i],e*lxyz+i,csound[e*lxyz+i],e*lxyz+i,phig[e*lxyz+i],e*lxyz+i,vtrans[e*lxyz+i],e*lxyz+i,vdiff[e*lxyz+i],(icp-1)*leltlxyz+e*lxyz+i,vtrans[(icp-1)*leltlxyz+e*lxyz+i]); // } } } __global__ void fluxes_full_field_gpu_kernel_faceu(double *fatface, double *u,int i_cvars, int nneltoteq, int nnel, int toteq, int lxyz, int iwm, int iph,int *iface_flux,int nxz2ldim,int ivarcoef){ int id = blockIdx.x*blockDim.x+threadIdx.x; if(id<nneltoteq){ int ivar = id/nnel; int e_n = id%(nnel);; int e = e_n/nxz2ldim; int i = iface_flux[e_n]; //full2face_cmt fatface[(i_cvars-1)+id] =u[e*toteq*lxyz+ivar*lxyz+i-1]; fatface[(i_cvars-1)+id]= fatface[(i_cvars-1)+id]/fatface[iwm-1+ivarcoef*(iph-2)+id];// invcol2 // check with Dr.Tania. above functions may not work properly. 
} } extern "C" void fluxes_full_field_gpu_wrapper_(int *glbblockSize2,double *d_fatface,double *d_vtrans,double *d_u, double *d_vx, double *d_vy, double *d_vz, double *d_pr, double *d_t, double *d_csound, double *d_phig, double *d_vdiff, int *irho, int *iux, int *iuy, int *iuz, int *ipr, int *ithm, int *isnd, int *iph, int *icvf, int *icpf, int *imuf, int *ikndf, int *ilamf, int *iwm, int *iwp, int *icv, int *icp, int *imu, int *iknd, int *ilam,int *d_iface_flux, int *nelt, int *lx1, int *ly1, int *lz1, int *ldim, int *lelt, int *i_cvars,int *toteq, int *nqq){ #ifdef DEBUGPRINT cudaDeviceSynchronize(); cudaError_t code1 = cudaPeekAtLastError(); // if (code1 != cudaSuccess){ printf("CUDA: Start fluxes_full_field_gpu_wrapper cuda status: %s\n",cudaGetErrorString(code1)); printf("CUDA: Start fluxes_full_field_gpu_wrapper values irho = %d, iux= %d,iuy= %d,iuz= %d,ipr= %d,ithm= %d,isnd= %d,iph= %d, icvf= %d,icpf= %d,imuf= %d,ikndf= %d,ilamf= %d,iwm= %d,iwp= %d,icv= %d, icp= %d,imu= %d,iknd= %d,ilam= %d,nelt= %d,lx1= %d,ly1= %d,lz1= %d,ldim= %d,lelt= %d,i_cvars= %d,toteq= %d,nqq=%d \n",irho[0],iux[0],iuy[0],iuz[0],ipr[0],ithm[0],isnd[0],iph[0],icvf[0],icpf[0],imuf[0],ikndf[0],ilamf[0],iwm[0],iwp[0],icv[0],icp[0],imu[0],iknd[0],ilam[0],nelt[0],lx1[0],ly1[0],lz1[0],ldim[0],lelt[0],i_cvars[0],toteq[0],nqq[0]); // } #endif int nxz2ldim = lx1[0]*lz1[0]*2*ldim[0]; int lxyz = lx1[0]*ly1[0]*lz1[0]; int leltlxyz = lelt[0]*lxyz; int lxz = lx1[0]*lz1[0]; int nnel = nelt[0]*nxz2ldim; int ivarcoef = nxz2ldim*lelt[0]; int nnelnqq = nnel*nqq[0]; int blockSize = glbblockSize2[0], gridSize; gridSize = (int)ceil((float)nnel/blockSize); fluxes_full_field_gpu_kernel_fillq<<<gridSize, blockSize>>>(d_vtrans, d_vx, d_vy, d_vz,d_pr, d_t, d_csound,d_phig,d_vdiff,d_fatface,irho[0],iux[0], iuy[0], iuz[0], ipr[0],ithm[0],isnd[0],iph[0],icvf[0],icpf[0],imuf[0],ikndf[0],ilamf[0],iwm[0],iwp[0],icv[0],icp[0],imu[0],iknd[0],ilam[0],d_iface_flux,nnel, nxz2ldim,lxyz,lxz,ivarcoef, leltlxyz); #ifdef DEBUGPRINT cudaDeviceSynchronize(); code1 = cudaPeekAtLastError(); printf("CUDA: fluxes_full_field_gpu_wrapper after fillq cuda status: %s\n",cudaGetErrorString(code1)); #endif gridSize = (int)ceil((float)nnel*toteq[0]/blockSize); fluxes_full_field_gpu_kernel_faceu<<<gridSize, blockSize>>>(d_fatface,d_u,i_cvars[0],nnel*toteq[0],nnel, toteq[0], lxyz,iwm[0],iph[0],d_iface_flux,nxz2ldim,ivarcoef); #ifdef DEBUGPRINT cudaDeviceSynchronize(); cudaError_t code2 = cudaPeekAtLastError(); // if (code1 != cudaSuccess){ printf("CUDA: End fluxes_full_field_gpu_wrapper cuda status: %s\n",cudaGetErrorString(code2)); #endif } __global__ void inviscidFlux_gpu_kernel1(double *jaco_c,double *area,double *wghtc,int ntot){ int id = blockIdx.x*blockDim.x+threadIdx.x; if(id<ntot){ jaco_c[id]= area[id]/wghtc[id]; } } __global__ void inviscidFlux_gpu_kernel2(double *jaco_f,double *wghtf,int ntotd){ int id = blockIdx.x*blockDim.x+threadIdx.x; if(id<ntotd){ jaco_f[id]=jaco_f[id]*wghtf[id]; } } __global__ void inviscidFlux_gpu_kernel3(double *unx,double *uny,double *unz,double *nx,double *ny,double *nz,double *rl,double *ul,double *vl,double *wl,double *pl,double *tl,double *al,double *cpl,double *rr,double *ur,double *vr,double *wr,double *pr,double *tr,double *ar,double *cpr,double *phl,double *jaco_f,double *fatface,double *area,int iwm,int iwp,int irho,int iux,int iuy,int iuz,int ipr,int ithm,int isnd,int icpf,int iph,int lxz2ldimlelt,int ntot){ int id = blockIdx.x*blockDim.x+threadIdx.x; if(id<ntot){ nx[id]=unx[id]; ny[id]=uny[id]; nz[id]=unz[id]; 
rl[id]=fatface[iwm-1+(irho-1)*lxz2ldimlelt+id]; // send the calculate array index for the optimizations later. adeesha. ul[id]=fatface[iwm-1+(iux-1)*lxz2ldimlelt+id]; vl[id]=fatface[iwm-1+(iuy-1)*lxz2ldimlelt+id]; wl[id]=fatface[iwm-1+(iuz-1)*lxz2ldimlelt+id]; pl[id]=fatface[iwm-1+(ipr-1)*lxz2ldimlelt+id]; tl[id]=fatface[iwm-1+(ithm-1)*lxz2ldimlelt+id]; al[id]=fatface[iwm-1+(isnd-1)*lxz2ldimlelt+id]; cpl[id]=fatface[iwm-1+(icpf-1)*lxz2ldimlelt+id]; rr[id]=fatface[iwp-1+(irho-1)*lxz2ldimlelt+id]; ur[id]=fatface[iwp-1+(iux-1)*lxz2ldimlelt+id]; vr[id]=fatface[iwp-1+(iuy-1)*lxz2ldimlelt+id]; wr[id]=fatface[iwp-1+(iuz-1)*lxz2ldimlelt+id]; pr[id]=fatface[iwp-1+(ipr-1)*lxz2ldimlelt+id]; tr[id]=fatface[iwp-1+(ithm-1)*lxz2ldimlelt+id]; ar[id]=fatface[iwp-1+(isnd-1)*lxz2ldimlelt+id]; cpr[id]=fatface[iwp-1+(icpf-1)*lxz2ldimlelt+id]; phl[id]=fatface[iwp-1+(iph-1)*lxz2ldimlelt+id]; jaco_f[id]=area[id]; } } __global__ void inviscidFlux_gpu_kernel4(double *flx,double *phl,int ntotd){ int id = blockIdx.x*blockDim.x+threadIdx.x; if(id<ntotd){ flx[id]=flx[id]*phl[id]; } } __global__ void Ausm_flux(int neq, int ntotd, double *nx, double *ny, double *nz, double *nm, double *fs, double *rl, double *ul, double *vl, double *wl, double *pl, double *al, double *tl, double *rr, double *ur, double *vr, double *wr, double *pr, double *ar, double *tr, double *flx, double *cpl, double *cpr){ int i = blockIdx.x*blockDim.x+threadIdx.x; //ntotd = nel * nfaces * nxzd if(i<ntotd){ cpl[i]=cpl[i]/rl[i];//invcol2 cpr[i]=cpr[i]/rl[i];//invcol2 fs[i] = 0;// it is 0 in cmtbone but can be changed double af,mf,mfa,mfm,mfp,ml,mla,mlp,mr,mra,mrm,pf,ql,qr,wtl,wtr,Hl,Hr; Hl = cpl[i]*tl[i] + 0.5*(ul[i]*ul[i]+vl[i]*vl[i]+wl[i]*wl[i]); Hr = cpr[i]*tr[i] + 0.5*(ur[i]*ur[i]+vr[i]*vr[i]+wr[i]*wr[i]); ql = ul[i]*nx[i] + vl[i]*ny[i] + wl[i]*nz[i] - fs[i]; qr = ur[i]*nx[i] + vr[i]*ny[i] + wr[i]*nz[i] - fs[i]; af = 0.5*(al[i] + ar[i]); ml = ql/af; mla = abs(ml); mr = qr/af; mra = abs(mr); if(mla <= 1.0){ mlp = 0.25*pow((ml+1.0),2) + 0.125*pow((ml*ml-1.0),2); wtl = 0.25*pow(ml+1.0,2)*(2.0-ml) + 0.1875*ml*pow(ml*ml-1.0,2); } else{ mlp = 0.5*(ml+mla); wtl = 0.5*(1.0+ml/mla); } if(mra <= 1.0){ mrm = -0.25*pow((mr-1.0),2) - 0.125*pow((mr*mr-1.0),2); wtr = 0.25*pow(mr-1.0,2)*(2.0+mr) - 0.1875*mr*pow(mr*mr-1.0,2); } else{ mrm = 0.5*(mr-mra); wtr = 0.5*(1.0-mr/mra); } mf = mlp + mrm; mfa = abs(mf); mfp = 0.5*(mf+mfa); mfm = 0.5*(mf-mfa); pf = wtl*pl[i] + wtr*pr[i]; //compute fluxes flx[i] = (af*(mfp*rl[i] + mfm*rr[i])) * nm[i]; flx[1*ntotd+i] = (af*(mfp*rl[i]*ul[i] + mfm*rr[i]*ur[i])+pf*nx[i]) * nm[i]; flx[2*ntotd+i] = (af*(mfp*rl[i]*vl[i] + mfm*rr[i]*vr[i])+pf*ny[i]) * nm[i]; flx[3*ntotd+i] = (af*(mfp*rl[i]*wl[i] + mfm*rr[i]*wr[i])+pf*nz[i]) * nm[i]; flx[4*ntotd+i] = (af*(mfp*rl[i]*Hl + mfm*rr[i]*Hr)+pf*fs[i]) * nm[i]; } } void map_faced(double *d_jgl, double *d_jgt, double *ju, double *u, double *d_w, int nx1, int nxd, int fdim, int nelt, int nfaces, int idir){ #ifdef DEBUGPRINT cudaDeviceSynchronize(); cudaError_t code1 = cudaPeekAtLastError(); printf("CUDA: Start map_faced cuda status: %s\n",cudaGetErrorString(code1)); #endif cudaStream_t stream; cudaStreamCreate( &stream ); const double alpha = 1; const double beta = 0; int nx1_2 = pow(nx1,2); int nxd_2 = pow(nxd,2); int batchSize = nelt*nfaces; int successvalue=-1; if(idir==0){ int blockSize = 1024, gridSize; //calc w(nxd,nx1) = jgl(nxd*nx1) * u(nx1,nx1) in fortran //calc w(nx1,nxd) = u(nx1,nx1) * jgl(nx1,nxd) in C gridSize = (int)ceil((float)nelt*nfaces*nx1*nxd/blockSize); successvalue = 
cuda_multi_gemm_unif(stream, 'N', 'N', nxd, nx1, nx1, &alpha, d_jgl, nxd, 0, u, nx1, nx1_2, &beta, d_w, nxd, nx1*nxd, batchSize, gridSize); #ifdef DEBUGPRINT cudaDeviceSynchronize(); code1 = cudaPeekAtLastError(); printf("CUDA: map_faced after 1st multigemm in idir=0 successvalue =%d and cuda status: %s\n",successvalue,cudaGetErrorString(code1)); #endif //calc ju(nxd,nxd) = w(nxd,nx1) * jgt(nx1,nxd) in fortran //calc ju(nxd,nxd) = jgt(nxd,nx1) * w(nx1,nxd) gridSize = (int)ceil((float)nelt*nfaces*nxd*nxd/blockSize); successvalue= cuda_multi_gemm_unif(stream, 'N', 'N', nxd, nx1, nxd, &alpha, d_w, nxd, nx1*nxd, d_jgt, nx1, 0, &beta, ju, nxd, nxd_2, batchSize, gridSize); #ifdef DEBUGPRINT cudaDeviceSynchronize(); code1 = cudaPeekAtLastError(); printf("CUDA: map_faced after 2nd multigemm in idir=0 successvalue =%d and cuda status: %s\n",successvalue,cudaGetErrorString(code1)); #endif } else{ int blockSize = 1024, gridSize; //calc w(nx1,nxd) = jgt(nx1,nxd) * u(nxd,nxd) in fortran //calc w(nxd,nx1) = u(nxd,nxd) * jgt(nxd,nx1) in C gridSize = (int)ceil((float)nelt*nfaces*nx1*nxd/blockSize); successvalue= cuda_multi_gemm_unif(stream, 'N', 'N', nx1, nxd, nxd, &alpha, d_jgt, nx1, 0, u, nxd, nxd_2, &beta, d_w, nx1, nx1*nxd, batchSize, gridSize); #ifdef DEBUGPRINT cudaDeviceSynchronize(); code1 = cudaPeekAtLastError(); printf("CUDA: map_faced after 1st multigemm in idir!=0 successvalue =%d and cuda status: %s\n",successvalue,cudaGetErrorString(code1)); #endif //calc ju(nx1,nx1) = w(nx1,nxd) * jgl(nxd,nx1) in fortran //calc ju(nx1,nx1) = jgl(nx1,nxd) * w(nxd,nx1) in C gridSize = (int)ceil((float)nelt*nfaces*nx1*nx1/blockSize); successvalue= cuda_multi_gemm_unif(stream, 'N', 'N', nx1, nxd, nx1, &alpha, d_w, nx1, nx1*nxd, d_jgl, nxd, 0, &beta, ju, nx1, nx1_2, batchSize, gridSize); #ifdef DEBUGPRINT cudaDeviceSynchronize(); code1 = cudaPeekAtLastError(); printf("CUDA: map_faced after 2nd multigemm in idir!=0 successvalue =%d and cuda status: %s\n",successvalue,cudaGetErrorString(code1)); #endif } cudaStreamDestroy(stream); } extern "C" void inviscidflux_gpu_wrapper_(int *glbblockSize2,double *jgl,double *jgt,double *d_unx,double *d_uny,double *d_unz,double *d_area,double *d_fatface,double *d_wghtc,double *d_wghtf,int *irho,int *iux,int *iuy,int *iuz,int *ipr,int *ithm,int *isnd,int *icpf,int *iph,int *iwm,int *iwp,int *iflx,int *ldim,int *lxd,int *lzd,int *nelt,int *lelt,int *toteq,int *lx1,int *ly1,int *lz1 ){ #ifdef DEBUGPRINT cudaDeviceSynchronize(); cudaError_t code1 = cudaPeekAtLastError(); printf("CUDA: Start inviscidFlux_gpu_wrapper cuda status: %s\n",cudaGetErrorString(code1)); printf("CUDA: Start inviscidFlux_gpu_wrapper values irho = %d, iux= %d,iuy= %d,iuz= %d,ipr= %d,ithm= %d,isnd= %d,iph= %d,icpf= %d,iwm= %d,iwp= %d,nelt= %d,lx1= %d,ly1= %d,lz1= %d,ldim= %d,lelt= %d,toteq= %d \n",irho[0],iux[0],iuy[0],iuz[0],ipr[0],ithm[0],isnd[0],iph[0],icpf[0],iwm[0],iwp[0],nelt[0],lx1[0],ly1[0],lz1[0],ldim[0],lelt[0],toteq[0]); #endif int fdim = ldim[0]-1; int nfaces = 2*ldim[0]; int lxz=lx1[0]*lz1[0]; int ntot= nelt[0]*nfaces*lxz; int ntotd = nelt[0] * nfaces * lxd[0]*lzd[0]; int lxd_3= lxd[0]*lxd[0]*lxd[0]; int nel=nelt[0]; int lxz2ldim=lxz*nfaces; int lxz2ldimlelt=lxz2ldim*lelt[0]; double *d_jgl; double *d_jgt; cudaMalloc(&d_jgl, lxd_3*sizeof(double)); cudaMalloc(&d_jgt, lxd_3*sizeof(double)); cudaMemcpy(d_jgl, jgl,lxd_3*sizeof(double), cudaMemcpyHostToDevice); cudaMemcpy(d_jgt, jgt,lxd_3*sizeof(double), cudaMemcpyHostToDevice); double *d_w; double *d_nx; double *d_ny; double *d_nz; double *d_rl; double
*d_ul; double *d_wl; double *d_vl; double *d_pl; double *d_tl; double *d_al; double *d_cpl; double *d_rr; double *d_ur; double *d_wr; double *d_vr; double *d_pr; double *d_tr; double *d_ar; double *d_cpr; double *d_jaco_c; double *d_jaco_f; double *d_phl; double *d_fs; double *d_flx; cudaMalloc(&d_w, ntotd*sizeof(double)); cudaMalloc(&d_nx, ntotd*sizeof(double)); cudaMalloc(&d_ny, ntotd*sizeof(double)); cudaMalloc(&d_nz, ntotd*sizeof(double)); cudaMalloc(&d_rl, ntotd*sizeof(double)); cudaMalloc(&d_ul, ntotd*sizeof(double)); cudaMalloc(&d_wl, ntotd*sizeof(double)); cudaMalloc(&d_vl, ntotd*sizeof(double)); cudaMalloc(&d_pl, ntotd*sizeof(double)); cudaMalloc(&d_tl, ntotd*sizeof(double)); cudaMalloc(&d_al, ntotd*sizeof(double)); cudaMalloc(&d_cpl, ntotd*sizeof(double)); cudaMalloc(&d_rr, ntotd*sizeof(double)); cudaMalloc(&d_ur, ntotd*sizeof(double)); cudaMalloc(&d_vr, ntotd*sizeof(double)); cudaMalloc(&d_wr, ntotd*sizeof(double)); cudaMalloc(&d_pr, ntotd*sizeof(double)); cudaMalloc(&d_tr, ntotd*sizeof(double)); cudaMalloc(&d_ar, ntotd*sizeof(double)); cudaMalloc(&d_cpr, ntotd*sizeof(double)); cudaMalloc(&d_jaco_c, ntot*sizeof(double)); cudaMalloc(&d_jaco_f, ntotd*sizeof(double)); cudaMalloc(&d_phl, ntotd*sizeof(double)); cudaMalloc(&d_fs, ntotd*sizeof(double)); cudaMalloc(&d_flx, ntotd*toteq[0]*sizeof(double)); int totpts =lxz2ldimlelt; int blockSize = glbblockSize2[0], gridSize; if(lxd[0]>lx1[0]){ map_faced(d_jgl, d_jgt, d_nx, d_unx, d_w, lx1[0], lxd[0], fdim, nel, nfaces, 0); map_faced(d_jgl, d_jgt, d_ny, d_uny, d_w, lx1[0], lxd[0], fdim, nel, nfaces, 0); map_faced(d_jgl, d_jgt, d_nz, d_unz, d_w, lx1[0], lxd[0], fdim, nel, nfaces, 0); map_faced(d_jgl, d_jgt, d_rl, d_fatface+iwm[0]-1+(irho[0]-1)*totpts, d_w, lx1[0], lxd[0], fdim, nel, nfaces, 0); map_faced(d_jgl, d_jgt, d_ul, d_fatface+iwm[0]-1+(iux[0]-1)*totpts, d_w, lx1[0], lxd[0], fdim, nel, nfaces, 0); map_faced(d_jgl, d_jgt, d_vl, d_fatface+iwm[0]-1+(iuy[0]-1)*totpts, d_w, lx1[0], lxd[0], fdim, nel, nfaces, 0); map_faced(d_jgl, d_jgt, d_wl, d_fatface+iwm[0]-1+(iuz[0]-1)*totpts, d_w, lx1[0], lxd[0], fdim, nel, nfaces, 0); map_faced(d_jgl, d_jgt, d_pl, d_fatface+iwm[0]-1+(ipr[0]-1)*totpts, d_w, lx1[0], lxd[0], fdim, nel, nfaces, 0); map_faced(d_jgl, d_jgt, d_tl, d_fatface+iwm[0]-1+(ithm[0]-1)*totpts, d_w, lx1[0], lxd[0], fdim, nel, nfaces, 0); map_faced(d_jgl, d_jgt, d_al, d_fatface+iwm[0]-1+(isnd[0]-1)*totpts, d_w, lx1[0], lxd[0], fdim, nel, nfaces, 0); map_faced(d_jgl, d_jgt, d_cpl, d_fatface+iwm[0]-1+(icpf[0]-1)*totpts, d_w, lx1[0], lxd[0], fdim, nel, nfaces, 0); map_faced(d_jgl, d_jgt, d_rr, d_fatface+iwp[0]-1+(irho[0]-1)*totpts, d_w, lx1[0], lxd[0], fdim, nel, nfaces, 0); map_faced(d_jgl, d_jgt, d_ur, d_fatface+iwp[0]-1+(iux[0]-1)*totpts, d_w, lx1[0], lxd[0], fdim, nel, nfaces, 0); map_faced(d_jgl, d_jgt, d_vr, d_fatface+iwp[0]-1+(iuy[0]-1)*totpts, d_w, lx1[0], lxd[0], fdim, nel, nfaces, 0); map_faced(d_jgl, d_jgt, d_wr, d_fatface+iwp[0]-1+(iuz[0]-1)*totpts, d_w, lx1[0], lxd[0], fdim, nel, nfaces, 0); map_faced(d_jgl, d_jgt, d_pr, d_fatface+iwp[0]-1+(ipr[0]-1)*totpts, d_w, lx1[0], lxd[0], fdim, nel, nfaces, 0); map_faced(d_jgl, d_jgt, d_tr, d_fatface+iwp[0]-1+(ithm[0]-1)*totpts, d_w, lx1[0], lxd[0], fdim, nel, nfaces, 0); map_faced(d_jgl, d_jgt, d_ar, d_fatface+iwp[0]-1+(isnd[0]-1)*totpts, d_w, lx1[0], lxd[0], fdim, nel, nfaces, 0); map_faced(d_jgl, d_jgt, d_cpr, d_fatface+iwp[0]-1+(icpf[0]-1)*totpts, d_w, lx1[0], lxd[0], fdim, nel, nfaces, 0); #ifdef DEBUGPRINT cudaDeviceSynchronize(); code1 = cudaPeekAtLastError(); 
printf("CUDA:inviscidFlux_gpu_wrapper after map_faced cuda status: %s\n",cudaGetErrorString(code1)); #endif gridSize = (int)ceil((float)ntot/blockSize); inviscidFlux_gpu_kernel1<<<gridSize, blockSize>>>(d_jaco_c,d_area,d_wghtc,ntot); map_faced(d_jgl, d_jgt, d_jaco_f, d_jaco_c, d_w, lx1[0], lxd[0], fdim, nel, nfaces, 0); gridSize = (int)ceil((float)ntotd/blockSize); inviscidFlux_gpu_kernel2<<<gridSize, blockSize>>>(d_jaco_f,d_wghtf,ntotd); #ifdef DEBUGPRINT cudaDeviceSynchronize(); code1 = cudaPeekAtLastError(); printf("CUDA: inviscidFlux_gpu_wrapper after kernen2 cuda status: %s\n",cudaGetErrorString(code1)); #endif } else{ gridSize = (int)ceil((float)ntot/blockSize); inviscidFlux_gpu_kernel3<<<gridSize, blockSize>>>(d_unx,d_uny,d_unz,d_nx,d_ny,d_nz,d_rl,d_ul,d_vl,d_wl,d_pl,d_tl,d_al,d_cpl,d_rr,d_ur,d_vr,d_wr,d_pr,d_tr,d_ar,d_cpr,d_phl,d_jaco_f,d_fatface,d_area,iwm[0],iwp[0],irho[0],iux[0],iuy[0],iuz[0],ipr[0],ithm[0],isnd[0],icpf[0],iph[0],lxz2ldimlelt,ntot ); } gridSize = (int)ceil((float)ntotd/blockSize); Ausm_flux<<<gridSize, blockSize>>>(toteq[0],ntotd, d_nx, d_ny, d_nz, d_jaco_f, d_fs, d_rl, d_ul, d_vl, d_wl, d_pl, d_al, d_tl, d_rr, d_ur, d_vr, d_wr, d_pr, d_ar, d_tr, d_flx, d_cpl, d_cpr); if(lxd[0]>lx1[0]){ for(int j=0; j<toteq[0];j++){ gridSize = (int)ceil((float)ntotd/blockSize); inviscidFlux_gpu_kernel4<<<gridSize, blockSize>>>(d_flx+j*ntotd,d_phl,ntotd); map_faced(d_jgl, d_jgt,d_fatface+iflx[0]-1+j*totpts, d_flx+j*ntotd, d_w, lx1[0], lxd[0], fdim, nel, nfaces, 1); } #ifdef DEBUGPRINT cudaDeviceSynchronize(); code1 = cudaPeekAtLastError(); printf("CUDA: inviscidFlux_gpu_wrapper after forloop cuda status: %s\n",cudaGetErrorString(code1)); #endif } else{ printf("else part yet to implemented \n"); } cudaFree(d_jgl); cudaFree(d_jgt); cudaFree(d_nx); cudaFree(d_ny); cudaFree(d_nz); cudaFree(d_rl); cudaFree(d_ul); cudaFree(d_wl); cudaFree(d_vl); cudaFree(d_pl); cudaFree(d_tl); cudaFree(d_al); cudaFree(d_cpl); cudaFree(d_rr); cudaFree(d_ur); cudaFree(d_wr); cudaFree(d_vr); cudaFree(d_pr); cudaFree(d_tr); cudaFree(d_ar); cudaFree(d_cpr); cudaFree(d_jaco_c); cudaFree(d_jaco_f); cudaFree(d_phl); cudaFree(d_fs); cudaFree(d_flx); #ifdef DEBUGPRINT cudaDeviceSynchronize(); code1 = cudaPeekAtLastError(); printf("CUDA: End inviscidFlux_gpu_wrapper cuda status: %s\n",cudaGetErrorString(code1)); #endif } __global__ void surface_integral_full_gpu_kernel(double *iface_flux, double *res1, double *flux, int ntot,int lxyz,int lxyzlelt,int nxz2ldim,int eq ){ int id = blockIdx.x*blockDim.x+threadIdx.x; if(id<ntot){ int e = id/nxz2ldim; //add_face2full_cmt(nel,nx,ny,nz,iface,vols,faces) int newi=iface_flux[id]; res1[eq*lxyzlelt+e*lxyz+newi-1]= res1[eq*lxyzlelt+e*lxyz+newi-1]+flux[id]; } } extern "C" void surface_integral_full_gpu_wrapper_(int *glbblockSize2,double *d_res1,double *d_flux,int *eq,int *ieq,int *nelt, int *lelt, int *toteq,int *lx1,int *ly1,int *lz1,int *ldim,double *d_iface_flux ){ #ifdef DEBUGPRINT cudaDeviceSynchronize(); cudaError_t code1 = cudaPeekAtLastError(); printf("CUDA: Start surface_integral_full_gpu_wrapper cuda status: %s\n",cudaGetErrorString(code1)); printf("CUDA: Start surface_integral_full_gpu_wrapper values eq = %d,ieq= %d,nelt= %d,lelt=%d,toteq= %d,lx1=%d,ly1= %d,lz1= %d,ldim= %d\n",eq[0],ieq[0],nelt[0],lelt[0],toteq[0],lx1[0],ly1[0],lz1[0],ldim[0]); #endif int nxz2ldim = lx1[0]*lz1[0]*2*ldim[0]; int lxyz = lx1[0]*ly1[0]*lz1[0]; int lxyzlelt = lelt[0]*lxyz; int ntot = nelt[0]*nxz2ldim; int ivarcoef = nxz2ldim*lelt[0]; int blockSize = glbblockSize2[0], gridSize; gridSize = 
(int)ceil((float)ntot/blockSize); surface_integral_full_gpu_kernel<<<gridSize, blockSize>>>(d_iface_flux,d_res1,d_flux+ieq[0]-1,ntot,lxyz,lxyzlelt,nxz2ldim,eq[0]); #ifdef DEBUGPRINT cudaDeviceSynchronize(); cudaError_t code2 = cudaPeekAtLastError(); printf("CUDA: End surface_integral_full_gpu_wrapper cuda status: %s\n",cudaGetErrorString(code2)); #endif } __global__ void igu_cmt_gpu_wrapper1_gpu_kernel(double *flux, double *graduf,int ntot,int lxz2ldimlelt,int toteq,int iwp ){ int id = blockIdx.x*blockDim.x+threadIdx.x; if(id<ntot){ for(int eq=0;eq<toteq;eq++){ flux[iwp-1+eq*lxz2ldimlelt +id]=graduf[eq*lxz2ldimlelt+id]; graduf[eq*lxz2ldimlelt+id]=graduf[eq*lxz2ldimlelt+id]*0.5; } } } __global__ void igu_cmt_gpu_wrapper2_gpu_kernel(double *flux,double *graduf,char *cbc,double *unx,double *uny,double *unz,int ntot,int lxz2ldim,int lxz2ldimlelt,int lxz2ldimlelttoteq,int toteq,int iwp,int iwm,int ldim,int irho,int icvf,int ilamf,int imuf,int iux,int iuy,int iuz,int iu5,int if3d,int lxz,int a2ldim ){ int id = blockIdx.x*blockDim.x+threadIdx.x; if(id<ntot){ int e = id/lxz2ldim; int iface= ((id/lxz)%a2ldim); for(int eq=0;eq<toteq;eq++){ flux[iwp-1+eq*lxz2ldimlelt +id]=flux[iwp-1+eq*lxz2ldimlelt +id]-graduf[eq*lxz2ldimlelt+id]; } //igu_dirichlet(flxscr,gdudxk) char cb2= cbc[e*18+iface]; if(cb2 !='E' && cb2 !='P'){ for (int eq=0;eq<toteq;eq++){ graduf[eq*lxz2ldimlelt+id] =graduf[eq*lxz2ldimlelt+id]*2.0; flux[iwp-1+eq*lxz2ldimlelt +id]=graduf[eq*lxz2ldimlelt+id]; } } //bcflux(flxscr,gdudxk,wminus) if (cbc[e*18+iface]!='E'&& cbc[e*18+iface]!='P'){ char cb1= cbc[e*18+iface]; if(cb1=='I'){ flux[iwp-1+id]=0; for (int eq=1;eq<ldim+1;eq++){ flux[iwp-1+eq*lxz2ldimlelt +id]=graduf[eq*lxz2ldimlelt+id]; } flux[iwp-1+(toteq-1)*lxz2ldimlelt +id]=0; //a5adiabatic_wall(flux(1,1,1,toteq),f,e,agradu,qminus) //call a51dUadia(flxscr,f,e,dU,wstate) double dU1x=graduf[0*lxz2ldimlelttoteq+0*lxz2ldimlelt+id]; double dU2x=graduf[0*lxz2ldimlelttoteq+1*lxz2ldimlelt+id]; double dU3x=graduf[0*lxz2ldimlelttoteq+2*lxz2ldimlelt+id]; double dU4x=graduf[0*lxz2ldimlelttoteq+3*lxz2ldimlelt+id]; double dU5x=graduf[0*lxz2ldimlelttoteq+4*lxz2ldimlelt+id]; double dU1y=graduf[1*lxz2ldimlelttoteq+0*lxz2ldimlelt+id]; double dU2y=graduf[1*lxz2ldimlelttoteq+1*lxz2ldimlelt+id]; double dU3y=graduf[1*lxz2ldimlelttoteq+2*lxz2ldimlelt+id]; double dU4y=graduf[1*lxz2ldimlelttoteq+3*lxz2ldimlelt+id]; double dU5y=graduf[1*lxz2ldimlelttoteq+4*lxz2ldimlelt+id]; double dU1z=graduf[2*lxz2ldimlelttoteq+0*lxz2ldimlelt+id]; double dU2z=graduf[2*lxz2ldimlelttoteq+1*lxz2ldimlelt+id]; double dU3z=graduf[2*lxz2ldimlelttoteq+2*lxz2ldimlelt+id]; double dU4z=graduf[2*lxz2ldimlelttoteq+3*lxz2ldimlelt+id]; double dU5z=graduf[2*lxz2ldimlelttoteq+4*lxz2ldimlelt+id]; double rho =flux[iwm-1+(irho-1)*lxz2ldimlelt +id]; double cv =flux[iwm-1+(icvf-1)*lxz2ldimlelt +id]/rho; double lambda=flux[iwm-1+(ilamf-1)*lxz2ldimlelt +id]; double mu =flux[iwm-1+(imuf-1)*lxz2ldimlelt +id]; double K =0.0;// ADIABATIC HARDCODING double u1 =flux[iwm-1+(iux-1)*lxz2ldimlelt +id]; double u2 =flux[iwm-1+(iuy-1)*lxz2ldimlelt +id]; double u3 =flux[iwm-1+(iuz-1)*lxz2ldimlelt +id]; double E =flux[iwm-1+(iu5-1)*lxz2ldimlelt +id]/rho; double lambdamu=lambda+mu; double kmcvmu=K-cv*mu; double t_flux=(K*dU5x+cv*lambda*u1*dU4z-kmcvmu*u3*dU4x+cv*lambda*u1*dU3y-kmcvmu*u2*dU3x+cv*mu*u3*dU2z+cv*mu*u2*dU2y+(cv*lambda-K+2*cv*mu)*u1*dU2x-cv*lambdamu*u1*u3*dU1z-cv*lambdamu*u1*u2*dU1y+(K*u3*u3-cv*mu*u3*u3+K*u2*u2-cv*mu*u2*u2-cv*lambda*u1*u1+K*u1*u1-2*cv*mu*u1*u1-E*K)*dU1x)/(cv*rho); 
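/* a51dUadia: x-direction contribution of the adiabatic-wall energy flux; the y-direction
   term (a52dUadia) and, for 3-D runs (if3d), the z-direction term are accumulated into the
   energy equation entry (toteq) below using uny and unz. */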
flux[iwp-1+(toteq-1)*lxz2ldimlelt +id]=flux[iwp-1+(toteq-1)*lxz2ldimlelt +id]+t_flux*unx[id]; //a52dUadia_gpu(flux,f,ie,dU,wstate) t_flux=(K*dU5y+cv*lambda*u2*dU4z-kmcvmu*u3*dU4y+cv*mu*u3*dU3z+(cv*lambda-K+2*cv*mu)*u2*dU3y+cv*mu*u1*dU3x-kmcvmu*u1*dU2y+cv*lambda*u2*dU2x-cv*lambdamu*u2*u3*dU1z+(K*u3*u3-cv*mu*u3*u3-cv*lambda*u2*u2+K*u2*u2-2*cv*mu*u2*u2+K*u1*u1-cv*mu*u1*u1-E*K)*dU1y-cv*lambdamu*u1*u2*dU1x)/(cv*rho); flux[iwp-1+(toteq-1)*lxz2ldimlelt +id]=flux[iwp-1+(toteq-1)*lxz2ldimlelt +id]+t_flux*uny[id]; if(if3d){ t_flux=(K*(dU5z-E*dU1z)+cv*u3*(lambda*dU4z+2*mu*dU4z+lambda*dU3y+lambda*dU2x)-K*u3*dU4z+cv*mu*u2*(dU4y+dU3z)+cv*mu*u1*(dU4x+dU2z)-K*u2*dU3z-K*u1*dU2z-cv*(lambda+2*mu)*u3*u3*dU1z+K*u3*u3*dU1z+K*u2*u2*dU1z-cv*mu*u2*u2*dU1z+K*u1*u1*dU1z-cv*mu*u1*u1*dU1z-cv*(lambda+mu)*u2*u3*dU1y-cv*(lambda+mu)*u1*u3*dU1x)/(cv*rho); flux[iwp-1+(toteq-1)*lxz2ldimlelt +id]=flux[iwp-1+(toteq-1)*lxz2ldimlelt +id]+t_flux*unz[id]; } } else{ for (int eq=0;eq<toteq;eq++){ flux[iwp-1+eq*lxz2ldimlelt+id]=0; } } } //chsign(flxscr,ntot) for (int eq=0;eq<toteq;eq++){ flux[iwp-1+eq*lxz2ldimlelt +id]=-1*flux[iwp-1+eq*lxz2ldimlelt +id]; } } } extern "C" void igu_cmt_gpu_wrapper1_(int *glbblockSize2,double *d_flux,double *d_graduf,int *toteq,int *iwp,int *lx1,int *ly1,int *lz1,int *ldim,int *nelt,int *lelt){ #ifdef DEBUGPRINT cudaError_t code1 = cudaPeekAtLastError(); printf("CUDA: Start igu_cmt_gpu_wrapper1 cuda status: %s\n",cudaGetErrorString(code1)); printf("CUDA: Start igu_cmt_gpu_wrapper1 values toteq=%d ,iwp=%d ,lx1=%d ,ly1=%d ,lz1=%d ,ldim=%d ,nelt=%d ,lelt=%d ,\n",toteq[0],iwp[0],lx1[0],ly1[0],lz1[0],ldim[0],nelt[0],lelt[0]); #endif int lxz2ldim = lx1[0]*lz1[0]*2*ldim[0]; int ntot = nelt[0]*lxz2ldim; int lxz2ldimlelt= lxz2ldim*lelt[0]; int blockSize = glbblockSize2[0], gridSize; gridSize = (int)ceil((float)ntot/blockSize); igu_cmt_gpu_wrapper1_gpu_kernel<<<gridSize, blockSize>>>(d_flux,d_graduf,ntot,lxz2ldimlelt,toteq[0],iwp[0]); #ifdef DEBUGPRINT cudaDeviceSynchronize(); cudaError_t code2 = cudaPeekAtLastError(); printf("CUDA: End igu_cmt_gpu_wrapper1 cuda status: %s\n",cudaGetErrorString(code2)); #endif } extern "C" void igu_cmt_gpu_wrapper2_(int *glbblockSize2,double *d_flux,double *d_graduf,char *d_cbc,double *d_unx,double *d_uny,double *d_unz,int *toteq,int *iwp,int *iwm,int *ldim,int *irho,int *icvf,int *ilamf,int *imuf,int *iux,int *iuy,int *iuz,int *iu5,int *if3d,int *lx1,int *ly1,int *lz1,int *nelt,int *lelt){ #ifdef DEBUGPRINT cudaError_t code1 = cudaPeekAtLastError(); printf("CUDA: Start igu_cmt_gpu_wrapper2 cuda status: %s\n",cudaGetErrorString(code1)); printf("CUDA: Start igu_cmt_gpu_wrapper2 values toteq =%d ,iwp =%d,iwm =%d,ldim =%d,irho =%d,icvf =%d,ilamf =%d,imuf =%d,iux =%d,iuy =%d,iuz =%d,iu5 =%d,if3d =%d,lx1 =%d,ly1 =%d,lz1 =%d,nelt =%d,lelt =%d \n", toteq[0],iwp[0],iwm[0],ldim[0],irho[0],icvf[0],ilamf[0],imuf[0],iux[0],iuy[0],iuz[0],iu5[0],if3d[0],lx1[0],ly1[0],lz1[0],nelt[0],lelt[0] ); #endif int lxz=lx1[0]*lz1[0]; int a2ldim= 2*ldim[0]; int lxz2ldim = lxz*2*ldim[0]; int ntot = nelt[0]*lxz2ldim; int lxz2ldimlelt =lxz2ldim*lelt[0]; int lxz2ldimlelttoteq= lxz2ldimlelt*toteq[0]; int blockSize = glbblockSize2[0], gridSize; gridSize = (int)ceil((float)ntot/blockSize); igu_cmt_gpu_wrapper2_gpu_kernel<<<gridSize, blockSize>>>(d_flux,d_graduf,d_cbc,d_unx,d_uny,d_unz,ntot,lxz2ldim,lxz2ldimlelt, lxz2ldimlelttoteq,toteq[0],iwp[0],iwm[0],ldim[0],irho[0],icvf[0],ilamf[0],imuf[0],iux[0],iuy[0],iuz[0],iu5[0],if3d[0],lxz,a2ldim); #ifdef DEBUGPRINT cudaDeviceSynchronize(); cudaError_t code2 = 
cudaPeekAtLastError(); printf("CUDA: End igu_cmt_gpu_wrapper2 cuda status: %s\n",cudaGetErrorString(code2)); #endif }
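/*
 * Illustrative, untested usage sketch for map_faced() above: interpolates a batch of
 * element-face fields from the nx1 x nx1 GLL grid to the finer nxd x nxd dealiased grid
 * (idir = 0), the same coarse-to-fine path used by inviscidflux_gpu_wrapper_. The problem
 * sizes are assumptions; the interpolation matrices h_jgl/h_jgt are supplied by the host
 * solver, as in the wrapper above.
 */
static void example_map_faced_coarse_to_fine(const double *h_jgl, const double *h_jgt,
                                             const double *h_u, double *h_ju,
                                             int nx1, int nxd, int nelt, int nfaces)
{
  int fdim = 2;                                   /* assumed 3-D run: face dimension = ldim-1 */
  size_t nmat  = (size_t)nxd * nxd * nxd;         /* matches the lxd^3 sizing used above      */
  size_t nu    = (size_t)nelt * nfaces * nx1 * nx1;
  size_t nju   = (size_t)nelt * nfaces * nxd * nxd;
  size_t nwork = (size_t)nelt * nfaces * nx1 * nxd;

  double *d_jgl, *d_jgt, *d_u, *d_ju, *d_w;
  cudaMalloc(&d_jgl, nmat  * sizeof(double));
  cudaMalloc(&d_jgt, nmat  * sizeof(double));
  cudaMalloc(&d_u,   nu    * sizeof(double));
  cudaMalloc(&d_ju,  nju   * sizeof(double));
  cudaMalloc(&d_w,   nwork * sizeof(double));

  cudaMemcpy(d_jgl, h_jgl, nmat * sizeof(double), cudaMemcpyHostToDevice);
  cudaMemcpy(d_jgt, h_jgt, nmat * sizeof(double), cudaMemcpyHostToDevice);
  cudaMemcpy(d_u,   h_u,   nu   * sizeof(double), cudaMemcpyHostToDevice);

  /* coarse -> fine interpolation of all element faces in one batched call */
  map_faced(d_jgl, d_jgt, d_ju, d_u, d_w, nx1, nxd, fdim, nelt, nfaces, 0);

  cudaMemcpy(h_ju, d_ju, nju * sizeof(double), cudaMemcpyDeviceToHost);

  cudaFree(d_jgl); cudaFree(d_jgt); cudaFree(d_u); cudaFree(d_ju); cudaFree(d_w);
}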
db6ca7f99bbae594107e3c4fb70c282f003c80b2.hip
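/*
 * GPU Parquet page-decoding kernels (cuDF): per-page state setup (setupLocalPageInfo),
 * RLE / bit-packed repetition- and definition-level decoding (gpuDecodeStream),
 * dictionary-index, boolean and string-descriptor decoding, value output helpers for
 * fixed-width, string, INT96/INT64 timestamp and decimal types, and validity-mask /
 * nested-offset generation (store_validity, gpuUpdateValidityOffsetsAndRowIndices).
 */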
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright (c) 2018-2020, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <rmm/thrust_rmm_allocator.h> #include <thrust/iterator/transform_iterator.h> #include <thrust/iterator/transform_output_iterator.h> #include <thrust/scan.h> #include <thrust/tuple.h> #include <cudf/detail/utilities/release_assert.cuh> #include <cudf/utilities/bit.hpp> #include <io/utilities/block_utils.cuh> #include "parquet_gpu.h" #define LOG2_NTHREADS (5 + 2) #define NTHREADS (1 << LOG2_NTHREADS) #define NZ_BFRSZ (NTHREADS * 2) inline __device__ uint32_t rotl32(uint32_t x, uint32_t r) { return __funnelshift_l(x, x, r); // (x << r) | (x >> (32 - r)); } inline __device__ int rolling_index(int index) { return index & (NZ_BFRSZ - 1); } namespace cudf { namespace io { namespace parquet { namespace gpu { struct page_state_s { const uint8_t *data_start; const uint8_t *data_end; const uint8_t *dict_base; // ptr to dictionary page data int32_t dict_size; // size of dictionary data int32_t first_row; // First row in page to output int32_t num_rows; // Rows in page to decode (including rows to be skipped) int32_t first_output_value; // First value in page to output int32_t num_input_values; // total # of input/level values in the page int32_t dtype_len; // Output data type length int32_t dtype_len_in; // Can be larger than dtype_len if truncating 32-bit into 8-bit int32_t dict_bits; // # of bits to store dictionary indices uint32_t dict_run; int32_t dict_val; uint32_t initial_rle_run[NUM_LEVEL_TYPES]; // [def,rep] int32_t initial_rle_value[NUM_LEVEL_TYPES]; // [def,rep] int32_t error; PageInfo page; ColumnChunkDesc col; // (leaf) value decoding int32_t nz_count; // number of valid entries in nz_idx (write position in circular buffer) int32_t dict_pos; // write position of dictionary indices int32_t out_pos; // read position of final output int32_t ts_scale; // timestamp scale: <0: divide by -ts_scale, >0: multiply by ts_scale uint32_t nz_idx[NZ_BFRSZ]; // circular buffer of non-null value positions uint32_t dict_idx[NZ_BFRSZ]; // Dictionary index, boolean, or string offset values uint32_t str_len[NZ_BFRSZ]; // String length for plain encoding of strings // repetition/definition level decoding int32_t input_value_count; // how many values of the input we've processed int32_t input_row_count; // how many rows of the input we've processed int32_t input_leaf_count; // how many leaf values of the input we've processed uint32_t rep[NZ_BFRSZ]; // circular buffer of repetition level values uint32_t def[NZ_BFRSZ]; // circular buffer of definition level values const uint8_t *lvl_start[NUM_LEVEL_TYPES]; // [def,rep] int32_t lvl_count[NUM_LEVEL_TYPES]; // how many of each of the streams we've decoded int32_t row_index_lower_bound; // lower bound of row indices we should process }; /** * @brief Computes a 32-bit hash when given a byte stream and range. 
* * MurmurHash3_32 implementation from * https://github.com/aappleby/smhasher/blob/master/src/MurmurHash3.cpp * * MurmurHash3 was written by Austin Appleby, and is placed in the public * domain. The author hereby disclaims copyright to this source code. * * @param[in] key The input data to hash * @param[in] len The length of the input data * @param[in] seed An initialization value * * @return The hash value */ __device__ uint32_t device_str2hash32(const char *key, size_t len, uint32_t seed = 33) { const uint8_t *p = reinterpret_cast<const uint8_t *>(key); uint32_t h1 = seed, k1; const uint32_t c1 = 0xcc9e2d51; const uint32_t c2 = 0x1b873593; int l = len; // body while (l >= 4) { k1 = p[0] | (p[1] << 8) | (p[2] << 16) | (p[3] << 24); k1 *= c1; k1 = rotl32(k1, 15); k1 *= c2; h1 ^= k1; h1 = rotl32(h1, 13); h1 = h1 * 5 + 0xe6546b64; p += 4; l -= 4; } // tail k1 = 0; switch (l) { case 3: k1 ^= p[2] << 16; case 2: k1 ^= p[1] << 8; case 1: k1 ^= p[0]; k1 *= c1; k1 = rotl32(k1, 15); k1 *= c2; h1 ^= k1; } // finalization h1 ^= len; h1 ^= h1 >> 16; h1 *= 0x85ebca6b; h1 ^= h1 >> 13; h1 *= 0xc2b2ae35; h1 ^= h1 >> 16; return h1; } /** * @brief Read a 32-bit varint integer * * @param[in,out] cur The current data position, updated after the read * @param[in] end The end data position * * @return The 32-bit value read */ inline __device__ uint32_t get_vlq32(const uint8_t *&cur, const uint8_t *end) { uint32_t v = *cur++; if (v >= 0x80 && cur < end) { v = (v & 0x7f) | ((*cur++) << 7); if (v >= (0x80 << 7) && cur < end) { v = (v & ((0x7f << 7) | 0x7f)) | ((*cur++) << 14); if (v >= (0x80 << 14) && cur < end) { v = (v & ((0x7f << 14) | (0x7f << 7) | 0x7f)) | ((*cur++) << 21); if (v >= (0x80 << 21) && cur < end) { v = (v & ((0x7f << 21) | (0x7f << 14) | (0x7f << 7) | 0x7f)) | ((*cur++) << 28); } } } } return v; } /** * @brief Parse the beginning of the level section (definition or repetition), * initializes the initial RLE run & value, and returns the section length * * @param[in,out] s The page state * @param[in] cur The current data position * @param[in] end The end of the data * @param[in] level_bits The bits required * * @return The length of the section */ __device__ uint32_t InitLevelSection(page_state_s *s, const uint8_t *cur, const uint8_t *end, level_type lvl) { int32_t len; int level_bits = s->col.level_bits[lvl]; int encoding = lvl == level_type::DEFINITION ? s->page.definition_level_encoding : s->page.repetition_level_encoding; if (level_bits == 0) { len = 0; s->initial_rle_run[lvl] = s->page.num_input_values * 2; // repeated value s->initial_rle_value[lvl] = 0; s->lvl_start[lvl] = cur; } else if (encoding == RLE) { if (cur + 4 < end) { uint32_t run; len = 4 + (cur[0]) + (cur[1] << 8) + (cur[2] << 16) + (cur[3] << 24); cur += 4; run = get_vlq32(cur, end); s->initial_rle_run[lvl] = run; if (!(run & 1)) { int v = (cur < end) ? cur[0] : 0; cur++; if (level_bits > 8) { v |= ((cur < end) ? 
cur[0] : 0) << 8; cur++; } s->initial_rle_value[lvl] = v; } s->lvl_start[lvl] = cur; if (cur > end) { s->error = 2; } } else { len = 0; s->error = 2; } } else if (encoding == BIT_PACKED) { len = (s->page.num_input_values * level_bits + 7) >> 3; s->initial_rle_run[lvl] = ((s->page.num_input_values + 7) >> 3) * 2 + 1; // literal run s->initial_rle_value[lvl] = 0; s->lvl_start[lvl] = cur; } else { s->error = 3; len = 0; } return (uint32_t)len; } /** * @brief Decode values out of a definition or repetition stream * * @param[in,out] s Page state input/output * @param[in] t target_count Target count of stream values on output * @param[in] t Warp0 thread ID (0..31) * @param[in] lvl The level type we are decoding - DEFINITION or REPETITION */ __device__ void gpuDecodeStream( uint32_t *output, page_state_s *s, int32_t target_count, int t, level_type lvl) { const uint8_t *cur_def = s->lvl_start[lvl]; const uint8_t *end = s->data_start; uint32_t level_run = s->initial_rle_run[lvl]; int32_t level_val = s->initial_rle_value[lvl]; int level_bits = s->col.level_bits[lvl]; int32_t num_input_values = s->num_input_values; int32_t value_count = s->lvl_count[lvl]; int32_t batch_coded_count = 0; while (value_count < target_count && value_count < num_input_values) { int batch_len; if (level_run <= 1) { // Get a new run symbol from the byte stream int sym_len = 0; if (!t) { const uint8_t *cur = cur_def; if (cur < end) { level_run = get_vlq32(cur, end); } if (!(level_run & 1)) { if (cur < end) level_val = cur[0]; cur++; if (level_bits > 8) { if (cur < end) level_val |= cur[0] << 8; cur++; } } if (cur > end || level_run <= 1) { s->error = 0x10; } sym_len = (int32_t)(cur - cur_def); __threadfence_block(); } sym_len = SHFL0(sym_len); level_val = SHFL0(level_val); level_run = SHFL0(level_run); cur_def += sym_len; } if (s->error) { break; } batch_len = min(num_input_values - value_count, 32); if (level_run & 1) { // Literal run int batch_len8; batch_len = min(batch_len, (level_run >> 1) * 8); batch_len8 = (batch_len + 7) >> 3; if (t < batch_len) { int bitpos = t * level_bits; const uint8_t *cur = cur_def + (bitpos >> 3); bitpos &= 7; if (cur < end) level_val = cur[0]; cur++; if (level_bits > 8 - bitpos && cur < end) { level_val |= cur[0] << 8; cur++; if (level_bits > 16 - bitpos && cur < end) level_val |= cur[0] << 16; } level_val = (level_val >> bitpos) & ((1 << level_bits) - 1); } level_run -= batch_len8 * 2; cur_def += batch_len8 * level_bits; } else { // Repeated value batch_len = min(batch_len, level_run >> 1); level_run -= batch_len * 2; } if (t < batch_len) { int idx = value_count + t; output[idx & (NZ_BFRSZ - 1)] = level_val; } batch_coded_count += batch_len; value_count += batch_len; } // update the stream info if (!t) { s->lvl_start[lvl] = cur_def; s->initial_rle_run[lvl] = level_run; s->initial_rle_value[lvl] = level_val; s->lvl_count[lvl] = value_count; } } /** * @brief Performs RLE decoding of dictionary indexes * * @param[in,out] s Page state input/output * @param[in] target_pos Target index position in dict_idx buffer (may exceed this value by up to * 31) * @param[in] t Warp1 thread ID (0..31) * * @return The new output position */ __device__ int gpuDecodeDictionaryIndices(volatile page_state_s *s, int target_pos, int t) { const uint8_t *end = s->data_end; int dict_bits = s->dict_bits; int pos = s->dict_pos; while (pos < target_pos) { int is_literal, batch_len; if (!t) { uint32_t run = s->dict_run; const uint8_t *cur = s->data_start; if (run <= 1) { run = (cur < end) ? 
get_vlq32(cur, end) : 0; if (!(run & 1)) { // Repeated value int bytecnt = (dict_bits + 7) >> 3; if (cur + bytecnt <= end) { int32_t run_val = cur[0]; if (bytecnt > 1) { run_val |= cur[1] << 8; if (bytecnt > 2) { run_val |= cur[2] << 16; if (bytecnt > 3) { run_val |= cur[3] << 24; } } } s->dict_val = run_val & ((1 << dict_bits) - 1); } cur += bytecnt; } } if (run & 1) { // Literal batch: must output a multiple of 8, except for the last batch int batch_len_div8; batch_len = max(min(32, (int)(run >> 1) * 8), 1); batch_len_div8 = (batch_len + 7) >> 3; run -= batch_len_div8 * 2; cur += batch_len_div8 * dict_bits; } else { batch_len = max(min(32, (int)(run >> 1)), 1); run -= batch_len * 2; } s->dict_run = run; s->data_start = cur; is_literal = run & 1; __threadfence_block(); } SYNCWARP(); is_literal = SHFL0(is_literal); batch_len = SHFL0(batch_len); if (t < batch_len) { int dict_idx = s->dict_val; if (is_literal) { int32_t ofs = (t - ((batch_len + 7) & ~7)) * dict_bits; const uint8_t *p = s->data_start + (ofs >> 3); ofs &= 7; if (p < end) { uint32_t c = 8 - ofs; dict_idx = (*p++) >> ofs; if (c < dict_bits && p < end) { dict_idx |= (*p++) << c; c += 8; if (c < dict_bits && p < end) { dict_idx |= (*p++) << c; c += 8; if (c < dict_bits && p < end) { dict_idx |= (*p++) << c; } } } dict_idx &= (1 << dict_bits) - 1; } } s->dict_idx[(pos + t) & (NZ_BFRSZ - 1)] = dict_idx; } pos += batch_len; } return pos; } /** * @brief Performs RLE decoding of dictionary indexes, for when dict_size=1 * * @param[in,out] s Page state input/output * @param[in] target_pos Target write position * @param[in] t Thread ID * * @return The new output position */ __device__ int gpuDecodeRleBooleans(volatile page_state_s *s, int target_pos, int t) { const uint8_t *end = s->data_end; int pos = s->dict_pos; while (pos < target_pos) { int is_literal, batch_len; if (!t) { uint32_t run = s->dict_run; const uint8_t *cur = s->data_start; if (run <= 1) { run = (cur < end) ? get_vlq32(cur, end) : 0; if (!(run & 1)) { // Repeated value s->dict_val = (cur < end) ? cur[0] & 1 : 0; cur++; } } if (run & 1) { // Literal batch: must output a multiple of 8, except for the last batch int batch_len_div8; batch_len = max(min(32, (int)(run >> 1) * 8), 1); if (batch_len >= 8) { batch_len &= ~7; } batch_len_div8 = (batch_len + 7) >> 3; run -= batch_len_div8 * 2; cur += batch_len_div8; } else { batch_len = max(min(32, (int)(run >> 1)), 1); run -= batch_len * 2; } s->dict_run = run; s->data_start = cur; is_literal = run & 1; __threadfence_block(); } SYNCWARP(); is_literal = SHFL0(is_literal); batch_len = SHFL0(batch_len); if (t < batch_len) { int dict_idx; if (is_literal) { int32_t ofs = t - ((batch_len + 7) & ~7); const uint8_t *p = s->data_start + (ofs >> 3); dict_idx = (p < end) ? 
(p[0] >> (ofs & 7u)) & 1 : 0; } else { dict_idx = s->dict_val; } s->dict_idx[(pos + t) & (NZ_BFRSZ - 1)] = dict_idx; } pos += batch_len; } return pos; } /** * @brief Parses the length and position of strings * * @param[in,out] s Page state input/output * @param[in] target_pos Target output position * @param[in] t Thread ID * * @return The new output position */ __device__ void gpuInitStringDescriptors(volatile page_state_s *s, int target_pos, int t) { int pos = s->dict_pos; // This step is purely serial if (!t) { const uint8_t *cur = s->data_start; int dict_size = s->dict_size; int k = s->dict_val; while (pos < target_pos) { int len; if (k + 4 <= dict_size) { len = (cur[k]) | (cur[k + 1] << 8) | (cur[k + 2] << 16) | (cur[k + 3] << 24); k += 4; if (k + len > dict_size) { len = 0; } } else { len = 0; } s->dict_idx[pos & (NZ_BFRSZ - 1)] = k; s->str_len[pos & (NZ_BFRSZ - 1)] = len; k += len; pos++; } s->dict_val = k; __threadfence_block(); } } /** * @brief Output a string descriptor * * @param[in,out] s Page state input/output * @param[in] src_pos Source position * @param[in] dstv Pointer to row output data (string descriptor or 32-bit hash) */ inline __device__ void gpuOutputString(volatile page_state_s *s, int src_pos, void *dstv) { const char *ptr = NULL; size_t len = 0; if (s->dict_base) { // String dictionary uint32_t dict_pos = (s->dict_bits > 0) ? s->dict_idx[src_pos & (NZ_BFRSZ - 1)] * sizeof(nvstrdesc_s) : 0; if (dict_pos < (uint32_t)s->dict_size) { const nvstrdesc_s *src = reinterpret_cast<const nvstrdesc_s *>(s->dict_base + dict_pos); ptr = src->ptr; len = src->count; } } else { // Plain encoding uint32_t dict_pos = s->dict_idx[src_pos & (NZ_BFRSZ - 1)]; if (dict_pos <= (uint32_t)s->dict_size) { ptr = reinterpret_cast<const char *>(s->data_start + dict_pos); len = s->str_len[src_pos & (NZ_BFRSZ - 1)]; } } if (s->dtype_len == 4) { // Output hash *reinterpret_cast<uint32_t *>(dstv) = device_str2hash32(ptr, len); } else { // Output string descriptor nvstrdesc_s *dst = reinterpret_cast<nvstrdesc_s *>(dstv); dst->ptr = ptr; dst->count = len; } } /** * @brief Output a boolean * * @param[in,out] s Page state input/output * @param[in] src_pos Source position * @param[in] dst Pointer to row output data */ inline __device__ void gpuOutputBoolean(volatile page_state_s *s, int src_pos, uint8_t *dst) { *dst = s->dict_idx[src_pos & (NZ_BFRSZ - 1)]; } /** * @brief Store a 32-bit data element * * @param[out] dst ptr to output * @param[in] src8 raw input bytes * @param[in] dict_pos byte position in dictionary * @param[in] dict_size size of dictionary */ inline __device__ void gpuStoreOutput(uint32_t *dst, const uint8_t *src8, uint32_t dict_pos, uint32_t dict_size) { uint32_t bytebuf; unsigned int ofs = 3 & reinterpret_cast<size_t>(src8); src8 -= ofs; // align to 32-bit boundary ofs <<= 3; // bytes -> bits if (dict_pos < dict_size) { bytebuf = *(const uint32_t *)(src8 + dict_pos); if (ofs) { uint32_t bytebufnext = *(const uint32_t *)(src8 + dict_pos + 4); bytebuf = __funnelshift_r(bytebuf, bytebufnext, ofs); } } else { bytebuf = 0; } *dst = bytebuf; } /** * @brief Store a 64-bit data element * * @param[out] dst ptr to output * @param[in] src8 raw input bytes * @param[in] dict_pos byte position in dictionary * @param[in] dict_size size of dictionary */ inline __device__ void gpuStoreOutput(uint2 *dst, const uint8_t *src8, uint32_t dict_pos, uint32_t dict_size) { uint2 v; unsigned int ofs = 3 & reinterpret_cast<size_t>(src8); src8 -= ofs; // align to 32-bit boundary ofs <<= 3; // bytes -> bits if 
(dict_pos < dict_size) { v.x = *(const uint32_t *)(src8 + dict_pos + 0); v.y = *(const uint32_t *)(src8 + dict_pos + 4); if (ofs) { uint32_t next = *(const uint32_t *)(src8 + dict_pos + 8); v.x = __funnelshift_r(v.x, v.y, ofs); v.y = __funnelshift_r(v.y, next, ofs); } } else { v.x = v.y = 0; } *dst = v; } /** * @brief Convert an INT96 Spark timestamp to 64-bit timestamp * * @param[in,out] s Page state input/output * @param[in] src_pos Source position * @param[in] dst Pointer to row output data */ inline __device__ void gpuOutputInt96Timestamp(volatile page_state_s *s, int src_pos, int64_t *dst) { const uint8_t *src8; uint32_t dict_pos, dict_size = s->dict_size, ofs; int64_t ts; if (s->dict_base) { // Dictionary dict_pos = (s->dict_bits > 0) ? s->dict_idx[src_pos & (NZ_BFRSZ - 1)] : 0; src8 = s->dict_base; } else { // Plain dict_pos = src_pos; src8 = s->data_start; } dict_pos *= (uint32_t)s->dtype_len_in; ofs = 3 & reinterpret_cast<size_t>(src8); src8 -= ofs; // align to 32-bit boundary ofs <<= 3; // bytes -> bits if (dict_pos + 4 < dict_size) { uint3 v; int64_t nanos, secs, days; v.x = *(const uint32_t *)(src8 + dict_pos + 0); v.y = *(const uint32_t *)(src8 + dict_pos + 4); v.z = *(const uint32_t *)(src8 + dict_pos + 8); if (ofs) { uint32_t next = *(const uint32_t *)(src8 + dict_pos + 12); v.x = __funnelshift_r(v.x, v.y, ofs); v.y = __funnelshift_r(v.y, v.z, ofs); v.z = __funnelshift_r(v.z, next, ofs); } nanos = v.y; nanos <<= 32; nanos |= v.x; // Convert from Julian day at noon to UTC seconds days = static_cast<int32_t>(v.z); secs = (days - 2440588) * (24 * 60 * 60); // TBD: Should be noon instead of midnight, but this matches pyarrow if (s->col.ts_clock_rate) ts = (secs * s->col.ts_clock_rate) + nanos / (1000000000 / s->col.ts_clock_rate); // Output to desired clock rate else ts = (secs * 1000000000) + nanos; } else { ts = 0; } *dst = ts; } /** * @brief Output a 64-bit timestamp * * @param[in,out] s Page state input/output * @param[in] src_pos Source position * @param[in] dst Pointer to row output data */ inline __device__ void gpuOutputInt64Timestamp(volatile page_state_s *s, int src_pos, int64_t *dst) { const uint8_t *src8; uint32_t dict_pos, dict_size = s->dict_size, ofs; int64_t ts; if (s->dict_base) { // Dictionary dict_pos = (s->dict_bits > 0) ? 
s->dict_idx[src_pos & (NZ_BFRSZ - 1)] : 0; src8 = s->dict_base; } else { // Plain dict_pos = src_pos; src8 = s->data_start; } dict_pos *= (uint32_t)s->dtype_len_in; ofs = 3 & reinterpret_cast<size_t>(src8); src8 -= ofs; // align to 32-bit boundary ofs <<= 3; // bytes -> bits if (dict_pos + 4 < dict_size) { uint2 v; int64_t val; int32_t ts_scale; v.x = *(const uint32_t *)(src8 + dict_pos + 0); v.y = *(const uint32_t *)(src8 + dict_pos + 4); if (ofs) { uint32_t next = *(const uint32_t *)(src8 + dict_pos + 8); v.x = __funnelshift_r(v.x, v.y, ofs); v.y = __funnelshift_r(v.y, next, ofs); } val = v.y; val <<= 32; val |= v.x; // Output to desired clock rate ts_scale = s->ts_scale; if (ts_scale < 0) { // round towards negative infinity int sign = (val < 0); ts = ((val + sign) / -ts_scale) + sign; } else { ts = val * ts_scale; } } else { ts = 0; } *dst = ts; } /** * @brief Powers of 10 */ static const __device__ __constant__ double kPow10[40] = { 1.0, 1.e1, 1.e2, 1.e3, 1.e4, 1.e5, 1.e6, 1.e7, 1.e8, 1.e9, 1.e10, 1.e11, 1.e12, 1.e13, 1.e14, 1.e15, 1.e16, 1.e17, 1.e18, 1.e19, 1.e20, 1.e21, 1.e22, 1.e23, 1.e24, 1.e25, 1.e26, 1.e27, 1.e28, 1.e29, 1.e30, 1.e31, 1.e32, 1.e33, 1.e34, 1.e35, 1.e36, 1.e37, 1.e38, 1.e39, }; /** * @brief Output a decimal type ([INT32..INT128] + scale) as a 64-bit float * * @param[in,out] s Page state input/output * @param[in] src_pos Source position * @param[in] dst Pointer to row output data * @param[in] dtype Stored data type */ inline __device__ void gpuOutputDecimal(volatile page_state_s *s, int src_pos, double *dst, int dtype) { const uint8_t *dict; uint32_t dict_pos, dict_size = s->dict_size, dtype_len_in; int64_t i128_hi, i128_lo; int32_t scale; double d; if (s->dict_base) { // Dictionary dict_pos = (s->dict_bits > 0) ? s->dict_idx[src_pos & (NZ_BFRSZ - 1)] : 0; dict = s->dict_base; } else { // Plain dict_pos = src_pos; dict = s->data_start; } dtype_len_in = s->dtype_len_in; dict_pos *= dtype_len_in; // FIXME: Not very efficient (currently reading 1 byte at a time) -> need a variable-length // unaligned load utility function (both little-endian and big-endian versions) if (dtype == INT32) { int32_t lo32 = 0; for (unsigned int i = 0; i < dtype_len_in; i++) { uint32_t v = (dict_pos + i < dict_size) ? dict[dict_pos + i] : 0; lo32 |= v << (i * 8); } i128_lo = lo32; i128_hi = lo32 >> 31; } else if (dtype == INT64) { int64_t lo64 = 0; for (unsigned int i = 0; i < dtype_len_in; i++) { uint64_t v = (dict_pos + i < dict_size) ? dict[dict_pos + i] : 0; lo64 |= v << (i * 8); } i128_lo = lo64; i128_hi = lo64 >> 63; } else // if (dtype == FIXED_LENGTH_BYTE_ARRAY) { i128_lo = 0; for (unsigned int i = dtype_len_in - min(dtype_len_in, 8); i < dtype_len_in; i++) { uint32_t v = (dict_pos + i < dict_size) ? dict[dict_pos + i] : 0; i128_lo = (i128_lo << 8) | v; } if (dtype_len_in > 8) { i128_hi = 0; for (unsigned int i = dtype_len_in - min(dtype_len_in, 16); i < dtype_len_in - 8; i++) { uint32_t v = (dict_pos + i < dict_size) ? dict[dict_pos + i] : 0; i128_hi = (i128_hi << 8) | v; } if (dtype_len_in < 16) { i128_hi <<= 64 - (dtype_len_in - 8) * 8; i128_hi >>= 64 - (dtype_len_in - 8) * 8; } } else { if (dtype_len_in < 8) { i128_lo <<= 64 - dtype_len_in * 8; i128_lo >>= 64 - dtype_len_in * 8; } i128_hi = i128_lo >> 63; } } scale = s->col.decimal_scale; d = Int128ToDouble_rn(i128_lo, i128_hi); *dst = (scale < 0) ? 
(d * kPow10[min(-scale, 39)]) : (d / kPow10[min(scale, 39)]); } /** * @brief Output a small fixed-length value * * @param[in,out] s Page state input/output * @param[in] src_pos Source position * @param[in] dst Pointer to row output data */ template <typename T> inline __device__ void gpuOutputFast(volatile page_state_s *s, int src_pos, T *dst) { const uint8_t *dict; uint32_t dict_pos, dict_size = s->dict_size; if (s->dict_base) { // Dictionary dict_pos = (s->dict_bits > 0) ? s->dict_idx[src_pos & (NZ_BFRSZ - 1)] : 0; dict = s->dict_base; } else { // Plain dict_pos = src_pos; dict = s->data_start; } dict_pos *= (uint32_t)s->dtype_len_in; gpuStoreOutput(dst, dict, dict_pos, dict_size); } /** * @brief Output a N-byte value * * @param[in,out] s Page state input/output * @param[in] src_pos Source position * @param[in] dst8 Pointer to row output data * @param[in] len Length of element */ static __device__ void gpuOutputGeneric(volatile page_state_s *s, int src_pos, uint8_t *dst8, int len) { const uint8_t *dict; uint32_t dict_pos, dict_size = s->dict_size; if (s->dict_base) { // Dictionary dict_pos = (s->dict_bits > 0) ? s->dict_idx[src_pos & (NZ_BFRSZ - 1)] : 0; dict = s->dict_base; } else { // Plain dict_pos = src_pos; dict = s->data_start; } dict_pos *= (uint32_t)s->dtype_len_in; if (len & 3) { // Generic slow path for (unsigned int i = 0; i < len; i++) { dst8[i] = (dict_pos + i < dict_size) ? dict[dict_pos + i] : 0; } } else { // Copy 4 bytes at a time const uint8_t *src8 = dict; unsigned int ofs = 3 & reinterpret_cast<size_t>(src8); src8 -= ofs; // align to 32-bit boundary ofs <<= 3; // bytes -> bits for (unsigned int i = 0; i < len; i += 4) { uint32_t bytebuf; if (dict_pos < dict_size) { bytebuf = *(const uint32_t *)(src8 + dict_pos); if (ofs) { uint32_t bytebufnext = *(const uint32_t *)(src8 + dict_pos + 4); bytebuf = __funnelshift_r(bytebuf, bytebufnext, ofs); } } else { bytebuf = 0; } dict_pos += 4; *(uint32_t *)(dst8 + i) = bytebuf; } } } /** * @brief Sets up block-local page state information from the global pages. 
* * @param[in, out] s The local page state to be filled in * @param[in] p The global page to be copied from * @param[in] chunks The global list of chunks * @param[in] num_rows Maximum number of rows to read * @param[in] min_row crop all rows below min_row * @param[in] num_chunk Number of column chunks */ static __device__ bool setupLocalPageInfo(page_state_s *const s, PageInfo *p, ColumnChunkDesc const *chunks, size_t min_row, size_t num_rows, int32_t num_chunks) { int t = threadIdx.x; int chunk_idx; // Fetch page info // NOTE: Assumes that sizeof(PageInfo) <= 256 (and is padded to 4 bytes) if (t < sizeof(PageInfo) / sizeof(uint32_t)) { ((uint32_t *)&s->page)[t] = ((const uint32_t *)p)[t]; } __syncthreads(); if (s->page.flags & PAGEINFO_FLAGS_DICTIONARY) { return false; } // Fetch column chunk info chunk_idx = s->page.chunk_idx; if ((uint32_t)chunk_idx < (uint32_t)num_chunks) { // NOTE: Assumes that sizeof(ColumnChunkDesc) <= 256 (and is padded to 4 bytes) if (t < sizeof(ColumnChunkDesc) / sizeof(uint32_t)) { ((uint32_t *)&s->col)[t] = ((const uint32_t *)&chunks[chunk_idx])[t]; } } // zero nested value and valid counts int d = 0; while (d < s->page.num_nesting_levels) { if (d + t < s->page.num_nesting_levels) { s->page.nesting[d + t].valid_count = 0; s->page.nesting[d + t].value_count = 0; } d += blockDim.x; } __syncthreads(); if (!t) { s->error = 0; // our starting row (absolute index) is // col.start_row == absolute row index // page.chunk-row == relative row index within the chunk size_t page_start_row = s->col.start_row + s->page.chunk_row; // IMPORTANT : nested schemas can have 0 rows in a page but still have // values. The case is: // - On page N-1, the last row starts, with 2/6 values encoded // - On page N, the remaining 4/6 values are encoded, but there are no new rows. // if (s->page.num_input_values > 0 && s->page.num_rows > 0) { if (s->page.num_input_values > 0) { uint8_t *cur = s->page.page_data; uint8_t *end = cur + s->page.uncompressed_page_size; uint32_t dtype_len_out = s->col.data_type >> 3; s->ts_scale = 0; // Validate data type switch (s->col.data_type & 7) { case BOOLEAN: s->dtype_len = 1; // Boolean are stored as 1 byte on the output break; case INT32: case FLOAT: s->dtype_len = 4; break; case INT64: if (s->col.ts_clock_rate) { int32_t units = 0; if (s->col.converted_type == TIME_MICROS || s->col.converted_type == TIMESTAMP_MICROS) units = 1000000; else if (s->col.converted_type == TIME_MILLIS || s->col.converted_type == TIMESTAMP_MILLIS) units = 1000; if (units && units != s->col.ts_clock_rate) s->ts_scale = (s->col.ts_clock_rate < units) ? 
-(units / s->col.ts_clock_rate) : (s->col.ts_clock_rate / units); } // Fall through to DOUBLE case DOUBLE: s->dtype_len = 8; break; case INT96: s->dtype_len = 12; break; case BYTE_ARRAY: s->dtype_len = sizeof(nvstrdesc_s); break; default: // FIXED_LEN_BYTE_ARRAY: s->dtype_len = dtype_len_out; s->error |= (s->dtype_len <= 0); break; } // Special check for downconversions s->dtype_len_in = s->dtype_len; if (s->col.converted_type == DECIMAL) { s->dtype_len = 8; // Convert DECIMAL to 64-bit float } else if ((s->col.data_type & 7) == INT32) { if (dtype_len_out == 1) s->dtype_len = 1; // INT8 output if (dtype_len_out == 2) s->dtype_len = 2; // INT16 output } else if ((s->col.data_type & 7) == BYTE_ARRAY && dtype_len_out == 4) { s->dtype_len = 4; // HASH32 output } else if ((s->col.data_type & 7) == INT96) { s->dtype_len = 8; // Convert to 64-bit timestamp } // first row within the page to start reading if (page_start_row >= min_row) { s->first_row = 0; } else { s->first_row = (int32_t)min(min_row - page_start_row, (size_t)s->page.num_rows); } // # of rows within the page to read s->num_rows = s->page.num_rows; if ((page_start_row + s->first_row) + s->num_rows > min_row + num_rows) { s->num_rows = (int32_t)max((int64_t)(min_row + num_rows - (page_start_row + s->first_row)), INT64_C(0)); } // during the decoding step we need to offset the global output buffers // for each level of nesting so that we write to the section this page // is reponsible for. // - for flat schemas, we can do this directly by using row counts // - for nested schemas, these offsets are computed during the preprocess step if (s->col.column_data_base != nullptr) { int max_depth = s->col.max_level[level_type::REPETITION]; for (int idx = 0; idx <= max_depth; idx++) { PageNestingInfo *pni = &s->page.nesting[idx]; size_t output_offset; if (max_depth == 0) { output_offset = page_start_row >= min_row ? page_start_row - min_row : 0; } // for nested schemas, we've already got the exactly value precomputed else { output_offset = pni->page_start_value; } // anything below max depth is an offset uint32_t len = idx < max_depth ? sizeof(cudf::size_type) : s->dtype_len; pni->data_out = reinterpret_cast<uint8_t *>(s->col.column_data_base[idx]) + (output_offset * len); pni->valid_map = s->col.valid_map_base[idx]; if (pni->valid_map != nullptr) { pni->valid_map += output_offset >> 5; pni->valid_map_offset = (int32_t)(output_offset & 0x1f); } } } s->first_output_value = 0; // Find the compressed size of repetition levels cur += InitLevelSection(s, cur, end, level_type::REPETITION); // Find the compressed size of definition levels cur += InitLevelSection(s, cur, end, level_type::DEFINITION); s->dict_bits = 0; s->dict_base = 0; s->dict_size = 0; switch (s->page.encoding) { case PLAIN_DICTIONARY: case RLE_DICTIONARY: // RLE-packed dictionary indices, first byte indicates index length in bits if (((s->col.data_type & 7) == BYTE_ARRAY) && (s->col.str_dict_index)) { // String dictionary: use index s->dict_base = reinterpret_cast<const uint8_t *>(s->col.str_dict_index); s->dict_size = s->col.page_info[0].num_input_values * sizeof(nvstrdesc_s); } else { s->dict_base = s->col.page_info[0].page_data; // dictionary is always stored in the first page s->dict_size = s->col.page_info[0].uncompressed_page_size; } s->dict_run = 0; s->dict_val = 0; s->dict_bits = (cur < end) ? 
*cur++ : 0; if (s->dict_bits > 32 || !s->dict_base) { s->error = (10 << 8) | s->dict_bits; } break; case PLAIN: s->dict_size = static_cast<int32_t>(end - cur); s->dict_val = 0; if ((s->col.data_type & 7) == BOOLEAN) { s->dict_run = s->dict_size * 2 + 1; } break; case RLE: s->dict_run = 0; break; default: s->error = 1; // Unsupported encoding break; } if (cur > end) { s->error = 1; } s->data_start = cur; s->data_end = end; } else { s->error = 1; } s->lvl_count[level_type::REPETITION] = 0; s->lvl_count[level_type::DEFINITION] = 0; s->nz_count = 0; s->num_input_values = s->page.num_input_values; s->dict_pos = 0; s->out_pos = 0; // handle row bounds (skip_rows, min_rows) s->input_row_count = s->first_row; // return the lower bound to compare (page-relative) thread row index against. Explanation: // In the case of nested schemas, rows can span page boundaries. That is to say, // we can encounter the first value for row X on page M, but the last value for page M // might not be the last value for row X. page M+1 (or further) may contain the last value. // // This means that the first values we encounter for a given page (M+1) may not belong to the // row indicated by chunk_row, but to the row before it that spanned page boundaries. If that // previous row is within the overall row bounds, include the values by allowing relative row // index -1 int max_row = (min_row + num_rows) - 1; if (min_row < page_start_row && max_row >= page_start_row - 1) { s->row_index_lower_bound = -1; } else { s->row_index_lower_bound = s->first_row; } // if we're in the decoding step, jump directly to the first // value we care about if (s->col.column_data_base != nullptr) { // for flat hierarchies, we haven't computed skipped_values yet, but we can do so trivially // now if (s->col.max_level[level_type::REPETITION] == 0) { s->page.skipped_values = s->first_row; s->page.skipped_leaf_values = s->first_row; } s->input_value_count = s->page.skipped_values; } else { s->input_value_count = 0; s->input_leaf_count = 0; s->page.skipped_values = -1; s->page.skipped_leaf_values = -1; } __threadfence_block(); } __syncthreads(); return true; } /** * @brief Store a validity mask containing value_count bits into the output validity buffer of the * page. * * @param[in,out] pni The page/nesting information to store the mask in. The validity map offset is * also updated * @param[in] valid_mask The validity mask to be stored * @param[in] value_count # of bits in the validity mask */ static __device__ void store_validity(PageNestingInfo *pni, uint32_t valid_mask, int32_t value_count) { int word_offset = pni->valid_map_offset / 32; int bit_offset = pni->valid_map_offset % 32; // if we fit entirely in the output word if (bit_offset + value_count <= 32) { uint32_t relevant_mask = static_cast<uint32_t>((static_cast<uint64_t>(1) << value_count) - 1); if (relevant_mask == ~0) { pni->valid_map[word_offset] = valid_mask; } else { atomicAnd(pni->valid_map + word_offset, ~(relevant_mask << bit_offset)); atomicOr(pni->valid_map + word_offset, (valid_mask & relevant_mask) << bit_offset); } } // we're going to spill over into the next word. // note : writing both values here is the lazy/slow way. we could be writing just // the first word and rolling the remaining bits over into the next call. // however, some basic performance tests shows almost no difference between these two // methods. More detailed performance testing might be worthwhile here. else { uint32_t bits_left = 32 - bit_offset; // first word. 
strip bits_left bits off the beginning and store that uint32_t relevant_mask = ((1 << bits_left) - 1); uint32_t mask_word0 = valid_mask & relevant_mask; atomicAnd(pni->valid_map + word_offset, ~(relevant_mask << bit_offset)); atomicOr(pni->valid_map + word_offset, mask_word0 << bit_offset); // second word. strip the remainder of the bits off the end and store that relevant_mask = ((1 << (value_count - bits_left)) - 1); uint32_t mask_word1 = valid_mask & (relevant_mask << bits_left); atomicAnd(pni->valid_map + word_offset + 1, ~(relevant_mask)); atomicOr(pni->valid_map + word_offset + 1, mask_word1 >> bits_left); } pni->valid_map_offset += value_count; } /** * @brief Process a batch of incoming repetition/definition level values and generate * validity, nested column offsets (where appropriate) and decoding indices. * * @param[in] target_input_value_count The # of repetition/definition levels to process up to * @param[in] s Local page information * @param[in] t Thread index */ static __device__ void gpuUpdateValidityOffsetsAndRowIndices(int32_t target_input_value_count, page_state_s *s, int t) { // max nesting depth of the column int max_depth = s->col.max_level[level_type::REPETITION]; // how many (input) values we've processed in the page so far int input_value_count = s->input_value_count; // how many rows we've processed in the page so far int input_row_count = s->input_row_count; // process until we've reached the target while (input_value_count < target_input_value_count) { // determine the nesting bounds for this thread int start_depth = -1; int end_depth = -1; int d = -1; if (input_value_count + t < target_input_value_count) { int index = rolling_index(input_value_count + t); // important : we don't decode repetition levels for flat schemas. we can assume the // repetition level is always 0. int r = max_depth == 0 ? 0 : s->rep[index]; start_depth = r; d = s->def[index]; end_depth = s->page.nesting[d].d_remap; } // 4 interesting things to track: // thread_value_count : # of output values from the view of this thread // warp_value_count : # of output values for the whole warp // // thread_valid_count : # of valid values from the view of this thread // warp_valid_count : # of valid values for the whole warp uint32_t thread_value_count, warp_value_count; uint32_t thread_valid_count, warp_valid_count; // track (page-relative) row index for the thread so we can compare against input bounds // keep track of overall # of rows we've read. int is_new_row = start_depth == 0 ? 1 : 0; uint32_t warp_row_count_mask = BALLOT(is_new_row); int32_t thread_row_index = input_row_count + ((__popc(warp_row_count_mask & ((1 << t) - 1)) + is_new_row) - 1); input_row_count += __popc(warp_row_count_mask); // is this thread within row bounds? int in_row_bounds = thread_row_index >= s->row_index_lower_bound && thread_row_index < (s->first_row + s->num_rows) ? 1 : 0; // compute warp and thread value counts uint32_t warp_count_mask = BALLOT((0 >= start_depth && 0 <= end_depth) && in_row_bounds ? 1 : 0); warp_value_count = __popc(warp_count_mask); // Note : ((1 << t) - 1) implies "for all threads before me" thread_value_count = __popc(warp_count_mask & ((1 << t) - 1)); // always walk from 0 to max_depth even if our start and end depths are different. // otherwise we'd have thread/warp synchronization issues on the BALLOT() and WarpReduce() // calls. 
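// Worked example (illustrative, low 8 lanes shown): if the ballot above produced
// warp_count_mask = 0b10110101, then warp_value_count = __popc(mask) = 5 values emitted at this
// nesting level for the batch, and lane t = 5 gets
// thread_value_count = __popc(mask & 0b00011111) = 3, i.e. three of those values are produced
// by the lanes in front of it.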
uint32_t next_thread_value_count, next_warp_value_count; for (int s_idx = 0; s_idx <= max_depth; s_idx++) { PageNestingInfo *pni = &s->page.nesting[s_idx]; int in_bounds = ((s_idx >= start_depth && s_idx <= end_depth) && in_row_bounds) ? 1 : 0; // everything up to the max_def_level is a real value uint32_t is_valid = 0; if (d >= pni->max_def_level && in_bounds) { is_valid = 1; } // compute warp and thread valid counts uint32_t warp_valid_mask; // for flat schemas, a simple ballot_sync gives us the correct count and bit positions because // every value in the input matches to a value in the output if (max_depth == 0) { warp_valid_mask = BALLOT(is_valid); } // for nested schemas, it's more complicated. This warp will visit 32 incoming values, // however not all of them will necessarily represent a value at this nesting level. so the // validity bit for thread t might actually represent output value t-6. the correct position // for thread t's bit is cur_value_count. for cuda 11 we could use __reduce_or_sync(), but // until then we have to do a warp reduce. else { warp_valid_mask = WarpReduceOr32(is_valid << thread_value_count); } thread_valid_count = __popc(warp_valid_mask & ((1 << thread_value_count) - 1)); warp_valid_count = __popc(warp_valid_mask); // if this is the value column emit an index if (is_valid && s_idx == max_depth) { int idx = pni->valid_count + thread_valid_count; int ofs = pni->value_count + thread_value_count; s->nz_idx[rolling_index(idx)] = ofs; } // compute warp and thread value counts for the -next- nesting level. we need to // do this for here nested schemas so that we can emit an offset for the -current- nesting // level. more concretely : the offset for the current nesting level == current length of the // next nesting level if (s_idx < max_depth) { uint32_t next_warp_count_mask = BALLOT((s_idx + 1 >= start_depth && s_idx + 1 <= end_depth && in_row_bounds) ? 1 : 0); next_warp_value_count = __popc(next_warp_count_mask); next_thread_value_count = __popc(next_warp_count_mask & ((1 << t) - 1)); // if we're -not- at a leaf column, and we're within row bounds, emit an offset if (in_bounds) { int idx = pni->value_count + thread_value_count; cudf::size_type ofs = s->page.nesting[s_idx + 1].value_count + next_thread_value_count + s->page.nesting[s_idx + 1].page_start_value; (reinterpret_cast<cudf::size_type *>(pni->data_out))[idx] = ofs; } } // increment count of valid values, count of total values, and validity mask if (!t) { if (pni->valid_map != nullptr) { store_validity(pni, warp_valid_mask, warp_value_count); } pni->valid_count += warp_valid_count; pni->value_count += warp_value_count; } // propagate value counts for the next level warp_value_count = next_warp_value_count; thread_value_count = next_thread_value_count; } input_value_count += min(32, (target_input_value_count - input_value_count)); SYNCWARP(); } // update if (!t) { // update valid value count for decoding and total # of values we've processed s->nz_count = s->page.nesting[max_depth].valid_count; s->input_value_count = input_value_count; s->input_row_count = input_row_count; } } /** * @brief Process repetition and definition levels up to the target count of leaf values. * * In order to decode actual leaf values from the input stream, we need to generate the * list of non-null value positions (page_state_s::nz_idx). We do this by processing * the repetition and definition level streams. This process also generates validity information, * and offset column values in the case of nested schemas. 
Because of the way the streams * are encoded, this function may generate slightly more than target_leaf_count. * * Only runs on 1 warp. * * @param[in] s The local page state * @param[in] target_leaf_count Target count of non-null leaf values to generate indices for * @param[in] t Thread index */ __device__ void gpuDecodeLevels(page_state_s *s, int32_t target_leaf_count, int t) { int max_depth = s->col.max_level[level_type::REPETITION]; constexpr int batch_size = 32; int cur_leaf_count = target_leaf_count; while (!s->error && s->nz_count < target_leaf_count && s->input_value_count < s->num_input_values) { // only need to decode repetition levels in the case of a nested schema if (max_depth > 0) { gpuDecodeStream(s->rep, s, cur_leaf_count, t, level_type::REPETITION); } gpuDecodeStream(s->def, s, cur_leaf_count, t, level_type::DEFINITION); SYNCWARP(); // because the rep and def streams are encoded seperately, we cannot request an exact // # of values to be decoded at once. we can only process the lowest # of decoded rep/def // levels we get. int actual_leaf_count = max_depth > 0 ? min(s->lvl_count[level_type::REPETITION], s->lvl_count[level_type::DEFINITION]) : s->lvl_count[level_type::DEFINITION]; // process what we got back gpuUpdateValidityOffsetsAndRowIndices(actual_leaf_count, s, t); cur_leaf_count = actual_leaf_count + batch_size; SYNCWARP(); } } /** * @brief Process a batch of incoming repetition/definition level values to generate * per-nesting level output column size for this page. * * Each page represents one piece of the overall output column. The total output (cudf) * column sizes are the sum of the values in each individual page. * * @param[in] s The local page info * @param[in] target_input_value_count The # of repetition/definition levels to process up to * @param[in] t Thread index * @param[in] bounds_set Whether or not s->row_index_lower_bound, s->first_row and s->num_rows * have been computed for this page (they will only be set in the second/trim pass). */ static __device__ void gpuUpdatePageSizes(page_state_s *s, int32_t target_input_value_count, int t, bool bounds_set) { // max nesting depth of the column int max_depth = s->col.max_level[level_type::REPETITION]; // how many input level values we've processed in the page so far int input_value_count = s->input_value_count; // how many leaf values we've processed in the page so far int input_leaf_count = s->input_leaf_count; // how many rows we've processed in the page so far int input_row_count = s->input_row_count; while (input_value_count < target_input_value_count) { // determine the nesting bounds for this thread int start_depth = -1; int end_depth = -1; int d = -1; if (input_value_count + t < target_input_value_count) { int index = rolling_index(input_value_count + t); int r = s->rep[index]; start_depth = r; d = s->def[index]; end_depth = s->page.nesting[d].d_remap; } // count rows and leaf values int is_new_row = start_depth == 0 ? 1 : 0; uint32_t warp_row_count_mask = BALLOT(is_new_row); int is_new_leaf = (d >= s->page.nesting[max_depth].max_def_level) ? 1 : 0; uint32_t warp_leaf_count_mask = BALLOT(is_new_leaf); // is this thread within row bounds? on the first pass we don't know the bounds, so we will be // computing the full size of the column. on the second pass, we will know our actual row // bounds, so the computation will cap sizes properly. 
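// Worked example (illustrative): suppose this page holds rows 100..199 and the read request is
// min_row = 150, num_rows = 25. On the first pass (bounds_set == false) all 100 rows contribute
// to the nesting sizes; on the trim pass only rows 150..174 are counted, and skipped_values /
// skipped_leaf_values below record where row 150 begins in the level and value streams so the
// decode kernel can jump straight to it.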
int in_row_bounds = 1; if (bounds_set) { // absolute row index int32_t thread_row_index = input_row_count + ((__popc(warp_row_count_mask & ((1 << t) - 1)) + is_new_row) - 1); in_row_bounds = thread_row_index >= s->row_index_lower_bound && thread_row_index < (s->first_row + s->num_rows) ? 1 : 0; uint32_t row_bounds_mask = BALLOT(in_row_bounds); int first_thread_in_range = __ffs(row_bounds_mask) - 1; // if we've found the beginning of the first row, mark down the position // in the def/repetition buffer (skipped_values) and the data buffer (skipped_leaf_values) if (!t && first_thread_in_range >= 0 && s->page.skipped_values < 0) { // how many values we've skipped in the rep/def levels s->page.skipped_values = input_value_count + first_thread_in_range; // how many values we've skipped in the actual data stream s->page.skipped_leaf_values = input_leaf_count + __popc(warp_leaf_count_mask & ((1 << first_thread_in_range) - 1)); } } // increment counts across all nesting depths for (int s_idx = 0; s_idx <= max_depth; s_idx++) { int in_bounds = (s_idx >= start_depth && s_idx <= end_depth && in_row_bounds) ? 1 : 0; uint32_t count_mask = BALLOT(in_bounds); if (!t) { s->page.nesting[s_idx].size += __popc(count_mask); } } input_value_count += min(32, (target_input_value_count - input_value_count)); input_row_count += __popc(warp_row_count_mask); input_leaf_count += __popc(warp_leaf_count_mask); } // update final page value count if (!t) { s->input_value_count = target_input_value_count; s->input_leaf_count = input_leaf_count; s->input_row_count = input_row_count; } } /** * @brief Kernel for computing per-page column size information for all nesting levels. * * This function will write out the size field for each level of nesting. * * @param[in,out] pages List of pages * @param[in] chunks List of column chunks * @param[in] num_chunks Number of column chunks * @param[in] min_row Row index to start reading at * @param[in] num_rows Maximum number of rows to read * @param[in] num_chunks Number of column chunks * @param[in] trim_pass Whether or not this is the trim pass. We first have to compute * the full size information of every page before we come through in a second (trim) pass * to determine what subset of rows in this page we should be reading. */ // blockDim {NTHREADS,1,1} extern "C" __global__ void __launch_bounds__(NTHREADS) gpuComputePageSizes(PageInfo *pages, ColumnChunkDesc const *chunks, size_t min_row, size_t num_rows, int32_t num_chunks, bool trim_pass) { __shared__ __align__(16) page_state_s state_g; page_state_s *const s = &state_g; int page_idx = blockIdx.x; int t = threadIdx.x; PageInfo *pp = &pages[page_idx]; if (!setupLocalPageInfo( s, pp, chunks, trim_pass ? min_row : 0, trim_pass ? num_rows : INT_MAX, num_chunks)) { return; } // zero sizes int d = 0; while (d < s->page.num_nesting_levels) { if (d + t < s->page.num_nesting_levels) { s->page.nesting[d + t].size = 0; } d += blockDim.x; } if (!t) { s->page.skipped_values = -1; s->page.skipped_leaf_values = -1; s->input_row_count = 0; s->input_value_count = 0; // if this isn't the trim pass, make sure we visit absolutely everything if (!trim_pass) { s->first_row = 0; s->num_rows = INT_MAX; s->row_index_lower_bound = -1; } } __syncthreads(); // optimization : it might be useful to have a version of gpuDecodeStream that could go // wider than 1 warp. Currently it only only uses 1 warp so that it can overlap work // with the value decoding step when in the actual value decoding kernel. 
however during // this preprocess step we have no such limits - we could go as wide as NTHREADS if (t < 32) { constexpr int batch_size = 32; int target_input_count = batch_size; while (!s->error && s->input_value_count < s->num_input_values) { // decode repetition and definition levels. these will attempt to decode at // least up to the target, but may decode a few more. gpuDecodeStream(s->rep, s, target_input_count, t, level_type::REPETITION); gpuDecodeStream(s->def, s, target_input_count, t, level_type::DEFINITION); SYNCWARP(); // we may have decoded different amounts from each stream, so only process what we've been int actual_input_count = min(s->lvl_count[level_type::REPETITION], s->lvl_count[level_type::DEFINITION]); // process what we got back gpuUpdatePageSizes(s, actual_input_count, t, trim_pass); target_input_count = actual_input_count + batch_size; SYNCWARP(); } } // update # rows in the actual page if (!t) { pp->num_rows = s->page.nesting[0].size; pp->skipped_values = s->page.skipped_values; pp->skipped_leaf_values = s->page.skipped_leaf_values; } } /** * @brief Kernel for co the column data stored in the pages * * This function will write the page data and the page data's validity to the * output specified in the page's column chunk. If necessary, additional * conversion will be performed to translate from the Parquet datatype to * desired output datatype (ex. 32-bit to 16-bit, string to hash). * * @param[in] pages List of pages * @param[in,out] chunks List of column chunks * @param[in] min_row Row index to start reading at * @param[in] num_rows Maximum number of rows to read * @param[in] num_chunks Number of column chunks */ // blockDim {NTHREADS,1,1} extern "C" __global__ void __launch_bounds__(NTHREADS) gpuDecodePageData(PageInfo *pages, ColumnChunkDesc const *chunks, size_t min_row, size_t num_rows, int32_t num_chunks) { __shared__ __align__(16) page_state_s state_g; page_state_s *const s = &state_g; int page_idx = blockIdx.x; int t = threadIdx.x; int out_thread0; if (!setupLocalPageInfo(s, &pages[page_idx], chunks, min_row, num_rows, num_chunks)) { return; } if (s->dict_base) { out_thread0 = (s->dict_bits > 0) ? 64 : 32; } else { out_thread0 = ((s->col.data_type & 7) == BOOLEAN || (s->col.data_type & 7) == BYTE_ARRAY) ? 64 : 32; } uint32_t skipped_leaf_values = s->page.skipped_leaf_values; while (!s->error && (s->input_value_count < s->num_input_values || s->out_pos < s->nz_count)) { int target_pos; int out_pos = s->out_pos; if (t < out_thread0) { target_pos = min(out_pos + 2 * (NTHREADS - out_thread0), s->nz_count + (NTHREADS - out_thread0)); } else { target_pos = min(s->nz_count, out_pos + NTHREADS - out_thread0); if (out_thread0 > 32) { target_pos = min(target_pos, s->dict_pos); } } __syncthreads(); if (t < 32) { // decode repetition and definition levels. 
// - update validity vectors // - updates offsets (for nested columns) // - produces non-NULL value indices in s->nz_idx for subsequent decoding gpuDecodeLevels(s, target_pos, t); } else if (t < out_thread0) { uint32_t src_target_pos = target_pos + skipped_leaf_values; // WARP1: Decode dictionary indices, booleans or string positions if (s->dict_base) { src_target_pos = gpuDecodeDictionaryIndices(s, src_target_pos, t & 0x1f); } else if ((s->col.data_type & 7) == BOOLEAN) { src_target_pos = gpuDecodeRleBooleans(s, src_target_pos, t & 0x1f); } else if ((s->col.data_type & 7) == BYTE_ARRAY) { gpuInitStringDescriptors(s, src_target_pos, t & 0x1f); } if (t == 32) { *(volatile int32_t *)&s->dict_pos = src_target_pos; } } else { // WARP1..WARP3: Decode values int dtype = s->col.data_type & 7; out_pos += t - out_thread0; uint32_t src_pos = out_pos + skipped_leaf_values; int output_value_idx = s->nz_idx[rolling_index(out_pos)]; if (out_pos < target_pos && output_value_idx >= 0 && output_value_idx < s->num_input_values) { // nesting level that is storing actual leaf values int leaf_level_index = s->col.max_level[level_type::REPETITION]; uint32_t dtype_len = s->dtype_len; uint8_t *dst = s->page.nesting[leaf_level_index].data_out + static_cast<size_t>(output_value_idx) * dtype_len; if (dtype == BYTE_ARRAY) gpuOutputString(s, src_pos, dst); else if (dtype == BOOLEAN) gpuOutputBoolean(s, src_pos, dst); else if (s->col.converted_type == DECIMAL) gpuOutputDecimal(s, src_pos, reinterpret_cast<double *>(dst), dtype); else if (dtype == INT96) gpuOutputInt96Timestamp(s, src_pos, reinterpret_cast<int64_t *>(dst)); else if (dtype_len == 8) { if (s->ts_scale) gpuOutputInt64Timestamp(s, src_pos, reinterpret_cast<int64_t *>(dst)); else gpuOutputFast(s, src_pos, reinterpret_cast<uint2 *>(dst)); } else if (dtype_len == 4) gpuOutputFast(s, src_pos, reinterpret_cast<uint32_t *>(dst)); else gpuOutputGeneric(s, src_pos, dst, dtype_len); } if (t == out_thread0) { *(volatile int32_t *)&s->out_pos = target_pos; } } __syncthreads(); } } struct chunk_row_output_iter { PageInfo *p; using value_type = size_type; using difference_type = size_type; using pointer = size_type *; using reference = size_type &; using iterator_category = thrust::output_device_iterator_tag; chunk_row_output_iter operator+ __host__ __device__(int i) { return chunk_row_output_iter{p + i}; } void operator++ __host__ __device__() { p++; } reference operator[] __device__(int i) { return p[i].chunk_row; } reference operator*__device__() { return p->chunk_row; } void operator= __device__(value_type v) { p->chunk_row = v; } }; struct start_offset_output_iterator { PageInfo *p; int col_index; int nesting_depth; int empty = 0; using value_type = size_type; using difference_type = size_type; using pointer = size_type *; using reference = size_type &; using iterator_category = thrust::output_device_iterator_tag; start_offset_output_iterator operator+ __host__ __device__(int i) { return start_offset_output_iterator{p + i, col_index, nesting_depth}; } void operator++ __host__ __device__() { p++; } reference operator[] __device__(int i) { return dereference(p + i); } reference operator*__device__() { return dereference(p); } void operator= __device__(value_type v) { if (p->column_idx == col_index && !(p->flags & PAGEINFO_FLAGS_DICTIONARY)) { p->nesting[nesting_depth].page_start_value = 2; } } private: reference __device__ dereference(PageInfo *p) { if (p->column_idx != col_index || p->flags & PAGEINFO_FLAGS_DICTIONARY) { return empty; } return 
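// Note (illustrative): chunk_row_output_iter and start_offset_output_iterator act as Thrust
// output iterators so exclusive_scan_by_key can scatter its running totals straight into the
// PageInfo array. For example, three pages of one chunk with num_rows {10, 20, 30} receive
// chunk_row {0, 10, 30}; start_offset_output_iterator is used the same way to fill
// nesting[depth].page_start_value, skipping dictionary pages and pages of other columns.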
p->nesting[nesting_depth].page_start_value; } }; /** * @copydoc cudf::io::parquet::gpu::PreprocessColumnData */ hipError_t PreprocessColumnData(hostdevice_vector<PageInfo> &pages, hostdevice_vector<ColumnChunkDesc> const &chunks, std::vector<std::vector<std::pair<int, bool>>> &nested_info, size_t num_rows, size_t min_row, hipStream_t stream) { dim3 dim_block(NTHREADS, 1); dim3 dim_grid(pages.size(), 1); // 1 threadblock per page // computes: // PageNestingInfo::size for each level of nesting, for each page. // The output from this does not take row bounds (num_rows, min_row) into account hipLaunchKernelGGL(( gpuComputePageSizes), dim3(dim_grid), dim3(dim_block), 0, stream, pages.device_ptr(), chunks.device_ptr(), min_row, num_rows, chunks.size(), false); CUDA_TRY(hipStreamSynchronize(stream)); // computes: // PageInfo::chunk_row for all pages auto key_input = thrust::make_transform_iterator( pages.device_ptr(), [] __device__(PageInfo const &page) { return page.chunk_idx; }); auto page_input = thrust::make_transform_iterator( pages.device_ptr(), [] __device__(PageInfo const &page) { return page.num_rows; }); thrust::exclusive_scan_by_key(rmm::exec_policy(stream)->on(stream), key_input, key_input + pages.size(), page_input, chunk_row_output_iter{pages.device_ptr()}); // computes: // PageNestingInfo::size for each level of nesting, for each page, taking row bounds into account. // PageInfo::skipped_values, which tells us where to start decoding in the input hipLaunchKernelGGL(( gpuComputePageSizes), dim3(dim_grid), dim3(dim_block), 0, stream, pages.device_ptr(), chunks.device_ptr(), min_row, num_rows, chunks.size(), true); // retrieve pages back (PageInfo::num_rows has been set. if we don't bring it // back, this value will get overwritten later on). pages.device_to_host(stream, true); // computes: // output column sizes for each level of nesting (summing PageNestingInfo::size) // per-page start offsets for each level of nesting for (size_t idx = 0; idx < nested_info.size(); idx++) { size_t max_depth = nested_info[idx].size() - 1; for (size_t l_idx = 0; l_idx <= max_depth; l_idx++) { // column size auto page_input = thrust::make_transform_iterator( pages.device_ptr(), [idx, l_idx] __device__(PageInfo const &page) { if (page.column_idx != idx || page.flags & PAGEINFO_FLAGS_DICTIONARY) { return 0; } return page.nesting[l_idx].size; }); nested_info[idx][l_idx].first = thrust::reduce(rmm::exec_policy(stream)->on(stream), page_input, page_input + pages.size()); // add 1 for non-leaf levels for the terminating offset if (l_idx < max_depth) { nested_info[idx][l_idx].first++; } // per-page start offset auto key_input = thrust::make_transform_iterator( pages.device_ptr(), [] __device__(PageInfo const &page) { return page.column_idx; }); thrust::exclusive_scan_by_key( rmm::exec_policy(stream)->on(stream), key_input, key_input + pages.size(), page_input, start_offset_output_iterator{pages.device_ptr(), static_cast<int>(chunks[idx].dst_col_index), static_cast<int>(l_idx)}); } } return hipSuccess; } /** * @copydoc cudf::io::parquet::gpu::DecodePageData */ hipError_t __host__ DecodePageData(hostdevice_vector<PageInfo> &pages, hostdevice_vector<ColumnChunkDesc> const &chunks, size_t num_rows, size_t min_row, hipStream_t stream) { dim3 dim_block(NTHREADS, 1); dim3 dim_grid(pages.size(), 1); // 1 threadblock per page hipLaunchKernelGGL(( gpuDecodePageData), dim3(dim_grid), dim3(dim_block), 0, stream, pages.device_ptr(), chunks.device_ptr(), min_row, num_rows, chunks.size()); return hipSuccess; } } // 
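// Worked example (illustrative) for the per-level reduction in PreprocessColumnData above: if a
// list<int32> column is split across two pages whose nesting sizes are {3 lists, 7 leaf values}
// and {2 lists, 5 leaf values}, the offsets level gets size 3 + 2 + 1 = 6 (the extra slot being
// the terminating offset added for non-leaf levels) and the leaf level gets size 7 + 5 = 12.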
// namespace gpu
}  // namespace parquet
}  // namespace io
}  // namespace cudf
db6ca7f99bbae594107e3c4fb70c282f003c80b2.cu
/* * Copyright (c) 2018-2020, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <rmm/thrust_rmm_allocator.h> #include <thrust/iterator/transform_iterator.h> #include <thrust/iterator/transform_output_iterator.h> #include <thrust/scan.h> #include <thrust/tuple.h> #include <cudf/detail/utilities/release_assert.cuh> #include <cudf/utilities/bit.hpp> #include <io/utilities/block_utils.cuh> #include "parquet_gpu.h" #define LOG2_NTHREADS (5 + 2) #define NTHREADS (1 << LOG2_NTHREADS) #define NZ_BFRSZ (NTHREADS * 2) inline __device__ uint32_t rotl32(uint32_t x, uint32_t r) { return __funnelshift_l(x, x, r); // (x << r) | (x >> (32 - r)); } inline __device__ int rolling_index(int index) { return index & (NZ_BFRSZ - 1); } namespace cudf { namespace io { namespace parquet { namespace gpu { struct page_state_s { const uint8_t *data_start; const uint8_t *data_end; const uint8_t *dict_base; // ptr to dictionary page data int32_t dict_size; // size of dictionary data int32_t first_row; // First row in page to output int32_t num_rows; // Rows in page to decode (including rows to be skipped) int32_t first_output_value; // First value in page to output int32_t num_input_values; // total # of input/level values in the page int32_t dtype_len; // Output data type length int32_t dtype_len_in; // Can be larger than dtype_len if truncating 32-bit into 8-bit int32_t dict_bits; // # of bits to store dictionary indices uint32_t dict_run; int32_t dict_val; uint32_t initial_rle_run[NUM_LEVEL_TYPES]; // [def,rep] int32_t initial_rle_value[NUM_LEVEL_TYPES]; // [def,rep] int32_t error; PageInfo page; ColumnChunkDesc col; // (leaf) value decoding int32_t nz_count; // number of valid entries in nz_idx (write position in circular buffer) int32_t dict_pos; // write position of dictionary indices int32_t out_pos; // read position of final output int32_t ts_scale; // timestamp scale: <0: divide by -ts_scale, >0: multiply by ts_scale uint32_t nz_idx[NZ_BFRSZ]; // circular buffer of non-null value positions uint32_t dict_idx[NZ_BFRSZ]; // Dictionary index, boolean, or string offset values uint32_t str_len[NZ_BFRSZ]; // String length for plain encoding of strings // repetition/definition level decoding int32_t input_value_count; // how many values of the input we've processed int32_t input_row_count; // how many rows of the input we've processed int32_t input_leaf_count; // how many leaf values of the input we've processed uint32_t rep[NZ_BFRSZ]; // circular buffer of repetition level values uint32_t def[NZ_BFRSZ]; // circular buffer of definition level values const uint8_t *lvl_start[NUM_LEVEL_TYPES]; // [def,rep] int32_t lvl_count[NUM_LEVEL_TYPES]; // how many of each of the streams we've decoded int32_t row_index_lower_bound; // lower bound of row indices we should process }; /** * @brief Computes a 32-bit hash when given a byte stream and range. 
* * MurmurHash3_32 implementation from * https://github.com/aappleby/smhasher/blob/master/src/MurmurHash3.cpp * * MurmurHash3 was written by Austin Appleby, and is placed in the public * domain. The author hereby disclaims copyright to this source code. * * @param[in] key The input data to hash * @param[in] len The length of the input data * @param[in] seed An initialization value * * @return The hash value */ __device__ uint32_t device_str2hash32(const char *key, size_t len, uint32_t seed = 33) { const uint8_t *p = reinterpret_cast<const uint8_t *>(key); uint32_t h1 = seed, k1; const uint32_t c1 = 0xcc9e2d51; const uint32_t c2 = 0x1b873593; int l = len; // body while (l >= 4) { k1 = p[0] | (p[1] << 8) | (p[2] << 16) | (p[3] << 24); k1 *= c1; k1 = rotl32(k1, 15); k1 *= c2; h1 ^= k1; h1 = rotl32(h1, 13); h1 = h1 * 5 + 0xe6546b64; p += 4; l -= 4; } // tail k1 = 0; switch (l) { case 3: k1 ^= p[2] << 16; case 2: k1 ^= p[1] << 8; case 1: k1 ^= p[0]; k1 *= c1; k1 = rotl32(k1, 15); k1 *= c2; h1 ^= k1; } // finalization h1 ^= len; h1 ^= h1 >> 16; h1 *= 0x85ebca6b; h1 ^= h1 >> 13; h1 *= 0xc2b2ae35; h1 ^= h1 >> 16; return h1; } /** * @brief Read a 32-bit varint integer * * @param[in,out] cur The current data position, updated after the read * @param[in] end The end data position * * @return The 32-bit value read */ inline __device__ uint32_t get_vlq32(const uint8_t *&cur, const uint8_t *end) { uint32_t v = *cur++; if (v >= 0x80 && cur < end) { v = (v & 0x7f) | ((*cur++) << 7); if (v >= (0x80 << 7) && cur < end) { v = (v & ((0x7f << 7) | 0x7f)) | ((*cur++) << 14); if (v >= (0x80 << 14) && cur < end) { v = (v & ((0x7f << 14) | (0x7f << 7) | 0x7f)) | ((*cur++) << 21); if (v >= (0x80 << 21) && cur < end) { v = (v & ((0x7f << 21) | (0x7f << 14) | (0x7f << 7) | 0x7f)) | ((*cur++) << 28); } } } } return v; } /** * @brief Parse the beginning of the level section (definition or repetition), * initializes the initial RLE run & value, and returns the section length * * @param[in,out] s The page state * @param[in] cur The current data position * @param[in] end The end of the data * @param[in] level_bits The bits required * * @return The length of the section */ __device__ uint32_t InitLevelSection(page_state_s *s, const uint8_t *cur, const uint8_t *end, level_type lvl) { int32_t len; int level_bits = s->col.level_bits[lvl]; int encoding = lvl == level_type::DEFINITION ? s->page.definition_level_encoding : s->page.repetition_level_encoding; if (level_bits == 0) { len = 0; s->initial_rle_run[lvl] = s->page.num_input_values * 2; // repeated value s->initial_rle_value[lvl] = 0; s->lvl_start[lvl] = cur; } else if (encoding == RLE) { if (cur + 4 < end) { uint32_t run; len = 4 + (cur[0]) + (cur[1] << 8) + (cur[2] << 16) + (cur[3] << 24); cur += 4; run = get_vlq32(cur, end); s->initial_rle_run[lvl] = run; if (!(run & 1)) { int v = (cur < end) ? cur[0] : 0; cur++; if (level_bits > 8) { v |= ((cur < end) ? 
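// Note (per the Parquet RLE/bit-packed hybrid layout this parser assumes): the run header read
// by get_vlq32 is a ULEB128 varint, e.g. bytes 0xAC 0x02 decode to 0x2C | (0x02 << 7) = 300.
// A header with its LSB clear is a repeated run: count = header >> 1, followed by the repeated
// value in ceil(level_bits / 8) bytes, which is what is being read here. A header with its LSB
// set is a bit-packed run of (header >> 1) groups of 8 values.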
cur[0] : 0) << 8; cur++; } s->initial_rle_value[lvl] = v; } s->lvl_start[lvl] = cur; if (cur > end) { s->error = 2; } } else { len = 0; s->error = 2; } } else if (encoding == BIT_PACKED) { len = (s->page.num_input_values * level_bits + 7) >> 3; s->initial_rle_run[lvl] = ((s->page.num_input_values + 7) >> 3) * 2 + 1; // literal run s->initial_rle_value[lvl] = 0; s->lvl_start[lvl] = cur; } else { s->error = 3; len = 0; } return (uint32_t)len; } /** * @brief Decode values out of a definition or repetition stream * * @param[in,out] s Page state input/output * @param[in] t target_count Target count of stream values on output * @param[in] t Warp0 thread ID (0..31) * @param[in] lvl The level type we are decoding - DEFINITION or REPETITION */ __device__ void gpuDecodeStream( uint32_t *output, page_state_s *s, int32_t target_count, int t, level_type lvl) { const uint8_t *cur_def = s->lvl_start[lvl]; const uint8_t *end = s->data_start; uint32_t level_run = s->initial_rle_run[lvl]; int32_t level_val = s->initial_rle_value[lvl]; int level_bits = s->col.level_bits[lvl]; int32_t num_input_values = s->num_input_values; int32_t value_count = s->lvl_count[lvl]; int32_t batch_coded_count = 0; while (value_count < target_count && value_count < num_input_values) { int batch_len; if (level_run <= 1) { // Get a new run symbol from the byte stream int sym_len = 0; if (!t) { const uint8_t *cur = cur_def; if (cur < end) { level_run = get_vlq32(cur, end); } if (!(level_run & 1)) { if (cur < end) level_val = cur[0]; cur++; if (level_bits > 8) { if (cur < end) level_val |= cur[0] << 8; cur++; } } if (cur > end || level_run <= 1) { s->error = 0x10; } sym_len = (int32_t)(cur - cur_def); __threadfence_block(); } sym_len = SHFL0(sym_len); level_val = SHFL0(level_val); level_run = SHFL0(level_run); cur_def += sym_len; } if (s->error) { break; } batch_len = min(num_input_values - value_count, 32); if (level_run & 1) { // Literal run int batch_len8; batch_len = min(batch_len, (level_run >> 1) * 8); batch_len8 = (batch_len + 7) >> 3; if (t < batch_len) { int bitpos = t * level_bits; const uint8_t *cur = cur_def + (bitpos >> 3); bitpos &= 7; if (cur < end) level_val = cur[0]; cur++; if (level_bits > 8 - bitpos && cur < end) { level_val |= cur[0] << 8; cur++; if (level_bits > 16 - bitpos && cur < end) level_val |= cur[0] << 16; } level_val = (level_val >> bitpos) & ((1 << level_bits) - 1); } level_run -= batch_len8 * 2; cur_def += batch_len8 * level_bits; } else { // Repeated value batch_len = min(batch_len, level_run >> 1); level_run -= batch_len * 2; } if (t < batch_len) { int idx = value_count + t; output[idx & (NZ_BFRSZ - 1)] = level_val; } batch_coded_count += batch_len; value_count += batch_len; } // update the stream info if (!t) { s->lvl_start[lvl] = cur_def; s->initial_rle_run[lvl] = level_run; s->initial_rle_value[lvl] = level_val; s->lvl_count[lvl] = value_count; } } /** * @brief Performs RLE decoding of dictionary indexes * * @param[in,out] s Page state input/output * @param[in] target_pos Target index position in dict_idx buffer (may exceed this value by up to * 31) * @param[in] t Warp1 thread ID (0..31) * * @return The new output position */ __device__ int gpuDecodeDictionaryIndices(volatile page_state_s *s, int target_pos, int t) { const uint8_t *end = s->data_end; int dict_bits = s->dict_bits; int pos = s->dict_pos; while (pos < target_pos) { int is_literal, batch_len; if (!t) { uint32_t run = s->dict_run; const uint8_t *cur = s->data_start; if (run <= 1) { run = (cur < end) ? 
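// Worked example (illustrative): a freshly fetched run header of 9 (LSB set) is a bit-packed run
// of (9 >> 1) = 4 groups, i.e. 32 dictionary indices; the warp consumes all 32 in one batch and
// advances the input by 4 * dict_bits bytes. A header of 16 (LSB clear) is a repeated run of
// (16 >> 1) = 8 copies of the value assembled just below, consumed as a batch of 8.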
get_vlq32(cur, end) : 0; if (!(run & 1)) { // Repeated value int bytecnt = (dict_bits + 7) >> 3; if (cur + bytecnt <= end) { int32_t run_val = cur[0]; if (bytecnt > 1) { run_val |= cur[1] << 8; if (bytecnt > 2) { run_val |= cur[2] << 16; if (bytecnt > 3) { run_val |= cur[3] << 24; } } } s->dict_val = run_val & ((1 << dict_bits) - 1); } cur += bytecnt; } } if (run & 1) { // Literal batch: must output a multiple of 8, except for the last batch int batch_len_div8; batch_len = max(min(32, (int)(run >> 1) * 8), 1); batch_len_div8 = (batch_len + 7) >> 3; run -= batch_len_div8 * 2; cur += batch_len_div8 * dict_bits; } else { batch_len = max(min(32, (int)(run >> 1)), 1); run -= batch_len * 2; } s->dict_run = run; s->data_start = cur; is_literal = run & 1; __threadfence_block(); } SYNCWARP(); is_literal = SHFL0(is_literal); batch_len = SHFL0(batch_len); if (t < batch_len) { int dict_idx = s->dict_val; if (is_literal) { int32_t ofs = (t - ((batch_len + 7) & ~7)) * dict_bits; const uint8_t *p = s->data_start + (ofs >> 3); ofs &= 7; if (p < end) { uint32_t c = 8 - ofs; dict_idx = (*p++) >> ofs; if (c < dict_bits && p < end) { dict_idx |= (*p++) << c; c += 8; if (c < dict_bits && p < end) { dict_idx |= (*p++) << c; c += 8; if (c < dict_bits && p < end) { dict_idx |= (*p++) << c; } } } dict_idx &= (1 << dict_bits) - 1; } } s->dict_idx[(pos + t) & (NZ_BFRSZ - 1)] = dict_idx; } pos += batch_len; } return pos; } /** * @brief Performs RLE decoding of dictionary indexes, for when dict_size=1 * * @param[in,out] s Page state input/output * @param[in] target_pos Target write position * @param[in] t Thread ID * * @return The new output position */ __device__ int gpuDecodeRleBooleans(volatile page_state_s *s, int target_pos, int t) { const uint8_t *end = s->data_end; int pos = s->dict_pos; while (pos < target_pos) { int is_literal, batch_len; if (!t) { uint32_t run = s->dict_run; const uint8_t *cur = s->data_start; if (run <= 1) { run = (cur < end) ? get_vlq32(cur, end) : 0; if (!(run & 1)) { // Repeated value s->dict_val = (cur < end) ? cur[0] & 1 : 0; cur++; } } if (run & 1) { // Literal batch: must output a multiple of 8, except for the last batch int batch_len_div8; batch_len = max(min(32, (int)(run >> 1) * 8), 1); if (batch_len >= 8) { batch_len &= ~7; } batch_len_div8 = (batch_len + 7) >> 3; run -= batch_len_div8 * 2; cur += batch_len_div8; } else { batch_len = max(min(32, (int)(run >> 1)), 1); run -= batch_len * 2; } s->dict_run = run; s->data_start = cur; is_literal = run & 1; __threadfence_block(); } SYNCWARP(); is_literal = SHFL0(is_literal); batch_len = SHFL0(batch_len); if (t < batch_len) { int dict_idx; if (is_literal) { int32_t ofs = t - ((batch_len + 7) & ~7); const uint8_t *p = s->data_start + (ofs >> 3); dict_idx = (p < end) ? 
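// Note (illustrative): PLAIN-encoded booleans are packed one bit per value, least-significant
// bit first, so a byte of 0b00000101 yields the values 1, 0, 1, 0, 0, 0, 0, 0. For PLAIN pages
// setupLocalPageInfo seeds dict_run = dict_size * 2 + 1, so the whole page is treated as one
// bit-packed (literal) run and decoded through this same path.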
(p[0] >> (ofs & 7u)) & 1 : 0; } else { dict_idx = s->dict_val; } s->dict_idx[(pos + t) & (NZ_BFRSZ - 1)] = dict_idx; } pos += batch_len; } return pos; } /** * @brief Parses the length and position of strings * * @param[in,out] s Page state input/output * @param[in] target_pos Target output position * @param[in] t Thread ID * * @return The new output position */ __device__ void gpuInitStringDescriptors(volatile page_state_s *s, int target_pos, int t) { int pos = s->dict_pos; // This step is purely serial if (!t) { const uint8_t *cur = s->data_start; int dict_size = s->dict_size; int k = s->dict_val; while (pos < target_pos) { int len; if (k + 4 <= dict_size) { len = (cur[k]) | (cur[k + 1] << 8) | (cur[k + 2] << 16) | (cur[k + 3] << 24); k += 4; if (k + len > dict_size) { len = 0; } } else { len = 0; } s->dict_idx[pos & (NZ_BFRSZ - 1)] = k; s->str_len[pos & (NZ_BFRSZ - 1)] = len; k += len; pos++; } s->dict_val = k; __threadfence_block(); } } /** * @brief Output a string descriptor * * @param[in,out] s Page state input/output * @param[in] src_pos Source position * @param[in] dstv Pointer to row output data (string descriptor or 32-bit hash) */ inline __device__ void gpuOutputString(volatile page_state_s *s, int src_pos, void *dstv) { const char *ptr = NULL; size_t len = 0; if (s->dict_base) { // String dictionary uint32_t dict_pos = (s->dict_bits > 0) ? s->dict_idx[src_pos & (NZ_BFRSZ - 1)] * sizeof(nvstrdesc_s) : 0; if (dict_pos < (uint32_t)s->dict_size) { const nvstrdesc_s *src = reinterpret_cast<const nvstrdesc_s *>(s->dict_base + dict_pos); ptr = src->ptr; len = src->count; } } else { // Plain encoding uint32_t dict_pos = s->dict_idx[src_pos & (NZ_BFRSZ - 1)]; if (dict_pos <= (uint32_t)s->dict_size) { ptr = reinterpret_cast<const char *>(s->data_start + dict_pos); len = s->str_len[src_pos & (NZ_BFRSZ - 1)]; } } if (s->dtype_len == 4) { // Output hash *reinterpret_cast<uint32_t *>(dstv) = device_str2hash32(ptr, len); } else { // Output string descriptor nvstrdesc_s *dst = reinterpret_cast<nvstrdesc_s *>(dstv); dst->ptr = ptr; dst->count = len; } } /** * @brief Output a boolean * * @param[in,out] s Page state input/output * @param[in] src_pos Source position * @param[in] dst Pointer to row output data */ inline __device__ void gpuOutputBoolean(volatile page_state_s *s, int src_pos, uint8_t *dst) { *dst = s->dict_idx[src_pos & (NZ_BFRSZ - 1)]; } /** * @brief Store a 32-bit data element * * @param[out] dst ptr to output * @param[in] src8 raw input bytes * @param[in] dict_pos byte position in dictionary * @param[in] dict_size size of dictionary */ inline __device__ void gpuStoreOutput(uint32_t *dst, const uint8_t *src8, uint32_t dict_pos, uint32_t dict_size) { uint32_t bytebuf; unsigned int ofs = 3 & reinterpret_cast<size_t>(src8); src8 -= ofs; // align to 32-bit boundary ofs <<= 3; // bytes -> bits if (dict_pos < dict_size) { bytebuf = *(const uint32_t *)(src8 + dict_pos); if (ofs) { uint32_t bytebufnext = *(const uint32_t *)(src8 + dict_pos + 4); bytebuf = __funnelshift_r(bytebuf, bytebufnext, ofs); } } else { bytebuf = 0; } *dst = bytebuf; } /** * @brief Store a 64-bit data element * * @param[out] dst ptr to output * @param[in] src8 raw input bytes * @param[in] dict_pos byte position in dictionary * @param[in] dict_size size of dictionary */ inline __device__ void gpuStoreOutput(uint2 *dst, const uint8_t *src8, uint32_t dict_pos, uint32_t dict_size) { uint2 v; unsigned int ofs = 3 & reinterpret_cast<size_t>(src8); src8 -= ofs; // align to 32-bit boundary ofs <<= 3; // bytes -> bits if 
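// Worked example (illustrative) for these unaligned loads: if src8 originally pointed 3 bytes
// past a 32-bit boundary, ofs becomes 3 * 8 = 24 bits and src8 is rewound by 3 bytes. Each
// aligned 32-bit load then over-fetches, and __funnelshift_r(lo, hi, 24) stitches bytes [3..6]
// of the rewound stream back together, i.e. exactly the 4 bytes at the original unaligned
// address.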
(dict_pos < dict_size) { v.x = *(const uint32_t *)(src8 + dict_pos + 0); v.y = *(const uint32_t *)(src8 + dict_pos + 4); if (ofs) { uint32_t next = *(const uint32_t *)(src8 + dict_pos + 8); v.x = __funnelshift_r(v.x, v.y, ofs); v.y = __funnelshift_r(v.y, next, ofs); } } else { v.x = v.y = 0; } *dst = v; } /** * @brief Convert an INT96 Spark timestamp to 64-bit timestamp * * @param[in,out] s Page state input/output * @param[in] src_pos Source position * @param[in] dst Pointer to row output data */ inline __device__ void gpuOutputInt96Timestamp(volatile page_state_s *s, int src_pos, int64_t *dst) { const uint8_t *src8; uint32_t dict_pos, dict_size = s->dict_size, ofs; int64_t ts; if (s->dict_base) { // Dictionary dict_pos = (s->dict_bits > 0) ? s->dict_idx[src_pos & (NZ_BFRSZ - 1)] : 0; src8 = s->dict_base; } else { // Plain dict_pos = src_pos; src8 = s->data_start; } dict_pos *= (uint32_t)s->dtype_len_in; ofs = 3 & reinterpret_cast<size_t>(src8); src8 -= ofs; // align to 32-bit boundary ofs <<= 3; // bytes -> bits if (dict_pos + 4 < dict_size) { uint3 v; int64_t nanos, secs, days; v.x = *(const uint32_t *)(src8 + dict_pos + 0); v.y = *(const uint32_t *)(src8 + dict_pos + 4); v.z = *(const uint32_t *)(src8 + dict_pos + 8); if (ofs) { uint32_t next = *(const uint32_t *)(src8 + dict_pos + 12); v.x = __funnelshift_r(v.x, v.y, ofs); v.y = __funnelshift_r(v.y, v.z, ofs); v.z = __funnelshift_r(v.z, next, ofs); } nanos = v.y; nanos <<= 32; nanos |= v.x; // Convert from Julian day at noon to UTC seconds days = static_cast<int32_t>(v.z); secs = (days - 2440588) * (24 * 60 * 60); // TBD: Should be noon instead of midnight, but this matches pyarrow if (s->col.ts_clock_rate) ts = (secs * s->col.ts_clock_rate) + nanos / (1000000000 / s->col.ts_clock_rate); // Output to desired clock rate else ts = (secs * 1000000000) + nanos; } else { ts = 0; } *dst = ts; } /** * @brief Output a 64-bit timestamp * * @param[in,out] s Page state input/output * @param[in] src_pos Source position * @param[in] dst Pointer to row output data */ inline __device__ void gpuOutputInt64Timestamp(volatile page_state_s *s, int src_pos, int64_t *dst) { const uint8_t *src8; uint32_t dict_pos, dict_size = s->dict_size, ofs; int64_t ts; if (s->dict_base) { // Dictionary dict_pos = (s->dict_bits > 0) ? 
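// Note on the INT96 path above (illustrative): Spark-style INT96 timestamps store 8 bytes of
// nanoseconds-within-day followed by a 4-byte Julian day number; 2440588 is the Julian day of
// the Unix epoch (1970-01-01), so e.g. day 2440589 with zero nanoseconds converts to 86400
// seconds before the clock-rate adjustment.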
s->dict_idx[src_pos & (NZ_BFRSZ - 1)] : 0; src8 = s->dict_base; } else { // Plain dict_pos = src_pos; src8 = s->data_start; } dict_pos *= (uint32_t)s->dtype_len_in; ofs = 3 & reinterpret_cast<size_t>(src8); src8 -= ofs; // align to 32-bit boundary ofs <<= 3; // bytes -> bits if (dict_pos + 4 < dict_size) { uint2 v; int64_t val; int32_t ts_scale; v.x = *(const uint32_t *)(src8 + dict_pos + 0); v.y = *(const uint32_t *)(src8 + dict_pos + 4); if (ofs) { uint32_t next = *(const uint32_t *)(src8 + dict_pos + 8); v.x = __funnelshift_r(v.x, v.y, ofs); v.y = __funnelshift_r(v.y, next, ofs); } val = v.y; val <<= 32; val |= v.x; // Output to desired clock rate ts_scale = s->ts_scale; if (ts_scale < 0) { // round towards negative infinity int sign = (val < 0); ts = ((val + sign) / -ts_scale) + sign; } else { ts = val * ts_scale; } } else { ts = 0; } *dst = ts; } /** * @brief Powers of 10 */ static const __device__ __constant__ double kPow10[40] = { 1.0, 1.e1, 1.e2, 1.e3, 1.e4, 1.e5, 1.e6, 1.e7, 1.e8, 1.e9, 1.e10, 1.e11, 1.e12, 1.e13, 1.e14, 1.e15, 1.e16, 1.e17, 1.e18, 1.e19, 1.e20, 1.e21, 1.e22, 1.e23, 1.e24, 1.e25, 1.e26, 1.e27, 1.e28, 1.e29, 1.e30, 1.e31, 1.e32, 1.e33, 1.e34, 1.e35, 1.e36, 1.e37, 1.e38, 1.e39, }; /** * @brief Output a decimal type ([INT32..INT128] + scale) as a 64-bit float * * @param[in,out] s Page state input/output * @param[in] src_pos Source position * @param[in] dst Pointer to row output data * @param[in] dtype Stored data type */ inline __device__ void gpuOutputDecimal(volatile page_state_s *s, int src_pos, double *dst, int dtype) { const uint8_t *dict; uint32_t dict_pos, dict_size = s->dict_size, dtype_len_in; int64_t i128_hi, i128_lo; int32_t scale; double d; if (s->dict_base) { // Dictionary dict_pos = (s->dict_bits > 0) ? s->dict_idx[src_pos & (NZ_BFRSZ - 1)] : 0; dict = s->dict_base; } else { // Plain dict_pos = src_pos; dict = s->data_start; } dtype_len_in = s->dtype_len_in; dict_pos *= dtype_len_in; // FIXME: Not very efficient (currently reading 1 byte at a time) -> need a variable-length // unaligned load utility function (both little-endian and big-endian versions) if (dtype == INT32) { int32_t lo32 = 0; for (unsigned int i = 0; i < dtype_len_in; i++) { uint32_t v = (dict_pos + i < dict_size) ? dict[dict_pos + i] : 0; lo32 |= v << (i * 8); } i128_lo = lo32; i128_hi = lo32 >> 31; } else if (dtype == INT64) { int64_t lo64 = 0; for (unsigned int i = 0; i < dtype_len_in; i++) { uint64_t v = (dict_pos + i < dict_size) ? dict[dict_pos + i] : 0; lo64 |= v << (i * 8); } i128_lo = lo64; i128_hi = lo64 >> 63; } else // if (dtype == FIXED_LENGTH_BYTE_ARRAY) { i128_lo = 0; for (unsigned int i = dtype_len_in - min(dtype_len_in, 8); i < dtype_len_in; i++) { uint32_t v = (dict_pos + i < dict_size) ? dict[dict_pos + i] : 0; i128_lo = (i128_lo << 8) | v; } if (dtype_len_in > 8) { i128_hi = 0; for (unsigned int i = dtype_len_in - min(dtype_len_in, 16); i < dtype_len_in - 8; i++) { uint32_t v = (dict_pos + i < dict_size) ? dict[dict_pos + i] : 0; i128_hi = (i128_hi << 8) | v; } if (dtype_len_in < 16) { i128_hi <<= 64 - (dtype_len_in - 8) * 8; i128_hi >>= 64 - (dtype_len_in - 8) * 8; } } else { if (dtype_len_in < 8) { i128_lo <<= 64 - dtype_len_in * 8; i128_lo >>= 64 - dtype_len_in * 8; } i128_hi = i128_lo >> 63; } } scale = s->col.decimal_scale; d = Int128ToDouble_rn(i128_lo, i128_hi); *dst = (scale < 0) ? 
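// Worked example (illustrative): a DECIMAL(9, 2) value stored as the unscaled integer 12345 has
// decimal_scale = 2, so the double produced here is 12345 / 10^2 = 123.45. A negative scale
// multiplies instead, e.g. scale = -3 turns 12 into 12000.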
(d * kPow10[min(-scale, 39)]) : (d / kPow10[min(scale, 39)]); } /** * @brief Output a small fixed-length value * * @param[in,out] s Page state input/output * @param[in] src_pos Source position * @param[in] dst Pointer to row output data */ template <typename T> inline __device__ void gpuOutputFast(volatile page_state_s *s, int src_pos, T *dst) { const uint8_t *dict; uint32_t dict_pos, dict_size = s->dict_size; if (s->dict_base) { // Dictionary dict_pos = (s->dict_bits > 0) ? s->dict_idx[src_pos & (NZ_BFRSZ - 1)] : 0; dict = s->dict_base; } else { // Plain dict_pos = src_pos; dict = s->data_start; } dict_pos *= (uint32_t)s->dtype_len_in; gpuStoreOutput(dst, dict, dict_pos, dict_size); } /** * @brief Output a N-byte value * * @param[in,out] s Page state input/output * @param[in] src_pos Source position * @param[in] dst8 Pointer to row output data * @param[in] len Length of element */ static __device__ void gpuOutputGeneric(volatile page_state_s *s, int src_pos, uint8_t *dst8, int len) { const uint8_t *dict; uint32_t dict_pos, dict_size = s->dict_size; if (s->dict_base) { // Dictionary dict_pos = (s->dict_bits > 0) ? s->dict_idx[src_pos & (NZ_BFRSZ - 1)] : 0; dict = s->dict_base; } else { // Plain dict_pos = src_pos; dict = s->data_start; } dict_pos *= (uint32_t)s->dtype_len_in; if (len & 3) { // Generic slow path for (unsigned int i = 0; i < len; i++) { dst8[i] = (dict_pos + i < dict_size) ? dict[dict_pos + i] : 0; } } else { // Copy 4 bytes at a time const uint8_t *src8 = dict; unsigned int ofs = 3 & reinterpret_cast<size_t>(src8); src8 -= ofs; // align to 32-bit boundary ofs <<= 3; // bytes -> bits for (unsigned int i = 0; i < len; i += 4) { uint32_t bytebuf; if (dict_pos < dict_size) { bytebuf = *(const uint32_t *)(src8 + dict_pos); if (ofs) { uint32_t bytebufnext = *(const uint32_t *)(src8 + dict_pos + 4); bytebuf = __funnelshift_r(bytebuf, bytebufnext, ofs); } } else { bytebuf = 0; } dict_pos += 4; *(uint32_t *)(dst8 + i) = bytebuf; } } } /** * @brief Sets up block-local page state information from the global pages. 
* * @param[in, out] s The local page state to be filled in * @param[in] p The global page to be copied from * @param[in] chunks The global list of chunks * @param[in] num_rows Maximum number of rows to read * @param[in] min_row crop all rows below min_row * @param[in] num_chunk Number of column chunks */ static __device__ bool setupLocalPageInfo(page_state_s *const s, PageInfo *p, ColumnChunkDesc const *chunks, size_t min_row, size_t num_rows, int32_t num_chunks) { int t = threadIdx.x; int chunk_idx; // Fetch page info // NOTE: Assumes that sizeof(PageInfo) <= 256 (and is padded to 4 bytes) if (t < sizeof(PageInfo) / sizeof(uint32_t)) { ((uint32_t *)&s->page)[t] = ((const uint32_t *)p)[t]; } __syncthreads(); if (s->page.flags & PAGEINFO_FLAGS_DICTIONARY) { return false; } // Fetch column chunk info chunk_idx = s->page.chunk_idx; if ((uint32_t)chunk_idx < (uint32_t)num_chunks) { // NOTE: Assumes that sizeof(ColumnChunkDesc) <= 256 (and is padded to 4 bytes) if (t < sizeof(ColumnChunkDesc) / sizeof(uint32_t)) { ((uint32_t *)&s->col)[t] = ((const uint32_t *)&chunks[chunk_idx])[t]; } } // zero nested value and valid counts int d = 0; while (d < s->page.num_nesting_levels) { if (d + t < s->page.num_nesting_levels) { s->page.nesting[d + t].valid_count = 0; s->page.nesting[d + t].value_count = 0; } d += blockDim.x; } __syncthreads(); if (!t) { s->error = 0; // our starting row (absolute index) is // col.start_row == absolute row index // page.chunk-row == relative row index within the chunk size_t page_start_row = s->col.start_row + s->page.chunk_row; // IMPORTANT : nested schemas can have 0 rows in a page but still have // values. The case is: // - On page N-1, the last row starts, with 2/6 values encoded // - On page N, the remaining 4/6 values are encoded, but there are no new rows. // if (s->page.num_input_values > 0 && s->page.num_rows > 0) { if (s->page.num_input_values > 0) { uint8_t *cur = s->page.page_data; uint8_t *end = cur + s->page.uncompressed_page_size; uint32_t dtype_len_out = s->col.data_type >> 3; s->ts_scale = 0; // Validate data type switch (s->col.data_type & 7) { case BOOLEAN: s->dtype_len = 1; // Boolean are stored as 1 byte on the output break; case INT32: case FLOAT: s->dtype_len = 4; break; case INT64: if (s->col.ts_clock_rate) { int32_t units = 0; if (s->col.converted_type == TIME_MICROS || s->col.converted_type == TIMESTAMP_MICROS) units = 1000000; else if (s->col.converted_type == TIME_MILLIS || s->col.converted_type == TIMESTAMP_MILLIS) units = 1000; if (units && units != s->col.ts_clock_rate) s->ts_scale = (s->col.ts_clock_rate < units) ? 
-(units / s->col.ts_clock_rate) : (s->col.ts_clock_rate / units); } // Fall through to DOUBLE case DOUBLE: s->dtype_len = 8; break; case INT96: s->dtype_len = 12; break; case BYTE_ARRAY: s->dtype_len = sizeof(nvstrdesc_s); break; default: // FIXED_LEN_BYTE_ARRAY: s->dtype_len = dtype_len_out; s->error |= (s->dtype_len <= 0); break; } // Special check for downconversions s->dtype_len_in = s->dtype_len; if (s->col.converted_type == DECIMAL) { s->dtype_len = 8; // Convert DECIMAL to 64-bit float } else if ((s->col.data_type & 7) == INT32) { if (dtype_len_out == 1) s->dtype_len = 1; // INT8 output if (dtype_len_out == 2) s->dtype_len = 2; // INT16 output } else if ((s->col.data_type & 7) == BYTE_ARRAY && dtype_len_out == 4) { s->dtype_len = 4; // HASH32 output } else if ((s->col.data_type & 7) == INT96) { s->dtype_len = 8; // Convert to 64-bit timestamp } // first row within the page to start reading if (page_start_row >= min_row) { s->first_row = 0; } else { s->first_row = (int32_t)min(min_row - page_start_row, (size_t)s->page.num_rows); } // # of rows within the page to read s->num_rows = s->page.num_rows; if ((page_start_row + s->first_row) + s->num_rows > min_row + num_rows) { s->num_rows = (int32_t)max((int64_t)(min_row + num_rows - (page_start_row + s->first_row)), INT64_C(0)); } // during the decoding step we need to offset the global output buffers // for each level of nesting so that we write to the section this page // is reponsible for. // - for flat schemas, we can do this directly by using row counts // - for nested schemas, these offsets are computed during the preprocess step if (s->col.column_data_base != nullptr) { int max_depth = s->col.max_level[level_type::REPETITION]; for (int idx = 0; idx <= max_depth; idx++) { PageNestingInfo *pni = &s->page.nesting[idx]; size_t output_offset; if (max_depth == 0) { output_offset = page_start_row >= min_row ? page_start_row - min_row : 0; } // for nested schemas, we've already got the exactly value precomputed else { output_offset = pni->page_start_value; } // anything below max depth is an offset uint32_t len = idx < max_depth ? sizeof(cudf::size_type) : s->dtype_len; pni->data_out = reinterpret_cast<uint8_t *>(s->col.column_data_base[idx]) + (output_offset * len); pni->valid_map = s->col.valid_map_base[idx]; if (pni->valid_map != nullptr) { pni->valid_map += output_offset >> 5; pni->valid_map_offset = (int32_t)(output_offset & 0x1f); } } } s->first_output_value = 0; // Find the compressed size of repetition levels cur += InitLevelSection(s, cur, end, level_type::REPETITION); // Find the compressed size of definition levels cur += InitLevelSection(s, cur, end, level_type::DEFINITION); s->dict_bits = 0; s->dict_base = 0; s->dict_size = 0; switch (s->page.encoding) { case PLAIN_DICTIONARY: case RLE_DICTIONARY: // RLE-packed dictionary indices, first byte indicates index length in bits if (((s->col.data_type & 7) == BYTE_ARRAY) && (s->col.str_dict_index)) { // String dictionary: use index s->dict_base = reinterpret_cast<const uint8_t *>(s->col.str_dict_index); s->dict_size = s->col.page_info[0].num_input_values * sizeof(nvstrdesc_s); } else { s->dict_base = s->col.page_info[0].page_data; // dictionary is always stored in the first page s->dict_size = s->col.page_info[0].uncompressed_page_size; } s->dict_run = 0; s->dict_val = 0; s->dict_bits = (cur < end) ? 
*cur++ : 0; if (s->dict_bits > 32 || !s->dict_base) { s->error = (10 << 8) | s->dict_bits; } break; case PLAIN: s->dict_size = static_cast<int32_t>(end - cur); s->dict_val = 0; if ((s->col.data_type & 7) == BOOLEAN) { s->dict_run = s->dict_size * 2 + 1; } break; case RLE: s->dict_run = 0; break; default: s->error = 1; // Unsupported encoding break; } if (cur > end) { s->error = 1; } s->data_start = cur; s->data_end = end; } else { s->error = 1; } s->lvl_count[level_type::REPETITION] = 0; s->lvl_count[level_type::DEFINITION] = 0; s->nz_count = 0; s->num_input_values = s->page.num_input_values; s->dict_pos = 0; s->out_pos = 0; // handle row bounds (skip_rows, min_rows) s->input_row_count = s->first_row; // return the lower bound to compare (page-relative) thread row index against. Explanation: // In the case of nested schemas, rows can span page boundaries. That is to say, // we can encounter the first value for row X on page M, but the last value for page M // might not be the last value for row X. page M+1 (or further) may contain the last value. // // This means that the first values we encounter for a given page (M+1) may not belong to the // row indicated by chunk_row, but to the row before it that spanned page boundaries. If that // previous row is within the overall row bounds, include the values by allowing relative row // index -1 int max_row = (min_row + num_rows) - 1; if (min_row < page_start_row && max_row >= page_start_row - 1) { s->row_index_lower_bound = -1; } else { s->row_index_lower_bound = s->first_row; } // if we're in the decoding step, jump directly to the first // value we care about if (s->col.column_data_base != nullptr) { // for flat hierarchies, we haven't computed skipped_values yet, but we can do so trivially // now if (s->col.max_level[level_type::REPETITION] == 0) { s->page.skipped_values = s->first_row; s->page.skipped_leaf_values = s->first_row; } s->input_value_count = s->page.skipped_values; } else { s->input_value_count = 0; s->input_leaf_count = 0; s->page.skipped_values = -1; s->page.skipped_leaf_values = -1; } __threadfence_block(); } __syncthreads(); return true; } /** * @brief Store a validity mask containing value_count bits into the output validity buffer of the * page. * * @param[in,out] pni The page/nesting information to store the mask in. The validity map offset is * also updated * @param[in] valid_mask The validity mask to be stored * @param[in] value_count # of bits in the validity mask */ static __device__ void store_validity(PageNestingInfo *pni, uint32_t valid_mask, int32_t value_count) { int word_offset = pni->valid_map_offset / 32; int bit_offset = pni->valid_map_offset % 32; // if we fit entirely in the output word if (bit_offset + value_count <= 32) { uint32_t relevant_mask = static_cast<uint32_t>((static_cast<uint64_t>(1) << value_count) - 1); if (relevant_mask == ~0) { pni->valid_map[word_offset] = valid_mask; } else { atomicAnd(pni->valid_map + word_offset, ~(relevant_mask << bit_offset)); atomicOr(pni->valid_map + word_offset, (valid_mask & relevant_mask) << bit_offset); } } // we're going to spill over into the next word. // note : writing both values here is the lazy/slow way. we could be writing just // the first word and rolling the remaining bits over into the next call. // however, some basic performance tests shows almost no difference between these two // methods. More detailed performance testing might be worthwhile here. else { uint32_t bits_left = 32 - bit_offset; // first word. 
strip bits_left bits off the beginning and store that uint32_t relevant_mask = ((1 << bits_left) - 1); uint32_t mask_word0 = valid_mask & relevant_mask; atomicAnd(pni->valid_map + word_offset, ~(relevant_mask << bit_offset)); atomicOr(pni->valid_map + word_offset, mask_word0 << bit_offset); // second word. strip the remainder of the bits off the end and store that relevant_mask = ((1 << (value_count - bits_left)) - 1); uint32_t mask_word1 = valid_mask & (relevant_mask << bits_left); atomicAnd(pni->valid_map + word_offset + 1, ~(relevant_mask)); atomicOr(pni->valid_map + word_offset + 1, mask_word1 >> bits_left); } pni->valid_map_offset += value_count; } /** * @brief Process a batch of incoming repetition/definition level values and generate * validity, nested column offsets (where appropriate) and decoding indices. * * @param[in] target_input_value_count The # of repetition/definition levels to process up to * @param[in] s Local page information * @param[in] t Thread index */ static __device__ void gpuUpdateValidityOffsetsAndRowIndices(int32_t target_input_value_count, page_state_s *s, int t) { // max nesting depth of the column int max_depth = s->col.max_level[level_type::REPETITION]; // how many (input) values we've processed in the page so far int input_value_count = s->input_value_count; // how many rows we've processed in the page so far int input_row_count = s->input_row_count; // process until we've reached the target while (input_value_count < target_input_value_count) { // determine the nesting bounds for this thread int start_depth = -1; int end_depth = -1; int d = -1; if (input_value_count + t < target_input_value_count) { int index = rolling_index(input_value_count + t); // important : we don't decode repetition levels for flat schemas. we can assume the // repetition level is always 0. int r = max_depth == 0 ? 0 : s->rep[index]; start_depth = r; d = s->def[index]; end_depth = s->page.nesting[d].d_remap; } // 4 interesting things to track: // thread_value_count : # of output values from the view of this thread // warp_value_count : # of output values for the whole warp // // thread_valid_count : # of valid values from the view of this thread // warp_valid_count : # of valid values for the whole warp uint32_t thread_value_count, warp_value_count; uint32_t thread_valid_count, warp_valid_count; // track (page-relative) row index for the thread so we can compare against input bounds // keep track of overall # of rows we've read. int is_new_row = start_depth == 0 ? 1 : 0; uint32_t warp_row_count_mask = BALLOT(is_new_row); int32_t thread_row_index = input_row_count + ((__popc(warp_row_count_mask & ((1 << t) - 1)) + is_new_row) - 1); input_row_count += __popc(warp_row_count_mask); // is this thread within row bounds? int in_row_bounds = thread_row_index >= s->row_index_lower_bound && thread_row_index < (s->first_row + s->num_rows) ? 1 : 0; // compute warp and thread value counts uint32_t warp_count_mask = BALLOT((0 >= start_depth && 0 <= end_depth) && in_row_bounds ? 1 : 0); warp_value_count = __popc(warp_count_mask); // Note : ((1 << t) - 1) implies "for all threads before me" thread_value_count = __popc(warp_count_mask & ((1 << t) - 1)); // always walk from 0 to max_depth even if our start and end depths are different. // otherwise we'd have thread/warp synchronization issues on the BALLOT() and WarpReduce() // calls. 
uint32_t next_thread_value_count, next_warp_value_count; for (int s_idx = 0; s_idx <= max_depth; s_idx++) { PageNestingInfo *pni = &s->page.nesting[s_idx]; int in_bounds = ((s_idx >= start_depth && s_idx <= end_depth) && in_row_bounds) ? 1 : 0; // everything up to the max_def_level is a real value uint32_t is_valid = 0; if (d >= pni->max_def_level && in_bounds) { is_valid = 1; } // compute warp and thread valid counts uint32_t warp_valid_mask; // for flat schemas, a simple ballot_sync gives us the correct count and bit positions because // every value in the input matches to a value in the output if (max_depth == 0) { warp_valid_mask = BALLOT(is_valid); } // for nested schemas, it's more complicated. This warp will visit 32 incoming values, // however not all of them will necessarily represent a value at this nesting level. so the // validity bit for thread t might actually represent output value t-6. the correct position // for thread t's bit is cur_value_count. for cuda 11 we could use __reduce_or_sync(), but // until then we have to do a warp reduce. else { warp_valid_mask = WarpReduceOr32(is_valid << thread_value_count); } thread_valid_count = __popc(warp_valid_mask & ((1 << thread_value_count) - 1)); warp_valid_count = __popc(warp_valid_mask); // if this is the value column emit an index if (is_valid && s_idx == max_depth) { int idx = pni->valid_count + thread_valid_count; int ofs = pni->value_count + thread_value_count; s->nz_idx[rolling_index(idx)] = ofs; } // compute warp and thread value counts for the -next- nesting level. we need to // do this for here nested schemas so that we can emit an offset for the -current- nesting // level. more concretely : the offset for the current nesting level == current length of the // next nesting level if (s_idx < max_depth) { uint32_t next_warp_count_mask = BALLOT((s_idx + 1 >= start_depth && s_idx + 1 <= end_depth && in_row_bounds) ? 1 : 0); next_warp_value_count = __popc(next_warp_count_mask); next_thread_value_count = __popc(next_warp_count_mask & ((1 << t) - 1)); // if we're -not- at a leaf column, and we're within row bounds, emit an offset if (in_bounds) { int idx = pni->value_count + thread_value_count; cudf::size_type ofs = s->page.nesting[s_idx + 1].value_count + next_thread_value_count + s->page.nesting[s_idx + 1].page_start_value; (reinterpret_cast<cudf::size_type *>(pni->data_out))[idx] = ofs; } } // increment count of valid values, count of total values, and validity mask if (!t) { if (pni->valid_map != nullptr) { store_validity(pni, warp_valid_mask, warp_value_count); } pni->valid_count += warp_valid_count; pni->value_count += warp_value_count; } // propagate value counts for the next level warp_value_count = next_warp_value_count; thread_value_count = next_thread_value_count; } input_value_count += min(32, (target_input_value_count - input_value_count)); SYNCWARP(); } // update if (!t) { // update valid value count for decoding and total # of values we've processed s->nz_count = s->page.nesting[max_depth].valid_count; s->input_value_count = input_value_count; s->input_row_count = input_row_count; } } /** * @brief Process repetition and definition levels up to the target count of leaf values. * * In order to decode actual leaf values from the input stream, we need to generate the * list of non-null value positions (page_state_s::nz_idx). We do this by processing * the repetition and definition level streams. This process also generates validity information, * and offset column values in the case of nested schemas. 
Because of the way the streams * are encoded, this function may generate slightly more than target_leaf_count. * * Only runs on 1 warp. * * @param[in] s The local page state * @param[in] target_leaf_count Target count of non-null leaf values to generate indices for * @param[in] t Thread index */ __device__ void gpuDecodeLevels(page_state_s *s, int32_t target_leaf_count, int t) { int max_depth = s->col.max_level[level_type::REPETITION]; constexpr int batch_size = 32; int cur_leaf_count = target_leaf_count; while (!s->error && s->nz_count < target_leaf_count && s->input_value_count < s->num_input_values) { // only need to decode repetition levels in the case of a nested schema if (max_depth > 0) { gpuDecodeStream(s->rep, s, cur_leaf_count, t, level_type::REPETITION); } gpuDecodeStream(s->def, s, cur_leaf_count, t, level_type::DEFINITION); SYNCWARP(); // because the rep and def streams are encoded seperately, we cannot request an exact // # of values to be decoded at once. we can only process the lowest # of decoded rep/def // levels we get. int actual_leaf_count = max_depth > 0 ? min(s->lvl_count[level_type::REPETITION], s->lvl_count[level_type::DEFINITION]) : s->lvl_count[level_type::DEFINITION]; // process what we got back gpuUpdateValidityOffsetsAndRowIndices(actual_leaf_count, s, t); cur_leaf_count = actual_leaf_count + batch_size; SYNCWARP(); } } /** * @brief Process a batch of incoming repetition/definition level values to generate * per-nesting level output column size for this page. * * Each page represents one piece of the overall output column. The total output (cudf) * column sizes are the sum of the values in each individual page. * * @param[in] s The local page info * @param[in] target_input_value_count The # of repetition/definition levels to process up to * @param[in] t Thread index * @param[in] bounds_set Whether or not s->row_index_lower_bound, s->first_row and s->num_rows * have been computed for this page (they will only be set in the second/trim pass). */ static __device__ void gpuUpdatePageSizes(page_state_s *s, int32_t target_input_value_count, int t, bool bounds_set) { // max nesting depth of the column int max_depth = s->col.max_level[level_type::REPETITION]; // how many input level values we've processed in the page so far int input_value_count = s->input_value_count; // how many leaf values we've processed in the page so far int input_leaf_count = s->input_leaf_count; // how many rows we've processed in the page so far int input_row_count = s->input_row_count; while (input_value_count < target_input_value_count) { // determine the nesting bounds for this thread int start_depth = -1; int end_depth = -1; int d = -1; if (input_value_count + t < target_input_value_count) { int index = rolling_index(input_value_count + t); int r = s->rep[index]; start_depth = r; d = s->def[index]; end_depth = s->page.nesting[d].d_remap; } // count rows and leaf values int is_new_row = start_depth == 0 ? 1 : 0; uint32_t warp_row_count_mask = BALLOT(is_new_row); int is_new_leaf = (d >= s->page.nesting[max_depth].max_def_level) ? 1 : 0; uint32_t warp_leaf_count_mask = BALLOT(is_new_leaf); // is this thread within row bounds? on the first pass we don't know the bounds, so we will be // computing the full size of the column. on the second pass, we will know our actual row // bounds, so the computation will cap sizes properly. 
int in_row_bounds = 1; if (bounds_set) { // absolute row index int32_t thread_row_index = input_row_count + ((__popc(warp_row_count_mask & ((1 << t) - 1)) + is_new_row) - 1); in_row_bounds = thread_row_index >= s->row_index_lower_bound && thread_row_index < (s->first_row + s->num_rows) ? 1 : 0; uint32_t row_bounds_mask = BALLOT(in_row_bounds); int first_thread_in_range = __ffs(row_bounds_mask) - 1; // if we've found the beginning of the first row, mark down the position // in the def/repetition buffer (skipped_values) and the data buffer (skipped_leaf_values) if (!t && first_thread_in_range >= 0 && s->page.skipped_values < 0) { // how many values we've skipped in the rep/def levels s->page.skipped_values = input_value_count + first_thread_in_range; // how many values we've skipped in the actual data stream s->page.skipped_leaf_values = input_leaf_count + __popc(warp_leaf_count_mask & ((1 << first_thread_in_range) - 1)); } } // increment counts across all nesting depths for (int s_idx = 0; s_idx <= max_depth; s_idx++) { int in_bounds = (s_idx >= start_depth && s_idx <= end_depth && in_row_bounds) ? 1 : 0; uint32_t count_mask = BALLOT(in_bounds); if (!t) { s->page.nesting[s_idx].size += __popc(count_mask); } } input_value_count += min(32, (target_input_value_count - input_value_count)); input_row_count += __popc(warp_row_count_mask); input_leaf_count += __popc(warp_leaf_count_mask); } // update final page value count if (!t) { s->input_value_count = target_input_value_count; s->input_leaf_count = input_leaf_count; s->input_row_count = input_row_count; } } /** * @brief Kernel for computing per-page column size information for all nesting levels. * * This function will write out the size field for each level of nesting. * * @param[in,out] pages List of pages * @param[in] chunks List of column chunks * @param[in] num_chunks Number of column chunks * @param[in] min_row Row index to start reading at * @param[in] num_rows Maximum number of rows to read * @param[in] num_chunks Number of column chunks * @param[in] trim_pass Whether or not this is the trim pass. We first have to compute * the full size information of every page before we come through in a second (trim) pass * to determine what subset of rows in this page we should be reading. */ // blockDim {NTHREADS,1,1} extern "C" __global__ void __launch_bounds__(NTHREADS) gpuComputePageSizes(PageInfo *pages, ColumnChunkDesc const *chunks, size_t min_row, size_t num_rows, int32_t num_chunks, bool trim_pass) { __shared__ __align__(16) page_state_s state_g; page_state_s *const s = &state_g; int page_idx = blockIdx.x; int t = threadIdx.x; PageInfo *pp = &pages[page_idx]; if (!setupLocalPageInfo( s, pp, chunks, trim_pass ? min_row : 0, trim_pass ? num_rows : INT_MAX, num_chunks)) { return; } // zero sizes int d = 0; while (d < s->page.num_nesting_levels) { if (d + t < s->page.num_nesting_levels) { s->page.nesting[d + t].size = 0; } d += blockDim.x; } if (!t) { s->page.skipped_values = -1; s->page.skipped_leaf_values = -1; s->input_row_count = 0; s->input_value_count = 0; // if this isn't the trim pass, make sure we visit absolutely everything if (!trim_pass) { s->first_row = 0; s->num_rows = INT_MAX; s->row_index_lower_bound = -1; } } __syncthreads(); // optimization : it might be useful to have a version of gpuDecodeStream that could go // wider than 1 warp. Currently it only only uses 1 warp so that it can overlap work // with the value decoding step when in the actual value decoding kernel. 
however during // this preprocess step we have no such limits - we could go as wide as NTHREADS if (t < 32) { constexpr int batch_size = 32; int target_input_count = batch_size; while (!s->error && s->input_value_count < s->num_input_values) { // decode repetition and definition levels. these will attempt to decode at // least up to the target, but may decode a few more. gpuDecodeStream(s->rep, s, target_input_count, t, level_type::REPETITION); gpuDecodeStream(s->def, s, target_input_count, t, level_type::DEFINITION); SYNCWARP(); // we may have decoded different amounts from each stream, so only process what we've been int actual_input_count = min(s->lvl_count[level_type::REPETITION], s->lvl_count[level_type::DEFINITION]); // process what we got back gpuUpdatePageSizes(s, actual_input_count, t, trim_pass); target_input_count = actual_input_count + batch_size; SYNCWARP(); } } // update # rows in the actual page if (!t) { pp->num_rows = s->page.nesting[0].size; pp->skipped_values = s->page.skipped_values; pp->skipped_leaf_values = s->page.skipped_leaf_values; } } /** * @brief Kernel for co the column data stored in the pages * * This function will write the page data and the page data's validity to the * output specified in the page's column chunk. If necessary, additional * conversion will be performed to translate from the Parquet datatype to * desired output datatype (ex. 32-bit to 16-bit, string to hash). * * @param[in] pages List of pages * @param[in,out] chunks List of column chunks * @param[in] min_row Row index to start reading at * @param[in] num_rows Maximum number of rows to read * @param[in] num_chunks Number of column chunks */ // blockDim {NTHREADS,1,1} extern "C" __global__ void __launch_bounds__(NTHREADS) gpuDecodePageData(PageInfo *pages, ColumnChunkDesc const *chunks, size_t min_row, size_t num_rows, int32_t num_chunks) { __shared__ __align__(16) page_state_s state_g; page_state_s *const s = &state_g; int page_idx = blockIdx.x; int t = threadIdx.x; int out_thread0; if (!setupLocalPageInfo(s, &pages[page_idx], chunks, min_row, num_rows, num_chunks)) { return; } if (s->dict_base) { out_thread0 = (s->dict_bits > 0) ? 64 : 32; } else { out_thread0 = ((s->col.data_type & 7) == BOOLEAN || (s->col.data_type & 7) == BYTE_ARRAY) ? 64 : 32; } uint32_t skipped_leaf_values = s->page.skipped_leaf_values; while (!s->error && (s->input_value_count < s->num_input_values || s->out_pos < s->nz_count)) { int target_pos; int out_pos = s->out_pos; if (t < out_thread0) { target_pos = min(out_pos + 2 * (NTHREADS - out_thread0), s->nz_count + (NTHREADS - out_thread0)); } else { target_pos = min(s->nz_count, out_pos + NTHREADS - out_thread0); if (out_thread0 > 32) { target_pos = min(target_pos, s->dict_pos); } } __syncthreads(); if (t < 32) { // decode repetition and definition levels. 
// - update validity vectors // - updates offsets (for nested columns) // - produces non-NULL value indices in s->nz_idx for subsequent decoding gpuDecodeLevels(s, target_pos, t); } else if (t < out_thread0) { uint32_t src_target_pos = target_pos + skipped_leaf_values; // WARP1: Decode dictionary indices, booleans or string positions if (s->dict_base) { src_target_pos = gpuDecodeDictionaryIndices(s, src_target_pos, t & 0x1f); } else if ((s->col.data_type & 7) == BOOLEAN) { src_target_pos = gpuDecodeRleBooleans(s, src_target_pos, t & 0x1f); } else if ((s->col.data_type & 7) == BYTE_ARRAY) { gpuInitStringDescriptors(s, src_target_pos, t & 0x1f); } if (t == 32) { *(volatile int32_t *)&s->dict_pos = src_target_pos; } } else { // WARP1..WARP3: Decode values int dtype = s->col.data_type & 7; out_pos += t - out_thread0; uint32_t src_pos = out_pos + skipped_leaf_values; int output_value_idx = s->nz_idx[rolling_index(out_pos)]; if (out_pos < target_pos && output_value_idx >= 0 && output_value_idx < s->num_input_values) { // nesting level that is storing actual leaf values int leaf_level_index = s->col.max_level[level_type::REPETITION]; uint32_t dtype_len = s->dtype_len; uint8_t *dst = s->page.nesting[leaf_level_index].data_out + static_cast<size_t>(output_value_idx) * dtype_len; if (dtype == BYTE_ARRAY) gpuOutputString(s, src_pos, dst); else if (dtype == BOOLEAN) gpuOutputBoolean(s, src_pos, dst); else if (s->col.converted_type == DECIMAL) gpuOutputDecimal(s, src_pos, reinterpret_cast<double *>(dst), dtype); else if (dtype == INT96) gpuOutputInt96Timestamp(s, src_pos, reinterpret_cast<int64_t *>(dst)); else if (dtype_len == 8) { if (s->ts_scale) gpuOutputInt64Timestamp(s, src_pos, reinterpret_cast<int64_t *>(dst)); else gpuOutputFast(s, src_pos, reinterpret_cast<uint2 *>(dst)); } else if (dtype_len == 4) gpuOutputFast(s, src_pos, reinterpret_cast<uint32_t *>(dst)); else gpuOutputGeneric(s, src_pos, dst, dtype_len); } if (t == out_thread0) { *(volatile int32_t *)&s->out_pos = target_pos; } } __syncthreads(); } } struct chunk_row_output_iter { PageInfo *p; using value_type = size_type; using difference_type = size_type; using pointer = size_type *; using reference = size_type &; using iterator_category = thrust::output_device_iterator_tag; chunk_row_output_iter operator+ __host__ __device__(int i) { return chunk_row_output_iter{p + i}; } void operator++ __host__ __device__() { p++; } reference operator[] __device__(int i) { return p[i].chunk_row; } reference operator*__device__() { return p->chunk_row; } void operator= __device__(value_type v) { p->chunk_row = v; } }; struct start_offset_output_iterator { PageInfo *p; int col_index; int nesting_depth; int empty = 0; using value_type = size_type; using difference_type = size_type; using pointer = size_type *; using reference = size_type &; using iterator_category = thrust::output_device_iterator_tag; start_offset_output_iterator operator+ __host__ __device__(int i) { return start_offset_output_iterator{p + i, col_index, nesting_depth}; } void operator++ __host__ __device__() { p++; } reference operator[] __device__(int i) { return dereference(p + i); } reference operator*__device__() { return dereference(p); } void operator= __device__(value_type v) { if (p->column_idx == col_index && !(p->flags & PAGEINFO_FLAGS_DICTIONARY)) { p->nesting[nesting_depth].page_start_value = 2; } } private: reference __device__ dereference(PageInfo *p) { if (p->column_idx != col_index || p->flags & PAGEINFO_FLAGS_DICTIONARY) { return empty; } return 
p->nesting[nesting_depth].page_start_value; } }; /** * @copydoc cudf::io::parquet::gpu::PreprocessColumnData */ cudaError_t PreprocessColumnData(hostdevice_vector<PageInfo> &pages, hostdevice_vector<ColumnChunkDesc> const &chunks, std::vector<std::vector<std::pair<int, bool>>> &nested_info, size_t num_rows, size_t min_row, cudaStream_t stream) { dim3 dim_block(NTHREADS, 1); dim3 dim_grid(pages.size(), 1); // 1 threadblock per page // computes: // PageNestingInfo::size for each level of nesting, for each page. // The output from this does not take row bounds (num_rows, min_row) into account gpuComputePageSizes<<<dim_grid, dim_block, 0, stream>>>( pages.device_ptr(), chunks.device_ptr(), min_row, num_rows, chunks.size(), false); CUDA_TRY(cudaStreamSynchronize(stream)); // computes: // PageInfo::chunk_row for all pages auto key_input = thrust::make_transform_iterator( pages.device_ptr(), [] __device__(PageInfo const &page) { return page.chunk_idx; }); auto page_input = thrust::make_transform_iterator( pages.device_ptr(), [] __device__(PageInfo const &page) { return page.num_rows; }); thrust::exclusive_scan_by_key(rmm::exec_policy(stream)->on(stream), key_input, key_input + pages.size(), page_input, chunk_row_output_iter{pages.device_ptr()}); // computes: // PageNestingInfo::size for each level of nesting, for each page, taking row bounds into account. // PageInfo::skipped_values, which tells us where to start decoding in the input gpuComputePageSizes<<<dim_grid, dim_block, 0, stream>>>( pages.device_ptr(), chunks.device_ptr(), min_row, num_rows, chunks.size(), true); // retrieve pages back (PageInfo::num_rows has been set. if we don't bring it // back, this value will get overwritten later on). pages.device_to_host(stream, true); // computes: // output column sizes for each level of nesting (summing PageNestingInfo::size) // per-page start offsets for each level of nesting for (size_t idx = 0; idx < nested_info.size(); idx++) { size_t max_depth = nested_info[idx].size() - 1; for (size_t l_idx = 0; l_idx <= max_depth; l_idx++) { // column size auto page_input = thrust::make_transform_iterator( pages.device_ptr(), [idx, l_idx] __device__(PageInfo const &page) { if (page.column_idx != idx || page.flags & PAGEINFO_FLAGS_DICTIONARY) { return 0; } return page.nesting[l_idx].size; }); nested_info[idx][l_idx].first = thrust::reduce(rmm::exec_policy(stream)->on(stream), page_input, page_input + pages.size()); // add 1 for non-leaf levels for the terminating offset if (l_idx < max_depth) { nested_info[idx][l_idx].first++; } // per-page start offset auto key_input = thrust::make_transform_iterator( pages.device_ptr(), [] __device__(PageInfo const &page) { return page.column_idx; }); thrust::exclusive_scan_by_key( rmm::exec_policy(stream)->on(stream), key_input, key_input + pages.size(), page_input, start_offset_output_iterator{pages.device_ptr(), static_cast<int>(chunks[idx].dst_col_index), static_cast<int>(l_idx)}); } } return cudaSuccess; } /** * @copydoc cudf::io::parquet::gpu::DecodePageData */ cudaError_t __host__ DecodePageData(hostdevice_vector<PageInfo> &pages, hostdevice_vector<ColumnChunkDesc> const &chunks, size_t num_rows, size_t min_row, cudaStream_t stream) { dim3 dim_block(NTHREADS, 1); dim3 dim_grid(pages.size(), 1); // 1 threadblock per page gpuDecodePageData<<<dim_grid, dim_block, 0, stream>>>( pages.device_ptr(), chunks.device_ptr(), min_row, num_rows, chunks.size()); return cudaSuccess; } } // namespace gpu } // namespace parquet } // namespace io } // namespace cudf
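// ---------------------------------------------------------------------------
// Illustrative sketches, not part of the cudf sources above; names such as
// PageStub and store_bits_cpu are stand-ins invented for this note.
//
// (1) What the thrust::exclusive_scan_by_key call in PreprocessColumnData
// computes for PageInfo::chunk_row: within each chunk, a page's chunk_row is
// the number of rows contributed by the pages that precede it in that chunk,
// restarting at 0 whenever chunk_idx changes. This assumes pages arrive
// grouped by chunk_idx (consecutive equal keys), which is what scan_by_key
// segments on.
#include <cstdint>
#include <vector>

struct PageStub {
  int chunk_idx;  // key: which column chunk the page belongs to
  int num_rows;   // input: rows decoded from this page
  int chunk_row;  // output: relative row index of the page within its chunk
};

inline void compute_chunk_rows(std::vector<PageStub> &pages)
{
  int prev_chunk = -1;
  int running    = 0;
  for (auto &p : pages) {
    if (p.chunk_idx != prev_chunk) {  // key change: restart the exclusive scan
      prev_chunk = p.chunk_idx;
      running    = 0;
    }
    p.chunk_row = running;  // exclusive scan: rows before this page in the chunk
    running += p.num_rows;
  }
}

// (2) A single-threaded, atomic-free sketch of the word-splitting done by
// store_validity: value_count bits of valid_mask are written starting at bit
// position `offset`, spilling the high bits into the next 32-bit word when the
// run crosses a word boundary. Only the bit arithmetic is illustrated; the
// device code additionally uses atomicAnd/atomicOr (presumably because
// adjacent pages can share a boundary word of the output bitmap).
inline void store_bits_cpu(uint32_t *valid_map, int offset, uint32_t valid_mask, int value_count)
{
  int word = offset / 32;
  int bit  = offset % 32;
  uint64_t mask = (value_count == 32) ? 0xFFFFFFFFull : ((1ull << value_count) - 1);
  uint64_t bits = (uint64_t)(valid_mask & mask) << bit;  // up to 64 bits after shifting

  valid_map[word] = (valid_map[word] & ~(uint32_t)(mask << bit)) | (uint32_t)bits;
  if (bit + value_count > 32) {  // run spilled into the following word
    valid_map[word + 1] =
      (valid_map[word + 1] & ~(uint32_t)((mask << bit) >> 32)) | (uint32_t)(bits >> 32);
  }
}
// ---------------------------------------------------------------------------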
043c460bcf9fe0adf7c7fd288c3c80be6b8ad754.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <simulate/MonteCarlo.h> #include <simulate/SumReduction.cuh> #include <stdio.h> #define BLOCK_N 256 #define THREAD_N 256 __device__ void randNormal( MonteCarlo *plan, hiprandState_t *state, double *choMatrix, double *depend, double *independ) { int size = plan->basketSize; for (int i = 0; i < size; i++) { independ[i] = hiprand_normal(state); } for (int i = 0; i < size; i++) { double corNormal = 0; for (int j = 0; j < size; j++) { corNormal += independ[j] * choMatrix[i * size + j]; } depend[i] = corNormal; } } __device__ void sumRdx(double *s, double *d, double value) { s[threadIdx.x] = value; sumReduce<double, THREAD_N, THREAD_N>(s); if (threadIdx.x == 0) { *d = s[0]; } } __device__ double optionValue(MonteCarlo *plan, double value) { return exp(-plan->interest * plan->maturity) * (value > 0 ? value : 0); } __global__ void sumReduceKernel(MonteCarlo *plan, double *value, double *sum, double *sum2) { __shared__ double sumThread[THREAD_N]; int idx = blockIdx.x * blockDim.x + threadIdx.x; double sumPerThread = 0, sum2PerThread = 0; for (int i = idx; i < plan->pathNum; i += blockDim.x * gridDim.x) { sumPerThread += value[i]; sum2PerThread += value[i] * value[i]; } sumRdx(sumThread, &sum[blockIdx.x], sumPerThread); sumRdx(sumThread, &sum2[blockIdx.x], sum2PerThread); } void MonteCarlo::statisticGPU(MonteCarlo *plan, double *value, double &mean, double &std) { double *sum; double *sumHost; double *sum2; double *sum2Host; hipMalloc(&sum, sizeof(double) * BLOCK_N); hipMalloc(&sum2, sizeof(double) * BLOCK_N); hipHostMalloc(&sumHost, sizeof(double) * BLOCK_N); hipHostMalloc(&sum2Host, sizeof(double) * BLOCK_N); double sumRes = 0; double sum2Res = 0; hipLaunchKernelGGL(( sumReduceKernel), dim3(BLOCK_N), dim3(THREAD_N), 0, 0, plan, value, sum, sum2); hipMemcpy(sumHost, sum, BLOCK_N * sizeof(double), hipMemcpyDeviceToHost); hipMemcpy(sum2Host, sum2, BLOCK_N * sizeof(double), hipMemcpyDeviceToHost); for (int i = 0; i < BLOCK_N; i++) { sumRes += sumHost[i]; sum2Res += sum2Host[i]; } mean = sumRes / pathNum; std = std::sqrt(sum2Res / pathNum - mean * mean); hipHostFree(sumHost); hipHostFree(sum2Host); hipFree(sum); hipFree(sum2); } __global__ void covSumReduceKernel(MonteCarlo *plan, double *a, double *b, double *sum) { __shared__ double sumThread[THREAD_N]; int idx = blockIdx.x * blockDim.x + threadIdx.x; double sumPerThread = 0; for (int i = idx; i < plan->pathNum; i += blockDim.x * gridDim.x) { sumPerThread += a[i] * b[i]; } sumRdx(sumThread, &sum[blockIdx.x], sumPerThread); } double MonteCarlo::covarianceGPU(MonteCarlo *plan, double *arith, double *geo, double arithMean, double geoMean) { double *sum; double *sumHost; hipMalloc(&sum, sizeof(double) * BLOCK_N); hipHostMalloc(&sumHost, sizeof(double) * BLOCK_N); double sumRes = 0; hipLaunchKernelGGL(( covSumReduceKernel), dim3(BLOCK_N), dim3(THREAD_N), 0, 0, plan, arith, geo, sum); hipMemcpy(sumHost, sum, BLOCK_N * sizeof(double), hipMemcpyDeviceToHost); for (int i = 0; i < BLOCK_N; i++) { sumRes += sumHost[i]; } hipHostFree(sumHost); hipFree(sum); return sumRes / pathNum - arithMean * geoMean; } __global__ void variationReduceKernel(MonteCarlo *plan, double *dst, double *arithPayoff, double *geoPayoff, double theta) { int idx = blockIdx.x * blockDim.x + threadIdx.x; for (int i = idx; i < plan->pathNum; i += blockDim.x * gridDim.x) { dst[i] = arithPayoff[i] + theta * (plan->geoExp - geoPayoff[i]); } } __global__ void monteCarloOptionKernel( MonteCarlo *plan, 
double *choMatrix, double *price, double *volatility, double *drift, double *currents, double *depend, double *independ, double *sum, double *sumOutput, double *sum2, double *sum2Output, double *sumX, double *sumXOutput, double *arithPayoff, double *geoPayoff) { __shared__ double sumThread[THREAD_N]; hiprandState_t state; int idx = blockIdx.x * blockDim.x + threadIdx.x; int size = plan->basketSize; int offset = idx * size; double dt = plan->maturity / plan->observation; hiprand_init(1230, idx, 0, &state); for (int i = 0; i < size; i++) { sum[offset + i] = 0; sum2[offset + i] = 0; sumX[offset + i] = 0; } for (int i = idx; i < plan->pathNum; i += blockDim.x * gridDim.x) { double arithMean = 0; double geoMean = 1; for (int j = 0; j < size; j++) { currents[offset + j] = price[j]; } for (int j = 0; j < plan->observation; j++) { randNormal(plan, &state, choMatrix, depend + offset, independ + offset); for (int j = 0; j < size; j++) { double var = depend[offset + j]; sum[offset + j] += var; sum2[offset + j] += var * var; for (int k = 0; k < size; k++) { double val = depend[offset + k]; sumX[offset * size + j * size + k] += var * val; } } for (int k = 0; k < size; k++) { double growthFactor = drift[k] * exp(volatility[k] * sqrt(dt) * depend[offset + k]); currents[offset + k] *= growthFactor; arithMean += currents[offset + k]; geoMean *= currents[offset + k]; } } arithMean /= plan->observation * size; geoMean = pow(geoMean, 1 / (double)(plan->observation * size)); if (plan->type == CALL) { arithPayoff[i] = optionValue(plan, arithMean - plan->strike); geoPayoff[i] = optionValue(plan, geoMean - plan->strike); } else if (plan->type == PUT) { arithPayoff[i] = optionValue(plan, plan->strike - arithMean); geoPayoff[i] = optionValue(plan, plan->strike - geoMean); } } for (int i = 0; i < size; i++) { sumRdx(sumThread, &sumOutput[blockIdx.x * size + i], sum[offset + i]); sumRdx(sumThread, &sum2Output[blockIdx.x * size + i], sum2[offset + i]); for (int j = 0; j < size; j++) { sumRdx(sumThread, &sumXOutput[blockIdx.x * size * size + i * size + j], sumX[offset * size + i * size + j]); } } } Result MonteCarlo::simulateGPU(double *expectation, double *covMatrix) { MonteCarlo *plan; double *pChoMatrix; double *pPrice; double *pVolatility; double *pDrift; double *currents; double *depend; double *independ; double *sum; double *sumOutput; double *sumHost; double *sum2; double *sum2Output; double *sum2Host; double *sumX; double *sumXOutput; double *sumXHost; double *arithPayoff; double *geoPayoff; int size = this->basketSize; hipMalloc(&plan, sizeof(MonteCarlo)); hipMalloc(&pChoMatrix, size * size * sizeof(double)); hipMalloc(&pPrice, size * sizeof(double)); hipMalloc(&pVolatility, size * sizeof(double)); hipMalloc(&pDrift, size * sizeof(double)); int totalThread = BLOCK_N * THREAD_N; hipMalloc(&currents, sizeof(double) * size * totalThread); hipMalloc(&depend, sizeof(double) * size * totalThread); hipMalloc(&independ, sizeof(double) * size * totalThread); hipMalloc(&sum, sizeof(double) * size * totalThread); hipMalloc(&sum2, sizeof(double) * size * totalThread); hipMalloc(&sumX, sizeof(double) * size * size * totalThread); hipMalloc(&sumOutput, sizeof(double) * size * BLOCK_N); hipMalloc(&sum2Output, sizeof(double) * size * BLOCK_N); hipMalloc(&sumXOutput, sizeof(double) * size * size * BLOCK_N); hipMalloc(&arithPayoff, this->pathNum * sizeof(double)); hipMalloc(&geoPayoff, this->pathNum * sizeof(double)); hipMemcpy(plan, this, sizeof(MonteCarlo), hipMemcpyHostToDevice); hipMemcpy(pChoMatrix, this->choMatrix, size * 
size * sizeof(double), hipMemcpyHostToDevice); hipMemcpy(pPrice, this->price, size * sizeof(double), hipMemcpyHostToDevice); hipMemcpy(pVolatility, this->volatility, size * sizeof(double), hipMemcpyHostToDevice); hipMemcpy(pDrift, this->drift, size * sizeof(double), hipMemcpyHostToDevice); hipLaunchKernelGGL(( monteCarloOptionKernel), dim3(BLOCK_N), dim3(THREAD_N), 0, 0, plan, pChoMatrix, pPrice, pVolatility, pDrift, currents, depend, independ, sum, sumOutput, sum2, sum2Output, sumX, sumXOutput, arithPayoff, geoPayoff); double aMean, gMean, aStd, gStd; statisticGPU(plan, arithPayoff, aMean, aStd); statisticGPU(plan, geoPayoff, gMean, gStd); hipHostMalloc(&sumHost, sizeof(double) * size * BLOCK_N); hipHostMalloc(&sum2Host, sizeof(double) * size * BLOCK_N); hipHostMalloc(&sumXHost, sizeof(double) * size * size * BLOCK_N); hipMemcpy(sumHost, sumOutput, size * BLOCK_N * sizeof(double), hipMemcpyDeviceToHost); hipMemcpy(sum2Host, sum2Output, size * BLOCK_N * sizeof(double), hipMemcpyDeviceToHost); hipMemcpy(sumXHost, sumXOutput, size * size * BLOCK_N * sizeof(double), hipMemcpyDeviceToHost); for (int i = 0; i < size; i++) { expectation[i] = 0; for (int j = 0; j < size; j++) { covMatrix[i * size + j] = 0; } } for (int i = 0; i < BLOCK_N; i++) { for (int j = 0; j < size; j++) { expectation[j] += sumHost[i * size + j]; for (int k = 0; k < size; k++) { covMatrix[j * size + k] += sumXHost[i * size * size + j * size + k]; } } } int pathNum = this->pathNum; for (int i = 0; i < size; i++) { expectation[i] /= pathNum; } for (int i = 0; i < size; i++) { for (int j = 0; j < size; j++) { covMatrix[i * size + j] = covMatrix[i * size + j] / pathNum - expectation[i] * expectation[j]; } } Result ret; if (isGeo) { ret.mean = gMean; ret.conf = confidence(gStd); } else { if (controlVariate) { double cov = covarianceGPU(plan, arithPayoff, geoPayoff, aMean, gMean); double theta = cov / (gStd * gStd); double *newArith; hipMalloc(&newArith, this->pathNum * sizeof(double)); hipLaunchKernelGGL(( variationReduceKernel), dim3(BLOCK_N), dim3(THREAD_N), 0, 0, plan, newArith, arithPayoff, geoPayoff, theta); statisticGPU(plan, newArith, aMean, aStd); hipFree(newArith); } ret.mean = aMean; ret.conf = confidence(aStd); } hipHostFree(sumHost); hipHostFree(sum2Host); hipFree(plan); hipFree(pChoMatrix); hipFree(pPrice); hipFree(pVolatility); hipFree(pDrift); hipFree(currents); hipFree(depend); hipFree(independ); hipFree(sum); hipFree(sum2); hipFree(sumOutput); hipFree(sum2Output); hipFree(sumXOutput); hipFree(arithPayoff); hipFree(geoPayoff); return ret; }
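// ---------------------------------------------------------------------------
// Illustrative sketch, not part of the original sources: the control-variate
// adjustment that covarianceGPU and variationReduceKernel implement above,
// written as plain host code. theta = Cov(arith, geo) / Var(geo), and every
// arithmetic payoff is shifted by theta * (E[geo] - geo). geoExpectation
// stands in for plan->geoExp, assumed here to be the expected geometric
// payoff. The centred sums below yield the same theta as the GPU path's
// (E[ab] - E[a]E[b]) / gStd^2 form, since the 1/n factors cancel in the ratio.
#include <vector>

inline double control_variate_mean(const std::vector<double> &arith,
                                   const std::vector<double> &geo,
                                   double geoExpectation)
{
  const size_t n = arith.size();
  double aMean = 0.0, gMean = 0.0;
  for (size_t i = 0; i < n; i++) { aMean += arith[i]; gMean += geo[i]; }
  aMean /= n;
  gMean /= n;

  double cov = 0.0, varG = 0.0;
  for (size_t i = 0; i < n; i++) {
    cov  += (arith[i] - aMean) * (geo[i] - gMean);
    varG += (geo[i]  - gMean) * (geo[i]  - gMean);
  }
  const double theta = cov / varG;  // control-variate coefficient

  double adjusted = 0.0;
  for (size_t i = 0; i < n; i++) {
    adjusted += arith[i] + theta * (geoExpectation - geo[i]);  // variance-reduced payoff
  }
  return adjusted / n;
}
// ---------------------------------------------------------------------------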
043c460bcf9fe0adf7c7fd288c3c80be6b8ad754.cu
#include <simulate/MonteCarlo.h> #include <simulate/SumReduction.cuh> #include <stdio.h> #define BLOCK_N 256 #define THREAD_N 256 __device__ void randNormal( MonteCarlo *plan, curandState *state, double *choMatrix, double *depend, double *independ) { int size = plan->basketSize; for (int i = 0; i < size; i++) { independ[i] = curand_normal(state); } for (int i = 0; i < size; i++) { double corNormal = 0; for (int j = 0; j < size; j++) { corNormal += independ[j] * choMatrix[i * size + j]; } depend[i] = corNormal; } } __device__ void sumRdx(double *s, double *d, double value) { s[threadIdx.x] = value; sumReduce<double, THREAD_N, THREAD_N>(s); if (threadIdx.x == 0) { *d = s[0]; } } __device__ double optionValue(MonteCarlo *plan, double value) { return exp(-plan->interest * plan->maturity) * (value > 0 ? value : 0); } __global__ void sumReduceKernel(MonteCarlo *plan, double *value, double *sum, double *sum2) { __shared__ double sumThread[THREAD_N]; int idx = blockIdx.x * blockDim.x + threadIdx.x; double sumPerThread = 0, sum2PerThread = 0; for (int i = idx; i < plan->pathNum; i += blockDim.x * gridDim.x) { sumPerThread += value[i]; sum2PerThread += value[i] * value[i]; } sumRdx(sumThread, &sum[blockIdx.x], sumPerThread); sumRdx(sumThread, &sum2[blockIdx.x], sum2PerThread); } void MonteCarlo::statisticGPU(MonteCarlo *plan, double *value, double &mean, double &std) { double *sum; double *sumHost; double *sum2; double *sum2Host; cudaMalloc(&sum, sizeof(double) * BLOCK_N); cudaMalloc(&sum2, sizeof(double) * BLOCK_N); cudaMallocHost(&sumHost, sizeof(double) * BLOCK_N); cudaMallocHost(&sum2Host, sizeof(double) * BLOCK_N); double sumRes = 0; double sum2Res = 0; sumReduceKernel<<<BLOCK_N, THREAD_N>>>(plan, value, sum, sum2); cudaMemcpy(sumHost, sum, BLOCK_N * sizeof(double), cudaMemcpyDeviceToHost); cudaMemcpy(sum2Host, sum2, BLOCK_N * sizeof(double), cudaMemcpyDeviceToHost); for (int i = 0; i < BLOCK_N; i++) { sumRes += sumHost[i]; sum2Res += sum2Host[i]; } mean = sumRes / pathNum; std = std::sqrt(sum2Res / pathNum - mean * mean); cudaFreeHost(sumHost); cudaFreeHost(sum2Host); cudaFree(sum); cudaFree(sum2); } __global__ void covSumReduceKernel(MonteCarlo *plan, double *a, double *b, double *sum) { __shared__ double sumThread[THREAD_N]; int idx = blockIdx.x * blockDim.x + threadIdx.x; double sumPerThread = 0; for (int i = idx; i < plan->pathNum; i += blockDim.x * gridDim.x) { sumPerThread += a[i] * b[i]; } sumRdx(sumThread, &sum[blockIdx.x], sumPerThread); } double MonteCarlo::covarianceGPU(MonteCarlo *plan, double *arith, double *geo, double arithMean, double geoMean) { double *sum; double *sumHost; cudaMalloc(&sum, sizeof(double) * BLOCK_N); cudaMallocHost(&sumHost, sizeof(double) * BLOCK_N); double sumRes = 0; covSumReduceKernel<<<BLOCK_N, THREAD_N>>>(plan, arith, geo, sum); cudaMemcpy(sumHost, sum, BLOCK_N * sizeof(double), cudaMemcpyDeviceToHost); for (int i = 0; i < BLOCK_N; i++) { sumRes += sumHost[i]; } cudaFreeHost(sumHost); cudaFree(sum); return sumRes / pathNum - arithMean * geoMean; } __global__ void variationReduceKernel(MonteCarlo *plan, double *dst, double *arithPayoff, double *geoPayoff, double theta) { int idx = blockIdx.x * blockDim.x + threadIdx.x; for (int i = idx; i < plan->pathNum; i += blockDim.x * gridDim.x) { dst[i] = arithPayoff[i] + theta * (plan->geoExp - geoPayoff[i]); } } __global__ void monteCarloOptionKernel( MonteCarlo *plan, double *choMatrix, double *price, double *volatility, double *drift, double *currents, double *depend, double *independ, double *sum, double 
*sumOutput, double *sum2, double *sum2Output, double *sumX, double *sumXOutput, double *arithPayoff, double *geoPayoff) { __shared__ double sumThread[THREAD_N]; curandState state; int idx = blockIdx.x * blockDim.x + threadIdx.x; int size = plan->basketSize; int offset = idx * size; double dt = plan->maturity / plan->observation; curand_init(1230, idx, 0, &state); for (int i = 0; i < size; i++) { sum[offset + i] = 0; sum2[offset + i] = 0; sumX[offset + i] = 0; } for (int i = idx; i < plan->pathNum; i += blockDim.x * gridDim.x) { double arithMean = 0; double geoMean = 1; for (int j = 0; j < size; j++) { currents[offset + j] = price[j]; } for (int j = 0; j < plan->observation; j++) { randNormal(plan, &state, choMatrix, depend + offset, independ + offset); for (int j = 0; j < size; j++) { double var = depend[offset + j]; sum[offset + j] += var; sum2[offset + j] += var * var; for (int k = 0; k < size; k++) { double val = depend[offset + k]; sumX[offset * size + j * size + k] += var * val; } } for (int k = 0; k < size; k++) { double growthFactor = drift[k] * exp(volatility[k] * sqrt(dt) * depend[offset + k]); currents[offset + k] *= growthFactor; arithMean += currents[offset + k]; geoMean *= currents[offset + k]; } } arithMean /= plan->observation * size; geoMean = pow(geoMean, 1 / (double)(plan->observation * size)); if (plan->type == CALL) { arithPayoff[i] = optionValue(plan, arithMean - plan->strike); geoPayoff[i] = optionValue(plan, geoMean - plan->strike); } else if (plan->type == PUT) { arithPayoff[i] = optionValue(plan, plan->strike - arithMean); geoPayoff[i] = optionValue(plan, plan->strike - geoMean); } } for (int i = 0; i < size; i++) { sumRdx(sumThread, &sumOutput[blockIdx.x * size + i], sum[offset + i]); sumRdx(sumThread, &sum2Output[blockIdx.x * size + i], sum2[offset + i]); for (int j = 0; j < size; j++) { sumRdx(sumThread, &sumXOutput[blockIdx.x * size * size + i * size + j], sumX[offset * size + i * size + j]); } } } Result MonteCarlo::simulateGPU(double *expectation, double *covMatrix) { MonteCarlo *plan; double *pChoMatrix; double *pPrice; double *pVolatility; double *pDrift; double *currents; double *depend; double *independ; double *sum; double *sumOutput; double *sumHost; double *sum2; double *sum2Output; double *sum2Host; double *sumX; double *sumXOutput; double *sumXHost; double *arithPayoff; double *geoPayoff; int size = this->basketSize; cudaMalloc(&plan, sizeof(MonteCarlo)); cudaMalloc(&pChoMatrix, size * size * sizeof(double)); cudaMalloc(&pPrice, size * sizeof(double)); cudaMalloc(&pVolatility, size * sizeof(double)); cudaMalloc(&pDrift, size * sizeof(double)); int totalThread = BLOCK_N * THREAD_N; cudaMalloc(&currents, sizeof(double) * size * totalThread); cudaMalloc(&depend, sizeof(double) * size * totalThread); cudaMalloc(&independ, sizeof(double) * size * totalThread); cudaMalloc(&sum, sizeof(double) * size * totalThread); cudaMalloc(&sum2, sizeof(double) * size * totalThread); cudaMalloc(&sumX, sizeof(double) * size * size * totalThread); cudaMalloc(&sumOutput, sizeof(double) * size * BLOCK_N); cudaMalloc(&sum2Output, sizeof(double) * size * BLOCK_N); cudaMalloc(&sumXOutput, sizeof(double) * size * size * BLOCK_N); cudaMalloc(&arithPayoff, this->pathNum * sizeof(double)); cudaMalloc(&geoPayoff, this->pathNum * sizeof(double)); cudaMemcpy(plan, this, sizeof(MonteCarlo), cudaMemcpyHostToDevice); cudaMemcpy(pChoMatrix, this->choMatrix, size * size * sizeof(double), cudaMemcpyHostToDevice); cudaMemcpy(pPrice, this->price, size * sizeof(double), 
cudaMemcpyHostToDevice); cudaMemcpy(pVolatility, this->volatility, size * sizeof(double), cudaMemcpyHostToDevice); cudaMemcpy(pDrift, this->drift, size * sizeof(double), cudaMemcpyHostToDevice); monteCarloOptionKernel<<<BLOCK_N, THREAD_N>>>( plan, pChoMatrix, pPrice, pVolatility, pDrift, currents, depend, independ, sum, sumOutput, sum2, sum2Output, sumX, sumXOutput, arithPayoff, geoPayoff); double aMean, gMean, aStd, gStd; statisticGPU(plan, arithPayoff, aMean, aStd); statisticGPU(plan, geoPayoff, gMean, gStd); cudaMallocHost(&sumHost, sizeof(double) * size * BLOCK_N); cudaMallocHost(&sum2Host, sizeof(double) * size * BLOCK_N); cudaMallocHost(&sumXHost, sizeof(double) * size * size * BLOCK_N); cudaMemcpy(sumHost, sumOutput, size * BLOCK_N * sizeof(double), cudaMemcpyDeviceToHost); cudaMemcpy(sum2Host, sum2Output, size * BLOCK_N * sizeof(double), cudaMemcpyDeviceToHost); cudaMemcpy(sumXHost, sumXOutput, size * size * BLOCK_N * sizeof(double), cudaMemcpyDeviceToHost); for (int i = 0; i < size; i++) { expectation[i] = 0; for (int j = 0; j < size; j++) { covMatrix[i * size + j] = 0; } } for (int i = 0; i < BLOCK_N; i++) { for (int j = 0; j < size; j++) { expectation[j] += sumHost[i * size + j]; for (int k = 0; k < size; k++) { covMatrix[j * size + k] += sumXHost[i * size * size + j * size + k]; } } } int pathNum = this->pathNum; for (int i = 0; i < size; i++) { expectation[i] /= pathNum; } for (int i = 0; i < size; i++) { for (int j = 0; j < size; j++) { covMatrix[i * size + j] = covMatrix[i * size + j] / pathNum - expectation[i] * expectation[j]; } } Result ret; if (isGeo) { ret.mean = gMean; ret.conf = confidence(gStd); } else { if (controlVariate) { double cov = covarianceGPU(plan, arithPayoff, geoPayoff, aMean, gMean); double theta = cov / (gStd * gStd); double *newArith; cudaMalloc(&newArith, this->pathNum * sizeof(double)); variationReduceKernel<<<BLOCK_N, THREAD_N>>>(plan, newArith, arithPayoff, geoPayoff, theta); statisticGPU(plan, newArith, aMean, aStd); cudaFree(newArith); } ret.mean = aMean; ret.conf = confidence(aStd); } cudaFreeHost(sumHost); cudaFreeHost(sum2Host); cudaFree(plan); cudaFree(pChoMatrix); cudaFree(pPrice); cudaFree(pVolatility); cudaFree(pDrift); cudaFree(currents); cudaFree(depend); cudaFree(independ); cudaFree(sum); cudaFree(sum2); cudaFree(sumOutput); cudaFree(sum2Output); cudaFree(sumXOutput); cudaFree(arithPayoff); cudaFree(geoPayoff); return ret; }
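// ---------------------------------------------------------------------------
// Illustrative sketch, not part of the original sources: the host-side
// finishing step used by statisticGPU above. Each block writes one partial sum
// of the payoffs and one partial sum of their squares; the host accumulates
// the BLOCK_N partials and forms mean = S1/n and std = sqrt(S2/n - mean^2),
// the population form the code uses before building a confidence interval.
// finish_statistics is a name invented for this note.
#include <cmath>

inline void finish_statistics(const double *blockSums, const double *blockSumsOfSquares,
                              int numBlocks, long long pathNum,
                              double &mean, double &stddev)
{
  double s1 = 0.0, s2 = 0.0;
  for (int i = 0; i < numBlocks; i++) {
    s1 += blockSums[i];           // sum of per-block sums of x
    s2 += blockSumsOfSquares[i];  // sum of per-block sums of x^2
  }
  mean   = s1 / pathNum;
  stddev = std::sqrt(s2 / pathNum - mean * mean);
}
// ---------------------------------------------------------------------------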
9c666ea27bc2030189fa3e2669a03f7929753347.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright 1993-2010 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. * */ /* Example of integrating CUDA functions into an existing * application / framework. * Device code. */ #ifndef _CPP_INTEGRATION_KERNEL_H_ #define _CPP_INTEGRATION_KERNEL_H_ /////////////////////////////////////////////////////////////////////////////// //! Simple test kernel for device functionality //! @param g_odata memory to process (in and out) /////////////////////////////////////////////////////////////////////////////// __global__ void kernel( int* g_data ) { // write data to global memory const unsigned int tid = threadIdx.x; int data = g_data[tid]; // use integer arithmetic to process all four bytes with one thread // this serializes the execution, but is the simplest solutions to avoid // bank conflicts for this very low number of threads // in general it is more efficient to process each byte by a separate thread, // to avoid bank conflicts the access pattern should be // g_data[4 * wtid + wid], where wtid is the thread id within the half warp // and wid is the warp id // see also the programming guide for a more in depth discussion. g_data[tid] = ((((data << 0) >> 24) - 10) << 24) | ((((data << 8) >> 24) - 10) << 16) | ((((data << 16) >> 24) - 10) << 8) | ((((data << 24) >> 24) - 10) << 0); } /////////////////////////////////////////////////////////////////////////////// //! Demonstration that int2 data can be used in the cpp code //! @param g_odata memory to process (in and out) /////////////////////////////////////////////////////////////////////////////// __global__ void kernel2( int2* g_data ) { // write data to global memory const unsigned int tid = threadIdx.x; int2 data = g_data[tid]; // use integer arithmetic to process all four bytes with one thread // this serializes the execution, but is the simplest solutions to avoid // bank conflicts for this very low number of threads // in general it is more efficient to process each byte by a separate thread, // to avoid bank conflicts the access pattern should be // g_data[4 * wtid + wid], where wtid is the thread id within the half warp // and wid is the warp id // see also the programming guide for a more in depth discussion. g_data[tid].x = data.x - data.y; } #endif // #ifndef _CPP_INTEGRATION_KERNEL_H_
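// ---------------------------------------------------------------------------
// Illustrative sketch, not part of the sample above: the per-byte arithmetic
// that `kernel` performs with shift pairs, rewritten with explicit masking so
// it can be checked on the host. Each of the four bytes packed into a 32-bit
// int is decremented by 10 and repacked. For typical character input (every
// byte in [10, 127]) this matches the kernel; the kernel itself relies on
// sign-extending right shifts, so results can differ for bytes >= 128.
#include <cstdint>

inline int32_t subtract10_per_byte(int32_t data)
{
  uint32_t in  = (uint32_t)data;
  uint32_t out = 0;
  for (int b = 0; b < 4; b++) {
    uint32_t v = ((in >> (8 * b)) & 0xFFu) - 10u;  // extract one byte, subtract 10
    out |= (v & 0xFFu) << (8 * b);                 // repack it in the same position
  }
  return (int32_t)out;
}
// ---------------------------------------------------------------------------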
9c666ea27bc2030189fa3e2669a03f7929753347.cu
/* * Copyright 1993-2010 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. * */ /* Example of integrating CUDA functions into an existing * application / framework. * Device code. */ #ifndef _CPP_INTEGRATION_KERNEL_H_ #define _CPP_INTEGRATION_KERNEL_H_ /////////////////////////////////////////////////////////////////////////////// //! Simple test kernel for device functionality //! @param g_odata memory to process (in and out) /////////////////////////////////////////////////////////////////////////////// __global__ void kernel( int* g_data ) { // write data to global memory const unsigned int tid = threadIdx.x; int data = g_data[tid]; // use integer arithmetic to process all four bytes with one thread // this serializes the execution, but is the simplest solutions to avoid // bank conflicts for this very low number of threads // in general it is more efficient to process each byte by a separate thread, // to avoid bank conflicts the access pattern should be // g_data[4 * wtid + wid], where wtid is the thread id within the half warp // and wid is the warp id // see also the programming guide for a more in depth discussion. g_data[tid] = ((((data << 0) >> 24) - 10) << 24) | ((((data << 8) >> 24) - 10) << 16) | ((((data << 16) >> 24) - 10) << 8) | ((((data << 24) >> 24) - 10) << 0); } /////////////////////////////////////////////////////////////////////////////// //! Demonstration that int2 data can be used in the cpp code //! @param g_odata memory to process (in and out) /////////////////////////////////////////////////////////////////////////////// __global__ void kernel2( int2* g_data ) { // write data to global memory const unsigned int tid = threadIdx.x; int2 data = g_data[tid]; // use integer arithmetic to process all four bytes with one thread // this serializes the execution, but is the simplest solutions to avoid // bank conflicts for this very low number of threads // in general it is more efficient to process each byte by a separate thread, // to avoid bank conflicts the access pattern should be // g_data[4 * wtid + wid], where wtid is the thread id within the half warp // and wid is the warp id // see also the programming guide for a more in depth discussion. g_data[tid].x = data.x - data.y; } #endif // #ifndef _CPP_INTEGRATION_KERNEL_H_
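// ---------------------------------------------------------------------------
// Illustrative sketch, not part of the sample above: a host-side reference for
// `kernel2`, the part of this sample that demonstrates exchanging CUDA's
// built-in int2 type with ordinary C++ code. Int2Stub is a stand-in for int2
// invented for this note, so the expected result (x := x - y per element) can
// be computed without the CUDA runtime and compared against the device output.
struct Int2Stub { int x; int y; };

inline void expected_kernel2(Int2Stub *data, unsigned int n)
{
  for (unsigned int i = 0; i < n; i++) {
    data[i].x = data[i].x - data[i].y;  // mirrors g_data[tid].x = data.x - data.y
  }
}
// ---------------------------------------------------------------------------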
62c139c4ead294d171af4d2e1c5162d4a0a6dcd2.hip
// !!! This is a file automatically generated by hipify!!! #define GLM_FORCE_CUDA #include <stdio.h> #include <hip/hip_runtime.h> #include <cmath> #include <glm/glm.hpp> #include "utilityCore.hpp" #include "kernel.h" // LOOK-2.1 potentially useful for doing grid-based neighbor search #ifndef imax #define imax( a, b ) ( ((a) > (b)) ? (a) : (b) ) #endif #ifndef imin #define imin( a, b ) ( ((a) < (b)) ? (a) : (b) ) #endif #define checkCUDAErrorWithLine(msg) checkCUDAError(msg, __LINE__) // Either 8 or 27 depending on whether cell width = distance or 2 * distance #define NUM_NEIGHBORS 8 /** * Check for CUDA errors; print and exit if there was a problem. */ void checkCUDAError(const char *msg, int line = -1) { hipError_t err = hipGetLastError(); if (hipSuccess != err) { if (line >= 0) { fprintf(stderr, "Line %d: ", line); } fprintf(stderr, "Cuda error: %s: %s.\n", msg, hipGetErrorString(err)); exit(EXIT_FAILURE); } } /***************** * Configuration * *****************/ /*! Block size used for CUDA kernel launch. */ #define blockSize 128 // LOOK-1.2 Parameters for the boids algorithm. // These worked well in our reference implementation. #define rule1Distance 5.0f #define rule2Distance 3.0f #define rule3Distance 5.0f #define rule1Scale 0.01f #define rule2Scale 0.1f #define rule3Scale 0.1f #define maxSpeed 1.0f /*! Size of the starting area in simulation space. */ #define scene_scale 100.0f /*********************************************** * Kernel state (pointers are device pointers) * ***********************************************/ int numObjects; dim3 threadsPerBlock(blockSize); // LOOK-1.2 - These buffers are here to hold all your boid information. // These get allocated for you in Boids::initSimulation. // Consider why you would need two velocity buffers in a simulation where each // boid cares about its neighbors' velocities. // These are called ping-pong buffers. glm::vec3 *dev_pos; glm::vec3 *dev_vel1; glm::vec3 *dev_vel2; // LOOK-2.1 - these are NOT allocated for you. You'll have to set up the thrust // pointers on your own too. // For efficient sorting and the uniform grid. These should always be parallel. int *dev_particleArrayIndices; // What index in dev_pos and dev_velX represents this particle? int *dev_particleGridIndices; // What grid cell is this particle in? // needed for use with thrust thrust::device_ptr<int> dev_thrust_particleArrayIndices; thrust::device_ptr<int> dev_thrust_particleGridIndices; int *dev_gridCellStartIndices; // What part of dev_particleArrayIndices belongs int *dev_gridCellEndIndices; // to this cell? // TODO-2.3 - consider what additional buffers you might need to reshuffle // the position and velocity data to be coherent within cells. glm::vec3 *dev_sortedPos; // LOOK-2.1 - Grid parameters based on simulation parameters. // These are automatically computed for you in Boids::initSimulation int gridCellCount; int gridSideCount; float gridCellWidth; float gridInverseCellWidth; glm::vec3 gridMinimum; /****************** * initSimulation * ******************/ __host__ __device__ unsigned int hash(unsigned int a) { a = (a + 0x7ed55d16) + (a << 12); a = (a ^ 0xc761c23c) ^ (a >> 19); a = (a + 0x165667b1) + (a << 5); a = (a + 0xd3a2646c) ^ (a << 9); a = (a + 0xfd7046c5) + (a << 3); a = (a ^ 0xb55a4f09) ^ (a >> 16); return a; } /** * LOOK-1.2 - this is a typical helper function for a CUDA kernel. * Function for generating a random vec3. 
*/ __host__ __device__ glm::vec3 generateRandomVec3(float time, int index) { thrust::default_random_engine rng(hash((int)(index * time))); thrust::uniform_real_distribution<float> unitDistrib(-1, 1); return glm::vec3((float)unitDistrib(rng), (float)unitDistrib(rng), (float)unitDistrib(rng)); } /** * LOOK-1.2 - This is a basic CUDA kernel. * CUDA kernel for generating boids with a specified mass randomly around the star. */ __global__ void kernGenerateRandomPosArray(int time, int N, glm::vec3 * arr, float scale) { int index = (blockIdx.x * blockDim.x) + threadIdx.x; if (index < N) { glm::vec3 rand = generateRandomVec3(time, index); arr[index].x = scale * rand.x; arr[index].y = scale * rand.y; arr[index].z = scale * rand.z; } } /** * Initialize memory, update some globals */ void Boids::initSimulation(int N) { numObjects = N; dim3 fullBlocksPerGrid((N + blockSize - 1) / blockSize); // LOOK-1.2 - This is basic CUDA memory management and error checking. // Don't forget to hipFree in Boids::endSimulation. hipMalloc((void**)&dev_pos, N * sizeof(glm::vec3)); checkCUDAErrorWithLine("hipMalloc dev_pos failed!"); hipMalloc((void**)&dev_vel1, N * sizeof(glm::vec3)); checkCUDAErrorWithLine("hipMalloc dev_vel1 failed!"); hipMalloc((void**)&dev_vel2, N * sizeof(glm::vec3)); checkCUDAErrorWithLine("hipMalloc dev_vel2 failed!"); // LOOK-1.2 - This is a typical CUDA kernel invocation. hipLaunchKernelGGL(( kernGenerateRandomPosArray), dim3(fullBlocksPerGrid), dim3(blockSize), 0, 0, 1, numObjects, dev_pos, scene_scale); checkCUDAErrorWithLine("kernGenerateRandomPosArray failed!"); // LOOK-2.1 computing grid params gridCellWidth = 2.0f * ::max(::max(rule1Distance, rule2Distance), rule3Distance); int halfSideCount = (int)(scene_scale / gridCellWidth) + 1; gridSideCount = 2 * halfSideCount; gridCellCount = gridSideCount * gridSideCount * gridSideCount; gridInverseCellWidth = 1.0f / gridCellWidth; float halfGridWidth = gridCellWidth * halfSideCount; gridMinimum.x -= halfGridWidth; gridMinimum.y -= halfGridWidth; gridMinimum.z -= halfGridWidth; // TODO-2.1 TODO-2.3 - Allocate additional buffers here. hipMalloc((void**)&dev_particleArrayIndices, N * sizeof(int)); checkCUDAErrorWithLine("hipMalloc dev_particleArrayIndices failed!"); hipMalloc((void**)&dev_particleGridIndices, N * sizeof(int)); checkCUDAErrorWithLine("hipMalloc dev_particleGridIndices failed!"); hipMalloc((void**)&dev_gridCellStartIndices, gridCellCount * sizeof(int)); checkCUDAErrorWithLine("hipMalloc dev_gridCellStartIndices failed!"); hipMalloc((void**)&dev_gridCellEndIndices, gridCellCount * sizeof(int)); checkCUDAErrorWithLine("hipMalloc dev_gridCellEndIndices failed!"); hipMalloc((void**)&dev_sortedPos, N * sizeof(glm::vec3)); checkCUDAErrorWithLine("hipMalloc dev_sortedPos failed!"); hipDeviceSynchronize(); } /****************** * copyBoidsToVBO * ******************/ /** * Copy the boid positions into the VBO so that they can be drawn by OpenGL. 
*/ __global__ void kernCopyPositionsToVBO(int N, glm::vec3 *pos, float *vbo, float s_scale) { int index = threadIdx.x + (blockIdx.x * blockDim.x); float c_scale = -1.0f / s_scale; if (index < N) { vbo[4 * index + 0] = pos[index].x * c_scale; vbo[4 * index + 1] = pos[index].y * c_scale; vbo[4 * index + 2] = pos[index].z * c_scale; vbo[4 * index + 3] = 1.0f; } } __global__ void kernCopyVelocitiesToVBO(int N, glm::vec3 *vel, float *vbo, float s_scale) { int index = threadIdx.x + (blockIdx.x * blockDim.x); if (index < N) { vbo[4 * index + 0] = vel[index].x + 0.3f; vbo[4 * index + 1] = vel[index].y + 0.3f; vbo[4 * index + 2] = vel[index].z + 0.3f; vbo[4 * index + 3] = 1.0f; } } /** * Wrapper for call to the kernCopyboidsToVBO CUDA kernel. */ void Boids::copyBoidsToVBO(float *vbodptr_positions, float *vbodptr_velocities) { dim3 fullBlocksPerGrid((numObjects + blockSize - 1) / blockSize); kernCopyPositionsToVBO << <fullBlocksPerGrid, blockSize >> >(numObjects, dev_pos, vbodptr_positions, scene_scale); kernCopyVelocitiesToVBO << <fullBlocksPerGrid, blockSize >> >(numObjects, dev_vel1, vbodptr_velocities, scene_scale); checkCUDAErrorWithLine("copyBoidsToVBO failed!"); hipDeviceSynchronize(); } /****************** * stepSimulation * ******************/ /** * LOOK-1.2 You can use this as a helper for kernUpdateVelocityBruteForce. * __device__ code can be called from a __global__ context * Compute the new velocity on the body with index `iSelf` due to the `N` boids * in the `pos` and `vel` arrays. */ __device__ glm::vec3 computeVelocityChange(int N, int iSelf, const glm::vec3 *pos, const glm::vec3 *vel) { // Rule 1: boids fly towards their local perceived center of mass, which excludes themselves // Rule 2: boids try to stay a distance d away from each other // Rule 3: boids try to match the speed of surrounding boids glm::vec3 thisPos = pos[iSelf]; glm::vec3 perceivedCenter; glm::vec3 cohesionVel; glm::vec3 separationVel; glm::vec3 alignmentVel; float rule1NeighborCount = 0.f; float rule3NeighborCount = 0.f; for (int i = 0; i < N; i++) { if (i == iSelf) { continue; } glm::vec3 otherPos = pos[i]; float distance = glm::distance(thisPos, otherPos); if (distance < rule1Distance) { perceivedCenter += otherPos; rule1NeighborCount += 1.0; } if (distance < rule2Distance) { separationVel -= otherPos - thisPos; } if (distance < rule3Distance) { alignmentVel += vel[i]; rule3NeighborCount += 1.0; } } if (rule1NeighborCount > 0) { cohesionVel = (perceivedCenter / rule1NeighborCount - thisPos) * rule1Scale; } separationVel *= rule2Scale; if (rule3NeighborCount > 0) { alignmentVel = alignmentVel / rule3NeighborCount * rule3Scale; } return cohesionVel + separationVel + alignmentVel; } /** * TODO-1.2 implement basic flocking * For each of the `N` bodies, update its position based on its current velocity. */ __global__ void kernUpdateVelocityBruteForce(int N, glm::vec3 *pos, glm::vec3 *vel1, glm::vec3 *vel2) { // Compute a new velocity based on pos and vel1 // Clamp the speed // Record the new velocity into vel2. Question: why NOT vel1? int index = threadIdx.x + (blockIdx.x * blockDim.x); if (index >= N) { return; } glm::vec3 newVel = vel1[index] + computeVelocityChange(N, index, pos, vel1); if (glm::length(newVel) > maxSpeed) { newVel = glm::normalize(newVel) * maxSpeed; } vel2[index] = newVel; } /** * LOOK-1.2 Since this is pretty trivial, we implemented it for you. * For each of the `N` bodies, update its position based on its current velocity. 
*/ __global__ void kernUpdatePos(int N, float dt, glm::vec3 *pos, glm::vec3 *vel) { // Update position by velocity int index = threadIdx.x + (blockIdx.x * blockDim.x); if (index >= N) { return; } glm::vec3 thisPos = pos[index]; thisPos += vel[index] * dt; // Wrap the boids around so we don't lose them thisPos.x = thisPos.x < -scene_scale ? scene_scale : thisPos.x; thisPos.y = thisPos.y < -scene_scale ? scene_scale : thisPos.y; thisPos.z = thisPos.z < -scene_scale ? scene_scale : thisPos.z; thisPos.x = thisPos.x > scene_scale ? -scene_scale : thisPos.x; thisPos.y = thisPos.y > scene_scale ? -scene_scale : thisPos.y; thisPos.z = thisPos.z > scene_scale ? -scene_scale : thisPos.z; pos[index] = thisPos; } // LOOK-2.1 Consider this method of computing a 1D index from a 3D grid index. // LOOK-2.3 Looking at this method, what would be the most memory efficient // order for iterating over neighboring grid cells? // for(x) // for(y) // for(z)? Or some other order? __device__ int gridIndex3Dto1D(int x, int y, int z, int gridResolution) { if (x < 0 || x >= gridResolution || y < 0 || y >= gridResolution || z < 0 || z >= gridResolution) { return -1; } return x + y * gridResolution + z * gridResolution * gridResolution; } __global__ void kernComputeIndices(int N, int gridResolution, glm::vec3 gridMin, float inverseCellWidth, glm::vec3 *pos, int *indices, int *gridIndices) { // TODO-2.1 // - Label each boid with the index of its grid cell. // - Set up a parallel array of integer indices as pointers to the actual // boid data in pos and vel1/vel2 int index = (blockIdx.x * blockDim.x) + threadIdx.x; if (index >= N) { return; } glm::vec3 thisPos = pos[index]; glm::vec3 gridIndex3D = glm::floor((thisPos - gridMin) * inverseCellWidth); gridIndices[index] = gridIndex3Dto1D(gridIndex3D.x, gridIndex3D.y, gridIndex3D.z, gridResolution); indices[index] = index; } // LOOK-2.1 Consider how this could be useful for indicating that a cell // does not enclose any boids __global__ void kernResetIntBuffer(int N, int *intBuffer, int value) { int index = (blockIdx.x * blockDim.x) + threadIdx.x; if (index < N) { intBuffer[index] = value; } } __global__ void kernIdentifyCellStartEnd(int N, int *particleGridIndices, int *gridCellStartIndices, int *gridCellEndIndices) { // TODO-2.1 // Identify the start point of each cell in the gridIndices array. // This is basically a parallel unrolling of a loop that goes // "this index doesn't match the one before it, must be a new cell!" 
int index = (blockIdx.x * blockDim.x) + threadIdx.x; if (index >= N) { return; } int gridIndex = particleGridIndices[index]; if (index == 0) { gridCellStartIndices[gridIndex] = index; gridCellEndIndices[gridIndex] = index; } else { int prevGridIndex = particleGridIndices[index - 1]; if (prevGridIndex != gridIndex) { gridCellStartIndices[gridIndex] = index; gridCellEndIndices[prevGridIndex] = index - 1; } } } __global__ void kernSortPosAndVel(int N, int *particleArrayIndices, glm::vec3 *pos, glm::vec3 *sortedPos, glm::vec3 *vel, glm::vec3 *sortedVel) { int index = (blockIdx.x * blockDim.x) + threadIdx.x; if (index >= N) { return; } int sortedIndex = particleArrayIndices[index]; sortedPos[index] = pos[sortedIndex]; sortedVel[index] = vel[sortedIndex]; } __global__ void kernUpdateVelNeighborSearchScattered( int N, int gridResolution, glm::vec3 gridMin, float inverseCellWidth, float cellWidth, int *gridCellStartIndices, int *gridCellEndIndices, int *particleArrayIndices, glm::vec3 *pos, glm::vec3 *vel1, glm::vec3 *vel2) { // TODO-2.1 - Update a boid's velocity using the uniform grid to reduce // the number of boids that need to be checked. // - Identify the grid cell that this particle is in // - Identify which cells may contain neighbors. This isn't always 8. // - For each cell, read the start/end indices in the boid pointer array. // - Access each boid in the cell and compute velocity change from // the boids rules, if this boid is within the neighborhood distance. // - Clamp the speed change before putting the new speed in vel2 int index = (blockIdx.x * blockDim.x) + threadIdx.x; if (index >= N) { return; } glm::vec3 thisPos = pos[index]; glm::vec3 perceivedCenter; glm::vec3 cohesionVel; glm::vec3 separationVel; glm::vec3 alignmentVel; float rule1NeighborCount = 0.f; float rule3NeighborCount = 0.f; int neighborIndices[NUM_NEIGHBORS]; int currentNeighborIndex = 0; glm::vec3 gridIndex3D = glm::floor((thisPos - gridMin) * inverseCellWidth); glm::vec3 gridCenter = (gridIndex3D - gridResolution * 0.5f) * cellWidth + (cellWidth * 0.5f); glm::vec3 displacement = glm::sign(thisPos - gridCenter); int startValue = 0; if (NUM_NEIGHBORS == 27) { startValue = -1; displacement = glm::vec3(1); } for (int z = startValue; z < 2; z++) { for (int y = startValue; y < 2; y++) { for (int x = startValue; x < 2; x++) { neighborIndices[currentNeighborIndex++] = gridIndex3Dto1D(gridIndex3D.x + x * displacement.x, gridIndex3D.y + y * displacement.y, gridIndex3D.z + z * displacement.z, gridResolution); } } } for (int neighborCubeIndex = 0; neighborCubeIndex < currentNeighborIndex; neighborCubeIndex++) { int neighborGridIndex = neighborIndices[neighborCubeIndex]; // An invalid 3D grid index was given if (neighborGridIndex == -1) { continue; } int startIndex = gridCellStartIndices[neighborGridIndex]; int endIndex = gridCellEndIndices[neighborGridIndex]; for (int otherGridArrayIndex = startIndex; otherGridArrayIndex <= endIndex; otherGridArrayIndex++) { int otherParticleArrayIndex = particleArrayIndices[otherGridArrayIndex]; if (otherParticleArrayIndex == index) { continue; } glm::vec3 otherPos = pos[otherParticleArrayIndex]; glm::vec3 otherVel = vel1[otherParticleArrayIndex]; float distance = glm::distance(thisPos, otherPos); if (distance < rule1Distance) { perceivedCenter += otherPos; rule1NeighborCount += 1.0; } if (distance < rule2Distance) { separationVel -= otherPos - thisPos; } if (distance < rule3Distance) { alignmentVel += vel1[otherParticleArrayIndex]; rule3NeighborCount += 1.0; } } } if (rule1NeighborCount > 
0) { cohesionVel = (perceivedCenter / rule1NeighborCount - thisPos) * rule1Scale; } separationVel *= rule2Scale; if (rule3NeighborCount > 0) { alignmentVel = alignmentVel / rule3NeighborCount * rule3Scale; } glm::vec3 newVel = vel1[index] + cohesionVel + separationVel + alignmentVel; if (glm::length(newVel) > maxSpeed) { newVel = glm::normalize(newVel) * maxSpeed; } vel2[index] = newVel; } __global__ void kernUpdateVelNeighborSearchCoherent( int N, int gridResolution, glm::vec3 gridMin, float inverseCellWidth, float cellWidth, int *gridCellStartIndices, int *gridCellEndIndices, glm::vec3 *pos, glm::vec3 *vel1, glm::vec3 *vel2) { // TODO-2.3 - This should be very similar to kernUpdateVelNeighborSearchScattered, // except with one less level of indirection. // This should expect gridCellStartIndices and gridCellEndIndices to refer // directly to pos and vel1. // - Identify the grid cell that this particle is in // - Identify which cells may contain neighbors. This isn't always 8. // - For each cell, read the start/end indices in the boid pointer array. // DIFFERENCE: For best results, consider what order the cells should be // checked in to maximize the memory benefits of reordering the boids data. // - Access each boid in the cell and compute velocity change from // the boids rules, if this boid is within the neighborhood distance. // - Clamp the speed change before putting the new speed in vel2 int index = (blockIdx.x * blockDim.x) + threadIdx.x; if (index >= N) { return; } glm::vec3 thisPos = pos[index]; glm::vec3 perceivedCenter; glm::vec3 cohesionVel; glm::vec3 separationVel; glm::vec3 alignmentVel; float rule1NeighborCount = 0.f; float rule3NeighborCount = 0.f; int neighborIndices[NUM_NEIGHBORS]; int currentNeighborIndex = 0; glm::vec3 gridIndex3D = glm::floor((thisPos - gridMin) * inverseCellWidth); glm::vec3 gridCenter = (gridIndex3D - gridResolution * 0.5f) * cellWidth + (cellWidth * 0.5f); glm::vec3 displacement = glm::sign(thisPos - gridCenter); int startValue = 0; if (NUM_NEIGHBORS == 27) { startValue = -1; displacement = glm::vec3(1); } for (int z = startValue; z < 2; z++) { for (int y = startValue; y < 2; y++) { for (int x = startValue; x < 2; x++) { neighborIndices[currentNeighborIndex++] = gridIndex3Dto1D(gridIndex3D.x + x * displacement.x, gridIndex3D.y + y * displacement.y, gridIndex3D.z + z * displacement.z, gridResolution); } } } for (int neighborCubeIndex = 0; neighborCubeIndex < currentNeighborIndex; neighborCubeIndex++) { int neighborGridIndex = neighborIndices[neighborCubeIndex]; // An invalid 3D grid index was given if (neighborGridIndex == -1) { continue; } int startIndex = gridCellStartIndices[neighborGridIndex]; int endIndex = gridCellEndIndices[neighborGridIndex]; for (int otherGridArrayIndex = startIndex; otherGridArrayIndex <= endIndex; otherGridArrayIndex++) { if (otherGridArrayIndex == index) { continue; } glm::vec3 otherPos = pos[otherGridArrayIndex]; glm::vec3 otherVel = vel1[otherGridArrayIndex]; float distance = glm::distance(thisPos, otherPos); if (distance < rule1Distance) { perceivedCenter += otherPos; rule1NeighborCount += 1.0; } if (distance < rule2Distance) { separationVel -= otherPos - thisPos; } if (distance < rule3Distance) { alignmentVel += vel1[otherGridArrayIndex]; rule3NeighborCount += 1.0; } } } if (rule1NeighborCount > 0) { cohesionVel = (perceivedCenter / rule1NeighborCount - thisPos) * rule1Scale; } separationVel *= rule2Scale; if (rule3NeighborCount > 0) { alignmentVel = alignmentVel / rule3NeighborCount * rule3Scale; } glm::vec3 newVel 
= vel1[index] + cohesionVel + separationVel + alignmentVel; if (glm::length(newVel) > maxSpeed) { newVel = glm::normalize(newVel) * maxSpeed; } vel2[index] = newVel; } /** * Step the entire N-body simulation by `dt` seconds. */ void Boids::stepSimulationNaive(float dt) { // TODO-1.2 - use the kernels you wrote to step the simulation forward in time. // TODO-1.2 ping-pong the velocity buffers dim3 fullBlocksPerGrid((numObjects + blockSize - 1) / blockSize); kernUpdateVelocityBruteForce << <fullBlocksPerGrid, threadsPerBlock>> > (numObjects, dev_pos, dev_vel1, dev_vel2); kernUpdatePos << <fullBlocksPerGrid, threadsPerBlock >> > (numObjects, dt, dev_pos, dev_vel2); glm::vec3 *temp = dev_vel1; dev_vel1 = dev_vel2; dev_vel2 = temp; } void Boids::stepSimulationScatteredGrid(float dt) { // TODO-2.1 // Uniform Grid Neighbor search using Thrust sort. // In Parallel: // - label each particle with its array index as well as its grid index. // Use 2x width grids. // - Unstable key sort using Thrust. A stable sort isn't necessary, but you // are welcome to do a performance comparison. // - Naively unroll the loop for finding the start and end indices of each // cell's data pointers in the array of boid indices // - Perform velocity updates using neighbor search // - Update positions // - Ping-pong buffers as needed dim3 particleBlocksPerGrid((numObjects + blockSize - 1) / blockSize); dim3 cellBlocksPerGrid((gridCellCount + blockSize - 1) / blockSize); hipLaunchKernelGGL(( kernComputeIndices), dim3(particleBlocksPerGrid), dim3(threadsPerBlock), 0, 0, numObjects, gridSideCount, gridMinimum, gridInverseCellWidth, dev_pos, dev_particleArrayIndices, dev_particleGridIndices); thrust::device_ptr<int> dev_thrust_keys(dev_particleGridIndices); thrust::device_ptr<int> dev_thrust_values(dev_particleArrayIndices); thrust::sort_by_key(dev_thrust_keys, dev_thrust_keys + numObjects, dev_thrust_values); kernResetIntBuffer << <cellBlocksPerGrid, threadsPerBlock >> > (gridCellCount, dev_gridCellStartIndices, -1); kernResetIntBuffer << <cellBlocksPerGrid, threadsPerBlock >> > (gridCellCount, dev_gridCellEndIndices, -1); hipLaunchKernelGGL(( kernIdentifyCellStartEnd), dim3(particleBlocksPerGrid), dim3(threadsPerBlock), 0, 0, numObjects, dev_particleGridIndices, dev_gridCellStartIndices, dev_gridCellEndIndices); kernUpdateVelNeighborSearchScattered << <particleBlocksPerGrid, threadsPerBlock >> > (numObjects, gridSideCount, gridMinimum, gridInverseCellWidth, gridCellWidth, dev_gridCellStartIndices, dev_gridCellEndIndices, dev_particleArrayIndices, dev_pos, dev_vel1, dev_vel2); kernUpdatePos << <particleBlocksPerGrid, threadsPerBlock >> > (numObjects, dt, dev_pos, dev_vel2); glm::vec3 *temp = dev_pos; dev_pos = dev_sortedPos; dev_sortedPos = temp; } void Boids::stepSimulationCoherentGrid(float dt) { // TODO-2.3 - start by copying Boids::stepSimulationNaiveGrid // Uniform Grid Neighbor search using Thrust sort on cell-coherent data. // In Parallel: // - Label each particle with its array index as well as its grid index. // Use 2x width grids // - Unstable key sort using Thrust. A stable sort isn't necessary, but you // are welcome to do a performance comparison. // - Naively unroll the loop for finding the start and end indices of each // cell's data pointers in the array of boid indices // - BIG DIFFERENCE: use the rearranged array index buffer to reshuffle all // the particle data in the simulation array. 
// CONSIDER WHAT ADDITIONAL BUFFERS YOU NEED // - Perform velocity updates using neighbor search // - Update positions // - Ping-pong buffers as needed. THIS MAY BE DIFFERENT FROM BEFORE. dim3 particleBlocksPerGrid((numObjects + blockSize - 1) / blockSize); dim3 cellBlocksPerGrid((gridCellCount + blockSize - 1) / blockSize); kernComputeIndices << <particleBlocksPerGrid, threadsPerBlock >> >(numObjects, gridSideCount, gridMinimum, gridInverseCellWidth, dev_pos, dev_particleArrayIndices, dev_particleGridIndices); thrust::device_ptr<int> dev_thrust_keys(dev_particleGridIndices); thrust::device_ptr<int> dev_thrust_values(dev_particleArrayIndices); thrust::sort_by_key(dev_thrust_keys, dev_thrust_keys + numObjects, dev_thrust_values); kernResetIntBuffer << <cellBlocksPerGrid, threadsPerBlock >> > (gridCellCount, dev_gridCellStartIndices, -1); kernResetIntBuffer << <cellBlocksPerGrid, threadsPerBlock >> > (gridCellCount, dev_gridCellEndIndices, -1); kernIdentifyCellStartEnd << <particleBlocksPerGrid, threadsPerBlock >> >(numObjects, dev_particleGridIndices, dev_gridCellStartIndices, dev_gridCellEndIndices); kernSortPosAndVel << <particleBlocksPerGrid, threadsPerBlock >> > (numObjects, dev_particleArrayIndices, dev_pos, dev_sortedPos, dev_vel1, dev_vel2); kernUpdateVelNeighborSearchCoherent << <particleBlocksPerGrid, threadsPerBlock >> > (numObjects, gridSideCount, gridMinimum, gridInverseCellWidth, gridCellWidth, dev_gridCellStartIndices, dev_gridCellEndIndices, dev_sortedPos, dev_vel2, dev_vel1); kernUpdatePos << <particleBlocksPerGrid, threadsPerBlock >> > (numObjects, dt, dev_sortedPos, dev_vel1); glm::vec3 *temp = dev_pos; dev_pos = dev_sortedPos; dev_sortedPos = temp; } void Boids::endSimulation() { hipFree(dev_vel1); hipFree(dev_vel2); hipFree(dev_pos); // TODO-2.1 TODO-2.3 - Free any additional buffers here. hipFree(dev_particleArrayIndices); hipFree(dev_particleGridIndices); hipFree(dev_gridCellStartIndices); hipFree(dev_gridCellEndIndices); hipFree(dev_sortedPos); } void Boids::unitTest() { // LOOK-1.2 Feel free to write additional tests here. // test unstable sort int *dev_intKeys; int *dev_intValues; int N = 10; int *intKeys = new int[N]; int *intValues = new int[N]; intKeys[0] = 0; intValues[0] = 0; intKeys[1] = 1; intValues[1] = 1; intKeys[2] = 0; intValues[2] = 2; intKeys[3] = 3; intValues[3] = 3; intKeys[4] = 0; intValues[4] = 4; intKeys[5] = 2; intValues[5] = 5; intKeys[6] = 2; intValues[6] = 6; intKeys[7] = 0; intValues[7] = 7; intKeys[8] = 5; intValues[8] = 8; intKeys[9] = 6; intValues[9] = 9; hipMalloc((void**)&dev_intKeys, N * sizeof(int)); checkCUDAErrorWithLine("hipMalloc dev_intKeys failed!"); hipMalloc((void**)&dev_intValues, N * sizeof(int)); checkCUDAErrorWithLine("hipMalloc dev_intValues failed!"); dim3 fullBlocksPerGrid((N + blockSize - 1) / blockSize); std::cout << "before unstable sort: " << std::endl; for (int i = 0; i < N; i++) { std::cout << " key: " << intKeys[i]; std::cout << " value: " << intValues[i] << std::endl; } // How to copy data to the GPU hipMemcpy(dev_intKeys, intKeys, sizeof(int) * N, hipMemcpyHostToDevice); hipMemcpy(dev_intValues, intValues, sizeof(int) * N, hipMemcpyHostToDevice); // Wrap device vectors in thrust iterators for use with thrust. 
thrust::device_ptr<int> dev_thrust_keys(dev_intKeys); thrust::device_ptr<int> dev_thrust_values(dev_intValues); // LOOK-2.1 Example for using thrust::sort_by_key thrust::sort_by_key(dev_thrust_keys, dev_thrust_keys + N, dev_thrust_values); // How to copy data back to the CPU side from the GPU hipMemcpy(intKeys, dev_intKeys, sizeof(int) * N, hipMemcpyDeviceToHost); hipMemcpy(intValues, dev_intValues, sizeof(int) * N, hipMemcpyDeviceToHost); checkCUDAErrorWithLine("memcpy back failed!"); std::cout << "after unstable sort: " << std::endl; for (int i = 0; i < N; i++) { std::cout << " key: " << intKeys[i]; std::cout << " value: " << intValues[i] << std::endl; } // cleanup delete[] intKeys; delete[] intValues; hipFree(dev_intKeys); hipFree(dev_intValues); checkCUDAErrorWithLine("hipFree failed!"); return; }
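// ---------------------------------------------------------------------------
// Illustrative sketch, not part of the kernel file above: the uniform-grid
// bookkeeping that kernComputeIndices, thrust::sort_by_key, kernResetIntBuffer
// and kernIdentifyCellStartEnd perform on the device, reproduced on the host
// with toy data so the layout is easy to inspect. The cell labels, boid count
// and helper names below are illustrative assumptions, not part of the API.
#include <thrust/host_vector.h>
#include <thrust/sort.h>
#include <vector>
#include <cstdio>

static void demoUniformGridRanges() {
  // Six boids, each already labeled with the 1D index of its grid cell.
  const int keysInit[]   = {2, 0, 2, 1, 0, 2};   // cell of boid i
  const int valuesInit[] = {0, 1, 2, 3, 4, 5};   // boid array index
  thrust::host_vector<int> cellOfBoid(keysInit, keysInit + 6);
  thrust::host_vector<int> boidIndex(valuesInit, valuesInit + 6);
  const int cellCount = 3;

  // Same call shape as the device path, only on host iterators: afterwards,
  // boids that share a cell sit next to each other in boidIndex.
  thrust::sort_by_key(cellOfBoid.begin(), cellOfBoid.end(), boidIndex.begin());

  // -1 marks an empty cell, matching kernResetIntBuffer(..., -1).
  std::vector<int> cellStart(cellCount, -1), cellEnd(cellCount, -1);
  for (int i = 0; i < (int)cellOfBoid.size(); ++i) {
    const int c = cellOfBoid[i];
    if (i == 0 || cellOfBoid[i - 1] != c) cellStart[c] = i;                              // run begins
    if (i + 1 == (int)cellOfBoid.size() || cellOfBoid[i + 1] != c) cellEnd[c] = i;       // run ends
  }
  // Note: this loop also closes the run of the final cell in the sorted order;
  // in kernIdentifyCellStartEnd above no thread writes that cell's end index,
  // because the end is only written when a following boid falls in a different cell.
  for (int c = 0; c < cellCount; ++c)
    std::printf("cell %d -> boids[%d..%d]\n", c, cellStart[c], cellEnd[c]);
  // Expected: cell 0 -> boids[0..1], cell 1 -> boids[2..2], cell 2 -> boids[3..5]
}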
62c139c4ead294d171af4d2e1c5162d4a0a6dcd2.cu
#define GLM_FORCE_CUDA #include <stdio.h> #include <cuda.h> #include <cmath> #include <glm/glm.hpp> #include "utilityCore.hpp" #include "kernel.h" // LOOK-2.1 potentially useful for doing grid-based neighbor search #ifndef imax #define imax( a, b ) ( ((a) > (b)) ? (a) : (b) ) #endif #ifndef imin #define imin( a, b ) ( ((a) < (b)) ? (a) : (b) ) #endif #define checkCUDAErrorWithLine(msg) checkCUDAError(msg, __LINE__) // Either 8 or 27 depending on whether cell width = distance or 2 * distance #define NUM_NEIGHBORS 8 /** * Check for CUDA errors; print and exit if there was a problem. */ void checkCUDAError(const char *msg, int line = -1) { cudaError_t err = cudaGetLastError(); if (cudaSuccess != err) { if (line >= 0) { fprintf(stderr, "Line %d: ", line); } fprintf(stderr, "Cuda error: %s: %s.\n", msg, cudaGetErrorString(err)); exit(EXIT_FAILURE); } } /***************** * Configuration * *****************/ /*! Block size used for CUDA kernel launch. */ #define blockSize 128 // LOOK-1.2 Parameters for the boids algorithm. // These worked well in our reference implementation. #define rule1Distance 5.0f #define rule2Distance 3.0f #define rule3Distance 5.0f #define rule1Scale 0.01f #define rule2Scale 0.1f #define rule3Scale 0.1f #define maxSpeed 1.0f /*! Size of the starting area in simulation space. */ #define scene_scale 100.0f /*********************************************** * Kernel state (pointers are device pointers) * ***********************************************/ int numObjects; dim3 threadsPerBlock(blockSize); // LOOK-1.2 - These buffers are here to hold all your boid information. // These get allocated for you in Boids::initSimulation. // Consider why you would need two velocity buffers in a simulation where each // boid cares about its neighbors' velocities. // These are called ping-pong buffers. glm::vec3 *dev_pos; glm::vec3 *dev_vel1; glm::vec3 *dev_vel2; // LOOK-2.1 - these are NOT allocated for you. You'll have to set up the thrust // pointers on your own too. // For efficient sorting and the uniform grid. These should always be parallel. int *dev_particleArrayIndices; // What index in dev_pos and dev_velX represents this particle? int *dev_particleGridIndices; // What grid cell is this particle in? // needed for use with thrust thrust::device_ptr<int> dev_thrust_particleArrayIndices; thrust::device_ptr<int> dev_thrust_particleGridIndices; int *dev_gridCellStartIndices; // What part of dev_particleArrayIndices belongs int *dev_gridCellEndIndices; // to this cell? // TODO-2.3 - consider what additional buffers you might need to reshuffle // the position and velocity data to be coherent within cells. glm::vec3 *dev_sortedPos; // LOOK-2.1 - Grid parameters based on simulation parameters. // These are automatically computed for you in Boids::initSimulation int gridCellCount; int gridSideCount; float gridCellWidth; float gridInverseCellWidth; glm::vec3 gridMinimum; /****************** * initSimulation * ******************/ __host__ __device__ unsigned int hash(unsigned int a) { a = (a + 0x7ed55d16) + (a << 12); a = (a ^ 0xc761c23c) ^ (a >> 19); a = (a + 0x165667b1) + (a << 5); a = (a + 0xd3a2646c) ^ (a << 9); a = (a + 0xfd7046c5) + (a << 3); a = (a ^ 0xb55a4f09) ^ (a >> 16); return a; } /** * LOOK-1.2 - this is a typical helper function for a CUDA kernel. * Function for generating a random vec3. 
*/ __host__ __device__ glm::vec3 generateRandomVec3(float time, int index) { thrust::default_random_engine rng(hash((int)(index * time))); thrust::uniform_real_distribution<float> unitDistrib(-1, 1); return glm::vec3((float)unitDistrib(rng), (float)unitDistrib(rng), (float)unitDistrib(rng)); } /** * LOOK-1.2 - This is a basic CUDA kernel. * CUDA kernel for generating boids with a specified mass randomly around the star. */ __global__ void kernGenerateRandomPosArray(int time, int N, glm::vec3 * arr, float scale) { int index = (blockIdx.x * blockDim.x) + threadIdx.x; if (index < N) { glm::vec3 rand = generateRandomVec3(time, index); arr[index].x = scale * rand.x; arr[index].y = scale * rand.y; arr[index].z = scale * rand.z; } } /** * Initialize memory, update some globals */ void Boids::initSimulation(int N) { numObjects = N; dim3 fullBlocksPerGrid((N + blockSize - 1) / blockSize); // LOOK-1.2 - This is basic CUDA memory management and error checking. // Don't forget to cudaFree in Boids::endSimulation. cudaMalloc((void**)&dev_pos, N * sizeof(glm::vec3)); checkCUDAErrorWithLine("cudaMalloc dev_pos failed!"); cudaMalloc((void**)&dev_vel1, N * sizeof(glm::vec3)); checkCUDAErrorWithLine("cudaMalloc dev_vel1 failed!"); cudaMalloc((void**)&dev_vel2, N * sizeof(glm::vec3)); checkCUDAErrorWithLine("cudaMalloc dev_vel2 failed!"); // LOOK-1.2 - This is a typical CUDA kernel invocation. kernGenerateRandomPosArray<<<fullBlocksPerGrid, blockSize>>>(1, numObjects, dev_pos, scene_scale); checkCUDAErrorWithLine("kernGenerateRandomPosArray failed!"); // LOOK-2.1 computing grid params gridCellWidth = 2.0f * std::max(std::max(rule1Distance, rule2Distance), rule3Distance); int halfSideCount = (int)(scene_scale / gridCellWidth) + 1; gridSideCount = 2 * halfSideCount; gridCellCount = gridSideCount * gridSideCount * gridSideCount; gridInverseCellWidth = 1.0f / gridCellWidth; float halfGridWidth = gridCellWidth * halfSideCount; gridMinimum.x -= halfGridWidth; gridMinimum.y -= halfGridWidth; gridMinimum.z -= halfGridWidth; // TODO-2.1 TODO-2.3 - Allocate additional buffers here. cudaMalloc((void**)&dev_particleArrayIndices, N * sizeof(int)); checkCUDAErrorWithLine("cudaMalloc dev_particleArrayIndices failed!"); cudaMalloc((void**)&dev_particleGridIndices, N * sizeof(int)); checkCUDAErrorWithLine("cudaMalloc dev_particleGridIndices failed!"); cudaMalloc((void**)&dev_gridCellStartIndices, gridCellCount * sizeof(int)); checkCUDAErrorWithLine("cudaMalloc dev_gridCellStartIndices failed!"); cudaMalloc((void**)&dev_gridCellEndIndices, gridCellCount * sizeof(int)); checkCUDAErrorWithLine("cudaMalloc dev_gridCellEndIndices failed!"); cudaMalloc((void**)&dev_sortedPos, N * sizeof(glm::vec3)); checkCUDAErrorWithLine("cudaMalloc dev_sortedPos failed!"); cudaThreadSynchronize(); } /****************** * copyBoidsToVBO * ******************/ /** * Copy the boid positions into the VBO so that they can be drawn by OpenGL. 
*/ __global__ void kernCopyPositionsToVBO(int N, glm::vec3 *pos, float *vbo, float s_scale) { int index = threadIdx.x + (blockIdx.x * blockDim.x); float c_scale = -1.0f / s_scale; if (index < N) { vbo[4 * index + 0] = pos[index].x * c_scale; vbo[4 * index + 1] = pos[index].y * c_scale; vbo[4 * index + 2] = pos[index].z * c_scale; vbo[4 * index + 3] = 1.0f; } } __global__ void kernCopyVelocitiesToVBO(int N, glm::vec3 *vel, float *vbo, float s_scale) { int index = threadIdx.x + (blockIdx.x * blockDim.x); if (index < N) { vbo[4 * index + 0] = vel[index].x + 0.3f; vbo[4 * index + 1] = vel[index].y + 0.3f; vbo[4 * index + 2] = vel[index].z + 0.3f; vbo[4 * index + 3] = 1.0f; } } /** * Wrapper for call to the kernCopyboidsToVBO CUDA kernel. */ void Boids::copyBoidsToVBO(float *vbodptr_positions, float *vbodptr_velocities) { dim3 fullBlocksPerGrid((numObjects + blockSize - 1) / blockSize); kernCopyPositionsToVBO << <fullBlocksPerGrid, blockSize >> >(numObjects, dev_pos, vbodptr_positions, scene_scale); kernCopyVelocitiesToVBO << <fullBlocksPerGrid, blockSize >> >(numObjects, dev_vel1, vbodptr_velocities, scene_scale); checkCUDAErrorWithLine("copyBoidsToVBO failed!"); cudaThreadSynchronize(); } /****************** * stepSimulation * ******************/ /** * LOOK-1.2 You can use this as a helper for kernUpdateVelocityBruteForce. * __device__ code can be called from a __global__ context * Compute the new velocity on the body with index `iSelf` due to the `N` boids * in the `pos` and `vel` arrays. */ __device__ glm::vec3 computeVelocityChange(int N, int iSelf, const glm::vec3 *pos, const glm::vec3 *vel) { // Rule 1: boids fly towards their local perceived center of mass, which excludes themselves // Rule 2: boids try to stay a distance d away from each other // Rule 3: boids try to match the speed of surrounding boids glm::vec3 thisPos = pos[iSelf]; glm::vec3 perceivedCenter; glm::vec3 cohesionVel; glm::vec3 separationVel; glm::vec3 alignmentVel; float rule1NeighborCount = 0.f; float rule3NeighborCount = 0.f; for (int i = 0; i < N; i++) { if (i == iSelf) { continue; } glm::vec3 otherPos = pos[i]; float distance = glm::distance(thisPos, otherPos); if (distance < rule1Distance) { perceivedCenter += otherPos; rule1NeighborCount += 1.0; } if (distance < rule2Distance) { separationVel -= otherPos - thisPos; } if (distance < rule3Distance) { alignmentVel += vel[i]; rule3NeighborCount += 1.0; } } if (rule1NeighborCount > 0) { cohesionVel = (perceivedCenter / rule1NeighborCount - thisPos) * rule1Scale; } separationVel *= rule2Scale; if (rule3NeighborCount > 0) { alignmentVel = alignmentVel / rule3NeighborCount * rule3Scale; } return cohesionVel + separationVel + alignmentVel; } /** * TODO-1.2 implement basic flocking * For each of the `N` bodies, update its position based on its current velocity. */ __global__ void kernUpdateVelocityBruteForce(int N, glm::vec3 *pos, glm::vec3 *vel1, glm::vec3 *vel2) { // Compute a new velocity based on pos and vel1 // Clamp the speed // Record the new velocity into vel2. Question: why NOT vel1? int index = threadIdx.x + (blockIdx.x * blockDim.x); if (index >= N) { return; } glm::vec3 newVel = vel1[index] + computeVelocityChange(N, index, pos, vel1); if (glm::length(newVel) > maxSpeed) { newVel = glm::normalize(newVel) * maxSpeed; } vel2[index] = newVel; } /** * LOOK-1.2 Since this is pretty trivial, we implemented it for you. * For each of the `N` bodies, update its position based on its current velocity. 
*/ __global__ void kernUpdatePos(int N, float dt, glm::vec3 *pos, glm::vec3 *vel) { // Update position by velocity int index = threadIdx.x + (blockIdx.x * blockDim.x); if (index >= N) { return; } glm::vec3 thisPos = pos[index]; thisPos += vel[index] * dt; // Wrap the boids around so we don't lose them thisPos.x = thisPos.x < -scene_scale ? scene_scale : thisPos.x; thisPos.y = thisPos.y < -scene_scale ? scene_scale : thisPos.y; thisPos.z = thisPos.z < -scene_scale ? scene_scale : thisPos.z; thisPos.x = thisPos.x > scene_scale ? -scene_scale : thisPos.x; thisPos.y = thisPos.y > scene_scale ? -scene_scale : thisPos.y; thisPos.z = thisPos.z > scene_scale ? -scene_scale : thisPos.z; pos[index] = thisPos; } // LOOK-2.1 Consider this method of computing a 1D index from a 3D grid index. // LOOK-2.3 Looking at this method, what would be the most memory efficient // order for iterating over neighboring grid cells? // for(x) // for(y) // for(z)? Or some other order? __device__ int gridIndex3Dto1D(int x, int y, int z, int gridResolution) { if (x < 0 || x >= gridResolution || y < 0 || y >= gridResolution || z < 0 || z >= gridResolution) { return -1; } return x + y * gridResolution + z * gridResolution * gridResolution; } __global__ void kernComputeIndices(int N, int gridResolution, glm::vec3 gridMin, float inverseCellWidth, glm::vec3 *pos, int *indices, int *gridIndices) { // TODO-2.1 // - Label each boid with the index of its grid cell. // - Set up a parallel array of integer indices as pointers to the actual // boid data in pos and vel1/vel2 int index = (blockIdx.x * blockDim.x) + threadIdx.x; if (index >= N) { return; } glm::vec3 thisPos = pos[index]; glm::vec3 gridIndex3D = glm::floor((thisPos - gridMin) * inverseCellWidth); gridIndices[index] = gridIndex3Dto1D(gridIndex3D.x, gridIndex3D.y, gridIndex3D.z, gridResolution); indices[index] = index; } // LOOK-2.1 Consider how this could be useful for indicating that a cell // does not enclose any boids __global__ void kernResetIntBuffer(int N, int *intBuffer, int value) { int index = (blockIdx.x * blockDim.x) + threadIdx.x; if (index < N) { intBuffer[index] = value; } } __global__ void kernIdentifyCellStartEnd(int N, int *particleGridIndices, int *gridCellStartIndices, int *gridCellEndIndices) { // TODO-2.1 // Identify the start point of each cell in the gridIndices array. // This is basically a parallel unrolling of a loop that goes // "this index doesn't match the one before it, must be a new cell!" 
int index = (blockIdx.x * blockDim.x) + threadIdx.x; if (index >= N) { return; } int gridIndex = particleGridIndices[index]; if (index == 0) { gridCellStartIndices[gridIndex] = index; gridCellEndIndices[gridIndex] = index; } else { int prevGridIndex = particleGridIndices[index - 1]; if (prevGridIndex != gridIndex) { gridCellStartIndices[gridIndex] = index; gridCellEndIndices[prevGridIndex] = index - 1; } } } __global__ void kernSortPosAndVel(int N, int *particleArrayIndices, glm::vec3 *pos, glm::vec3 *sortedPos, glm::vec3 *vel, glm::vec3 *sortedVel) { int index = (blockIdx.x * blockDim.x) + threadIdx.x; if (index >= N) { return; } int sortedIndex = particleArrayIndices[index]; sortedPos[index] = pos[sortedIndex]; sortedVel[index] = vel[sortedIndex]; } __global__ void kernUpdateVelNeighborSearchScattered( int N, int gridResolution, glm::vec3 gridMin, float inverseCellWidth, float cellWidth, int *gridCellStartIndices, int *gridCellEndIndices, int *particleArrayIndices, glm::vec3 *pos, glm::vec3 *vel1, glm::vec3 *vel2) { // TODO-2.1 - Update a boid's velocity using the uniform grid to reduce // the number of boids that need to be checked. // - Identify the grid cell that this particle is in // - Identify which cells may contain neighbors. This isn't always 8. // - For each cell, read the start/end indices in the boid pointer array. // - Access each boid in the cell and compute velocity change from // the boids rules, if this boid is within the neighborhood distance. // - Clamp the speed change before putting the new speed in vel2 int index = (blockIdx.x * blockDim.x) + threadIdx.x; if (index >= N) { return; } glm::vec3 thisPos = pos[index]; glm::vec3 perceivedCenter; glm::vec3 cohesionVel; glm::vec3 separationVel; glm::vec3 alignmentVel; float rule1NeighborCount = 0.f; float rule3NeighborCount = 0.f; int neighborIndices[NUM_NEIGHBORS]; int currentNeighborIndex = 0; glm::vec3 gridIndex3D = glm::floor((thisPos - gridMin) * inverseCellWidth); glm::vec3 gridCenter = (gridIndex3D - gridResolution * 0.5f) * cellWidth + (cellWidth * 0.5f); glm::vec3 displacement = glm::sign(thisPos - gridCenter); int startValue = 0; if (NUM_NEIGHBORS == 27) { startValue = -1; displacement = glm::vec3(1); } for (int z = startValue; z < 2; z++) { for (int y = startValue; y < 2; y++) { for (int x = startValue; x < 2; x++) { neighborIndices[currentNeighborIndex++] = gridIndex3Dto1D(gridIndex3D.x + x * displacement.x, gridIndex3D.y + y * displacement.y, gridIndex3D.z + z * displacement.z, gridResolution); } } } for (int neighborCubeIndex = 0; neighborCubeIndex < currentNeighborIndex; neighborCubeIndex++) { int neighborGridIndex = neighborIndices[neighborCubeIndex]; // An invalid 3D grid index was given if (neighborGridIndex == -1) { continue; } int startIndex = gridCellStartIndices[neighborGridIndex]; int endIndex = gridCellEndIndices[neighborGridIndex]; for (int otherGridArrayIndex = startIndex; otherGridArrayIndex <= endIndex; otherGridArrayIndex++) { int otherParticleArrayIndex = particleArrayIndices[otherGridArrayIndex]; if (otherParticleArrayIndex == index) { continue; } glm::vec3 otherPos = pos[otherParticleArrayIndex]; glm::vec3 otherVel = vel1[otherParticleArrayIndex]; float distance = glm::distance(thisPos, otherPos); if (distance < rule1Distance) { perceivedCenter += otherPos; rule1NeighborCount += 1.0; } if (distance < rule2Distance) { separationVel -= otherPos - thisPos; } if (distance < rule3Distance) { alignmentVel += vel1[otherParticleArrayIndex]; rule3NeighborCount += 1.0; } } } if (rule1NeighborCount > 
0) { cohesionVel = (perceivedCenter / rule1NeighborCount - thisPos) * rule1Scale; } separationVel *= rule2Scale; if (rule3NeighborCount > 0) { alignmentVel = alignmentVel / rule3NeighborCount * rule3Scale; } glm::vec3 newVel = vel1[index] + cohesionVel + separationVel + alignmentVel; if (glm::length(newVel) > maxSpeed) { newVel = glm::normalize(newVel) * maxSpeed; } vel2[index] = newVel; } __global__ void kernUpdateVelNeighborSearchCoherent( int N, int gridResolution, glm::vec3 gridMin, float inverseCellWidth, float cellWidth, int *gridCellStartIndices, int *gridCellEndIndices, glm::vec3 *pos, glm::vec3 *vel1, glm::vec3 *vel2) { // TODO-2.3 - This should be very similar to kernUpdateVelNeighborSearchScattered, // except with one less level of indirection. // This should expect gridCellStartIndices and gridCellEndIndices to refer // directly to pos and vel1. // - Identify the grid cell that this particle is in // - Identify which cells may contain neighbors. This isn't always 8. // - For each cell, read the start/end indices in the boid pointer array. // DIFFERENCE: For best results, consider what order the cells should be // checked in to maximize the memory benefits of reordering the boids data. // - Access each boid in the cell and compute velocity change from // the boids rules, if this boid is within the neighborhood distance. // - Clamp the speed change before putting the new speed in vel2 int index = (blockIdx.x * blockDim.x) + threadIdx.x; if (index >= N) { return; } glm::vec3 thisPos = pos[index]; glm::vec3 perceivedCenter; glm::vec3 cohesionVel; glm::vec3 separationVel; glm::vec3 alignmentVel; float rule1NeighborCount = 0.f; float rule3NeighborCount = 0.f; int neighborIndices[NUM_NEIGHBORS]; int currentNeighborIndex = 0; glm::vec3 gridIndex3D = glm::floor((thisPos - gridMin) * inverseCellWidth); glm::vec3 gridCenter = (gridIndex3D - gridResolution * 0.5f) * cellWidth + (cellWidth * 0.5f); glm::vec3 displacement = glm::sign(thisPos - gridCenter); int startValue = 0; if (NUM_NEIGHBORS == 27) { startValue = -1; displacement = glm::vec3(1); } for (int z = startValue; z < 2; z++) { for (int y = startValue; y < 2; y++) { for (int x = startValue; x < 2; x++) { neighborIndices[currentNeighborIndex++] = gridIndex3Dto1D(gridIndex3D.x + x * displacement.x, gridIndex3D.y + y * displacement.y, gridIndex3D.z + z * displacement.z, gridResolution); } } } for (int neighborCubeIndex = 0; neighborCubeIndex < currentNeighborIndex; neighborCubeIndex++) { int neighborGridIndex = neighborIndices[neighborCubeIndex]; // An invalid 3D grid index was given if (neighborGridIndex == -1) { continue; } int startIndex = gridCellStartIndices[neighborGridIndex]; int endIndex = gridCellEndIndices[neighborGridIndex]; for (int otherGridArrayIndex = startIndex; otherGridArrayIndex <= endIndex; otherGridArrayIndex++) { if (otherGridArrayIndex == index) { continue; } glm::vec3 otherPos = pos[otherGridArrayIndex]; glm::vec3 otherVel = vel1[otherGridArrayIndex]; float distance = glm::distance(thisPos, otherPos); if (distance < rule1Distance) { perceivedCenter += otherPos; rule1NeighborCount += 1.0; } if (distance < rule2Distance) { separationVel -= otherPos - thisPos; } if (distance < rule3Distance) { alignmentVel += vel1[otherGridArrayIndex]; rule3NeighborCount += 1.0; } } } if (rule1NeighborCount > 0) { cohesionVel = (perceivedCenter / rule1NeighborCount - thisPos) * rule1Scale; } separationVel *= rule2Scale; if (rule3NeighborCount > 0) { alignmentVel = alignmentVel / rule3NeighborCount * rule3Scale; } glm::vec3 newVel 
= vel1[index] + cohesionVel + separationVel + alignmentVel; if (glm::length(newVel) > maxSpeed) { newVel = glm::normalize(newVel) * maxSpeed; } vel2[index] = newVel; } /** * Step the entire N-body simulation by `dt` seconds. */ void Boids::stepSimulationNaive(float dt) { // TODO-1.2 - use the kernels you wrote to step the simulation forward in time. // TODO-1.2 ping-pong the velocity buffers dim3 fullBlocksPerGrid((numObjects + blockSize - 1) / blockSize); kernUpdateVelocityBruteForce << <fullBlocksPerGrid, threadsPerBlock>> > (numObjects, dev_pos, dev_vel1, dev_vel2); kernUpdatePos << <fullBlocksPerGrid, threadsPerBlock >> > (numObjects, dt, dev_pos, dev_vel2); glm::vec3 *temp = dev_vel1; dev_vel1 = dev_vel2; dev_vel2 = temp; } void Boids::stepSimulationScatteredGrid(float dt) { // TODO-2.1 // Uniform Grid Neighbor search using Thrust sort. // In Parallel: // - label each particle with its array index as well as its grid index. // Use 2x width grids. // - Unstable key sort using Thrust. A stable sort isn't necessary, but you // are welcome to do a performance comparison. // - Naively unroll the loop for finding the start and end indices of each // cell's data pointers in the array of boid indices // - Perform velocity updates using neighbor search // - Update positions // - Ping-pong buffers as needed dim3 particleBlocksPerGrid((numObjects + blockSize - 1) / blockSize); dim3 cellBlocksPerGrid((gridCellCount + blockSize - 1) / blockSize); kernComputeIndices<<<particleBlocksPerGrid, threadsPerBlock>>>(numObjects, gridSideCount, gridMinimum, gridInverseCellWidth, dev_pos, dev_particleArrayIndices, dev_particleGridIndices); thrust::device_ptr<int> dev_thrust_keys(dev_particleGridIndices); thrust::device_ptr<int> dev_thrust_values(dev_particleArrayIndices); thrust::sort_by_key(dev_thrust_keys, dev_thrust_keys + numObjects, dev_thrust_values); kernResetIntBuffer << <cellBlocksPerGrid, threadsPerBlock >> > (gridCellCount, dev_gridCellStartIndices, -1); kernResetIntBuffer << <cellBlocksPerGrid, threadsPerBlock >> > (gridCellCount, dev_gridCellEndIndices, -1); kernIdentifyCellStartEnd<<<particleBlocksPerGrid, threadsPerBlock>>>(numObjects, dev_particleGridIndices, dev_gridCellStartIndices, dev_gridCellEndIndices); kernUpdateVelNeighborSearchScattered << <particleBlocksPerGrid, threadsPerBlock >> > (numObjects, gridSideCount, gridMinimum, gridInverseCellWidth, gridCellWidth, dev_gridCellStartIndices, dev_gridCellEndIndices, dev_particleArrayIndices, dev_pos, dev_vel1, dev_vel2); kernUpdatePos << <particleBlocksPerGrid, threadsPerBlock >> > (numObjects, dt, dev_pos, dev_vel2); glm::vec3 *temp = dev_pos; dev_pos = dev_sortedPos; dev_sortedPos = temp; } void Boids::stepSimulationCoherentGrid(float dt) { // TODO-2.3 - start by copying Boids::stepSimulationNaiveGrid // Uniform Grid Neighbor search using Thrust sort on cell-coherent data. // In Parallel: // - Label each particle with its array index as well as its grid index. // Use 2x width grids // - Unstable key sort using Thrust. A stable sort isn't necessary, but you // are welcome to do a performance comparison. // - Naively unroll the loop for finding the start and end indices of each // cell's data pointers in the array of boid indices // - BIG DIFFERENCE: use the rearranged array index buffer to reshuffle all // the particle data in the simulation array. // CONSIDER WHAT ADDITIONAL BUFFERS YOU NEED // - Perform velocity updates using neighbor search // - Update positions // - Ping-pong buffers as needed. THIS MAY BE DIFFERENT FROM BEFORE. 
dim3 particleBlocksPerGrid((numObjects + blockSize - 1) / blockSize); dim3 cellBlocksPerGrid((gridCellCount + blockSize - 1) / blockSize); kernComputeIndices << <particleBlocksPerGrid, threadsPerBlock >> >(numObjects, gridSideCount, gridMinimum, gridInverseCellWidth, dev_pos, dev_particleArrayIndices, dev_particleGridIndices); thrust::device_ptr<int> dev_thrust_keys(dev_particleGridIndices); thrust::device_ptr<int> dev_thrust_values(dev_particleArrayIndices); thrust::sort_by_key(dev_thrust_keys, dev_thrust_keys + numObjects, dev_thrust_values); kernResetIntBuffer << <cellBlocksPerGrid, threadsPerBlock >> > (gridCellCount, dev_gridCellStartIndices, -1); kernResetIntBuffer << <cellBlocksPerGrid, threadsPerBlock >> > (gridCellCount, dev_gridCellEndIndices, -1); kernIdentifyCellStartEnd << <particleBlocksPerGrid, threadsPerBlock >> >(numObjects, dev_particleGridIndices, dev_gridCellStartIndices, dev_gridCellEndIndices); kernSortPosAndVel << <particleBlocksPerGrid, threadsPerBlock >> > (numObjects, dev_particleArrayIndices, dev_pos, dev_sortedPos, dev_vel1, dev_vel2); kernUpdateVelNeighborSearchCoherent << <particleBlocksPerGrid, threadsPerBlock >> > (numObjects, gridSideCount, gridMinimum, gridInverseCellWidth, gridCellWidth, dev_gridCellStartIndices, dev_gridCellEndIndices, dev_sortedPos, dev_vel2, dev_vel1); kernUpdatePos << <particleBlocksPerGrid, threadsPerBlock >> > (numObjects, dt, dev_sortedPos, dev_vel1); glm::vec3 *temp = dev_pos; dev_pos = dev_sortedPos; dev_sortedPos = temp; } void Boids::endSimulation() { cudaFree(dev_vel1); cudaFree(dev_vel2); cudaFree(dev_pos); // TODO-2.1 TODO-2.3 - Free any additional buffers here. cudaFree(dev_particleArrayIndices); cudaFree(dev_particleGridIndices); cudaFree(dev_gridCellStartIndices); cudaFree(dev_gridCellEndIndices); cudaFree(dev_sortedPos); } void Boids::unitTest() { // LOOK-1.2 Feel free to write additional tests here. // test unstable sort int *dev_intKeys; int *dev_intValues; int N = 10; int *intKeys = new int[N]; int *intValues = new int[N]; intKeys[0] = 0; intValues[0] = 0; intKeys[1] = 1; intValues[1] = 1; intKeys[2] = 0; intValues[2] = 2; intKeys[3] = 3; intValues[3] = 3; intKeys[4] = 0; intValues[4] = 4; intKeys[5] = 2; intValues[5] = 5; intKeys[6] = 2; intValues[6] = 6; intKeys[7] = 0; intValues[7] = 7; intKeys[8] = 5; intValues[8] = 8; intKeys[9] = 6; intValues[9] = 9; cudaMalloc((void**)&dev_intKeys, N * sizeof(int)); checkCUDAErrorWithLine("cudaMalloc dev_intKeys failed!"); cudaMalloc((void**)&dev_intValues, N * sizeof(int)); checkCUDAErrorWithLine("cudaMalloc dev_intValues failed!"); dim3 fullBlocksPerGrid((N + blockSize - 1) / blockSize); std::cout << "before unstable sort: " << std::endl; for (int i = 0; i < N; i++) { std::cout << " key: " << intKeys[i]; std::cout << " value: " << intValues[i] << std::endl; } // How to copy data to the GPU cudaMemcpy(dev_intKeys, intKeys, sizeof(int) * N, cudaMemcpyHostToDevice); cudaMemcpy(dev_intValues, intValues, sizeof(int) * N, cudaMemcpyHostToDevice); // Wrap device vectors in thrust iterators for use with thrust. 
thrust::device_ptr<int> dev_thrust_keys(dev_intKeys); thrust::device_ptr<int> dev_thrust_values(dev_intValues); // LOOK-2.1 Example for using thrust::sort_by_key thrust::sort_by_key(dev_thrust_keys, dev_thrust_keys + N, dev_thrust_values); // How to copy data back to the CPU side from the GPU cudaMemcpy(intKeys, dev_intKeys, sizeof(int) * N, cudaMemcpyDeviceToHost); cudaMemcpy(intValues, dev_intValues, sizeof(int) * N, cudaMemcpyDeviceToHost); checkCUDAErrorWithLine("memcpy back failed!"); std::cout << "after unstable sort: " << std::endl; for (int i = 0; i < N; i++) { std::cout << " key: " << intKeys[i]; std::cout << " value: " << intValues[i] << std::endl; } // cleanup delete[] intKeys; delete[] intValues; cudaFree(dev_intKeys); cudaFree(dev_intValues); checkCUDAErrorWithLine("cudaFree failed!"); return; }
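// ---------------------------------------------------------------------------
// Illustrative sketch, not part of the file above: a tiny host-side answer to
// the LOOK-2.3 question next to gridIndex3Dto1D. Because the cell index is
// computed as x + y*R + z*R*R, x is the fastest-varying axis, so scanning
// neighbor cells with z outermost and x innermost visits consecutive 1D cell
// indices and therefore contiguous gridCellStart/End entries. The function
// name and loop bounds below are illustrative, not part of the simulation.
#include <cstdio>

static int toyGridIndex3Dto1D(int x, int y, int z, int gridResolution) {
  return x + y * gridResolution + z * gridResolution * gridResolution;
}

static void demoNeighborCellOrder() {
  const int gridResolution = 4;
  // Visit a 2x2x2 block of cells with x in the innermost loop: within each
  // inner run the printed indices differ by 1, i.e. they are adjacent in memory.
  for (int z = 0; z < 2; ++z)
    for (int y = 0; y < 2; ++y)
      for (int x = 0; x < 2; ++x)
        std::printf("%d ", toyGridIndex3Dto1D(x, y, z, gridResolution));
  std::printf("\n");  // prints: 0 1 4 5 16 17 20 21
}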
1e69f16feb824fc7198552f53f3b7c04a805aeb7.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" __global__ void SoftClipKernel( const float* p_Input, float* p_Output, int p_Width, int p_Height, float p_SoftClipA, float p_SoftClipB, float p_SoftClipC, float p_SoftClipD, float p_SoftClipE, float p_SoftClipF, int p_SwitchA, int p_SwitchB, int p_Source) { const int x = blockIdx.x * blockDim.x + threadIdx.x; const int y = blockIdx.y * blockDim.y + threadIdx.y; if (x < p_Width && y < p_Height) { const int index = (y * p_Width + x) * 4; float r = p_Input[index]; float g = p_Input[index + 1]; float b = p_Input[index + 2]; float cr = (powf(10.0f, (1023.0f * r - 685.0f) / 300.0f) - 0.0108f) / (1.0f - 0.0108f); float cg = (powf(10.0f, (1023.0f * g - 685.0f) / 300.0f) - 0.0108f) / (1.0f - 0.0108f); float cb = (powf(10.0f, (1023.0f * b - 685.0f) / 300.0f) - 0.0108f) / (1.0f - 0.0108f); float lr = r > 0.1496582f ? (powf(10.0f, (r - 0.385537f) / 0.2471896f) - 0.052272f) / 5.555556f : (r - 0.092809f) / 5.367655f; float lg = g > 0.1496582f ? (powf(10.0f, (g - 0.385537f) / 0.2471896f) - 0.052272f) / 5.555556f : (g - 0.092809f) / 5.367655f; float lb = b > 0.1496582f ? (powf(10.0f, (b - 0.385537f) / 0.2471896f) - 0.052272f) / 5.555556f : (b - 0.092809f) / 5.367655f; float mr = lr * 1.617523f + lg * -0.537287f + lb * -0.080237f; float mg = lr * -0.070573f + lg * 1.334613f + lb * -0.26404f; float mb = lr * -0.021102f + lg * -0.226954f + lb * 1.248056f; float sr = p_Source == 0 ? r : p_Source == 1 ? cr : mr; float sg = p_Source == 0 ? g : p_Source == 1 ? cg : mg; float sb = p_Source == 0 ? b : p_Source == 1 ? cb : mb; float Lr = sr > 1.0f ? 1.0f : sr; float Lg = sg > 1.0f ? 1.0f : sg; float Lb = sb > 1.0f ? 1.0f : sb; float Hr = (sr < 1.0f ? 1.0f : sr) - 1.0f; float Hg = (sg < 1.0f ? 1.0f : sg) - 1.0f; float Hb = (sb < 1.0f ? 1.0f : sb) - 1.0f; float rr = p_SoftClipA; float gg = p_SoftClipB; float aa = p_SoftClipC; float bb = p_SoftClipD; float ss = 1.0f - (p_SoftClipE / 10.0f); float sf = 1.0f - p_SoftClipF; float Hrr = Hr * powf(2.0f, rr); float Hgg = Hg * powf(2.0f, rr); float Hbb = Hb * powf(2.0f, rr); float HR = Hrr <= 1.0f ? 1.0f - powf(1.0f - Hrr, gg) : Hrr; float HG = Hgg <= 1.0f ? 1.0f - powf(1.0f - Hgg, gg) : Hgg; float HB = Hbb <= 1.0f ? 1.0f - powf(1.0f - Hbb, gg) : Hbb; float R = Lr + HR; float G = Lg + HG; float B = Lb + HB; float softr = aa == 1.0f ? R : (R > aa ? (-1.0f / ((R - aa) / (bb - aa) + 1.0f) + 1.0f) * (bb - aa) + aa : R); float softR = bb == 1.0f ? softr : softr > 1.0f - (bb / 50.0f) ? (-1.0f / ((softr - (1.0f - (bb / 50.0f))) / (1.0f - (1.0f - (bb / 50.0f))) + 1.0f) + 1.0f) * (1.0f - (1.0f - (bb / 50.0f))) + (1.0f - (bb / 50.0f)) : softr; float softg = (aa == 1.0f) ? G : (G > aa ? (-1.0f / ((G - aa) / (bb - aa) + 1.0f) + 1.0f) * (bb - aa) + aa : G); float softG = bb == 1.0f ? softg : softg > 1.0f - (bb / 50.0f) ? (-1.0f / ((softg - (1.0f - (bb / 50.0f))) / (1.0f - (1.0f - (bb / 50.0f))) + 1.0f) + 1.0f) * (1.0f - (1.0f - (bb / 50.0f))) + (1.0f - (bb / 50.0f)) : softg; float softb = (aa == 1.0f) ? B : (B > aa ? (-1.0f / ((B - aa) / (bb - aa) + 1.0f) + 1.0f) * (bb - aa) + aa : B); float softB = bb == 1.0f ? softb : softb > 1.0f - (bb / 50.0f) ? (-1.0f / ((softb - (1.0f - (bb / 50.0f))) / (1.0f - (1.0f - (bb / 50.0f))) + 1.0f) + 1.0f) * (1.0f - (1.0f - (bb / 50.0f))) + (1.0f - (bb / 50.0f)) : softb; float Cr = (softR * -1.0f) + 1.0f; float Cg = (softG * -1.0f) + 1.0f; float Cb = (softB * -1.0f) + 1.0f; float cR = ss == 1.0f ? Cr : Cr > ss ? 
(-1.0f / ((Cr - ss) / (sf - ss) + 1.0f) + 1.0f) * (sf - ss) + ss : Cr; float CR = sf == 1.0f ? (cR - 1.0f) * -1.0f : ((cR > 1.0f - (-p_SoftClipF / 50.0f) ? (-1.0f / ((cR - (1.0f - (-p_SoftClipF / 50.0f))) / (1.0f - (1.0f - (-p_SoftClipF / 50.0f))) + 1.0f) + 1.0f) * (1.0f - (1.0f - (-p_SoftClipF / 50.0f))) + (1.0f - (-p_SoftClipF / 50.0f)) : cR) - 1.0f) * -1.0f; float cG = ss == 1.0f ? Cg : Cg > ss ? (-1.0f / ((Cg - ss) / (sf - ss) + 1.0f) + 1.0f) * (sf - ss) + ss : Cg; float CG = sf == 1.0f ? (cG - 1.0f) * -1.0f : ((cG > 1.0f - (-p_SoftClipF / 50.0f) ? (-1.0f / ((cG - (1.0f - (-p_SoftClipF / 50.0f))) / (1.0f - (1.0f - (-p_SoftClipF / 50.0f))) + 1.0f) + 1.0f) * (1.0f - (1.0f - (-p_SoftClipF / 50.0f))) + (1.0f - (-p_SoftClipF / 50.0f)) : cG) - 1.0f) * -1.0f; float cB = ss == 1.0f ? Cb : Cb > ss ? (-1.0f / ((Cb - ss) / (sf - ss) + 1.0f) + 1.0f) * (sf - ss) + ss : Cb; float CB = sf == 1.0f ? (cB - 1.0f) * -1.0f : ((cB > 1.0f - (-p_SoftClipF / 50.0f) ? (-1.0f / ((cB - (1.0f - (-p_SoftClipF / 50.0f))) / (1.0f - (1.0f - (-p_SoftClipF / 50.0f))) + 1.0f) + 1.0f) * (1.0f - (1.0f - (-p_SoftClipF / 50.0f))) + (1.0f - (-p_SoftClipF / 50.0f)) : cB) - 1.0f) * -1.0f; float SR = p_Source == 0 ? CR : CR >= 0.0f && CR <= 1.0f ? (CR < 0.0181f ? (CR * 4.5f) : 1.0993f * powf(CR, 0.45f) - (1.0993f - 1.0f)) : CR; float SG = p_Source == 0 ? CG : CG >= 0.0f && CG <= 1.0f ? (CG < 0.0181f ? (CG * 4.5f) : 1.0993f * powf(CG, 0.45f) - (1.0993f - 1.0f)) : CG; float SB = p_Source == 0 ? CB : CB >= 0.0f && CB <= 1.0f ? (CB < 0.0181f ? (CB * 4.5f) : 1.0993f * powf(CB, 0.45f) - (1.0993f - 1.0f)) : CB; p_Output[index] = p_SwitchA == 1 ? (SR < 1.0f ? 1.0f : SR) - 1.0f : p_SwitchB == 1 ? (SR >= 0.0f ? 0.0f : SR + 1.0f) : SR; p_Output[index + 1] = p_SwitchA == 1 ? (SG < 1.0f ? 1.0f : SG) - 1.0f : p_SwitchB == 1 ? (SG >= 0.0f ? 0.0f : SG + 1.0f) : SG; p_Output[index + 2] = p_SwitchA == 1 ? (SB < 1.0f ? 1.0f : SB) - 1.0f : p_SwitchB == 1 ? (SB >= 0.0f ? 0.0f : SB + 1.0f) : SB; p_Output[index + 3] = p_Input[index + 3]; }}
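// ---------------------------------------------------------------------------
// Illustrative sketch, not part of the file above: a minimal host-side launch
// for SoftClipKernel, assuming the RGBA float buffers already live in device
// memory. The 16x16 block shape and every parameter value are placeholders,
// not values taken from the original source; see the kernel body for how each
// parameter is used.
#include <hip/hip_runtime.h>

void launchSoftClipExample(const float* d_input, float* d_output,
                           int width, int height) {
  const dim3 threads(16, 16);
  const dim3 blocks((width  + threads.x - 1) / threads.x,
                    (height + threads.y - 1) / threads.y);
  // Placeholder settings: p_SoftClipA..F, both switches off, p_Source = 0.
  hipLaunchKernelGGL(SoftClipKernel, blocks, threads, 0, 0,
                     d_input, d_output, width, height,
                     0.0f, 1.0f, 1.0f, 1.0f, 0.0f, 0.0f,
                     0, 0, 0);
  hipDeviceSynchronize();
}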
1e69f16feb824fc7198552f53f3b7c04a805aeb7.cu
#include "includes.h" __global__ void SoftClipKernel( const float* p_Input, float* p_Output, int p_Width, int p_Height, float p_SoftClipA, float p_SoftClipB, float p_SoftClipC, float p_SoftClipD, float p_SoftClipE, float p_SoftClipF, int p_SwitchA, int p_SwitchB, int p_Source) { const int x = blockIdx.x * blockDim.x + threadIdx.x; const int y = blockIdx.y * blockDim.y + threadIdx.y; if (x < p_Width && y < p_Height) { const int index = (y * p_Width + x) * 4; float r = p_Input[index]; float g = p_Input[index + 1]; float b = p_Input[index + 2]; float cr = (powf(10.0f, (1023.0f * r - 685.0f) / 300.0f) - 0.0108f) / (1.0f - 0.0108f); float cg = (powf(10.0f, (1023.0f * g - 685.0f) / 300.0f) - 0.0108f) / (1.0f - 0.0108f); float cb = (powf(10.0f, (1023.0f * b - 685.0f) / 300.0f) - 0.0108f) / (1.0f - 0.0108f); float lr = r > 0.1496582f ? (powf(10.0f, (r - 0.385537f) / 0.2471896f) - 0.052272f) / 5.555556f : (r - 0.092809f) / 5.367655f; float lg = g > 0.1496582f ? (powf(10.0f, (g - 0.385537f) / 0.2471896f) - 0.052272f) / 5.555556f : (g - 0.092809f) / 5.367655f; float lb = b > 0.1496582f ? (powf(10.0f, (b - 0.385537f) / 0.2471896f) - 0.052272f) / 5.555556f : (b - 0.092809f) / 5.367655f; float mr = lr * 1.617523f + lg * -0.537287f + lb * -0.080237f; float mg = lr * -0.070573f + lg * 1.334613f + lb * -0.26404f; float mb = lr * -0.021102f + lg * -0.226954f + lb * 1.248056f; float sr = p_Source == 0 ? r : p_Source == 1 ? cr : mr; float sg = p_Source == 0 ? g : p_Source == 1 ? cg : mg; float sb = p_Source == 0 ? b : p_Source == 1 ? cb : mb; float Lr = sr > 1.0f ? 1.0f : sr; float Lg = sg > 1.0f ? 1.0f : sg; float Lb = sb > 1.0f ? 1.0f : sb; float Hr = (sr < 1.0f ? 1.0f : sr) - 1.0f; float Hg = (sg < 1.0f ? 1.0f : sg) - 1.0f; float Hb = (sb < 1.0f ? 1.0f : sb) - 1.0f; float rr = p_SoftClipA; float gg = p_SoftClipB; float aa = p_SoftClipC; float bb = p_SoftClipD; float ss = 1.0f - (p_SoftClipE / 10.0f); float sf = 1.0f - p_SoftClipF; float Hrr = Hr * powf(2.0f, rr); float Hgg = Hg * powf(2.0f, rr); float Hbb = Hb * powf(2.0f, rr); float HR = Hrr <= 1.0f ? 1.0f - powf(1.0f - Hrr, gg) : Hrr; float HG = Hgg <= 1.0f ? 1.0f - powf(1.0f - Hgg, gg) : Hgg; float HB = Hbb <= 1.0f ? 1.0f - powf(1.0f - Hbb, gg) : Hbb; float R = Lr + HR; float G = Lg + HG; float B = Lb + HB; float softr = aa == 1.0f ? R : (R > aa ? (-1.0f / ((R - aa) / (bb - aa) + 1.0f) + 1.0f) * (bb - aa) + aa : R); float softR = bb == 1.0f ? softr : softr > 1.0f - (bb / 50.0f) ? (-1.0f / ((softr - (1.0f - (bb / 50.0f))) / (1.0f - (1.0f - (bb / 50.0f))) + 1.0f) + 1.0f) * (1.0f - (1.0f - (bb / 50.0f))) + (1.0f - (bb / 50.0f)) : softr; float softg = (aa == 1.0f) ? G : (G > aa ? (-1.0f / ((G - aa) / (bb - aa) + 1.0f) + 1.0f) * (bb - aa) + aa : G); float softG = bb == 1.0f ? softg : softg > 1.0f - (bb / 50.0f) ? (-1.0f / ((softg - (1.0f - (bb / 50.0f))) / (1.0f - (1.0f - (bb / 50.0f))) + 1.0f) + 1.0f) * (1.0f - (1.0f - (bb / 50.0f))) + (1.0f - (bb / 50.0f)) : softg; float softb = (aa == 1.0f) ? B : (B > aa ? (-1.0f / ((B - aa) / (bb - aa) + 1.0f) + 1.0f) * (bb - aa) + aa : B); float softB = bb == 1.0f ? softb : softb > 1.0f - (bb / 50.0f) ? (-1.0f / ((softb - (1.0f - (bb / 50.0f))) / (1.0f - (1.0f - (bb / 50.0f))) + 1.0f) + 1.0f) * (1.0f - (1.0f - (bb / 50.0f))) + (1.0f - (bb / 50.0f)) : softb; float Cr = (softR * -1.0f) + 1.0f; float Cg = (softG * -1.0f) + 1.0f; float Cb = (softB * -1.0f) + 1.0f; float cR = ss == 1.0f ? Cr : Cr > ss ? (-1.0f / ((Cr - ss) / (sf - ss) + 1.0f) + 1.0f) * (sf - ss) + ss : Cr; float CR = sf == 1.0f ? 
(cR - 1.0f) * -1.0f : ((cR > 1.0f - (-p_SoftClipF / 50.0f) ? (-1.0f / ((cR - (1.0f - (-p_SoftClipF / 50.0f))) / (1.0f - (1.0f - (-p_SoftClipF / 50.0f))) + 1.0f) + 1.0f) * (1.0f - (1.0f - (-p_SoftClipF / 50.0f))) + (1.0f - (-p_SoftClipF / 50.0f)) : cR) - 1.0f) * -1.0f; float cG = ss == 1.0f ? Cg : Cg > ss ? (-1.0f / ((Cg - ss) / (sf - ss) + 1.0f) + 1.0f) * (sf - ss) + ss : Cg; float CG = sf == 1.0f ? (cG - 1.0f) * -1.0f : ((cG > 1.0f - (-p_SoftClipF / 50.0f) ? (-1.0f / ((cG - (1.0f - (-p_SoftClipF / 50.0f))) / (1.0f - (1.0f - (-p_SoftClipF / 50.0f))) + 1.0f) + 1.0f) * (1.0f - (1.0f - (-p_SoftClipF / 50.0f))) + (1.0f - (-p_SoftClipF / 50.0f)) : cG) - 1.0f) * -1.0f; float cB = ss == 1.0f ? Cb : Cb > ss ? (-1.0f / ((Cb - ss) / (sf - ss) + 1.0f) + 1.0f) * (sf - ss) + ss : Cb; float CB = sf == 1.0f ? (cB - 1.0f) * -1.0f : ((cB > 1.0f - (-p_SoftClipF / 50.0f) ? (-1.0f / ((cB - (1.0f - (-p_SoftClipF / 50.0f))) / (1.0f - (1.0f - (-p_SoftClipF / 50.0f))) + 1.0f) + 1.0f) * (1.0f - (1.0f - (-p_SoftClipF / 50.0f))) + (1.0f - (-p_SoftClipF / 50.0f)) : cB) - 1.0f) * -1.0f; float SR = p_Source == 0 ? CR : CR >= 0.0f && CR <= 1.0f ? (CR < 0.0181f ? (CR * 4.5f) : 1.0993f * powf(CR, 0.45f) - (1.0993f - 1.0f)) : CR; float SG = p_Source == 0 ? CG : CG >= 0.0f && CG <= 1.0f ? (CG < 0.0181f ? (CG * 4.5f) : 1.0993f * powf(CG, 0.45f) - (1.0993f - 1.0f)) : CG; float SB = p_Source == 0 ? CB : CB >= 0.0f && CB <= 1.0f ? (CB < 0.0181f ? (CB * 4.5f) : 1.0993f * powf(CB, 0.45f) - (1.0993f - 1.0f)) : CB; p_Output[index] = p_SwitchA == 1 ? (SR < 1.0f ? 1.0f : SR) - 1.0f : p_SwitchB == 1 ? (SR >= 0.0f ? 0.0f : SR + 1.0f) : SR; p_Output[index + 1] = p_SwitchA == 1 ? (SG < 1.0f ? 1.0f : SG) - 1.0f : p_SwitchB == 1 ? (SG >= 0.0f ? 0.0f : SG + 1.0f) : SG; p_Output[index + 2] = p_SwitchA == 1 ? (SB < 1.0f ? 1.0f : SB) - 1.0f : p_SwitchB == 1 ? (SB >= 0.0f ? 0.0f : SB + 1.0f) : SB; p_Output[index + 3] = p_Input[index + 3]; }}
b0e21b283c2bb614e841f74c788e9fd46b4719c8.hip
// !!! This is a file automatically generated by hipify!!! #include <mpi.h> #include <hiprand/hiprand.h> #include "inc/conf.h" #include "d/q.h" #include "d/api.h" #include "d/ker.h" #include "utils/error.h" #include "inc/def.h" #include "utils/msg.h" #include "utils/cc.h" #include "utils/mc.h" #include "utils/kl.h" #include "mpi/wrapper.h" #include "inc/type.h" #include "inc/dev.h" #include "utils/imp.h" #include "coords/imp.h" #include "array3d/imp.h" #include "tex3d/type.h" #include "tex3d/imp.h" #include "field/imp.h" #include "bounce/imp.h" #include "label/imp.h" #include "math/tform/type.h" #include "math/tform/imp.h" #include "math/tform/dev.h" #include "tform/imp.h" #include "algo/utils/shfl.h" #include "algo/utils/dev.h" #include "type.h" #include "imp.h" #include "dev.h" namespace sdf_dev { #include "dev/main.h" } #include "imp/type.h" #include "imp/gen.h" #include "imp/split.h" #include "imp/main.h"
b0e21b283c2bb614e841f74c788e9fd46b4719c8.cu
#include <mpi.h> #include <curand.h> #include "inc/conf.h" #include "d/q.h" #include "d/api.h" #include "d/ker.h" #include "utils/error.h" #include "inc/def.h" #include "utils/msg.h" #include "utils/cc.h" #include "utils/mc.h" #include "utils/kl.h" #include "mpi/wrapper.h" #include "inc/type.h" #include "inc/dev.h" #include "utils/imp.h" #include "coords/imp.h" #include "array3d/imp.h" #include "tex3d/type.h" #include "tex3d/imp.h" #include "field/imp.h" #include "bounce/imp.h" #include "label/imp.h" #include "math/tform/type.h" #include "math/tform/imp.h" #include "math/tform/dev.h" #include "tform/imp.h" #include "algo/utils/shfl.h" #include "algo/utils/dev.h" #include "type.h" #include "imp.h" #include "dev.h" namespace sdf_dev { #include "dev/main.h" } #include "imp/type.h" #include "imp/gen.h" #include "imp/split.h" #include "imp/main.h"
267b9d476440bb1b7a51e655d02564b90e8a1aec.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* This work is part of the Core Imaging Library developed by Visual Analytics and Imaging System Group of the Science Technology Facilities Council, STFC Copyright 2017 Daniil Kazantsev Copyright 2017 Srikanth Nagella, Edoardo Pasca Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include "shared.h" #include "dTV_FGP_GPU_core.h" #include <thrust/functional.h> #include <thrust/device_vector.h> #include <thrust/transform_reduce.h> /* CUDA implementation of FGP-dTV [1,2] denoising/regularization model (2D/3D case) * which employs structural similarity of the level sets of two images/volumes, see [1,2] * The current implementation updates image 1 while image 2 is being fixed. * * Input Parameters: * 1. Noisy image/volume [REQUIRED] * 2. Additional reference image/volume of the same dimensions as (1) [REQUIRED] * 3. lambdaPar - regularization parameter [REQUIRED] * 4. Number of iterations [OPTIONAL] * 5. eplsilon: tolerance constant [OPTIONAL] * 6. eta: smoothing constant to calculate gradient of the reference [OPTIONAL] * * 7. TV-type: methodTV - 'iso' (0) or 'l1' (1) [OPTIONAL] * 8. nonneg: 'nonnegativity (0 is OFF by default) [OPTIONAL] * Output: * [1] Filtered/regularized image/volume * [2] Information vector which contains [iteration no., reached tolerance] * * This function is based on the Matlab's codes and papers by * [1] Amir Beck and Marc Teboulle, "Fast Gradient-Based Algorithms for Constrained Total Variation Image Denoising and Deblurring Problems" * [2] M. J. Ehrhardt and M. M. Betcke, Multi-Contrast MRI Reconstruction with Structure-Guided Total Variation, SIAM Journal on Imaging Sciences 9(3), pp. 10841106 */ #define BLKXSIZE2D 16 #define BLKYSIZE2D 16 #define BLKXSIZE 8 #define BLKYSIZE 8 #define BLKZSIZE 8 #define idivup(a, b) ( ((a)%(b) != 0) ? 
(a)/(b)+1 : (a)/(b) ) //struct square { __host__ __device__ float operator()(float x) { return x * x; } }; /************************************************/ /*****************2D modules*********************/ /************************************************/ __global__ void GradNorm_func2D_kernel(float *Refd, float *Refd_x, float *Refd_y, float eta, int N, int M, int ImSize) { float val1, val2, gradX, gradY, magn; //calculate each thread global index const int xIndex=blockIdx.x*blockDim.x+threadIdx.x; const int yIndex=blockIdx.y*blockDim.y+threadIdx.y; int index = xIndex + N*yIndex; if ((xIndex < N) && (yIndex < M)) { /* boundary conditions */ if (xIndex >= N-1) val1 = 0.0f; else val1 = Refd[(xIndex+1) + N*yIndex]; if (yIndex >= M-1) val2 = 0.0f; else val2 = Refd[(xIndex) + N*(yIndex + 1)]; gradX = val1 - Refd[index]; gradY = val2 - Refd[index]; magn = pow(gradX,2) + pow(gradY,2); magn = sqrt(magn + pow(eta,2)); Refd_x[index] = gradX/magn; Refd_y[index] = gradY/magn; } return; } __global__ void ProjectVect_func2D_kernel(float *R1, float *R2, float *Refd_x, float *Refd_y, int N, int M, int ImSize) { float in_prod; //calculate each thread global index const int xIndex=blockIdx.x*blockDim.x+threadIdx.x; const int yIndex=blockIdx.y*blockDim.y+threadIdx.y; int index = xIndex + N*yIndex; if ((xIndex < N) && (yIndex < M)) { in_prod = R1[index]*Refd_x[index] + R2[index]*Refd_y[index]; /* calculate inner product */ R1[index] = R1[index] - in_prod*Refd_x[index]; R2[index] = R2[index] - in_prod*Refd_y[index]; } return; } __global__ void Obj_dfunc2D_kernel(float *Ad, float *D, float *R1, float *R2, int N, int M, int ImSize, float lambda) { float val1,val2; //calculate each thread global index const int xIndex=blockIdx.x*blockDim.x+threadIdx.x; const int yIndex=blockIdx.y*blockDim.y+threadIdx.y; int index = xIndex + N*yIndex; if ((xIndex < N) && (yIndex < M)) { if (xIndex <= 0) {val1 = 0.0f;} else {val1 = R1[(xIndex-1) + N*yIndex];} if (yIndex <= 0) {val2 = 0.0f;} else {val2 = R2[xIndex + N*(yIndex-1)];} //Write final result to global memory D[index] = Ad[index] - lambda*(R1[index] + R2[index] - val1 - val2); } return; } __global__ void Grad_dfunc2D_kernel(float *P1, float *P2, float *D, float *R1, float *R2, float *Refd_x, float *Refd_y, int N, int M, int ImSize, float multip) { float val1,val2,in_prod; //calculate each thread global index const int xIndex=blockIdx.x*blockDim.x+threadIdx.x; const int yIndex=blockIdx.y*blockDim.y+threadIdx.y; int index = xIndex + N*yIndex; if ((xIndex < N) && (yIndex < M)) { /* boundary conditions */ if (xIndex >= N-1) val1 = 0.0f; else val1 = D[index] - D[(xIndex+1) + N*yIndex]; if (yIndex >= M-1) val2 = 0.0f; else val2 = D[index] - D[(xIndex) + N*(yIndex + 1)]; in_prod = val1*Refd_x[index] + val2*Refd_y[index]; /* calculate inner product */ val1 = val1 - in_prod*Refd_x[index]; val2 = val2 - in_prod*Refd_y[index]; //Write final result to global memory P1[index] = R1[index] + multip*val1; P2[index] = R2[index] + multip*val2; } return; } __global__ void Proj_dfunc2D_iso_kernel(float *P1, float *P2, int N, int M, int ImSize) { float denom; //calculate each thread global index const int xIndex=blockIdx.x*blockDim.x+threadIdx.x; const int yIndex=blockIdx.y*blockDim.y+threadIdx.y; int index = xIndex + N*yIndex; if ((xIndex < N) && (yIndex < M)) { denom = pow(P1[index],2) + pow(P2[index],2); if (denom > 1.0f) { P1[index] = P1[index]/sqrt(denom); P2[index] = P2[index]/sqrt(denom); } } return; } __global__ void Proj_dfunc2D_aniso_kernel(float *P1, float *P2, int N, int M, int 
ImSize) { float val1, val2; //calculate each thread global index const int xIndex=blockIdx.x*blockDim.x+threadIdx.x; const int yIndex=blockIdx.y*blockDim.y+threadIdx.y; int index = xIndex + N*yIndex; if ((xIndex < N) && (yIndex < M)) { val1 = abs(P1[index]); val2 = abs(P2[index]); if (val1 < 1.0f) {val1 = 1.0f;} if (val2 < 1.0f) {val2 = 1.0f;} P1[index] = P1[index]/val1; P2[index] = P2[index]/val2; } return; } __global__ void Rupd_dfunc2D_kernel(float *P1, float *P1_old, float *P2, float *P2_old, float *R1, float *R2, float tkp1, float tk, float multip2, int N, int M, int ImSize) { //calculate each thread global index const int xIndex=blockIdx.x*blockDim.x+threadIdx.x; const int yIndex=blockIdx.y*blockDim.y+threadIdx.y; int index = xIndex + N*yIndex; if ((xIndex < N) && (yIndex < M)) { R1[index] = P1[index] + multip2*(P1[index] - P1_old[index]); R2[index] = P2[index] + multip2*(P2[index] - P2_old[index]); } return; } __global__ void dTVnonneg2D_kernel(float* Output, int N, int M, int num_total) { int xIndex = blockDim.x * blockIdx.x + threadIdx.x; int yIndex = blockDim.y * blockIdx.y + threadIdx.y; int index = xIndex + N*yIndex; if (index < num_total) { if (Output[index] < 0.0f) Output[index] = 0.0f; } } __global__ void dTVcopy_kernel2D(float *Input, float* Output, int N, int M, int num_total) { int xIndex = blockDim.x * blockIdx.x + threadIdx.x; int yIndex = blockDim.y * blockIdx.y + threadIdx.y; int index = xIndex + N*yIndex; if (index < num_total) { Output[index] = Input[index]; } } __global__ void dTVcopy_kernel3D(float *Input, float* Output, int N, int M, int Z, int num_total) { int i = blockDim.x * blockIdx.x + threadIdx.x; int j = blockDim.y * blockIdx.y + threadIdx.y; int k = blockDim.z * blockIdx.z + threadIdx.z; int index = (N*M)*k + i + N*j; if (index < num_total) { Output[index] = Input[index]; } } __global__ void dTVResidCalc2D_kernel(float *Input1, float *Input2, float* Output, int N, int M, int num_total) { int xIndex = blockDim.x * blockIdx.x + threadIdx.x; int yIndex = blockDim.y * blockIdx.y + threadIdx.y; int index = xIndex + N*yIndex; if (index < num_total) { Output[index] = Input1[index] - Input2[index]; } } __global__ void dTVResidCalc3D_kernel(float *Input1, float *Input2, float* Output, int N, int M, int Z, int num_total) { int i = blockDim.x * blockIdx.x + threadIdx.x; int j = blockDim.y * blockIdx.y + threadIdx.y; int k = blockDim.z * blockIdx.z + threadIdx.z; int index = (N*M)*k + i + N*j; if (index < num_total) { Output[index] = Input1[index] - Input2[index]; } } /************************************************/ /*****************3D modules*********************/ /************************************************/ __global__ void GradNorm_func3D_kernel(float *Refd, float *Refd_x, float *Refd_y, float *Refd_z, float eta, int N, int M, int Z, int ImSize) { float val1, val2, val3, gradX, gradY, gradZ, magn; //calculate each thread global index int i = blockDim.x * blockIdx.x + threadIdx.x; int j = blockDim.y * blockIdx.y + threadIdx.y; int k = blockDim.z * blockIdx.z + threadIdx.z; int index = (N*M)*k + i + N*j; if ((i < N) && (j < M) && (k < Z)) { /* boundary conditions */ if (i >= N-1) val1 = 0.0f; else val1 = Refd[(N*M)*k + (i+1) + N*j]; if (j >= M-1) val2 = 0.0f; else val2 = Refd[(N*M)*k + i + N*(j+1)]; if (k >= Z-1) val3 = 0.0f; else val3 = Refd[(N*M)*(k+1) + i + N*j]; gradX = val1 - Refd[index]; gradY = val2 - Refd[index]; gradZ = val3 - Refd[index]; magn = pow(gradX,2) + pow(gradY,2) + pow(gradZ,2); magn = sqrt(magn + pow(eta,2)); Refd_x[index] = gradX/magn; 
Refd_y[index] = gradY/magn; Refd_z[index] = gradZ/magn; } return; } __global__ void ProjectVect_func3D_kernel(float *R1, float *R2, float *R3, float *Refd_x, float *Refd_y, float *Refd_z, int N, int M, int Z, int ImSize) { float in_prod; //calculate each thread global index int i = blockDim.x * blockIdx.x + threadIdx.x; int j = blockDim.y * blockIdx.y + threadIdx.y; int k = blockDim.z * blockIdx.z + threadIdx.z; int index = (N*M)*k + i + N*j; if ((i < N) && (j < M) && (k < Z)) { in_prod = R1[index]*Refd_x[index] + R2[index]*Refd_y[index] + R3[index]*Refd_z[index]; /* calculate inner product */ R1[index] = R1[index] - in_prod*Refd_x[index]; R2[index] = R2[index] - in_prod*Refd_y[index]; R3[index] = R3[index] - in_prod*Refd_z[index]; } return; } __global__ void Obj_dfunc3D_kernel(float *Ad, float *D, float *R1, float *R2, float *R3, int N, int M, int Z, int ImSize, float lambda) { float val1,val2,val3; //calculate each thread global index int i = blockDim.x * blockIdx.x + threadIdx.x; int j = blockDim.y * blockIdx.y + threadIdx.y; int k = blockDim.z * blockIdx.z + threadIdx.z; int index = (N*M)*k + i + N*j; if ((i < N) && (j < M) && (k < Z)) { if (i <= 0) {val1 = 0.0f;} else {val1 = R1[(N*M)*(k) + (i-1) + N*j];} if (j <= 0) {val2 = 0.0f;} else {val2 = R2[(N*M)*(k) + i + N*(j-1)];} if (k <= 0) {val3 = 0.0f;} else {val3 = R3[(N*M)*(k-1) + i + N*j];} //Write final result to global memory D[index] = Ad[index] - lambda*(R1[index] + R2[index] + R3[index] - val1 - val2 - val3); } return; } __global__ void Grad_dfunc3D_kernel(float *P1, float *P2, float *P3, float *D, float *R1, float *R2, float *R3, float *Refd_x, float *Refd_y, float *Refd_z, int N, int M, int Z, int ImSize, float multip) { float val1,val2,val3,in_prod; //calculate each thread global index int i = blockDim.x * blockIdx.x + threadIdx.x; int j = blockDim.y * blockIdx.y + threadIdx.y; int k = blockDim.z * blockIdx.z + threadIdx.z; int index = (N*M)*k + i + N*j; if ((i < N) && (j < M) && (k < Z)) { /* boundary conditions */ if (i >= N-1) val1 = 0.0f; else val1 = D[index] - D[(N*M)*(k) + (i+1) + N*j]; if (j >= M-1) val2 = 0.0f; else val2 = D[index] - D[(N*M)*(k) + i + N*(j+1)]; if (k >= Z-1) val3 = 0.0f; else val3 = D[index] - D[(N*M)*(k+1) + i + N*j]; in_prod = val1*Refd_x[index] + val2*Refd_y[index] + val3*Refd_z[index]; /* calculate inner product */ val1 = val1 - in_prod*Refd_x[index]; val2 = val2 - in_prod*Refd_y[index]; val3 = val3 - in_prod*Refd_z[index]; //Write final result to global memory P1[index] = R1[index] + multip*val1; P2[index] = R2[index] + multip*val2; P3[index] = R3[index] + multip*val3; } return; } __global__ void Proj_dfunc3D_iso_kernel(float *P1, float *P2, float *P3, int N, int M, int Z, int ImSize) { float denom,sq_denom; //calculate each thread global index int i = blockDim.x * blockIdx.x + threadIdx.x; int j = blockDim.y * blockIdx.y + threadIdx.y; int k = blockDim.z * blockIdx.z + threadIdx.z; int index = (N*M)*k + i + N*j; if ((i < N) && (j < M) && (k < Z)) { denom = pow(P1[index],2) + pow(P2[index],2) + pow(P3[index],2); if (denom > 1.0f) { sq_denom = 1.0f/sqrt(denom); P1[index] = P1[index]*sq_denom; P2[index] = P2[index]*sq_denom; P3[index] = P3[index]*sq_denom; } } return; } __global__ void Proj_dfunc3D_aniso_kernel(float *P1, float *P2, float *P3, int N, int M, int Z, int ImSize) { float val1, val2, val3; //calculate each thread global index int i = blockDim.x * blockIdx.x + threadIdx.x; int j = blockDim.y * blockIdx.y + threadIdx.y; int k = blockDim.z * blockIdx.z + threadIdx.z; int index = (N*M)*k + i 
+ N*j; if ((i < N) && (j < M) && (k < Z)) { val1 = abs(P1[index]); val2 = abs(P2[index]); val3 = abs(P3[index]); if (val1 < 1.0f) {val1 = 1.0f;} if (val2 < 1.0f) {val2 = 1.0f;} if (val3 < 1.0f) {val3 = 1.0f;} P1[index] = P1[index]/val1; P2[index] = P2[index]/val2; P3[index] = P3[index]/val3; } return; } __global__ void Rupd_dfunc3D_kernel(float *P1, float *P1_old, float *P2, float *P2_old, float *P3, float *P3_old, float *R1, float *R2, float *R3, float tkp1, float tk, float multip2, int N, int M, int Z, int ImSize) { //calculate each thread global index int i = blockDim.x * blockIdx.x + threadIdx.x; int j = blockDim.y * blockIdx.y + threadIdx.y; int k = blockDim.z * blockIdx.z + threadIdx.z; int index = (N*M)*k + i + N*j; if ((i < N) && (j < M) && (k < Z)) { R1[index] = P1[index] + multip2*(P1[index] - P1_old[index]); R2[index] = P2[index] + multip2*(P2[index] - P2_old[index]); R3[index] = P3[index] + multip2*(P3[index] - P3_old[index]); } return; } __global__ void dTVnonneg3D_kernel(float* Output, int N, int M, int Z, int num_total) { int i = blockDim.x * blockIdx.x + threadIdx.x; int j = blockDim.y * blockIdx.y + threadIdx.y; int k = blockDim.z * blockIdx.z + threadIdx.z; int index = (N*M)*k + i + N*j; if (index < num_total) { if (Output[index] < 0.0f) Output[index] = 0.0f; } } /*%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%*/ ////////////MAIN HOST FUNCTION /////////////// extern "C" int dTV_FGP_GPU_main(float *Input, float *InputRef, float *Output, float *infovector, float lambdaPar, int iter, float epsil, float eta, int methodTV, int nonneg, int dimX, int dimY, int dimZ) { int deviceCount = -1; // number of devices hipGetDeviceCount(&deviceCount); if (deviceCount == 0) { fprintf(stderr, "No CUDA devices found\n"); return -1; } int count = 0, i; float re, multip,multip2; re = 0.0f; float tk = 1.0f; float tkp1=1.0f; if (dimZ <= 1) { /*2D verson*/ int ImSize = dimX*dimY; float *d_input, *d_update=NULL, *d_update_prev=NULL, *P1=NULL, *P2=NULL, *P1_prev=NULL, *P2_prev=NULL, *R1=NULL, *R2=NULL, *InputRef_x=NULL, *InputRef_y=NULL, *d_InputRef=NULL; dim3 dimBlock(BLKXSIZE2D,BLKYSIZE2D); dim3 dimGrid(idivup(dimX,BLKXSIZE2D), idivup(dimY,BLKYSIZE2D)); /*allocate space for images on device*/ checkCudaErrors( hipMalloc((void**)&d_input,ImSize*sizeof(float)) ); checkCudaErrors( hipMalloc((void**)&d_update,ImSize*sizeof(float)) ); if (epsil != 0.0f) checkCudaErrors( hipMalloc((void**)&d_update_prev,ImSize*sizeof(float)) ); checkCudaErrors( hipMalloc((void**)&P1,ImSize*sizeof(float)) ); checkCudaErrors( hipMalloc((void**)&P2,ImSize*sizeof(float)) ); checkCudaErrors( hipMalloc((void**)&P1_prev,ImSize*sizeof(float)) ); checkCudaErrors( hipMalloc((void**)&P2_prev,ImSize*sizeof(float)) ); checkCudaErrors( hipMalloc((void**)&R1,ImSize*sizeof(float)) ); checkCudaErrors( hipMalloc((void**)&R2,ImSize*sizeof(float)) ); checkCudaErrors( hipMalloc((void**)&d_InputRef,ImSize*sizeof(float)) ); checkCudaErrors( hipMalloc((void**)&InputRef_x,ImSize*sizeof(float)) ); checkCudaErrors( hipMalloc((void**)&InputRef_y,ImSize*sizeof(float)) ); checkCudaErrors( hipMemcpy(d_input,Input,ImSize*sizeof(float),hipMemcpyHostToDevice)); checkCudaErrors( hipMemcpy(d_InputRef,InputRef,ImSize*sizeof(float),hipMemcpyHostToDevice)); hipMemset(P1, 0, ImSize*sizeof(float)); hipMemset(P2, 0, ImSize*sizeof(float)); hipMemset(P1_prev, 0, ImSize*sizeof(float)); hipMemset(P2_prev, 0, ImSize*sizeof(float)); hipMemset(R1, 0, ImSize*sizeof(float)); hipMemset(R2, 0, ImSize*sizeof(float)); hipMemset(InputRef_x, 0, 
ImSize*sizeof(float)); hipMemset(InputRef_y, 0, ImSize*sizeof(float)); /******************** Run CUDA 2D kernel here ********************/ multip = (1.0f/(8.0f*lambdaPar)); /* calculate gradient vectors for the reference */ hipLaunchKernelGGL(( GradNorm_func2D_kernel), dim3(dimGrid),dim3(dimBlock), 0, 0, d_InputRef, InputRef_x, InputRef_y, eta, dimX, dimY, ImSize); checkCudaErrors( hipDeviceSynchronize() ); checkCudaErrors(hipPeekAtLastError() ); /* The main kernel */ for (i = 0; i < iter; i++) { if ((epsil != 0.0f) && (i % 5 == 0)) { hipLaunchKernelGGL(( dTVcopy_kernel2D), dim3(dimGrid),dim3(dimBlock), 0, 0, d_update, d_update_prev, dimX, dimY, ImSize); checkCudaErrors( hipDeviceSynchronize() ); checkCudaErrors(hipPeekAtLastError() ); } /*projects a 2D vector field R-1,2 onto the orthogonal complement of another 2D vector field InputRef_xy*/ hipLaunchKernelGGL(( ProjectVect_func2D_kernel), dim3(dimGrid),dim3(dimBlock), 0, 0, R1, R2, InputRef_x, InputRef_y, dimX, dimY, ImSize); checkCudaErrors( hipDeviceSynchronize() ); checkCudaErrors(hipPeekAtLastError() ); /* computing the gradient of the objective function */ hipLaunchKernelGGL(( Obj_dfunc2D_kernel), dim3(dimGrid),dim3(dimBlock), 0, 0, d_input, d_update, R1, R2, dimX, dimY, ImSize, lambdaPar); checkCudaErrors( hipDeviceSynchronize() ); checkCudaErrors(hipPeekAtLastError() ); if (nonneg != 0) { hipLaunchKernelGGL(( dTVnonneg2D_kernel), dim3(dimGrid),dim3(dimBlock), 0, 0, d_update, dimX, dimY, ImSize); checkCudaErrors( hipDeviceSynchronize() ); checkCudaErrors(hipPeekAtLastError() ); } /*Taking a step towards minus of the gradient*/ hipLaunchKernelGGL(( Grad_dfunc2D_kernel), dim3(dimGrid),dim3(dimBlock), 0, 0, P1, P2, d_update, R1, R2, InputRef_x, InputRef_y, dimX, dimY, ImSize, multip); checkCudaErrors( hipDeviceSynchronize() ); checkCudaErrors(hipPeekAtLastError() ); /* projection step */ if (methodTV == 0)hipLaunchKernelGGL(( Proj_dfunc2D_iso_kernel), dim3(dimGrid),dim3(dimBlock), 0, 0, P1, P2, dimX, dimY, ImSize); /*isotropic TV*/ elsehipLaunchKernelGGL(( Proj_dfunc2D_aniso_kernel), dim3(dimGrid),dim3(dimBlock), 0, 0, P1, P2, dimX, dimY, ImSize); /*anisotropic TV*/ checkCudaErrors( hipDeviceSynchronize() ); checkCudaErrors(hipPeekAtLastError() ); tkp1 = (1.0f + sqrt(1.0f + 4.0f*tk*tk))*0.5f; multip2 = ((tk-1.0f)/tkp1); hipLaunchKernelGGL(( Rupd_dfunc2D_kernel), dim3(dimGrid),dim3(dimBlock), 0, 0, P1, P1_prev, P2, P2_prev, R1, R2, tkp1, tk, multip2, dimX, dimY, ImSize); checkCudaErrors( hipDeviceSynchronize() ); checkCudaErrors(hipPeekAtLastError() ); hipLaunchKernelGGL(( dTVcopy_kernel2D), dim3(dimGrid),dim3(dimBlock), 0, 0, P1, P1_prev, dimX, dimY, ImSize); checkCudaErrors( hipDeviceSynchronize() ); checkCudaErrors(hipPeekAtLastError() ); hipLaunchKernelGGL(( dTVcopy_kernel2D), dim3(dimGrid),dim3(dimBlock), 0, 0, P2, P2_prev, dimX, dimY, ImSize); checkCudaErrors( hipDeviceSynchronize() ); checkCudaErrors(hipPeekAtLastError() ); tk = tkp1; if ((epsil != 0.0f) && (i % 5 == 0)) { /* calculate norm - stopping rules using the Thrust library */ hipLaunchKernelGGL(( dTVResidCalc2D_kernel), dim3(dimGrid),dim3(dimBlock), 0, 0, d_update, d_update_prev, P1, dimX, dimY, ImSize); checkCudaErrors( hipDeviceSynchronize() ); checkCudaErrors(hipPeekAtLastError() ); // setup arguments square<float> unary_op; thrust::plus<float> binary_op; thrust::device_vector<float> d_vec(P1, P1 + ImSize); float reduction = std::sqrt(thrust::transform_reduce(d_vec.begin(), d_vec.end(), unary_op, 0.0f, binary_op)); thrust::device_vector<float> d_vec2(d_update, 
d_update + ImSize); float reduction2 = std::sqrt(thrust::transform_reduce(d_vec2.begin(), d_vec2.end(), unary_op, 0.0f, binary_op)); // compute norm re = (reduction/reduction2); if (re < epsil) count++; if (count > 3) break; } } /***************************************************************/ //copy result matrix from device to host memory hipMemcpy(Output,d_update,ImSize*sizeof(float),hipMemcpyDeviceToHost); hipFree(d_input); hipFree(d_update); if (epsil != 0.0f) hipFree(d_update_prev); hipFree(P1); hipFree(P2); hipFree(P1_prev); hipFree(P2_prev); hipFree(R1); hipFree(R2); hipFree(d_InputRef); hipFree(InputRef_x); hipFree(InputRef_y); } else { /*3D verson*/ int ImSize = dimX*dimY*dimZ; float *d_input, *d_update=NULL, *d_update_prev, *P1=NULL, *P2=NULL, *P3=NULL, *P1_prev=NULL, *P2_prev=NULL, *P3_prev=NULL, *R1=NULL, *R2=NULL, *R3=NULL, *InputRef_x=NULL, *InputRef_y=NULL, *InputRef_z=NULL, *d_InputRef=NULL; dim3 dimBlock(BLKXSIZE,BLKYSIZE,BLKZSIZE); dim3 dimGrid(idivup(dimX,BLKXSIZE), idivup(dimY,BLKYSIZE),idivup(dimZ,BLKZSIZE)); /*allocate space for images on device*/ checkCudaErrors( hipMalloc((void**)&d_input,ImSize*sizeof(float)) ); checkCudaErrors( hipMalloc((void**)&d_update,ImSize*sizeof(float)) ); if (epsil != 0.0f) checkCudaErrors( hipMalloc((void**)&d_update_prev,ImSize*sizeof(float)) ); checkCudaErrors( hipMalloc((void**)&P1,ImSize*sizeof(float)) ); checkCudaErrors( hipMalloc((void**)&P2,ImSize*sizeof(float)) ); checkCudaErrors( hipMalloc((void**)&P3,ImSize*sizeof(float)) ); checkCudaErrors( hipMalloc((void**)&P1_prev,ImSize*sizeof(float)) ); checkCudaErrors( hipMalloc((void**)&P2_prev,ImSize*sizeof(float)) ); checkCudaErrors( hipMalloc((void**)&P3_prev,ImSize*sizeof(float)) ); checkCudaErrors( hipMalloc((void**)&R1,ImSize*sizeof(float)) ); checkCudaErrors( hipMalloc((void**)&R2,ImSize*sizeof(float)) ); checkCudaErrors( hipMalloc((void**)&R3,ImSize*sizeof(float)) ); checkCudaErrors( hipMalloc((void**)&d_InputRef,ImSize*sizeof(float)) ); checkCudaErrors( hipMalloc((void**)&InputRef_x,ImSize*sizeof(float)) ); checkCudaErrors( hipMalloc((void**)&InputRef_y,ImSize*sizeof(float)) ); checkCudaErrors( hipMalloc((void**)&InputRef_z,ImSize*sizeof(float)) ); checkCudaErrors( hipMemcpy(d_input,Input,ImSize*sizeof(float),hipMemcpyHostToDevice)); checkCudaErrors( hipMemcpy(d_InputRef,InputRef,ImSize*sizeof(float),hipMemcpyHostToDevice)); hipMemset(P1, 0, ImSize*sizeof(float)); hipMemset(P2, 0, ImSize*sizeof(float)); hipMemset(P3, 0, ImSize*sizeof(float)); hipMemset(P1_prev, 0, ImSize*sizeof(float)); hipMemset(P2_prev, 0, ImSize*sizeof(float)); hipMemset(P3_prev, 0, ImSize*sizeof(float)); hipMemset(R1, 0, ImSize*sizeof(float)); hipMemset(R2, 0, ImSize*sizeof(float)); hipMemset(R3, 0, ImSize*sizeof(float)); hipMemset(InputRef_x, 0, ImSize*sizeof(float)); hipMemset(InputRef_y, 0, ImSize*sizeof(float)); hipMemset(InputRef_z, 0, ImSize*sizeof(float)); /********************** Run CUDA 3D kernel here ********************/ multip = (1.0f/(26.0f*lambdaPar)); /* calculate gradient vectors for the reference */ hipLaunchKernelGGL(( GradNorm_func3D_kernel), dim3(dimGrid),dim3(dimBlock), 0, 0, d_InputRef, InputRef_x, InputRef_y, InputRef_z, eta, dimX, dimY, dimZ, ImSize); checkCudaErrors( hipDeviceSynchronize() ); checkCudaErrors(hipPeekAtLastError() ); /* The main kernel */ for (i = 0; i < iter; i++) { if ((epsil != 0.0f) && (i % 5 == 0)) { hipLaunchKernelGGL(( dTVcopy_kernel3D), dim3(dimGrid),dim3(dimBlock), 0, 0, d_update, d_update_prev, dimX, dimY, dimZ, ImSize); checkCudaErrors( 
hipDeviceSynchronize() ); checkCudaErrors(hipPeekAtLastError() ); } /*projects a 3D vector field R-1,2,3 onto the orthogonal complement of another 3D vector field InputRef_xyz*/ hipLaunchKernelGGL(( ProjectVect_func3D_kernel), dim3(dimGrid),dim3(dimBlock), 0, 0, R1, R2, R3, InputRef_x, InputRef_y, InputRef_z, dimX, dimY, dimZ, ImSize); checkCudaErrors( hipDeviceSynchronize() ); checkCudaErrors(hipPeekAtLastError() ); /* computing the gradient of the objective function */ hipLaunchKernelGGL(( Obj_dfunc3D_kernel), dim3(dimGrid),dim3(dimBlock), 0, 0, d_input, d_update, R1, R2, R3, dimX, dimY, dimZ, ImSize, lambdaPar); checkCudaErrors( hipDeviceSynchronize() ); checkCudaErrors(hipPeekAtLastError() ); if (nonneg != 0) { hipLaunchKernelGGL(( dTVnonneg3D_kernel), dim3(dimGrid),dim3(dimBlock), 0, 0, d_update, dimX, dimY, dimZ, ImSize); checkCudaErrors( hipDeviceSynchronize() ); checkCudaErrors(hipPeekAtLastError() ); } /*Taking a step towards minus of the gradient*/ hipLaunchKernelGGL(( Grad_dfunc3D_kernel), dim3(dimGrid),dim3(dimBlock), 0, 0, P1, P2, P3, d_update, R1, R2, R3, InputRef_x, InputRef_y, InputRef_z, dimX, dimY, dimZ, ImSize, multip); checkCudaErrors( hipDeviceSynchronize() ); checkCudaErrors(hipPeekAtLastError() ); /* projection step */ if (methodTV == 0)hipLaunchKernelGGL(( Proj_dfunc3D_iso_kernel), dim3(dimGrid),dim3(dimBlock), 0, 0, P1, P2, P3, dimX, dimY, dimZ, ImSize); /* isotropic kernel */ elsehipLaunchKernelGGL(( Proj_dfunc3D_aniso_kernel), dim3(dimGrid),dim3(dimBlock), 0, 0, P1, P2, P3, dimX, dimY, dimZ, ImSize); /* anisotropic kernel */ checkCudaErrors( hipDeviceSynchronize() ); checkCudaErrors(hipPeekAtLastError() ); tkp1 = (1.0f + sqrt(1.0f + 4.0f*tk*tk))*0.5f; multip2 = ((tk-1.0f)/tkp1); hipLaunchKernelGGL(( Rupd_dfunc3D_kernel), dim3(dimGrid),dim3(dimBlock), 0, 0, P1, P1_prev, P2, P2_prev, P3, P3_prev, R1, R2, R3, tkp1, tk, multip2, dimX, dimY, dimZ, ImSize); checkCudaErrors( hipDeviceSynchronize() ); checkCudaErrors(hipPeekAtLastError() ); hipLaunchKernelGGL(( dTVcopy_kernel3D), dim3(dimGrid),dim3(dimBlock), 0, 0, P1, P1_prev, dimX, dimY, dimZ, ImSize); checkCudaErrors( hipDeviceSynchronize() ); checkCudaErrors(hipPeekAtLastError() ); hipLaunchKernelGGL(( dTVcopy_kernel3D), dim3(dimGrid),dim3(dimBlock), 0, 0, P2, P2_prev, dimX, dimY, dimZ, ImSize); checkCudaErrors( hipDeviceSynchronize() ); checkCudaErrors(hipPeekAtLastError() ); hipLaunchKernelGGL(( dTVcopy_kernel3D), dim3(dimGrid),dim3(dimBlock), 0, 0, P3, P3_prev, dimX, dimY, dimZ, ImSize); checkCudaErrors( hipDeviceSynchronize() ); checkCudaErrors(hipPeekAtLastError() ); tk = tkp1; if ((epsil != 0.0f) && (i % 5 == 0)) { /* calculate norm - stopping rules using the Thrust library */ hipLaunchKernelGGL(( dTVResidCalc3D_kernel), dim3(dimGrid),dim3(dimBlock), 0, 0, d_update, d_update_prev, P1, dimX, dimY, dimZ, ImSize); checkCudaErrors( hipDeviceSynchronize() ); checkCudaErrors(hipPeekAtLastError() ); // setup arguments square<float> unary_op; thrust::plus<float> binary_op; thrust::device_vector<float> d_vec(P1, P1 + ImSize); float reduction = std::sqrt(thrust::transform_reduce(d_vec.begin(), d_vec.end(), unary_op, 0.0f, binary_op)); thrust::device_vector<float> d_vec2(d_update, d_update + ImSize); float reduction2 = std::sqrt(thrust::transform_reduce(d_vec2.begin(), d_vec2.end(), unary_op, 0.0f, binary_op)); // compute norm re = (reduction/reduction2); if (re < epsil) count++; if (count > 3) break; } } /***************************************************************/ //copy result matrix from device to host memory 
hipMemcpy(Output,d_update,ImSize*sizeof(float),hipMemcpyDeviceToHost); hipFree(d_input); hipFree(d_update); if (epsil != 0.0f) hipFree(d_update_prev); hipFree(P1); hipFree(P2); hipFree(P3); hipFree(P1_prev); hipFree(P2_prev); hipFree(P3_prev); hipFree(R1); hipFree(R2); hipFree(R3); hipFree(InputRef_x); hipFree(InputRef_y); hipFree(InputRef_z); hipFree(d_InputRef); } /*adding info into info_vector */ infovector[0] = (float)(i); /*iterations number (if stopped earlier based on tolerance)*/ infovector[1] = re; /* reached tolerance */ return 0; }
267b9d476440bb1b7a51e655d02564b90e8a1aec.cu
/* This work is part of the Core Imaging Library developed by Visual Analytics and Imaging System Group of the Science Technology Facilities Council, STFC Copyright 2017 Daniil Kazantsev Copyright 2017 Srikanth Nagella, Edoardo Pasca Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include "shared.h" #include "dTV_FGP_GPU_core.h" #include <thrust/functional.h> #include <thrust/device_vector.h> #include <thrust/transform_reduce.h> /* CUDA implementation of FGP-dTV [1,2] denoising/regularization model (2D/3D case) * which employs structural similarity of the level sets of two images/volumes, see [1,2] * The current implementation updates image 1 while image 2 is being fixed. * * Input Parameters: * 1. Noisy image/volume [REQUIRED] * 2. Additional reference image/volume of the same dimensions as (1) [REQUIRED] * 3. lambdaPar - regularization parameter [REQUIRED] * 4. Number of iterations [OPTIONAL] * 5. eplsilon: tolerance constant [OPTIONAL] * 6. eta: smoothing constant to calculate gradient of the reference [OPTIONAL] * * 7. TV-type: methodTV - 'iso' (0) or 'l1' (1) [OPTIONAL] * 8. nonneg: 'nonnegativity (0 is OFF by default) [OPTIONAL] * Output: * [1] Filtered/regularized image/volume * [2] Information vector which contains [iteration no., reached tolerance] * * This function is based on the Matlab's codes and papers by * [1] Amir Beck and Marc Teboulle, "Fast Gradient-Based Algorithms for Constrained Total Variation Image Denoising and Deblurring Problems" * [2] M. J. Ehrhardt and M. M. Betcke, Multi-Contrast MRI Reconstruction with Structure-Guided Total Variation, SIAM Journal on Imaging Sciences 9(3), pp. 1084–1106 */ #define BLKXSIZE2D 16 #define BLKYSIZE2D 16 #define BLKXSIZE 8 #define BLKYSIZE 8 #define BLKZSIZE 8 #define idivup(a, b) ( ((a)%(b) != 0) ? 
(a)/(b)+1 : (a)/(b) ) //struct square { __host__ __device__ float operator()(float x) { return x * x; } }; /************************************************/ /*****************2D modules*********************/ /************************************************/ __global__ void GradNorm_func2D_kernel(float *Refd, float *Refd_x, float *Refd_y, float eta, int N, int M, int ImSize) { float val1, val2, gradX, gradY, magn; //calculate each thread global index const int xIndex=blockIdx.x*blockDim.x+threadIdx.x; const int yIndex=blockIdx.y*blockDim.y+threadIdx.y; int index = xIndex + N*yIndex; if ((xIndex < N) && (yIndex < M)) { /* boundary conditions */ if (xIndex >= N-1) val1 = 0.0f; else val1 = Refd[(xIndex+1) + N*yIndex]; if (yIndex >= M-1) val2 = 0.0f; else val2 = Refd[(xIndex) + N*(yIndex + 1)]; gradX = val1 - Refd[index]; gradY = val2 - Refd[index]; magn = pow(gradX,2) + pow(gradY,2); magn = sqrt(magn + pow(eta,2)); Refd_x[index] = gradX/magn; Refd_y[index] = gradY/magn; } return; } __global__ void ProjectVect_func2D_kernel(float *R1, float *R2, float *Refd_x, float *Refd_y, int N, int M, int ImSize) { float in_prod; //calculate each thread global index const int xIndex=blockIdx.x*blockDim.x+threadIdx.x; const int yIndex=blockIdx.y*blockDim.y+threadIdx.y; int index = xIndex + N*yIndex; if ((xIndex < N) && (yIndex < M)) { in_prod = R1[index]*Refd_x[index] + R2[index]*Refd_y[index]; /* calculate inner product */ R1[index] = R1[index] - in_prod*Refd_x[index]; R2[index] = R2[index] - in_prod*Refd_y[index]; } return; } __global__ void Obj_dfunc2D_kernel(float *Ad, float *D, float *R1, float *R2, int N, int M, int ImSize, float lambda) { float val1,val2; //calculate each thread global index const int xIndex=blockIdx.x*blockDim.x+threadIdx.x; const int yIndex=blockIdx.y*blockDim.y+threadIdx.y; int index = xIndex + N*yIndex; if ((xIndex < N) && (yIndex < M)) { if (xIndex <= 0) {val1 = 0.0f;} else {val1 = R1[(xIndex-1) + N*yIndex];} if (yIndex <= 0) {val2 = 0.0f;} else {val2 = R2[xIndex + N*(yIndex-1)];} //Write final result to global memory D[index] = Ad[index] - lambda*(R1[index] + R2[index] - val1 - val2); } return; } __global__ void Grad_dfunc2D_kernel(float *P1, float *P2, float *D, float *R1, float *R2, float *Refd_x, float *Refd_y, int N, int M, int ImSize, float multip) { float val1,val2,in_prod; //calculate each thread global index const int xIndex=blockIdx.x*blockDim.x+threadIdx.x; const int yIndex=blockIdx.y*blockDim.y+threadIdx.y; int index = xIndex + N*yIndex; if ((xIndex < N) && (yIndex < M)) { /* boundary conditions */ if (xIndex >= N-1) val1 = 0.0f; else val1 = D[index] - D[(xIndex+1) + N*yIndex]; if (yIndex >= M-1) val2 = 0.0f; else val2 = D[index] - D[(xIndex) + N*(yIndex + 1)]; in_prod = val1*Refd_x[index] + val2*Refd_y[index]; /* calculate inner product */ val1 = val1 - in_prod*Refd_x[index]; val2 = val2 - in_prod*Refd_y[index]; //Write final result to global memory P1[index] = R1[index] + multip*val1; P2[index] = R2[index] + multip*val2; } return; } __global__ void Proj_dfunc2D_iso_kernel(float *P1, float *P2, int N, int M, int ImSize) { float denom; //calculate each thread global index const int xIndex=blockIdx.x*blockDim.x+threadIdx.x; const int yIndex=blockIdx.y*blockDim.y+threadIdx.y; int index = xIndex + N*yIndex; if ((xIndex < N) && (yIndex < M)) { denom = pow(P1[index],2) + pow(P2[index],2); if (denom > 1.0f) { P1[index] = P1[index]/sqrt(denom); P2[index] = P2[index]/sqrt(denom); } } return; } __global__ void Proj_dfunc2D_aniso_kernel(float *P1, float *P2, int N, int M, int 
ImSize) { float val1, val2; //calculate each thread global index const int xIndex=blockIdx.x*blockDim.x+threadIdx.x; const int yIndex=blockIdx.y*blockDim.y+threadIdx.y; int index = xIndex + N*yIndex; if ((xIndex < N) && (yIndex < M)) { val1 = abs(P1[index]); val2 = abs(P2[index]); if (val1 < 1.0f) {val1 = 1.0f;} if (val2 < 1.0f) {val2 = 1.0f;} P1[index] = P1[index]/val1; P2[index] = P2[index]/val2; } return; } __global__ void Rupd_dfunc2D_kernel(float *P1, float *P1_old, float *P2, float *P2_old, float *R1, float *R2, float tkp1, float tk, float multip2, int N, int M, int ImSize) { //calculate each thread global index const int xIndex=blockIdx.x*blockDim.x+threadIdx.x; const int yIndex=blockIdx.y*blockDim.y+threadIdx.y; int index = xIndex + N*yIndex; if ((xIndex < N) && (yIndex < M)) { R1[index] = P1[index] + multip2*(P1[index] - P1_old[index]); R2[index] = P2[index] + multip2*(P2[index] - P2_old[index]); } return; } __global__ void dTVnonneg2D_kernel(float* Output, int N, int M, int num_total) { int xIndex = blockDim.x * blockIdx.x + threadIdx.x; int yIndex = blockDim.y * blockIdx.y + threadIdx.y; int index = xIndex + N*yIndex; if (index < num_total) { if (Output[index] < 0.0f) Output[index] = 0.0f; } } __global__ void dTVcopy_kernel2D(float *Input, float* Output, int N, int M, int num_total) { int xIndex = blockDim.x * blockIdx.x + threadIdx.x; int yIndex = blockDim.y * blockIdx.y + threadIdx.y; int index = xIndex + N*yIndex; if (index < num_total) { Output[index] = Input[index]; } } __global__ void dTVcopy_kernel3D(float *Input, float* Output, int N, int M, int Z, int num_total) { int i = blockDim.x * blockIdx.x + threadIdx.x; int j = blockDim.y * blockIdx.y + threadIdx.y; int k = blockDim.z * blockIdx.z + threadIdx.z; int index = (N*M)*k + i + N*j; if (index < num_total) { Output[index] = Input[index]; } } __global__ void dTVResidCalc2D_kernel(float *Input1, float *Input2, float* Output, int N, int M, int num_total) { int xIndex = blockDim.x * blockIdx.x + threadIdx.x; int yIndex = blockDim.y * blockIdx.y + threadIdx.y; int index = xIndex + N*yIndex; if (index < num_total) { Output[index] = Input1[index] - Input2[index]; } } __global__ void dTVResidCalc3D_kernel(float *Input1, float *Input2, float* Output, int N, int M, int Z, int num_total) { int i = blockDim.x * blockIdx.x + threadIdx.x; int j = blockDim.y * blockIdx.y + threadIdx.y; int k = blockDim.z * blockIdx.z + threadIdx.z; int index = (N*M)*k + i + N*j; if (index < num_total) { Output[index] = Input1[index] - Input2[index]; } } /************************************************/ /*****************3D modules*********************/ /************************************************/ __global__ void GradNorm_func3D_kernel(float *Refd, float *Refd_x, float *Refd_y, float *Refd_z, float eta, int N, int M, int Z, int ImSize) { float val1, val2, val3, gradX, gradY, gradZ, magn; //calculate each thread global index int i = blockDim.x * blockIdx.x + threadIdx.x; int j = blockDim.y * blockIdx.y + threadIdx.y; int k = blockDim.z * blockIdx.z + threadIdx.z; int index = (N*M)*k + i + N*j; if ((i < N) && (j < M) && (k < Z)) { /* boundary conditions */ if (i >= N-1) val1 = 0.0f; else val1 = Refd[(N*M)*k + (i+1) + N*j]; if (j >= M-1) val2 = 0.0f; else val2 = Refd[(N*M)*k + i + N*(j+1)]; if (k >= Z-1) val3 = 0.0f; else val3 = Refd[(N*M)*(k+1) + i + N*j]; gradX = val1 - Refd[index]; gradY = val2 - Refd[index]; gradZ = val3 - Refd[index]; magn = pow(gradX,2) + pow(gradY,2) + pow(gradZ,2); magn = sqrt(magn + pow(eta,2)); Refd_x[index] = gradX/magn; 
Refd_y[index] = gradY/magn; Refd_z[index] = gradZ/magn; } return; } __global__ void ProjectVect_func3D_kernel(float *R1, float *R2, float *R3, float *Refd_x, float *Refd_y, float *Refd_z, int N, int M, int Z, int ImSize) { float in_prod; //calculate each thread global index int i = blockDim.x * blockIdx.x + threadIdx.x; int j = blockDim.y * blockIdx.y + threadIdx.y; int k = blockDim.z * blockIdx.z + threadIdx.z; int index = (N*M)*k + i + N*j; if ((i < N) && (j < M) && (k < Z)) { in_prod = R1[index]*Refd_x[index] + R2[index]*Refd_y[index] + R3[index]*Refd_z[index]; /* calculate inner product */ R1[index] = R1[index] - in_prod*Refd_x[index]; R2[index] = R2[index] - in_prod*Refd_y[index]; R3[index] = R3[index] - in_prod*Refd_z[index]; } return; } __global__ void Obj_dfunc3D_kernel(float *Ad, float *D, float *R1, float *R2, float *R3, int N, int M, int Z, int ImSize, float lambda) { float val1,val2,val3; //calculate each thread global index int i = blockDim.x * blockIdx.x + threadIdx.x; int j = blockDim.y * blockIdx.y + threadIdx.y; int k = blockDim.z * blockIdx.z + threadIdx.z; int index = (N*M)*k + i + N*j; if ((i < N) && (j < M) && (k < Z)) { if (i <= 0) {val1 = 0.0f;} else {val1 = R1[(N*M)*(k) + (i-1) + N*j];} if (j <= 0) {val2 = 0.0f;} else {val2 = R2[(N*M)*(k) + i + N*(j-1)];} if (k <= 0) {val3 = 0.0f;} else {val3 = R3[(N*M)*(k-1) + i + N*j];} //Write final result to global memory D[index] = Ad[index] - lambda*(R1[index] + R2[index] + R3[index] - val1 - val2 - val3); } return; } __global__ void Grad_dfunc3D_kernel(float *P1, float *P2, float *P3, float *D, float *R1, float *R2, float *R3, float *Refd_x, float *Refd_y, float *Refd_z, int N, int M, int Z, int ImSize, float multip) { float val1,val2,val3,in_prod; //calculate each thread global index int i = blockDim.x * blockIdx.x + threadIdx.x; int j = blockDim.y * blockIdx.y + threadIdx.y; int k = blockDim.z * blockIdx.z + threadIdx.z; int index = (N*M)*k + i + N*j; if ((i < N) && (j < M) && (k < Z)) { /* boundary conditions */ if (i >= N-1) val1 = 0.0f; else val1 = D[index] - D[(N*M)*(k) + (i+1) + N*j]; if (j >= M-1) val2 = 0.0f; else val2 = D[index] - D[(N*M)*(k) + i + N*(j+1)]; if (k >= Z-1) val3 = 0.0f; else val3 = D[index] - D[(N*M)*(k+1) + i + N*j]; in_prod = val1*Refd_x[index] + val2*Refd_y[index] + val3*Refd_z[index]; /* calculate inner product */ val1 = val1 - in_prod*Refd_x[index]; val2 = val2 - in_prod*Refd_y[index]; val3 = val3 - in_prod*Refd_z[index]; //Write final result to global memory P1[index] = R1[index] + multip*val1; P2[index] = R2[index] + multip*val2; P3[index] = R3[index] + multip*val3; } return; } __global__ void Proj_dfunc3D_iso_kernel(float *P1, float *P2, float *P3, int N, int M, int Z, int ImSize) { float denom,sq_denom; //calculate each thread global index int i = blockDim.x * blockIdx.x + threadIdx.x; int j = blockDim.y * blockIdx.y + threadIdx.y; int k = blockDim.z * blockIdx.z + threadIdx.z; int index = (N*M)*k + i + N*j; if ((i < N) && (j < M) && (k < Z)) { denom = pow(P1[index],2) + pow(P2[index],2) + pow(P3[index],2); if (denom > 1.0f) { sq_denom = 1.0f/sqrt(denom); P1[index] = P1[index]*sq_denom; P2[index] = P2[index]*sq_denom; P3[index] = P3[index]*sq_denom; } } return; } __global__ void Proj_dfunc3D_aniso_kernel(float *P1, float *P2, float *P3, int N, int M, int Z, int ImSize) { float val1, val2, val3; //calculate each thread global index int i = blockDim.x * blockIdx.x + threadIdx.x; int j = blockDim.y * blockIdx.y + threadIdx.y; int k = blockDim.z * blockIdx.z + threadIdx.z; int index = (N*M)*k + i 
+ N*j; if ((i < N) && (j < M) && (k < Z)) { val1 = abs(P1[index]); val2 = abs(P2[index]); val3 = abs(P3[index]); if (val1 < 1.0f) {val1 = 1.0f;} if (val2 < 1.0f) {val2 = 1.0f;} if (val3 < 1.0f) {val3 = 1.0f;} P1[index] = P1[index]/val1; P2[index] = P2[index]/val2; P3[index] = P3[index]/val3; } return; } __global__ void Rupd_dfunc3D_kernel(float *P1, float *P1_old, float *P2, float *P2_old, float *P3, float *P3_old, float *R1, float *R2, float *R3, float tkp1, float tk, float multip2, int N, int M, int Z, int ImSize) { //calculate each thread global index int i = blockDim.x * blockIdx.x + threadIdx.x; int j = blockDim.y * blockIdx.y + threadIdx.y; int k = blockDim.z * blockIdx.z + threadIdx.z; int index = (N*M)*k + i + N*j; if ((i < N) && (j < M) && (k < Z)) { R1[index] = P1[index] + multip2*(P1[index] - P1_old[index]); R2[index] = P2[index] + multip2*(P2[index] - P2_old[index]); R3[index] = P3[index] + multip2*(P3[index] - P3_old[index]); } return; } __global__ void dTVnonneg3D_kernel(float* Output, int N, int M, int Z, int num_total) { int i = blockDim.x * blockIdx.x + threadIdx.x; int j = blockDim.y * blockIdx.y + threadIdx.y; int k = blockDim.z * blockIdx.z + threadIdx.z; int index = (N*M)*k + i + N*j; if (index < num_total) { if (Output[index] < 0.0f) Output[index] = 0.0f; } } /*%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%*/ ////////////MAIN HOST FUNCTION /////////////// extern "C" int dTV_FGP_GPU_main(float *Input, float *InputRef, float *Output, float *infovector, float lambdaPar, int iter, float epsil, float eta, int methodTV, int nonneg, int dimX, int dimY, int dimZ) { int deviceCount = -1; // number of devices cudaGetDeviceCount(&deviceCount); if (deviceCount == 0) { fprintf(stderr, "No CUDA devices found\n"); return -1; } int count = 0, i; float re, multip,multip2; re = 0.0f; float tk = 1.0f; float tkp1=1.0f; if (dimZ <= 1) { /*2D verson*/ int ImSize = dimX*dimY; float *d_input, *d_update=NULL, *d_update_prev=NULL, *P1=NULL, *P2=NULL, *P1_prev=NULL, *P2_prev=NULL, *R1=NULL, *R2=NULL, *InputRef_x=NULL, *InputRef_y=NULL, *d_InputRef=NULL; dim3 dimBlock(BLKXSIZE2D,BLKYSIZE2D); dim3 dimGrid(idivup(dimX,BLKXSIZE2D), idivup(dimY,BLKYSIZE2D)); /*allocate space for images on device*/ checkCudaErrors( cudaMalloc((void**)&d_input,ImSize*sizeof(float)) ); checkCudaErrors( cudaMalloc((void**)&d_update,ImSize*sizeof(float)) ); if (epsil != 0.0f) checkCudaErrors( cudaMalloc((void**)&d_update_prev,ImSize*sizeof(float)) ); checkCudaErrors( cudaMalloc((void**)&P1,ImSize*sizeof(float)) ); checkCudaErrors( cudaMalloc((void**)&P2,ImSize*sizeof(float)) ); checkCudaErrors( cudaMalloc((void**)&P1_prev,ImSize*sizeof(float)) ); checkCudaErrors( cudaMalloc((void**)&P2_prev,ImSize*sizeof(float)) ); checkCudaErrors( cudaMalloc((void**)&R1,ImSize*sizeof(float)) ); checkCudaErrors( cudaMalloc((void**)&R2,ImSize*sizeof(float)) ); checkCudaErrors( cudaMalloc((void**)&d_InputRef,ImSize*sizeof(float)) ); checkCudaErrors( cudaMalloc((void**)&InputRef_x,ImSize*sizeof(float)) ); checkCudaErrors( cudaMalloc((void**)&InputRef_y,ImSize*sizeof(float)) ); checkCudaErrors( cudaMemcpy(d_input,Input,ImSize*sizeof(float),cudaMemcpyHostToDevice)); checkCudaErrors( cudaMemcpy(d_InputRef,InputRef,ImSize*sizeof(float),cudaMemcpyHostToDevice)); cudaMemset(P1, 0, ImSize*sizeof(float)); cudaMemset(P2, 0, ImSize*sizeof(float)); cudaMemset(P1_prev, 0, ImSize*sizeof(float)); cudaMemset(P2_prev, 0, ImSize*sizeof(float)); cudaMemset(R1, 0, ImSize*sizeof(float)); cudaMemset(R2, 0, ImSize*sizeof(float)); 
cudaMemset(InputRef_x, 0, ImSize*sizeof(float)); cudaMemset(InputRef_y, 0, ImSize*sizeof(float)); /******************** Run CUDA 2D kernel here ********************/ multip = (1.0f/(8.0f*lambdaPar)); /* calculate gradient vectors for the reference */ GradNorm_func2D_kernel<<<dimGrid,dimBlock>>>(d_InputRef, InputRef_x, InputRef_y, eta, dimX, dimY, ImSize); checkCudaErrors( cudaDeviceSynchronize() ); checkCudaErrors(cudaPeekAtLastError() ); /* The main kernel */ for (i = 0; i < iter; i++) { if ((epsil != 0.0f) && (i % 5 == 0)) { dTVcopy_kernel2D<<<dimGrid,dimBlock>>>(d_update, d_update_prev, dimX, dimY, ImSize); checkCudaErrors( cudaDeviceSynchronize() ); checkCudaErrors(cudaPeekAtLastError() ); } /*projects a 2D vector field R-1,2 onto the orthogonal complement of another 2D vector field InputRef_xy*/ ProjectVect_func2D_kernel<<<dimGrid,dimBlock>>>(R1, R2, InputRef_x, InputRef_y, dimX, dimY, ImSize); checkCudaErrors( cudaDeviceSynchronize() ); checkCudaErrors(cudaPeekAtLastError() ); /* computing the gradient of the objective function */ Obj_dfunc2D_kernel<<<dimGrid,dimBlock>>>(d_input, d_update, R1, R2, dimX, dimY, ImSize, lambdaPar); checkCudaErrors( cudaDeviceSynchronize() ); checkCudaErrors(cudaPeekAtLastError() ); if (nonneg != 0) { dTVnonneg2D_kernel<<<dimGrid,dimBlock>>>(d_update, dimX, dimY, ImSize); checkCudaErrors( cudaDeviceSynchronize() ); checkCudaErrors(cudaPeekAtLastError() ); } /*Taking a step towards minus of the gradient*/ Grad_dfunc2D_kernel<<<dimGrid,dimBlock>>>(P1, P2, d_update, R1, R2, InputRef_x, InputRef_y, dimX, dimY, ImSize, multip); checkCudaErrors( cudaDeviceSynchronize() ); checkCudaErrors(cudaPeekAtLastError() ); /* projection step */ if (methodTV == 0) Proj_dfunc2D_iso_kernel<<<dimGrid,dimBlock>>>(P1, P2, dimX, dimY, ImSize); /*isotropic TV*/ else Proj_dfunc2D_aniso_kernel<<<dimGrid,dimBlock>>>(P1, P2, dimX, dimY, ImSize); /*anisotropic TV*/ checkCudaErrors( cudaDeviceSynchronize() ); checkCudaErrors(cudaPeekAtLastError() ); tkp1 = (1.0f + sqrt(1.0f + 4.0f*tk*tk))*0.5f; multip2 = ((tk-1.0f)/tkp1); Rupd_dfunc2D_kernel<<<dimGrid,dimBlock>>>(P1, P1_prev, P2, P2_prev, R1, R2, tkp1, tk, multip2, dimX, dimY, ImSize); checkCudaErrors( cudaDeviceSynchronize() ); checkCudaErrors(cudaPeekAtLastError() ); dTVcopy_kernel2D<<<dimGrid,dimBlock>>>(P1, P1_prev, dimX, dimY, ImSize); checkCudaErrors( cudaDeviceSynchronize() ); checkCudaErrors(cudaPeekAtLastError() ); dTVcopy_kernel2D<<<dimGrid,dimBlock>>>(P2, P2_prev, dimX, dimY, ImSize); checkCudaErrors( cudaDeviceSynchronize() ); checkCudaErrors(cudaPeekAtLastError() ); tk = tkp1; if ((epsil != 0.0f) && (i % 5 == 0)) { /* calculate norm - stopping rules using the Thrust library */ dTVResidCalc2D_kernel<<<dimGrid,dimBlock>>>(d_update, d_update_prev, P1, dimX, dimY, ImSize); checkCudaErrors( cudaDeviceSynchronize() ); checkCudaErrors(cudaPeekAtLastError() ); // setup arguments square<float> unary_op; thrust::plus<float> binary_op; thrust::device_vector<float> d_vec(P1, P1 + ImSize); float reduction = std::sqrt(thrust::transform_reduce(d_vec.begin(), d_vec.end(), unary_op, 0.0f, binary_op)); thrust::device_vector<float> d_vec2(d_update, d_update + ImSize); float reduction2 = std::sqrt(thrust::transform_reduce(d_vec2.begin(), d_vec2.end(), unary_op, 0.0f, binary_op)); // compute norm re = (reduction/reduction2); if (re < epsil) count++; if (count > 3) break; } } /***************************************************************/ //copy result matrix from device to host memory 
cudaMemcpy(Output,d_update,ImSize*sizeof(float),cudaMemcpyDeviceToHost); cudaFree(d_input); cudaFree(d_update); if (epsil != 0.0f) cudaFree(d_update_prev); cudaFree(P1); cudaFree(P2); cudaFree(P1_prev); cudaFree(P2_prev); cudaFree(R1); cudaFree(R2); cudaFree(d_InputRef); cudaFree(InputRef_x); cudaFree(InputRef_y); } else { /*3D verson*/ int ImSize = dimX*dimY*dimZ; float *d_input, *d_update=NULL, *d_update_prev, *P1=NULL, *P2=NULL, *P3=NULL, *P1_prev=NULL, *P2_prev=NULL, *P3_prev=NULL, *R1=NULL, *R2=NULL, *R3=NULL, *InputRef_x=NULL, *InputRef_y=NULL, *InputRef_z=NULL, *d_InputRef=NULL; dim3 dimBlock(BLKXSIZE,BLKYSIZE,BLKZSIZE); dim3 dimGrid(idivup(dimX,BLKXSIZE), idivup(dimY,BLKYSIZE),idivup(dimZ,BLKZSIZE)); /*allocate space for images on device*/ checkCudaErrors( cudaMalloc((void**)&d_input,ImSize*sizeof(float)) ); checkCudaErrors( cudaMalloc((void**)&d_update,ImSize*sizeof(float)) ); if (epsil != 0.0f) checkCudaErrors( cudaMalloc((void**)&d_update_prev,ImSize*sizeof(float)) ); checkCudaErrors( cudaMalloc((void**)&P1,ImSize*sizeof(float)) ); checkCudaErrors( cudaMalloc((void**)&P2,ImSize*sizeof(float)) ); checkCudaErrors( cudaMalloc((void**)&P3,ImSize*sizeof(float)) ); checkCudaErrors( cudaMalloc((void**)&P1_prev,ImSize*sizeof(float)) ); checkCudaErrors( cudaMalloc((void**)&P2_prev,ImSize*sizeof(float)) ); checkCudaErrors( cudaMalloc((void**)&P3_prev,ImSize*sizeof(float)) ); checkCudaErrors( cudaMalloc((void**)&R1,ImSize*sizeof(float)) ); checkCudaErrors( cudaMalloc((void**)&R2,ImSize*sizeof(float)) ); checkCudaErrors( cudaMalloc((void**)&R3,ImSize*sizeof(float)) ); checkCudaErrors( cudaMalloc((void**)&d_InputRef,ImSize*sizeof(float)) ); checkCudaErrors( cudaMalloc((void**)&InputRef_x,ImSize*sizeof(float)) ); checkCudaErrors( cudaMalloc((void**)&InputRef_y,ImSize*sizeof(float)) ); checkCudaErrors( cudaMalloc((void**)&InputRef_z,ImSize*sizeof(float)) ); checkCudaErrors( cudaMemcpy(d_input,Input,ImSize*sizeof(float),cudaMemcpyHostToDevice)); checkCudaErrors( cudaMemcpy(d_InputRef,InputRef,ImSize*sizeof(float),cudaMemcpyHostToDevice)); cudaMemset(P1, 0, ImSize*sizeof(float)); cudaMemset(P2, 0, ImSize*sizeof(float)); cudaMemset(P3, 0, ImSize*sizeof(float)); cudaMemset(P1_prev, 0, ImSize*sizeof(float)); cudaMemset(P2_prev, 0, ImSize*sizeof(float)); cudaMemset(P3_prev, 0, ImSize*sizeof(float)); cudaMemset(R1, 0, ImSize*sizeof(float)); cudaMemset(R2, 0, ImSize*sizeof(float)); cudaMemset(R3, 0, ImSize*sizeof(float)); cudaMemset(InputRef_x, 0, ImSize*sizeof(float)); cudaMemset(InputRef_y, 0, ImSize*sizeof(float)); cudaMemset(InputRef_z, 0, ImSize*sizeof(float)); /********************** Run CUDA 3D kernel here ********************/ multip = (1.0f/(26.0f*lambdaPar)); /* calculate gradient vectors for the reference */ GradNorm_func3D_kernel<<<dimGrid,dimBlock>>>(d_InputRef, InputRef_x, InputRef_y, InputRef_z, eta, dimX, dimY, dimZ, ImSize); checkCudaErrors( cudaDeviceSynchronize() ); checkCudaErrors(cudaPeekAtLastError() ); /* The main kernel */ for (i = 0; i < iter; i++) { if ((epsil != 0.0f) && (i % 5 == 0)) { dTVcopy_kernel3D<<<dimGrid,dimBlock>>>(d_update, d_update_prev, dimX, dimY, dimZ, ImSize); checkCudaErrors( cudaDeviceSynchronize() ); checkCudaErrors(cudaPeekAtLastError() ); } /*projects a 3D vector field R-1,2,3 onto the orthogonal complement of another 3D vector field InputRef_xyz*/ ProjectVect_func3D_kernel<<<dimGrid,dimBlock>>>(R1, R2, R3, InputRef_x, InputRef_y, InputRef_z, dimX, dimY, dimZ, ImSize); checkCudaErrors( cudaDeviceSynchronize() ); checkCudaErrors(cudaPeekAtLastError() ); 
/* computing the gradient of the objective function */ Obj_dfunc3D_kernel<<<dimGrid,dimBlock>>>(d_input, d_update, R1, R2, R3, dimX, dimY, dimZ, ImSize, lambdaPar); checkCudaErrors( cudaDeviceSynchronize() ); checkCudaErrors(cudaPeekAtLastError() ); if (nonneg != 0) { dTVnonneg3D_kernel<<<dimGrid,dimBlock>>>(d_update, dimX, dimY, dimZ, ImSize); checkCudaErrors( cudaDeviceSynchronize() ); checkCudaErrors(cudaPeekAtLastError() ); } /*Taking a step towards minus of the gradient*/ Grad_dfunc3D_kernel<<<dimGrid,dimBlock>>>(P1, P2, P3, d_update, R1, R2, R3, InputRef_x, InputRef_y, InputRef_z, dimX, dimY, dimZ, ImSize, multip); checkCudaErrors( cudaDeviceSynchronize() ); checkCudaErrors(cudaPeekAtLastError() ); /* projection step */ if (methodTV == 0) Proj_dfunc3D_iso_kernel<<<dimGrid,dimBlock>>>(P1, P2, P3, dimX, dimY, dimZ, ImSize); /* isotropic kernel */ else Proj_dfunc3D_aniso_kernel<<<dimGrid,dimBlock>>>(P1, P2, P3, dimX, dimY, dimZ, ImSize); /* anisotropic kernel */ checkCudaErrors( cudaDeviceSynchronize() ); checkCudaErrors(cudaPeekAtLastError() ); tkp1 = (1.0f + sqrt(1.0f + 4.0f*tk*tk))*0.5f; multip2 = ((tk-1.0f)/tkp1); Rupd_dfunc3D_kernel<<<dimGrid,dimBlock>>>(P1, P1_prev, P2, P2_prev, P3, P3_prev, R1, R2, R3, tkp1, tk, multip2, dimX, dimY, dimZ, ImSize); checkCudaErrors( cudaDeviceSynchronize() ); checkCudaErrors(cudaPeekAtLastError() ); dTVcopy_kernel3D<<<dimGrid,dimBlock>>>(P1, P1_prev, dimX, dimY, dimZ, ImSize); checkCudaErrors( cudaDeviceSynchronize() ); checkCudaErrors(cudaPeekAtLastError() ); dTVcopy_kernel3D<<<dimGrid,dimBlock>>>(P2, P2_prev, dimX, dimY, dimZ, ImSize); checkCudaErrors( cudaDeviceSynchronize() ); checkCudaErrors(cudaPeekAtLastError() ); dTVcopy_kernel3D<<<dimGrid,dimBlock>>>(P3, P3_prev, dimX, dimY, dimZ, ImSize); checkCudaErrors( cudaDeviceSynchronize() ); checkCudaErrors(cudaPeekAtLastError() ); tk = tkp1; if ((epsil != 0.0f) && (i % 5 == 0)) { /* calculate norm - stopping rules using the Thrust library */ dTVResidCalc3D_kernel<<<dimGrid,dimBlock>>>(d_update, d_update_prev, P1, dimX, dimY, dimZ, ImSize); checkCudaErrors( cudaDeviceSynchronize() ); checkCudaErrors(cudaPeekAtLastError() ); // setup arguments square<float> unary_op; thrust::plus<float> binary_op; thrust::device_vector<float> d_vec(P1, P1 + ImSize); float reduction = std::sqrt(thrust::transform_reduce(d_vec.begin(), d_vec.end(), unary_op, 0.0f, binary_op)); thrust::device_vector<float> d_vec2(d_update, d_update + ImSize); float reduction2 = std::sqrt(thrust::transform_reduce(d_vec2.begin(), d_vec2.end(), unary_op, 0.0f, binary_op)); // compute norm re = (reduction/reduction2); if (re < epsil) count++; if (count > 3) break; } } /***************************************************************/ //copy result matrix from device to host memory cudaMemcpy(Output,d_update,ImSize*sizeof(float),cudaMemcpyDeviceToHost); cudaFree(d_input); cudaFree(d_update); if (epsil != 0.0f) cudaFree(d_update_prev); cudaFree(P1); cudaFree(P2); cudaFree(P3); cudaFree(P1_prev); cudaFree(P2_prev); cudaFree(P3_prev); cudaFree(R1); cudaFree(R2); cudaFree(R3); cudaFree(InputRef_x); cudaFree(InputRef_y); cudaFree(InputRef_z); cudaFree(d_InputRef); } /*adding info into info_vector */ infovector[0] = (float)(i); /*iterations number (if stopped earlier based on tolerance)*/ infovector[1] = re; /* reached tolerance */ return 0; }
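A hedged host-side usage sketch for dTV_FGP_GPU_main, following the parameter list documented in the header comment of this file (the wrapper function, image size handling, and the concrete parameter values are illustrative assumptions, not taken from the original library):

#include <vector>

extern "C" int dTV_FGP_GPU_main(float *Input, float *InputRef, float *Output,
                                float *infovector, float lambdaPar, int iter,
                                float epsil, float eta, int methodTV, int nonneg,
                                int dimX, int dimY, int dimZ);

// Hypothetical wrapper: denoise one 2D slice guided by a reference image.
int denoise_2d_example(std::vector<float>& noisy, std::vector<float>& reference,
                       std::vector<float>& out, int width, int height)
{
    out.resize(static_cast<size_t>(width) * height);
    float info[2] = {0.0f, 0.0f};   // returns [iterations run, reached tolerance]
    // lambda = 0.01, 300 iterations, tolerance 1e-5, eta = 0.01,
    // isotropic TV (methodTV = 0), no nonnegativity constraint; dimZ = 1 selects the 2D path.
    return dTV_FGP_GPU_main(noisy.data(), reference.data(), out.data(), info,
                            0.01f, 300, 1e-5f, 0.01f, 0, 0, width, height, 1);
}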
055b7c9b0c71ccab70eae830f33504663c2ee54c.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include"kernel_funcs.h"
#include"device_funcs.cuh"
#include<hiprand/hiprand.h>
#include "common.hpp"
#include"host_funcs.hpp"

__global__ void InitialKernel(double* Result, int nx, int ny)
{
    // the first column already holds the random numbers
    unsigned int ix = threadIdx.x + blockIdx.x * blockDim.x;
    if (ix < nx) {
        for (int iy = 0; iy < ny; iy++) {
            int idx = iy * nx + ix;
            // the second column is the initial px value for each entry of the first column;
            // if the expression under the square root is negative, assign 0 directly so the
            // compute stage has a simpler check (testing for nan is a pain)
            if ((idx >= 1*nx) && (idx < 2*nx)) {
                if (Ekall(Result[idx-nx]) >= 0.0) Result[idx] = Px(double(Result[idx-nx]));
                else Result[idx] = 0.0;
            }
            // the third column is the initial fx value for each entry of the first column;
            // negative cases are handled the same way
            if ((idx >= 2*nx) && (idx < 3*nx)) {
                if (Result[idx-1*nx] > 0.0) Result[idx] = fx(double(Result[idx-2*nx]));
                else Result[idx] = 0.0;
            }
            // columns four, five and six copy the first three columns, in preparation for the compute function
            if ((idx >= 3*nx) && (idx < 4*nx)) {
                if (Result[idx-2*nx] > 0.0) Result[idx] = Result[idx-3*nx];
                else Result[idx] = 0.0;
            }
            if ((idx >= 4*nx) && (idx < 5*nx)) Result[idx] = Result[idx-3*nx];
        }
    }
}

void NormalRandom(double *ip, const int size)
{
    hiprandGenerator_t gen;                                     // random number generator handle
    hiprandCreateGenerator(&gen, HIPRAND_RNG_PSEUDO_MRG32K3A);  // step 1: choose the algorithm
    hiprandSetPseudoRandomGeneratorSeed(gen, 11ULL);            // step 2: seed the generator
    hiprandGenerateNormalDouble(gen, ip, size, 0, 0.7);         // step 3: generate the numbers into the buffer (mean, then standard deviation)
    hiprandDestroyGenerator(gen);                               // release the generator
    return;
}

void InitialMatrix(double* d_Result, int nx, int ny)
{
    NormalRandom(d_Result, nx);
    // set grid and block dimensions
    int dimx = 256;
    dim3 block(dimx, 1);
    dim3 grid((nx + block.x - 1) / block.x, 1);
    hipLaunchKernelGGL(( InitialKernel), dim3(grid),dim3(block), 0, 0, d_Result,nx,ny);
    CHECK(hipDeviceSynchronize());
    CHECK(hipGetLastError());
    // saving this data is only for testing; once the compute part is finished it will not be needed
    int nxy = nx * ny;
    int nBytes = nxy * sizeof(double);
    double *h_gpuRef;
    h_gpuRef = (double *)malloc(nBytes);
    CHECK(hipMemcpy(h_gpuRef, d_Result, nBytes, hipMemcpyDeviceToHost));
    // store the data
    double iStart = seconds();
    StoreData(h_gpuRef, nx, ny, "init.dat");
    double iElaps = seconds() - iStart;
    printf("STORE THE InitialKernel DATA elapsed %lf sec\n", iElaps);
}

__global__ void ComputeKernel(double* Result, int nx, int ny)
{
    unsigned int ix = threadIdx.x + blockIdx.x * blockDim.x;
    unsigned int idxOfXi = 3 * nx + ix;
    unsigned int idxOfPxi = 4 * nx + ix;
    unsigned int idxOfXiTwo = 5 * nx + ix;
    unsigned int idxOfPxiTwo = 6 * nx + ix;
    unsigned int idxOfTemp = 7 * nx + ix;
    if (ix < nx && Result[idxOfXi] != 0.0) {
        for (int i = 0; i < STEPSFIRST; i++) {
            updateXi(Result[idxOfXi], Result[idxOfPxi]);
        }
        Result[idxOfXiTwo] = Result[idxOfXi];
        Result[idxOfPxiTwo] = Result[idxOfPxi];
        for (int i = 0; i < STEPSSECOND; i++) {
            updateXiAtStepTwo(Result[idxOfXiTwo], Result[idxOfPxiTwo], i*DX);
        }
        double TempE = 0.5 * (pow(Result[idxOfPxiTwo],2.0)) - (1.0 / sqrt( pow(Result[idxOfXiTwo],2.0) + pow(A,2.0)));
        if (TempE <= 0.0) Result[idxOfTemp] = -999;
    }
}

int CountZeros(double* h_Result, int nx)
{
    unsigned int idxOfXi = nx;
    unsigned int idxOfTemp = 7 * nx;
    int count = 0;
    for (int i = 0; i < nx; i++) {
        if (h_Result[idxOfXi+i] == 0.0f) count++;
        //if(h_Result[idxOfTemp+i] == -999) nonZeros++;
    }
    return count;
}

int CountTooBig(double* h_Result, int nx)
{
    unsigned int idxOfXi = nx;
    unsigned int idxOfTemp = 7 * nx;
    int count = 0;
    for (int i = 0; i < nx; i++) {
        //if(h_Result[idxOfXi+i] == 0.0) count++;
        if (h_Result[idxOfTemp+i] == -999) count++;
    }
    return count;
}

void ComputeOnGPU1(double* Result, int nx, int ny, double* h_gpuRef)
{
    // set grid and block dimensions
    int dimx = 512;
    dim3 block(dimx);
    dim3 grid((nx + block.x - 1) / block.x, 1);
    double iStart = seconds();
    hipLaunchKernelGGL(( ComputeKernel), dim3(grid),dim3(block), 0, 0, Result,nx,ny);
    CHECK(hipDeviceSynchronize());
    // report any kernel error
    CHECK(hipGetLastError());
    double iElaps = seconds() - iStart;
    printf("RungeOnGPU elapsed %f sec\n", iElaps);
    // copy the GPU data back to the host
    int nxy = nx * ny;
    int nBytes = nxy * sizeof(double);
    CHECK(hipMemcpy(h_gpuRef, Result, nBytes, hipMemcpyDeviceToHost));
    int zeros = 0, nonzeros = 0;
    zeros = CountZeros(h_gpuRef, nx);
    nonzeros = CountTooBig(h_gpuRef, nx);
    printf("The Number of Zeros is %d,\t The Number of NonZeros is %d \n", zeros, nonzeros);
    double per = (nx - zeros - nonzeros)/(nx - zeros);
    printf("Percentage is %lf \n", per);
    // store the data
    iStart = seconds();
    StoreData(h_gpuRef, nx, ny, "gpuStepTwo1202.dat");
    //StoreData(h_Random,1,ny,"h_Random.dat");
    iElaps = seconds() - iStart;
    printf("STORE THE ComputeKernel DATA elapsed %lf sec\n", iElaps);
    return;
}
055b7c9b0c71ccab70eae830f33504663c2ee54c.cu
#include"kernel_funcs.h" #include"device_funcs.cuh" #include<curand.h> #include "common.hpp" #include"host_funcs.hpp" __global__ void InitialKernel(double* Result,int nx,int ny) { //第一列已经是随机数了 unsigned int ix = threadIdx.x + blockIdx.x * blockDim.x; if (ix < nx ){ for (int iy = 0; iy <ny; iy++) { int idx = iy * nx + ix; //第二列为第一列各自的px初值,如果出现根号下小于零的情况,直接赋值0,计算部分判断简单些(nan判定很烦……) if((idx>=1*nx)&&(idx<2*nx)){ if(Ekall(Result[idx-nx])>=0.0) Result[idx] = Px(double(Result[idx-nx])); else Result[idx] = 0.0; } //第三列为第一列各自的fx初值,出现小于零情况同理。 if((idx>=2*nx)&&(idx<3*nx)){ if(Result[idx-1*nx]>0.0) Result[idx] = fx(double(Result[idx-2*nx])); else Result[idx] = 0.0; } //第四五六列为前三列的复制,为了compute函数准备 if((idx>=3*nx)&&(idx<4*nx)){ if(Result[idx-2*nx]>0.0) Result[idx] = Result[idx-3*nx]; else Result[idx] = 0.0; } if((idx>=4*nx)&&(idx<5*nx)) Result[idx] = Result[idx-3*nx]; } } } void NormalRandom(double *ip, const int size){ curandGenerator_t gen; //生成随机数变量 curandCreateGenerator(&gen, CURAND_RNG_PSEUDO_MRG32K3A);//步骤1:指定算法 curandSetPseudoRandomGeneratorSeed(gen, 11ULL); //步骤2:随机数初始化 curandGenerateNormalDouble(gen, ip, size, 0, 0.7); //步骤3:生成随机数,存储到缓冲器中(第1个数字为均值,第二个为方差) curandDestroyGenerator(gen); //释放指针 return; } void InitialMatrix(double* d_Result,int nx,int ny){ NormalRandom(d_Result,nx); //分配grid,block大小 int dimx = 256; dim3 block(dimx, 1); dim3 grid((nx + block.x - 1) / block.x, 1); InitialKernel<<<grid,block>>>(d_Result,nx,ny); CHECK(cudaDeviceSynchronize()); CHECK(cudaGetLastError()); //保存数据仅仅为了测试用,写好compute部分以后肯定不用保存这个数据了…… int nxy = nx * ny; int nBytes = nxy * sizeof(double); double *h_gpuRef; h_gpuRef = (double *)malloc(nBytes); CHECK(cudaMemcpy(h_gpuRef, d_Result, nBytes, cudaMemcpyDeviceToHost)); //保存数据 double iStart = seconds(); StoreData(h_gpuRef,nx,ny,"init.dat"); double iElaps = seconds() - iStart; printf("STORE THE InitialKernel DATA elapsed %lf sec\n",iElaps); } __global__ void ComputeKernel(double* Result,int nx,int ny) { unsigned int ix = threadIdx.x + blockIdx.x * blockDim.x; unsigned int idxOfXi = 3 * nx + ix; unsigned int idxOfPxi = 4 * nx + ix; unsigned int idxOfXiTwo = 5 * nx + ix; unsigned int idxOfPxiTwo = 6 * nx + ix; unsigned int idxOfTemp = 7 * nx + ix; if(ix<nx && Result[idxOfXi]!=0.0){ for(int i=0;i<STEPSFIRST;i++){ updateXi(Result[idxOfXi],Result[idxOfPxi]); } Result[idxOfXiTwo] = Result[idxOfXi]; Result[idxOfPxiTwo] = Result[idxOfPxi]; for(int i=0;i<STEPSSECOND;i++){ updateXiAtStepTwo(Result[idxOfXiTwo],Result[idxOfPxiTwo],i*DX); } double TempE=0.5 * (pow(Result[idxOfPxiTwo],2.0)) - (1.0 / sqrt( pow(Result[idxOfXiTwo],2.0)+ pow(A,2.0))); if( TempE <= 0.0) Result[idxOfTemp]=-999; } } int CountZeros(double* h_Result,int nx) { unsigned int idxOfXi = nx ; unsigned int idxOfTemp = 7 * nx ; int count=0; for(int i=0;i<nx;i++){ if(h_Result[idxOfXi+i] == 0.0f) count++; //if(h_Result[idxOfTemp+i] == -999) nonZeros++; } return count; } int CountTooBig(double* h_Result,int nx) { unsigned int idxOfXi = nx ; unsigned int idxOfTemp = 7 * nx ; int count=0; for(int i=0;i<nx;i++){ //if(h_Result[idxOfXi+i] == 0.0) count++; if(h_Result[idxOfTemp+i] == -999) count++; } return count; } void ComputeOnGPU1(double* Result,int nx,int ny,double* h_gpuRef){ //分配grid,block大小 int dimx = 512; dim3 block(dimx); dim3 grid((nx + block.x - 1) / block.x, 1); double iStart = seconds(); ComputeKernel<<<grid,block>>>(Result,nx,ny); CHECK(cudaDeviceSynchronize()); //如果核函数错误,返回信息 CHECK(cudaGetLastError()); double iElaps = seconds() - iStart; printf("RungeOnGPU elapsed %f sec\n",iElaps); // GPU数据拷贝回主机 int nxy = nx * ny; 
int nBytes = nxy * sizeof(double); CHECK(cudaMemcpy(h_gpuRef, Result, nBytes, cudaMemcpyDeviceToHost)); int zeros=0,nonzeros=0; zeros = CountZeros(h_gpuRef,nx); nonzeros = CountTooBig(h_gpuRef,nx); printf("The Number of Zeros is %d,\t The Number of NonZeros is %d \n",zeros,nonzeros); double per = (nx - zeros - nonzeros)/(nx - zeros); printf("Percentage is %lf \n",per); //保存数据 iStart = seconds(); StoreData(h_gpuRef,nx,ny,"gpuStepTwo1202.dat"); //StoreData(h_Random,1,ny,"h_Random.dat"); iElaps = seconds() - iStart; printf("STORE THE ComputeKernel DATA elapsed %lf sec\n",iElaps); return; }
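The host-API pattern in NormalRandom above (create a generator, seed it, generate directly into device memory, destroy it) is the standard way to fill a GPU buffer with normally distributed doubles. Below is a minimal, self-contained sketch of the same pattern in plain CUDA/cuRAND; the buffer size and seed are arbitrary illustration values, not taken from the files above.

// Minimal cuRAND host-API sketch (illustrative values only): create, seed,
// generate normal doubles into device memory, destroy. Build with -lcurand.
#include <cstdio>
#include <cuda_runtime.h>
#include <curand.h>

int main() {
    const size_t N = 1 << 20;                 // hypothetical element count (must be even for normal generation)
    double *d_buf = nullptr;
    cudaMalloc(&d_buf, N * sizeof(double));

    curandGenerator_t gen;
    curandCreateGenerator(&gen, CURAND_RNG_PSEUDO_MRG32K3A);  // step 1: choose the algorithm
    curandSetPseudoRandomGeneratorSeed(gen, 11ULL);           // step 2: seed the generator
    curandGenerateNormalDouble(gen, d_buf, N, 0.0, 0.7);      // step 3: mean 0.0, stddev 0.7
    curandDestroyGenerator(gen);                              // release the generator

    double h0 = 0.0;                          // copy one value back just to show the buffer was filled
    cudaMemcpy(&h0, d_buf, sizeof(double), cudaMemcpyDeviceToHost);
    printf("first sample: %f\n", h0);

    cudaFree(d_buf);
    return 0;
}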
585f3ae1197efc7057884ce7ad76cbe991b8bc7d.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "THHUNN.h" #include "common.h" #include <stdio.h> #include <assert.h> #include <thrust/functional.h> __global__ void cunn_SpatialClassNLLCriterion_updateOutput_kernel( float *output, float *total_weight, float *input, float *target, float *weights, int size_average, int batch_size, int n_classes, int map_nelem, int blocks_per_sample) { __shared__ float partial_sums[CUDA_NUM_THREADS]; int i, t; float cur_weight; float input_sum = 0; float acc_weight = 0; int sample = blockIdx.x / blocks_per_sample; int toffset = sample * map_nelem; int ioffset = sample * map_nelem * n_classes; int step = blockDim.x * blocks_per_sample; for (i = (blockIdx.x % blocks_per_sample) * blockDim.x + threadIdx.x; i < map_nelem; i += step) { t = target[toffset + i] - 1; assert(t >= 0 && t < n_classes); cur_weight = weights ? weights[t] : 1.0f; input_sum -= input[ioffset + i + map_nelem * t] * cur_weight; acc_weight += cur_weight; } __syncthreads(); input_sum = reduceBlock(partial_sums, blockDim.x, input_sum, thrust::plus<float>(), 0.0f); acc_weight = reduceBlock(partial_sums, blockDim.x, acc_weight, thrust::plus<float>(), 0.0f); if (threadIdx.x == 0) { atomicAdd(total_weight, acc_weight); if (size_average && acc_weight > 0) atomicAdd(output, input_sum / acc_weight / gridDim.x); else atomicAdd(output, input_sum); } } __global__ void cunn_SpatialClassNLLCriterion_updateGradInput_kernel( float *gradInput, float *target, float *weights, float *total_weight, int size_average, int batch_size, int n_classes, int map_nelem, int blocks_per_sample) { if (*total_weight <= 0) return; int i, t; float norm = size_average ? (1.0f / *total_weight) : 1.0f; int sample = blockIdx.x / blocks_per_sample; int step = blockDim.x * blocks_per_sample; int toffset = sample * map_nelem; int ioffset = sample * map_nelem * n_classes; for (i = (blockIdx.x % blocks_per_sample) * blockDim.x + threadIdx.x; i < map_nelem; i += step) { t = (int)target[toffset + i] - 1; assert(t >= 0 && t < n_classes); gradInput[ioffset + i + map_nelem * t] = -(weights ? weights[t] : 1.0f) * norm; } } void THNN_CudaSpatialClassNLLCriterion_updateOutput( THCState *state, THCudaTensor *input, THCudaTensor *target, THCudaTensor *output, bool sizeAverage, THCudaTensor *weights, THCudaTensor *total_weight) { THArgCheck(THCudaTensor_nDimension(state, target) == 3, 1, "only batches of spatial targets supported (3D tensors)"); THArgCheck(THCudaTensor_nDimension(state, input) == 4, 2, "only batches of spatial inputs supported (4D tensors)"); if (weights) THCUNN_assertSameGPU(state, 5, input, target, weights, output, total_weight); else THCUNN_assertSameGPU(state, 4, input, target, output, total_weight); input = THCudaTensor_newContiguous(state, input); weights = weights ? THCudaTensor_newContiguous(state, weights) : NULL; target = THCudaTensor_newContiguous(state, target); float *input_data = THCudaTensor_data(state, input); float *weights_data = weights ? THCudaTensor_data(state, weights) : NULL; float *target_data = THCudaTensor_data(state, target); float *output_data = THCudaTensor_data(state, output); float *total_weight_data = THCudaTensor_data(state, total_weight); long batch_size = THCudaTensor_size(state, target, 0); long map_nelem = THCudaTensor_nElement(state, target) / batch_size; int blocks_per_sample = GET_BLOCKS(map_nelem) / 128; blocks_per_sample = (blocks_per_sample == 0) ? 
1 : blocks_per_sample; int total_blocks = blocks_per_sample * batch_size; THCudaTensor_fill(state, output, 0); THCudaTensor_fill(state, total_weight, 0); hipLaunchKernelGGL(( cunn_SpatialClassNLLCriterion_updateOutput_kernel) , dim3(total_blocks), dim3(CUDA_NUM_THREADS), 0, THCState_getCurrentStream(state), output_data, total_weight_data, input_data, target_data, weights_data, sizeAverage, THCudaTensor_size(state, input, 0), THCudaTensor_size(state, input, 1), THCudaTensor_size(state, input, 2) * THCudaTensor_size(state, input, 3), blocks_per_sample ); hipError_t errcode = hipGetLastError(); if (errcode != hipSuccess) { THError(hipGetErrorString(errcode)); } if (weights) THCudaTensor_free(state, weights); THCudaTensor_free(state, target); THCudaTensor_free(state, input); } void THNN_CudaSpatialClassNLLCriterion_updateGradInput( THCState *state, THCudaTensor *input, THCudaTensor *target, THCudaTensor *gradInput, bool sizeAverage, THCudaTensor *weights, THCudaTensor *total_weight) { THArgCheck(THCudaTensor_nDimension(state, target) == 3, 1, "only batches of spatial targets supported (3D tensors)"); THArgCheck(THCudaTensor_nDimension(state, input) == 4, 2, "only batches of spatial inputs supported (4D tensors)"); THArgCheck(THCudaTensor_isContiguous(state, gradInput), 4, "gradInput must be contiguous"); if (weights) THCUNN_assertSameGPU(state, 5, weights, input, target, gradInput, total_weight); else THCUNN_assertSameGPU(state, 4, input, target, gradInput, total_weight); input = THCudaTensor_newContiguous(state, input); weights = weights ? THCudaTensor_newContiguous(state, weights) : NULL; target = THCudaTensor_newContiguous(state, target); float *weights_data = weights ? THCudaTensor_data(state, weights) : NULL; float *gradInput_data = THCudaTensor_data(state, gradInput); float *target_data = THCudaTensor_data(state, target); float *total_weight_data = THCudaTensor_data(state, total_weight); long batch_size = THCudaTensor_size(state, target, 0); long map_nelem = THCudaTensor_nElement(state, target) / batch_size; int blocks_per_sample = GET_BLOCKS(map_nelem) / 128; blocks_per_sample = (blocks_per_sample == 0) ? 1 : blocks_per_sample; int total_blocks = blocks_per_sample * batch_size; hipLaunchKernelGGL(( cunn_SpatialClassNLLCriterion_updateGradInput_kernel) , dim3(total_blocks), dim3(CUDA_NUM_THREADS), 0, THCState_getCurrentStream(state), gradInput_data, target_data, weights_data, total_weight_data, sizeAverage, THCudaTensor_size(state, input, 0), THCudaTensor_size(state, input, 1), THCudaTensor_size(state, input, 2) *THCudaTensor_size(state, input, 3), blocks_per_sample ); hipError_t errcode = hipGetLastError(); if (errcode != hipSuccess) { THError(hipGetErrorString(errcode)); } if (weights) THCudaTensor_free(state, weights); THCudaTensor_free(state, target); THCudaTensor_free(state, input); }
585f3ae1197efc7057884ce7ad76cbe991b8bc7d.cu
#include "THCUNN.h" #include "common.h" #include <stdio.h> #include <assert.h> #include <thrust/functional.h> __global__ void cunn_SpatialClassNLLCriterion_updateOutput_kernel( float *output, float *total_weight, float *input, float *target, float *weights, int size_average, int batch_size, int n_classes, int map_nelem, int blocks_per_sample) { __shared__ float partial_sums[CUDA_NUM_THREADS]; int i, t; float cur_weight; float input_sum = 0; float acc_weight = 0; int sample = blockIdx.x / blocks_per_sample; int toffset = sample * map_nelem; int ioffset = sample * map_nelem * n_classes; int step = blockDim.x * blocks_per_sample; for (i = (blockIdx.x % blocks_per_sample) * blockDim.x + threadIdx.x; i < map_nelem; i += step) { t = target[toffset + i] - 1; assert(t >= 0 && t < n_classes); cur_weight = weights ? weights[t] : 1.0f; input_sum -= input[ioffset + i + map_nelem * t] * cur_weight; acc_weight += cur_weight; } __syncthreads(); input_sum = reduceBlock(partial_sums, blockDim.x, input_sum, thrust::plus<float>(), 0.0f); acc_weight = reduceBlock(partial_sums, blockDim.x, acc_weight, thrust::plus<float>(), 0.0f); if (threadIdx.x == 0) { atomicAdd(total_weight, acc_weight); if (size_average && acc_weight > 0) atomicAdd(output, input_sum / acc_weight / gridDim.x); else atomicAdd(output, input_sum); } } __global__ void cunn_SpatialClassNLLCriterion_updateGradInput_kernel( float *gradInput, float *target, float *weights, float *total_weight, int size_average, int batch_size, int n_classes, int map_nelem, int blocks_per_sample) { if (*total_weight <= 0) return; int i, t; float norm = size_average ? (1.0f / *total_weight) : 1.0f; int sample = blockIdx.x / blocks_per_sample; int step = blockDim.x * blocks_per_sample; int toffset = sample * map_nelem; int ioffset = sample * map_nelem * n_classes; for (i = (blockIdx.x % blocks_per_sample) * blockDim.x + threadIdx.x; i < map_nelem; i += step) { t = (int)target[toffset + i] - 1; assert(t >= 0 && t < n_classes); gradInput[ioffset + i + map_nelem * t] = -(weights ? weights[t] : 1.0f) * norm; } } void THNN_CudaSpatialClassNLLCriterion_updateOutput( THCState *state, THCudaTensor *input, THCudaTensor *target, THCudaTensor *output, bool sizeAverage, THCudaTensor *weights, THCudaTensor *total_weight) { THArgCheck(THCudaTensor_nDimension(state, target) == 3, 1, "only batches of spatial targets supported (3D tensors)"); THArgCheck(THCudaTensor_nDimension(state, input) == 4, 2, "only batches of spatial inputs supported (4D tensors)"); if (weights) THCUNN_assertSameGPU(state, 5, input, target, weights, output, total_weight); else THCUNN_assertSameGPU(state, 4, input, target, output, total_weight); input = THCudaTensor_newContiguous(state, input); weights = weights ? THCudaTensor_newContiguous(state, weights) : NULL; target = THCudaTensor_newContiguous(state, target); float *input_data = THCudaTensor_data(state, input); float *weights_data = weights ? THCudaTensor_data(state, weights) : NULL; float *target_data = THCudaTensor_data(state, target); float *output_data = THCudaTensor_data(state, output); float *total_weight_data = THCudaTensor_data(state, total_weight); long batch_size = THCudaTensor_size(state, target, 0); long map_nelem = THCudaTensor_nElement(state, target) / batch_size; int blocks_per_sample = GET_BLOCKS(map_nelem) / 128; blocks_per_sample = (blocks_per_sample == 0) ? 
1 : blocks_per_sample; int total_blocks = blocks_per_sample * batch_size; THCudaTensor_fill(state, output, 0); THCudaTensor_fill(state, total_weight, 0); cunn_SpatialClassNLLCriterion_updateOutput_kernel <<<total_blocks, CUDA_NUM_THREADS, 0, THCState_getCurrentStream(state)>>>( output_data, total_weight_data, input_data, target_data, weights_data, sizeAverage, THCudaTensor_size(state, input, 0), THCudaTensor_size(state, input, 1), THCudaTensor_size(state, input, 2) * THCudaTensor_size(state, input, 3), blocks_per_sample ); cudaError errcode = cudaGetLastError(); if (errcode != cudaSuccess) { THError(cudaGetErrorString(errcode)); } if (weights) THCudaTensor_free(state, weights); THCudaTensor_free(state, target); THCudaTensor_free(state, input); } void THNN_CudaSpatialClassNLLCriterion_updateGradInput( THCState *state, THCudaTensor *input, THCudaTensor *target, THCudaTensor *gradInput, bool sizeAverage, THCudaTensor *weights, THCudaTensor *total_weight) { THArgCheck(THCudaTensor_nDimension(state, target) == 3, 1, "only batches of spatial targets supported (3D tensors)"); THArgCheck(THCudaTensor_nDimension(state, input) == 4, 2, "only batches of spatial inputs supported (4D tensors)"); THArgCheck(THCudaTensor_isContiguous(state, gradInput), 4, "gradInput must be contiguous"); if (weights) THCUNN_assertSameGPU(state, 5, weights, input, target, gradInput, total_weight); else THCUNN_assertSameGPU(state, 4, input, target, gradInput, total_weight); input = THCudaTensor_newContiguous(state, input); weights = weights ? THCudaTensor_newContiguous(state, weights) : NULL; target = THCudaTensor_newContiguous(state, target); float *weights_data = weights ? THCudaTensor_data(state, weights) : NULL; float *gradInput_data = THCudaTensor_data(state, gradInput); float *target_data = THCudaTensor_data(state, target); float *total_weight_data = THCudaTensor_data(state, total_weight); long batch_size = THCudaTensor_size(state, target, 0); long map_nelem = THCudaTensor_nElement(state, target) / batch_size; int blocks_per_sample = GET_BLOCKS(map_nelem) / 128; blocks_per_sample = (blocks_per_sample == 0) ? 1 : blocks_per_sample; int total_blocks = blocks_per_sample * batch_size; cunn_SpatialClassNLLCriterion_updateGradInput_kernel <<<total_blocks, CUDA_NUM_THREADS, 0, THCState_getCurrentStream(state)>>>( gradInput_data, target_data, weights_data, total_weight_data, sizeAverage, THCudaTensor_size(state, input, 0), THCudaTensor_size(state, input, 1), THCudaTensor_size(state, input, 2) *THCudaTensor_size(state, input, 3), blocks_per_sample ); cudaError errcode = cudaGetLastError(); if (errcode != cudaSuccess) { THError(cudaGetErrorString(errcode)); } if (weights) THCudaTensor_free(state, weights); THCudaTensor_free(state, target); THCudaTensor_free(state, input); }
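Both kernels above split the grid so that each sample of map_nelem elements is covered by blocks_per_sample blocks, and every thread strides by blockDim.x * blocks_per_sample inside its own sample. The sketch below isolates that decomposition with a trivial per-sample sum; it replaces the block-level reduceBlock helper with a plain per-thread atomicAdd for brevity, and all names and sizes are illustrative rather than taken from THCUNN.

// Block-per-sample decomposition sketch: batch_size * blocks_per_sample blocks,
// each block working on one sample's slice of map_nelem elements.
#include <cstdio>
#include <cuda_runtime.h>

__global__ void perSampleSum(const float *data, float *sums,
                             int map_nelem, int blocks_per_sample) {
    int sample  = blockIdx.x / blocks_per_sample;      // which sample this block works on
    int offset  = sample * map_nelem;                  // start of that sample's elements
    int step    = blockDim.x * blocks_per_sample;      // stride across the sample
    float local = 0.0f;
    for (int i = (blockIdx.x % blocks_per_sample) * blockDim.x + threadIdx.x;
         i < map_nelem; i += step)
        local += data[offset + i];
    atomicAdd(&sums[sample], local);                   // accumulate the per-sample result
}

int main() {
    const int batch_size = 4, map_nelem = 1 << 16, blocks_per_sample = 8, threads = 256;
    float *d_data, *d_sums;
    cudaMalloc(&d_data, batch_size * map_nelem * sizeof(float));
    cudaMalloc(&d_sums, batch_size * sizeof(float));
    cudaMemset(d_data, 0, batch_size * map_nelem * sizeof(float));
    cudaMemset(d_sums, 0, batch_size * sizeof(float));
    perSampleSum<<<batch_size * blocks_per_sample, threads>>>(d_data, d_sums,
                                                              map_nelem, blocks_per_sample);
    cudaDeviceSynchronize();
    printf("done: %s\n", cudaGetErrorString(cudaGetLastError()));
    cudaFree(d_data); cudaFree(d_sums);
    return 0;
}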
45abca36a4a0ca5473a12d0192112cce8a3f2090.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. */ #include "detectNet.h" #include "cudaUtility.h" template<typename T> __global__ void gpuDetectionOverlay( T* input, T* output, int width, int height, detectNet::Detection* detections, int numDetections, float4* colors ) { const int x = blockIdx.x * blockDim.x + threadIdx.x; const int y = blockIdx.y * blockDim.y + threadIdx.y; if( x >= width || y >= height ) return; const T px_in = input[ y * width + x ]; T px_out = px_in; const float fx = x; const float fy = y; for( int n=0; n < numDetections; n++ ) { const detectNet::Detection det = detections[n]; // check if this pixel is inside the bounding box if( fx >= det.Left && fx <= det.Right && fy >= det.Top && fy <= det.Bottom ) { const float4 color = colors[det.ClassID]; const float alpha = color.w / 255.0f; const float ialph = 1.0f - alpha; px_out.x = alpha * color.x + ialph * px_out.x; px_out.y = alpha * color.y + ialph * px_out.y; px_out.z = alpha * color.z + ialph * px_out.z; } } output[y * width + x] = px_out; } template<typename T> __global__ void gpuDetectionOverlayBox( T* input, T* output, int imgWidth, int imgHeight, int x0, int y0, int boxWidth, int boxHeight, const float4 color ) { const int box_x = blockIdx.x * blockDim.x + threadIdx.x; const int box_y = blockIdx.y * blockDim.y + threadIdx.y; if( box_x >= boxWidth || box_y >= boxHeight ) return; const int x = box_x + x0; const int y = box_y + y0; if( x >= imgWidth || y >= imgHeight ) return; T px = input[ y * imgWidth + x ]; const float alpha = color.w / 255.0f; const float ialph = 1.0f - alpha; px.x = alpha * color.x + ialph * px.x; px.y = alpha * color.y + ialph * px.y; px.z = alpha * color.z + ialph * px.z; output[y * imgWidth + x] = px; } template<typename T> hipError_t launchDetectionOverlay( T* input, T* output, uint32_t width, uint32_t height, detectNet::Detection* detections, int numDetections, float4* colors ) { if( !input || !output || width == 0 || height == 0 || !detections || numDetections == 0 || !colors ) return hipErrorInvalidValue; // this assumes that the output already has the input image copied to it, // which if input != output, is done first by detectNet::Detect() for( int n=0; n < numDetections; n++ ) { const int boxWidth = (int)detections[n].Width(); const int boxHeight = (int)detections[n].Height(); // launch kernel const dim3 blockDim(8, 8); const dim3 
gridDim(iDivUp(boxWidth,blockDim.x), iDivUp(boxHeight,blockDim.y)); hipLaunchKernelGGL(( gpuDetectionOverlayBox<T>), dim3(gridDim), dim3(blockDim), 0, 0, input, output, width, height, (int)detections[n].Left, (int)detections[n].Top, boxWidth, boxHeight, colors[detections[n].ClassID]); } return hipGetLastError(); } hipError_t cudaDetectionOverlay( void* input, void* output, uint32_t width, uint32_t height, imageFormat format, detectNet::Detection* detections, int numDetections, float4* colors ) { if( format == IMAGE_RGB8 ) return launchDetectionOverlay<uchar3>((uchar3*)input, (uchar3*)output, width, height, detections, numDetections, colors); else if( format == IMAGE_RGBA8 ) return launchDetectionOverlay<uchar4>((uchar4*)input, (uchar4*)output, width, height, detections, numDetections, colors); else if( format == IMAGE_RGB32F ) return launchDetectionOverlay<float3>((float3*)input, (float3*)output, width, height, detections, numDetections, colors); else if( format == IMAGE_RGBA32F ) return launchDetectionOverlay<float4>((float4*)input, (float4*)output, width, height, detections, numDetections, colors); else return hipErrorInvalidValue; }
45abca36a4a0ca5473a12d0192112cce8a3f2090.cu
/* * Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. */ #include "detectNet.h" #include "cudaUtility.h" template<typename T> __global__ void gpuDetectionOverlay( T* input, T* output, int width, int height, detectNet::Detection* detections, int numDetections, float4* colors ) { const int x = blockIdx.x * blockDim.x + threadIdx.x; const int y = blockIdx.y * blockDim.y + threadIdx.y; if( x >= width || y >= height ) return; const T px_in = input[ y * width + x ]; T px_out = px_in; const float fx = x; const float fy = y; for( int n=0; n < numDetections; n++ ) { const detectNet::Detection det = detections[n]; // check if this pixel is inside the bounding box if( fx >= det.Left && fx <= det.Right && fy >= det.Top && fy <= det.Bottom ) { const float4 color = colors[det.ClassID]; const float alpha = color.w / 255.0f; const float ialph = 1.0f - alpha; px_out.x = alpha * color.x + ialph * px_out.x; px_out.y = alpha * color.y + ialph * px_out.y; px_out.z = alpha * color.z + ialph * px_out.z; } } output[y * width + x] = px_out; } template<typename T> __global__ void gpuDetectionOverlayBox( T* input, T* output, int imgWidth, int imgHeight, int x0, int y0, int boxWidth, int boxHeight, const float4 color ) { const int box_x = blockIdx.x * blockDim.x + threadIdx.x; const int box_y = blockIdx.y * blockDim.y + threadIdx.y; if( box_x >= boxWidth || box_y >= boxHeight ) return; const int x = box_x + x0; const int y = box_y + y0; if( x >= imgWidth || y >= imgHeight ) return; T px = input[ y * imgWidth + x ]; const float alpha = color.w / 255.0f; const float ialph = 1.0f - alpha; px.x = alpha * color.x + ialph * px.x; px.y = alpha * color.y + ialph * px.y; px.z = alpha * color.z + ialph * px.z; output[y * imgWidth + x] = px; } template<typename T> cudaError_t launchDetectionOverlay( T* input, T* output, uint32_t width, uint32_t height, detectNet::Detection* detections, int numDetections, float4* colors ) { if( !input || !output || width == 0 || height == 0 || !detections || numDetections == 0 || !colors ) return cudaErrorInvalidValue; // this assumes that the output already has the input image copied to it, // which if input != output, is done first by detectNet::Detect() for( int n=0; n < numDetections; n++ ) { const int boxWidth = (int)detections[n].Width(); const int boxHeight = (int)detections[n].Height(); // launch kernel const dim3 blockDim(8, 8); const dim3 gridDim(iDivUp(boxWidth,blockDim.x), iDivUp(boxHeight,blockDim.y)); 
gpuDetectionOverlayBox<T><<<gridDim, blockDim>>>(input, output, width, height, (int)detections[n].Left, (int)detections[n].Top, boxWidth, boxHeight, colors[detections[n].ClassID]); } return cudaGetLastError(); } cudaError_t cudaDetectionOverlay( void* input, void* output, uint32_t width, uint32_t height, imageFormat format, detectNet::Detection* detections, int numDetections, float4* colors ) { if( format == IMAGE_RGB8 ) return launchDetectionOverlay<uchar3>((uchar3*)input, (uchar3*)output, width, height, detections, numDetections, colors); else if( format == IMAGE_RGBA8 ) return launchDetectionOverlay<uchar4>((uchar4*)input, (uchar4*)output, width, height, detections, numDetections, colors); else if( format == IMAGE_RGB32F ) return launchDetectionOverlay<float3>((float3*)input, (float3*)output, width, height, detections, numDetections, colors); else if( format == IMAGE_RGBA32F ) return launchDetectionOverlay<float4>((float4*)input, (float4*)output, width, height, detections, numDetections, colors); else return cudaErrorInvalidValue; }
d8d9808fa42f637d1d0bce76c863d6bc95817443.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "../common/book.h" #define N 10 __global__ void add( int *a, int *b, int *c ) { int tid = blockIdx.x; // handle the data at this index if (tid < N) c[tid] = a[tid] + b[tid]; } int main( void ) { int a[N], b[N], c[N]; int *dev_a, *dev_b, *dev_c; // allocate the memory on the GPU HANDLE_ERROR( hipMalloc( (void**)&dev_a, N * sizeof(int) ) ); HANDLE_ERROR( hipMalloc( (void**)&dev_b, N * sizeof(int) ) ); HANDLE_ERROR( hipMalloc( (void**)&dev_c, N * sizeof(int) ) ); // fill the arrays 'a' and 'b' on the CPU for (int i = 0; i < N; i++) { a[i] = -i; b[i] = i * i; } // copy the arrays 'a' and 'b' to the GPU HANDLE_ERROR( hipMemcpy( dev_a, a, N * sizeof(int), hipMemcpyHostToDevice ) ); HANDLE_ERROR( hipMemcpy( dev_b, b, N * sizeof(int), hipMemcpyHostToDevice ) ); hipLaunchKernelGGL(( add), dim3(N), dim3(1), 0, 0, dev_a, dev_b, dev_c ); // copy the array 'c' back from the GPU to the CPU HANDLE_ERROR( hipMemcpy( c, dev_c, N * sizeof(int), hipMemcpyDeviceToHost ) ); // display the results for (int i = 0; i < N; i++) { printf( "%d + %d = %d\n", a[i], b[i], c[i] ); } // free the memory allocated on the GPU hipFree( dev_a ); hipFree( dev_b ); hipFree( dev_c ); return 0; }
d8d9808fa42f637d1d0bce76c863d6bc95817443.cu
#include "../common/book.h" #define N 10 __global__ void add( int *a, int *b, int *c ) { int tid = blockIdx.x; // handle the data at this index if (tid < N) c[tid] = a[tid] + b[tid]; } int main( void ) { int a[N], b[N], c[N]; int *dev_a, *dev_b, *dev_c; // allocate the memory on the GPU HANDLE_ERROR( cudaMalloc( (void**)&dev_a, N * sizeof(int) ) ); HANDLE_ERROR( cudaMalloc( (void**)&dev_b, N * sizeof(int) ) ); HANDLE_ERROR( cudaMalloc( (void**)&dev_c, N * sizeof(int) ) ); // fill the arrays 'a' and 'b' on the CPU for (int i = 0; i < N; i++) { a[i] = -i; b[i] = i * i; } // copy the arrays 'a' and 'b' to the GPU HANDLE_ERROR( cudaMemcpy( dev_a, a, N * sizeof(int), cudaMemcpyHostToDevice ) ); HANDLE_ERROR( cudaMemcpy( dev_b, b, N * sizeof(int), cudaMemcpyHostToDevice ) ); add<<<N, 1>>>( dev_a, dev_b, dev_c ); // copy the array 'c' back from the GPU to the CPU HANDLE_ERROR( cudaMemcpy( c, dev_c, N * sizeof(int), cudaMemcpyDeviceToHost ) ); // display the results for (int i = 0; i < N; i++) { printf( "%d + %d = %d\n", a[i], b[i], c[i] ); } // free the memory allocated on the GPU cudaFree( dev_a ); cudaFree( dev_b ); cudaFree( dev_c ); return 0; }
a35667a0ab346dc914eb647f368502bddbd1e1ec.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "star2d2r-512-8-512_kernel.hu" __device__ float __sbref_wrap(float *sb, size_t index) { return sb[index]; } __global__ void kernel0_8(float *A, int dimsize, int timestep, int c0) { #ifndef AN5D_TYPE #define AN5D_TYPE unsigned #endif const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0); #define __c0 c0 const AN5D_TYPE __c1Len = (dimsize - 2 - 2); const AN5D_TYPE __c1Pad = (2); #define __c1 c1 const AN5D_TYPE __c2Len = (dimsize - 2 - 2); const AN5D_TYPE __c2Pad = (2); #define __c2 c2 const AN5D_TYPE __halo1 = 2; const AN5D_TYPE __halo2 = 2; const AN5D_TYPE __side0Len = 8; const AN5D_TYPE __side1Len = 512; const AN5D_TYPE __side2Len = 480; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len; const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len; const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x; const AN5D_TYPE __local_c2 = __tid; const AN5D_TYPE __c1Id = blockIdx.x / __side2Num; const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2; float __reg_0_0; float __reg_0_1; float __reg_0_2; float __reg_0_3; float __reg_0_4; float __reg_1_0; float __reg_1_1; float __reg_1_2; float __reg_1_3; float __reg_1_4; float __reg_2_0; float __reg_2_1; float __reg_2_2; float __reg_2_3; float __reg_2_4; float __reg_3_0; float __reg_3_1; float __reg_3_2; float __reg_3_3; float __reg_3_4; float __reg_4_0; float __reg_4_1; float __reg_4_2; float __reg_4_3; float __reg_4_4; float __reg_5_0; float __reg_5_1; float __reg_5_2; float __reg_5_3; float __reg_5_4; float __reg_6_0; float __reg_6_1; float __reg_6_2; float __reg_6_3; float __reg_6_4; float __reg_7_0; float __reg_7_1; float __reg_7_2; float __reg_7_3; float __reg_7_4; __shared__ float __c_sb_double[__blockSize * 2]; float *__c_sb = __c_sb_double; const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2; const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len; const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1); const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2); const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3); const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4); const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5); const AN5D_TYPE __writeValid6 = __updateValid && __local_c2 >= (__halo2 * 6) && __local_c2 < __side2LenOl - (__halo2 * 6); const AN5D_TYPE __writeValid7 = __updateValid && __local_c2 >= (__halo2 * 7) && __local_c2 < __side2LenOl - (__halo2 * 7); const AN5D_TYPE __writeValid8 = __updateValid && __local_c2 >= (__halo2 * 8) && __local_c2 < __side2LenOl - (__halo2 * 8); const AN5D_TYPE __storeValid = __writeValid8; AN5D_TYPE __c1; AN5D_TYPE __h; const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id; #define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + 
h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0) #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR(__rn0, __a, __b, __c, __d, __e) do { __rn0 = (((((((((0.09371f * (__REGREF(__a, 0))) + (0.09374f * (__REGREF(__b, 0)))) + (0.09376f * (__SBREF(__c_sb, -2)))) + (0.09372f * (__SBREF(__c_sb, -1)))) + (0.25001f * (__REGREF(__c, 0)))) + (0.09377f * (__SBREF(__c_sb, 1)))) + (0.09373f * (__SBREF(__c_sb, 2)))) + (0.09375f * (__REGREF(__d, 0)))) + (0.09378f * (__REGREF(__e, 0)))); } while (0) #define __DB_SWITCH() do { __c_sb = &__c_sb_double[(__c_sb == __c_sb_double) ? __blockSize : 0]; } while (0) #define __CALCSETUP(a, b, c, d, e) do { __DB_SWITCH(); __c_sb[__tid] = c; __syncthreads(); } while (0) #define __CALC1(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0) #define __CALC2(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0) #define __CALC3(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0) #define __CALC4(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid4) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0) #define __CALC5(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid5) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0) #define __CALC6(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid6) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0) #define __CALC7(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid7) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0) #define __STORE(h, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2, reg3, reg4); } } while (0) if (__c1Id == 0) { __LOAD(__reg_7_0, 0); __LOAD(__reg_7_1, 1); __LOAD(__reg_0_2, 2); __LOAD(__reg_0_3, 3); __LOAD(__reg_0_4, 4); __CALC1(__reg_1_2, __reg_7_0, __reg_7_1, __reg_0_2, __reg_0_3, __reg_0_4); __LOAD(__reg_0_0, 5); __CALC1(__reg_1_3, __reg_7_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __LOAD(__reg_0_1, 6); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_7_0, __reg_7_1, __reg_1_2, __reg_1_3, __reg_1_4); __LOAD(__reg_0_2, 7); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_7_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __LOAD(__reg_0_3, 8); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_7_0, __reg_7_1, __reg_2_2, __reg_2_3, __reg_2_4); __LOAD(__reg_0_4, 9); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_3, __reg_7_1, __reg_2_2, __reg_2_3, __reg_2_4, 
__reg_2_0); __LOAD(__reg_0_0, 10); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_7_0, __reg_7_1, __reg_3_2, __reg_3_3, __reg_3_4); __LOAD(__reg_0_1, 11); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_3, __reg_7_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __LOAD(__reg_0_2, 12); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_7_0, __reg_7_1, __reg_4_2, __reg_4_3, __reg_4_4); __LOAD(__reg_0_3, 13); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_3, __reg_7_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); __LOAD(__reg_0_4, 14); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_7_0, __reg_7_1, __reg_5_2, __reg_5_3, __reg_5_4); __LOAD(__reg_0_0, 15); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_3, __reg_7_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0); __LOAD(__reg_0_1, 16); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_7_0, __reg_7_1, __reg_6_2, __reg_6_3, __reg_6_4); __LOAD(__reg_0_2, 17); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4); __CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_3, __reg_7_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0); __LOAD(__reg_0_3, 18); __CALC1(__reg_1_1, __reg_0_4, 
__reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); __CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3); __CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1); __STORE(2, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4); __LOAD(__reg_0_4, 19); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4); __CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2); __STORE(3, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0); __LOAD(__reg_0_0, 20); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0); __CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3); __STORE(4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1); __LOAD(__reg_0_1, 21); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4); __STORE(5, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2); __LOAD(__reg_0_2, 22); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4); __CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0); __STORE(6, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3); __LOAD(__reg_0_3, 23); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); __CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3); __CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, 
__reg_6_0, __reg_6_1); __STORE(7, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4); __LOAD(__reg_0_4, 24); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4); __CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2); __STORE(8, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0); __LOAD(__reg_0_0, 25); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0); __CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3); __STORE(9, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1); __LOAD(__reg_0_1, 26); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4); __STORE(10, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2); __LOAD(__reg_0_2, 27); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4); __CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0); __STORE(11, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3); __LOAD(__reg_0_3, 28); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); __CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3); __CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1); __STORE(12, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4); __LOAD(__reg_0_4, 29); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, 
__reg_4_1); __CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4); __CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2); __STORE(13, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0); __LOAD(__reg_0_0, 30); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0); __CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3); __STORE(14, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1); __LOAD(__reg_0_1, 31); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4); __STORE(15, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2); __LOAD(__reg_0_2, 32); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4); __CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0); __STORE(16, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3); } else { __LOAD(__reg_0_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __LOAD(__reg_0_3, 3); __LOAD(__reg_0_4, 4); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __LOAD(__reg_0_0, 5); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __LOAD(__reg_0_1, 6); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __LOAD(__reg_0_2, 7); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_3, 8); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __LOAD(__reg_0_4, 9); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __LOAD(__reg_0_0, 10); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __LOAD(__reg_0_1, 11); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __LOAD(__reg_0_2, 12); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __LOAD(__reg_0_3, 13); __CALC1(__reg_1_1, __reg_0_4, 
__reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __LOAD(__reg_0_4, 14); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __LOAD(__reg_0_0, 15); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __LOAD(__reg_0_1, 16); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __LOAD(__reg_0_2, 17); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __LOAD(__reg_0_3, 18); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __LOAD(__reg_0_4, 19); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __LOAD(__reg_0_0, 20); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); __LOAD(__reg_0_1, 21); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __LOAD(__reg_0_2, 22); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4); __LOAD(__reg_0_3, 23); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, 
__reg_4_4, __reg_4_0); __LOAD(__reg_0_4, 24); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4); __LOAD(__reg_0_0, 25); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0); __LOAD(__reg_0_1, 26); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1); __LOAD(__reg_0_2, 27); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4); __CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2); __LOAD(__reg_0_3, 28); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); __CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3); __CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1); __LOAD(__reg_0_4, 29); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4); __CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2); __LOAD(__reg_0_0, 30); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0); __CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, 
__reg_6_3); __LOAD(__reg_0_1, 31); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4); __LOAD(__reg_0_2, 32); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4); __CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0); __STORE(16, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3); } __c_sb = __c_sb_double + __blockSize * 0; if (__c1Id == __side1Num - 1) { for (__h = 33; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 5;) { __LOAD(__reg_0_3, __h); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); __CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3); __CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1); __STORE(__h - 16, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4); __h++; __LOAD(__reg_0_4, __h); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4); __CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2); __STORE(__h - 16, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0); __CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3); __STORE(__h - 16, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1); __h++; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); 
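// Note (comment added for readability; wording inferred from the generated structure): the enclosing
// for-loop is the steady-state phase for the last block along c1. Each unrolled step loads one new row,
// advances every __CALCk stage of the register pipeline by one position, and stores a finished row at
// __h - 16, i.e. 16 rows behind the load front, which matches this kernel's pipeline depth (8 fused
// stencil applications with a halo of 2).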
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4); __STORE(__h - 16, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2); __h++; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4); __CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0); __STORE(__h - 16, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3); __h++; } if (0) {} else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); __CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3); __CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1); __STORE(__h - 16, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1, __reg_0_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4); __CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2); __STORE(__h - 15, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_0_1); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0); __CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3); __STORE(__h - 14, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_0_1, __reg_0_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4); __STORE(__h - 13, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_0_1); __CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4); __CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0); __STORE(__h - 12, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_0_1, __reg_0_2); __CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); __CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, 
__reg_5_3); __CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1); __STORE(__h - 11, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4); __CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_0_1); __CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4); __CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2); __STORE(__h - 10, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0); __CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_0_1, __reg_0_2); __CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0); __CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3); __STORE(__h - 9, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1); __CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_0_1); __CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4); __STORE(__h - 8, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2); __CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_0_1, __reg_0_2); __CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0); __STORE(__h - 7, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3); __CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_0_1); __STORE(__h - 6, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4); __CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_0_1, __reg_0_2); __STORE(__h - 5, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0); __STORE(__h - 4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_0_1); __STORE(__h - 3, __reg_7_3, __reg_7_4, __reg_7_0, __reg_0_1, __reg_0_2); } else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_3, __h + 0); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); __CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3); __CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1); __STORE(__h - 16, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4); __CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2); __STORE(__h - 15, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2, __reg_0_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0); __CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3); __STORE(__h - 14, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_0_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); 
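// Note (comment added for readability; wording inferred from the generated structure): the chain of
// "__h + k == __c1Len - __side1Len * __c1Id + __halo1 * 2" branches drains the software pipeline at the
// bottom of the block. When only k rows remain, no further __LOADs are issued past the block end;
// instead the last boundary registers (__reg_0_*) are reused so the remaining stages can flush the
// rows still in flight via the trailing __STOREs.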
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4); __STORE(__h - 13, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_0_2, __reg_0_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4); __CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0); __STORE(__h - 12, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_0_2); __CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); __CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3); __CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1); __STORE(__h - 11, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_0_2, __reg_0_3); __CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4); __CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2); __STORE(__h - 10, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0); __CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_0_2); __CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0); __CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3); __STORE(__h - 9, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1); __CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_0_2, __reg_0_3); __CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4); __STORE(__h - 8, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2); __CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_0_2); __CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0); __STORE(__h - 7, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3); __CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_0_2, __reg_0_3); __CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1); __STORE(__h - 6, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4); __CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_0_2); __STORE(__h - 5, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0); __CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_0_2, __reg_0_3); __STORE(__h - 4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1); __STORE(__h - 3, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_0_2); __STORE(__h - 2, __reg_7_4, __reg_7_0, __reg_7_1, __reg_0_2, __reg_0_3); } else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_3, __h + 0); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); __CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3); __CALC7(__reg_7_4, 
__reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1); __STORE(__h - 16, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4); __LOAD(__reg_0_4, __h + 1); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4); __CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2); __STORE(__h - 15, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0); __CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3); __STORE(__h - 14, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3, __reg_0_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4); __STORE(__h - 13, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_0_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4); __CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0); __STORE(__h - 12, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_0_3, __reg_0_4); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); __CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3); __CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1); __STORE(__h - 11, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_0_3); __CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4); __CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2); __STORE(__h - 10, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_0_3, __reg_0_4); __CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0); __CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3); __STORE(__h - 9, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1); __CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_0_3); __CALC6(__reg_6_4, __reg_5_2, 
__reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4); __STORE(__h - 8, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2); __CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_0_3, __reg_0_4); __CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0); __STORE(__h - 7, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3); __CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_0_3); __CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1); __STORE(__h - 6, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4); __CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_0_3, __reg_0_4); __CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2); __STORE(__h - 5, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0); __CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_0_3); __STORE(__h - 4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1); __CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_0_3, __reg_0_4); __STORE(__h - 3, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2); __STORE(__h - 2, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_0_3); __STORE(__h - 1, __reg_7_0, __reg_7_1, __reg_7_2, __reg_0_3, __reg_0_4); } else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_3, __h + 0); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); __CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3); __CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1); __STORE(__h - 16, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4); __LOAD(__reg_0_4, __h + 1); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4); __CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2); __STORE(__h - 15, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0); __LOAD(__reg_0_0, __h + 2); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0); __CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3); __STORE(__h - 14, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); 
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4); __STORE(__h - 13, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4, __reg_0_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4); __CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0); __STORE(__h - 12, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_0_4); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); __CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3); __CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1); __STORE(__h - 11, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_0_4, __reg_0_0); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4); __CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2); __STORE(__h - 10, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_0_4); __CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0); __CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3); __STORE(__h - 9, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_0_4, __reg_0_0); __CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4); __STORE(__h - 8, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2); __CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_0_4); __CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0); __STORE(__h - 7, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3); __CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_0_4, __reg_0_0); __CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3); __CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1); __STORE(__h - 6, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4); __CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_0_4); __CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2); __STORE(__h - 5, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0); __CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_0_4, __reg_0_0); __CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3); __STORE(__h - 4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1); __CALC7(__reg_7_2, __reg_6_0, __reg_6_1, 
__reg_6_2, __reg_6_3, __reg_0_4); __STORE(__h - 3, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2); __CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_0_4, __reg_0_0); __STORE(__h - 2, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3); __STORE(__h - 1, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_0_4); __STORE(__h + 0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_0_4, __reg_0_0); } else if (__h + 4 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_3, __h + 0); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); __CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3); __CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1); __STORE(__h - 16, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4); __LOAD(__reg_0_4, __h + 1); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4); __CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2); __STORE(__h - 15, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0); __LOAD(__reg_0_0, __h + 2); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0); __CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3); __STORE(__h - 14, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1); __LOAD(__reg_0_1, __h + 3); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4); __STORE(__h - 13, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4); __CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0); __STORE(__h - 12, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, 
__reg_0_0, __reg_0_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); __CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3); __CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1); __STORE(__h - 11, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_0_0); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4); __CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2); __STORE(__h - 10, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_0_0, __reg_0_1); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0); __CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3); __STORE(__h - 9, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_0_0); __CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4); __STORE(__h - 8, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_0_0, __reg_0_1); __CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4); __CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0); __STORE(__h - 7, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3); __CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_0_0); __CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3); __CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1); __STORE(__h - 6, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4); __CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_0_0, __reg_0_1); __CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4); __CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2); __STORE(__h - 5, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0); __CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_0_0); __CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3); __STORE(__h - 4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1); __CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_0_0, __reg_0_1); __CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4); __STORE(__h - 3, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2); __CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_0_0); __STORE(__h - 2, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3); __CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_0_0, __reg_0_1); __STORE(__h - 1, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4); __STORE(__h + 0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_0_0); __STORE(__h + 1, __reg_7_2, 
__reg_7_3, __reg_7_4, __reg_0_0, __reg_0_1); } } else { for (__h = 33; __h <= __side1LenOl - 5;) { __LOAD(__reg_0_3, __h); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); __CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3); __CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1); __STORE(__h - 16, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4); __h++; __LOAD(__reg_0_4, __h); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4); __CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2); __STORE(__h - 16, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0); __CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3); __STORE(__h - 16, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1); __h++; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4); __STORE(__h - 16, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2); __h++; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4); __CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0); __STORE(__h - 16, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3); __h++; } if (__h == __side1LenOl) return; __LOAD(__reg_0_3, __h); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __CALC4(__reg_4_0, 
__reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); __CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3); __CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1); __STORE(__h - 16, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_4, __h); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4); __CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2); __STORE(__h - 16, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0); __CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3); __STORE(__h - 16, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4); __STORE(__h - 16, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4); __CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0); __STORE(__h - 16, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3); __h++; } } __global__ void kernel0_7(float *A, int dimsize, int timestep, int c0) { #ifndef AN5D_TYPE #define AN5D_TYPE unsigned #endif const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0); #define __c0 c0 const AN5D_TYPE __c1Len = (dimsize - 2 - 2); const AN5D_TYPE __c1Pad = (2); #define __c1 c1 const AN5D_TYPE __c2Len = (dimsize - 2 - 2); const AN5D_TYPE __c2Pad = (2); #define __c2 c2 const AN5D_TYPE __halo1 = 2; const AN5D_TYPE __halo2 = 2; const AN5D_TYPE __side0Len = 7; const AN5D_TYPE __side1Len = 512; const 
AN5D_TYPE __side2Len = 484; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len; const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len; const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x; const AN5D_TYPE __local_c2 = __tid; const AN5D_TYPE __c1Id = blockIdx.x / __side2Num; const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2; float __reg_0_0; float __reg_0_1; float __reg_0_2; float __reg_0_3; float __reg_0_4; float __reg_1_0; float __reg_1_1; float __reg_1_2; float __reg_1_3; float __reg_1_4; float __reg_2_0; float __reg_2_1; float __reg_2_2; float __reg_2_3; float __reg_2_4; float __reg_3_0; float __reg_3_1; float __reg_3_2; float __reg_3_3; float __reg_3_4; float __reg_4_0; float __reg_4_1; float __reg_4_2; float __reg_4_3; float __reg_4_4; float __reg_5_0; float __reg_5_1; float __reg_5_2; float __reg_5_3; float __reg_5_4; float __reg_6_0; float __reg_6_1; float __reg_6_2; float __reg_6_3; float __reg_6_4; __shared__ float __c_sb_double[__blockSize * 2]; float *__c_sb = __c_sb_double; const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2; const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len; const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1); const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2); const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3); const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4); const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5); const AN5D_TYPE __writeValid6 = __updateValid && __local_c2 >= (__halo2 * 6) && __local_c2 < __side2LenOl - (__halo2 * 6); const AN5D_TYPE __writeValid7 = __updateValid && __local_c2 >= (__halo2 * 7) && __local_c2 < __side2LenOl - (__halo2 * 7); const AN5D_TYPE __storeValid = __writeValid7; AN5D_TYPE __c1; AN5D_TYPE __h; const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id; #define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0) #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR(__rn0, __a, __b, __c, __d, __e) do { __rn0 = (((((((((0.09371f * (__REGREF(__a, 0))) + (0.09374f * (__REGREF(__b, 0)))) + (0.09376f * (__SBREF(__c_sb, -2)))) + (0.09372f * (__SBREF(__c_sb, -1)))) + (0.25001f * (__REGREF(__c, 0)))) + (0.09377f * (__SBREF(__c_sb, 1)))) + (0.09373f * (__SBREF(__c_sb, 2)))) + (0.09375f * (__REGREF(__d, 0)))) + (0.09378f * (__REGREF(__e, 0)))); } while (0) #define __DB_SWITCH() do { __c_sb = &__c_sb_double[(__c_sb == __c_sb_double) ? 
__blockSize : 0]; } while (0) #define __CALCSETUP(a, b, c, d, e) do { __DB_SWITCH(); __c_sb[__tid] = c; __syncthreads(); } while (0) #define __CALC1(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0) #define __CALC2(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0) #define __CALC3(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0) #define __CALC4(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid4) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0) #define __CALC5(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid5) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0) #define __CALC6(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid6) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0) #define __STORE(h, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2, reg3, reg4); } } while (0) if (__c1Id == 0) { __LOAD(__reg_6_0, 0); __LOAD(__reg_6_1, 1); __LOAD(__reg_0_2, 2); __LOAD(__reg_0_3, 3); __LOAD(__reg_0_4, 4); __CALC1(__reg_1_2, __reg_6_0, __reg_6_1, __reg_0_2, __reg_0_3, __reg_0_4); __LOAD(__reg_0_0, 5); __CALC1(__reg_1_3, __reg_6_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __LOAD(__reg_0_1, 6); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_6_0, __reg_6_1, __reg_1_2, __reg_1_3, __reg_1_4); __LOAD(__reg_0_2, 7); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_6_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __LOAD(__reg_0_3, 8); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_6_0, __reg_6_1, __reg_2_2, __reg_2_3, __reg_2_4); __LOAD(__reg_0_4, 9); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_3, __reg_6_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __LOAD(__reg_0_0, 10); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_6_0, __reg_6_1, __reg_3_2, __reg_3_3, __reg_3_4); __LOAD(__reg_0_1, 11); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_3, __reg_6_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __LOAD(__reg_0_2, 12); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); 
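// Note (comment added for readability; wording inferred from the generated structure): this is the
// first-block prologue (the __c1Id == 0 branch). Rows 0 and 1 are parked in __reg_6_0/__reg_6_1 as the
// top-boundary values while the 7-stage pipeline of this kernel fills, so the first __STORE lands at
// row 2 once enough rows (up to row 16) have been loaded.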
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_6_0, __reg_6_1, __reg_4_2, __reg_4_3, __reg_4_4); __LOAD(__reg_0_3, 13); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_3, __reg_6_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); __LOAD(__reg_0_4, 14); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_6_0, __reg_6_1, __reg_5_2, __reg_5_3, __reg_5_4); __LOAD(__reg_0_0, 15); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_3, __reg_6_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0); __LOAD(__reg_0_1, 16); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1); __STORE(2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4); __LOAD(__reg_0_2, 17); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4); __CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2); __STORE(3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0); __LOAD(__reg_0_3, 18); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); __CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3); __STORE(4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1); __LOAD(__reg_0_4, 19); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_0, __reg_5_1, 
__reg_5_2, __reg_5_3, __reg_5_4); __STORE(5, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2); __LOAD(__reg_0_0, 20); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0); __STORE(6, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3); __LOAD(__reg_0_1, 21); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1); __STORE(7, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4); __LOAD(__reg_0_2, 22); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4); __CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2); __STORE(8, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0); __LOAD(__reg_0_3, 23); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); __CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3); __STORE(9, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1); __LOAD(__reg_0_4, 24); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4); __STORE(10, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2); __LOAD(__reg_0_0, 25); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0); __STORE(11, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3); __LOAD(__reg_0_1, 26); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __CALC3(__reg_3_0, __reg_2_3, 
__reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1); __STORE(12, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4); __LOAD(__reg_0_2, 27); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4); __CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2); __STORE(13, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0); __LOAD(__reg_0_3, 28); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); __CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3); __STORE(14, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1); } else { __LOAD(__reg_0_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __LOAD(__reg_0_3, 3); __LOAD(__reg_0_4, 4); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __LOAD(__reg_0_0, 5); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __LOAD(__reg_0_1, 6); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __LOAD(__reg_0_2, 7); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_3, 8); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __LOAD(__reg_0_4, 9); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __LOAD(__reg_0_0, 10); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __LOAD(__reg_0_1, 11); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __LOAD(__reg_0_2, 12); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __LOAD(__reg_0_3, 13); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __LOAD(__reg_0_4, 14); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __LOAD(__reg_0_0, 15); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, 
__reg_2_4, __reg_2_0, __reg_2_1); __LOAD(__reg_0_1, 16); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __LOAD(__reg_0_2, 17); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __LOAD(__reg_0_3, 18); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __LOAD(__reg_0_4, 19); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __LOAD(__reg_0_0, 20); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); __LOAD(__reg_0_1, 21); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __LOAD(__reg_0_2, 22); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4); __LOAD(__reg_0_3, 23); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); __LOAD(__reg_0_4, 24); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4); __LOAD(__reg_0_0, 25); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, 
__reg_1_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0); __LOAD(__reg_0_1, 26); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1); __LOAD(__reg_0_2, 27); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4); __CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2); __LOAD(__reg_0_3, 28); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); __CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3); __STORE(14, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1); } __c_sb = __c_sb_double + __blockSize * 1; if (__c1Id == __side1Num - 1) { for (__h = 29; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 5;) { __LOAD(__reg_0_4, __h); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4); __STORE(__h - 14, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0); __STORE(__h - 14, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3); __h++; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1); __STORE(__h - 14, 
__reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4); __h++; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4); __CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2); __STORE(__h - 14, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0); __h++; __LOAD(__reg_0_3, __h); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); __CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3); __STORE(__h - 14, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1); __h++; __DB_SWITCH(); __syncthreads(); } if (0) {} else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4); __STORE(__h - 14, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2, __reg_0_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0); __STORE(__h - 13, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_0_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1); __STORE(__h - 12, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_0_2, __reg_0_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4); __CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2); __STORE(__h - 11, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_0_2); __CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); __CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3); __STORE(__h - 10, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_0_2, __reg_0_3); __CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4); __STORE(__h - 9, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2); __CALC5(__reg_5_0, 
__reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_0_2); __CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0); __STORE(__h - 8, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3); __CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_0_2, __reg_0_3); __CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1); __STORE(__h - 7, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4); __CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_0_2); __STORE(__h - 6, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0); __CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_0_2, __reg_0_3); __STORE(__h - 5, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1); __STORE(__h - 4, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_0_2); __STORE(__h - 3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_0_2, __reg_0_3); } else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_4, __h + 0); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4); __STORE(__h - 14, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0); __STORE(__h - 13, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3, __reg_0_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1); __STORE(__h - 12, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_0_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4); __CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2); __STORE(__h - 11, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_0_3, __reg_0_4); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); __CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3); __STORE(__h - 10, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_0_3); __CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4); __STORE(__h - 9, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_0_3, __reg_0_4); __CALC5(__reg_5_0, __reg_4_3, __reg_4_4, 
__reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0); __STORE(__h - 8, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3); __CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_0_3); __CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1); __STORE(__h - 7, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4); __CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_0_3, __reg_0_4); __CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2); __STORE(__h - 6, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0); __CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_0_3); __STORE(__h - 5, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1); __CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_0_3, __reg_0_4); __STORE(__h - 4, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2); __STORE(__h - 3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_0_3); __STORE(__h - 2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_0_3, __reg_0_4); } else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_4, __h + 0); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4); __STORE(__h - 14, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2); __LOAD(__reg_0_0, __h + 1); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0); __STORE(__h - 13, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1); __STORE(__h - 12, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4, __reg_0_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4); __CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2); __STORE(__h - 11, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_0_4); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); __CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3); __STORE(__h - 10, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1); __CALC3(__reg_3_3, __reg_2_1, 
__reg_2_2, __reg_2_3, __reg_0_4, __reg_0_0); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4); __STORE(__h - 9, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_0_4); __CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0); __STORE(__h - 8, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_0_4, __reg_0_0); __CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1); __STORE(__h - 7, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4); __CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_0_4); __CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2); __STORE(__h - 6, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0); __CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_0_4, __reg_0_0); __CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3); __STORE(__h - 5, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1); __CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_0_4); __STORE(__h - 4, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2); __CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_0_4, __reg_0_0); __STORE(__h - 3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3); __STORE(__h - 2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_0_4); __STORE(__h - 1, __reg_6_1, __reg_6_2, __reg_6_3, __reg_0_4, __reg_0_0); } else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_4, __h + 0); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4); __STORE(__h - 14, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2); __LOAD(__reg_0_0, __h + 1); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0); __STORE(__h - 13, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3); __LOAD(__reg_0_1, __h + 2); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1); __STORE(__h - 12, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4); 
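// Tail of the software pipeline for this remainder case: no further rows are loaded
// past this point; the remaining __CALC*/__STORE calls only drain the time levels
// still held in registers so the last rows of the thread block are written out.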
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4); __CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2); __STORE(__h - 11, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0, __reg_0_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); __CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3); __STORE(__h - 10, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_0_0); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4); __STORE(__h - 9, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_0_0, __reg_0_1); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0); __STORE(__h - 8, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_0_0); __CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1); __STORE(__h - 7, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_0_0, __reg_0_1); __CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4); __CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2); __STORE(__h - 6, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0); __CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_0_0); __CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3); __STORE(__h - 5, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1); __CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_0_0, __reg_0_1); __CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4); __STORE(__h - 4, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2); __CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_0_0); __STORE(__h - 3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3); __CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_0_0, __reg_0_1); __STORE(__h - 2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4); __STORE(__h - 1, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_0_0); __STORE(__h + 0, __reg_6_2, __reg_6_3, __reg_6_4, __reg_0_0, __reg_0_1); } else if (__h + 4 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_4, __h + 0); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __CALC5(__reg_5_4, __reg_4_2, 
__reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4); __STORE(__h - 14, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2); __LOAD(__reg_0_0, __h + 1); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0); __STORE(__h - 13, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3); __LOAD(__reg_0_1, __h + 2); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1); __STORE(__h - 12, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4); __LOAD(__reg_0_2, __h + 3); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4); __CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2); __STORE(__h - 11, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); __CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3); __STORE(__h - 10, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1, __reg_0_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4); __STORE(__h - 9, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_0_1); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0); __STORE(__h - 8, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_0_1, __reg_0_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1); __STORE(__h - 7, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, 
__reg_3_0, __reg_0_1); __CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4); __CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2); __STORE(__h - 6, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_0_1, __reg_0_2); __CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); __CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3); __STORE(__h - 5, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1); __CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_0_1); __CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4); __STORE(__h - 4, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2); __CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_0_1, __reg_0_2); __CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0); __STORE(__h - 3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3); __CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_0_1); __STORE(__h - 2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4); __CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_0_1, __reg_0_2); __STORE(__h - 1, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0); __STORE(__h + 0, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_0_1); __STORE(__h + 1, __reg_6_3, __reg_6_4, __reg_6_0, __reg_0_1, __reg_0_2); } } else { for (__h = 29; __h <= __side1LenOl - 5;) { __LOAD(__reg_0_4, __h); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4); __STORE(__h - 14, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0); __STORE(__h - 14, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3); __h++; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1); __STORE(__h - 14, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4); __h++; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4); 
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2); __STORE(__h - 14, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0); __h++; __LOAD(__reg_0_3, __h); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); __CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3); __STORE(__h - 14, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1); __h++; __DB_SWITCH(); __syncthreads(); } if (__h == __side1LenOl) return; __LOAD(__reg_0_4, __h); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4); __STORE(__h - 14, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0); __STORE(__h - 14, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1); __STORE(__h - 14, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4); __CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2); __STORE(__h - 14, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_3, __h); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); __CALC6(__reg_6_1, 
__reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3); __STORE(__h - 14, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1); __h++; } } __global__ void kernel0_6(float *A, int dimsize, int timestep, int c0) { #ifndef AN5D_TYPE #define AN5D_TYPE unsigned #endif const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0); #define __c0 c0 const AN5D_TYPE __c1Len = (dimsize - 2 - 2); const AN5D_TYPE __c1Pad = (2); #define __c1 c1 const AN5D_TYPE __c2Len = (dimsize - 2 - 2); const AN5D_TYPE __c2Pad = (2); #define __c2 c2 const AN5D_TYPE __halo1 = 2; const AN5D_TYPE __halo2 = 2; const AN5D_TYPE __side0Len = 6; const AN5D_TYPE __side1Len = 512; const AN5D_TYPE __side2Len = 488; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len; const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len; const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x; const AN5D_TYPE __local_c2 = __tid; const AN5D_TYPE __c1Id = blockIdx.x / __side2Num; const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2; float __reg_0_0; float __reg_0_1; float __reg_0_2; float __reg_0_3; float __reg_0_4; float __reg_1_0; float __reg_1_1; float __reg_1_2; float __reg_1_3; float __reg_1_4; float __reg_2_0; float __reg_2_1; float __reg_2_2; float __reg_2_3; float __reg_2_4; float __reg_3_0; float __reg_3_1; float __reg_3_2; float __reg_3_3; float __reg_3_4; float __reg_4_0; float __reg_4_1; float __reg_4_2; float __reg_4_3; float __reg_4_4; float __reg_5_0; float __reg_5_1; float __reg_5_2; float __reg_5_3; float __reg_5_4; __shared__ float __c_sb_double[__blockSize * 2]; float *__c_sb = __c_sb_double; const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2; const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len; const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1); const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2); const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3); const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4); const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5); const AN5D_TYPE __writeValid6 = __updateValid && __local_c2 >= (__halo2 * 6) && __local_c2 < __side2LenOl - (__halo2 * 6); const AN5D_TYPE __storeValid = __writeValid6; AN5D_TYPE __c1; AN5D_TYPE __h; const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id; #define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0) #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR(__rn0, __a, __b, __c, __d, __e) do { __rn0 = (((((((((0.09371f * (__REGREF(__a, 0))) + (0.09374f * (__REGREF(__b, 0)))) + (0.09376f * (__SBREF(__c_sb, -2)))) + (0.09372f * (__SBREF(__c_sb, -1)))) + (0.25001f * (__REGREF(__c, 0)))) + 
(0.09377f * (__SBREF(__c_sb, 1)))) + (0.09373f * (__SBREF(__c_sb, 2)))) + (0.09375f * (__REGREF(__d, 0)))) + (0.09378f * (__REGREF(__e, 0)))); } while (0) #define __DB_SWITCH() do { __c_sb = &__c_sb_double[(__c_sb == __c_sb_double) ? __blockSize : 0]; } while (0) #define __CALCSETUP(a, b, c, d, e) do { __DB_SWITCH(); __c_sb[__tid] = c; __syncthreads(); } while (0) #define __CALC1(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0) #define __CALC2(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0) #define __CALC3(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0) #define __CALC4(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid4) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0) #define __CALC5(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid5) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0) #define __STORE(h, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2, reg3, reg4); } } while (0) if (__c1Id == 0) { __LOAD(__reg_5_0, 0); __LOAD(__reg_5_1, 1); __LOAD(__reg_0_2, 2); __LOAD(__reg_0_3, 3); __LOAD(__reg_0_4, 4); __CALC1(__reg_1_2, __reg_5_0, __reg_5_1, __reg_0_2, __reg_0_3, __reg_0_4); __LOAD(__reg_0_0, 5); __CALC1(__reg_1_3, __reg_5_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __LOAD(__reg_0_1, 6); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_5_0, __reg_5_1, __reg_1_2, __reg_1_3, __reg_1_4); __LOAD(__reg_0_2, 7); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_5_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __LOAD(__reg_0_3, 8); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_5_0, __reg_5_1, __reg_2_2, __reg_2_3, __reg_2_4); __LOAD(__reg_0_4, 9); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_3, __reg_5_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __LOAD(__reg_0_0, 10); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_5_0, __reg_5_1, __reg_3_2, __reg_3_3, __reg_3_4); __LOAD(__reg_0_1, 11); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_3, __reg_5_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __LOAD(__reg_0_2, 12); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, 
__reg_2_1, __reg_2_2, __reg_2_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_5_0, __reg_5_1, __reg_4_2, __reg_4_3, __reg_4_4); __LOAD(__reg_0_3, 13); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_3, __reg_5_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); __LOAD(__reg_0_4, 14); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1); __STORE(2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4); __LOAD(__reg_0_0, 15); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); __STORE(3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0); __LOAD(__reg_0_1, 16); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __STORE(4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1); __LOAD(__reg_0_2, 17); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4); __STORE(5, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2); __LOAD(__reg_0_3, 18); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); __STORE(6, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3); __LOAD(__reg_0_4, 19); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1); __STORE(7, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4); __LOAD(__reg_0_0, 20); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); 
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); __STORE(8, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0); __LOAD(__reg_0_1, 21); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __STORE(9, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1); __LOAD(__reg_0_2, 22); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4); __STORE(10, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2); __LOAD(__reg_0_3, 23); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); __STORE(11, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3); __LOAD(__reg_0_4, 24); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1); __STORE(12, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4); } else { __LOAD(__reg_0_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __LOAD(__reg_0_3, 3); __LOAD(__reg_0_4, 4); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __LOAD(__reg_0_0, 5); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __LOAD(__reg_0_1, 6); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __LOAD(__reg_0_2, 7); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_3, 8); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __LOAD(__reg_0_4, 9); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __LOAD(__reg_0_0, 10); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __LOAD(__reg_0_1, 11); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __LOAD(__reg_0_2, 12); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, 
__reg_2_2, __reg_2_3); __LOAD(__reg_0_3, 13); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __LOAD(__reg_0_4, 14); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __LOAD(__reg_0_0, 15); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __LOAD(__reg_0_1, 16); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __LOAD(__reg_0_2, 17); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __LOAD(__reg_0_3, 18); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __LOAD(__reg_0_4, 19); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __LOAD(__reg_0_0, 20); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); __LOAD(__reg_0_1, 21); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __LOAD(__reg_0_2, 22); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4); __LOAD(__reg_0_3, 23); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, 
__reg_3_1, __reg_3_2); __CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); __LOAD(__reg_0_4, 24); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1); __STORE(12, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4); } __c_sb = __c_sb_double + __blockSize * 0; if (__c1Id == __side1Num - 1) { for (__h = 25; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 5;) { __LOAD(__reg_0_0, __h); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); __STORE(__h - 12, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0); __h++; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __STORE(__h - 12, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1); __h++; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4); __STORE(__h - 12, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2); __h++; __LOAD(__reg_0_3, __h); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); __STORE(__h - 12, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3); __h++; __LOAD(__reg_0_4, __h); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1); __STORE(__h - 12, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4); __h++; } if (0) {} else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); __STORE(__h - 12, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, 
__reg_5_0); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3, __reg_0_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __STORE(__h - 11, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_0_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4); __STORE(__h - 10, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_0_3, __reg_0_4); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); __STORE(__h - 9, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_0_3); __CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1); __STORE(__h - 8, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_0_3, __reg_0_4); __CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); __STORE(__h - 7, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0); __CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_0_3); __STORE(__h - 6, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1); __CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_0_3, __reg_0_4); __STORE(__h - 5, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2); __STORE(__h - 4, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_0_3); __STORE(__h - 3, __reg_5_0, __reg_5_1, __reg_5_2, __reg_0_3, __reg_0_4); } else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_0, __h + 0); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); __STORE(__h - 12, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __STORE(__h - 11, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4, __reg_0_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4); __STORE(__h - 10, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_0_4); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); __STORE(__h - 9, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_0_4, __reg_0_0); 
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1); __STORE(__h - 8, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_0_4); __CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); __STORE(__h - 7, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_0_4, __reg_0_0); __CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __STORE(__h - 6, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1); __CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_0_4); __STORE(__h - 5, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2); __CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_0_4, __reg_0_0); __STORE(__h - 4, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3); __STORE(__h - 3, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_0_4); __STORE(__h - 2, __reg_5_1, __reg_5_2, __reg_5_3, __reg_0_4, __reg_0_0); } else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_0, __h + 0); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); __STORE(__h - 12, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0); __LOAD(__reg_0_1, __h + 1); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __STORE(__h - 11, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4); __STORE(__h - 10, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0, __reg_0_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); __STORE(__h - 9, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_0_0); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1); __STORE(__h - 8, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_0_0, __reg_0_1); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); __STORE(__h - 7, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_0_0); 
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __STORE(__h - 6, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_0_0, __reg_0_1); __CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4); __STORE(__h - 5, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2); __CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_0_0); __STORE(__h - 4, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3); __CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_0_0, __reg_0_1); __STORE(__h - 3, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4); __STORE(__h - 2, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_0_0); __STORE(__h - 1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_0_0, __reg_0_1); } else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_0, __h + 0); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); __STORE(__h - 12, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0); __LOAD(__reg_0_1, __h + 1); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __STORE(__h - 11, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1); __LOAD(__reg_0_2, __h + 2); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4); __STORE(__h - 10, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); __STORE(__h - 9, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1, __reg_0_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1); __STORE(__h - 8, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_0_1); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); __STORE(__h - 7, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_0_1, __reg_0_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __CALC5(__reg_5_1, __reg_4_4, __reg_4_0, 
__reg_4_1, __reg_4_2, __reg_4_3); __STORE(__h - 6, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_0_1); __CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4); __STORE(__h - 5, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_0_1, __reg_0_2); __CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); __STORE(__h - 4, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3); __CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_0_1); __STORE(__h - 3, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4); __CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_0_1, __reg_0_2); __STORE(__h - 2, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0); __STORE(__h - 1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_0_1); __STORE(__h + 0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_0_1, __reg_0_2); } else if (__h + 4 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_0, __h + 0); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); __STORE(__h - 12, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0); __LOAD(__reg_0_1, __h + 1); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __STORE(__h - 11, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1); __LOAD(__reg_0_2, __h + 2); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4); __STORE(__h - 10, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2); __LOAD(__reg_0_3, __h + 3); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); __STORE(__h - 9, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1); __STORE(__h - 8, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2, __reg_0_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, 
__reg_3_3, __reg_3_4); __CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); __STORE(__h - 7, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_0_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __STORE(__h - 6, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_0_2, __reg_0_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4); __STORE(__h - 5, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_0_2); __CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); __STORE(__h - 4, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_0_2, __reg_0_3); __CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1); __STORE(__h - 3, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4); __CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_0_2); __STORE(__h - 2, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0); __CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_0_2, __reg_0_3); __STORE(__h - 1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1); __STORE(__h + 0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_0_2); __STORE(__h + 1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_0_2, __reg_0_3); } } else { for (__h = 25; __h <= __side1LenOl - 5;) { __LOAD(__reg_0_0, __h); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); __STORE(__h - 12, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0); __h++; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __STORE(__h - 12, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1); __h++; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4); __STORE(__h - 12, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2); __h++; __LOAD(__reg_0_3, __h); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); 
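/* Interior tiles (else branch): the steady-state loop streams five unrolled rows per iteration through the full pipeline (__CALC1..__CALC5, with the final application done in __STORE) and writes each result 12 rows behind the current load position, i.e. __halo1 * __side0Len for this kernel, which fuses six time steps (note the __reg_5_* stage and the __h - 12 store offset); the guarded "if (__h == __side1LenOl) return;" steps that follow flush the last partial iteration. */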
__STORE(__h - 12, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3); __h++; __LOAD(__reg_0_4, __h); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1); __STORE(__h - 12, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4); __h++; } if (__h == __side1LenOl) return; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); __STORE(__h - 12, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __STORE(__h - 12, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4); __STORE(__h - 12, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_3, __h); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); __STORE(__h - 12, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_4, __h); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1); __STORE(__h - 12, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4); __h++; } } __global__ void kernel0_5(float *A, int dimsize, int timestep, int c0) { #ifndef AN5D_TYPE #define AN5D_TYPE unsigned #endif const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0); #define __c0 c0 const AN5D_TYPE __c1Len = (dimsize - 2 - 2); const AN5D_TYPE __c1Pad = (2); #define __c1 c1 const AN5D_TYPE __c2Len = (dimsize - 2 - 2); const AN5D_TYPE __c2Pad = (2); #define __c2 c2 const AN5D_TYPE __halo1 
= 2; const AN5D_TYPE __halo2 = 2; const AN5D_TYPE __side0Len = 5; const AN5D_TYPE __side1Len = 512; const AN5D_TYPE __side2Len = 492; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len; const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len; const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x; const AN5D_TYPE __local_c2 = __tid; const AN5D_TYPE __c1Id = blockIdx.x / __side2Num; const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2; float __reg_0_0; float __reg_0_1; float __reg_0_2; float __reg_0_3; float __reg_0_4; float __reg_1_0; float __reg_1_1; float __reg_1_2; float __reg_1_3; float __reg_1_4; float __reg_2_0; float __reg_2_1; float __reg_2_2; float __reg_2_3; float __reg_2_4; float __reg_3_0; float __reg_3_1; float __reg_3_2; float __reg_3_3; float __reg_3_4; float __reg_4_0; float __reg_4_1; float __reg_4_2; float __reg_4_3; float __reg_4_4; __shared__ float __c_sb_double[__blockSize * 2]; float *__c_sb = __c_sb_double; const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2; const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len; const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1); const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2); const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3); const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4); const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5); const AN5D_TYPE __storeValid = __writeValid5; AN5D_TYPE __c1; AN5D_TYPE __h; const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id; #define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0) #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR(__rn0, __a, __b, __c, __d, __e) do { __rn0 = (((((((((0.09371f * (__REGREF(__a, 0))) + (0.09374f * (__REGREF(__b, 0)))) + (0.09376f * (__SBREF(__c_sb, -2)))) + (0.09372f * (__SBREF(__c_sb, -1)))) + (0.25001f * (__REGREF(__c, 0)))) + (0.09377f * (__SBREF(__c_sb, 1)))) + (0.09373f * (__SBREF(__c_sb, 2)))) + (0.09375f * (__REGREF(__d, 0)))) + (0.09378f * (__REGREF(__e, 0)))); } while (0) #define __DB_SWITCH() do { __c_sb = &__c_sb_double[(__c_sb == __c_sb_double) ? 
__blockSize : 0]; } while (0) #define __CALCSETUP(a, b, c, d, e) do { __DB_SWITCH(); __c_sb[__tid] = c; __syncthreads(); } while (0) #define __CALC1(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0) #define __CALC2(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0) #define __CALC3(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0) #define __CALC4(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid4) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0) #define __STORE(h, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2, reg3, reg4); } } while (0) if (__c1Id == 0) { __LOAD(__reg_4_0, 0); __LOAD(__reg_4_1, 1); __LOAD(__reg_0_2, 2); __LOAD(__reg_0_3, 3); __LOAD(__reg_0_4, 4); __CALC1(__reg_1_2, __reg_4_0, __reg_4_1, __reg_0_2, __reg_0_3, __reg_0_4); __LOAD(__reg_0_0, 5); __CALC1(__reg_1_3, __reg_4_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __LOAD(__reg_0_1, 6); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_4_0, __reg_4_1, __reg_1_2, __reg_1_3, __reg_1_4); __LOAD(__reg_0_2, 7); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_4_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __LOAD(__reg_0_3, 8); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_4_0, __reg_4_1, __reg_2_2, __reg_2_3, __reg_2_4); __LOAD(__reg_0_4, 9); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_3, __reg_4_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __LOAD(__reg_0_0, 10); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_4_0, __reg_4_1, __reg_3_2, __reg_3_3, __reg_3_4); __LOAD(__reg_0_1, 11); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_3, __reg_4_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __LOAD(__reg_0_2, 12); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __STORE(2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4); __LOAD(__reg_0_3, 13); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); 
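/* kernel0_5 fuses __side0Len = 5 stencil time steps per launch: each thread keeps a rolling five-row window per pipeline stage (registers __reg_<stage>_0 .. __reg_<stage>_4), the c2-direction neighbours at offsets +/-1 and +/-2 are read from the double-buffered shared row __c_sb, and __STORE applies the fifth and final step, writing into the opposite time-parity plane of A. In the __c1Id == 0 branch above, rows 0 and 1 are loaded straight into the last-stage registers (__reg_4_0, __reg_4_1) so the top boundary rows are carried through as halo values. */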
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __STORE(3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); __LOAD(__reg_0_4, 14); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __STORE(4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1); __LOAD(__reg_0_0, 15); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __STORE(5, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); __LOAD(__reg_0_1, 16); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __STORE(6, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __LOAD(__reg_0_2, 17); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __STORE(7, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4); __LOAD(__reg_0_3, 18); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __STORE(8, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); __LOAD(__reg_0_4, 19); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __STORE(9, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1); __LOAD(__reg_0_0, 20); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __STORE(10, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); } else { __LOAD(__reg_0_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __LOAD(__reg_0_3, 3); __LOAD(__reg_0_4, 4); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __LOAD(__reg_0_0, 5); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __LOAD(__reg_0_1, 6); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __LOAD(__reg_0_2, 7); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_3, 8); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __LOAD(__reg_0_4, 9); 
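/* Tiles with __c1Id > 0 recompute the overlap with their upper neighbour: this prologue loads h = 0..20 and runs the rows through the pipeline, but only the row at h = 10 (= __OlLen1 = __halo1 * __side0Len) is stored here, since the rows above it are produced by the neighbouring tile. */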
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __LOAD(__reg_0_0, 10); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __LOAD(__reg_0_1, 11); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __LOAD(__reg_0_2, 12); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __LOAD(__reg_0_3, 13); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __LOAD(__reg_0_4, 14); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __LOAD(__reg_0_0, 15); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __LOAD(__reg_0_1, 16); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __LOAD(__reg_0_2, 17); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __LOAD(__reg_0_3, 18); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __LOAD(__reg_0_4, 19); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __LOAD(__reg_0_0, 20); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __STORE(10, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); } __c_sb = __c_sb_double + __blockSize * 1; if (__c1Id == __side1Num - 1) { for (__h = 21; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 5;) { __LOAD(__reg_0_1, __h); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, 
__reg_2_1, __reg_2_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __STORE(__h - 10, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __h++; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __STORE(__h - 10, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4); __h++; __LOAD(__reg_0_3, __h); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __STORE(__h - 10, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); __h++; __LOAD(__reg_0_4, __h); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __STORE(__h - 10, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __STORE(__h - 10, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); __h++; __DB_SWITCH(); __syncthreads(); } if (0) {} else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __STORE(__h - 10, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4, __reg_0_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __STORE(__h - 9, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_0_4); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __STORE(__h - 8, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_0_4, __reg_0_0); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __STORE(__h - 7, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_0_4); __STORE(__h - 6, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_0_4, __reg_0_0); __STORE(__h - 5, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __STORE(__h - 4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_0_4); __STORE(__h - 3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_0_4, __reg_0_0); } else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_1, __h + 0); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, 
__reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __STORE(__h - 10, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __STORE(__h - 9, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0, __reg_0_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __STORE(__h - 8, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_0_0); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __STORE(__h - 7, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_0_0, __reg_0_1); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __STORE(__h - 6, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_0_0); __STORE(__h - 5, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_0_0, __reg_0_1); __STORE(__h - 4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4); __STORE(__h - 3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_0_0); __STORE(__h - 2, __reg_4_2, __reg_4_3, __reg_4_4, __reg_0_0, __reg_0_1); } else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_1, __h + 0); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __STORE(__h - 10, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __LOAD(__reg_0_2, __h + 1); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __STORE(__h - 9, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __STORE(__h - 8, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1, __reg_0_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __STORE(__h - 7, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_0_1); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __STORE(__h - 6, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_0_1, 
__reg_0_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __STORE(__h - 5, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_0_1); __STORE(__h - 4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_0_1, __reg_0_2); __STORE(__h - 3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); __STORE(__h - 2, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_0_1); __STORE(__h - 1, __reg_4_3, __reg_4_4, __reg_4_0, __reg_0_1, __reg_0_2); } else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_1, __h + 0); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __STORE(__h - 10, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __LOAD(__reg_0_2, __h + 1); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __STORE(__h - 9, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4); __LOAD(__reg_0_3, __h + 2); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __STORE(__h - 8, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __STORE(__h - 7, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2, __reg_0_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __STORE(__h - 6, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_0_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __STORE(__h - 5, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_0_2, __reg_0_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __STORE(__h - 4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_0_2); __STORE(__h - 3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_0_2, __reg_0_3); __STORE(__h - 2, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1); __STORE(__h - 1, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_0_2); __STORE(__h + 0, __reg_4_4, __reg_4_0, __reg_4_1, __reg_0_2, __reg_0_3); } else if (__h + 4 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_1, __h + 0); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, 
__reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __STORE(__h - 10, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __LOAD(__reg_0_2, __h + 1); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __STORE(__h - 9, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4); __LOAD(__reg_0_3, __h + 2); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __STORE(__h - 8, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); __LOAD(__reg_0_4, __h + 3); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __STORE(__h - 7, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __STORE(__h - 6, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3, __reg_0_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __STORE(__h - 5, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_0_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __STORE(__h - 4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_0_3, __reg_0_4); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __STORE(__h - 3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_0_3); __STORE(__h - 2, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_0_3, __reg_0_4); __STORE(__h - 1, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); __STORE(__h + 0, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_0_3); __STORE(__h + 1, __reg_4_0, __reg_4_1, __reg_4_2, __reg_0_3, __reg_0_4); } } else { for (__h = 21; __h <= __side1LenOl - 5;) { __LOAD(__reg_0_1, __h); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __STORE(__h - 10, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __h++; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, 
__reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __STORE(__h - 10, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4); __h++; __LOAD(__reg_0_3, __h); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __STORE(__h - 10, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); __h++; __LOAD(__reg_0_4, __h); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __STORE(__h - 10, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __STORE(__h - 10, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); __h++; __DB_SWITCH(); __syncthreads(); } if (__h == __side1LenOl) return; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __STORE(__h - 10, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __STORE(__h - 10, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_3, __h); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __STORE(__h - 10, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_4, __h); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __STORE(__h - 10, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, 
__reg_2_1); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __STORE(__h - 10, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); __h++; } } __global__ void kernel0_4(float *A, int dimsize, int timestep, int c0) { #ifndef AN5D_TYPE #define AN5D_TYPE unsigned #endif const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0); #define __c0 c0 const AN5D_TYPE __c1Len = (dimsize - 2 - 2); const AN5D_TYPE __c1Pad = (2); #define __c1 c1 const AN5D_TYPE __c2Len = (dimsize - 2 - 2); const AN5D_TYPE __c2Pad = (2); #define __c2 c2 const AN5D_TYPE __halo1 = 2; const AN5D_TYPE __halo2 = 2; const AN5D_TYPE __side0Len = 4; const AN5D_TYPE __side1Len = 512; const AN5D_TYPE __side2Len = 496; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len; const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len; const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x; const AN5D_TYPE __local_c2 = __tid; const AN5D_TYPE __c1Id = blockIdx.x / __side2Num; const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2; float __reg_0_0; float __reg_0_1; float __reg_0_2; float __reg_0_3; float __reg_0_4; float __reg_1_0; float __reg_1_1; float __reg_1_2; float __reg_1_3; float __reg_1_4; float __reg_2_0; float __reg_2_1; float __reg_2_2; float __reg_2_3; float __reg_2_4; float __reg_3_0; float __reg_3_1; float __reg_3_2; float __reg_3_3; float __reg_3_4; __shared__ float __c_sb_double[__blockSize * 2]; float *__c_sb = __c_sb_double; const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2; const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len; const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1); const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2); const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3); const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4); const AN5D_TYPE __storeValid = __writeValid4; AN5D_TYPE __c1; AN5D_TYPE __h; const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id; #define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0) #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR(__rn0, __a, __b, __c, __d, __e) do { __rn0 = (((((((((0.09371f * (__REGREF(__a, 0))) + (0.09374f * (__REGREF(__b, 0)))) + (0.09376f * (__SBREF(__c_sb, -2)))) + (0.09372f * (__SBREF(__c_sb, -1)))) + (0.25001f * (__REGREF(__c, 0)))) + (0.09377f * (__SBREF(__c_sb, 1)))) + (0.09373f * (__SBREF(__c_sb, 2)))) + (0.09375f * (__REGREF(__d, 0)))) + (0.09378f * (__REGREF(__e, 0)))); } while (0) #define __DB_SWITCH() do { __c_sb = &__c_sb_double[(__c_sb == __c_sb_double) ? 
__blockSize : 0]; } while (0) #define __CALCSETUP(a, b, c, d, e) do { __DB_SWITCH(); __c_sb[__tid] = c; __syncthreads(); } while (0) #define __CALC1(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0) #define __CALC2(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0) #define __CALC3(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0) #define __STORE(h, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2, reg3, reg4); } } while (0) if (__c1Id == 0) { __LOAD(__reg_3_0, 0); __LOAD(__reg_3_1, 1); __LOAD(__reg_0_2, 2); __LOAD(__reg_0_3, 3); __LOAD(__reg_0_4, 4); __CALC1(__reg_1_2, __reg_3_0, __reg_3_1, __reg_0_2, __reg_0_3, __reg_0_4); __LOAD(__reg_0_0, 5); __CALC1(__reg_1_3, __reg_3_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __LOAD(__reg_0_1, 6); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_3_0, __reg_3_1, __reg_1_2, __reg_1_3, __reg_1_4); __LOAD(__reg_0_2, 7); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_3_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __LOAD(__reg_0_3, 8); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_3_0, __reg_3_1, __reg_2_2, __reg_2_3, __reg_2_4); __LOAD(__reg_0_4, 9); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_3, __reg_3_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __LOAD(__reg_0_0, 10); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __STORE(2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __LOAD(__reg_0_1, 11); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __STORE(3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __LOAD(__reg_0_2, 12); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __STORE(4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __LOAD(__reg_0_3, 13); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __STORE(5, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __LOAD(__reg_0_4, 14); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, 
__reg_2_4, __reg_2_0); __STORE(6, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __LOAD(__reg_0_0, 15); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __STORE(7, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __LOAD(__reg_0_1, 16); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __STORE(8, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); } else { __LOAD(__reg_0_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __LOAD(__reg_0_3, 3); __LOAD(__reg_0_4, 4); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __LOAD(__reg_0_0, 5); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __LOAD(__reg_0_1, 6); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __LOAD(__reg_0_2, 7); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_3, 8); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __LOAD(__reg_0_4, 9); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __LOAD(__reg_0_0, 10); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __LOAD(__reg_0_1, 11); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __LOAD(__reg_0_2, 12); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __LOAD(__reg_0_3, 13); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __LOAD(__reg_0_4, 14); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __LOAD(__reg_0_0, 15); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __LOAD(__reg_0_1, 16); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __STORE(8, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); } __c_sb = __c_sb_double + __blockSize * 0; if (__c1Id == __side1Num - 1) { for (__h = 17; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 5;) { __LOAD(__reg_0_2, __h); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __CALC3(__reg_3_1, 
__reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __STORE(__h - 8, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __h++; __LOAD(__reg_0_3, __h); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __STORE(__h - 8, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __h++; __LOAD(__reg_0_4, __h); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __STORE(__h - 8, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __STORE(__h - 8, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __h++; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __STORE(__h - 8, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __h++; } if (0) {} else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __STORE(__h - 8, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0, __reg_0_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __STORE(__h - 7, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_0_0); __STORE(__h - 6, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_0_0, __reg_0_1); __STORE(__h - 5, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __STORE(__h - 4, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_0_0); __STORE(__h - 3, __reg_3_2, __reg_3_3, __reg_3_4, __reg_0_0, __reg_0_1); } else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_2, __h + 0); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __STORE(__h - 8, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __STORE(__h - 7, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1, __reg_0_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __STORE(__h - 6, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_0_1); __STORE(__h - 5, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_0_1, __reg_0_2); __STORE(__h - 4, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, 
__reg_3_0); __STORE(__h - 3, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_0_1); __STORE(__h - 2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_0_1, __reg_0_2); } else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_2, __h + 0); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __STORE(__h - 8, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __LOAD(__reg_0_3, __h + 1); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __STORE(__h - 7, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __STORE(__h - 6, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2, __reg_0_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __STORE(__h - 5, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_0_2); __STORE(__h - 4, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_0_2, __reg_0_3); __STORE(__h - 3, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __STORE(__h - 2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_0_2); __STORE(__h - 1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_0_2, __reg_0_3); } else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_2, __h + 0); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __STORE(__h - 8, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __LOAD(__reg_0_3, __h + 1); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __STORE(__h - 7, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __LOAD(__reg_0_4, __h + 2); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __STORE(__h - 6, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __STORE(__h - 5, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3, __reg_0_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __STORE(__h - 4, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_0_3); __STORE(__h - 3, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_0_3, __reg_0_4); __STORE(__h - 2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, 
__reg_3_2); __STORE(__h - 1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_0_3); __STORE(__h + 0, __reg_3_0, __reg_3_1, __reg_3_2, __reg_0_3, __reg_0_4); } else if (__h + 4 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_2, __h + 0); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __STORE(__h - 8, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __LOAD(__reg_0_3, __h + 1); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __STORE(__h - 7, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __LOAD(__reg_0_4, __h + 2); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __STORE(__h - 6, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __LOAD(__reg_0_0, __h + 3); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __STORE(__h - 5, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __STORE(__h - 4, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4, __reg_0_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __STORE(__h - 3, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_0_4); __STORE(__h - 2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_0_4, __reg_0_0); __STORE(__h - 1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __STORE(__h + 0, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_0_4); __STORE(__h + 1, __reg_3_1, __reg_3_2, __reg_3_3, __reg_0_4, __reg_0_0); } } else { for (__h = 17; __h <= __side1LenOl - 5;) { __LOAD(__reg_0_2, __h); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __STORE(__h - 8, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __h++; __LOAD(__reg_0_3, __h); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __STORE(__h - 8, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __h++; __LOAD(__reg_0_4, __h); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __STORE(__h - 8, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, 
__reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __STORE(__h - 8, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __h++; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __STORE(__h - 8, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __h++; } if (__h == __side1LenOl) return; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __STORE(__h - 8, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_3, __h); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __STORE(__h - 8, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_4, __h); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __STORE(__h - 8, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __STORE(__h - 8, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __STORE(__h - 8, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __h++; } } __global__ void kernel0_3(float *A, int dimsize, int timestep, int c0) { #ifndef AN5D_TYPE #define AN5D_TYPE unsigned #endif const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0); #define __c0 c0 const AN5D_TYPE __c1Len = (dimsize - 2 - 2); const AN5D_TYPE __c1Pad = (2); #define __c1 c1 const AN5D_TYPE __c2Len = (dimsize - 2 - 2); const AN5D_TYPE __c2Pad = (2); #define __c2 c2 const AN5D_TYPE __halo1 = 2; const AN5D_TYPE __halo2 = 2; const AN5D_TYPE __side0Len = 3; const AN5D_TYPE __side1Len = 512; const AN5D_TYPE __side2Len = 500; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len; const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len; const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x; const AN5D_TYPE __local_c2 = __tid; const AN5D_TYPE __c1Id = blockIdx.x / __side2Num; const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * 
__side2Len + __local_c2 + __c2Pad - __OlLen2; float __reg_0_0; float __reg_0_1; float __reg_0_2; float __reg_0_3; float __reg_0_4; float __reg_1_0; float __reg_1_1; float __reg_1_2; float __reg_1_3; float __reg_1_4; float __reg_2_0; float __reg_2_1; float __reg_2_2; float __reg_2_3; float __reg_2_4; __shared__ float __c_sb_double[__blockSize * 2]; float *__c_sb = __c_sb_double; const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2; const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len; const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1); const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2); const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3); const AN5D_TYPE __storeValid = __writeValid3; AN5D_TYPE __c1; AN5D_TYPE __h; const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id; #define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0) #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR(__rn0, __a, __b, __c, __d, __e) do { __rn0 = (((((((((0.09371f * (__REGREF(__a, 0))) + (0.09374f * (__REGREF(__b, 0)))) + (0.09376f * (__SBREF(__c_sb, -2)))) + (0.09372f * (__SBREF(__c_sb, -1)))) + (0.25001f * (__REGREF(__c, 0)))) + (0.09377f * (__SBREF(__c_sb, 1)))) + (0.09373f * (__SBREF(__c_sb, 2)))) + (0.09375f * (__REGREF(__d, 0)))) + (0.09378f * (__REGREF(__e, 0)))); } while (0) #define __DB_SWITCH() do { __c_sb = &__c_sb_double[(__c_sb == __c_sb_double) ? 
__blockSize : 0]; } while (0) #define __CALCSETUP(a, b, c, d, e) do { __DB_SWITCH(); __c_sb[__tid] = c; __syncthreads(); } while (0) #define __CALC1(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0) #define __CALC2(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0) #define __STORE(h, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2, reg3, reg4); } } while (0) if (__c1Id == 0) { __LOAD(__reg_2_0, 0); __LOAD(__reg_2_1, 1); __LOAD(__reg_0_2, 2); __LOAD(__reg_0_3, 3); __LOAD(__reg_0_4, 4); __CALC1(__reg_1_2, __reg_2_0, __reg_2_1, __reg_0_2, __reg_0_3, __reg_0_4); __LOAD(__reg_0_0, 5); __CALC1(__reg_1_3, __reg_2_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __LOAD(__reg_0_1, 6); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_2_0, __reg_2_1, __reg_1_2, __reg_1_3, __reg_1_4); __LOAD(__reg_0_2, 7); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_2_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __LOAD(__reg_0_3, 8); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __STORE(2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __LOAD(__reg_0_4, 9); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __STORE(3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __LOAD(__reg_0_0, 10); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __STORE(4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __LOAD(__reg_0_1, 11); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __STORE(5, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __LOAD(__reg_0_2, 12); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __STORE(6, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); } else { __LOAD(__reg_0_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __LOAD(__reg_0_3, 3); __LOAD(__reg_0_4, 4); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __LOAD(__reg_0_0, 5); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __LOAD(__reg_0_1, 6); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __LOAD(__reg_0_2, 7); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_3, 8); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __LOAD(__reg_0_4, 9); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __LOAD(__reg_0_0, 10); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, 
__reg_1_2, __reg_1_3); __LOAD(__reg_0_1, 11); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __LOAD(__reg_0_2, 12); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __STORE(6, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); } __c_sb = __c_sb_double + __blockSize * 1; if (__c1Id == __side1Num - 1) { for (__h = 13; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 5;) { __LOAD(__reg_0_3, __h); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __STORE(__h - 6, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __h++; __LOAD(__reg_0_4, __h); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __STORE(__h - 6, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __STORE(__h - 6, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __h++; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __STORE(__h - 6, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __h++; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __STORE(__h - 6, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __h++; __DB_SWITCH(); __syncthreads(); } if (0) {} else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1); __STORE(__h - 6, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1, __reg_0_2); __STORE(__h - 5, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __STORE(__h - 4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_0_1); __STORE(__h - 3, __reg_2_3, __reg_2_4, __reg_2_0, __reg_0_1, __reg_0_2); } else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_3, __h + 0); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __STORE(__h - 6, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2); __STORE(__h - 5, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2, __reg_0_3); __STORE(__h - 4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __STORE(__h - 3, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_0_2); __STORE(__h - 2, __reg_2_4, __reg_2_0, __reg_2_1, __reg_0_2, __reg_0_3); } else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_3, __h + 0); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __STORE(__h - 6, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __LOAD(__reg_0_4, __h + 1); __CALC1(__reg_1_2, __reg_0_0, 
__reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __STORE(__h - 5, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3); __STORE(__h - 4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3, __reg_0_4); __STORE(__h - 3, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __STORE(__h - 2, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_0_3); __STORE(__h - 1, __reg_2_0, __reg_2_1, __reg_2_2, __reg_0_3, __reg_0_4); } else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_3, __h + 0); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __STORE(__h - 6, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __LOAD(__reg_0_4, __h + 1); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __STORE(__h - 5, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __LOAD(__reg_0_0, __h + 2); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __STORE(__h - 4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4); __STORE(__h - 3, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4, __reg_0_0); __STORE(__h - 2, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __STORE(__h - 1, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_0_4); __STORE(__h + 0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_0_4, __reg_0_0); } else if (__h + 4 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_3, __h + 0); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __STORE(__h - 6, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __LOAD(__reg_0_4, __h + 1); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __STORE(__h - 5, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __LOAD(__reg_0_0, __h + 2); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __STORE(__h - 4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __LOAD(__reg_0_1, __h + 3); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __STORE(__h - 3, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0); __STORE(__h - 2, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0, __reg_0_1); __STORE(__h - 1, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __STORE(__h + 0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_0_0); __STORE(__h + 1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_0_0, __reg_0_1); } } else { for (__h = 13; __h <= __side1LenOl - 5;) { __LOAD(__reg_0_3, __h); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, 
__reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __STORE(__h - 6, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __h++; __LOAD(__reg_0_4, __h); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __STORE(__h - 6, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __STORE(__h - 6, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __h++; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __STORE(__h - 6, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __h++; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __STORE(__h - 6, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __h++; __DB_SWITCH(); __syncthreads(); } if (__h == __side1LenOl) return; __LOAD(__reg_0_3, __h); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __STORE(__h - 6, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_4, __h); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __STORE(__h - 6, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __STORE(__h - 6, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __STORE(__h - 6, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __STORE(__h - 6, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __h++; } } __global__ void kernel0_2(float *A, int dimsize, int timestep, int c0) { #ifndef AN5D_TYPE #define AN5D_TYPE unsigned #endif const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0); #define __c0 c0 const AN5D_TYPE __c1Len = (dimsize - 2 - 2); const AN5D_TYPE __c1Pad = (2); #define __c1 c1 const AN5D_TYPE __c2Len = (dimsize - 2 - 2); const AN5D_TYPE __c2Pad = (2); #define __c2 c2 const AN5D_TYPE __halo1 = 2; const AN5D_TYPE __halo2 = 2; const AN5D_TYPE __side0Len = 2; const AN5D_TYPE __side1Len = 512; const AN5D_TYPE __side2Len = 504; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len; 
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len; const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x; const AN5D_TYPE __local_c2 = __tid; const AN5D_TYPE __c1Id = blockIdx.x / __side2Num; const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2; float __reg_0_0; float __reg_0_1; float __reg_0_2; float __reg_0_3; float __reg_0_4; float __reg_1_0; float __reg_1_1; float __reg_1_2; float __reg_1_3; float __reg_1_4; __shared__ float __c_sb_double[__blockSize * 2]; float *__c_sb = __c_sb_double; const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2; const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len; const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1); const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2); const AN5D_TYPE __storeValid = __writeValid2; AN5D_TYPE __c1; AN5D_TYPE __h; const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id; #define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0) #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR(__rn0, __a, __b, __c, __d, __e) do { __rn0 = (((((((((0.09371f * (__REGREF(__a, 0))) + (0.09374f * (__REGREF(__b, 0)))) + (0.09376f * (__SBREF(__c_sb, -2)))) + (0.09372f * (__SBREF(__c_sb, -1)))) + (0.25001f * (__REGREF(__c, 0)))) + (0.09377f * (__SBREF(__c_sb, 1)))) + (0.09373f * (__SBREF(__c_sb, 2)))) + (0.09375f * (__REGREF(__d, 0)))) + (0.09378f * (__REGREF(__e, 0)))); } while (0) #define __DB_SWITCH() do { __c_sb = &__c_sb_double[(__c_sb == __c_sb_double) ? 
__blockSize : 0]; } while (0) #define __CALCSETUP(a, b, c, d, e) do { __DB_SWITCH(); __c_sb[__tid] = c; __syncthreads(); } while (0) #define __CALC1(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0) #define __STORE(h, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2, reg3, reg4); } } while (0) if (__c1Id == 0) { __LOAD(__reg_1_0, 0); __LOAD(__reg_1_1, 1); __LOAD(__reg_0_2, 2); __LOAD(__reg_0_3, 3); __LOAD(__reg_0_4, 4); __CALC1(__reg_1_2, __reg_1_0, __reg_1_1, __reg_0_2, __reg_0_3, __reg_0_4); __LOAD(__reg_0_0, 5); __CALC1(__reg_1_3, __reg_1_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __LOAD(__reg_0_1, 6); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __STORE(2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __LOAD(__reg_0_2, 7); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __STORE(3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __LOAD(__reg_0_3, 8); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __STORE(4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); } else { __LOAD(__reg_0_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __LOAD(__reg_0_3, 3); __LOAD(__reg_0_4, 4); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __LOAD(__reg_0_0, 5); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __LOAD(__reg_0_1, 6); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __LOAD(__reg_0_2, 7); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_3, 8); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __STORE(4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); } __c_sb = __c_sb_double + __blockSize * 0; if (__c1Id == __side1Num - 1) { for (__h = 9; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 5;) { __LOAD(__reg_0_4, __h); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __STORE(__h - 4, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __STORE(__h - 4, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __h++; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __STORE(__h - 4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __h++; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __STORE(__h - 4, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __h++; __LOAD(__reg_0_3, __h); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __STORE(__h - 4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __h++; } if (0) {} else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __STORE(__h - 4, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2); __STORE(__h - 3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2, __reg_0_3); } else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_4, __h + 0); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __STORE(__h - 4, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __STORE(__h - 3, __reg_1_4, __reg_1_0, __reg_1_1, 
__reg_1_2, __reg_0_3); __STORE(__h - 2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3, __reg_0_4); } else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_4, __h + 0); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __STORE(__h - 4, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __LOAD(__reg_0_0, __h + 1); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __STORE(__h - 3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __STORE(__h - 2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4); __STORE(__h - 1, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4, __reg_0_0); } else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_4, __h + 0); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __STORE(__h - 4, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __LOAD(__reg_0_0, __h + 1); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __STORE(__h - 3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __LOAD(__reg_0_1, __h + 2); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __STORE(__h - 2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __STORE(__h - 1, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0); __STORE(__h + 0, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0, __reg_0_1); } else if (__h + 4 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_4, __h + 0); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __STORE(__h - 4, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __LOAD(__reg_0_0, __h + 1); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __STORE(__h - 3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __LOAD(__reg_0_1, __h + 2); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __STORE(__h - 2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __LOAD(__reg_0_2, __h + 3); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __STORE(__h - 1, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __STORE(__h + 0, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1); __STORE(__h + 1, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1, __reg_0_2); } } else { for (__h = 9; __h <= __side1LenOl - 5;) { __LOAD(__reg_0_4, __h); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __STORE(__h - 4, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __STORE(__h - 4, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __h++; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __STORE(__h - 4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __h++; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __STORE(__h - 4, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __h++; __LOAD(__reg_0_3, __h); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __STORE(__h - 4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __h++; } if (__h == __side1LenOl) return; __LOAD(__reg_0_4, __h); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __STORE(__h - 4, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_0, __h); 
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __STORE(__h - 4, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __STORE(__h - 4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __STORE(__h - 4, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_3, __h); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __STORE(__h - 4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __h++; } } __global__ void kernel0_1(float *A, int dimsize, int timestep, int c0) { #ifndef AN5D_TYPE #define AN5D_TYPE unsigned #endif const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0); #define __c0 c0 const AN5D_TYPE __c1Len = (dimsize - 2 - 2); const AN5D_TYPE __c1Pad = (2); #define __c1 c1 const AN5D_TYPE __c2Len = (dimsize - 2 - 2); const AN5D_TYPE __c2Pad = (2); #define __c2 c2 const AN5D_TYPE __halo1 = 2; const AN5D_TYPE __halo2 = 2; const AN5D_TYPE __side0Len = 1; const AN5D_TYPE __side1Len = 512; const AN5D_TYPE __side2Len = 508; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len; const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len; const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x; const AN5D_TYPE __local_c2 = __tid; const AN5D_TYPE __c1Id = blockIdx.x / __side2Num; const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2; float __reg_0_0; float __reg_0_1; float __reg_0_2; float __reg_0_3; float __reg_0_4; __shared__ float __c_sb_double[__blockSize * 2]; float *__c_sb = __c_sb_double; const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2; const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len; const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1); const AN5D_TYPE __storeValid = __writeValid1; AN5D_TYPE __c1; AN5D_TYPE __h; const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id; #define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0) #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR(__rn0, __a, __b, __c, __d, __e) do { __rn0 = (((((((((0.09371f * (__REGREF(__a, 0))) + (0.09374f * (__REGREF(__b, 0)))) + (0.09376f * (__SBREF(__c_sb, -2)))) + (0.09372f * (__SBREF(__c_sb, -1)))) + (0.25001f * (__REGREF(__c, 0)))) + (0.09377f * (__SBREF(__c_sb, 1)))) + (0.09373f * (__SBREF(__c_sb, 2)))) + (0.09375f * (__REGREF(__d, 0)))) + (0.09378f * (__REGREF(__e, 0)))); } while (0) #define __DB_SWITCH() do { __c_sb = &__c_sb_double[(__c_sb == __c_sb_double) ? 
__blockSize : 0]; } while (0) #define __CALCSETUP(a, b, c, d, e) do { __DB_SWITCH(); __c_sb[__tid] = c; __syncthreads(); } while (0) #define __STORE(h, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2, reg3, reg4); } } while (0) if (__c1Id == 0) { __LOAD(__reg_0_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __LOAD(__reg_0_3, 3); __LOAD(__reg_0_4, 4); __STORE(2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); } else { __LOAD(__reg_0_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __LOAD(__reg_0_3, 3); __LOAD(__reg_0_4, 4); __STORE(2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); } __c_sb = __c_sb_double + __blockSize * 1; if (__c1Id == __side1Num - 1) { for (__h = 5; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 5;) { __LOAD(__reg_0_0, __h); __STORE(__h - 2, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __h++; __LOAD(__reg_0_1, __h); __STORE(__h - 2, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __h++; __LOAD(__reg_0_2, __h); __STORE(__h - 2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __h++; __LOAD(__reg_0_3, __h); __STORE(__h - 2, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __h++; __LOAD(__reg_0_4, __h); __STORE(__h - 2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __h++; __DB_SWITCH(); __syncthreads(); } if (0) {} else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { } else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_0, __h + 0); __STORE(__h - 2, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); } else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_0, __h + 0); __STORE(__h - 2, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __LOAD(__reg_0_1, __h + 1); __STORE(__h - 1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); } else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_0, __h + 0); __STORE(__h - 2, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __LOAD(__reg_0_1, __h + 1); __STORE(__h - 1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __LOAD(__reg_0_2, __h + 2); __STORE(__h + 0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); } else if (__h + 4 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_0, __h + 0); __STORE(__h - 2, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __LOAD(__reg_0_1, __h + 1); __STORE(__h - 1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __LOAD(__reg_0_2, __h + 2); __STORE(__h + 0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_3, __h + 3); __STORE(__h + 1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); } } else { for (__h = 5; __h <= __side1LenOl - 5;) { __LOAD(__reg_0_0, __h); __STORE(__h - 2, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __h++; __LOAD(__reg_0_1, __h); __STORE(__h - 2, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __h++; __LOAD(__reg_0_2, __h); __STORE(__h - 2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __h++; __LOAD(__reg_0_3, __h); __STORE(__h - 2, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __h++; __LOAD(__reg_0_4, __h); __STORE(__h - 2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __h++; __DB_SWITCH(); __syncthreads(); } if (__h == __side1LenOl) return; __LOAD(__reg_0_0, __h); __STORE(__h - 2, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __h++; if (__h == 
__side1LenOl) return; __LOAD(__reg_0_1, __h); __STORE(__h - 2, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_2, __h); __STORE(__h - 2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_3, __h); __STORE(__h - 2, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_4, __h); __STORE(__h - 2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __h++; } }
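/*
 * Note on kernel0_3, kernel0_2 and kernel0_1 above: they appear to be
 * reduced-depth variants of the same radius-2 star stencil, differing only in
 * how many time steps are fused per launch (__side0Len = 3, 2 and 1) and,
 * consequently, in the number of register pipeline stages (__reg_2_*,
 * __reg_1_*, or none beyond __reg_0_*) and in the useful tile width
 * (__side2Len = 500, 504 and 508, since a smaller temporal depth needs a
 * smaller halo). Each thread owns one c2 column, streams down c1 keeping a
 * five-row window in registers, and exchanges its +/-1 and +/-2 c2 neighbours
 * through the double-buffered shared-memory line __c_sb_double. These
 * variants are presumably launched by the host driver when fewer than the
 * full temporal tile of time steps remains.
 */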
a35667a0ab346dc914eb647f368502bddbd1e1ec.cu
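/*
 * Kernels for star2d2r-512-8-512 (see the .hu header included below):
 * AN5D-style (judging by the AN5D_TYPE macros) time-tiled kernels for a 2D
 * radius-2 star stencil. For interior points each update computes
 * (shorthand; A stores two time planes in one flat allocation, selected by
 * c0 % 2):
 *
 *   A[(c0+1)%2][c1][c2] = 0.25001f*A[c0%2][c1  ][c2  ]
 *                       + 0.09371f*A[c0%2][c1-2][c2  ] + 0.09374f*A[c0%2][c1-1][c2  ]
 *                       + 0.09375f*A[c0%2][c1+1][c2  ] + 0.09378f*A[c0%2][c1+2][c2  ]
 *                       + 0.09376f*A[c0%2][c1  ][c2-2] + 0.09372f*A[c0%2][c1  ][c2-1]
 *                       + 0.09377f*A[c0%2][c1  ][c2+1] + 0.09373f*A[c0%2][c1  ][c2+2]
 *
 * kernel0_8 below fuses 8 time steps per sweep (__side0Len = 8); the useful
 * tile width drops from 512 to __side2Len = 480 so that, with a 2*8-cell
 * overlap on each side, the overlapped block is still 512 threads wide.
 * The smaller kernel0_N variants (N < 8) presumably mop up the remaining
 * time steps of the outer time loop.
 */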
#include "star2d2r-512-8-512_kernel.hu" __device__ float __sbref_wrap(float *sb, size_t index) { return sb[index]; } __global__ void kernel0_8(float *A, int dimsize, int timestep, int c0) { #ifndef AN5D_TYPE #define AN5D_TYPE unsigned #endif const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0); #define __c0 c0 const AN5D_TYPE __c1Len = (dimsize - 2 - 2); const AN5D_TYPE __c1Pad = (2); #define __c1 c1 const AN5D_TYPE __c2Len = (dimsize - 2 - 2); const AN5D_TYPE __c2Pad = (2); #define __c2 c2 const AN5D_TYPE __halo1 = 2; const AN5D_TYPE __halo2 = 2; const AN5D_TYPE __side0Len = 8; const AN5D_TYPE __side1Len = 512; const AN5D_TYPE __side2Len = 480; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len; const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len; const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x; const AN5D_TYPE __local_c2 = __tid; const AN5D_TYPE __c1Id = blockIdx.x / __side2Num; const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2; float __reg_0_0; float __reg_0_1; float __reg_0_2; float __reg_0_3; float __reg_0_4; float __reg_1_0; float __reg_1_1; float __reg_1_2; float __reg_1_3; float __reg_1_4; float __reg_2_0; float __reg_2_1; float __reg_2_2; float __reg_2_3; float __reg_2_4; float __reg_3_0; float __reg_3_1; float __reg_3_2; float __reg_3_3; float __reg_3_4; float __reg_4_0; float __reg_4_1; float __reg_4_2; float __reg_4_3; float __reg_4_4; float __reg_5_0; float __reg_5_1; float __reg_5_2; float __reg_5_3; float __reg_5_4; float __reg_6_0; float __reg_6_1; float __reg_6_2; float __reg_6_3; float __reg_6_4; float __reg_7_0; float __reg_7_1; float __reg_7_2; float __reg_7_3; float __reg_7_4; __shared__ float __c_sb_double[__blockSize * 2]; float *__c_sb = __c_sb_double; const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2; const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len; const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1); const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2); const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3); const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4); const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5); const AN5D_TYPE __writeValid6 = __updateValid && __local_c2 >= (__halo2 * 6) && __local_c2 < __side2LenOl - (__halo2 * 6); const AN5D_TYPE __writeValid7 = __updateValid && __local_c2 >= (__halo2 * 7) && __local_c2 < __side2LenOl - (__halo2 * 7); const AN5D_TYPE __writeValid8 = __updateValid && __local_c2 >= (__halo2 * 8) && __local_c2 < __side2LenOl - (__halo2 * 8); const AN5D_TYPE __storeValid = __writeValid8; AN5D_TYPE __c1; AN5D_TYPE __h; const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id; #define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0) #define __DEST 
(A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR(__rn0, __a, __b, __c, __d, __e) do { __rn0 = (((((((((0.09371f * (__REGREF(__a, 0))) + (0.09374f * (__REGREF(__b, 0)))) + (0.09376f * (__SBREF(__c_sb, -2)))) + (0.09372f * (__SBREF(__c_sb, -1)))) + (0.25001f * (__REGREF(__c, 0)))) + (0.09377f * (__SBREF(__c_sb, 1)))) + (0.09373f * (__SBREF(__c_sb, 2)))) + (0.09375f * (__REGREF(__d, 0)))) + (0.09378f * (__REGREF(__e, 0)))); } while (0) #define __DB_SWITCH() do { __c_sb = &__c_sb_double[(__c_sb == __c_sb_double) ? __blockSize : 0]; } while (0) #define __CALCSETUP(a, b, c, d, e) do { __DB_SWITCH(); __c_sb[__tid] = c; __syncthreads(); } while (0) #define __CALC1(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0) #define __CALC2(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0) #define __CALC3(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0) #define __CALC4(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid4) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0) #define __CALC5(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid5) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0) #define __CALC6(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid6) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0) #define __CALC7(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid7) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0) #define __STORE(h, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2, reg3, reg4); } } while (0) if (__c1Id == 0) { __LOAD(__reg_7_0, 0); __LOAD(__reg_7_1, 1); __LOAD(__reg_0_2, 2); __LOAD(__reg_0_3, 3); __LOAD(__reg_0_4, 4); __CALC1(__reg_1_2, __reg_7_0, __reg_7_1, __reg_0_2, __reg_0_3, __reg_0_4); __LOAD(__reg_0_0, 5); __CALC1(__reg_1_3, __reg_7_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __LOAD(__reg_0_1, 6); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_7_0, __reg_7_1, __reg_1_2, __reg_1_3, __reg_1_4); __LOAD(__reg_0_2, 7); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_7_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __LOAD(__reg_0_3, 8); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_7_0, __reg_7_1, __reg_2_2, __reg_2_3, __reg_2_4); __LOAD(__reg_0_4, 9); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_3, __reg_7_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __LOAD(__reg_0_0, 10); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, 
__reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_7_0, __reg_7_1, __reg_3_2, __reg_3_3, __reg_3_4); __LOAD(__reg_0_1, 11); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_3, __reg_7_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __LOAD(__reg_0_2, 12); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_7_0, __reg_7_1, __reg_4_2, __reg_4_3, __reg_4_4); __LOAD(__reg_0_3, 13); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_3, __reg_7_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); __LOAD(__reg_0_4, 14); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_7_0, __reg_7_1, __reg_5_2, __reg_5_3, __reg_5_4); __LOAD(__reg_0_0, 15); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_3, __reg_7_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0); __LOAD(__reg_0_1, 16); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_7_0, __reg_7_1, __reg_6_2, __reg_6_3, __reg_6_4); __LOAD(__reg_0_2, 17); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4); __CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_3, __reg_7_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0); __LOAD(__reg_0_3, 18); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, 
__reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); __CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3); __CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1); __STORE(2, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4); __LOAD(__reg_0_4, 19); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4); __CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2); __STORE(3, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0); __LOAD(__reg_0_0, 20); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0); __CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3); __STORE(4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1); __LOAD(__reg_0_1, 21); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4); __STORE(5, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2); __LOAD(__reg_0_2, 22); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4); __CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0); __STORE(6, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3); __LOAD(__reg_0_3, 23); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); __CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3); __CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1); __STORE(7, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4); 
__LOAD(__reg_0_4, 24); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4); __CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2); __STORE(8, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0); __LOAD(__reg_0_0, 25); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0); __CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3); __STORE(9, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1); __LOAD(__reg_0_1, 26); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4); __STORE(10, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2); __LOAD(__reg_0_2, 27); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4); __CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0); __STORE(11, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3); __LOAD(__reg_0_3, 28); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); __CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3); __CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1); __STORE(12, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4); __LOAD(__reg_0_4, 29); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4); 
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2); __STORE(13, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0); __LOAD(__reg_0_0, 30); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0); __CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3); __STORE(14, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1); __LOAD(__reg_0_1, 31); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4); __STORE(15, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2); __LOAD(__reg_0_2, 32); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4); __CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0); __STORE(16, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3); } else { __LOAD(__reg_0_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __LOAD(__reg_0_3, 3); __LOAD(__reg_0_4, 4); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __LOAD(__reg_0_0, 5); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __LOAD(__reg_0_1, 6); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __LOAD(__reg_0_2, 7); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_3, 8); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __LOAD(__reg_0_4, 9); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __LOAD(__reg_0_0, 10); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __LOAD(__reg_0_1, 11); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __LOAD(__reg_0_2, 12); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __LOAD(__reg_0_3, 13); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, 
__reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __LOAD(__reg_0_4, 14); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __LOAD(__reg_0_0, 15); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __LOAD(__reg_0_1, 16); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __LOAD(__reg_0_2, 17); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __LOAD(__reg_0_3, 18); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __LOAD(__reg_0_4, 19); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __LOAD(__reg_0_0, 20); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); __LOAD(__reg_0_1, 21); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __LOAD(__reg_0_2, 22); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4); __LOAD(__reg_0_3, 23); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); __LOAD(__reg_0_4, 24); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, 
__reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4); __LOAD(__reg_0_0, 25); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0); __LOAD(__reg_0_1, 26); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1); __LOAD(__reg_0_2, 27); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4); __CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2); __LOAD(__reg_0_3, 28); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); __CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3); __CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1); __LOAD(__reg_0_4, 29); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4); __CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2); __LOAD(__reg_0_0, 30); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0); __CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3); __LOAD(__reg_0_1, 31); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, 
__reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4); __LOAD(__reg_0_2, 32); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4); __CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0); __STORE(16, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3); } __c_sb = __c_sb_double + __blockSize * 0; if (__c1Id == __side1Num - 1) { for (__h = 33; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 5;) { __LOAD(__reg_0_3, __h); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); __CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3); __CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1); __STORE(__h - 16, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4); __h++; __LOAD(__reg_0_4, __h); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4); __CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2); __STORE(__h - 16, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0); __CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3); __STORE(__h - 16, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1); __h++; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __CALC6(__reg_6_4, 
__reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4); __STORE(__h - 16, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2); __h++; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4); __CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0); __STORE(__h - 16, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3); __h++; } if (0) {} else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); __CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3); __CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1); __STORE(__h - 16, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1, __reg_0_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4); __CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2); __STORE(__h - 15, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_0_1); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0); __CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3); __STORE(__h - 14, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_0_1, __reg_0_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4); __STORE(__h - 13, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_0_1); __CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4); __CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0); __STORE(__h - 12, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_0_1, __reg_0_2); __CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); __CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3); __CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1); __STORE(__h - 
11, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4); __CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_0_1); __CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4); __CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2); __STORE(__h - 10, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0); __CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_0_1, __reg_0_2); __CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0); __CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3); __STORE(__h - 9, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1); __CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_0_1); __CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4); __STORE(__h - 8, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2); __CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_0_1, __reg_0_2); __CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0); __STORE(__h - 7, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3); __CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_0_1); __STORE(__h - 6, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4); __CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_0_1, __reg_0_2); __STORE(__h - 5, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0); __STORE(__h - 4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_0_1); __STORE(__h - 3, __reg_7_3, __reg_7_4, __reg_7_0, __reg_0_1, __reg_0_2); } else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_3, __h + 0); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); __CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3); __CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1); __STORE(__h - 16, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4); __CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2); __STORE(__h - 15, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2, __reg_0_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0); __CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3); __STORE(__h - 14, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_0_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __CALC6(__reg_6_4, __reg_5_2, __reg_5_3, 
__reg_5_4, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4); __STORE(__h - 13, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_0_2, __reg_0_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4); __CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0); __STORE(__h - 12, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_0_2); __CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); __CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3); __CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1); __STORE(__h - 11, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_0_2, __reg_0_3); __CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4); __CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2); __STORE(__h - 10, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0); __CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_0_2); __CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0); __CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3); __STORE(__h - 9, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1); __CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_0_2, __reg_0_3); __CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4); __STORE(__h - 8, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2); __CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_0_2); __CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0); __STORE(__h - 7, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3); __CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_0_2, __reg_0_3); __CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1); __STORE(__h - 6, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4); __CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_0_2); __STORE(__h - 5, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0); __CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_0_2, __reg_0_3); __STORE(__h - 4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1); __STORE(__h - 3, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_0_2); __STORE(__h - 2, __reg_7_4, __reg_7_0, __reg_7_1, __reg_0_2, __reg_0_3); } else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_3, __h + 0); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); __CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3); __CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1); __STORE(__h - 16, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, 
__reg_7_4); __LOAD(__reg_0_4, __h + 1); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4); __CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2); __STORE(__h - 15, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0); __CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3); __STORE(__h - 14, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3, __reg_0_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4); __STORE(__h - 13, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_0_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4); __CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0); __STORE(__h - 12, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_0_3, __reg_0_4); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); __CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3); __CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1); __STORE(__h - 11, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_0_3); __CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4); __CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2); __STORE(__h - 10, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_0_3, __reg_0_4); __CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0); __CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3); __STORE(__h - 9, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1); __CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_0_3); __CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, 
__reg_6_4); __STORE(__h - 8, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2); __CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_0_3, __reg_0_4); __CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0); __STORE(__h - 7, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3); __CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_0_3); __CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1); __STORE(__h - 6, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4); __CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_0_3, __reg_0_4); __CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2); __STORE(__h - 5, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0); __CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_0_3); __STORE(__h - 4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1); __CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_0_3, __reg_0_4); __STORE(__h - 3, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2); __STORE(__h - 2, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_0_3); __STORE(__h - 1, __reg_7_0, __reg_7_1, __reg_7_2, __reg_0_3, __reg_0_4); } else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_3, __h + 0); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); __CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3); __CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1); __STORE(__h - 16, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4); __LOAD(__reg_0_4, __h + 1); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4); __CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2); __STORE(__h - 15, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0); __LOAD(__reg_0_0, __h + 2); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0); __CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3); __STORE(__h - 14, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __CALC6(__reg_6_4, __reg_5_2, 
__reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4); __STORE(__h - 13, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4, __reg_0_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4); __CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0); __STORE(__h - 12, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_0_4); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); __CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3); __CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1); __STORE(__h - 11, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_0_4, __reg_0_0); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4); __CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2); __STORE(__h - 10, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_0_4); __CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0); __CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3); __STORE(__h - 9, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_0_4, __reg_0_0); __CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4); __STORE(__h - 8, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2); __CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_0_4); __CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0); __STORE(__h - 7, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3); __CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_0_4, __reg_0_0); __CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3); __CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1); __STORE(__h - 6, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4); __CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_0_4); __CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2); __STORE(__h - 5, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0); __CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_0_4, __reg_0_0); __CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3); __STORE(__h - 4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1); __CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_0_4); __STORE(__h - 3, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2); 
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_0_4, __reg_0_0); __STORE(__h - 2, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3); __STORE(__h - 1, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_0_4); __STORE(__h + 0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_0_4, __reg_0_0); } else if (__h + 4 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_3, __h + 0); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); __CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3); __CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1); __STORE(__h - 16, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4); __LOAD(__reg_0_4, __h + 1); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4); __CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2); __STORE(__h - 15, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0); __LOAD(__reg_0_0, __h + 2); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0); __CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3); __STORE(__h - 14, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1); __LOAD(__reg_0_1, __h + 3); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4); __STORE(__h - 13, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4); __CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0); __STORE(__h - 12, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0, __reg_0_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); 
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); __CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3); __CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1); __STORE(__h - 11, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_0_0); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4); __CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2); __STORE(__h - 10, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_0_0, __reg_0_1); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0); __CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3); __STORE(__h - 9, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_0_0); __CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4); __STORE(__h - 8, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_0_0, __reg_0_1); __CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4); __CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0); __STORE(__h - 7, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3); __CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_0_0); __CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3); __CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1); __STORE(__h - 6, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4); __CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_0_0, __reg_0_1); __CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4); __CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2); __STORE(__h - 5, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0); __CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_0_0); __CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3); __STORE(__h - 4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1); __CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_0_0, __reg_0_1); __CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4); __STORE(__h - 3, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2); __CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_0_0); __STORE(__h - 2, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3); __CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_0_0, __reg_0_1); __STORE(__h - 1, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4); __STORE(__h + 0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_0_0); __STORE(__h + 1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_0_0, __reg_0_1); } } else { for (__h = 33; __h <= __side1LenOl - 5;) { 
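// Steady-state phase for thread blocks that do not own the bottom boundary of the tile:
// each iteration loads five new rows, rotates the per-stage register queues through the
// shifted macro arguments, and stores five finished rows 16 rows (2-row halo x 8 fused
// time steps) behind the current load position __h.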
__LOAD(__reg_0_3, __h); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); __CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3); __CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1); __STORE(__h - 16, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4); __h++; __LOAD(__reg_0_4, __h); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4); __CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2); __STORE(__h - 16, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0); __CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3); __STORE(__h - 16, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1); __h++; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4); __STORE(__h - 16, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2); __h++; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4); __CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0); __STORE(__h - 16, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3); __h++; } if (__h == __side1LenOl) return; __LOAD(__reg_0_3, __h); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_3, __reg_4_1, __reg_4_2, 
__reg_4_3, __reg_4_4, __reg_4_0); __CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3); __CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1); __STORE(__h - 16, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4); __h++;
  if (__h == __side1LenOl) return;
  __LOAD(__reg_0_4, __h); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4); __CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2); __STORE(__h - 16, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0); __h++;
  if (__h == __side1LenOl) return;
  __LOAD(__reg_0_0, __h); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0); __CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3); __STORE(__h - 16, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1); __h++;
  if (__h == __side1LenOl) return;
  __LOAD(__reg_0_1, __h); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4); __STORE(__h - 16, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2); __h++;
  if (__h == __side1LenOl) return;
  __LOAD(__reg_0_2, __h); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4); __CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0); __STORE(__h - 16, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3); __h++;
  }
}

// kernel0_7: generated variant of the same radius-2 star stencil that fuses 7 time steps
// per sweep (__side0Len = 7); the first six applications go through __CALC1..__CALC6 and
// the seventh is applied by __STORE when the result is written to __DEST.
__global__ void kernel0_7(float *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
  const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
  const AN5D_TYPE __c1Len = (dimsize - 2 - 2); const AN5D_TYPE __c1Pad = (2);
#define __c1 c1
  const AN5D_TYPE __c2Len = (dimsize - 2 - 2); const AN5D_TYPE __c2Pad = (2);
#define __c2 c2
  const AN5D_TYPE __halo1 = 2;
  const AN5D_TYPE __halo2 = 2;
  const AN5D_TYPE __side0Len = 7;
  const AN5D_TYPE __side1Len = 512;
  const AN5D_TYPE __side2Len = 484;
  const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
  const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
  const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
  const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
  const AN5D_TYPE __blockSize = 1 * __side2LenOl;
  const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
  const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
  const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
  const AN5D_TYPE __local_c2 = __tid;
  const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
  const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
  float __reg_0_0; float __reg_0_1; float __reg_0_2; float __reg_0_3; float __reg_0_4;
  float __reg_1_0; float __reg_1_1; float __reg_1_2; float __reg_1_3; float __reg_1_4;
  float __reg_2_0; float __reg_2_1; float __reg_2_2; float __reg_2_3; float __reg_2_4;
  float __reg_3_0; float __reg_3_1; float __reg_3_2; float __reg_3_3; float __reg_3_4;
  float __reg_4_0; float __reg_4_1; float __reg_4_2; float __reg_4_3; float __reg_4_4;
  float __reg_5_0; float __reg_5_1; float __reg_5_2; float __reg_5_3; float __reg_5_4;
  float __reg_6_0; float __reg_6_1; float __reg_6_2; float __reg_6_3; float __reg_6_4;
  __shared__ float __c_sb_double[__blockSize * 2];
  float *__c_sb = __c_sb_double;
  const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
  const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
  const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
  const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2);
  const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3);
  const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4);
  const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5);
  const AN5D_TYPE __writeValid6 = __updateValid && __local_c2 >= (__halo2 * 6) && __local_c2 < __side2LenOl - (__halo2 * 6);
  const AN5D_TYPE __writeValid7 = __updateValid && __local_c2 >= (__halo2 * 7) && __local_c2 < __side2LenOl - (__halo2 * 7);
  const AN5D_TYPE __storeValid = __writeValid7;
  AN5D_TYPE __c1;
  AN5D_TYPE __h;
  const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR(__rn0, __a, __b, __c, __d, __e) do { __rn0 = (((((((((0.09371f * (__REGREF(__a, 0))) + (0.09374f * (__REGREF(__b, 0)))) + (0.09376f * (__SBREF(__c_sb, -2)))) + (0.09372f * (__SBREF(__c_sb, -1)))) + (0.25001f * (__REGREF(__c, 0)))) + (0.09377f * (__SBREF(__c_sb, 1)))) + (0.09373f * (__SBREF(__c_sb, 2)))) + (0.09375f * (__REGREF(__d, 0)))) + (0.09378f * (__REGREF(__e, 0)))); } while (0)
#define __DB_SWITCH() do { __c_sb = &__c_sb_double[(__c_sb == __c_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a, b, c, d, e) do { __DB_SWITCH(); __c_sb[__tid] = c; __syncthreads(); } while (0)
#define __CALC1(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC2(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC3(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC4(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid4) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC5(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid5) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC6(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid6) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __STORE(h, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2, reg3, reg4); } } while (0)
  if (__c1Id == 0)
  {
  __LOAD(__reg_6_0, 0); __LOAD(__reg_6_1, 1); __LOAD(__reg_0_2, 2); __LOAD(__reg_0_3, 3); __LOAD(__reg_0_4, 4); __CALC1(__reg_1_2, __reg_6_0, __reg_6_1, __reg_0_2, __reg_0_3, __reg_0_4);
  __LOAD(__reg_0_0, 5); __CALC1(__reg_1_3, __reg_6_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
  __LOAD(__reg_0_1, 6); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_6_0, __reg_6_1, __reg_1_2, __reg_1_3, __reg_1_4);
  __LOAD(__reg_0_2, 7); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_6_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
  __LOAD(__reg_0_3, 8); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_6_0, __reg_6_1, __reg_2_2, __reg_2_3, __reg_2_4);
  __LOAD(__reg_0_4, 9); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_3, __reg_6_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
  __LOAD(__reg_0_0, 10); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_6_0, __reg_6_1, __reg_3_2, __reg_3_3, __reg_3_4);
  __LOAD(__reg_0_1, 11); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_3, __reg_6_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
  __LOAD(__reg_0_2, 12); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_6_0, __reg_6_1, __reg_4_2, __reg_4_3, __reg_4_4); __LOAD(__reg_0_3, 13); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_3, __reg_6_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); __LOAD(__reg_0_4, 14); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_6_0, __reg_6_1, __reg_5_2, __reg_5_3, __reg_5_4); __LOAD(__reg_0_0, 15); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_3, __reg_6_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0); __LOAD(__reg_0_1, 16); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1); __STORE(2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4); __LOAD(__reg_0_2, 17); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4); __CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2); __STORE(3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0); __LOAD(__reg_0_3, 18); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); __CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3); __STORE(4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1); __LOAD(__reg_0_4, 19); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_0, __reg_5_1, 
__reg_5_2, __reg_5_3, __reg_5_4); __STORE(5, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2); __LOAD(__reg_0_0, 20); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0); __STORE(6, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3); __LOAD(__reg_0_1, 21); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1); __STORE(7, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4); __LOAD(__reg_0_2, 22); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4); __CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2); __STORE(8, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0); __LOAD(__reg_0_3, 23); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); __CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3); __STORE(9, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1); __LOAD(__reg_0_4, 24); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4); __STORE(10, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2); __LOAD(__reg_0_0, 25); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0); __STORE(11, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3); __LOAD(__reg_0_1, 26); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __CALC3(__reg_3_0, __reg_2_3, 
__reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1); __STORE(12, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4); __LOAD(__reg_0_2, 27); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4); __CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2); __STORE(13, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0); __LOAD(__reg_0_3, 28); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); __CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3); __STORE(14, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1); } else { __LOAD(__reg_0_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __LOAD(__reg_0_3, 3); __LOAD(__reg_0_4, 4); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __LOAD(__reg_0_0, 5); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __LOAD(__reg_0_1, 6); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __LOAD(__reg_0_2, 7); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_3, 8); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __LOAD(__reg_0_4, 9); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __LOAD(__reg_0_0, 10); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __LOAD(__reg_0_1, 11); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __LOAD(__reg_0_2, 12); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __LOAD(__reg_0_3, 13); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __LOAD(__reg_0_4, 14); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __LOAD(__reg_0_0, 15); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, 
__reg_2_4, __reg_2_0, __reg_2_1); __LOAD(__reg_0_1, 16); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __LOAD(__reg_0_2, 17); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __LOAD(__reg_0_3, 18); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __LOAD(__reg_0_4, 19); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __LOAD(__reg_0_0, 20); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); __LOAD(__reg_0_1, 21); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __LOAD(__reg_0_2, 22); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4); __LOAD(__reg_0_3, 23); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); __LOAD(__reg_0_4, 24); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4); __LOAD(__reg_0_0, 25); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, 
__reg_1_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0); __LOAD(__reg_0_1, 26); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1); __LOAD(__reg_0_2, 27); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4); __CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2); __LOAD(__reg_0_3, 28); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); __CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3); __STORE(14, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1); } __c_sb = __c_sb_double + __blockSize * 1; if (__c1Id == __side1Num - 1) { for (__h = 29; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 5;) { __LOAD(__reg_0_4, __h); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4); __STORE(__h - 14, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0); __STORE(__h - 14, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3); __h++; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1); __STORE(__h - 14, 
__reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4); __h++; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4); __CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2); __STORE(__h - 14, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0); __h++; __LOAD(__reg_0_3, __h); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); __CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3); __STORE(__h - 14, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1); __h++; __DB_SWITCH(); __syncthreads(); } if (0) {} else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4); __STORE(__h - 14, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2, __reg_0_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0); __STORE(__h - 13, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_0_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1); __STORE(__h - 12, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_0_2, __reg_0_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4); __CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2); __STORE(__h - 11, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_0_2); __CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); __CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3); __STORE(__h - 10, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_0_2, __reg_0_3); __CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4); __STORE(__h - 9, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2); __CALC5(__reg_5_0, 
__reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_0_2); __CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0); __STORE(__h - 8, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3); __CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_0_2, __reg_0_3); __CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1); __STORE(__h - 7, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4); __CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_0_2); __STORE(__h - 6, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0); __CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_0_2, __reg_0_3); __STORE(__h - 5, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1); __STORE(__h - 4, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_0_2); __STORE(__h - 3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_0_2, __reg_0_3); } else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_4, __h + 0); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4); __STORE(__h - 14, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0); __STORE(__h - 13, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3, __reg_0_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1); __STORE(__h - 12, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_0_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4); __CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2); __STORE(__h - 11, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_0_3, __reg_0_4); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); __CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3); __STORE(__h - 10, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_0_3); __CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4); __STORE(__h - 9, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_0_3, __reg_0_4); __CALC5(__reg_5_0, __reg_4_3, __reg_4_4, 
__reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0); __STORE(__h - 8, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3); __CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_0_3); __CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1); __STORE(__h - 7, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4); __CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_0_3, __reg_0_4); __CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2); __STORE(__h - 6, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0); __CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_0_3); __STORE(__h - 5, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1); __CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_0_3, __reg_0_4); __STORE(__h - 4, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2); __STORE(__h - 3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_0_3); __STORE(__h - 2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_0_3, __reg_0_4); } else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_4, __h + 0); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4); __STORE(__h - 14, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2); __LOAD(__reg_0_0, __h + 1); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0); __STORE(__h - 13, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1); __STORE(__h - 12, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4, __reg_0_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4); __CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2); __STORE(__h - 11, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_0_4); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); __CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3); __STORE(__h - 10, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1); __CALC3(__reg_3_3, __reg_2_1, 
__reg_2_2, __reg_2_3, __reg_0_4, __reg_0_0); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4); __STORE(__h - 9, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_0_4); __CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0); __STORE(__h - 8, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_0_4, __reg_0_0); __CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1); __STORE(__h - 7, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4); __CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_0_4); __CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2); __STORE(__h - 6, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0); __CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_0_4, __reg_0_0); __CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3); __STORE(__h - 5, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1); __CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_0_4); __STORE(__h - 4, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2); __CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_0_4, __reg_0_0); __STORE(__h - 3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3); __STORE(__h - 2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_0_4); __STORE(__h - 1, __reg_6_1, __reg_6_2, __reg_6_3, __reg_0_4, __reg_0_0); } else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_4, __h + 0); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4); __STORE(__h - 14, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2); __LOAD(__reg_0_0, __h + 1); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0); __STORE(__h - 13, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3); __LOAD(__reg_0_1, __h + 2); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1); __STORE(__h - 12, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4); 
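// Bottom row-tile remainder cases (__h + k == rows left in this tile): the last 0-4 rows are loaded
// and the pipeline is then drained, passing the freshly loaded raw rows (__reg_0_*) where
// partially-updated values past the tile edge do not exist.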
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4); __CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2); __STORE(__h - 11, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0, __reg_0_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); __CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3); __STORE(__h - 10, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_0_0); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4); __STORE(__h - 9, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_0_0, __reg_0_1); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0); __STORE(__h - 8, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_0_0); __CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1); __STORE(__h - 7, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_0_0, __reg_0_1); __CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4); __CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2); __STORE(__h - 6, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0); __CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_0_0); __CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3); __STORE(__h - 5, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1); __CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_0_0, __reg_0_1); __CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4); __STORE(__h - 4, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2); __CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_0_0); __STORE(__h - 3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3); __CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_0_0, __reg_0_1); __STORE(__h - 2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4); __STORE(__h - 1, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_0_0); __STORE(__h + 0, __reg_6_2, __reg_6_3, __reg_6_4, __reg_0_0, __reg_0_1); } else if (__h + 4 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_4, __h + 0); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __CALC5(__reg_5_4, __reg_4_2, 
__reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4); __STORE(__h - 14, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2); __LOAD(__reg_0_0, __h + 1); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0); __STORE(__h - 13, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3); __LOAD(__reg_0_1, __h + 2); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1); __STORE(__h - 12, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4); __LOAD(__reg_0_2, __h + 3); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4); __CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2); __STORE(__h - 11, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); __CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3); __STORE(__h - 10, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1, __reg_0_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4); __STORE(__h - 9, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_0_1); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0); __STORE(__h - 8, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_0_1, __reg_0_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1); __STORE(__h - 7, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, 
__reg_3_0, __reg_0_1); __CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4); __CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2); __STORE(__h - 6, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_0_1, __reg_0_2); __CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); __CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3); __STORE(__h - 5, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1); __CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_0_1); __CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4); __STORE(__h - 4, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2); __CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_0_1, __reg_0_2); __CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0); __STORE(__h - 3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3); __CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_0_1); __STORE(__h - 2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4); __CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_0_1, __reg_0_2); __STORE(__h - 1, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0); __STORE(__h + 0, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_0_1); __STORE(__h + 1, __reg_6_3, __reg_6_4, __reg_6_0, __reg_0_1, __reg_0_2); } } else { for (__h = 29; __h <= __side1LenOl - 5;) { __LOAD(__reg_0_4, __h); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4); __STORE(__h - 14, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0); __STORE(__h - 14, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3); __h++; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1); __STORE(__h - 14, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4); __h++; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4); 
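// Row-tiles other than the last along c1: the steady-state loop streams five rows per iteration,
// each __STORE trailing the current __LOAD by 14 rows (halo of 2 times 7 fused time steps), and is
// followed by up to five drain steps guarded by "if (__h == __side1LenOl) return;".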
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2); __STORE(__h - 14, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0); __h++; __LOAD(__reg_0_3, __h); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); __CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3); __STORE(__h - 14, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1); __h++; __DB_SWITCH(); __syncthreads(); } if (__h == __side1LenOl) return; __LOAD(__reg_0_4, __h); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4); __STORE(__h - 14, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0); __STORE(__h - 14, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1); __STORE(__h - 14, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4); __CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2); __STORE(__h - 14, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_3, __h); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); __CALC6(__reg_6_1, 
__reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3); __STORE(__h - 14, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1); __h++; } } __global__ void kernel0_6(float *A, int dimsize, int timestep, int c0) { #ifndef AN5D_TYPE #define AN5D_TYPE unsigned #endif const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0); #define __c0 c0 const AN5D_TYPE __c1Len = (dimsize - 2 - 2); const AN5D_TYPE __c1Pad = (2); #define __c1 c1 const AN5D_TYPE __c2Len = (dimsize - 2 - 2); const AN5D_TYPE __c2Pad = (2); #define __c2 c2 const AN5D_TYPE __halo1 = 2; const AN5D_TYPE __halo2 = 2; const AN5D_TYPE __side0Len = 6; const AN5D_TYPE __side1Len = 512; const AN5D_TYPE __side2Len = 488; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len; const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len; const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x; const AN5D_TYPE __local_c2 = __tid; const AN5D_TYPE __c1Id = blockIdx.x / __side2Num; const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2; float __reg_0_0; float __reg_0_1; float __reg_0_2; float __reg_0_3; float __reg_0_4; float __reg_1_0; float __reg_1_1; float __reg_1_2; float __reg_1_3; float __reg_1_4; float __reg_2_0; float __reg_2_1; float __reg_2_2; float __reg_2_3; float __reg_2_4; float __reg_3_0; float __reg_3_1; float __reg_3_2; float __reg_3_3; float __reg_3_4; float __reg_4_0; float __reg_4_1; float __reg_4_2; float __reg_4_3; float __reg_4_4; float __reg_5_0; float __reg_5_1; float __reg_5_2; float __reg_5_3; float __reg_5_4; __shared__ float __c_sb_double[__blockSize * 2]; float *__c_sb = __c_sb_double; const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2; const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len; const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1); const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2); const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3); const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4); const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5); const AN5D_TYPE __writeValid6 = __updateValid && __local_c2 >= (__halo2 * 6) && __local_c2 < __side2LenOl - (__halo2 * 6); const AN5D_TYPE __storeValid = __writeValid6; AN5D_TYPE __c1; AN5D_TYPE __h; const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id; #define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0) #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR(__rn0, __a, __b, __c, __d, __e) do { __rn0 = (((((((((0.09371f * (__REGREF(__a, 0))) + (0.09374f * (__REGREF(__b, 0)))) + (0.09376f * (__SBREF(__c_sb, -2)))) + (0.09372f * (__SBREF(__c_sb, -1)))) + (0.25001f * (__REGREF(__c, 0)))) + 
(0.09377f * (__SBREF(__c_sb, 1)))) + (0.09373f * (__SBREF(__c_sb, 2)))) + (0.09375f * (__REGREF(__d, 0)))) + (0.09378f * (__REGREF(__e, 0)))); } while (0) #define __DB_SWITCH() do { __c_sb = &__c_sb_double[(__c_sb == __c_sb_double) ? __blockSize : 0]; } while (0) #define __CALCSETUP(a, b, c, d, e) do { __DB_SWITCH(); __c_sb[__tid] = c; __syncthreads(); } while (0) #define __CALC1(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0) #define __CALC2(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0) #define __CALC3(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0) #define __CALC4(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid4) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0) #define __CALC5(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid5) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0) #define __STORE(h, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2, reg3, reg4); } } while (0) if (__c1Id == 0) { __LOAD(__reg_5_0, 0); __LOAD(__reg_5_1, 1); __LOAD(__reg_0_2, 2); __LOAD(__reg_0_3, 3); __LOAD(__reg_0_4, 4); __CALC1(__reg_1_2, __reg_5_0, __reg_5_1, __reg_0_2, __reg_0_3, __reg_0_4); __LOAD(__reg_0_0, 5); __CALC1(__reg_1_3, __reg_5_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __LOAD(__reg_0_1, 6); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_5_0, __reg_5_1, __reg_1_2, __reg_1_3, __reg_1_4); __LOAD(__reg_0_2, 7); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_5_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __LOAD(__reg_0_3, 8); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_5_0, __reg_5_1, __reg_2_2, __reg_2_3, __reg_2_4); __LOAD(__reg_0_4, 9); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_3, __reg_5_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __LOAD(__reg_0_0, 10); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_5_0, __reg_5_1, __reg_3_2, __reg_3_3, __reg_3_4); __LOAD(__reg_0_1, 11); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_3, __reg_5_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __LOAD(__reg_0_2, 12); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, 
__reg_2_1, __reg_2_2, __reg_2_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_5_0, __reg_5_1, __reg_4_2, __reg_4_3, __reg_4_4); __LOAD(__reg_0_3, 13); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_3, __reg_5_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); __LOAD(__reg_0_4, 14); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1); __STORE(2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4); __LOAD(__reg_0_0, 15); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); __STORE(3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0); __LOAD(__reg_0_1, 16); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __STORE(4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1); __LOAD(__reg_0_2, 17); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4); __STORE(5, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2); __LOAD(__reg_0_3, 18); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); __STORE(6, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3); __LOAD(__reg_0_4, 19); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1); __STORE(7, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4); __LOAD(__reg_0_0, 20); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); 
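// kernel0_6: same register-streaming scheme as above but with six fused time steps
// (__storeValid = __writeValid6); in the first row-tile the first output row is stored once row 14
// is loaded (__STORE(2, ...)) and the prologue ends with __STORE(12, ...) after row 24.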
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); __STORE(8, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0); __LOAD(__reg_0_1, 21); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __STORE(9, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1); __LOAD(__reg_0_2, 22); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4); __STORE(10, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2); __LOAD(__reg_0_3, 23); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); __STORE(11, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3); __LOAD(__reg_0_4, 24); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1); __STORE(12, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4); } else { __LOAD(__reg_0_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __LOAD(__reg_0_3, 3); __LOAD(__reg_0_4, 4); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __LOAD(__reg_0_0, 5); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __LOAD(__reg_0_1, 6); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __LOAD(__reg_0_2, 7); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_3, 8); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __LOAD(__reg_0_4, 9); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __LOAD(__reg_0_0, 10); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __LOAD(__reg_0_1, 11); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __LOAD(__reg_0_2, 12); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, 
__reg_2_2, __reg_2_3); __LOAD(__reg_0_3, 13); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __LOAD(__reg_0_4, 14); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __LOAD(__reg_0_0, 15); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __LOAD(__reg_0_1, 16); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __LOAD(__reg_0_2, 17); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __LOAD(__reg_0_3, 18); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __LOAD(__reg_0_4, 19); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __LOAD(__reg_0_0, 20); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); __LOAD(__reg_0_1, 21); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __LOAD(__reg_0_2, 22); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4); __LOAD(__reg_0_3, 23); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, 
__reg_3_1, __reg_3_2); __CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); __LOAD(__reg_0_4, 24); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1); __STORE(12, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4); } __c_sb = __c_sb_double + __blockSize * 0; if (__c1Id == __side1Num - 1) { for (__h = 25; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 5;) { __LOAD(__reg_0_0, __h); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); __STORE(__h - 12, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0); __h++; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __STORE(__h - 12, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1); __h++; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4); __STORE(__h - 12, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2); __h++; __LOAD(__reg_0_3, __h); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); __STORE(__h - 12, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3); __h++; __LOAD(__reg_0_4, __h); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1); __STORE(__h - 12, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4); __h++; } if (0) {} else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); __STORE(__h - 12, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, 
__reg_5_0); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3, __reg_0_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __STORE(__h - 11, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_0_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4); __STORE(__h - 10, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_0_3, __reg_0_4); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); __STORE(__h - 9, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_0_3); __CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1); __STORE(__h - 8, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_0_3, __reg_0_4); __CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); __STORE(__h - 7, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0); __CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_0_3); __STORE(__h - 6, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1); __CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_0_3, __reg_0_4); __STORE(__h - 5, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2); __STORE(__h - 4, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_0_3); __STORE(__h - 3, __reg_5_0, __reg_5_1, __reg_5_2, __reg_0_3, __reg_0_4); } else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_0, __h + 0); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); __STORE(__h - 12, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __STORE(__h - 11, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4, __reg_0_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4); __STORE(__h - 10, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_0_4); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); __STORE(__h - 9, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_0_4, __reg_0_0); 
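/* bottom-boundary drain: after the final row of this tile has been loaded, the remaining pipeline stages are flushed by reusing the last loaded rows (__reg_0_*) instead of issuing further global loads */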
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1); __STORE(__h - 8, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_0_4); __CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); __STORE(__h - 7, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_0_4, __reg_0_0); __CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __STORE(__h - 6, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1); __CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_0_4); __STORE(__h - 5, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2); __CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_0_4, __reg_0_0); __STORE(__h - 4, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3); __STORE(__h - 3, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_0_4); __STORE(__h - 2, __reg_5_1, __reg_5_2, __reg_5_3, __reg_0_4, __reg_0_0); } else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_0, __h + 0); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); __STORE(__h - 12, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0); __LOAD(__reg_0_1, __h + 1); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __STORE(__h - 11, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4); __STORE(__h - 10, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0, __reg_0_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); __STORE(__h - 9, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_0_0); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1); __STORE(__h - 8, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_0_0, __reg_0_1); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); __STORE(__h - 7, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_0_0); 
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __STORE(__h - 6, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_0_0, __reg_0_1); __CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4); __STORE(__h - 5, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2); __CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_0_0); __STORE(__h - 4, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3); __CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_0_0, __reg_0_1); __STORE(__h - 3, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4); __STORE(__h - 2, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_0_0); __STORE(__h - 1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_0_0, __reg_0_1); } else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_0, __h + 0); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); __STORE(__h - 12, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0); __LOAD(__reg_0_1, __h + 1); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __STORE(__h - 11, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1); __LOAD(__reg_0_2, __h + 2); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4); __STORE(__h - 10, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); __STORE(__h - 9, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1, __reg_0_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1); __STORE(__h - 8, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_0_1); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); __STORE(__h - 7, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_0_1, __reg_0_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __CALC5(__reg_5_1, __reg_4_4, __reg_4_0, 
__reg_4_1, __reg_4_2, __reg_4_3); __STORE(__h - 6, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_0_1); __CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4); __STORE(__h - 5, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_0_1, __reg_0_2); __CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); __STORE(__h - 4, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3); __CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_0_1); __STORE(__h - 3, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4); __CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_0_1, __reg_0_2); __STORE(__h - 2, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0); __STORE(__h - 1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_0_1); __STORE(__h + 0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_0_1, __reg_0_2); } else if (__h + 4 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_0, __h + 0); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); __STORE(__h - 12, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0); __LOAD(__reg_0_1, __h + 1); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __STORE(__h - 11, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1); __LOAD(__reg_0_2, __h + 2); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4); __STORE(__h - 10, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2); __LOAD(__reg_0_3, __h + 3); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); __STORE(__h - 9, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1); __STORE(__h - 8, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2, __reg_0_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, 
__reg_3_3, __reg_3_4); __CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); __STORE(__h - 7, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_0_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __STORE(__h - 6, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_0_2, __reg_0_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4); __STORE(__h - 5, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_0_2); __CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); __STORE(__h - 4, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_0_2, __reg_0_3); __CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1); __STORE(__h - 3, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4); __CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_0_2); __STORE(__h - 2, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0); __CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_0_2, __reg_0_3); __STORE(__h - 1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1); __STORE(__h + 0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_0_2); __STORE(__h + 1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_0_2, __reg_0_3); } } else { for (__h = 25; __h <= __side1LenOl - 5;) { __LOAD(__reg_0_0, __h); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); __STORE(__h - 12, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0); __h++; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __STORE(__h - 12, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1); __h++; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4); __STORE(__h - 12, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2); __h++; __LOAD(__reg_0_3, __h); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); 
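/* steady-state loop for tiles that do not touch the bottom boundary: each newly loaded row advances the register pipeline (__CALC1..__CALC5) and __STORE writes the fully updated row 12 positions behind the current load */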
__STORE(__h - 12, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3); __h++; __LOAD(__reg_0_4, __h); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1); __STORE(__h - 12, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4); __h++; } if (__h == __side1LenOl) return; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); __STORE(__h - 12, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __STORE(__h - 12, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4); __STORE(__h - 12, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_3, __h); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); __STORE(__h - 12, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_4, __h); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1); __STORE(__h - 12, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4); __h++; } } __global__ void kernel0_5(float *A, int dimsize, int timestep, int c0) { #ifndef AN5D_TYPE #define AN5D_TYPE unsigned #endif const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0); #define __c0 c0 const AN5D_TYPE __c1Len = (dimsize - 2 - 2); const AN5D_TYPE __c1Pad = (2); #define __c1 c1 const AN5D_TYPE __c2Len = (dimsize - 2 - 2); const AN5D_TYPE __c2Pad = (2); #define __c2 c2 const AN5D_TYPE __halo1 
= 2; const AN5D_TYPE __halo2 = 2; const AN5D_TYPE __side0Len = 5; const AN5D_TYPE __side1Len = 512; const AN5D_TYPE __side2Len = 492; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len; const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len; const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x; const AN5D_TYPE __local_c2 = __tid; const AN5D_TYPE __c1Id = blockIdx.x / __side2Num; const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2; float __reg_0_0; float __reg_0_1; float __reg_0_2; float __reg_0_3; float __reg_0_4; float __reg_1_0; float __reg_1_1; float __reg_1_2; float __reg_1_3; float __reg_1_4; float __reg_2_0; float __reg_2_1; float __reg_2_2; float __reg_2_3; float __reg_2_4; float __reg_3_0; float __reg_3_1; float __reg_3_2; float __reg_3_3; float __reg_3_4; float __reg_4_0; float __reg_4_1; float __reg_4_2; float __reg_4_3; float __reg_4_4; __shared__ float __c_sb_double[__blockSize * 2]; float *__c_sb = __c_sb_double; const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2; const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len; const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1); const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2); const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3); const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4); const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5); const AN5D_TYPE __storeValid = __writeValid5; AN5D_TYPE __c1; AN5D_TYPE __h; const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id; #define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0) #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR(__rn0, __a, __b, __c, __d, __e) do { __rn0 = (((((((((0.09371f * (__REGREF(__a, 0))) + (0.09374f * (__REGREF(__b, 0)))) + (0.09376f * (__SBREF(__c_sb, -2)))) + (0.09372f * (__SBREF(__c_sb, -1)))) + (0.25001f * (__REGREF(__c, 0)))) + (0.09377f * (__SBREF(__c_sb, 1)))) + (0.09373f * (__SBREF(__c_sb, 2)))) + (0.09375f * (__REGREF(__d, 0)))) + (0.09378f * (__REGREF(__e, 0)))); } while (0) #define __DB_SWITCH() do { __c_sb = &__c_sb_double[(__c_sb == __c_sb_double) ? 
__blockSize : 0]; } while (0) #define __CALCSETUP(a, b, c, d, e) do { __DB_SWITCH(); __c_sb[__tid] = c; __syncthreads(); } while (0) #define __CALC1(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0) #define __CALC2(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0) #define __CALC3(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0) #define __CALC4(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid4) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0) #define __STORE(h, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2, reg3, reg4); } } while (0) if (__c1Id == 0) { __LOAD(__reg_4_0, 0); __LOAD(__reg_4_1, 1); __LOAD(__reg_0_2, 2); __LOAD(__reg_0_3, 3); __LOAD(__reg_0_4, 4); __CALC1(__reg_1_2, __reg_4_0, __reg_4_1, __reg_0_2, __reg_0_3, __reg_0_4); __LOAD(__reg_0_0, 5); __CALC1(__reg_1_3, __reg_4_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __LOAD(__reg_0_1, 6); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_4_0, __reg_4_1, __reg_1_2, __reg_1_3, __reg_1_4); __LOAD(__reg_0_2, 7); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_4_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __LOAD(__reg_0_3, 8); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_4_0, __reg_4_1, __reg_2_2, __reg_2_3, __reg_2_4); __LOAD(__reg_0_4, 9); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_3, __reg_4_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __LOAD(__reg_0_0, 10); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_4_0, __reg_4_1, __reg_3_2, __reg_3_3, __reg_3_4); __LOAD(__reg_0_1, 11); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_3, __reg_4_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __LOAD(__reg_0_2, 12); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __STORE(2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4); __LOAD(__reg_0_3, 13); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); 
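/* kernel0_5, first tile along c1 (__c1Id == 0): rows 0..1 seed the deepest pipeline registers directly (__reg_4_0, __reg_4_1), and stores begin at output row 2 once the pipeline is full */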
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __STORE(3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); __LOAD(__reg_0_4, 14); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __STORE(4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1); __LOAD(__reg_0_0, 15); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __STORE(5, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); __LOAD(__reg_0_1, 16); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __STORE(6, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __LOAD(__reg_0_2, 17); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __STORE(7, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4); __LOAD(__reg_0_3, 18); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __STORE(8, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); __LOAD(__reg_0_4, 19); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __STORE(9, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1); __LOAD(__reg_0_0, 20); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __STORE(10, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); } else { __LOAD(__reg_0_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __LOAD(__reg_0_3, 3); __LOAD(__reg_0_4, 4); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __LOAD(__reg_0_0, 5); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __LOAD(__reg_0_1, 6); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __LOAD(__reg_0_2, 7); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_3, 8); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __LOAD(__reg_0_4, 9); 
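/* interior tiles (__c1Id != 0): all rows, including the overlapped halo, pass through the normal __CALC chain; no __STORE is issued until the pipeline is full at row 10 */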
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __LOAD(__reg_0_0, 10); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __LOAD(__reg_0_1, 11); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __LOAD(__reg_0_2, 12); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __LOAD(__reg_0_3, 13); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __LOAD(__reg_0_4, 14); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __LOAD(__reg_0_0, 15); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __LOAD(__reg_0_1, 16); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __LOAD(__reg_0_2, 17); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __LOAD(__reg_0_3, 18); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __LOAD(__reg_0_4, 19); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __LOAD(__reg_0_0, 20); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __STORE(10, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); } __c_sb = __c_sb_double + __blockSize * 1; if (__c1Id == __side1Num - 1) { for (__h = 21; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 5;) { __LOAD(__reg_0_1, __h); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, 
__reg_2_1, __reg_2_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __STORE(__h - 10, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __h++; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __STORE(__h - 10, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4); __h++; __LOAD(__reg_0_3, __h); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __STORE(__h - 10, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); __h++; __LOAD(__reg_0_4, __h); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __STORE(__h - 10, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __STORE(__h - 10, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); __h++; __DB_SWITCH(); __syncthreads(); } if (0) {} else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __STORE(__h - 10, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4, __reg_0_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __STORE(__h - 9, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_0_4); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __STORE(__h - 8, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_0_4, __reg_0_0); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __STORE(__h - 7, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_0_4); __STORE(__h - 6, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_0_4, __reg_0_0); __STORE(__h - 5, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __STORE(__h - 4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_0_4); __STORE(__h - 3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_0_4, __reg_0_0); } else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_1, __h + 0); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, 
__reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __STORE(__h - 10, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __STORE(__h - 9, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0, __reg_0_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __STORE(__h - 8, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_0_0); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __STORE(__h - 7, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_0_0, __reg_0_1); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __STORE(__h - 6, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_0_0); __STORE(__h - 5, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_0_0, __reg_0_1); __STORE(__h - 4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4); __STORE(__h - 3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_0_0); __STORE(__h - 2, __reg_4_2, __reg_4_3, __reg_4_4, __reg_0_0, __reg_0_1); } else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_1, __h + 0); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __STORE(__h - 10, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __LOAD(__reg_0_2, __h + 1); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __STORE(__h - 9, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __STORE(__h - 8, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1, __reg_0_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __STORE(__h - 7, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_0_1); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __STORE(__h - 6, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_0_1, 
__reg_0_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __STORE(__h - 5, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_0_1); __STORE(__h - 4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_0_1, __reg_0_2); __STORE(__h - 3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); __STORE(__h - 2, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_0_1); __STORE(__h - 1, __reg_4_3, __reg_4_4, __reg_4_0, __reg_0_1, __reg_0_2); } else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_1, __h + 0); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __STORE(__h - 10, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __LOAD(__reg_0_2, __h + 1); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __STORE(__h - 9, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4); __LOAD(__reg_0_3, __h + 2); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __STORE(__h - 8, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __STORE(__h - 7, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2, __reg_0_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __STORE(__h - 6, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_0_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __STORE(__h - 5, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_0_2, __reg_0_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __STORE(__h - 4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_0_2); __STORE(__h - 3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_0_2, __reg_0_3); __STORE(__h - 2, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1); __STORE(__h - 1, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_0_2); __STORE(__h + 0, __reg_4_4, __reg_4_0, __reg_4_1, __reg_0_2, __reg_0_3); } else if (__h + 4 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_1, __h + 0); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, 
__reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __STORE(__h - 10, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __LOAD(__reg_0_2, __h + 1); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __STORE(__h - 9, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4); __LOAD(__reg_0_3, __h + 2); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __STORE(__h - 8, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); __LOAD(__reg_0_4, __h + 3); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __STORE(__h - 7, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __STORE(__h - 6, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3, __reg_0_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __STORE(__h - 5, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_0_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __STORE(__h - 4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_0_3, __reg_0_4); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __STORE(__h - 3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_0_3); __STORE(__h - 2, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_0_3, __reg_0_4); __STORE(__h - 1, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); __STORE(__h + 0, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_0_3); __STORE(__h + 1, __reg_4_0, __reg_4_1, __reg_4_2, __reg_0_3, __reg_0_4); } } else { for (__h = 21; __h <= __side1LenOl - 5;) { __LOAD(__reg_0_1, __h); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __STORE(__h - 10, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __h++; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, 
__reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __STORE(__h - 10, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4); __h++; __LOAD(__reg_0_3, __h); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __STORE(__h - 10, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); __h++; __LOAD(__reg_0_4, __h); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __STORE(__h - 10, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __STORE(__h - 10, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); __h++; __DB_SWITCH(); __syncthreads(); } if (__h == __side1LenOl) return; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __STORE(__h - 10, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __STORE(__h - 10, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_3, __h); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __STORE(__h - 10, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_4, __h); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __STORE(__h - 10, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, 
__reg_2_1); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __STORE(__h - 10, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); __h++; } } __global__ void kernel0_4(float *A, int dimsize, int timestep, int c0) { #ifndef AN5D_TYPE #define AN5D_TYPE unsigned #endif const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0); #define __c0 c0 const AN5D_TYPE __c1Len = (dimsize - 2 - 2); const AN5D_TYPE __c1Pad = (2); #define __c1 c1 const AN5D_TYPE __c2Len = (dimsize - 2 - 2); const AN5D_TYPE __c2Pad = (2); #define __c2 c2 const AN5D_TYPE __halo1 = 2; const AN5D_TYPE __halo2 = 2; const AN5D_TYPE __side0Len = 4; const AN5D_TYPE __side1Len = 512; const AN5D_TYPE __side2Len = 496; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len; const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len; const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x; const AN5D_TYPE __local_c2 = __tid; const AN5D_TYPE __c1Id = blockIdx.x / __side2Num; const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2; float __reg_0_0; float __reg_0_1; float __reg_0_2; float __reg_0_3; float __reg_0_4; float __reg_1_0; float __reg_1_1; float __reg_1_2; float __reg_1_3; float __reg_1_4; float __reg_2_0; float __reg_2_1; float __reg_2_2; float __reg_2_3; float __reg_2_4; float __reg_3_0; float __reg_3_1; float __reg_3_2; float __reg_3_3; float __reg_3_4; __shared__ float __c_sb_double[__blockSize * 2]; float *__c_sb = __c_sb_double; const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2; const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len; const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1); const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2); const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3); const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4); const AN5D_TYPE __storeValid = __writeValid4; AN5D_TYPE __c1; AN5D_TYPE __h; const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id; #define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0) #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR(__rn0, __a, __b, __c, __d, __e) do { __rn0 = (((((((((0.09371f * (__REGREF(__a, 0))) + (0.09374f * (__REGREF(__b, 0)))) + (0.09376f * (__SBREF(__c_sb, -2)))) + (0.09372f * (__SBREF(__c_sb, -1)))) + (0.25001f * (__REGREF(__c, 0)))) + (0.09377f * (__SBREF(__c_sb, 1)))) + (0.09373f * (__SBREF(__c_sb, 2)))) + (0.09375f * (__REGREF(__d, 0)))) + (0.09378f * (__REGREF(__e, 0)))); } while (0) #define __DB_SWITCH() do { __c_sb = &__c_sb_double[(__c_sb == __c_sb_double) ? 
__blockSize : 0]; } while (0) #define __CALCSETUP(a, b, c, d, e) do { __DB_SWITCH(); __c_sb[__tid] = c; __syncthreads(); } while (0) #define __CALC1(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0) #define __CALC2(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0) #define __CALC3(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0) #define __STORE(h, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2, reg3, reg4); } } while (0) if (__c1Id == 0) { __LOAD(__reg_3_0, 0); __LOAD(__reg_3_1, 1); __LOAD(__reg_0_2, 2); __LOAD(__reg_0_3, 3); __LOAD(__reg_0_4, 4); __CALC1(__reg_1_2, __reg_3_0, __reg_3_1, __reg_0_2, __reg_0_3, __reg_0_4); __LOAD(__reg_0_0, 5); __CALC1(__reg_1_3, __reg_3_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __LOAD(__reg_0_1, 6); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_3_0, __reg_3_1, __reg_1_2, __reg_1_3, __reg_1_4); __LOAD(__reg_0_2, 7); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_3_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __LOAD(__reg_0_3, 8); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_3_0, __reg_3_1, __reg_2_2, __reg_2_3, __reg_2_4); __LOAD(__reg_0_4, 9); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_3, __reg_3_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __LOAD(__reg_0_0, 10); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __STORE(2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __LOAD(__reg_0_1, 11); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __STORE(3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __LOAD(__reg_0_2, 12); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __STORE(4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __LOAD(__reg_0_3, 13); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __STORE(5, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __LOAD(__reg_0_4, 14); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, 
__reg_2_4, __reg_2_0); __STORE(6, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __LOAD(__reg_0_0, 15); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __STORE(7, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __LOAD(__reg_0_1, 16); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __STORE(8, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); } else { __LOAD(__reg_0_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __LOAD(__reg_0_3, 3); __LOAD(__reg_0_4, 4); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __LOAD(__reg_0_0, 5); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __LOAD(__reg_0_1, 6); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __LOAD(__reg_0_2, 7); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_3, 8); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __LOAD(__reg_0_4, 9); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __LOAD(__reg_0_0, 10); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __LOAD(__reg_0_1, 11); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __LOAD(__reg_0_2, 12); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __LOAD(__reg_0_3, 13); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __LOAD(__reg_0_4, 14); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __LOAD(__reg_0_0, 15); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __LOAD(__reg_0_1, 16); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __STORE(8, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); } __c_sb = __c_sb_double + __blockSize * 0; if (__c1Id == __side1Num - 1) { for (__h = 17; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 5;) { __LOAD(__reg_0_2, __h); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __CALC3(__reg_3_1, 
__reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __STORE(__h - 8, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __h++; __LOAD(__reg_0_3, __h); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __STORE(__h - 8, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __h++; __LOAD(__reg_0_4, __h); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __STORE(__h - 8, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __STORE(__h - 8, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __h++; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __STORE(__h - 8, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __h++; } if (0) {} else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __STORE(__h - 8, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0, __reg_0_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __STORE(__h - 7, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_0_0); __STORE(__h - 6, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_0_0, __reg_0_1); __STORE(__h - 5, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __STORE(__h - 4, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_0_0); __STORE(__h - 3, __reg_3_2, __reg_3_3, __reg_3_4, __reg_0_0, __reg_0_1); } else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_2, __h + 0); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __STORE(__h - 8, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __STORE(__h - 7, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1, __reg_0_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __STORE(__h - 6, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_0_1); __STORE(__h - 5, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_0_1, __reg_0_2); __STORE(__h - 4, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, 
__reg_3_0); __STORE(__h - 3, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_0_1); __STORE(__h - 2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_0_1, __reg_0_2); } else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_2, __h + 0); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __STORE(__h - 8, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __LOAD(__reg_0_3, __h + 1); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __STORE(__h - 7, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __STORE(__h - 6, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2, __reg_0_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __STORE(__h - 5, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_0_2); __STORE(__h - 4, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_0_2, __reg_0_3); __STORE(__h - 3, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __STORE(__h - 2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_0_2); __STORE(__h - 1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_0_2, __reg_0_3); } else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_2, __h + 0); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __STORE(__h - 8, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __LOAD(__reg_0_3, __h + 1); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __STORE(__h - 7, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __LOAD(__reg_0_4, __h + 2); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __STORE(__h - 6, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __STORE(__h - 5, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3, __reg_0_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __STORE(__h - 4, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_0_3); __STORE(__h - 3, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_0_3, __reg_0_4); __STORE(__h - 2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, 
__reg_3_2); __STORE(__h - 1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_0_3); __STORE(__h + 0, __reg_3_0, __reg_3_1, __reg_3_2, __reg_0_3, __reg_0_4); } else if (__h + 4 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_2, __h + 0); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __STORE(__h - 8, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __LOAD(__reg_0_3, __h + 1); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __STORE(__h - 7, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __LOAD(__reg_0_4, __h + 2); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __STORE(__h - 6, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __LOAD(__reg_0_0, __h + 3); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __STORE(__h - 5, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __STORE(__h - 4, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4, __reg_0_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __STORE(__h - 3, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_0_4); __STORE(__h - 2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_0_4, __reg_0_0); __STORE(__h - 1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __STORE(__h + 0, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_0_4); __STORE(__h + 1, __reg_3_1, __reg_3_2, __reg_3_3, __reg_0_4, __reg_0_0); } } else { for (__h = 17; __h <= __side1LenOl - 5;) { __LOAD(__reg_0_2, __h); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __STORE(__h - 8, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __h++; __LOAD(__reg_0_3, __h); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __STORE(__h - 8, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __h++; __LOAD(__reg_0_4, __h); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __STORE(__h - 8, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, 
__reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __STORE(__h - 8, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __h++; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __STORE(__h - 8, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __h++; } if (__h == __side1LenOl) return; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __STORE(__h - 8, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_3, __h); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __STORE(__h - 8, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_4, __h); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __STORE(__h - 8, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __STORE(__h - 8, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __STORE(__h - 8, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __h++; } } __global__ void kernel0_3(float *A, int dimsize, int timestep, int c0) { #ifndef AN5D_TYPE #define AN5D_TYPE unsigned #endif const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0); #define __c0 c0 const AN5D_TYPE __c1Len = (dimsize - 2 - 2); const AN5D_TYPE __c1Pad = (2); #define __c1 c1 const AN5D_TYPE __c2Len = (dimsize - 2 - 2); const AN5D_TYPE __c2Pad = (2); #define __c2 c2 const AN5D_TYPE __halo1 = 2; const AN5D_TYPE __halo2 = 2; const AN5D_TYPE __side0Len = 3; const AN5D_TYPE __side1Len = 512; const AN5D_TYPE __side2Len = 500; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len; const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len; const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x; const AN5D_TYPE __local_c2 = __tid; const AN5D_TYPE __c1Id = blockIdx.x / __side2Num; const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * 
__side2Len + __local_c2 + __c2Pad - __OlLen2; float __reg_0_0; float __reg_0_1; float __reg_0_2; float __reg_0_3; float __reg_0_4; float __reg_1_0; float __reg_1_1; float __reg_1_2; float __reg_1_3; float __reg_1_4; float __reg_2_0; float __reg_2_1; float __reg_2_2; float __reg_2_3; float __reg_2_4; __shared__ float __c_sb_double[__blockSize * 2]; float *__c_sb = __c_sb_double; const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2; const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len; const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1); const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2); const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3); const AN5D_TYPE __storeValid = __writeValid3; AN5D_TYPE __c1; AN5D_TYPE __h; const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id; #define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0) #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR(__rn0, __a, __b, __c, __d, __e) do { __rn0 = (((((((((0.09371f * (__REGREF(__a, 0))) + (0.09374f * (__REGREF(__b, 0)))) + (0.09376f * (__SBREF(__c_sb, -2)))) + (0.09372f * (__SBREF(__c_sb, -1)))) + (0.25001f * (__REGREF(__c, 0)))) + (0.09377f * (__SBREF(__c_sb, 1)))) + (0.09373f * (__SBREF(__c_sb, 2)))) + (0.09375f * (__REGREF(__d, 0)))) + (0.09378f * (__REGREF(__e, 0)))); } while (0) #define __DB_SWITCH() do { __c_sb = &__c_sb_double[(__c_sb == __c_sb_double) ? 
__blockSize : 0]; } while (0) #define __CALCSETUP(a, b, c, d, e) do { __DB_SWITCH(); __c_sb[__tid] = c; __syncthreads(); } while (0) #define __CALC1(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0) #define __CALC2(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0) #define __STORE(h, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2, reg3, reg4); } } while (0) if (__c1Id == 0) { __LOAD(__reg_2_0, 0); __LOAD(__reg_2_1, 1); __LOAD(__reg_0_2, 2); __LOAD(__reg_0_3, 3); __LOAD(__reg_0_4, 4); __CALC1(__reg_1_2, __reg_2_0, __reg_2_1, __reg_0_2, __reg_0_3, __reg_0_4); __LOAD(__reg_0_0, 5); __CALC1(__reg_1_3, __reg_2_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __LOAD(__reg_0_1, 6); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_2_0, __reg_2_1, __reg_1_2, __reg_1_3, __reg_1_4); __LOAD(__reg_0_2, 7); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_2_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __LOAD(__reg_0_3, 8); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __STORE(2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __LOAD(__reg_0_4, 9); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __STORE(3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __LOAD(__reg_0_0, 10); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __STORE(4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __LOAD(__reg_0_1, 11); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __STORE(5, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __LOAD(__reg_0_2, 12); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __STORE(6, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); } else { __LOAD(__reg_0_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __LOAD(__reg_0_3, 3); __LOAD(__reg_0_4, 4); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __LOAD(__reg_0_0, 5); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __LOAD(__reg_0_1, 6); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __LOAD(__reg_0_2, 7); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_3, 8); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __LOAD(__reg_0_4, 9); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __LOAD(__reg_0_0, 10); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, 
__reg_1_2, __reg_1_3); __LOAD(__reg_0_1, 11); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __LOAD(__reg_0_2, 12); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __STORE(6, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); } __c_sb = __c_sb_double + __blockSize * 1; if (__c1Id == __side1Num - 1) { for (__h = 13; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 5;) { __LOAD(__reg_0_3, __h); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __STORE(__h - 6, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __h++; __LOAD(__reg_0_4, __h); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __STORE(__h - 6, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __STORE(__h - 6, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __h++; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __STORE(__h - 6, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __h++; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __STORE(__h - 6, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __h++; __DB_SWITCH(); __syncthreads(); } if (0) {} else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1); __STORE(__h - 6, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1, __reg_0_2); __STORE(__h - 5, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __STORE(__h - 4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_0_1); __STORE(__h - 3, __reg_2_3, __reg_2_4, __reg_2_0, __reg_0_1, __reg_0_2); } else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_3, __h + 0); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __STORE(__h - 6, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2); __STORE(__h - 5, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2, __reg_0_3); __STORE(__h - 4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __STORE(__h - 3, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_0_2); __STORE(__h - 2, __reg_2_4, __reg_2_0, __reg_2_1, __reg_0_2, __reg_0_3); } else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_3, __h + 0); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __STORE(__h - 6, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __LOAD(__reg_0_4, __h + 1); __CALC1(__reg_1_2, __reg_0_0, 
__reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __STORE(__h - 5, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3); __STORE(__h - 4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3, __reg_0_4); __STORE(__h - 3, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __STORE(__h - 2, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_0_3); __STORE(__h - 1, __reg_2_0, __reg_2_1, __reg_2_2, __reg_0_3, __reg_0_4); } else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_3, __h + 0); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __STORE(__h - 6, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __LOAD(__reg_0_4, __h + 1); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __STORE(__h - 5, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __LOAD(__reg_0_0, __h + 2); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __STORE(__h - 4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4); __STORE(__h - 3, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4, __reg_0_0); __STORE(__h - 2, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __STORE(__h - 1, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_0_4); __STORE(__h + 0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_0_4, __reg_0_0); } else if (__h + 4 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_3, __h + 0); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __STORE(__h - 6, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __LOAD(__reg_0_4, __h + 1); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __STORE(__h - 5, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __LOAD(__reg_0_0, __h + 2); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __STORE(__h - 4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __LOAD(__reg_0_1, __h + 3); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __STORE(__h - 3, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0); __STORE(__h - 2, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0, __reg_0_1); __STORE(__h - 1, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __STORE(__h + 0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_0_0); __STORE(__h + 1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_0_0, __reg_0_1); } } else { for (__h = 13; __h <= __side1LenOl - 5;) { __LOAD(__reg_0_3, __h); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, 
__reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __STORE(__h - 6, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __h++; __LOAD(__reg_0_4, __h); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __STORE(__h - 6, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __STORE(__h - 6, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __h++; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __STORE(__h - 6, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __h++; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __STORE(__h - 6, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __h++; __DB_SWITCH(); __syncthreads(); } if (__h == __side1LenOl) return; __LOAD(__reg_0_3, __h); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __STORE(__h - 6, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_4, __h); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __STORE(__h - 6, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __STORE(__h - 6, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __STORE(__h - 6, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __STORE(__h - 6, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __h++; } } __global__ void kernel0_2(float *A, int dimsize, int timestep, int c0) { #ifndef AN5D_TYPE #define AN5D_TYPE unsigned #endif const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0); #define __c0 c0 const AN5D_TYPE __c1Len = (dimsize - 2 - 2); const AN5D_TYPE __c1Pad = (2); #define __c1 c1 const AN5D_TYPE __c2Len = (dimsize - 2 - 2); const AN5D_TYPE __c2Pad = (2); #define __c2 c2 const AN5D_TYPE __halo1 = 2; const AN5D_TYPE __halo2 = 2; const AN5D_TYPE __side0Len = 2; const AN5D_TYPE __side1Len = 512; const AN5D_TYPE __side2Len = 504; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len; 
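// (Illustrative note, inferred from the constants above:) this variant fuses
// __side0Len = 2 time steps per launch. __side2Len = 504 leaves room for the
// 2 * __halo2 * __side0Len = 8 overlap columns, so __side2LenOl stays at the
// 512-thread block width shared by the kernel0_* variants.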
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len; const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x; const AN5D_TYPE __local_c2 = __tid; const AN5D_TYPE __c1Id = blockIdx.x / __side2Num; const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2; float __reg_0_0; float __reg_0_1; float __reg_0_2; float __reg_0_3; float __reg_0_4; float __reg_1_0; float __reg_1_1; float __reg_1_2; float __reg_1_3; float __reg_1_4; __shared__ float __c_sb_double[__blockSize * 2]; float *__c_sb = __c_sb_double; const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2; const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len; const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1); const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2); const AN5D_TYPE __storeValid = __writeValid2; AN5D_TYPE __c1; AN5D_TYPE __h; const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id; #define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0) #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR(__rn0, __a, __b, __c, __d, __e) do { __rn0 = (((((((((0.09371f * (__REGREF(__a, 0))) + (0.09374f * (__REGREF(__b, 0)))) + (0.09376f * (__SBREF(__c_sb, -2)))) + (0.09372f * (__SBREF(__c_sb, -1)))) + (0.25001f * (__REGREF(__c, 0)))) + (0.09377f * (__SBREF(__c_sb, 1)))) + (0.09373f * (__SBREF(__c_sb, 2)))) + (0.09375f * (__REGREF(__d, 0)))) + (0.09378f * (__REGREF(__e, 0)))); } while (0) #define __DB_SWITCH() do { __c_sb = &__c_sb_double[(__c_sb == __c_sb_double) ? 
__blockSize : 0]; } while (0) #define __CALCSETUP(a, b, c, d, e) do { __DB_SWITCH(); __c_sb[__tid] = c; __syncthreads(); } while (0) #define __CALC1(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0) #define __STORE(h, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2, reg3, reg4); } } while (0) if (__c1Id == 0) { __LOAD(__reg_1_0, 0); __LOAD(__reg_1_1, 1); __LOAD(__reg_0_2, 2); __LOAD(__reg_0_3, 3); __LOAD(__reg_0_4, 4); __CALC1(__reg_1_2, __reg_1_0, __reg_1_1, __reg_0_2, __reg_0_3, __reg_0_4); __LOAD(__reg_0_0, 5); __CALC1(__reg_1_3, __reg_1_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __LOAD(__reg_0_1, 6); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __STORE(2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __LOAD(__reg_0_2, 7); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __STORE(3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __LOAD(__reg_0_3, 8); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __STORE(4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); } else { __LOAD(__reg_0_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __LOAD(__reg_0_3, 3); __LOAD(__reg_0_4, 4); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __LOAD(__reg_0_0, 5); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __LOAD(__reg_0_1, 6); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __LOAD(__reg_0_2, 7); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_3, 8); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __STORE(4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); } __c_sb = __c_sb_double + __blockSize * 0; if (__c1Id == __side1Num - 1) { for (__h = 9; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 5;) { __LOAD(__reg_0_4, __h); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __STORE(__h - 4, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __STORE(__h - 4, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __h++; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __STORE(__h - 4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __h++; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __STORE(__h - 4, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __h++; __LOAD(__reg_0_3, __h); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __STORE(__h - 4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __h++; } if (0) {} else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __STORE(__h - 4, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2); __STORE(__h - 3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2, __reg_0_3); } else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_4, __h + 0); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __STORE(__h - 4, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __STORE(__h - 3, __reg_1_4, __reg_1_0, __reg_1_1, 
__reg_1_2, __reg_0_3); __STORE(__h - 2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3, __reg_0_4); } else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_4, __h + 0); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __STORE(__h - 4, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __LOAD(__reg_0_0, __h + 1); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __STORE(__h - 3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __STORE(__h - 2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4); __STORE(__h - 1, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4, __reg_0_0); } else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_4, __h + 0); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __STORE(__h - 4, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __LOAD(__reg_0_0, __h + 1); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __STORE(__h - 3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __LOAD(__reg_0_1, __h + 2); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __STORE(__h - 2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __STORE(__h - 1, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0); __STORE(__h + 0, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0, __reg_0_1); } else if (__h + 4 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_4, __h + 0); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __STORE(__h - 4, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __LOAD(__reg_0_0, __h + 1); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __STORE(__h - 3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __LOAD(__reg_0_1, __h + 2); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __STORE(__h - 2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __LOAD(__reg_0_2, __h + 3); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __STORE(__h - 1, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __STORE(__h + 0, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1); __STORE(__h + 1, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1, __reg_0_2); } } else { for (__h = 9; __h <= __side1LenOl - 5;) { __LOAD(__reg_0_4, __h); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __STORE(__h - 4, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __STORE(__h - 4, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __h++; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __STORE(__h - 4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __h++; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __STORE(__h - 4, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __h++; __LOAD(__reg_0_3, __h); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __STORE(__h - 4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __h++; } if (__h == __side1LenOl) return; __LOAD(__reg_0_4, __h); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __STORE(__h - 4, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_0, __h); 
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __STORE(__h - 4, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __STORE(__h - 4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __STORE(__h - 4, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_3, __h); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __STORE(__h - 4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __h++; } } __global__ void kernel0_1(float *A, int dimsize, int timestep, int c0) { #ifndef AN5D_TYPE #define AN5D_TYPE unsigned #endif const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0); #define __c0 c0 const AN5D_TYPE __c1Len = (dimsize - 2 - 2); const AN5D_TYPE __c1Pad = (2); #define __c1 c1 const AN5D_TYPE __c2Len = (dimsize - 2 - 2); const AN5D_TYPE __c2Pad = (2); #define __c2 c2 const AN5D_TYPE __halo1 = 2; const AN5D_TYPE __halo2 = 2; const AN5D_TYPE __side0Len = 1; const AN5D_TYPE __side1Len = 512; const AN5D_TYPE __side2Len = 508; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len; const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len; const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x; const AN5D_TYPE __local_c2 = __tid; const AN5D_TYPE __c1Id = blockIdx.x / __side2Num; const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2; float __reg_0_0; float __reg_0_1; float __reg_0_2; float __reg_0_3; float __reg_0_4; __shared__ float __c_sb_double[__blockSize * 2]; float *__c_sb = __c_sb_double; const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2; const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len; const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1); const AN5D_TYPE __storeValid = __writeValid1; AN5D_TYPE __c1; AN5D_TYPE __h; const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id; #define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0) #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR(__rn0, __a, __b, __c, __d, __e) do { __rn0 = (((((((((0.09371f * (__REGREF(__a, 0))) + (0.09374f * (__REGREF(__b, 0)))) + (0.09376f * (__SBREF(__c_sb, -2)))) + (0.09372f * (__SBREF(__c_sb, -1)))) + (0.25001f * (__REGREF(__c, 0)))) + (0.09377f * (__SBREF(__c_sb, 1)))) + (0.09373f * (__SBREF(__c_sb, 2)))) + (0.09375f * (__REGREF(__d, 0)))) + (0.09378f * (__REGREF(__e, 0)))); } while (0) #define __DB_SWITCH() do { __c_sb = &__c_sb_double[(__c_sb == __c_sb_double) ? 
__blockSize : 0]; } while (0) #define __CALCSETUP(a, b, c, d, e) do { __DB_SWITCH(); __c_sb[__tid] = c; __syncthreads(); } while (0) #define __STORE(h, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2, reg3, reg4); } } while (0) if (__c1Id == 0) { __LOAD(__reg_0_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __LOAD(__reg_0_3, 3); __LOAD(__reg_0_4, 4); __STORE(2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); } else { __LOAD(__reg_0_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __LOAD(__reg_0_3, 3); __LOAD(__reg_0_4, 4); __STORE(2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); } __c_sb = __c_sb_double + __blockSize * 1; if (__c1Id == __side1Num - 1) { for (__h = 5; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 5;) { __LOAD(__reg_0_0, __h); __STORE(__h - 2, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __h++; __LOAD(__reg_0_1, __h); __STORE(__h - 2, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __h++; __LOAD(__reg_0_2, __h); __STORE(__h - 2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __h++; __LOAD(__reg_0_3, __h); __STORE(__h - 2, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __h++; __LOAD(__reg_0_4, __h); __STORE(__h - 2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __h++; __DB_SWITCH(); __syncthreads(); } if (0) {} else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { } else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_0, __h + 0); __STORE(__h - 2, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); } else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_0, __h + 0); __STORE(__h - 2, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __LOAD(__reg_0_1, __h + 1); __STORE(__h - 1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); } else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_0, __h + 0); __STORE(__h - 2, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __LOAD(__reg_0_1, __h + 1); __STORE(__h - 1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __LOAD(__reg_0_2, __h + 2); __STORE(__h + 0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); } else if (__h + 4 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_0, __h + 0); __STORE(__h - 2, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __LOAD(__reg_0_1, __h + 1); __STORE(__h - 1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __LOAD(__reg_0_2, __h + 2); __STORE(__h + 0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_3, __h + 3); __STORE(__h + 1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); } } else { for (__h = 5; __h <= __side1LenOl - 5;) { __LOAD(__reg_0_0, __h); __STORE(__h - 2, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __h++; __LOAD(__reg_0_1, __h); __STORE(__h - 2, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __h++; __LOAD(__reg_0_2, __h); __STORE(__h - 2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __h++; __LOAD(__reg_0_3, __h); __STORE(__h - 2, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __h++; __LOAD(__reg_0_4, __h); __STORE(__h - 2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __h++; __DB_SWITCH(); __syncthreads(); } if (__h == __side1LenOl) return; __LOAD(__reg_0_0, __h); __STORE(__h - 2, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __h++; if (__h == 
__side1LenOl) return; __LOAD(__reg_0_1, __h); __STORE(__h - 2, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_2, __h); __STORE(__h - 2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_3, __h); __STORE(__h - 2, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_4, __h); __STORE(__h - 2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __h++; } }
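// ---------------------------------------------------------------------------
// Launch-configuration sketch for the kernel0_k variants above (an illustrative
// example only; the generated host driver that actually dispatches them is not
// shown here, and the helper below is hypothetical). Each kernel0_k fuses k
// time steps per launch (__side0Len = k): it reads plane (c0 % 2) of the
// double-buffered array A (two dimsize x dimsize planes), writes plane
// ((c0 + 1) % 2), and extends each tile by __halo1 * k rows and __halo2 * k
// columns of redundant halo work so the fused steps need no inter-block
// communication. Every variant uses a 1-D block of __side2LenOl = 512 threads
// and one block per (c1, c2) tile.
// ---------------------------------------------------------------------------
static inline dim3 kernel0_4_grid(int dimsize) // hypothetical helper for the kernel0_4 tiling
{
    const unsigned c1Len = dimsize - 4, c2Len = dimsize - 4;  // interior points per plane
    const unsigned side1Num = (c1Len + 512 - 1) / 512;        // tiles along c1 (__side1Len = 512)
    const unsigned side2Num = (c2Len + 496 - 1) / 496;        // tiles along c2 (__side2Len = 496)
    return dim3(side1Num * side2Num, 1, 1);                   // blockIdx.x encodes both tile indices
}
// Example launch of one 4-step chunk starting at time step c0 (dev_A is a
// hypothetical device pointer holding the two planes):
//   kernel0_4<<<kernel0_4_grid(dimsize), dim3(512, 1, 1)>>>(dev_A, dimsize, timestep, c0);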
1700067fe3be7dddfba8dea8d834a43dcf8390f5.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <cstdio> #include <ctime> #include <vector> #include <algorithm> #include <stdlib.h> // utilities #include <helper_cuda.h> #include <time.h> ///////////per request timing. L1 enabled. P100. ///////////using more than 8gb. //typedef unsigned char byte; void shuffle(long long int *array, long long int n) { if (n > 1){ long long int i; for (i = 0; i < n - 1; i++){ long long int j = i + rand() / (RAND_MAX / (n - i) + 1); long long int t = array[j]; array[j] = array[i]; array[i] = t; } } } void init_cpu_data(unsigned *A, unsigned size, unsigned stride, unsigned mod, long long int iterations){ if(0){////////////normal for (unsigned i = 0; i < size - stride; i = i + stride){ A[i]=(i + stride); } for (unsigned i = 7; i < size - stride; i = i + stride){ A[i]=(i + stride); } A[size - stride]=0; A[size - stride + 7]=0; } if(1){////////////reversed for (unsigned i = 0; i <= size - stride; i = i + stride){ A[i]=(i - stride); } for (unsigned i = 7; i <= size - stride + 7; i = i + stride){ A[i]=(i - stride); } A[0]=size - stride; A[7]=size - stride + 7; } if(0){////////////random long long int *rand_sequence; rand_sequence = (long long int*)malloc(sizeof(long long int) * iterations); //////random sequence offset 0 for(long long int i = 0; i < iterations; i++){ rand_sequence[i] = i; } //srand (time(NULL)); srand(1); shuffle(rand_sequence, iterations); long long int previous_rand_num; long long int rand_num = rand_sequence[0] * stride; for(long long int i = 1; i < iterations; i++){ previous_rand_num = rand_num; rand_num = rand_sequence[i] * stride; A[previous_rand_num]=(unsigned)rand_num; } A[rand_num]=(unsigned)(rand_sequence[0] * stride);////back to beginning //////random sequence offset 7 //for(int i = 0; i < iterations; i++){ // rand_sequence[i] = i; //} //srand (time(NULL)); //shuffle(rand_sequence, iterations); rand_num = rand_sequence[0] * stride + 7; for(long long int i = 1; i < iterations; i++){ previous_rand_num = rand_num; rand_num = rand_sequence[i] * stride + 7; A[previous_rand_num]=(unsigned)rand_num; } A[rand_num]=(unsigned)(rand_sequence[0] * stride + 7);////back to beginning } /* ///////manually set the nodes A[32]=104333344; A[104333344]=200802336; A[200802336]=353370144; A[353370144]=372244512; A[372244512]=110100512; A[110100512]=182452256; A[182452256]=333971488; A[333971488]=225443872; A[225443872]=155189280; A[155189280]=104333344; */ } __device__ void P_chasing0(int mark, unsigned *A, int iterations, int *B, int *C, unsigned *D, int starting_index, float clock_rate, int data_stride){ int j = starting_index;/////make them in the same page, and miss near in cache lines for (int it = 0; it < iterations; it++){ j = A[j]; } B[0] = j; } //////////min page size 4kb = 4096b = 32 * 128. __device__ void P_chasing1(int mark, unsigned *A, long long int iterations, unsigned *B, unsigned *C, long long int *D, unsigned starting_index, float clock_rate, unsigned data_stride){ unsigned j = starting_index;/////make them in the same page, and miss near in cache lines //unsigned start_time = 0;//////clock //unsigned end_time = 0;//////clock //start_time = clock64();//////clock for (long long int it = 0; it < iterations; it++){ j = A[j]; } //end_time=clock64();//////clock //unsigned total_time = end_time - start_time;//////clock //printf("inside%d:%fms\n", mark, (total_time / (float)clock_rate) / ((float)iterations));//////clock, average latency //////////the print will flush the L1?! 
( B[0] = j; //B[1] = (int) total_time; } //////////min page size 4kb = 4096b = 32 * 128. __device__ void P_chasing2(int mark, unsigned *A, long long int iterations, unsigned *B, unsigned *C, long long int *D, unsigned starting_index, float clock_rate, unsigned data_stride){//////what is the effect of warmup outside vs inside? //////shared memory: 0xc000 max (49152 Bytes = 48KB) __shared__ long long int s_tvalue[1024 * 4];/////must be enough to contain the number of iterations. __shared__ unsigned s_index[1024 * 4]; //__shared__ unsigned s_index[1]; unsigned j = starting_index;/////make them in the same page, and miss near in cache lines //int j = B[0]; long long int start_time = 0;//////clock long long int end_time = 0;//////clock long long int time_interval = 0;//////clock //unsigned total_time = end_time - start_time;//////clock /* for (int it = 0; it < iterations; it++){ start_time = clock64();//////clock j = A[j]; //s_index[it] = j; end_time=clock64();//////clock s_tvalue[it] = end_time - start_time; } */ asm(".reg .u64 t1;\n\t" ".reg .u64 t2;\n\t"); for (long long int it = 0; it < iterations; it++){ /* asm("mul.wide.u32 t1, %3, %5;\n\t" "add.u64 t2, t1, %4;\n\t" "mov.u64 %0, %clock64;\n\t" "ld.global.u32 %2, [t2];\n\t" "mov.u64 %1, %clock64;" : "=l"(start_time), "=l"(end_time), "=r"(j) : "r"(j), "l"(A), "r"(4)); */ asm("mul.wide.u32 t1, %2, %4;\n\t" "add.u64 t2, t1, %3;\n\t" "mov.u64 %0, %clock64;\n\t" "ld.global.u32 %1, [t2];\n\t" : "=l"(start_time), "=r"(j) : "r"(j), "l"(A), "r"(4)); s_index[it] = j;////what if without this? ///Then it is not accurate and cannot get the access time at all, due to the ILP. (another way is to use average time, but inevitably containing other instructions:setp, add). asm volatile ("mov.u64 %0, %clock64;": "=l"(end_time)); time_interval = end_time - start_time; //if(it >= 4 * 1024){ s_tvalue[it] = time_interval; //} } //printf("inside%d:%fms\n", mark, (total_time / (float)clock_rate) / ((float)iterations));//////clock, average latency B[0] = j; for (long long int it = 0; it < iterations; it++){ C[it] = s_index[it]; D[it] = s_tvalue[it]; } } __global__ void tlb_latency_test(unsigned *A, long long int iterations, unsigned *B, unsigned *C, long long int *D, float clock_rate, unsigned mod, int data_stride){ long long int reduced_iter = iterations; if(reduced_iter > 512){ reduced_iter = 512; }else if(reduced_iter < 16){ reduced_iter = 16; } ///////////kepler L2 has 48 * 1024 = 49152 cache lines. But we only have 1024 * 4 slots in shared memory. 
//P_chasing1(0, A, iterations + 0, B, C, D, 0, clock_rate, data_stride);////////saturate the L2 P_chasing2(0, A, reduced_iter, B, C, D, 0, clock_rate, data_stride);////////partially print the data __syncthreads(); } int main(int argc, char **argv) { printf("\n"); // set device hipDeviceProp_t device_prop; //int dev_id = findCudaDevice(argc, (const char **) argv); int dev_id = 0; checkCudaErrors(hipGetDeviceProperties(&device_prop, dev_id)); int peak_clk = 1;//kHz checkCudaErrors(hipDeviceGetAttribute(&peak_clk, hipDeviceAttributeClockRate, dev_id)); float clock_rate = (float) peak_clk; //printf("clock_rate_out_kernel:%f\n", clock_rate); if (!device_prop.managedMemory) { // This samples requires being run on a device that supports Unified Memory fprintf(stderr, "Unified Memory not supported on this device\n"); exit(EXIT_WAIVED); } if (device_prop.computeMode == hipComputeModeProhibited) { // This sample requires being run with a default or process exclusive mode fprintf(stderr, "This sample requires a device in either default or process exclusive mode\n"); exit(EXIT_WAIVED); } ///////////////////////////////////////////////////////////////////GPU data out unsigned *GPU_data_out; checkCudaErrors(hipMalloc(&GPU_data_out, sizeof(unsigned) * 2)); FILE * pFile; pFile = fopen ("output.txt","w"); unsigned counter = 0; for(unsigned data_stride = 1 * 1 * 256; data_stride <= 2 * 256 * 1024; data_stride = data_stride * 2){/////////32mb stride //data_stride = data_stride + 32;///offset a cache line, trying to cause L2 miss but tlb hit. //printf("###################data_stride%d#########################\n", data_stride); //for(int mod = 1024 * 256 * 2; mod > 0; mod = mod - 32 * 1024){/////kepler L2 1.5m = 12288 cache lines, L1 16k = 128 cache lines. for(unsigned mod2 = 1 * 16 * 1024; mod2 <= 1073741824; mod2 = mod2 * 2){////268435456 = 1gb, 536870912 = 2gb, 1073741824 = 4gb, 2147483648 = 8gb, 4294967296 = 16gb. counter++; ///////////////////////////////////////////////////////////////////CPU data begin //int data_size = 2 * 256 * 1024 * 32;/////size = iteration * stride = 32 2mb pages. unsigned mod = mod2; if(mod > 2684354560){ mod = 2684354560; } //unsigned data_size = 2684354560;//////when size gets larger than 32MB(8388608), an additional latency is added. Is it prefetching? cpu cache or tlb? 
unsigned data_size = mod; if(data_size < 4194304){//////////data size at least 16mb to prevent L2 prefetch data_size = 4194304; } //int iterations = data_size / data_stride; //int iterations = 1024 * 256 * 8; long long int iterations = mod / data_stride;////32 * 32 * 4 / 32 * 2 = 256 unsigned *CPU_data_in; //CPU_data_in = (int*)malloc(sizeof(int) * data_size); checkCudaErrors(hipHostMalloc((void**)&CPU_data_in, sizeof(unsigned) * data_size, hipHostMallocDefault));//////////using pinned memory init_cpu_data(CPU_data_in, data_size, data_stride, mod, iterations); long long int reduced_iter = iterations; if(reduced_iter > 512){ reduced_iter = 512; }else if(reduced_iter < 16){ reduced_iter = 16; } unsigned *CPU_data_out_index; CPU_data_out_index = (unsigned*)malloc(sizeof(unsigned) * reduced_iter); long long int *CPU_data_out_time; CPU_data_out_time = (long long int*)malloc(sizeof(long long int) * reduced_iter); ///////////////////////////////////////////////////////////////////CPU data end ///////////////////////////////////////////////////////////////////GPU data in //int *GPU_data_in; //checkCudaErrors(hipMalloc(&GPU_data_in, sizeof(int) * data_size)); //hipMemcpy(GPU_data_in, CPU_data_in, sizeof(int) * data_size, hipMemcpyHostToDevice); ///////////////////////////////////////////////////////////////////GPU data out unsigned *GPU_data_out_index; checkCudaErrors(hipMalloc(&GPU_data_out_index, sizeof(unsigned) * reduced_iter)); long long int *GPU_data_out_time; checkCudaErrors(hipMalloc(&GPU_data_out_time, sizeof(long long int) * reduced_iter)); hipLaunchKernelGGL(( tlb_latency_test), dim3(1), dim3(1), 0, 0, CPU_data_in, iterations, GPU_data_out, GPU_data_out_index, GPU_data_out_time, clock_rate, mod, data_stride);///////////////kernel is here hipDeviceSynchronize(); hipMemcpy(CPU_data_out_index, GPU_data_out_index, sizeof(unsigned) * reduced_iter, hipMemcpyDeviceToHost); hipMemcpy(CPU_data_out_time, GPU_data_out_time, sizeof(long long int) * reduced_iter, hipMemcpyDeviceToHost); fprintf(pFile, "###################data_stride%d#########################\n", data_stride); fprintf (pFile, "###############Mod%lld##############%lld\n", mod, iterations); for (long long int it = 0; it < reduced_iter; it++){ fprintf (pFile, "%d %fms %lldcycles\n", CPU_data_out_index[it], CPU_data_out_time[it] / (float)clock_rate, CPU_data_out_time[it]); //fprintf (pFile, "%d %fms\n", it, CPU_data_out_time[it] / (float)clock_rate); //printf ("%d %fms\n", CPU_data_out_index[it], CPU_data_out_time[it] / (float)clock_rate); } checkCudaErrors(hipFree(GPU_data_out_index)); checkCudaErrors(hipFree(GPU_data_out_time)); //checkCudaErrors(hipFree(GPU_data_in)); //checkCudaErrors(hipFree(CPU_data_in)); checkCudaErrors(hipHostFree(CPU_data_in));//////using pinned memory //free(CPU_data_in); free(CPU_data_out_index); free(CPU_data_out_time); } for(unsigned mod2 = 1; mod2 <= 1; mod2 = mod2 * 2){////268435456 = 1gb, 536870912 = 2gb, 1073741824 = 4gb, 2147483648 = 8gb, 4294967296 = 16gb. counter++; ///////////////////////////////////////////////////////////////////CPU data begin //int data_size = 2 * 256 * 1024 * 32;/////size = iteration * stride = 32 2mb pages. 
unsigned mod = 2147483648; if(mod > 3221225472){ mod = 3221225472; } //unsigned data_size = 2684354560; unsigned data_size = mod; if(data_size < 4194304){//////////data size at least 16mb to prevent L2 prefetch data_size = 4194304; } //int iterations = data_size / data_stride; //int iterations = 1024 * 256 * 8; long long int iterations = mod / data_stride;////32 * 32 * 4 / 32 * 2 = 256 unsigned *CPU_data_in; //CPU_data_in = (int*)malloc(sizeof(int) * data_size); checkCudaErrors(hipHostMalloc((void**)&CPU_data_in, sizeof(unsigned) * data_size, hipHostMallocDefault));//////////using pinned memory init_cpu_data(CPU_data_in, data_size, data_stride, mod, iterations); long long int reduced_iter = iterations; if(reduced_iter > 512){ reduced_iter = 512; }else if(reduced_iter < 16){ reduced_iter = 16; } unsigned *CPU_data_out_index; CPU_data_out_index = (unsigned*)malloc(sizeof(unsigned) * reduced_iter); long long int *CPU_data_out_time; CPU_data_out_time = (long long int*)malloc(sizeof(long long int) * reduced_iter); ///////////////////////////////////////////////////////////////////CPU data end ///////////////////////////////////////////////////////////////////GPU data in //int *GPU_data_in; //checkCudaErrors(hipMalloc(&GPU_data_in, sizeof(int) * data_size)); //hipMemcpy(GPU_data_in, CPU_data_in, sizeof(int) * data_size, hipMemcpyHostToDevice); ///////////////////////////////////////////////////////////////////GPU data out unsigned *GPU_data_out_index; checkCudaErrors(hipMalloc(&GPU_data_out_index, sizeof(unsigned) * reduced_iter)); long long int *GPU_data_out_time; checkCudaErrors(hipMalloc(&GPU_data_out_time, sizeof(long long int) * reduced_iter)); hipLaunchKernelGGL(( tlb_latency_test), dim3(1), dim3(1), 0, 0, CPU_data_in, iterations, GPU_data_out, GPU_data_out_index, GPU_data_out_time, clock_rate, mod, data_stride);///////////////kernel is here hipDeviceSynchronize(); hipMemcpy(CPU_data_out_index, GPU_data_out_index, sizeof(unsigned) * reduced_iter, hipMemcpyDeviceToHost); hipMemcpy(CPU_data_out_time, GPU_data_out_time, sizeof(long long int) * reduced_iter, hipMemcpyDeviceToHost); fprintf(pFile, "###################data_stride%d#########################\n", data_stride); fprintf (pFile, "###############Mod%lld##############%lld\n", mod, iterations); for (long long int it = 0; it < reduced_iter; it++){ fprintf (pFile, "%d %fms %lldcycles\n", CPU_data_out_index[it], CPU_data_out_time[it] / (float)clock_rate, CPU_data_out_time[it]); //fprintf (pFile, "%d %fms\n", it, CPU_data_out_time[it] / (float)clock_rate); //printf ("%d %fms\n", CPU_data_out_index[it], CPU_data_out_time[it] / (float)clock_rate); } checkCudaErrors(hipFree(GPU_data_out_index)); checkCudaErrors(hipFree(GPU_data_out_time)); //checkCudaErrors(hipFree(GPU_data_in)); //checkCudaErrors(hipFree(CPU_data_in)); checkCudaErrors(hipHostFree(CPU_data_in));//////using pinned memory //free(CPU_data_in); free(CPU_data_out_index); free(CPU_data_out_time); } for(unsigned mod2 = 1; mod2 <= 1; mod2 = mod2 * 2){////268435456 = 1gb, 536870912 = 2gb, 1073741824 = 4gb, 2147483648 = 8gb, 4294967296 = 16gb. counter++; ///////////////////////////////////////////////////////////////////CPU data begin //int data_size = 2 * 256 * 1024 * 32;/////size = iteration * stride = 32 2mb pages. 
unsigned mod = 2684354560; if(mod > 2684354560){ mod = 2684354560; } //unsigned data_size = 2684354560; unsigned data_size = mod; if(data_size < 4194304){//////////data size at least 16mb to prevent L2 prefetch data_size = 4194304; } //int iterations = data_size / data_stride; //int iterations = 1024 * 256 * 8; long long int iterations = mod / data_stride;////32 * 32 * 4 / 32 * 2 = 256 unsigned *CPU_data_in; //CPU_data_in = (int*)malloc(sizeof(int) * data_size); checkCudaErrors(hipHostMalloc((void**)&CPU_data_in, sizeof(unsigned) * data_size, hipHostMallocDefault));//////////using pinned memory init_cpu_data(CPU_data_in, data_size, data_stride, mod, iterations); long long int reduced_iter = iterations; if(reduced_iter > 512){ reduced_iter = 512; }else if(reduced_iter < 16){ reduced_iter = 16; } unsigned *CPU_data_out_index; CPU_data_out_index = (unsigned*)malloc(sizeof(unsigned) * reduced_iter); long long int *CPU_data_out_time; CPU_data_out_time = (long long int*)malloc(sizeof(long long int) * reduced_iter); ///////////////////////////////////////////////////////////////////CPU data end ///////////////////////////////////////////////////////////////////GPU data in //int *GPU_data_in; //checkCudaErrors(hipMalloc(&GPU_data_in, sizeof(int) * data_size)); //hipMemcpy(GPU_data_in, CPU_data_in, sizeof(int) * data_size, hipMemcpyHostToDevice); ///////////////////////////////////////////////////////////////////GPU data out unsigned *GPU_data_out_index; checkCudaErrors(hipMalloc(&GPU_data_out_index, sizeof(unsigned) * reduced_iter)); long long int *GPU_data_out_time; checkCudaErrors(hipMalloc(&GPU_data_out_time, sizeof(long long int) * reduced_iter)); hipLaunchKernelGGL(( tlb_latency_test), dim3(1), dim3(1), 0, 0, CPU_data_in, iterations, GPU_data_out, GPU_data_out_index, GPU_data_out_time, clock_rate, mod, data_stride);///////////////kernel is here hipDeviceSynchronize(); hipMemcpy(CPU_data_out_index, GPU_data_out_index, sizeof(unsigned) * reduced_iter, hipMemcpyDeviceToHost); hipMemcpy(CPU_data_out_time, GPU_data_out_time, sizeof(long long int) * reduced_iter, hipMemcpyDeviceToHost); fprintf(pFile, "###################data_stride%d#########################\n", data_stride); fprintf (pFile, "###############Mod%lld##############%lld\n", mod, iterations); for (long long int it = 0; it < reduced_iter; it++){ fprintf (pFile, "%d %fms %lldcycles\n", CPU_data_out_index[it], CPU_data_out_time[it] / (float)clock_rate, CPU_data_out_time[it]); //fprintf (pFile, "%d %fms\n", it, CPU_data_out_time[it] / (float)clock_rate); //printf ("%d %fms\n", CPU_data_out_index[it], CPU_data_out_time[it] / (float)clock_rate); } checkCudaErrors(hipFree(GPU_data_out_index)); checkCudaErrors(hipFree(GPU_data_out_time)); //checkCudaErrors(hipFree(GPU_data_in)); //checkCudaErrors(hipFree(CPU_data_in)); checkCudaErrors(hipHostFree(CPU_data_in));//////using pinned memory //free(CPU_data_in); free(CPU_data_out_index); free(CPU_data_out_time); } //printf("############################################\n\n"); } checkCudaErrors(hipFree(GPU_data_out)); //free(CPU_data_out); fclose (pFile); exit(EXIT_SUCCESS); }
1700067fe3be7dddfba8dea8d834a43dcf8390f5.cu
#include <cstdio> #include <ctime> #include <vector> #include <algorithm> #include <stdlib.h> // utilities #include <helper_cuda.h> #include <time.h> ///////////per request timing. L1 enabled. P100. ///////////using more than 8gb. //typedef unsigned char byte; void shuffle(long long int *array, long long int n) { if (n > 1){ long long int i; for (i = 0; i < n - 1; i++){ long long int j = i + rand() / (RAND_MAX / (n - i) + 1); long long int t = array[j]; array[j] = array[i]; array[i] = t; } } } void init_cpu_data(unsigned *A, unsigned size, unsigned stride, unsigned mod, long long int iterations){ if(0){////////////normal for (unsigned i = 0; i < size - stride; i = i + stride){ A[i]=(i + stride); } for (unsigned i = 7; i < size - stride; i = i + stride){ A[i]=(i + stride); } A[size - stride]=0; A[size - stride + 7]=0; } if(1){////////////reversed for (unsigned i = 0; i <= size - stride; i = i + stride){ A[i]=(i - stride); } for (unsigned i = 7; i <= size - stride + 7; i = i + stride){ A[i]=(i - stride); } A[0]=size - stride; A[7]=size - stride + 7; } if(0){////////////random long long int *rand_sequence; rand_sequence = (long long int*)malloc(sizeof(long long int) * iterations); //////random sequence offset 0 for(long long int i = 0; i < iterations; i++){ rand_sequence[i] = i; } //srand (time(NULL)); srand(1); shuffle(rand_sequence, iterations); long long int previous_rand_num; long long int rand_num = rand_sequence[0] * stride; for(long long int i = 1; i < iterations; i++){ previous_rand_num = rand_num; rand_num = rand_sequence[i] * stride; A[previous_rand_num]=(unsigned)rand_num; } A[rand_num]=(unsigned)(rand_sequence[0] * stride);////back to beginning //////random sequence offset 7 //for(int i = 0; i < iterations; i++){ // rand_sequence[i] = i; //} //srand (time(NULL)); //shuffle(rand_sequence, iterations); rand_num = rand_sequence[0] * stride + 7; for(long long int i = 1; i < iterations; i++){ previous_rand_num = rand_num; rand_num = rand_sequence[i] * stride + 7; A[previous_rand_num]=(unsigned)rand_num; } A[rand_num]=(unsigned)(rand_sequence[0] * stride + 7);////back to beginning } /* ///////manually set the nodes A[32]=104333344; A[104333344]=200802336; A[200802336]=353370144; A[353370144]=372244512; A[372244512]=110100512; A[110100512]=182452256; A[182452256]=333971488; A[333971488]=225443872; A[225443872]=155189280; A[155189280]=104333344; */ } __device__ void P_chasing0(int mark, unsigned *A, int iterations, int *B, int *C, unsigned *D, int starting_index, float clock_rate, int data_stride){ int j = starting_index;/////make them in the same page, and miss near in cache lines for (int it = 0; it < iterations; it++){ j = A[j]; } B[0] = j; } //////////min page size 4kb = 4096b = 32 * 128. __device__ void P_chasing1(int mark, unsigned *A, long long int iterations, unsigned *B, unsigned *C, long long int *D, unsigned starting_index, float clock_rate, unsigned data_stride){ unsigned j = starting_index;/////make them in the same page, and miss near in cache lines //unsigned start_time = 0;//////clock //unsigned end_time = 0;//////clock //start_time = clock64();//////clock for (long long int it = 0; it < iterations; it++){ j = A[j]; } //end_time=clock64();//////clock //unsigned total_time = end_time - start_time;//////clock //printf("inside%d:%fms\n", mark, (total_time / (float)clock_rate) / ((float)iterations));//////clock, average latency //////////the print will flush the L1?! ( B[0] = j; //B[1] = (int) total_time; } //////////min page size 4kb = 4096b = 32 * 128. 
__device__ void P_chasing2(int mark, unsigned *A, long long int iterations, unsigned *B, unsigned *C, long long int *D, unsigned starting_index, float clock_rate, unsigned data_stride){//////what is the effect of warmup outside vs inside? //////shared memory: 0xc000 max (49152 Bytes = 48KB) __shared__ long long int s_tvalue[1024 * 4];/////must be enough to contain the number of iterations. __shared__ unsigned s_index[1024 * 4]; //__shared__ unsigned s_index[1]; unsigned j = starting_index;/////make them in the same page, and miss near in cache lines //int j = B[0]; long long int start_time = 0;//////clock long long int end_time = 0;//////clock long long int time_interval = 0;//////clock //unsigned total_time = end_time - start_time;//////clock /* for (int it = 0; it < iterations; it++){ start_time = clock64();//////clock j = A[j]; //s_index[it] = j; end_time=clock64();//////clock s_tvalue[it] = end_time - start_time; } */ asm(".reg .u64 t1;\n\t" ".reg .u64 t2;\n\t"); for (long long int it = 0; it < iterations; it++){ /* asm("mul.wide.u32 t1, %3, %5;\n\t" "add.u64 t2, t1, %4;\n\t" "mov.u64 %0, %clock64;\n\t" "ld.global.u32 %2, [t2];\n\t" "mov.u64 %1, %clock64;" : "=l"(start_time), "=l"(end_time), "=r"(j) : "r"(j), "l"(A), "r"(4)); */ asm("mul.wide.u32 t1, %2, %4;\n\t" "add.u64 t2, t1, %3;\n\t" "mov.u64 %0, %clock64;\n\t" "ld.global.u32 %1, [t2];\n\t" : "=l"(start_time), "=r"(j) : "r"(j), "l"(A), "r"(4)); s_index[it] = j;////what if without this? ///Then it is not accurate and cannot get the access time at all, due to the ILP. (another way is to use average time, but inevitably containing other instructions:setp, add). asm volatile ("mov.u64 %0, %clock64;": "=l"(end_time)); time_interval = end_time - start_time; //if(it >= 4 * 1024){ s_tvalue[it] = time_interval; //} } //printf("inside%d:%fms\n", mark, (total_time / (float)clock_rate) / ((float)iterations));//////clock, average latency B[0] = j; for (long long int it = 0; it < iterations; it++){ C[it] = s_index[it]; D[it] = s_tvalue[it]; } } __global__ void tlb_latency_test(unsigned *A, long long int iterations, unsigned *B, unsigned *C, long long int *D, float clock_rate, unsigned mod, int data_stride){ long long int reduced_iter = iterations; if(reduced_iter > 512){ reduced_iter = 512; }else if(reduced_iter < 16){ reduced_iter = 16; } ///////////kepler L2 has 48 * 1024 = 49152 cache lines. But we only have 1024 * 4 slots in shared memory. 
//P_chasing1(0, A, iterations + 0, B, C, D, 0, clock_rate, data_stride);////////saturate the L2 P_chasing2(0, A, reduced_iter, B, C, D, 0, clock_rate, data_stride);////////partially print the data __syncthreads(); } int main(int argc, char **argv) { printf("\n"); // set device cudaDeviceProp device_prop; //int dev_id = findCudaDevice(argc, (const char **) argv); int dev_id = 0; checkCudaErrors(cudaGetDeviceProperties(&device_prop, dev_id)); int peak_clk = 1;//kHz checkCudaErrors(cudaDeviceGetAttribute(&peak_clk, cudaDevAttrClockRate, dev_id)); float clock_rate = (float) peak_clk; //printf("clock_rate_out_kernel:%f\n", clock_rate); if (!device_prop.managedMemory) { // This samples requires being run on a device that supports Unified Memory fprintf(stderr, "Unified Memory not supported on this device\n"); exit(EXIT_WAIVED); } if (device_prop.computeMode == cudaComputeModeProhibited) { // This sample requires being run with a default or process exclusive mode fprintf(stderr, "This sample requires a device in either default or process exclusive mode\n"); exit(EXIT_WAIVED); } ///////////////////////////////////////////////////////////////////GPU data out unsigned *GPU_data_out; checkCudaErrors(cudaMalloc(&GPU_data_out, sizeof(unsigned) * 2)); FILE * pFile; pFile = fopen ("output.txt","w"); unsigned counter = 0; for(unsigned data_stride = 1 * 1 * 256; data_stride <= 2 * 256 * 1024; data_stride = data_stride * 2){/////////32mb stride //data_stride = data_stride + 32;///offset a cache line, trying to cause L2 miss but tlb hit. //printf("###################data_stride%d#########################\n", data_stride); //for(int mod = 1024 * 256 * 2; mod > 0; mod = mod - 32 * 1024){/////kepler L2 1.5m = 12288 cache lines, L1 16k = 128 cache lines. for(unsigned mod2 = 1 * 16 * 1024; mod2 <= 1073741824; mod2 = mod2 * 2){////268435456 = 1gb, 536870912 = 2gb, 1073741824 = 4gb, 2147483648 = 8gb, 4294967296 = 16gb. counter++; ///////////////////////////////////////////////////////////////////CPU data begin //int data_size = 2 * 256 * 1024 * 32;/////size = iteration * stride = 32 2mb pages. unsigned mod = mod2; if(mod > 2684354560){ mod = 2684354560; } //unsigned data_size = 2684354560;//////when size gets larger than 32MB(8388608), an additional latency is added. Is it prefetching? cpu cache or tlb? 
unsigned data_size = mod; if(data_size < 4194304){//////////data size at least 16mb to prevent L2 prefetch data_size = 4194304; } //int iterations = data_size / data_stride; //int iterations = 1024 * 256 * 8; long long int iterations = mod / data_stride;////32 * 32 * 4 / 32 * 2 = 256 unsigned *CPU_data_in; //CPU_data_in = (int*)malloc(sizeof(int) * data_size); checkCudaErrors(cudaHostAlloc((void**)&CPU_data_in, sizeof(unsigned) * data_size, cudaHostAllocDefault));//////////using pinned memory init_cpu_data(CPU_data_in, data_size, data_stride, mod, iterations); long long int reduced_iter = iterations; if(reduced_iter > 512){ reduced_iter = 512; }else if(reduced_iter < 16){ reduced_iter = 16; } unsigned *CPU_data_out_index; CPU_data_out_index = (unsigned*)malloc(sizeof(unsigned) * reduced_iter); long long int *CPU_data_out_time; CPU_data_out_time = (long long int*)malloc(sizeof(long long int) * reduced_iter); ///////////////////////////////////////////////////////////////////CPU data end ///////////////////////////////////////////////////////////////////GPU data in //int *GPU_data_in; //checkCudaErrors(cudaMalloc(&GPU_data_in, sizeof(int) * data_size)); //cudaMemcpy(GPU_data_in, CPU_data_in, sizeof(int) * data_size, cudaMemcpyHostToDevice); ///////////////////////////////////////////////////////////////////GPU data out unsigned *GPU_data_out_index; checkCudaErrors(cudaMalloc(&GPU_data_out_index, sizeof(unsigned) * reduced_iter)); long long int *GPU_data_out_time; checkCudaErrors(cudaMalloc(&GPU_data_out_time, sizeof(long long int) * reduced_iter)); tlb_latency_test<<<1, 1>>>(CPU_data_in, iterations, GPU_data_out, GPU_data_out_index, GPU_data_out_time, clock_rate, mod, data_stride);///////////////kernel is here cudaDeviceSynchronize(); cudaMemcpy(CPU_data_out_index, GPU_data_out_index, sizeof(unsigned) * reduced_iter, cudaMemcpyDeviceToHost); cudaMemcpy(CPU_data_out_time, GPU_data_out_time, sizeof(long long int) * reduced_iter, cudaMemcpyDeviceToHost); fprintf(pFile, "###################data_stride%d#########################\n", data_stride); fprintf (pFile, "###############Mod%lld##############%lld\n", mod, iterations); for (long long int it = 0; it < reduced_iter; it++){ fprintf (pFile, "%d %fms %lldcycles\n", CPU_data_out_index[it], CPU_data_out_time[it] / (float)clock_rate, CPU_data_out_time[it]); //fprintf (pFile, "%d %fms\n", it, CPU_data_out_time[it] / (float)clock_rate); //printf ("%d %fms\n", CPU_data_out_index[it], CPU_data_out_time[it] / (float)clock_rate); } checkCudaErrors(cudaFree(GPU_data_out_index)); checkCudaErrors(cudaFree(GPU_data_out_time)); //checkCudaErrors(cudaFree(GPU_data_in)); //checkCudaErrors(cudaFree(CPU_data_in)); checkCudaErrors(cudaFreeHost(CPU_data_in));//////using pinned memory //free(CPU_data_in); free(CPU_data_out_index); free(CPU_data_out_time); } for(unsigned mod2 = 1; mod2 <= 1; mod2 = mod2 * 2){////268435456 = 1gb, 536870912 = 2gb, 1073741824 = 4gb, 2147483648 = 8gb, 4294967296 = 16gb. counter++; ///////////////////////////////////////////////////////////////////CPU data begin //int data_size = 2 * 256 * 1024 * 32;/////size = iteration * stride = 32 2mb pages. 
unsigned mod = 2147483648; if(mod > 3221225472){ mod = 3221225472; } //unsigned data_size = 2684354560; unsigned data_size = mod; if(data_size < 4194304){//////////data size at least 16mb to prevent L2 prefetch data_size = 4194304; } //int iterations = data_size / data_stride; //int iterations = 1024 * 256 * 8; long long int iterations = mod / data_stride;////32 * 32 * 4 / 32 * 2 = 256 unsigned *CPU_data_in; //CPU_data_in = (int*)malloc(sizeof(int) * data_size); checkCudaErrors(cudaHostAlloc((void**)&CPU_data_in, sizeof(unsigned) * data_size, cudaHostAllocDefault));//////////using pinned memory init_cpu_data(CPU_data_in, data_size, data_stride, mod, iterations); long long int reduced_iter = iterations; if(reduced_iter > 512){ reduced_iter = 512; }else if(reduced_iter < 16){ reduced_iter = 16; } unsigned *CPU_data_out_index; CPU_data_out_index = (unsigned*)malloc(sizeof(unsigned) * reduced_iter); long long int *CPU_data_out_time; CPU_data_out_time = (long long int*)malloc(sizeof(long long int) * reduced_iter); ///////////////////////////////////////////////////////////////////CPU data end ///////////////////////////////////////////////////////////////////GPU data in //int *GPU_data_in; //checkCudaErrors(cudaMalloc(&GPU_data_in, sizeof(int) * data_size)); //cudaMemcpy(GPU_data_in, CPU_data_in, sizeof(int) * data_size, cudaMemcpyHostToDevice); ///////////////////////////////////////////////////////////////////GPU data out unsigned *GPU_data_out_index; checkCudaErrors(cudaMalloc(&GPU_data_out_index, sizeof(unsigned) * reduced_iter)); long long int *GPU_data_out_time; checkCudaErrors(cudaMalloc(&GPU_data_out_time, sizeof(long long int) * reduced_iter)); tlb_latency_test<<<1, 1>>>(CPU_data_in, iterations, GPU_data_out, GPU_data_out_index, GPU_data_out_time, clock_rate, mod, data_stride);///////////////kernel is here cudaDeviceSynchronize(); cudaMemcpy(CPU_data_out_index, GPU_data_out_index, sizeof(unsigned) * reduced_iter, cudaMemcpyDeviceToHost); cudaMemcpy(CPU_data_out_time, GPU_data_out_time, sizeof(long long int) * reduced_iter, cudaMemcpyDeviceToHost); fprintf(pFile, "###################data_stride%d#########################\n", data_stride); fprintf (pFile, "###############Mod%lld##############%lld\n", mod, iterations); for (long long int it = 0; it < reduced_iter; it++){ fprintf (pFile, "%d %fms %lldcycles\n", CPU_data_out_index[it], CPU_data_out_time[it] / (float)clock_rate, CPU_data_out_time[it]); //fprintf (pFile, "%d %fms\n", it, CPU_data_out_time[it] / (float)clock_rate); //printf ("%d %fms\n", CPU_data_out_index[it], CPU_data_out_time[it] / (float)clock_rate); } checkCudaErrors(cudaFree(GPU_data_out_index)); checkCudaErrors(cudaFree(GPU_data_out_time)); //checkCudaErrors(cudaFree(GPU_data_in)); //checkCudaErrors(cudaFree(CPU_data_in)); checkCudaErrors(cudaFreeHost(CPU_data_in));//////using pinned memory //free(CPU_data_in); free(CPU_data_out_index); free(CPU_data_out_time); } for(unsigned mod2 = 1; mod2 <= 1; mod2 = mod2 * 2){////268435456 = 1gb, 536870912 = 2gb, 1073741824 = 4gb, 2147483648 = 8gb, 4294967296 = 16gb. counter++; ///////////////////////////////////////////////////////////////////CPU data begin //int data_size = 2 * 256 * 1024 * 32;/////size = iteration * stride = 32 2mb pages. 
unsigned mod = 2684354560; if(mod > 2684354560){ mod = 2684354560; } //unsigned data_size = 2684354560; unsigned data_size = mod; if(data_size < 4194304){//////////data size at least 16mb to prevent L2 prefetch data_size = 4194304; } //int iterations = data_size / data_stride; //int iterations = 1024 * 256 * 8; long long int iterations = mod / data_stride;////32 * 32 * 4 / 32 * 2 = 256 unsigned *CPU_data_in; //CPU_data_in = (int*)malloc(sizeof(int) * data_size); checkCudaErrors(cudaHostAlloc((void**)&CPU_data_in, sizeof(unsigned) * data_size, cudaHostAllocDefault));//////////using pinned memory init_cpu_data(CPU_data_in, data_size, data_stride, mod, iterations); long long int reduced_iter = iterations; if(reduced_iter > 512){ reduced_iter = 512; }else if(reduced_iter < 16){ reduced_iter = 16; } unsigned *CPU_data_out_index; CPU_data_out_index = (unsigned*)malloc(sizeof(unsigned) * reduced_iter); long long int *CPU_data_out_time; CPU_data_out_time = (long long int*)malloc(sizeof(long long int) * reduced_iter); ///////////////////////////////////////////////////////////////////CPU data end ///////////////////////////////////////////////////////////////////GPU data in //int *GPU_data_in; //checkCudaErrors(cudaMalloc(&GPU_data_in, sizeof(int) * data_size)); //cudaMemcpy(GPU_data_in, CPU_data_in, sizeof(int) * data_size, cudaMemcpyHostToDevice); ///////////////////////////////////////////////////////////////////GPU data out unsigned *GPU_data_out_index; checkCudaErrors(cudaMalloc(&GPU_data_out_index, sizeof(unsigned) * reduced_iter)); long long int *GPU_data_out_time; checkCudaErrors(cudaMalloc(&GPU_data_out_time, sizeof(long long int) * reduced_iter)); tlb_latency_test<<<1, 1>>>(CPU_data_in, iterations, GPU_data_out, GPU_data_out_index, GPU_data_out_time, clock_rate, mod, data_stride);///////////////kernel is here cudaDeviceSynchronize(); cudaMemcpy(CPU_data_out_index, GPU_data_out_index, sizeof(unsigned) * reduced_iter, cudaMemcpyDeviceToHost); cudaMemcpy(CPU_data_out_time, GPU_data_out_time, sizeof(long long int) * reduced_iter, cudaMemcpyDeviceToHost); fprintf(pFile, "###################data_stride%d#########################\n", data_stride); fprintf (pFile, "###############Mod%lld##############%lld\n", mod, iterations); for (long long int it = 0; it < reduced_iter; it++){ fprintf (pFile, "%d %fms %lldcycles\n", CPU_data_out_index[it], CPU_data_out_time[it] / (float)clock_rate, CPU_data_out_time[it]); //fprintf (pFile, "%d %fms\n", it, CPU_data_out_time[it] / (float)clock_rate); //printf ("%d %fms\n", CPU_data_out_index[it], CPU_data_out_time[it] / (float)clock_rate); } checkCudaErrors(cudaFree(GPU_data_out_index)); checkCudaErrors(cudaFree(GPU_data_out_time)); //checkCudaErrors(cudaFree(GPU_data_in)); //checkCudaErrors(cudaFree(CPU_data_in)); checkCudaErrors(cudaFreeHost(CPU_data_in));//////using pinned memory //free(CPU_data_in); free(CPU_data_out_index); free(CPU_data_out_time); } //printf("############################################\n\n"); } checkCudaErrors(cudaFree(GPU_data_out)); //free(CPU_data_out); fclose (pFile); exit(EXIT_SUCCESS); }
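Editorial note (not part of the corpus, added as an illustration): both versions of the file above measure per-request memory latency by pointer chasing. A[j] stores the index of the next element, so every load depends on the previous one, and the inline PTX in P_chasing2 brackets each load with %clock64 reads. Below is a minimal sketch of the same measurement loop in plain CUDA; the kernel name and output buffers are illustrative assumptions, and the caveat in the comment is exactly why the original resorts to PTX plus a shared-memory store of j.

#include <cuda_runtime.h>

__global__ void chase_latency(const unsigned* A, long long iters,
                              unsigned* last, long long* cycles) {
    unsigned j = 0;                          // start of the chain, as in the corpus kernels
    for (long long it = 0; it < iters; ++it) {
        long long t0 = clock64();
        j = A[j];                            // dependent load: the next index comes from memory
        long long t1 = clock64();            // caveat: nothing forces the load to retire before
                                             // this read, so per-access numbers are approximate
        cycles[it] = t1 - t0;
    }
    *last = j;                               // keep j live so the chase is not optimized away
}

Such a sketch would be launched as chase_latency<<<1, 1>>>(...), with A initialised on the host the way init_cpu_data does above (stride-linked, reversed, or shuffled).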
65baa86ea95eba30cc638c475e128c426ec5508d.hip
// !!! This is a file automatically generated by hipify!!! /*------------------------------------------------------------------------- * * CUDA functions for texture-memory interpolation based projection * * This file has the necesary functions to perform X-ray parallel projection * operation given a geaometry, angles and image. It uses the 3D texture * memory linear interpolation to uniformily sample a path to integrate the * X-rays. * * CODE by Ander Biguri * --------------------------------------------------------------------------- --------------------------------------------------------------------------- Copyright (c) 2015, University of Bath and CERN- European Organization for Nuclear Research All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. --------------------------------------------------------------------------- Contact: [email protected] Codes : https://github.com/CERN/TIGRE --------------------------------------------------------------------------- */ #include <algorithm> #include <hip/hip_runtime_api.h> #include <hip/hip_runtime.h> #include "ray_interpolated_projection_parallel.hpp" //#include "mex.h" #include <math.h> // if (__err != hipSuccess) { \ // printf("%s \n", msg);\ // printf("%s \n", hipGetErrorString(__err));\ // } \ // TODO: Error logging #define cudaCheckErrors(msg) \ do { \ hipError_t __err = hipGetLastError(); \ } while (0) // Declare the texture reference. 
texture<float, hipTextureType3D , hipReadModeElementType> tex; #define MAXTREADS 1024 /*GEOMETRY DEFINITION * * Detector plane, behind * |-----------------------------| * | | * | | * | | * | | * | +--------+ | * | / /| | * A Z | / / |*D | * | | +--------+ | | * | | | | | | * | | | *O | + | * --->y | | | / | * / | | |/ | * V X | +--------+ | * |-----------------------------| * * *S * * * * * **/ __global__ void kernelPixelDetector_parallel( Geometry geo, float* detector, Point3D source , Point3D deltaU, Point3D deltaV, Point3D uvOrigin, float maxdist){ unsigned long y = blockIdx.y * blockDim.y + threadIdx.y; unsigned long x = blockIdx.x * blockDim.x + threadIdx.x; unsigned long idx = x * geo.nDetecV + y; if ((x>= geo.nDetecU) | (y>= geo.nDetecV)) return; /////// Get coordinates XYZ of pixel UV int pixelV = geo.nDetecV-y-1; int pixelU = x; float vectX,vectY,vectZ; Point3D P; P.x=(uvOrigin.x+pixelU*deltaU.x+pixelV*deltaV.x); P.y=(uvOrigin.y+pixelU*deltaU.y+pixelV*deltaV.y); P.z=(uvOrigin.z+pixelU*deltaU.z+pixelV*deltaV.z); Point3D S; S.x=(source.x+pixelU*deltaU.x+pixelV*deltaV.x); S.y=(source.y+pixelU*deltaU.y+pixelV*deltaV.y); S.z=(source.z+pixelU*deltaU.z+pixelV*deltaV.z); // Length is the ray length in normalized space double length=sqrt((S.x-P.x)*(S.x-P.x)+(S.y-P.y)*(S.y-P.y)+(S.z-P.z)*(S.z-P.z)); //now legth is an integer of Nsamples that are required on this line length=ceil(length/geo.accuracy);//Divide the directional vector by an integer vectX=(P.x -S.x)/(length); vectY=(P.y -S.y)/(length); vectZ=(P.z -S.z)/(length); // //Integrate over the line float tx,ty,tz; float sum=0; float i; // limit the amount of mem access after the cube, but before the detector. if ((geo.DSO/geo.dVoxelX+maxdist)/geo.accuracy < length) length=ceil((geo.DSO/geo.dVoxelX+maxdist)/geo.accuracy); //Length is not actually a length, but the amount of memreads with given accuracy ("samples per voxel") for (i=floor(maxdist/geo.accuracy); i<=length; i=i+1){ tx=vectX*i+S.x; ty=vectY*i+S.y; tz=vectZ*i+S.z; sum += tex3D(tex, tx+0.5, ty+0.5, tz+0.5); // this line is 94% of time. } float deltalength=sqrt((vectX*geo.dVoxelX)*(vectX*geo.dVoxelX)+ (vectY*geo.dVoxelY)*(vectY*geo.dVoxelY)+(vectZ*geo.dVoxelZ)*(vectZ*geo.dVoxelZ) ); detector[idx]=sum*deltalength; } int interpolation_projection_parallel(float const * const img, Geometry geo, float** result,float const * const alphas,int nalpha){ // copy data to CUDA memory hipArray *d_imagedata = 0; const hipExtent extent = make_hipExtent(geo.nVoxelX, geo.nVoxelY, geo.nVoxelZ); hipChannelFormatDesc channelDesc = hipCreateChannelDesc<float>(); hipMalloc3DArray(&d_imagedata, &channelDesc, extent); cudaCheckErrors("hipMalloc3D error 3D tex"); hipMemcpy3DParms copyParams = { 0 }; copyParams.srcPtr = make_hipPitchedPtr((void*)img, extent.width*sizeof(float), extent.width, extent.height); copyParams.dstArray = d_imagedata; copyParams.extent = extent; copyParams.kind = hipMemcpyHostToDevice; hipMemcpy3D(&copyParams); cudaCheckErrors("hipMemcpy3D fail"); // Configure texture options tex.normalized = false; tex.filterMode = hipFilterModeLinear; tex.addressMode[0] = hipAddressModeBorder; tex.addressMode[1] = hipAddressModeBorder; tex.addressMode[2] = hipAddressModeBorder; hipBindTextureToArray(tex, d_imagedata, channelDesc); cudaCheckErrors("3D texture memory bind fail"); //Done! Image put into texture memory. 
size_t num_bytes = geo.nDetecU*geo.nDetecV * sizeof(float); float* dProjection; hipMalloc((void**)&dProjection, num_bytes); cudaCheckErrors("hipMalloc fail"); // If we are going to time bool timekernel=false; hipEvent_t start, stop; float elapsedTime; if (timekernel){ hipEventCreate(&start); hipEventRecord(start,0); } // 16x16 gave the best performance empirically // Funnily that makes it compatible with most GPUs..... dim3 grid(ceil((float)geo.nDetecU/32),ceil((float)geo.nDetecV/32),1); dim3 block(32,32,1); Point3D source, deltaU, deltaV, uvOrigin; float maxdist; for (unsigned int i=0;i<nalpha;i++){ geo.alpha=alphas[i]; //precomute distances for faster execution maxdist=maxDistanceCubeXY(geo,geo.alpha,i); //Precompute per angle constant stuff for speed computeDeltas_parallel(geo,geo.alpha,i, &uvOrigin, &deltaU, &deltaV, &source); //Interpolation!! hipLaunchKernelGGL(( kernelPixelDetector_parallel), dim3(grid),dim3(block), 0, 0, geo,dProjection, source, deltaU, deltaV, uvOrigin,floor(maxdist)); cudaCheckErrors("Kernel fail"); // copy result to host hipMemcpy(result[i], dProjection, num_bytes, hipMemcpyDeviceToHost); cudaCheckErrors("hipMemcpy fail"); } if (timekernel){ hipEventCreate(&stop); hipEventRecord(stop,0); hipEventSynchronize(stop); hipEventElapsedTime(&elapsedTime, start,stop); //TODO: replace this // mexPrintf("%f\n" ,elapsedTime); } hipUnbindTexture(tex); cudaCheckErrors("Unbind fail"); hipFree(dProjection); hipFreeArray(d_imagedata); cudaCheckErrors("hipFree d_imagedata fail"); //hipDeviceReset(); return 0; } /* This code precomputes The location of the source and the Delta U and delta V (in the warped space) * to compute the locations of the x-rays. While it seems verbose and overly-optimized, * it does saves about 30% of each of the kernel calls. Thats something! **/ void computeDeltas_parallel(Geometry geo, float alpha,int i, Point3D* uvorigin, Point3D* deltaU, Point3D* deltaV, Point3D* source){ Point3D S; S.x=geo.DSO; S.y=geo.dDetecU*(0-((float)geo.nDetecU/2)+0.5); S.z=geo.dDetecV*(((float)geo.nDetecV/2)-0.5-0); //End point Point3D P,Pu0,Pv0; P.x =-(geo.DSD-geo.DSO); P.y = geo.dDetecU*(0-((float)geo.nDetecU/2)+0.5); P.z = geo.dDetecV*(((float)geo.nDetecV/2)-0.5-0); Pu0.x=-(geo.DSD-geo.DSO); Pu0.y= geo.dDetecU*(1-((float)geo.nDetecU/2)+0.5); Pu0.z= geo.dDetecV*(((float)geo.nDetecV/2)-0.5-0); Pv0.x=-(geo.DSD-geo.DSO); Pv0.y= geo.dDetecU*(0-((float)geo.nDetecU/2)+0.5); Pv0.z= geo.dDetecV*(((float)geo.nDetecV/2)-0.5-1); // Geomtric trasnformations: //1: Offset detector //P.x P.y =P.y +geo.offDetecU[i]; P.z =P.z +geo.offDetecV[i]; Pu0.y=Pu0.y+geo.offDetecU[i]; Pu0.z=Pu0.z+geo.offDetecV[i]; Pv0.y=Pv0.y+geo.offDetecU[i]; Pv0.z=Pv0.z+geo.offDetecV[i]; //S doesnt need to chagne //3: Rotate (around z)! 
Point3D Pfinal, Pfinalu0, Pfinalv0; Pfinal.x =P.x*cos(geo.alpha)-P.y*sin(geo.alpha); Pfinal.y =P.y*cos(geo.alpha)+P.x*sin(geo.alpha); Pfinal.z =P.z; Pfinalu0.x=Pu0.x*cos(geo.alpha)-Pu0.y*sin(geo.alpha); Pfinalu0.y=Pu0.y*cos(geo.alpha)+Pu0.x*sin(geo.alpha); Pfinalu0.z=Pu0.z; Pfinalv0.x=Pv0.x*cos(geo.alpha)-Pv0.y*sin(geo.alpha); Pfinalv0.y=Pv0.y*cos(geo.alpha)+Pv0.x*sin(geo.alpha); Pfinalv0.z=Pv0.z; Point3D S2; S2.x=S.x*cos(geo.alpha)-S.y*sin(geo.alpha); S2.y=S.y*cos(geo.alpha)+S.x*sin(geo.alpha); S2.z=S.z; //2: Offset image (instead of offseting image, -offset everything else) Pfinal.x =Pfinal.x-geo.offOrigX[i]; Pfinal.y =Pfinal.y-geo.offOrigY[i]; Pfinal.z =Pfinal.z-geo.offOrigZ[i]; Pfinalu0.x=Pfinalu0.x-geo.offOrigX[i]; Pfinalu0.y=Pfinalu0.y-geo.offOrigY[i]; Pfinalu0.z=Pfinalu0.z-geo.offOrigZ[i]; Pfinalv0.x=Pfinalv0.x-geo.offOrigX[i]; Pfinalv0.y=Pfinalv0.y-geo.offOrigY[i]; Pfinalv0.z=Pfinalv0.z-geo.offOrigZ[i]; S2.x=S2.x-geo.offOrigX[i]; S2.y=S2.y-geo.offOrigY[i]; S2.z=S2.z-geo.offOrigZ[i]; // As we want the (0,0,0) to be in a corner of the image, we need to translate everything (after rotation); Pfinal.x =Pfinal.x+geo.sVoxelX/2-geo.dVoxelX/2; Pfinal.y =Pfinal.y+geo.sVoxelY/2-geo.dVoxelY/2; Pfinal.z =Pfinal.z +geo.sVoxelZ/2-geo.dVoxelZ/2; Pfinalu0.x=Pfinalu0.x+geo.sVoxelX/2-geo.dVoxelX/2; Pfinalu0.y=Pfinalu0.y+geo.sVoxelY/2-geo.dVoxelY/2; Pfinalu0.z=Pfinalu0.z+geo.sVoxelZ/2-geo.dVoxelZ/2; Pfinalv0.x=Pfinalv0.x+geo.sVoxelX/2-geo.dVoxelX/2; Pfinalv0.y=Pfinalv0.y+geo.sVoxelY/2-geo.dVoxelY/2; Pfinalv0.z=Pfinalv0.z+geo.sVoxelZ/2-geo.dVoxelZ/2; S2.x =S2.x+geo.sVoxelX/2-geo.dVoxelX/2; S2.y =S2.y+geo.sVoxelY/2-geo.dVoxelY/2; S2.z =S2.z +geo.sVoxelZ/2-geo.dVoxelZ/2; //4. Scale everything so dVoxel==1 Pfinal.x =Pfinal.x/geo.dVoxelX; Pfinal.y =Pfinal.y/geo.dVoxelY; Pfinal.z =Pfinal.z/geo.dVoxelZ; Pfinalu0.x=Pfinalu0.x/geo.dVoxelX; Pfinalu0.y=Pfinalu0.y/geo.dVoxelY; Pfinalu0.z=Pfinalu0.z/geo.dVoxelZ; Pfinalv0.x=Pfinalv0.x/geo.dVoxelX; Pfinalv0.y=Pfinalv0.y/geo.dVoxelY; Pfinalv0.z=Pfinalv0.z/geo.dVoxelZ; S2.x =S2.x/geo.dVoxelX; S2.y =S2.y/geo.dVoxelY; S2.z =S2.z/geo.dVoxelZ; //5. apply COR. Wherever everything was, now its offesetd by a bit float CORx, CORy; CORx=-geo.COR[i]*sin(geo.alpha)/geo.dVoxelX; CORy= geo.COR[i]*cos(geo.alpha)/geo.dVoxelY; Pfinal.x+=CORx; Pfinal.y+=CORy; Pfinalu0.x+=CORx; Pfinalu0.y+=CORy; Pfinalv0.x+=CORx; Pfinalv0.y+=CORy; S2.x+=CORx; S2.y+=CORy; // return *uvorigin=Pfinal; deltaU->x=Pfinalu0.x-Pfinal.x; deltaU->y=Pfinalu0.y-Pfinal.y; deltaU->z=Pfinalu0.z-Pfinal.z; deltaV->x=Pfinalv0.x-Pfinal.x; deltaV->y=Pfinalv0.y-Pfinal.y; deltaV->z=Pfinalv0.z-Pfinal.z; *source=S2; }
65baa86ea95eba30cc638c475e128c426ec5508d.cu
/*------------------------------------------------------------------------- * * CUDA functions for texture-memory interpolation based projection * * This file has the necesary functions to perform X-ray parallel projection * operation given a geaometry, angles and image. It uses the 3D texture * memory linear interpolation to uniformily sample a path to integrate the * X-rays. * * CODE by Ander Biguri * --------------------------------------------------------------------------- --------------------------------------------------------------------------- Copyright (c) 2015, University of Bath and CERN- European Organization for Nuclear Research All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. --------------------------------------------------------------------------- Contact: [email protected] Codes : https://github.com/CERN/TIGRE --------------------------------------------------------------------------- */ #include <algorithm> #include <cuda_runtime_api.h> #include <cuda.h> #include "ray_interpolated_projection_parallel.hpp" //#include "mex.h" #include <math.h> // if (__err != cudaSuccess) { \ // printf("%s \n", msg);\ // printf("%s \n", cudaGetErrorString(__err));\ // } \ // TODO: Error logging #define cudaCheckErrors(msg) \ do { \ cudaError_t __err = cudaGetLastError(); \ } while (0) // Declare the texture reference. 
texture<float, cudaTextureType3D , cudaReadModeElementType> tex; #define MAXTREADS 1024 /*GEOMETRY DEFINITION * * Detector plane, behind * |-----------------------------| * | | * | | * | | * | | * | +--------+ | * | / /| | * A Z | / / |*D | * | | +--------+ | | * | | | | | | * | | | *O | + | * --->y | | | / | * / | | |/ | * V X | +--------+ | * |-----------------------------| * * *S * * * * * **/ __global__ void kernelPixelDetector_parallel( Geometry geo, float* detector, Point3D source , Point3D deltaU, Point3D deltaV, Point3D uvOrigin, float maxdist){ unsigned long y = blockIdx.y * blockDim.y + threadIdx.y; unsigned long x = blockIdx.x * blockDim.x + threadIdx.x; unsigned long idx = x * geo.nDetecV + y; if ((x>= geo.nDetecU) | (y>= geo.nDetecV)) return; /////// Get coordinates XYZ of pixel UV int pixelV = geo.nDetecV-y-1; int pixelU = x; float vectX,vectY,vectZ; Point3D P; P.x=(uvOrigin.x+pixelU*deltaU.x+pixelV*deltaV.x); P.y=(uvOrigin.y+pixelU*deltaU.y+pixelV*deltaV.y); P.z=(uvOrigin.z+pixelU*deltaU.z+pixelV*deltaV.z); Point3D S; S.x=(source.x+pixelU*deltaU.x+pixelV*deltaV.x); S.y=(source.y+pixelU*deltaU.y+pixelV*deltaV.y); S.z=(source.z+pixelU*deltaU.z+pixelV*deltaV.z); // Length is the ray length in normalized space double length=sqrt((S.x-P.x)*(S.x-P.x)+(S.y-P.y)*(S.y-P.y)+(S.z-P.z)*(S.z-P.z)); //now legth is an integer of Nsamples that are required on this line length=ceil(length/geo.accuracy);//Divide the directional vector by an integer vectX=(P.x -S.x)/(length); vectY=(P.y -S.y)/(length); vectZ=(P.z -S.z)/(length); // //Integrate over the line float tx,ty,tz; float sum=0; float i; // limit the amount of mem access after the cube, but before the detector. if ((geo.DSO/geo.dVoxelX+maxdist)/geo.accuracy < length) length=ceil((geo.DSO/geo.dVoxelX+maxdist)/geo.accuracy); //Length is not actually a length, but the amount of memreads with given accuracy ("samples per voxel") for (i=floor(maxdist/geo.accuracy); i<=length; i=i+1){ tx=vectX*i+S.x; ty=vectY*i+S.y; tz=vectZ*i+S.z; sum += tex3D(tex, tx+0.5, ty+0.5, tz+0.5); // this line is 94% of time. } float deltalength=sqrt((vectX*geo.dVoxelX)*(vectX*geo.dVoxelX)+ (vectY*geo.dVoxelY)*(vectY*geo.dVoxelY)+(vectZ*geo.dVoxelZ)*(vectZ*geo.dVoxelZ) ); detector[idx]=sum*deltalength; } int interpolation_projection_parallel(float const * const img, Geometry geo, float** result,float const * const alphas,int nalpha){ // copy data to CUDA memory cudaArray *d_imagedata = 0; const cudaExtent extent = make_cudaExtent(geo.nVoxelX, geo.nVoxelY, geo.nVoxelZ); cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc<float>(); cudaMalloc3DArray(&d_imagedata, &channelDesc, extent); cudaCheckErrors("cudaMalloc3D error 3D tex"); cudaMemcpy3DParms copyParams = { 0 }; copyParams.srcPtr = make_cudaPitchedPtr((void*)img, extent.width*sizeof(float), extent.width, extent.height); copyParams.dstArray = d_imagedata; copyParams.extent = extent; copyParams.kind = cudaMemcpyHostToDevice; cudaMemcpy3D(&copyParams); cudaCheckErrors("cudaMemcpy3D fail"); // Configure texture options tex.normalized = false; tex.filterMode = cudaFilterModeLinear; tex.addressMode[0] = cudaAddressModeBorder; tex.addressMode[1] = cudaAddressModeBorder; tex.addressMode[2] = cudaAddressModeBorder; cudaBindTextureToArray(tex, d_imagedata, channelDesc); cudaCheckErrors("3D texture memory bind fail"); //Done! Image put into texture memory. 
size_t num_bytes = geo.nDetecU*geo.nDetecV * sizeof(float); float* dProjection; cudaMalloc((void**)&dProjection, num_bytes); cudaCheckErrors("cudaMalloc fail"); // If we are going to time bool timekernel=false; cudaEvent_t start, stop; float elapsedTime; if (timekernel){ cudaEventCreate(&start); cudaEventRecord(start,0); } // 16x16 gave the best performance empirically // Funnily that makes it compatible with most GPUs..... dim3 grid(ceil((float)geo.nDetecU/32),ceil((float)geo.nDetecV/32),1); dim3 block(32,32,1); Point3D source, deltaU, deltaV, uvOrigin; float maxdist; for (unsigned int i=0;i<nalpha;i++){ geo.alpha=alphas[i]; //precomute distances for faster execution maxdist=maxDistanceCubeXY(geo,geo.alpha,i); //Precompute per angle constant stuff for speed computeDeltas_parallel(geo,geo.alpha,i, &uvOrigin, &deltaU, &deltaV, &source); //Interpolation!! kernelPixelDetector_parallel<<<grid,block>>>(geo,dProjection, source, deltaU, deltaV, uvOrigin,floor(maxdist)); cudaCheckErrors("Kernel fail"); // copy result to host cudaMemcpy(result[i], dProjection, num_bytes, cudaMemcpyDeviceToHost); cudaCheckErrors("cudaMemcpy fail"); } if (timekernel){ cudaEventCreate(&stop); cudaEventRecord(stop,0); cudaEventSynchronize(stop); cudaEventElapsedTime(&elapsedTime, start,stop); //TODO: replace this // mexPrintf("%f\n" ,elapsedTime); } cudaUnbindTexture(tex); cudaCheckErrors("Unbind fail"); cudaFree(dProjection); cudaFreeArray(d_imagedata); cudaCheckErrors("cudaFree d_imagedata fail"); //cudaDeviceReset(); return 0; } /* This code precomputes The location of the source and the Delta U and delta V (in the warped space) * to compute the locations of the x-rays. While it seems verbose and overly-optimized, * it does saves about 30% of each of the kernel calls. Thats something! **/ void computeDeltas_parallel(Geometry geo, float alpha,int i, Point3D* uvorigin, Point3D* deltaU, Point3D* deltaV, Point3D* source){ Point3D S; S.x=geo.DSO; S.y=geo.dDetecU*(0-((float)geo.nDetecU/2)+0.5); S.z=geo.dDetecV*(((float)geo.nDetecV/2)-0.5-0); //End point Point3D P,Pu0,Pv0; P.x =-(geo.DSD-geo.DSO); P.y = geo.dDetecU*(0-((float)geo.nDetecU/2)+0.5); P.z = geo.dDetecV*(((float)geo.nDetecV/2)-0.5-0); Pu0.x=-(geo.DSD-geo.DSO); Pu0.y= geo.dDetecU*(1-((float)geo.nDetecU/2)+0.5); Pu0.z= geo.dDetecV*(((float)geo.nDetecV/2)-0.5-0); Pv0.x=-(geo.DSD-geo.DSO); Pv0.y= geo.dDetecU*(0-((float)geo.nDetecU/2)+0.5); Pv0.z= geo.dDetecV*(((float)geo.nDetecV/2)-0.5-1); // Geomtric trasnformations: //1: Offset detector //P.x P.y =P.y +geo.offDetecU[i]; P.z =P.z +geo.offDetecV[i]; Pu0.y=Pu0.y+geo.offDetecU[i]; Pu0.z=Pu0.z+geo.offDetecV[i]; Pv0.y=Pv0.y+geo.offDetecU[i]; Pv0.z=Pv0.z+geo.offDetecV[i]; //S doesnt need to chagne //3: Rotate (around z)! 
Point3D Pfinal, Pfinalu0, Pfinalv0; Pfinal.x =P.x*cos(geo.alpha)-P.y*sin(geo.alpha); Pfinal.y =P.y*cos(geo.alpha)+P.x*sin(geo.alpha); Pfinal.z =P.z; Pfinalu0.x=Pu0.x*cos(geo.alpha)-Pu0.y*sin(geo.alpha); Pfinalu0.y=Pu0.y*cos(geo.alpha)+Pu0.x*sin(geo.alpha); Pfinalu0.z=Pu0.z; Pfinalv0.x=Pv0.x*cos(geo.alpha)-Pv0.y*sin(geo.alpha); Pfinalv0.y=Pv0.y*cos(geo.alpha)+Pv0.x*sin(geo.alpha); Pfinalv0.z=Pv0.z; Point3D S2; S2.x=S.x*cos(geo.alpha)-S.y*sin(geo.alpha); S2.y=S.y*cos(geo.alpha)+S.x*sin(geo.alpha); S2.z=S.z; //2: Offset image (instead of offseting image, -offset everything else) Pfinal.x =Pfinal.x-geo.offOrigX[i]; Pfinal.y =Pfinal.y-geo.offOrigY[i]; Pfinal.z =Pfinal.z-geo.offOrigZ[i]; Pfinalu0.x=Pfinalu0.x-geo.offOrigX[i]; Pfinalu0.y=Pfinalu0.y-geo.offOrigY[i]; Pfinalu0.z=Pfinalu0.z-geo.offOrigZ[i]; Pfinalv0.x=Pfinalv0.x-geo.offOrigX[i]; Pfinalv0.y=Pfinalv0.y-geo.offOrigY[i]; Pfinalv0.z=Pfinalv0.z-geo.offOrigZ[i]; S2.x=S2.x-geo.offOrigX[i]; S2.y=S2.y-geo.offOrigY[i]; S2.z=S2.z-geo.offOrigZ[i]; // As we want the (0,0,0) to be in a corner of the image, we need to translate everything (after rotation); Pfinal.x =Pfinal.x+geo.sVoxelX/2-geo.dVoxelX/2; Pfinal.y =Pfinal.y+geo.sVoxelY/2-geo.dVoxelY/2; Pfinal.z =Pfinal.z +geo.sVoxelZ/2-geo.dVoxelZ/2; Pfinalu0.x=Pfinalu0.x+geo.sVoxelX/2-geo.dVoxelX/2; Pfinalu0.y=Pfinalu0.y+geo.sVoxelY/2-geo.dVoxelY/2; Pfinalu0.z=Pfinalu0.z+geo.sVoxelZ/2-geo.dVoxelZ/2; Pfinalv0.x=Pfinalv0.x+geo.sVoxelX/2-geo.dVoxelX/2; Pfinalv0.y=Pfinalv0.y+geo.sVoxelY/2-geo.dVoxelY/2; Pfinalv0.z=Pfinalv0.z+geo.sVoxelZ/2-geo.dVoxelZ/2; S2.x =S2.x+geo.sVoxelX/2-geo.dVoxelX/2; S2.y =S2.y+geo.sVoxelY/2-geo.dVoxelY/2; S2.z =S2.z +geo.sVoxelZ/2-geo.dVoxelZ/2; //4. Scale everything so dVoxel==1 Pfinal.x =Pfinal.x/geo.dVoxelX; Pfinal.y =Pfinal.y/geo.dVoxelY; Pfinal.z =Pfinal.z/geo.dVoxelZ; Pfinalu0.x=Pfinalu0.x/geo.dVoxelX; Pfinalu0.y=Pfinalu0.y/geo.dVoxelY; Pfinalu0.z=Pfinalu0.z/geo.dVoxelZ; Pfinalv0.x=Pfinalv0.x/geo.dVoxelX; Pfinalv0.y=Pfinalv0.y/geo.dVoxelY; Pfinalv0.z=Pfinalv0.z/geo.dVoxelZ; S2.x =S2.x/geo.dVoxelX; S2.y =S2.y/geo.dVoxelY; S2.z =S2.z/geo.dVoxelZ; //5. apply COR. Wherever everything was, now its offesetd by a bit float CORx, CORy; CORx=-geo.COR[i]*sin(geo.alpha)/geo.dVoxelX; CORy= geo.COR[i]*cos(geo.alpha)/geo.dVoxelY; Pfinal.x+=CORx; Pfinal.y+=CORy; Pfinalu0.x+=CORx; Pfinalu0.y+=CORy; Pfinalv0.x+=CORx; Pfinalv0.y+=CORy; S2.x+=CORx; S2.y+=CORy; // return *uvorigin=Pfinal; deltaU->x=Pfinalu0.x-Pfinal.x; deltaU->y=Pfinalu0.y-Pfinal.y; deltaU->z=Pfinalu0.z-Pfinal.z; deltaV->x=Pfinalv0.x-Pfinal.x; deltaV->y=Pfinalv0.y-Pfinal.y; deltaV->z=Pfinalv0.z-Pfinal.z; *source=S2; }
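Editorial note (not part of the corpus, added as an illustration): the projector above binds its 3-D cudaArray to a legacy texture<> reference, an API that is deprecated and removed in recent CUDA toolkits. Below is a hedged sketch of the texture-object equivalent with the same border addressing and linear filtering; the helper name is an assumption, and the kernel would then take a cudaTextureObject_t parameter and sample with tex3D<float>(texObj, tx+0.5f, ty+0.5f, tz+0.5f) instead of the global tex reference.

#include <cstring>
#include <cuda_runtime.h>

cudaTextureObject_t makeVolumeTexture(cudaArray* d_imagedata) {
    cudaResourceDesc resDesc;
    memset(&resDesc, 0, sizeof(resDesc));
    resDesc.resType = cudaResourceTypeArray;
    resDesc.res.array.array = d_imagedata;            // the array filled by cudaMemcpy3D above

    cudaTextureDesc texDesc;
    memset(&texDesc, 0, sizeof(texDesc));
    texDesc.addressMode[0] = cudaAddressModeBorder;   // same behaviour as the legacy reference
    texDesc.addressMode[1] = cudaAddressModeBorder;
    texDesc.addressMode[2] = cudaAddressModeBorder;
    texDesc.filterMode     = cudaFilterModeLinear;    // hardware trilinear interpolation
    texDesc.readMode       = cudaReadModeElementType;
    texDesc.normalizedCoords = 0;

    cudaTextureObject_t texObj = 0;
    cudaCreateTextureObject(&texObj, &resDesc, &texDesc, NULL);
    return texObj;                                    // release with cudaDestroyTextureObject(texObj)
}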
764ac46bdbdae1e8056da37aecf5285e55edb368.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" __global__ void padding_nm2v( float *nm2v_re, float *nm2v_im, int nfermi, int norbs, int nvirt, int vstart) { int i = blockIdx.x * blockDim.x + threadIdx.x; //nocc int j = blockIdx.y * blockDim.y + threadIdx.y; //nvirt if (i > vstart && i < nfermi) { if ( j < norbs - vstart ) { nm2v_re[i*nvirt + j] = 0.0; nm2v_im[i*nvirt + j] = 0.0; } } }
764ac46bdbdae1e8056da37aecf5285e55edb368.cu
#include "includes.h" __global__ void padding_nm2v( float *nm2v_re, float *nm2v_im, int nfermi, int norbs, int nvirt, int vstart) { int i = blockIdx.x * blockDim.x + threadIdx.x; //nocc int j = blockIdx.y * blockDim.y + threadIdx.y; //nvirt if (i > vstart && i < nfermi) { if ( j < norbs - vstart ) { nm2v_re[i*nvirt + j] = 0.0; nm2v_im[i*nvirt + j] = 0.0; } } }
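Neither file in this pair includes the host-side call, so for context: padding_nm2v zeroes a rectangular sub-block of the nm2v matrices, and its bounds checks let any 2D grid that covers nfermi in x and nvirt in y be launched safely. The wrapper below is a hedged sketch under that assumption; the 16x16 block shape and the d_nm2v_re / d_nm2v_im buffer names are illustrative, not taken from the original project.

// Hypothetical host-side launch for padding_nm2v (illustrative, not original code).
void launch_padding_nm2v(float* d_nm2v_re, float* d_nm2v_im,
                         int nfermi, int norbs, int nvirt, int vstart)
{
    dim3 block(16, 16);                              // assumed block shape
    dim3 grid((nfermi + block.x - 1) / block.x,      // x covers index i (< nfermi)
              (nvirt  + block.y - 1) / block.y);     // y covers index j (< nvirt)
    padding_nm2v<<<grid, block>>>(d_nm2v_re, d_nm2v_im, nfermi, norbs, nvirt, vstart);
    cudaDeviceSynchronize();                         // sketch only: surface launch errors here
}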
52ea91875f64e547d71e309ec64e0b481cf13525.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <math.h> #include <io.h> #include <stdlib.h> #include <string.h> #include <float.h> #include "utility.h" #include "utility.c" //#include "Trade.cuh" #include <time.h> #include <malloc.h> #include <hip/hip_runtime.h> #include <hip/hip_runtime_api.h> //#include "helper_cuda.h" //#include "helper_functions.h" #pragma warning( disable : 4996 ) //Declaring macros and constants in pre-processor - STEP A//INSERT NEW STUFF DOWN HERE EACH TIME START SUB-FUNCTION******PRE-PROCESSOR AREA*******// #define ABS(X) (X>=0?X:-X) #define MAX(X,Y) (X>=Y?X:Y) #define MIN(X,Y) (X<=Y?X:Y) #define SIGN(X) (X>=0?(X==0?0:1):-1) #define ROUND(X,Y) ((X>=0?(X<<1)+Y:(X<<1)-Y)/(Y<<1))*Y //Change path below for UNIX "c://usr" #define PATH "F:\\" #define LOOKBACK 1597 // 1597-987-610-377-144-89 fibos rolling optimization historical period #define STEP 377 // or 89 fibos step forward in time period for next rolling optimization #define NUMI 27 //up to 27 number of markets //INT MAIN//INSERT NEW STUFF HERE EACH TIME START SUB-FUNCTION*******MAIN AREA****//Declare each new variable here - initializing and declaring space/memory for return arrays of variables or output we want****STEP B// int main(int argc, char **argv){ //void main (int argc, char *argv[]){ FILE *recon, *fin, *ferr, *fins,*ferri; int *intperiod,*start_h,*stop_h,c, lens=0, combos=0,counters=0,starto=0,tachy=0,lenny=0; long *dt; float *zscores_d, *pnl_d, *pos_d, *rets_d,*a_d,*a_h,*pos,*pnl,*zscores,*rets; double *op,*hi,*lo,*p,*price, *smooth, *detrender, *period, *qu, *iu, *ji, *jq, *ib, *qb, *sib, *sqb,*re, *im,*sre,*sim,*speriod,*smperiod,*qc,*ic,*ric,*sig,*nois,*snr, *cumpnl, *sharp; double *sumi, *vari, *stdevi, *m_avei,*sumv, *varv, *stdevv, *m_avev, *dolls, *cumdolls, *sumip, *varip, *stdevip, *m_aveip, *sharpp; int i=0,combo=0,ii=0,zz=0,wins=0,counter=0,start=1,stop=0,*start_d,*stop_d,startf=0,stopf=0,beg=1,high=0,m=0,mm=0,gg=0; char desty[50],dest[50],desta[50],tmp[50],strs[50],foldr[50],fnum[50],fnums[50],dir[50]; int peri[100] = { { 0 } }; double pp[] = { 42000.00, 42000.00, 50.00, 20.00, 100.00, 100.00, 10.00, 25.00, 5.00, 1000.00, 1000.00, 2000.00, 1000.00, 2500.00, 100.00, 25000.00, 5000.00, 50.00, 100000.00, 125000.00, 125000.00, 125000.00, 62500.00, 50.00, 1000.00, 10000.00, 50.00 }; char *marks[] = {"RBOB","HO","SP", "ND", "EMD", "TF", "FESX", "FDAX", "NK", "US", "TY", "TU", "FGBL", "ED", "GC", "HG", "SI", "PL", "AD", "EC", "SF", "JY", "BP", "S", "CL", "NG", "C"}; char *dfiles[] = {PATH"data0.dat",PATH"data1.dat",PATH"data2.dat",PATH"data3.dat",PATH"data4.dat",PATH"data5.dat",PATH"data6.dat",PATH"data7.dat",PATH"data8.dat",PATH"data9.dat",PATH"data10.dat",PATH"data11.dat",PATH"data12.dat",PATH"data13.dat", PATH"data14.dat",PATH"data15.dat",PATH"data16.dat",PATH"data17.dat",PATH"data18.dat",PATH"data19.dat",PATH"data20.dat",PATH"data21.dat",PATH"data22.dat",PATH"data23.dat",PATH"data24.dat",PATH"data25.dat",PATH"data26.dat"}; clock_t t1, t2; float diff; char sources[60],source[60],line[100]; long end, endf; double a[] = { 1.25, 1.50 }; //array holder for parameter combinations later on aka "parameter sweeps" which GPU can greatly speed up// a[] is # standard deviations// double b[] = { 21.00, 34.00 }; //double b[] = { 3.0, 5.0, 8.0, 10.0 }; double lensa=sizeof(a)/sizeof(double); double maxi=0.00,mat=0.00; double lensb=sizeof(b)/sizeof(double); double lensc=lensa * lensb; //number of parameter combinations double sharplist[100][7][100] = { { 0 } }; double 
table[100][5]= { { 0 } }; double ocum[100][3][100]= { { 0 } }; int z=0, j=0, winos=0,lensz=0; double sumss[100] = { { 0 } }; double avv=0.00, sharpie=0.00, sharpies=0.00; t1 = clock(); recon=f_openw(PATH"recon.dat"); for (gg = 1; gg <= NUMI; gg++) { // top loop for number of market data files passed thru dfiles[] // must change NUMI in #def as add number of markets // should I use STRUCT instead to store file names? sprintf(sources,dfiles[gg-1]); //find and open price data files to get lengths for periodicities// fins=f_openr(sources); endf=f_line(fins); endf--; peri[gg] = (int)(((endf-LOOKBACK)/STEP)+1); //number of rolling periods in each data set for rolling optimization (aka moving average)// f_close(sources,fins); sprintf(fnums, "%d", gg); strcpy(desta, PATH"OSrunALL"); //output directory for out-of-sample tests for all combined tests per market strcpy(foldr, ".dat"); strcat(desta,"-"); strcat(desta,fnums); strcat(desta,"-"); strcat(desta,marks[gg-1]); strcat(desta,foldr); ferri=f_openw(desta); for (ii = 1; ii <= peri[gg] ; ii++) { // loop is for periodicity - so 30yrs of price data divided into sub-units for rolling optimization (aka parameter sweeps) for (z = 0; z < lensa; z++) { // 2 nested for loops for parameter sweep or combination of arrays a[] and b[]// for (j = 0; j < lensb; j++) { lens = (int)(b[j]); mm=(lensa*z)+j; sprintf(strs, "%d", mm); sprintf(fnum, "%d", gg); sprintf(tmp, "%d", ii); strcpy(desty, PATH"ISout"); //output directory for in-sample tests strcpy(foldr, ".dat"); strcat(desty,strs); strcat(desty,"-"); strcat(desty,fnum); strcat(desty,"-"); strcat(desty,tmp); strcat(desty,"-"); strcat(desty,marks[gg-1]); strcat(desty,foldr); ferr=f_openw(desty); //find and open output file// sprintf(source,dfiles[gg-1]); //find and open price data file// fin=f_openr(source); end=f_line(fin); end--; start = beg + (STEP*(ii-1)); //start stop dates for inner loop ii for rolling period optimizations stop = LOOKBACK + (STEP*(ii-1)); dt=(long*) calloc(end+1,sizeof(long)); op=(double*) calloc(end+1,sizeof(double)); hi=(double*) calloc(end+1,sizeof(double)); lo=(double*) calloc(end+1,sizeof(double)); p=(double*) calloc(end+1,sizeof(double)); price=(double*) calloc(end+1,sizeof(double)); smooth=(double*) calloc(end+1,sizeof(double)); detrender=(double*) calloc(end+1,sizeof(double)); period=(double*) calloc(end+1,sizeof(double)); qu=(double*) calloc(end+1,sizeof(double)); iu=(double*) calloc(end+1,sizeof(double)); ji=(double*) calloc(end+1,sizeof(double)); jq=(double*) calloc(end+1,sizeof(double)); ib=(double*) calloc(end+1,sizeof(double)); qb=(double*) calloc(end+1,sizeof(double)); sib=(double*) calloc(end+1,sizeof(double)); sqb=(double*) calloc(end+1,sizeof(double)); re=(double*) calloc(end+1,sizeof(double)); im=(double*) calloc(end+1,sizeof(double)); sre=(double*) calloc(end+1,sizeof(double)); sim=(double*) calloc(end+1,sizeof(double)); speriod=(double*) calloc(end+1,sizeof(double)); smperiod=(double*) calloc(end+1,sizeof(double)); qc=(double*) calloc(end+1,sizeof(double)); ic=(double*) calloc(end+1,sizeof(double)); ric=(double*) calloc(end+1,sizeof(double)); intperiod=(int*) calloc(end+1,sizeof(double)); sig=(double*) calloc(end+1,sizeof(double)); nois=(double*) calloc(end+1,sizeof(double)); snr=(double*) calloc(end+1,sizeof(double)); //** ALLOCATE SPACE FOR MEMORY FOR CUDA-RELATED HOST VARIABLES - USE PINNED/SHARED MEMORY FOR MORE SPEED!!! 
** //rets=(float*) calloc(end+1,sizeof(float)); hipHostMalloc(&rets, (int)(end)*sizeof(float), hipHostMallocDefault); //memset(rets, 0, (int)(end)*sizeof(float)); dolls=(double*) calloc(end+1,sizeof(double)); cumdolls=(double*) calloc(end+1,sizeof(double)); //pos=(float*) calloc(end+1,sizeof(float)); hipHostMalloc(&pos, (int)(end)*sizeof(float), hipHostMallocDefault); //memset(pos, 0, (int)(end)*sizeof(float)); //pnl=(float*) calloc(end+1,sizeof(float)); hipHostMalloc(&pnl, (int)(end)*sizeof(float), hipHostMallocDefault); //memset(pnl, 0, (int)(end)*sizeof(float)); cumpnl=(double*) calloc(end+1,sizeof(double)); //zscores=(float*) calloc(end+1,sizeof(float)); hipHostMalloc(&zscores, (int)(end)*sizeof(float), hipHostMallocDefault); //memset(zscores, 0, (int)(end)*sizeof(float)); sharpp=(double*) calloc(end+1,sizeof(double)); sumip=(double*) calloc(end+1,sizeof(double)); varip=(double*) calloc(end+1,sizeof(double)); stdevip=(double*) calloc(end+1,sizeof(double)); m_aveip=(double*) calloc(end+1,sizeof(double)); sharp=(double*) calloc(end+1,sizeof(double)); sumi=(double*) calloc(end+1,sizeof(double)); vari=(double*) calloc(end+1,sizeof(double)); stdevi=(double*) calloc(end+1,sizeof(double)); m_avei=(double*) calloc(end+1,sizeof(double)); sumv=(double*) calloc(end+1,sizeof(double)); varv=(double*) calloc(end+1,sizeof(double)); stdevv=(double*) calloc(end+1,sizeof(double)); m_avev=(double*) calloc(end+1,sizeof(double)); //** ALLOCATE SPACE FOR MEMORY FOR CUDA-RELATED DEVICE VARIABLES** hipMalloc((void**)&zscores_d, (int)(end)*sizeof(float)); //hipMemset(zscores_d, 0, (int)(end)*sizeof(float)); hipMalloc((void**)&pos_d, (int)(end)*sizeof(float)); hipMemset(pos_d, 0, (int)(end)*sizeof(float)); hipMalloc((void**)&pnl_d, (int)(end)*sizeof(float)); hipMemset(pnl_d, 0, (int)(end)*sizeof(float)); hipMalloc((void**)&rets_d, (int)(end)*sizeof(float)); //hipMemset(rets_d, 0, (int)(end)*sizeof(float)); /*hipMalloc((void**)&a_d, 1); hipMalloc((void**)&start_d, 1); hipMalloc((void**)&stop_d, 1);*/ //INSERT NEW STUFF HERE EACH TIME START SUB-FUNCTION*************///CALLING function//STEP C// i=0; while(fgets(line,100,fin)>0){ sscanf(line,"%ld %lf %lf %lf %lf",&dt[i],&op[i],&hi[i],&lo[i],&p[i]);i++;} //scan lines from data file and store in arrays - this is price data here// f_close(source,fin); //close data file //Using or CALLING function here// DO NOT NEED TO DEFINE INPUTS-OUTPUTS here - that is done at bottom down BELOW!!// ret(p, end, rets); //snf(op, lo, hi, end, price, smooth, detrender, period, qu, iu, ji, jq, ib, qb, sib, sqb, re, im, sre, sim, speriod, smperiod, qc, ic, ric, intperiod, sig, nois, snr); zscore(lens, p, sumv, varv, end, zscores, stdevv, m_avev); m = (lensa*z)+j; //** COPY CUDA VARIABLES FROM CPU (HOST) TO GPU (DEVICE) - USE ASYNC TRANSFER FOR MORE SPEED SO CPU DOES NOT HAVE TO WAIT FOR GPU TO FINISH OPERATION AND CAN PROCEED FURTHER IN THE MAIN PROGRAM** hipMemcpyAsync(zscores_d, zscores, (int)(end)*sizeof(float), hipMemcpyHostToDevice,0); hipMemcpyAsync(rets_d, rets, (int)(end)*sizeof(float), hipMemcpyHostToDevice,0); lenny=stop-start; dim3 threads; threads.x = 896; //use 896 threads as per specific GPU device for higher OCCUPANCY/USE OF CARD - trial-and-error via PROFILING //dim3 blocks; blocks.x = ((int)(end)/threads.x) + 1; //kernelSim<<<threads,blocks>>>(zscores_d,rets_d,pnl_d,pos_d,start,stop,a[z]); //** CALL GPU FUNCTION/KERNEL HERE FOR MODEL PARAMETER SWEEP TO GENERATE IN_SAMPLE RESULTS** kernelSim<<<threads,112>>>(zscores_d,rets_d,pnl_d,pos_d,start,stop,(float)(a[z]),lens); 
//** COPY CUDA VARIABLES/RESULTS FROM GPU (DEVICE) BACK TO CPU (HOST) - MUST WAIT FOR GPU OPERATION/FUNCTION TO FINISH HERE SINCE LOW ASYNC/CONCURRENCY ON NON_TESLA GPU DEVICES** hipMemcpy(pos, pos_d, (int)(end)*sizeof(float)/*stop-start*/, hipMemcpyDeviceToHost); hipMemcpy(pnl, pnl_d, (int)(end)*sizeof(float), hipMemcpyDeviceToHost); //for(i=start;i<stop;i++){ //IN-sample rolling optimization for old CPU CODE - NOW WE'RE USING GPU INSTEAD FOR MORE SPEED** // // if(zscores[i] > a[z]) pos[i] = 1.00; // if(zscores[i] < -a[z]) pos[i] = -1.00; // pnl[i] = (pos[i] * rets[i]); } sharpep(pnl, sumip, varip, start, stop, stdevip, m_aveip, sharpp); table[m][0] = m; table[m][1] = a[z]; table[m][2] = b[j]; table[m][3] = sharpp[stop-1]; sharpie = sharpie + sharpp[stop-1];//end? if (table[m][3] > 0.00) combo = combo + 1; counter=counter+1; //table[m][4] = cumpnl[stop-1]; printf("\nIS Test%.0f Market%d-%s Period%d", table[m][0],gg,marks[gg-1],ii); printf("\nSharpe\t%.2f", table[m][3]); printf("\nParam1\t%.2f", table[m][1]); printf("\nParam2\t%.0f", table[m][2]); //printf("\nCum Ret\t%.2f%%", table[m][4]*100); //printf("\nAnn Ret\t%.2f%%", (table[m][4]*100)/(LOOKBACK/260)); //printf("\nAnn Vol\t%.2f%%", ABS(((table[m][4]*100)/(LOOKBACK/260))/table[m][3])); printf("\nNum of Years: %.2f thru %.2f of %.2f total\n", ((((ii-1)*(double)(STEP)))/260),(((double)(LOOKBACK) + (ii*(double)(STEP)))/260)-(double)(STEP)/260,((double)(end)/260)); fprintf(recon,"\nIS Test%.0f Market%d-%s Period%d", table[m][0],gg,marks[gg-1],ii); fprintf(recon,"\nSharpe\t%.2f", table[m][3]); fprintf(recon,"\nParam1\t%.2f", table[m][1]); fprintf(recon,"\nParam2\t%.0f", table[m][2]); //fprintf(recon,"\nCum Ret\t%.2f%%", table[m][4]*100); //fprintf(recon,"\nAnn Ret\t%.2f%%", (table[m][4]*100)/(LOOKBACK/260)); //fprintf(recon,"\nAnn Vol\t%.2f%%", ABS(((table[m][4]*100)/(LOOKBACK/260))/table[m][3])); fprintf(recon,"\nNum of Years: %.2f thru %.2f of %.2f total\n", ((((ii-1)*(double)(STEP)))/260),(((double)(LOOKBACK) + (ii*(double)(STEP)))/260)-(double)(STEP)/260,((double)(end)/260)); for(i=start;i<stop;i++) { fprintf(ferr,"%ld\t %10.6lf\t %10.3lf\t %10.2lf\t %10.5lf\t %10.5lf\t %10.5lf\n",dt[i],p[i],zscores[i],pos[i],rets[i],pnl[i],sharpp[i]); } // f_close(desty,ferr); //close output file for (i = 0; i < lensc; i++) //find best sharpe ratio from table { if (table[i][3] > maxi) maxi = table[i][3]; if (maxi == table[i][3]) high=i; } sharplist[gg][0][ii] = high; //row of max sharpe recap sharplist[gg][6][ii] = table[high][3]; //max sharpe sharplist[gg][1][ii] = table[high][1]; //param 1 recap sharplist[gg][2][ii] = table[high][2]; //param 2 recap sharplist[gg][3][ii] = table[high][4]; //cum ret recap sharplist[gg][4][ii] = table[high][0]; //test number recap sharplist[gg][5][ii] = gg; //market number recap maxi=0.00; //ADD IN EACH POINTER VARIABLE HERE - FREEING UP SPACE IN MEMORY*******STEP D// //hipDeviceReset(); free(hi);free(lo);free(p);free(price);free(smooth);free(detrender);free(period);free(qu);free(iu);free(ji);free(jq);free(ib);free(qb);free(sib);free(re);free(im);free(sre);free(sim);free(speriod);free(smperiod);free(qc);free(ic);free(ric);free(intperiod);free(sig);free(nois);free(snr); hipHostFree(rets);hipHostFree(zscores);free(cumpnl);free(op);free(sharp);free(sumi);free(vari);free(stdevi);free(m_avei); free(sumv);free(varv);free(stdevv);free(m_avev);free(dolls);free(cumdolls);free(sharpp);free(sumip);free(varip);free(stdevip);free(m_aveip);//free(a_h);free(start_h);free(stop_h); 
hipFree(zscores_d);hipFree(pnl_d);hipFree(pos_d);hipFree(rets_d);hipHostFree(pos);hipHostFree(pnl); //hipHostFree(zscores_d);hipHostFree(pnl_d);hipHostFree(pos_d);hipHostFree(rets_d); } } avv = avv+sharplist[gg][6][ii]/peri[gg]; //avg max sharpe if (sharplist[gg][6][ii] > 0.00) wins = wins+ii/peri[gg]; //winning markets printf("\n%.2f Max Sharpe of Market%d-%s Period%d is Test %.0f with STD %.2f and SNR %.0f\n", sharplist[gg][6][ii],gg,marks[gg-1],ii,sharplist[gg][0][ii],sharplist[gg][1][ii],sharplist[gg][2][ii]); fprintf(recon,"\n%.2f Max Sharpe of Market%d-%s Period%d is Test %.0f with STD %.2f and SNR %.0f\n", sharplist[gg][6][ii],gg,marks[gg-1],ii,sharplist[gg][0][ii],sharplist[gg][1][ii],sharplist[gg][2][ii]); sprintf(fnum, "%d", gg); sprintf(tmp, "%d", ii); strcpy(dest, PATH"OSrun"); //output directory for out-of-sample tests strcpy(foldr, ".dat"); strcat(dest,"-"); strcat(dest,fnum); strcat(dest,"-"); strcat(dest,tmp); strcat(dest,"-"); strcat(dest,marks[gg-1]); strcat(dest,foldr); ferr=f_openw(dest); //find and open output file// sprintf(source,dfiles[gg-1]); //find and open price data file// fin=f_openr(source); end=f_line(fin); end--; dt=(long*) calloc(end+1,sizeof(long)); op=(double*) calloc(end+1,sizeof(double)); hi=(double*) calloc(end+1,sizeof(double)); lo=(double*) calloc(end+1,sizeof(double)); p=(double*) calloc(end+1,sizeof(double)); price=(double*) calloc(end+1,sizeof(double)); smooth=(double*) calloc(end+1,sizeof(double)); detrender=(double*) calloc(end+1,sizeof(double)); period=(double*) calloc(end+1,sizeof(double)); qu=(double*) calloc(end+1,sizeof(double)); iu=(double*) calloc(end+1,sizeof(double)); ji=(double*) calloc(end+1,sizeof(double)); jq=(double*) calloc(end+1,sizeof(double)); ib=(double*) calloc(end+1,sizeof(double)); qb=(double*) calloc(end+1,sizeof(double)); sib=(double*) calloc(end+1,sizeof(double)); sqb=(double*) calloc(end+1,sizeof(double)); re=(double*) calloc(end+1,sizeof(double)); im=(double*) calloc(end+1,sizeof(double)); sre=(double*) calloc(end+1,sizeof(double)); sim=(double*) calloc(end+1,sizeof(double)); speriod=(double*) calloc(end+1,sizeof(double)); smperiod=(double*) calloc(end+1,sizeof(double)); qc=(double*) calloc(end+1,sizeof(double)); ic=(double*) calloc(end+1,sizeof(double)); ric=(double*) calloc(end+1,sizeof(double)); intperiod=(int*) calloc(end+1,sizeof(double)); sig=(double*) calloc(end+1,sizeof(double)); nois=(double*) calloc(end+1,sizeof(double)); snr=(double*) calloc(end+1,sizeof(double)); //** ALLOCATE SPACE FOR MEMORY FOR CUDA-RELATED HOST VARIABLES - USE PINNED/SHARED MEMORY FOR MORE SPEED!!! 
** //rets=(float*) calloc(end+1,sizeof(float)); hipHostMalloc(&rets, (int)(end)*sizeof(float), hipHostMallocDefault); //memset(rets, 0, (int)(end)*sizeof(float)); dolls=(double*) calloc(end+1,sizeof(double)); cumdolls=(double*) calloc(end+1,sizeof(double)); //pos=(float*) calloc(end+1,sizeof(float)); hipHostMalloc(&pos, (int)(end)*sizeof(float), hipHostMallocDefault); //memset(pos, 0, (int)(end)*sizeof(float)); //pnl=(float*) calloc(end+1,sizeof(float)); hipHostMalloc(&pnl, (int)(end)*sizeof(float), hipHostMallocDefault); //memset(pnl, 0, (int)(end)*sizeof(float)); cumpnl=(double*) calloc(end+1,sizeof(double)); //zscores=(float*) calloc(end+1,sizeof(float)); hipHostMalloc(&zscores, (int)(end)*sizeof(float), hipHostMallocDefault); //memset(zscores, 0, (int)(end)*sizeof(float)); sharpp=(double*) calloc(end+1,sizeof(double)); sumip=(double*) calloc(end+1,sizeof(double)); varip=(double*) calloc(end+1,sizeof(double)); stdevip=(double*) calloc(end+1,sizeof(double)); m_aveip=(double*) calloc(end+1,sizeof(double)); sharp=(double*) calloc(end+1,sizeof(double)); sumi=(double*) calloc(end+1,sizeof(double)); vari=(double*) calloc(end+1,sizeof(double)); stdevi=(double*) calloc(end+1,sizeof(double)); m_avei=(double*) calloc(end+1,sizeof(double)); sumv=(double*) calloc(end+1,sizeof(double)); varv=(double*) calloc(end+1,sizeof(double)); stdevv=(double*) calloc(end+1,sizeof(double)); m_avev=(double*) calloc(end+1,sizeof(double)); //** ALLOCATE SPACE FOR MEMORY FOR CUDA-RELATED DEVICE VARIABLES** hipMalloc((void**)&zscores_d, (int)(end)*sizeof(float)); //hipMemset(zscores_d, 0, (int)(end)*sizeof(float)); hipMalloc((void**)&pos_d, (int)(end)*sizeof(float)); hipMemset(pos_d, 0, (int)(end)*sizeof(float)); hipMalloc((void**)&pnl_d, (int)(end)*sizeof(float)); hipMemset(pnl_d, 0, (int)(end)*sizeof(float)); hipMalloc((void**)&rets_d, (int)(end)*sizeof(float)); starto = LOOKBACK + (STEP*(ii-1)); stopf = LOOKBACK + (STEP*(ii-0)); if(ii>1) tachy = 1; //use to go back n peroids for max sharpe offset if(stopf>=end) stopf = end; i=0; while(fgets(line,100,fin)>0){ sscanf(line,"%ld %lf %lf %lf %lf",&dt[i],&op[i],&hi[i],&lo[i],&p[i]);i++;} //scan lines from data file and store in arrays - this is price data here// f_close(source,fin); //close data file ret(p, end, rets); lensz = (int)(sharplist[gg][2][ii-tachy] ); zscore(lensz, p, sumv, varv, end, zscores, stdevv, m_avev); //** COPY CUDA VARIABLES FROM CPU (HOST) TO GPU (DEVICE) - USE ASYNC TRANSFER FOR MORE SPEED SO CPU DOES NOT HAVE TO WAIT FOR GPU TO FINISH OPERATION AND CAN PROCEED FURTHER IN THE MAIN PROGRAM** hipMemcpyAsync(zscores_d, zscores, (int)(end)*sizeof(float), hipMemcpyHostToDevice,0); hipMemcpyAsync(rets_d, rets, (int)(end)*sizeof(float), hipMemcpyHostToDevice,0); lenny=stopf-starto; dim3 threads; threads.x = 896; //use 896 threads as per specific GPU device for higher OCCUPANCY/USE OF CARD - trial-and-error via PROFILING //dim3 blocks; blocks.x = ((int)(end)/threads.x) + 1; //kernelSim<<<threads,blocks>>>(zscores_d,rets_d,pnl_d,pos_d,start,stop,a[z]); //** CALL GPU FUNCTION/KERNEL HERE FOR MODEL PARAMETER SWEEP TO GENERATE IN_SAMPLE RESULTS** kernelSim<<<threads,112>>>(zscores_d,rets_d,pnl_d,pos_d,starto,stopf,(float)(sharplist[gg][1][ii-tachy]),lensz); //** COPY CUDA VARIABLES/RESULTS FROM GPU (DEVICE) BACK TO CPU (HOST) - MUST WAIT FOR GPU OPERATION/FUNCTION TO FINISH HERE SINCE LOW ASYNC/CONCURRENCY ON NON_TESLA GPU DEVICES** hipMemcpy(pos, pos_d, (int)(end)*sizeof(float)/*stop-start*/, hipMemcpyDeviceToHost); hipMemcpy(pnl, pnl_d, 
(int)(end)*sizeof(float), hipMemcpyDeviceToHost); //for(i=starto;i<stopf;i++){ //OUT-OF-SAMPLE runs for old CPU CODE - NOW WE'RE USING GPU INSTEAD FOR MORE SPEED** // // if(zscores[i] > sharplist[gg][1][ii-tachy]) pos[i] = 1.00; // if(zscores[i] < -sharplist[gg][1][ii-tachy]) pos[i] = -1.00; // pnl[i] = (pos[i] * rets[i]);} sharpep(pnl, sumip, varip, starto, stopf, stdevip, m_aveip, sharpp); //ocum[gg][0][ii] = cumpnl[stopf-1]; //ocum[gg][1][ii] = cumdolls[stopf-1]; ocum[gg][2][ii] = sharpp[stopf-1]; if (sharpp[stopf-1] > 0.00) combos = combos + 1; sharpies = sharpies + sharpp[stopf-1]/(peri[gg]); counters=counters+1; mat =(((((ii+0)*(double)(STEP)))+LOOKBACK)/260); if (mat >= ((double)(end)/260)) mat = ((double)(end)/260); if (stop>=end) mat = ((double)(end)/260); printf("\nOS PNL: Market%d-%s Period%d", gg,marks[gg-1],ii); printf("\nOS-Sharpe\t%.2f", sharpp[stopf-1]); printf("\nOS-Param1\t%.2f", sharplist[gg][1][ii-tachy]); printf("\nOS-Param2\t%.0f", sharplist[gg][2][ii-tachy]); //printf("\nOS-Cum Ret\t%.2f%%", cumpnl[stopf-1]*100); //printf("\nOS-Ann Ret\t%.2f%%", (cumpnl[stopf-1]*100)/(LOOKBACK/260)); //printf("\nOS-Ann Vol\t%.2f%%", ABS(((cumpnl[stopf-1]*100)/(LOOKBACK/260))/sharpp[stopf-1])); printf("\nNum of Years: %.2f thru %.2f of %.2f total\n", (((((ii-1)*(double)(STEP)))+LOOKBACK)/260),mat,((double)(end)/260)); fprintf(recon,"\nOS PNL: Market%d-%s Period%d", gg,marks[gg-1],ii); fprintf(recon,"\nOS-Sharpe\t%.2f", sharpp[stopf-1]); fprintf(recon,"\nOS-Param1\t%.2f", sharplist[gg][1][ii-tachy]); fprintf(recon,"\nOS-Param2\t%.0f", sharplist[gg][2][ii-tachy]); //fprintf(recon,"\nOS-Cum Ret\t%.2f%%", cumpnl[stopf-1]*100); //fprintf(recon,"\nOS-Ann Ret\t%.2f%%", (cumpnl[stopf-1]*100)/(LOOKBACK/260)); //fprintf(recon,"\nOS-Ann Vol\t%.2f%%", ABS(((cumpnl[stopf-1]*100)/(LOOKBACK/260))/sharpp[stopf-1])); fprintf(recon,"\nNum of Years: %.2f thru %.2f of %.2f total\n", (((((ii-1)*(double)(STEP)))+LOOKBACK)/260),mat,((double)(end)/260)); for(i=starto;i<stopf;i++) { fprintf(ferr,"%ld\t %10.6lf\t %10.3lf\t %10.2lf\t %10.5lf\t %10.5lf\t %10.5lf\n",dt[i],p[i],zscores[i],pos[i],rets[i],pnl[i],sharpp[i]); fprintf(ferri,"%ld\t %10.6lf\t %10.3lf\t %10.2lf\t %10.5lf\t %10.5lf\t %10.5lf\n",dt[i],p[i],zscores[i],pos[i],rets[i],pnl[i],sharpp[i]+ocum[gg][2][ii-1]); } // f_close(dest,ferr); //close output file if (sharpp[stopf-1] > 0.00) winos = winos+ii/(peri[gg]); /*ocum[gg][0][ii] = cumpnl[stopf-1]; ocum[gg][1][ii] = cumdolls[stopf-1]; ocum[gg][2][ii] = sharpp[stopf-1];*/ tachy=0; free(hi);free(lo);free(p);free(price);free(smooth);free(detrender);free(period);free(qu);free(iu);free(ji);free(jq);free(ib);free(qb);free(sib);free(re);free(im);free(sre);free(sim);free(speriod);free(smperiod);free(qc);free(ic);free(ric);free(intperiod);free(sig);free(nois);free(snr); hipHostFree(rets);hipHostFree(zscores);free(cumpnl);free(op);free(sharp);free(sumi);free(vari);free(stdevi);free(m_avei);//hipFree(zscores);hipFree(rets);hipFree(pnl);hipFree(pos); free(sumv);free(varv);free(stdevv);free(m_avev);free(dolls);free(cumdolls);free(sharpp);free(sumip);free(varip);free(stdevip);free(m_aveip);hipHostFree(pos);hipHostFree(pnl); hipFree(zscores_d);hipFree(pnl_d);hipFree(pos_d);hipFree(rets_d); } f_close(desta,ferri); //close output file } t2 = clock(); diff = (((float)t2 - (float)t1) / 1000000.0F ) * 1000; printf("\n\n%.2f min..Avg I-MaxSharpe is %.2f..+I-Sharpes are %d of %d combos or %.0f%%..\n+%d Win Markets out of %i..Avg All I-Sharpes is 
%.2f\n\n",diff/60.0,avv/NUMI,combo,counter,(((double)(combo)/(double)(counter))*100),wins,NUMI,sharpie/(double)(counter)); fprintf(recon,"\n\n%.2f min..Avg I-MaxSharpe is %.2f..+I-Sharpes are %d of %d combos or %.0f%%..\n+%d Win Markets out of %i..Avg All I-Sharpes is %.2f\n\n",diff/60.0,avv/NUMI,combo,counter,(((double)(combo)/(double)(counter))*100),wins,NUMI,sharpie/(double)(counter)); printf("\n\n%.2f min..+O-Sharpes are %d of %d combos or %.0f%%..\n+%d Win Markets out of %i..Avg All O-Sharpes is %.2f\n\n",diff/60.0,combos,counters,(((double)(combos)/(double)(counters))*100),winos,NUMI,sharpies/(double)(NUMI));//(double)(counters)); //NUMI fprintf(recon,"\n\n%.2f sec..+O-Sharpes are %d of %d combos or %.0f%%..\n+%d Win Markets out of %i..Avg All O-Sharpes is %.2f\n\n",diff/60.0,combos,counters,(((double)(combos)/(double)(counters))*100),winos,NUMI,sharpies/(double)(NUMI)); //NUMI f_close(PATH"recon.dat",recon); //**RESET GPU DEVICE**// hipDeviceReset(); system("pause"); }
52ea91875f64e547d71e309ec64e0b481cf13525.cu
#include <stdio.h> #include <math.h> #include <io.h> #include <stdlib.h> #include <string.h> #include <float.h> #include "utility.h" #include "utility.c" //#include "Trade.cuh" #include <time.h> #include <malloc.h> #include <cuda_runtime.h> #include <cuda_runtime_api.h> //#include "helper_cuda.h" //#include "helper_functions.h" #pragma warning( disable : 4996 ) //Declaring macros and constants in pre-processor - STEP A//INSERT NEW STUFF DOWN HERE EACH TIME START SUB-FUNCTION******PRE-PROCESSOR AREA*******// #define ABS(X) (X>=0?X:-X) #define MAX(X,Y) (X>=Y?X:Y) #define MIN(X,Y) (X<=Y?X:Y) #define SIGN(X) (X>=0?(X==0?0:1):-1) #define ROUND(X,Y) ((X>=0?(X<<1)+Y:(X<<1)-Y)/(Y<<1))*Y //Change path below for UNIX "c://usr" #define PATH "F:\\" #define LOOKBACK 1597 // 1597-987-610-377-144-89 fibos rolling optimization historical period #define STEP 377 // or 89 fibos step forward in time period for next rolling optimization #define NUMI 27 //up to 27 number of markets //INT MAIN//INSERT NEW STUFF HERE EACH TIME START SUB-FUNCTION*******MAIN AREA****//Declare each new variable here - initializing and declaring space/memory for return arrays of variables or output we want****STEP B// int main(int argc, char **argv){ //void main (int argc, char *argv[]){ FILE *recon, *fin, *ferr, *fins,*ferri; int *intperiod,*start_h,*stop_h,c, lens=0, combos=0,counters=0,starto=0,tachy=0,lenny=0; long *dt; float *zscores_d, *pnl_d, *pos_d, *rets_d,*a_d,*a_h,*pos,*pnl,*zscores,*rets; double *op,*hi,*lo,*p,*price, *smooth, *detrender, *period, *qu, *iu, *ji, *jq, *ib, *qb, *sib, *sqb,*re, *im,*sre,*sim,*speriod,*smperiod,*qc,*ic,*ric,*sig,*nois,*snr, *cumpnl, *sharp; double *sumi, *vari, *stdevi, *m_avei,*sumv, *varv, *stdevv, *m_avev, *dolls, *cumdolls, *sumip, *varip, *stdevip, *m_aveip, *sharpp; int i=0,combo=0,ii=0,zz=0,wins=0,counter=0,start=1,stop=0,*start_d,*stop_d,startf=0,stopf=0,beg=1,high=0,m=0,mm=0,gg=0; char desty[50],dest[50],desta[50],tmp[50],strs[50],foldr[50],fnum[50],fnums[50],dir[50]; int peri[100] = { { 0 } }; double pp[] = { 42000.00, 42000.00, 50.00, 20.00, 100.00, 100.00, 10.00, 25.00, 5.00, 1000.00, 1000.00, 2000.00, 1000.00, 2500.00, 100.00, 25000.00, 5000.00, 50.00, 100000.00, 125000.00, 125000.00, 125000.00, 62500.00, 50.00, 1000.00, 10000.00, 50.00 }; char *marks[] = {"RBOB","HO","SP", "ND", "EMD", "TF", "FESX", "FDAX", "NK", "US", "TY", "TU", "FGBL", "ED", "GC", "HG", "SI", "PL", "AD", "EC", "SF", "JY", "BP", "S", "CL", "NG", "C"}; char *dfiles[] = {PATH"data0.dat",PATH"data1.dat",PATH"data2.dat",PATH"data3.dat",PATH"data4.dat",PATH"data5.dat",PATH"data6.dat",PATH"data7.dat",PATH"data8.dat",PATH"data9.dat",PATH"data10.dat",PATH"data11.dat",PATH"data12.dat",PATH"data13.dat", PATH"data14.dat",PATH"data15.dat",PATH"data16.dat",PATH"data17.dat",PATH"data18.dat",PATH"data19.dat",PATH"data20.dat",PATH"data21.dat",PATH"data22.dat",PATH"data23.dat",PATH"data24.dat",PATH"data25.dat",PATH"data26.dat"}; clock_t t1, t2; float diff; char sources[60],source[60],line[100]; long end, endf; double a[] = { 1.25, 1.50 }; //array holder for parameter combinations later on aka "parameter sweeps" which GPU can greatly speed up// a[] is # standard deviations// double b[] = { 21.00, 34.00 }; //double b[] = { 3.0, 5.0, 8.0, 10.0 }; double lensa=sizeof(a)/sizeof(double); double maxi=0.00,mat=0.00; double lensb=sizeof(b)/sizeof(double); double lensc=lensa * lensb; //number of parameter combinations double sharplist[100][7][100] = { { 0 } }; double table[100][5]= { { 0 } }; double ocum[100][3][100]= { { 0 } }; int 
z=0, j=0, winos=0,lensz=0; double sumss[100] = { { 0 } }; double avv=0.00, sharpie=0.00, sharpies=0.00; t1 = clock(); recon=f_openw(PATH"recon.dat"); for (gg = 1; gg <= NUMI; gg++) { // top loop for number of market data files passed thru dfiles[] // must change NUMI in #def as add number of markets // should I use STRUCT instead to store file names? sprintf(sources,dfiles[gg-1]); //find and open price data files to get lengths for periodicities// fins=f_openr(sources); endf=f_line(fins); endf--; peri[gg] = (int)(((endf-LOOKBACK)/STEP)+1); //number of rolling periods in each data set for rolling optimization (aka moving average)// f_close(sources,fins); sprintf(fnums, "%d", gg); strcpy(desta, PATH"OSrunALL"); //output directory for out-of-sample tests for all combined tests per market strcpy(foldr, ".dat"); strcat(desta,"-"); strcat(desta,fnums); strcat(desta,"-"); strcat(desta,marks[gg-1]); strcat(desta,foldr); ferri=f_openw(desta); for (ii = 1; ii <= peri[gg] ; ii++) { // loop is for periodicity - so 30yrs of price data divided into sub-units for rolling optimization (aka parameter sweeps) for (z = 0; z < lensa; z++) { // 2 nested for loops for parameter sweep or combination of arrays a[] and b[]// for (j = 0; j < lensb; j++) { lens = (int)(b[j]); mm=(lensa*z)+j; sprintf(strs, "%d", mm); sprintf(fnum, "%d", gg); sprintf(tmp, "%d", ii); strcpy(desty, PATH"ISout"); //output directory for in-sample tests strcpy(foldr, ".dat"); strcat(desty,strs); strcat(desty,"-"); strcat(desty,fnum); strcat(desty,"-"); strcat(desty,tmp); strcat(desty,"-"); strcat(desty,marks[gg-1]); strcat(desty,foldr); ferr=f_openw(desty); //find and open output file// sprintf(source,dfiles[gg-1]); //find and open price data file// fin=f_openr(source); end=f_line(fin); end--; start = beg + (STEP*(ii-1)); //start stop dates for inner loop ii for rolling period optimizations stop = LOOKBACK + (STEP*(ii-1)); dt=(long*) calloc(end+1,sizeof(long)); op=(double*) calloc(end+1,sizeof(double)); hi=(double*) calloc(end+1,sizeof(double)); lo=(double*) calloc(end+1,sizeof(double)); p=(double*) calloc(end+1,sizeof(double)); price=(double*) calloc(end+1,sizeof(double)); smooth=(double*) calloc(end+1,sizeof(double)); detrender=(double*) calloc(end+1,sizeof(double)); period=(double*) calloc(end+1,sizeof(double)); qu=(double*) calloc(end+1,sizeof(double)); iu=(double*) calloc(end+1,sizeof(double)); ji=(double*) calloc(end+1,sizeof(double)); jq=(double*) calloc(end+1,sizeof(double)); ib=(double*) calloc(end+1,sizeof(double)); qb=(double*) calloc(end+1,sizeof(double)); sib=(double*) calloc(end+1,sizeof(double)); sqb=(double*) calloc(end+1,sizeof(double)); re=(double*) calloc(end+1,sizeof(double)); im=(double*) calloc(end+1,sizeof(double)); sre=(double*) calloc(end+1,sizeof(double)); sim=(double*) calloc(end+1,sizeof(double)); speriod=(double*) calloc(end+1,sizeof(double)); smperiod=(double*) calloc(end+1,sizeof(double)); qc=(double*) calloc(end+1,sizeof(double)); ic=(double*) calloc(end+1,sizeof(double)); ric=(double*) calloc(end+1,sizeof(double)); intperiod=(int*) calloc(end+1,sizeof(double)); sig=(double*) calloc(end+1,sizeof(double)); nois=(double*) calloc(end+1,sizeof(double)); snr=(double*) calloc(end+1,sizeof(double)); //** ALLOCATE SPACE FOR MEMORY FOR CUDA-RELATED HOST VARIABLES - USE PINNED/SHARED MEMORY FOR MORE SPEED!!! 
** //rets=(float*) calloc(end+1,sizeof(float)); cudaHostAlloc(&rets, (int)(end)*sizeof(float), cudaHostAllocDefault); //memset(rets, 0, (int)(end)*sizeof(float)); dolls=(double*) calloc(end+1,sizeof(double)); cumdolls=(double*) calloc(end+1,sizeof(double)); //pos=(float*) calloc(end+1,sizeof(float)); cudaHostAlloc(&pos, (int)(end)*sizeof(float), cudaHostAllocDefault); //memset(pos, 0, (int)(end)*sizeof(float)); //pnl=(float*) calloc(end+1,sizeof(float)); cudaHostAlloc(&pnl, (int)(end)*sizeof(float), cudaHostAllocDefault); //memset(pnl, 0, (int)(end)*sizeof(float)); cumpnl=(double*) calloc(end+1,sizeof(double)); //zscores=(float*) calloc(end+1,sizeof(float)); cudaHostAlloc(&zscores, (int)(end)*sizeof(float), cudaHostAllocDefault); //memset(zscores, 0, (int)(end)*sizeof(float)); sharpp=(double*) calloc(end+1,sizeof(double)); sumip=(double*) calloc(end+1,sizeof(double)); varip=(double*) calloc(end+1,sizeof(double)); stdevip=(double*) calloc(end+1,sizeof(double)); m_aveip=(double*) calloc(end+1,sizeof(double)); sharp=(double*) calloc(end+1,sizeof(double)); sumi=(double*) calloc(end+1,sizeof(double)); vari=(double*) calloc(end+1,sizeof(double)); stdevi=(double*) calloc(end+1,sizeof(double)); m_avei=(double*) calloc(end+1,sizeof(double)); sumv=(double*) calloc(end+1,sizeof(double)); varv=(double*) calloc(end+1,sizeof(double)); stdevv=(double*) calloc(end+1,sizeof(double)); m_avev=(double*) calloc(end+1,sizeof(double)); //** ALLOCATE SPACE FOR MEMORY FOR CUDA-RELATED DEVICE VARIABLES** cudaMalloc((void**)&zscores_d, (int)(end)*sizeof(float)); //cudaMemset(zscores_d, 0, (int)(end)*sizeof(float)); cudaMalloc((void**)&pos_d, (int)(end)*sizeof(float)); cudaMemset(pos_d, 0, (int)(end)*sizeof(float)); cudaMalloc((void**)&pnl_d, (int)(end)*sizeof(float)); cudaMemset(pnl_d, 0, (int)(end)*sizeof(float)); cudaMalloc((void**)&rets_d, (int)(end)*sizeof(float)); //cudaMemset(rets_d, 0, (int)(end)*sizeof(float)); /*cudaMalloc((void**)&a_d, 1); cudaMalloc((void**)&start_d, 1); cudaMalloc((void**)&stop_d, 1);*/ //INSERT NEW STUFF HERE EACH TIME START SUB-FUNCTION*************///CALLING function//STEP C// i=0; while(fgets(line,100,fin)>0){ sscanf(line,"%ld %lf %lf %lf %lf",&dt[i],&op[i],&hi[i],&lo[i],&p[i]);i++;} //scan lines from data file and store in arrays - this is price data here// f_close(source,fin); //close data file //Using or CALLING function here// DO NOT NEED TO DEFINE INPUTS-OUTPUTS here - that is done at bottom down BELOW!!// ret(p, end, rets); //snf(op, lo, hi, end, price, smooth, detrender, period, qu, iu, ji, jq, ib, qb, sib, sqb, re, im, sre, sim, speriod, smperiod, qc, ic, ric, intperiod, sig, nois, snr); zscore(lens, p, sumv, varv, end, zscores, stdevv, m_avev); m = (lensa*z)+j; //** COPY CUDA VARIABLES FROM CPU (HOST) TO GPU (DEVICE) - USE ASYNC TRANSFER FOR MORE SPEED SO CPU DOES NOT HAVE TO WAIT FOR GPU TO FINISH OPERATION AND CAN PROCEED FURTHER IN THE MAIN PROGRAM** cudaMemcpyAsync(zscores_d, zscores, (int)(end)*sizeof(float), cudaMemcpyHostToDevice,0); cudaMemcpyAsync(rets_d, rets, (int)(end)*sizeof(float), cudaMemcpyHostToDevice,0); lenny=stop-start; dim3 threads; threads.x = 896; //use 896 threads as per specific GPU device for higher OCCUPANCY/USE OF CARD - trial-and-error via PROFILING //dim3 blocks; blocks.x = ((int)(end)/threads.x) + 1; //kernelSim<<<threads,blocks>>>(zscores_d,rets_d,pnl_d,pos_d,start,stop,a[z]); //** CALL GPU FUNCTION/KERNEL HERE FOR MODEL PARAMETER SWEEP TO GENERATE IN_SAMPLE RESULTS** 
kernelSim<<<threads,112>>>(zscores_d,rets_d,pnl_d,pos_d,start,stop,(float)(a[z]),lens); //** COPY CUDA VARIABLES/RESULTS FROM GPU (DEVICE) BACK TO CPU (HOST) - MUST WAIT FOR GPU OPERATION/FUNCTION TO FINISH HERE SINCE LOW ASYNC/CONCURRENCY ON NON_TESLA GPU DEVICES** cudaMemcpy(pos, pos_d, (int)(end)*sizeof(float)/*stop-start*/, cudaMemcpyDeviceToHost); cudaMemcpy(pnl, pnl_d, (int)(end)*sizeof(float), cudaMemcpyDeviceToHost); //for(i=start;i<stop;i++){ //IN-sample rolling optimization for old CPU CODE - NOW WE'RE USING GPU INSTEAD FOR MORE SPEED** // // if(zscores[i] > a[z]) pos[i] = 1.00; // if(zscores[i] < -a[z]) pos[i] = -1.00; // pnl[i] = (pos[i] * rets[i]); } sharpep(pnl, sumip, varip, start, stop, stdevip, m_aveip, sharpp); table[m][0] = m; table[m][1] = a[z]; table[m][2] = b[j]; table[m][3] = sharpp[stop-1]; sharpie = sharpie + sharpp[stop-1];//end? if (table[m][3] > 0.00) combo = combo + 1; counter=counter+1; //table[m][4] = cumpnl[stop-1]; printf("\nIS Test%.0f Market%d-%s Period%d", table[m][0],gg,marks[gg-1],ii); printf("\nSharpe\t%.2f", table[m][3]); printf("\nParam1\t%.2f", table[m][1]); printf("\nParam2\t%.0f", table[m][2]); //printf("\nCum Ret\t%.2f%%", table[m][4]*100); //printf("\nAnn Ret\t%.2f%%", (table[m][4]*100)/(LOOKBACK/260)); //printf("\nAnn Vol\t%.2f%%", ABS(((table[m][4]*100)/(LOOKBACK/260))/table[m][3])); printf("\nNum of Years: %.2f thru %.2f of %.2f total\n", ((((ii-1)*(double)(STEP)))/260),(((double)(LOOKBACK) + (ii*(double)(STEP)))/260)-(double)(STEP)/260,((double)(end)/260)); fprintf(recon,"\nIS Test%.0f Market%d-%s Period%d", table[m][0],gg,marks[gg-1],ii); fprintf(recon,"\nSharpe\t%.2f", table[m][3]); fprintf(recon,"\nParam1\t%.2f", table[m][1]); fprintf(recon,"\nParam2\t%.0f", table[m][2]); //fprintf(recon,"\nCum Ret\t%.2f%%", table[m][4]*100); //fprintf(recon,"\nAnn Ret\t%.2f%%", (table[m][4]*100)/(LOOKBACK/260)); //fprintf(recon,"\nAnn Vol\t%.2f%%", ABS(((table[m][4]*100)/(LOOKBACK/260))/table[m][3])); fprintf(recon,"\nNum of Years: %.2f thru %.2f of %.2f total\n", ((((ii-1)*(double)(STEP)))/260),(((double)(LOOKBACK) + (ii*(double)(STEP)))/260)-(double)(STEP)/260,((double)(end)/260)); for(i=start;i<stop;i++) { fprintf(ferr,"%ld\t %10.6lf\t %10.3lf\t %10.2lf\t %10.5lf\t %10.5lf\t %10.5lf\n",dt[i],p[i],zscores[i],pos[i],rets[i],pnl[i],sharpp[i]); } // f_close(desty,ferr); //close output file for (i = 0; i < lensc; i++) //find best sharpe ratio from table { if (table[i][3] > maxi) maxi = table[i][3]; if (maxi == table[i][3]) high=i; } sharplist[gg][0][ii] = high; //row of max sharpe recap sharplist[gg][6][ii] = table[high][3]; //max sharpe sharplist[gg][1][ii] = table[high][1]; //param 1 recap sharplist[gg][2][ii] = table[high][2]; //param 2 recap sharplist[gg][3][ii] = table[high][4]; //cum ret recap sharplist[gg][4][ii] = table[high][0]; //test number recap sharplist[gg][5][ii] = gg; //market number recap maxi=0.00; //ADD IN EACH POINTER VARIABLE HERE - FREEING UP SPACE IN MEMORY*******STEP D// //cudaDeviceReset(); free(hi);free(lo);free(p);free(price);free(smooth);free(detrender);free(period);free(qu);free(iu);free(ji);free(jq);free(ib);free(qb);free(sib);free(re);free(im);free(sre);free(sim);free(speriod);free(smperiod);free(qc);free(ic);free(ric);free(intperiod);free(sig);free(nois);free(snr); cudaFreeHost(rets);cudaFreeHost(zscores);free(cumpnl);free(op);free(sharp);free(sumi);free(vari);free(stdevi);free(m_avei); 
free(sumv);free(varv);free(stdevv);free(m_avev);free(dolls);free(cumdolls);free(sharpp);free(sumip);free(varip);free(stdevip);free(m_aveip);//free(a_h);free(start_h);free(stop_h); cudaFree(zscores_d);cudaFree(pnl_d);cudaFree(pos_d);cudaFree(rets_d);cudaFreeHost(pos);cudaFreeHost(pnl); //cudaFreeHost(zscores_d);cudaFreeHost(pnl_d);cudaFreeHost(pos_d);cudaFreeHost(rets_d); } } avv = avv+sharplist[gg][6][ii]/peri[gg]; //avg max sharpe if (sharplist[gg][6][ii] > 0.00) wins = wins+ii/peri[gg]; //winning markets printf("\n%.2f Max Sharpe of Market%d-%s Period%d is Test %.0f with STD %.2f and SNR %.0f\n", sharplist[gg][6][ii],gg,marks[gg-1],ii,sharplist[gg][0][ii],sharplist[gg][1][ii],sharplist[gg][2][ii]); fprintf(recon,"\n%.2f Max Sharpe of Market%d-%s Period%d is Test %.0f with STD %.2f and SNR %.0f\n", sharplist[gg][6][ii],gg,marks[gg-1],ii,sharplist[gg][0][ii],sharplist[gg][1][ii],sharplist[gg][2][ii]); sprintf(fnum, "%d", gg); sprintf(tmp, "%d", ii); strcpy(dest, PATH"OSrun"); //output directory for out-of-sample tests strcpy(foldr, ".dat"); strcat(dest,"-"); strcat(dest,fnum); strcat(dest,"-"); strcat(dest,tmp); strcat(dest,"-"); strcat(dest,marks[gg-1]); strcat(dest,foldr); ferr=f_openw(dest); //find and open output file// sprintf(source,dfiles[gg-1]); //find and open price data file// fin=f_openr(source); end=f_line(fin); end--; dt=(long*) calloc(end+1,sizeof(long)); op=(double*) calloc(end+1,sizeof(double)); hi=(double*) calloc(end+1,sizeof(double)); lo=(double*) calloc(end+1,sizeof(double)); p=(double*) calloc(end+1,sizeof(double)); price=(double*) calloc(end+1,sizeof(double)); smooth=(double*) calloc(end+1,sizeof(double)); detrender=(double*) calloc(end+1,sizeof(double)); period=(double*) calloc(end+1,sizeof(double)); qu=(double*) calloc(end+1,sizeof(double)); iu=(double*) calloc(end+1,sizeof(double)); ji=(double*) calloc(end+1,sizeof(double)); jq=(double*) calloc(end+1,sizeof(double)); ib=(double*) calloc(end+1,sizeof(double)); qb=(double*) calloc(end+1,sizeof(double)); sib=(double*) calloc(end+1,sizeof(double)); sqb=(double*) calloc(end+1,sizeof(double)); re=(double*) calloc(end+1,sizeof(double)); im=(double*) calloc(end+1,sizeof(double)); sre=(double*) calloc(end+1,sizeof(double)); sim=(double*) calloc(end+1,sizeof(double)); speriod=(double*) calloc(end+1,sizeof(double)); smperiod=(double*) calloc(end+1,sizeof(double)); qc=(double*) calloc(end+1,sizeof(double)); ic=(double*) calloc(end+1,sizeof(double)); ric=(double*) calloc(end+1,sizeof(double)); intperiod=(int*) calloc(end+1,sizeof(double)); sig=(double*) calloc(end+1,sizeof(double)); nois=(double*) calloc(end+1,sizeof(double)); snr=(double*) calloc(end+1,sizeof(double)); //** ALLOCATE SPACE FOR MEMORY FOR CUDA-RELATED HOST VARIABLES - USE PINNED/SHARED MEMORY FOR MORE SPEED!!! 
** //rets=(float*) calloc(end+1,sizeof(float)); cudaHostAlloc(&rets, (int)(end)*sizeof(float), cudaHostAllocDefault); //memset(rets, 0, (int)(end)*sizeof(float)); dolls=(double*) calloc(end+1,sizeof(double)); cumdolls=(double*) calloc(end+1,sizeof(double)); //pos=(float*) calloc(end+1,sizeof(float)); cudaHostAlloc(&pos, (int)(end)*sizeof(float), cudaHostAllocDefault); //memset(pos, 0, (int)(end)*sizeof(float)); //pnl=(float*) calloc(end+1,sizeof(float)); cudaHostAlloc(&pnl, (int)(end)*sizeof(float), cudaHostAllocDefault); //memset(pnl, 0, (int)(end)*sizeof(float)); cumpnl=(double*) calloc(end+1,sizeof(double)); //zscores=(float*) calloc(end+1,sizeof(float)); cudaHostAlloc(&zscores, (int)(end)*sizeof(float), cudaHostAllocDefault); //memset(zscores, 0, (int)(end)*sizeof(float)); sharpp=(double*) calloc(end+1,sizeof(double)); sumip=(double*) calloc(end+1,sizeof(double)); varip=(double*) calloc(end+1,sizeof(double)); stdevip=(double*) calloc(end+1,sizeof(double)); m_aveip=(double*) calloc(end+1,sizeof(double)); sharp=(double*) calloc(end+1,sizeof(double)); sumi=(double*) calloc(end+1,sizeof(double)); vari=(double*) calloc(end+1,sizeof(double)); stdevi=(double*) calloc(end+1,sizeof(double)); m_avei=(double*) calloc(end+1,sizeof(double)); sumv=(double*) calloc(end+1,sizeof(double)); varv=(double*) calloc(end+1,sizeof(double)); stdevv=(double*) calloc(end+1,sizeof(double)); m_avev=(double*) calloc(end+1,sizeof(double)); //** ALLOCATE SPACE FOR MEMORY FOR CUDA-RELATED DEVICE VARIABLES** cudaMalloc((void**)&zscores_d, (int)(end)*sizeof(float)); //cudaMemset(zscores_d, 0, (int)(end)*sizeof(float)); cudaMalloc((void**)&pos_d, (int)(end)*sizeof(float)); cudaMemset(pos_d, 0, (int)(end)*sizeof(float)); cudaMalloc((void**)&pnl_d, (int)(end)*sizeof(float)); cudaMemset(pnl_d, 0, (int)(end)*sizeof(float)); cudaMalloc((void**)&rets_d, (int)(end)*sizeof(float)); starto = LOOKBACK + (STEP*(ii-1)); stopf = LOOKBACK + (STEP*(ii-0)); if(ii>1) tachy = 1; //use to go back n peroids for max sharpe offset if(stopf>=end) stopf = end; i=0; while(fgets(line,100,fin)>0){ sscanf(line,"%ld %lf %lf %lf %lf",&dt[i],&op[i],&hi[i],&lo[i],&p[i]);i++;} //scan lines from data file and store in arrays - this is price data here// f_close(source,fin); //close data file ret(p, end, rets); lensz = (int)(sharplist[gg][2][ii-tachy] ); zscore(lensz, p, sumv, varv, end, zscores, stdevv, m_avev); //** COPY CUDA VARIABLES FROM CPU (HOST) TO GPU (DEVICE) - USE ASYNC TRANSFER FOR MORE SPEED SO CPU DOES NOT HAVE TO WAIT FOR GPU TO FINISH OPERATION AND CAN PROCEED FURTHER IN THE MAIN PROGRAM** cudaMemcpyAsync(zscores_d, zscores, (int)(end)*sizeof(float), cudaMemcpyHostToDevice,0); cudaMemcpyAsync(rets_d, rets, (int)(end)*sizeof(float), cudaMemcpyHostToDevice,0); lenny=stopf-starto; dim3 threads; threads.x = 896; //use 896 threads as per specific GPU device for higher OCCUPANCY/USE OF CARD - trial-and-error via PROFILING //dim3 blocks; blocks.x = ((int)(end)/threads.x) + 1; //kernelSim<<<threads,blocks>>>(zscores_d,rets_d,pnl_d,pos_d,start,stop,a[z]); //** CALL GPU FUNCTION/KERNEL HERE FOR MODEL PARAMETER SWEEP TO GENERATE IN_SAMPLE RESULTS** kernelSim<<<threads,112>>>(zscores_d,rets_d,pnl_d,pos_d,starto,stopf,(float)(sharplist[gg][1][ii-tachy]),lensz); //** COPY CUDA VARIABLES/RESULTS FROM GPU (DEVICE) BACK TO CPU (HOST) - MUST WAIT FOR GPU OPERATION/FUNCTION TO FINISH HERE SINCE LOW ASYNC/CONCURRENCY ON NON_TESLA GPU DEVICES** cudaMemcpy(pos, pos_d, (int)(end)*sizeof(float)/*stop-start*/, cudaMemcpyDeviceToHost); cudaMemcpy(pnl, pnl_d, 
(int)(end)*sizeof(float), cudaMemcpyDeviceToHost); //for(i=starto;i<stopf;i++){ //OUT-OF-SAMPLE runs for old CPU CODE - NOW WE'RE USING GPU INSTEAD FOR MORE SPEED** // // if(zscores[i] > sharplist[gg][1][ii-tachy]) pos[i] = 1.00; // if(zscores[i] < -sharplist[gg][1][ii-tachy]) pos[i] = -1.00; // pnl[i] = (pos[i] * rets[i]);} sharpep(pnl, sumip, varip, starto, stopf, stdevip, m_aveip, sharpp); //ocum[gg][0][ii] = cumpnl[stopf-1]; //ocum[gg][1][ii] = cumdolls[stopf-1]; ocum[gg][2][ii] = sharpp[stopf-1]; if (sharpp[stopf-1] > 0.00) combos = combos + 1; sharpies = sharpies + sharpp[stopf-1]/(peri[gg]); counters=counters+1; mat =(((((ii+0)*(double)(STEP)))+LOOKBACK)/260); if (mat >= ((double)(end)/260)) mat = ((double)(end)/260); if (stop>=end) mat = ((double)(end)/260); printf("\nOS PNL: Market%d-%s Period%d", gg,marks[gg-1],ii); printf("\nOS-Sharpe\t%.2f", sharpp[stopf-1]); printf("\nOS-Param1\t%.2f", sharplist[gg][1][ii-tachy]); printf("\nOS-Param2\t%.0f", sharplist[gg][2][ii-tachy]); //printf("\nOS-Cum Ret\t%.2f%%", cumpnl[stopf-1]*100); //printf("\nOS-Ann Ret\t%.2f%%", (cumpnl[stopf-1]*100)/(LOOKBACK/260)); //printf("\nOS-Ann Vol\t%.2f%%", ABS(((cumpnl[stopf-1]*100)/(LOOKBACK/260))/sharpp[stopf-1])); printf("\nNum of Years: %.2f thru %.2f of %.2f total\n", (((((ii-1)*(double)(STEP)))+LOOKBACK)/260),mat,((double)(end)/260)); fprintf(recon,"\nOS PNL: Market%d-%s Period%d", gg,marks[gg-1],ii); fprintf(recon,"\nOS-Sharpe\t%.2f", sharpp[stopf-1]); fprintf(recon,"\nOS-Param1\t%.2f", sharplist[gg][1][ii-tachy]); fprintf(recon,"\nOS-Param2\t%.0f", sharplist[gg][2][ii-tachy]); //fprintf(recon,"\nOS-Cum Ret\t%.2f%%", cumpnl[stopf-1]*100); //fprintf(recon,"\nOS-Ann Ret\t%.2f%%", (cumpnl[stopf-1]*100)/(LOOKBACK/260)); //fprintf(recon,"\nOS-Ann Vol\t%.2f%%", ABS(((cumpnl[stopf-1]*100)/(LOOKBACK/260))/sharpp[stopf-1])); fprintf(recon,"\nNum of Years: %.2f thru %.2f of %.2f total\n", (((((ii-1)*(double)(STEP)))+LOOKBACK)/260),mat,((double)(end)/260)); for(i=starto;i<stopf;i++) { fprintf(ferr,"%ld\t %10.6lf\t %10.3lf\t %10.2lf\t %10.5lf\t %10.5lf\t %10.5lf\n",dt[i],p[i],zscores[i],pos[i],rets[i],pnl[i],sharpp[i]); fprintf(ferri,"%ld\t %10.6lf\t %10.3lf\t %10.2lf\t %10.5lf\t %10.5lf\t %10.5lf\n",dt[i],p[i],zscores[i],pos[i],rets[i],pnl[i],sharpp[i]+ocum[gg][2][ii-1]); } // f_close(dest,ferr); //close output file if (sharpp[stopf-1] > 0.00) winos = winos+ii/(peri[gg]); /*ocum[gg][0][ii] = cumpnl[stopf-1]; ocum[gg][1][ii] = cumdolls[stopf-1]; ocum[gg][2][ii] = sharpp[stopf-1];*/ tachy=0; free(hi);free(lo);free(p);free(price);free(smooth);free(detrender);free(period);free(qu);free(iu);free(ji);free(jq);free(ib);free(qb);free(sib);free(re);free(im);free(sre);free(sim);free(speriod);free(smperiod);free(qc);free(ic);free(ric);free(intperiod);free(sig);free(nois);free(snr); cudaFreeHost(rets);cudaFreeHost(zscores);free(cumpnl);free(op);free(sharp);free(sumi);free(vari);free(stdevi);free(m_avei);//cudaFree(zscores);cudaFree(rets);cudaFree(pnl);cudaFree(pos); free(sumv);free(varv);free(stdevv);free(m_avev);free(dolls);free(cumdolls);free(sharpp);free(sumip);free(varip);free(stdevip);free(m_aveip);cudaFreeHost(pos);cudaFreeHost(pnl); cudaFree(zscores_d);cudaFree(pnl_d);cudaFree(pos_d);cudaFree(rets_d); } f_close(desta,ferri); //close output file } t2 = clock(); diff = (((float)t2 - (float)t1) / 1000000.0F ) * 1000; printf("\n\n%.2f min..Avg I-MaxSharpe is %.2f..+I-Sharpes are %d of %d combos or %.0f%%..\n+%d Win Markets out of %i..Avg All I-Sharpes is 
%.2f\n\n",diff/60.0,avv/NUMI,combo,counter,(((double)(combo)/(double)(counter))*100),wins,NUMI,sharpie/(double)(counter)); fprintf(recon,"\n\n%.2f min..Avg I-MaxSharpe is %.2f..+I-Sharpes are %d of %d combos or %.0f%%..\n+%d Win Markets out of %i..Avg All I-Sharpes is %.2f\n\n",diff/60.0,avv/NUMI,combo,counter,(((double)(combo)/(double)(counter))*100),wins,NUMI,sharpie/(double)(counter)); printf("\n\n%.2f min..+O-Sharpes are %d of %d combos or %.0f%%..\n+%d Win Markets out of %i..Avg All O-Sharpes is %.2f\n\n",diff/60.0,combos,counters,(((double)(combos)/(double)(counters))*100),winos,NUMI,sharpies/(double)(NUMI));//(double)(counters)); //NUMI fprintf(recon,"\n\n%.2f min..+O-Sharpes are %d of %d combos or %.0f%%..\n+%d Win Markets out of %i..Avg All O-Sharpes is %.2f\n\n",diff/60.0,combos,counters,(((double)(combos)/(double)(counters))*100),winos,NUMI,sharpies/(double)(NUMI)); //NUMI f_close(PATH"recon.dat",recon); //**RESET GPU DEVICE**// cudaDeviceReset(); system("pause"); }
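Stripped of the trading logic, the GPU round-trip in the program above follows one standard pattern that its comments describe: pinned host buffers from cudaHostAlloc so cudaMemcpyAsync can return control to the CPU, a kernel launch, and a blocking cudaMemcpy to bring results back. The sketch below isolates that pattern; kernelSim's real signature is the one used above, so a trivial stand-in kernel (scaleKernel) and illustrative sizes and names are used instead.

#include <cuda_runtime.h>

// Stand-in kernel: the real program launches kernelSim on z-scores and returns instead.
__global__ void scaleKernel(const float* in, float* out, float a, int n)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) out[i] = a * in[i];
}

void roundTrip(float a, int n)
{
    float *h_in, *h_out, *d_in, *d_out;
    cudaHostAlloc(&h_in,  n * sizeof(float), cudaHostAllocDefault);  // pinned host memory
    cudaHostAlloc(&h_out, n * sizeof(float), cudaHostAllocDefault);
    cudaMalloc(&d_in,  n * sizeof(float));
    cudaMalloc(&d_out, n * sizeof(float));

    for (int i = 0; i < n; i++) h_in[i] = (float)i;

    // Async H2D copy on the default stream: the CPU may keep working meanwhile.
    cudaMemcpyAsync(d_in, h_in, n * sizeof(float), cudaMemcpyHostToDevice, 0);

    int block = 256;
    int grid  = (n + block - 1) / block;
    scaleKernel<<<grid, block>>>(d_in, d_out, a, n);

    // Blocking D2H copy: it is queued after the kernel on the same stream,
    // so the results are complete when it returns.
    cudaMemcpy(h_out, d_out, n * sizeof(float), cudaMemcpyDeviceToHost);

    cudaFree(d_in); cudaFree(d_out);
    cudaFreeHost(h_in); cudaFreeHost(h_out);
}

One note on the launch configuration in the listing above: in a <<<A,B>>> launch the first argument is the grid dimension and the second the block dimension, so kernelSim<<<threads,112>>> with threads.x = 896 launches 896 blocks of 112 threads each; the in-code comment about "896 threads" therefore describes the grid, not the block.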
a5876e338df6cf038a44e208e8baad273a8258fa.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime.h> #include <stdio.h> #include <stdlib.h> #include <iostream> using namespace std; __global__ void mandelKernel(int* result, float lowerX, float lowerY, float stepX, float stepY, int iter, int resX, int resY, int pitch) { // To avoid errors caused by floating-point arithmetic, use the following pseudo code // int thisY = threadIdx.y + blockIdx.y * blockDim.y; int thisX = threadIdx.x + blockIdx.x * blockDim.x; // int* row = (int*)((char*)result+thisX*pitch); if (thisY >= resY || thisX >= resX) return; float x = lowerX + (float)thisX * stepX; float y = lowerY + (float)thisY * stepY; float x0 = x, y0 = y; int i; for (i = 0; i < iter; i++) { if (x * x + y * y > 4.f) break; float new_x = x * x - y * y; float new_y = 2.f * x * y; x = new_x + x0; y = new_y + y0; } result[thisY*pitch+thisX] = i; return; } // Host front-end function that allocates the memory and launches the GPU kernel void hostFE (float upperX, float upperY, float lowerX, float lowerY, int* img, int resX, int resY, int maxIterations) { float stepX = (upperX - lowerX) / resX; float stepY = (upperY - lowerY) / resY; int N = resX*resY; // allocate host memory size_t in = N * sizeof(int); int* result; hipHostMalloc(&result, in, hipHostMallocPortable); // allocate device memory int *d_result; size_t pitch; // hipMalloc(&d_result, in); hipMallocPitch((void **)&d_result, &pitch, sizeof(int)*resX, (size_t) resY); // define the kernel launch configuration dim3 blockSize(32, 32); dim3 gridSize((pitch/sizeof(float)+blockSize.x-1)/blockSize.x,(resY+blockSize.y-1)/blockSize.y); // launch the kernel mandelKernel << < gridSize, blockSize >> >(d_result, lowerX, lowerY, stepX, stepY, maxIterations, resX, resY, pitch/sizeof(float)); // copy the result from device back to host hipMemcpy2D(result, sizeof(float)*resX,d_result, pitch, sizeof(float)*resX, resY, hipMemcpyDeviceToHost); memcpy(img, result, in); // free device memory hipFree(d_result); // free host memory hipHostFree(result); }
a5876e338df6cf038a44e208e8baad273a8258fa.cu
#include <cuda.h> #include <stdio.h> #include <stdlib.h> #include <iostream> using namespace std; __global__ void mandelKernel(int* result, float lowerX, float lowerY, float stepX, float stepY, int iter, int resX, int resY, int pitch) { // To avoid errors caused by floating-point arithmetic, use the following pseudo code // int thisY = threadIdx.y + blockIdx.y * blockDim.y; int thisX = threadIdx.x + blockIdx.x * blockDim.x; // int* row = (int*)((char*)result+thisX*pitch); if (thisY >= resY || thisX >= resX) return; float x = lowerX + (float)thisX * stepX; float y = lowerY + (float)thisY * stepY; float x0 = x, y0 = y; int i; for (i = 0; i < iter; i++) { if (x * x + y * y > 4.f) break; float new_x = x * x - y * y; float new_y = 2.f * x * y; x = new_x + x0; y = new_y + y0; } result[thisY*pitch+thisX] = i; return; } // Host front-end function that allocates the memory and launches the GPU kernel void hostFE (float upperX, float upperY, float lowerX, float lowerY, int* img, int resX, int resY, int maxIterations) { float stepX = (upperX - lowerX) / resX; float stepY = (upperY - lowerY) / resY; int N = resX*resY; // allocate host memory size_t in = N * sizeof(int); int* result; cudaHostAlloc(&result, in, cudaHostAllocPortable); // allocate device memory int *d_result; size_t pitch; // cudaMalloc(&d_result, in); cudaMallocPitch((void **)&d_result, &pitch, sizeof(int)*resX, (size_t) resY); // define the kernel launch configuration dim3 blockSize(32, 32); dim3 gridSize((pitch/sizeof(float)+blockSize.x-1)/blockSize.x,(resY+blockSize.y-1)/blockSize.y); // launch the kernel mandelKernel << < gridSize, blockSize >> >(d_result, lowerX, lowerY, stepX, stepY, maxIterations, resX, resY, pitch/sizeof(float)); // copy the result from device back to host cudaMemcpy2D(result, sizeof(float)*resX,d_result, pitch, sizeof(float)*resX, resY, cudaMemcpyDeviceToHost); memcpy(img, result, in); // free device memory cudaFree(d_result); // free host memory cudaFreeHost(result); }
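hostFE above relies on cudaMallocPitch / cudaMemcpy2D, which pad each device row so that row starts stay aligned; the kernel therefore receives the pitch converted to elements and indexes result[thisY*pitch + thisX] rather than result[thisY*resX + thisX]. The commented-out row pointer in mandelKernel hints at the more general byte-offset form. Below is a minimal sketch of that convention; the buffer and kernel names are illustrative and not part of the original assignment code.

#include <cuda_runtime.h>
#include <stdlib.h>

// Minimal pitched-memory sketch (illustrative names, not the original code).
__global__ void fillPitched(int* data, size_t pitchBytes, int width, int height)
{
    int x = blockIdx.x * blockDim.x + threadIdx.x;
    int y = blockIdx.y * blockDim.y + threadIdx.y;
    if (x >= width || y >= height) return;
    // General form: offset by whole rows in bytes, then index within the row.
    int* row = (int*)((char*)data + (size_t)y * pitchBytes);
    row[x] = y * width + x;
}

void pitchedExample(int width, int height)
{
    int* d_data;
    size_t pitch;                                   // pitch is returned in BYTES
    cudaMallocPitch((void**)&d_data, &pitch, width * sizeof(int), height);

    dim3 block(32, 32);
    dim3 grid((width + block.x - 1) / block.x, (height + block.y - 1) / block.y);
    fillPitched<<<grid, block>>>(d_data, pitch, width, height);

    // cudaMemcpy2D strides by the device pitch but packs rows tightly on the host.
    int* h_data = (int*)malloc((size_t)width * height * sizeof(int));
    cudaMemcpy2D(h_data, width * sizeof(int), d_data, pitch,
                 width * sizeof(int), height, cudaMemcpyDeviceToHost);

    free(h_data);
    cudaFree(d_data);
}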
52114595de94b284584f518fe7ce125dc1b447fc.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "cudaRTCommon.h" #include <thrust/remove.h> #include <thrust/execution_policy.h> #define BLOCK_SIZE 16 #define NORMALRAY_BOUND_MAX 10 #define PATHSTREAM_SIZE 1E4*64 namespace cudaRTPTStream { RT_ATTRIBS_N(0) RT_ATTRIBS_BGN RT_ATTRIBS_END enum RAYTYPE { RAYTYPE_EYE = 0, RAYTYPE_DIFF = 1, RAYTYPE_SPEC = 2 }; struct PTPathVertex { uint isTerminated; uint2 pathPixel; float3 pathOutDir; float3 pathVertexPos; float3 pathOutMulTerm; RAYTYPE pathType; float3 pathSample; float3 pathAccumSample; uint pathSampleN; uint pathSampleDepth; hiprandState_t randState; __device__ PTPathVertex(uint _isTerminated, uint2 _pathPixel, float3 _pathOutDir, float3 _pathVertexPos, RAYTYPE _pathType, hiprandState_t _randState) : isTerminated(_isTerminated) , pathPixel(_pathPixel) , pathOutDir(_pathOutDir) , pathVertexPos(_pathVertexPos) , pathOutMulTerm(make_float3(1.f,1.f,1.f)) , pathType(_pathType) , pathSample(make_float3(0.f, 0.f, 0.f)) , pathAccumSample(make_float3(0.f, 0.f, 0.f)) , pathSampleN(0) , pathSampleDepth(0) , randState(_randState) {} }; PTPathVertex* g_devPathQueue = nullptr; uint g_uPathQueueCur = 0; uint g_uPathQueueSize = 0; PTPathVertex** g_devPathStream = nullptr; uint g_uPathStreamSize = PATHSTREAM_SIZE; void freeStreamMem() { g_uPathQueueCur = g_uPathQueueSize = 0; CUFREE(g_devPathQueue); CUFREE(g_devPathStream); } void allocateStreamMem(uint queueSize = 480000) { g_uPathQueueSize = queueSize; HANDLE_ERROR(hipMalloc((void**)&g_devPathQueue, sizeof(PTPathVertex) * g_uPathQueueSize)); HANDLE_ERROR(hipMemset((void*)g_devPathQueue, 0, sizeof(PTPathVertex) * g_uPathQueueSize)); HANDLE_ERROR(hipMalloc((void**)&g_devPathStream, sizeof(PTPathVertex*) * g_uPathStreamSize)); HANDLE_ERROR(hipMemset((void*)g_devPathStream, 0, sizeof(PTPathVertex*) * g_uPathStreamSize)); } float* g_devResultData = nullptr; float* g_devAccResultData = nullptr; NPMathHelper::Mat4x4 g_matLastCamMat; NPMathHelper::Mat4x4 g_matCurCamMat; uint32 g_uCurFrameN = 0; size_t g_resultDataSize = 0; uint32 WangHash(uint32 a) { a = (a ^ 61) ^ (a >> 16); a = a + (a << 3); a = a ^ (a >> 4); a = a * 0x27d4eb2d; a = a ^ (a >> 15); return a; } __global__ void pt_traceSample_kernel(RTVertex* vertices, RTTriangle* triangles, RTMaterial* materials, CURTTexture* textures, PTPathVertex** pathStream, uint activePathStreamSize) { uint x = blockIdx.x * blockDim.x + threadIdx.x; if (x >= activePathStreamSize || pathStream[x]->isTerminated) return; PTPathVertex* procVertex = pathStream[x]; CURay ray = CURay(procVertex->pathVertexPos, procVertex->pathOutDir); TracePrimitiveResult traceResult; if (TracePrimitive(ray, traceResult, M_INF, M_FLT_BIAS_EPSILON, false)) { RTTriangle* tri = &triangles[traceResult.triId]; RTMaterial* mat = &materials[tri->matInd]; RTVertex* v0 = &vertices[tri->vertInd0]; RTVertex* v1 = &vertices[tri->vertInd1]; RTVertex* v2 = &vertices[tri->vertInd2]; float2 uv0 = make_float2(v0->tex._x, v0->tex._y); float2 uv1 = make_float2(v1->tex._x, v1->tex._y); float2 uv2 = make_float2(v2->tex._x, v2->tex._y); float2 uv = uv0 * traceResult.w + uv1 * traceResult.u + uv2 * traceResult.v; float3 n0 = V32F3(v0->norm); float3 n1 = V32F3(v1->norm); float3 n2 = V32F3(v2->norm); float3 norm = n0 * traceResult.w + n1 * traceResult.u + n2 * traceResult.v; float3 diff; float3 emissive; float trans; float specular; float metallic; float roughness; float anisotropic; float sheen; float sheenTint; float clearcoat; float clearcoatGloss; GetMaterialColors(mat, 
uv, textures, diff, norm, emissive, trans, specular, metallic, roughness , anisotropic, sheen, sheenTint, clearcoat, clearcoatGloss); float3 nl = vecDot(norm, ray.dir) < 0.f ? norm : -1 * norm; { // Get some random microfacet float3 hDir = ImportanceSampleGGX(make_float2(hiprand_uniform(&procVertex->randState), hiprand_uniform(&procVertex->randState)), roughness, nl); // Calculate flesnel float voH = vecDot(-1 * ray.dir, hDir); float3 f0 = vecLerp(0.08 * make_float3(specular, specular, specular), diff, metallic); float3 brdf_f = Fresnel(f0, voH); // Reflected or Refracted float reflProb = lerp(length(brdf_f), 1.0f, metallic); float refrProb = trans; float3 reflDir; float3 refrDir; CURay nextRay = ray; float3 lightMulTerm; RAYTYPE nextRayType = procVertex->pathType; if (refrProb > 0) { bool into = vecDot(nl, norm) > 0.f; float nt = specular * 0.8f + 1.f; float nc = 1.0f; float nnt = into ? nc / nt : nt / nc; float ddn = vecDot(hDir, ray.dir); float cos2t = 1.f - nnt * nnt *(1.f - ddn * ddn); if (cos2t < 0.f) { reflProb = 1.0f;// refrProb = 0.f; } else { refrDir = normalize(ray.dir * nnt - hDir * (ddn*nnt + sqrtf(cos2t))); } } if (reflProb > 0) { reflDir = normalize(ray.dir - hDir * 2 * vecDot(hDir, ray.dir)); if (vecDot(reflDir, nl) < 0.f) reflProb = 0.f; } // Reflected if (ProbabilityRand(&procVertex->randState, reflProb)) { nextRay = CURay(ray.orig + (traceResult.dist - M_FLT_BIAS_EPSILON) * ray.dir, reflDir); // ShootRayResult nextRayResult = pt0_normalRay<depth + 1>(nextRay, vertices, triangles, materials, textures, randstate); // Microfacet specular = D*G*F / (4*NoL*NoV) // pdf = D * NoH / (4 * VoH) // (G * F * VoH) / (NoV * NoH) float VoH = vecDot(-1 * ray.dir, hDir); float NoV = vecDot(nl, -1 * ray.dir); float NoH = vecDot(nl, hDir); float NoL = vecDot(nl, reflDir); float G = GeometricVisibility(roughness, NoV, NoL, VoH); //shadeResult = vecMul((brdf_f * G * VoH) / (NoV * NoH * reflProb) , nextRayResult.light) + emissive; lightMulTerm = (brdf_f * G * VoH) / (NoV * NoH * reflProb); nextRayType = RAYTYPE_SPEC; } // Diffused or Transmited else { // Transmited if (ProbabilityRand(&procVertex->randState, refrProb)) { nextRay = CURay(ray.orig + (traceResult.dist + M_FLT_BIAS_EPSILON) * ray.dir + refrDir * M_FLT_BIAS_EPSILON, refrDir); //ShootRayResult nextRayResult = pt0_normalRay<depth + 1>(nextRay, vertices, triangles, materials, textures, randstate); float cosine = vecDot(-1 * nl, refrDir); //shadeResult = (cosine * vecMul(diff, nextRayResult.light)) / (refrProb * (1 - reflProb)) + emissive; lightMulTerm = cosine * diff / (refrProb * (1 - reflProb)); nextRayType = RAYTYPE_SPEC; } // Diffused else { float3 w = nl; float3 u = normalize(vecCross((fabs(w.x) > .1 ? 
make_float3(0, 1, 0) : make_float3(1, 0, 0)), w)); float3 v = vecCross(w, u); u = vecCross(v, w); float r1 = 2.f * M_PI * hiprand_uniform(&procVertex->randState); float r2cos = sqrtf(hiprand_uniform(&procVertex->randState)); float r2sin = 1.f - r2cos*r2cos; float3 diffDir = normalize(w * r2cos + u * r2sin * cosf(r1) + v * r2sin * sinf(r1)); nextRay = CURay(ray.orig + traceResult.dist * ray.dir + diffDir * M_FLT_BIAS_EPSILON, diffDir); //ShootRayResult nextRayResult = pt0_normalRay<depth + 1>(nextRay, vertices, triangles, materials, textures, randstate); float VoH = vecDot(-1 * ray.dir, hDir); float NoV = vecDot(nl, -1 * ray.dir); float NoL = vecDot(nl, diffDir); //shadeResult = (M_PI * vecMul(Diffuse(diff, roughness, NoV, NoL, VoH), nextRayResult.light)) / ((1 - refrProb) * (1 - reflProb)) + emissive; lightMulTerm = M_PI * Diffuse(diff, roughness, NoV, NoL, VoH) / ((1 - refrProb) * (1 - reflProb)); nextRayType = RAYTYPE_DIFF; } } procVertex->pathSample = procVertex->pathSample + vecMul(emissive , procVertex->pathOutMulTerm); float pixelContrib = length(procVertex->pathOutMulTerm) * length(lightMulTerm); if (/*(procVertex->pathType == RAYTYPE_DIFF && nextRayType == RAYTYPE_SPEC) ||*/ length(emissive) > 0.f) pixelContrib = 0.f; if (hiprand_uniform(&procVertex->randState) > pixelContrib || procVertex->pathSampleDepth + 1 >= NORMALRAY_BOUND_MAX) { procVertex->pathAccumSample = procVertex->pathAccumSample + procVertex->pathSample; procVertex->pathSampleN++; procVertex->isTerminated = true; } else { procVertex->pathOutMulTerm = vecMul(procVertex->pathOutMulTerm, lightMulTerm); procVertex->pathOutDir = nextRay.dir; procVertex->pathVertexPos = nextRay.orig; procVertex->pathType = nextRayType; procVertex->pathSampleDepth++; } } } else { procVertex->pathAccumSample = procVertex->pathAccumSample + procVertex->pathSample; procVertex->pathSampleN++; procVertex->isTerminated = true; } } __global__ void pt_genPathQueue_kernel(float3 camPos, float3 camDir, float3 camUp, float3 camRight, float fov, float width, float height, uint32 frameN, uint32 hashedFrameN, PTPathVertex* pathQueue) { uint x = blockIdx.x * blockDim.x + threadIdx.x; uint y = blockIdx.y * blockDim.y + threadIdx.y; if (x >= width || y >= height) return; uint ind = (y * width + x); float u = (2.f * ((float)x + 0.5f) / width - 1.f) * tan(fov * 0.5f) * width / height; float v = (2.f * ((float)y + 0.5f) / height - 1.f) * tan(fov * 0.5f); hiprandState_t randstate; hiprand_init(hashedFrameN + ind, 0, 0, &randstate); float au = u + (hiprand_uniform(&randstate) - 0.5f) / height * tan(fov * 0.5f); float av = v + (hiprand_uniform(&randstate) - 0.5f) / height * tan(fov * 0.5f); float3 dir = normalize(camRight * au + camUp * av + camDir); pathQueue[ind] = PTPathVertex(false, make_uint2(x,y), dir, camPos, RAYTYPE_EYE, randstate); } __global__ void pt_assignPathStream_kernel(PTPathVertex** pathStream, uint pathStreamSize, PTPathVertex* pathQueue, uint pathQueueCur, uint pathQueueSize) { uint ind = blockIdx.x * blockDim.x + threadIdx.x; if (ind >= pathStreamSize) { int pathQueueInd = pathQueueCur + ind - pathStreamSize; PTPathVertex* assignSample = nullptr; if (pathQueueInd < pathQueueSize) { assignSample = &pathQueue[pathQueueInd]; } pathStream[ind] = assignSample; } } __global__ void pt_applyPathQueueResult_kernel(PTPathVertex* pathQueue, uint pathQueueSize, uint width, uint height, uint frameN, float* result, float* accResult) { uint x = blockIdx.x * blockDim.x + threadIdx.x; if (x >= pathQueueSize) return; // add calculating sample to the result if 
(!pathQueue[x].isTerminated) { pathQueue[x].pathAccumSample = pathQueue[x].pathAccumSample + pathQueue[x].pathSample; pathQueue[x].pathSampleN++; } if (pathQueue[x].pathSampleN > 0) { float3 sampleResult = pathQueue[x].pathAccumSample / (float)pathQueue[x].pathSampleN; float resultInf = 1.f / (float)(frameN + 1); float oldInf = 1.f - resultInf; uint ind = pathQueue[x].pathPixel.y * width + pathQueue[x].pathPixel.x; result[ind * 3] = max(resultInf * sampleResult.x + oldInf * result[ind * 3], 0.f); result[ind * 3 + 1] = max(resultInf * sampleResult.y + oldInf * result[ind * 3 + 1], 0.f); result[ind * 3 + 2] = max(resultInf * sampleResult.z + oldInf * result[ind * 3 + 2], 0.f); } } void CleanMem() { freeStreamMem(); freeAllBVHCudaMem(); CUFREE(g_devResultData); CUFREE(g_devAccResultData); } //struct ray_greater_compare //{ // __hd__ bool operator()(const PTPathVertex* vert1, const PTPathVertex* vert2) // { // int vert1Score = (vert1->pathOutDir.x > 0) + (vert1->pathOutDir.y > 0) + (vert1->pathOutDir.z > 0); // int vert2Score = (vert2->pathOutDir.x > 0) + (vert2->pathOutDir.y > 0) + (vert2->pathOutDir.z > 0); // return vert1Score > vert2Score; // } //}; struct is_terminated { __hd__ bool operator()(const PTPathVertex* vert) { return vert->isTerminated; } }; bool Render(NPMathHelper::Vec3 camPos, NPMathHelper::Vec3 camDir, NPMathHelper::Vec3 camUp, float fov, RTScene* scene , float width, float height, float* result) { // Check and allocate everything if (!scene || !scene->GetCompactBVH()->IsValid()) return false; NPMathHelper::Vec3 camRight = camDir.cross(camUp).normalize(); camUp = camRight.cross(camDir).normalize(); g_matLastCamMat = g_matCurCamMat; g_matCurCamMat = NPMathHelper::Mat4x4::lookAt(camPos, camPos + camDir, camUp); g_uCurFrameN = (g_matLastCamMat != g_matCurCamMat) ? 
0 : g_uCurFrameN + 1; if (!g_bIsCudaInit || scene->GetIsCudaDirty()) { CleanMem(); g_matLastCamMat = g_matCurCamMat; g_uCurFrameN = 0; initAllSceneCudaMem(scene); allocateStreamMem(width * height); size_t mem_tot; size_t mem_free; hipMemGetInfo(&mem_free, &mem_tot); std::cout << "Memory Used : " << mem_tot-mem_free << "/" << mem_tot << " -> Free " << mem_free << std::endl; } else if (scene->GetIsCudaMaterialDirty()) { updateAllSceneMaterialsCudaMem(scene); g_uCurFrameN = 0; } if (!g_bIsCudaInit) return false; if (!g_devResultData || !g_devAccResultData || g_resultDataSize != (sizeof(float) * 3 * width * height)) { g_resultDataSize = sizeof(float) * 3 * width * height; CUFREE(g_devResultData); hipMalloc((void**)&g_devResultData, g_resultDataSize); CUFREE(g_devAccResultData); hipMalloc((void**)&g_devAccResultData, g_resultDataSize); } float3 f3CamPos = V32F3(camPos); float3 f3CamUp = V32F3(camUp); float3 f3CamDir = V32F3(camDir); float3 f3CamRight = V32F3(camRight); // Kernel go here dim3 block1(BLOCK_SIZE*BLOCK_SIZE, 1, 1); dim3 block2(BLOCK_SIZE, BLOCK_SIZE, 1); dim3 renderGrid(ceil(width / (float)block2.x), ceil(height / (float)block2.y), 1); pt_genPathQueue_kernel << < renderGrid, block2 >> > (f3CamPos, f3CamDir, f3CamUp, f3CamRight, fov, width, height , g_uCurFrameN, WangHash(g_uCurFrameN), g_devPathQueue); hipDeviceSynchronize(); uint activePathStreamSize = 0; g_uPathQueueCur = 0; while (g_uPathQueueCur < g_uPathQueueSize || activePathStreamSize > 0) { uint tempActivePathStreamSize = activePathStreamSize; pt_assignPathStream_kernel << < dim3(ceil((float)PATHSTREAM_SIZE / (float)block1.x), 1, 1), block1 >> >(g_devPathStream, activePathStreamSize, g_devPathQueue, g_uPathQueueCur, g_uPathQueueSize); //readjust activePathStreamSize activePathStreamSize = min((uint)PATHSTREAM_SIZE, activePathStreamSize + (g_uPathQueueSize - g_uPathQueueCur)); g_uPathQueueCur += activePathStreamSize - tempActivePathStreamSize; hipDeviceSynchronize(); //test sorting ray for more coherent tracing -> it does not improve performance //thrust::sort(thrust::device, g_devPathStream, g_devPathStream + activePathStreamSize, ray_greater_compare()); pt_traceSample_kernel << < dim3(ceil((float)activePathStreamSize / (float)block1.x), 1, 1), block1 >> > (g_devVertices, g_devTriangles, g_devMaterials, g_devTextures, g_devPathStream, activePathStreamSize); hipDeviceSynchronize(); //compact pathstream and find activePathStreamSize value PTPathVertex** compactedStreamEndItr = thrust::remove_if(thrust::device, g_devPathStream, g_devPathStream + activePathStreamSize, is_terminated()); activePathStreamSize = compactedStreamEndItr - g_devPathStream; } pt_applyPathQueueResult_kernel << < dim3(ceil((float)g_uPathQueueSize / (float)block1.x), 1, 1), block1 >> >(g_devPathQueue, g_uPathQueueSize, width, height, g_uCurFrameN, g_devResultData, g_devAccResultData); // Copy result to host hipMemcpy(result, g_devResultData, g_resultDataSize, hipMemcpyDeviceToHost); return true; } }
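The Render loop above implements wavefront (streamed) path tracing: a bounded stream of PTPathVertex pointers is traced one bounce per kernel launch, and thrust::remove_if with the is_terminated predicate compacts the stream so finished paths stop occupying threads before the freed slots are refilled from the path queue. What follows is a minimal sketch of just that compaction step, written against the plain CUDA runtime for brevity; PathState, initStream, advanceOneBounce and the sizes used are illustrative stand-ins, not symbols from cudaRTPTStream.

#include <cuda_runtime.h>
#include <thrust/remove.h>
#include <thrust/execution_policy.h>
#include <cstdio>

// Simplified stand-in for PTPathVertex: only a termination flag and a bounce counter.
struct PathState
{
    int isTerminated;
    int bounce;
};

// Same role as is_terminated above: tells thrust::remove_if which stream slots to drop.
struct isDone
{
    __host__ __device__ bool operator()(const PathState* s) const { return s->isTerminated != 0; }
};

__global__ void initStream(PathState* pool, PathState** stream, unsigned n)
{
    unsigned i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= n) return;
    pool[i].isTerminated = 0;
    pool[i].bounce = 0;
    stream[i] = &pool[i];
}

// Stand-in for pt_traceSample_kernel: advance each live path, terminate after three bounces.
__global__ void advanceOneBounce(PathState** stream, unsigned active)
{
    unsigned i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= active) return;
    PathState* s = stream[i];
    if (++s->bounce >= 3) s->isTerminated = 1;
}

int main()
{
    const unsigned n = 1 << 16;
    PathState*  pool   = nullptr;
    PathState** stream = nullptr;
    cudaMalloc(&pool,   n * sizeof(PathState));
    cudaMalloc(&stream, n * sizeof(PathState*));

    const unsigned block = 256;
    initStream<<<(n + block - 1) / block, block>>>(pool, stream, n);

    unsigned active = n;
    while (active > 0)
    {
        advanceOneBounce<<<(active + block - 1) / block, block>>>(stream, active);
        // Compaction: terminated paths are removed so the next launch covers only live paths.
        PathState** newEnd = thrust::remove_if(thrust::device, stream, stream + active, isDone());
        active = static_cast<unsigned>(newEnd - stream);
        printf("paths still active: %u\n", active);
    }

    cudaFree(pool);
    cudaFree(stream);
    return 0;
}

Keeping only live paths at the front of the stream is what allows the trace kernel above to be launched with exactly activePathStreamSize threads on every bounce.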
52114595de94b284584f518fe7ce125dc1b447fc.cu
#include "cudaRTCommon.h" #include <thrust/remove.h> #include <thrust/execution_policy.h> #define BLOCK_SIZE 16 #define NORMALRAY_BOUND_MAX 10 #define PATHSTREAM_SIZE 1E4*64 namespace cudaRTPTStream { RT_ATTRIBS_N(0) RT_ATTRIBS_BGN RT_ATTRIBS_END enum RAYTYPE { RAYTYPE_EYE = 0, RAYTYPE_DIFF = 1, RAYTYPE_SPEC = 2 }; struct PTPathVertex { uint isTerminated; uint2 pathPixel; float3 pathOutDir; float3 pathVertexPos; float3 pathOutMulTerm; RAYTYPE pathType; float3 pathSample; float3 pathAccumSample; uint pathSampleN; uint pathSampleDepth; curandState randState; __device__ PTPathVertex(uint _isTerminated, uint2 _pathPixel, float3 _pathOutDir, float3 _pathVertexPos, RAYTYPE _pathType, curandState _randState) : isTerminated(_isTerminated) , pathPixel(_pathPixel) , pathOutDir(_pathOutDir) , pathVertexPos(_pathVertexPos) , pathOutMulTerm(make_float3(1.f,1.f,1.f)) , pathType(_pathType) , pathSample(make_float3(0.f, 0.f, 0.f)) , pathAccumSample(make_float3(0.f, 0.f, 0.f)) , pathSampleN(0) , pathSampleDepth(0) , randState(_randState) {} }; PTPathVertex* g_devPathQueue = nullptr; uint g_uPathQueueCur = 0; uint g_uPathQueueSize = 0; PTPathVertex** g_devPathStream = nullptr; uint g_uPathStreamSize = PATHSTREAM_SIZE; void freeStreamMem() { g_uPathQueueCur = g_uPathQueueSize = 0; CUFREE(g_devPathQueue); CUFREE(g_devPathStream); } void allocateStreamMem(uint queueSize = 480000) { g_uPathQueueSize = queueSize; HANDLE_ERROR(cudaMalloc((void**)&g_devPathQueue, sizeof(PTPathVertex) * g_uPathQueueSize)); HANDLE_ERROR(cudaMemset((void*)g_devPathQueue, 0, sizeof(PTPathVertex) * g_uPathQueueSize)); HANDLE_ERROR(cudaMalloc((void**)&g_devPathStream, sizeof(PTPathVertex*) * g_uPathStreamSize)); HANDLE_ERROR(cudaMemset((void*)g_devPathStream, 0, sizeof(PTPathVertex*) * g_uPathStreamSize)); } float* g_devResultData = nullptr; float* g_devAccResultData = nullptr; NPMathHelper::Mat4x4 g_matLastCamMat; NPMathHelper::Mat4x4 g_matCurCamMat; uint32 g_uCurFrameN = 0; size_t g_resultDataSize = 0; uint32 WangHash(uint32 a) { a = (a ^ 61) ^ (a >> 16); a = a + (a << 3); a = a ^ (a >> 4); a = a * 0x27d4eb2d; a = a ^ (a >> 15); return a; } __global__ void pt_traceSample_kernel(RTVertex* vertices, RTTriangle* triangles, RTMaterial* materials, CURTTexture* textures, PTPathVertex** pathStream, uint activePathStreamSize) { uint x = blockIdx.x * blockDim.x + threadIdx.x; if (x >= activePathStreamSize || pathStream[x]->isTerminated) return; PTPathVertex* procVertex = pathStream[x]; CURay ray = CURay(procVertex->pathVertexPos, procVertex->pathOutDir); TracePrimitiveResult traceResult; if (TracePrimitive(ray, traceResult, M_INF, M_FLT_BIAS_EPSILON, false)) { RTTriangle* tri = &triangles[traceResult.triId]; RTMaterial* mat = &materials[tri->matInd]; RTVertex* v0 = &vertices[tri->vertInd0]; RTVertex* v1 = &vertices[tri->vertInd1]; RTVertex* v2 = &vertices[tri->vertInd2]; float2 uv0 = make_float2(v0->tex._x, v0->tex._y); float2 uv1 = make_float2(v1->tex._x, v1->tex._y); float2 uv2 = make_float2(v2->tex._x, v2->tex._y); float2 uv = uv0 * traceResult.w + uv1 * traceResult.u + uv2 * traceResult.v; float3 n0 = V32F3(v0->norm); float3 n1 = V32F3(v1->norm); float3 n2 = V32F3(v2->norm); float3 norm = n0 * traceResult.w + n1 * traceResult.u + n2 * traceResult.v; float3 diff; float3 emissive; float trans; float specular; float metallic; float roughness; float anisotropic; float sheen; float sheenTint; float clearcoat; float clearcoatGloss; GetMaterialColors(mat, uv, textures, diff, norm, emissive, trans, specular, metallic, roughness , anisotropic, 
sheen, sheenTint, clearcoat, clearcoatGloss); float3 nl = vecDot(norm, ray.dir) < 0.f ? norm : -1 * norm; { // Get some random microfacet float3 hDir = ImportanceSampleGGX(make_float2(curand_uniform(&procVertex->randState), curand_uniform(&procVertex->randState)), roughness, nl); // Calculate flesnel float voH = vecDot(-1 * ray.dir, hDir); float3 f0 = vecLerp(0.08 * make_float3(specular, specular, specular), diff, metallic); float3 brdf_f = Fresnel(f0, voH); // Reflected or Refracted float reflProb = lerp(length(brdf_f), 1.0f, metallic); float refrProb = trans; float3 reflDir; float3 refrDir; CURay nextRay = ray; float3 lightMulTerm; RAYTYPE nextRayType = procVertex->pathType; if (refrProb > 0) { bool into = vecDot(nl, norm) > 0.f; float nt = specular * 0.8f + 1.f; float nc = 1.0f; float nnt = into ? nc / nt : nt / nc; float ddn = vecDot(hDir, ray.dir); float cos2t = 1.f - nnt * nnt *(1.f - ddn * ddn); if (cos2t < 0.f) { reflProb = 1.0f;// refrProb = 0.f; } else { refrDir = normalize(ray.dir * nnt - hDir * (ddn*nnt + sqrtf(cos2t))); } } if (reflProb > 0) { reflDir = normalize(ray.dir - hDir * 2 * vecDot(hDir, ray.dir)); if (vecDot(reflDir, nl) < 0.f) reflProb = 0.f; } // Reflected if (ProbabilityRand(&procVertex->randState, reflProb)) { nextRay = CURay(ray.orig + (traceResult.dist - M_FLT_BIAS_EPSILON) * ray.dir, reflDir); // ShootRayResult nextRayResult = pt0_normalRay<depth + 1>(nextRay, vertices, triangles, materials, textures, randstate); // Microfacet specular = D*G*F / (4*NoL*NoV) // pdf = D * NoH / (4 * VoH) // (G * F * VoH) / (NoV * NoH) float VoH = vecDot(-1 * ray.dir, hDir); float NoV = vecDot(nl, -1 * ray.dir); float NoH = vecDot(nl, hDir); float NoL = vecDot(nl, reflDir); float G = GeometricVisibility(roughness, NoV, NoL, VoH); //shadeResult = vecMul((brdf_f * G * VoH) / (NoV * NoH * reflProb) , nextRayResult.light) + emissive; lightMulTerm = (brdf_f * G * VoH) / (NoV * NoH * reflProb); nextRayType = RAYTYPE_SPEC; } // Diffused or Transmited else { // Transmited if (ProbabilityRand(&procVertex->randState, refrProb)) { nextRay = CURay(ray.orig + (traceResult.dist + M_FLT_BIAS_EPSILON) * ray.dir + refrDir * M_FLT_BIAS_EPSILON, refrDir); //ShootRayResult nextRayResult = pt0_normalRay<depth + 1>(nextRay, vertices, triangles, materials, textures, randstate); float cosine = vecDot(-1 * nl, refrDir); //shadeResult = (cosine * vecMul(diff, nextRayResult.light)) / (refrProb * (1 - reflProb)) + emissive; lightMulTerm = cosine * diff / (refrProb * (1 - reflProb)); nextRayType = RAYTYPE_SPEC; } // Diffused else { float3 w = nl; float3 u = normalize(vecCross((fabs(w.x) > .1 ? 
make_float3(0, 1, 0) : make_float3(1, 0, 0)), w)); float3 v = vecCross(w, u); u = vecCross(v, w); float r1 = 2.f * M_PI * curand_uniform(&procVertex->randState); float r2cos = sqrtf(curand_uniform(&procVertex->randState)); float r2sin = 1.f - r2cos*r2cos; float3 diffDir = normalize(w * r2cos + u * r2sin * cosf(r1) + v * r2sin * sinf(r1)); nextRay = CURay(ray.orig + traceResult.dist * ray.dir + diffDir * M_FLT_BIAS_EPSILON, diffDir); //ShootRayResult nextRayResult = pt0_normalRay<depth + 1>(nextRay, vertices, triangles, materials, textures, randstate); float VoH = vecDot(-1 * ray.dir, hDir); float NoV = vecDot(nl, -1 * ray.dir); float NoL = vecDot(nl, diffDir); //shadeResult = (M_PI * vecMul(Diffuse(diff, roughness, NoV, NoL, VoH), nextRayResult.light)) / ((1 - refrProb) * (1 - reflProb)) + emissive; lightMulTerm = M_PI * Diffuse(diff, roughness, NoV, NoL, VoH) / ((1 - refrProb) * (1 - reflProb)); nextRayType = RAYTYPE_DIFF; } } procVertex->pathSample = procVertex->pathSample + vecMul(emissive , procVertex->pathOutMulTerm); float pixelContrib = length(procVertex->pathOutMulTerm) * length(lightMulTerm); if (/*(procVertex->pathType == RAYTYPE_DIFF && nextRayType == RAYTYPE_SPEC) ||*/ length(emissive) > 0.f) pixelContrib = 0.f; if (curand_uniform(&procVertex->randState) > pixelContrib || procVertex->pathSampleDepth + 1 >= NORMALRAY_BOUND_MAX) { procVertex->pathAccumSample = procVertex->pathAccumSample + procVertex->pathSample; procVertex->pathSampleN++; procVertex->isTerminated = true; } else { procVertex->pathOutMulTerm = vecMul(procVertex->pathOutMulTerm, lightMulTerm); procVertex->pathOutDir = nextRay.dir; procVertex->pathVertexPos = nextRay.orig; procVertex->pathType = nextRayType; procVertex->pathSampleDepth++; } } } else { procVertex->pathAccumSample = procVertex->pathAccumSample + procVertex->pathSample; procVertex->pathSampleN++; procVertex->isTerminated = true; } } __global__ void pt_genPathQueue_kernel(float3 camPos, float3 camDir, float3 camUp, float3 camRight, float fov, float width, float height, uint32 frameN, uint32 hashedFrameN, PTPathVertex* pathQueue) { uint x = blockIdx.x * blockDim.x + threadIdx.x; uint y = blockIdx.y * blockDim.y + threadIdx.y; if (x >= width || y >= height) return; uint ind = (y * width + x); float u = (2.f * ((float)x + 0.5f) / width - 1.f) * tan(fov * 0.5f) * width / height; float v = (2.f * ((float)y + 0.5f) / height - 1.f) * tan(fov * 0.5f); curandState randstate; curand_init(hashedFrameN + ind, 0, 0, &randstate); float au = u + (curand_uniform(&randstate) - 0.5f) / height * tan(fov * 0.5f); float av = v + (curand_uniform(&randstate) - 0.5f) / height * tan(fov * 0.5f); float3 dir = normalize(camRight * au + camUp * av + camDir); pathQueue[ind] = PTPathVertex(false, make_uint2(x,y), dir, camPos, RAYTYPE_EYE, randstate); } __global__ void pt_assignPathStream_kernel(PTPathVertex** pathStream, uint pathStreamSize, PTPathVertex* pathQueue, uint pathQueueCur, uint pathQueueSize) { uint ind = blockIdx.x * blockDim.x + threadIdx.x; if (ind >= pathStreamSize) { int pathQueueInd = pathQueueCur + ind - pathStreamSize; PTPathVertex* assignSample = nullptr; if (pathQueueInd < pathQueueSize) { assignSample = &pathQueue[pathQueueInd]; } pathStream[ind] = assignSample; } } __global__ void pt_applyPathQueueResult_kernel(PTPathVertex* pathQueue, uint pathQueueSize, uint width, uint height, uint frameN, float* result, float* accResult) { uint x = blockIdx.x * blockDim.x + threadIdx.x; if (x >= pathQueueSize) return; // add calculating sample to the result if 
(!pathQueue[x].isTerminated) { pathQueue[x].pathAccumSample = pathQueue[x].pathAccumSample + pathQueue[x].pathSample; pathQueue[x].pathSampleN++; } if (pathQueue[x].pathSampleN > 0) { float3 sampleResult = pathQueue[x].pathAccumSample / (float)pathQueue[x].pathSampleN; float resultInf = 1.f / (float)(frameN + 1); float oldInf = 1.f - resultInf; uint ind = pathQueue[x].pathPixel.y * width + pathQueue[x].pathPixel.x; result[ind * 3] = max(resultInf * sampleResult.x + oldInf * result[ind * 3], 0.f); result[ind * 3 + 1] = max(resultInf * sampleResult.y + oldInf * result[ind * 3 + 1], 0.f); result[ind * 3 + 2] = max(resultInf * sampleResult.z + oldInf * result[ind * 3 + 2], 0.f); } } void CleanMem() { freeStreamMem(); freeAllBVHCudaMem(); CUFREE(g_devResultData); CUFREE(g_devAccResultData); } //struct ray_greater_compare //{ // __hd__ bool operator()(const PTPathVertex* vert1, const PTPathVertex* vert2) // { // int vert1Score = (vert1->pathOutDir.x > 0) + (vert1->pathOutDir.y > 0) + (vert1->pathOutDir.z > 0); // int vert2Score = (vert2->pathOutDir.x > 0) + (vert2->pathOutDir.y > 0) + (vert2->pathOutDir.z > 0); // return vert1Score > vert2Score; // } //}; struct is_terminated { __hd__ bool operator()(const PTPathVertex* vert) { return vert->isTerminated; } }; bool Render(NPMathHelper::Vec3 camPos, NPMathHelper::Vec3 camDir, NPMathHelper::Vec3 camUp, float fov, RTScene* scene , float width, float height, float* result) { // Check and allocate everything if (!scene || !scene->GetCompactBVH()->IsValid()) return false; NPMathHelper::Vec3 camRight = camDir.cross(camUp).normalize(); camUp = camRight.cross(camDir).normalize(); g_matLastCamMat = g_matCurCamMat; g_matCurCamMat = NPMathHelper::Mat4x4::lookAt(camPos, camPos + camDir, camUp); g_uCurFrameN = (g_matLastCamMat != g_matCurCamMat) ? 
0 : g_uCurFrameN + 1; if (!g_bIsCudaInit || scene->GetIsCudaDirty()) { CleanMem(); g_matLastCamMat = g_matCurCamMat; g_uCurFrameN = 0; initAllSceneCudaMem(scene); allocateStreamMem(width * height); size_t mem_tot; size_t mem_free; cudaMemGetInfo(&mem_free, &mem_tot); std::cout << "Memory Used : " << mem_tot-mem_free << "/" << mem_tot << " -> Free " << mem_free << std::endl; } else if (scene->GetIsCudaMaterialDirty()) { updateAllSceneMaterialsCudaMem(scene); g_uCurFrameN = 0; } if (!g_bIsCudaInit) return false; if (!g_devResultData || !g_devAccResultData || g_resultDataSize != (sizeof(float) * 3 * width * height)) { g_resultDataSize = sizeof(float) * 3 * width * height; CUFREE(g_devResultData); cudaMalloc((void**)&g_devResultData, g_resultDataSize); CUFREE(g_devAccResultData); cudaMalloc((void**)&g_devAccResultData, g_resultDataSize); } float3 f3CamPos = V32F3(camPos); float3 f3CamUp = V32F3(camUp); float3 f3CamDir = V32F3(camDir); float3 f3CamRight = V32F3(camRight); // Kernel go here dim3 block1(BLOCK_SIZE*BLOCK_SIZE, 1, 1); dim3 block2(BLOCK_SIZE, BLOCK_SIZE, 1); dim3 renderGrid(ceil(width / (float)block2.x), ceil(height / (float)block2.y), 1); pt_genPathQueue_kernel << < renderGrid, block2 >> > (f3CamPos, f3CamDir, f3CamUp, f3CamRight, fov, width, height , g_uCurFrameN, WangHash(g_uCurFrameN), g_devPathQueue); cudaDeviceSynchronize(); uint activePathStreamSize = 0; g_uPathQueueCur = 0; while (g_uPathQueueCur < g_uPathQueueSize || activePathStreamSize > 0) { uint tempActivePathStreamSize = activePathStreamSize; pt_assignPathStream_kernel << < dim3(ceil((float)PATHSTREAM_SIZE / (float)block1.x), 1, 1), block1 >> >(g_devPathStream, activePathStreamSize, g_devPathQueue, g_uPathQueueCur, g_uPathQueueSize); //readjust activePathStreamSize activePathStreamSize = min((uint)PATHSTREAM_SIZE, activePathStreamSize + (g_uPathQueueSize - g_uPathQueueCur)); g_uPathQueueCur += activePathStreamSize - tempActivePathStreamSize; cudaDeviceSynchronize(); //test sorting ray for more coherent tracing -> it does not improve performance //thrust::sort(thrust::device, g_devPathStream, g_devPathStream + activePathStreamSize, ray_greater_compare()); pt_traceSample_kernel << < dim3(ceil((float)activePathStreamSize / (float)block1.x), 1, 1), block1 >> > (g_devVertices, g_devTriangles, g_devMaterials, g_devTextures, g_devPathStream, activePathStreamSize); cudaDeviceSynchronize(); //compact pathstream and find activePathStreamSize value PTPathVertex** compactedStreamEndItr = thrust::remove_if(thrust::device, g_devPathStream, g_devPathStream + activePathStreamSize, is_terminated()); activePathStreamSize = compactedStreamEndItr - g_devPathStream; } pt_applyPathQueueResult_kernel << < dim3(ceil((float)g_uPathQueueSize / (float)block1.x), 1, 1), block1 >> >(g_devPathQueue, g_uPathQueueSize, width, height, g_uCurFrameN, g_devResultData, g_devAccResultData); // Copy result to host cudaMemcpy(result, g_devResultData, g_resultDataSize, cudaMemcpyDeviceToHost); return true; } }
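pt_genPathQueue_kernel seeds one curand sequence per pixel with curand_init(hashedFrameN + ind, 0, 0, ...), where hashedFrameN comes from WangHash(frameN); hashing the frame counter keeps successive frames from reusing nearly identical seeds. A small self-contained sketch of that seeding pattern follows; only the hash body is taken verbatim from the file, while the kernel, buffer, and size names are illustrative.

#include <cuda_runtime.h>
#include <curand_kernel.h>
#include <cstdio>

// Same integer hash as WangHash() above; it scrambles the frame counter so that
// consecutive frames do not produce correlated curand sequences.
__host__ __device__ unsigned int wangHash(unsigned int a)
{
    a = (a ^ 61) ^ (a >> 16);
    a = a + (a << 3);
    a = a ^ (a >> 4);
    a = a * 0x27d4eb2d;
    a = a ^ (a >> 15);
    return a;
}

__global__ void seedAndSample(float* out, int width, int height, unsigned int hashedFrame)
{
    int x = blockIdx.x * blockDim.x + threadIdx.x;
    int y = blockIdx.y * blockDim.y + threadIdx.y;
    if (x >= width || y >= height) return;
    int ind = y * width + x;

    // One independent sequence per pixel per frame, as in pt_genPathQueue_kernel.
    curandState rng;
    curand_init(hashedFrame + ind, 0, 0, &rng);
    out[ind] = curand_uniform(&rng);
}

int main()
{
    const int w = 64, h = 64;
    float* d_out = nullptr;
    cudaMalloc(&d_out, w * h * sizeof(float));

    dim3 block(16, 16);
    dim3 grid((w + block.x - 1) / block.x, (h + block.y - 1) / block.y);
    for (unsigned int frame = 0; frame < 4; ++frame)
        seedAndSample<<<grid, block>>>(d_out, w, h, wangHash(frame));

    cudaDeviceSynchronize();
    float first = 0.f;
    cudaMemcpy(&first, d_out, sizeof(float), cudaMemcpyDeviceToHost);
    printf("sample at pixel (0,0): %f\n", first);
    cudaFree(d_out);
    return 0;
}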
0d401de359ced30ac0b16342667205866a9b5604.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "dump_particles.h" #include "utils/simple_serializer.h" #include "utils/time_stamp.h" #include <mirheo/core/pvs/rod_vector.h> #include <mirheo/core/simulation.h> #include <mirheo/core/utils/config.h> #include <mirheo/core/utils/cuda_common.h> #include <mirheo/core/utils/folders.h> #include <mirheo/core/utils/kernel_launch.h> #include <mirheo/core/xdmf/type_map.h> namespace mirheo { namespace dump_particles_kernels { template <typename T> __global__ void copyObjectDataToParticles(int objSize, int nObjects, const T *srcObjData, T *dstParticleData) { const int pid = threadIdx.x + blockIdx.x * blockDim.x; const int objId = pid / objSize; if (objId >= nObjects) return; dstParticleData[pid] = srcObjData[objId]; } template <typename T> __global__ void copyRodDataToParticles(int numBiSegmentsPerObject, int objSize, int nObjects, const T *rodData, T *particleData) { constexpr int stride = 5; const int pid = threadIdx.x + blockIdx.x * blockDim.x; const int objId = pid / objSize; const int localPartId = pid % objSize; const int localBisegId = math::min(localPartId / stride, numBiSegmentsPerObject); // min because of last particle const int bid = objId * numBiSegmentsPerObject + localBisegId; if (objId < nObjects) particleData[pid] = rodData[bid]; } } // namespace dump_particles_kernels ParticleSenderPlugin::ParticleSenderPlugin(const MirState *state, std::string name, std::string pvName, int dumpEvery, const std::vector<std::string>& channelNames) : SimulationPlugin(state, name), pvName_(pvName), dumpEvery_(dumpEvery), channelNames_(channelNames) { channelData_.resize(channelNames_.size()); } ParticleSenderPlugin::ParticleSenderPlugin(const MirState *state, Loader& loader, const ConfigObject& config) : ParticleSenderPlugin(state, config["name"], config["pvName"], config["dumpEvery"], loader.load<std::vector<std::string>>(config["channelNames"])) {} ParticleSenderPlugin::~ParticleSenderPlugin() = default; void ParticleSenderPlugin::setup(Simulation *simulation, const MPI_Comm& comm, const MPI_Comm& interComm) { SimulationPlugin::setup(simulation, comm, interComm); pv_ = simulation->getPVbyNameOrDie(pvName_); info("Plugin %s initialized for the following particle vector: %s", getCName(), pvName_.c_str()); } void ParticleSenderPlugin::handshake() { std::vector<XDMF::Channel::DataForm> dataForms; std::vector<XDMF::Channel::NumberType> numberTypes; std::vector<std::string> typeDescriptorsStr; auto pushChannelInfos = [&dataForms, &numberTypes, &typeDescriptorsStr](const DataManager::ChannelDescription& desc) { mpark::visit([&dataForms, &numberTypes, &typeDescriptorsStr](auto pinnedBufferPtr) { using T = typename std::remove_pointer<decltype(pinnedBufferPtr)>::type::value_type; dataForms .push_back(XDMF::getDataForm <T>()); numberTypes .push_back(XDMF::getNumberType<T>()); typeDescriptorsStr.push_back(typeDescriptorToString(DataTypeWrapper<T>{})); }, desc.varDataPtr); }; auto ov = dynamic_cast<ObjectVector*>(pv_); auto rv = dynamic_cast<RodVector*>(pv_); for (const auto& name : channelNames_) { if (pv_->local()->dataPerParticle.checkChannelExists(name)) { const auto& desc = pv_->local()->dataPerParticle.getChannelDescOrDie(name); pushChannelInfos(desc); } else if (ov != nullptr && ov->local()->dataPerObject.checkChannelExists(name)) { const auto& desc = ov->local()->dataPerObject.getChannelDescOrDie(name); pushChannelInfos(desc); } else if (rv != nullptr && rv->local()->dataPerBisegment.checkChannelExists(name)) 
{ const auto& desc = rv->local()->dataPerBisegment.getChannelDescOrDie(name); pushChannelInfos(desc); } else { die("Channel not found: '%s' in particle vector '%s'", getCName(), pv_->getCName()); } } _waitPrevSend(); SimpleSerializer::serialize(sendBuffer_, channelNames_, dataForms, numberTypes, typeDescriptorsStr); _send(sendBuffer_); } static inline void copyData(ParticleVector *pv, const std::string& channelName, HostBuffer<char>& dst, hipStream_t stream) { auto srcContainer = pv->local()->dataPerParticle.getGenericData(channelName); dst.genericCopy(srcContainer, stream); } static inline void copyData(ObjectVector *ov, const std::string& channelName, HostBuffer<char>& dst, DeviceBuffer<char>& workSpace, hipStream_t stream) { auto lov = ov->local(); const auto& srcDesc = lov->dataPerObject.getChannelDescOrDie(channelName); const int objSize = lov->getObjectSize(); const int nObjects = lov->getNumObjects(); mpark::visit([&](auto srcBufferPtr) { using T = typename std::remove_pointer<decltype(srcBufferPtr)>::type::value_type; constexpr int nthreads = 128; const int nParts = objSize * nObjects; const int nblocks = getNblocks(nParts, nthreads); workSpace.resize_anew(nParts * sizeof(T)); SAFE_KERNEL_LAUNCH( dump_particles_kernels::copyObjectDataToParticles, nblocks, nthreads, 0, stream, objSize, nObjects, srcBufferPtr->devPtr(), reinterpret_cast<T*>(workSpace.devPtr())); }, srcDesc.varDataPtr); dst.genericCopy(&workSpace, stream); } static inline void copyData(RodVector *rv, const std::string& channelName, HostBuffer<char>& dst, DeviceBuffer<char>& workSpace, hipStream_t stream) { auto lrv = rv->local(); const auto& srcDesc = lrv->dataPerBisegment.getChannelDescOrDie(channelName); const int objSize = lrv->getObjectSize(); const int nObjects = lrv->getNumObjects(); const int numBiSegmentsPerObject = lrv->getNumSegmentsPerRod() - 1; mpark::visit([&](auto srcBufferPtr) { using T = typename std::remove_pointer<decltype(srcBufferPtr)>::type::value_type; constexpr int nthreads = 128; const int nParts = objSize * nObjects; const int nblocks = getNblocks(nParts, nthreads); workSpace.resize_anew(nParts * sizeof(T)); SAFE_KERNEL_LAUNCH( dump_particles_kernels::copyRodDataToParticles, nblocks, nthreads, 0, stream, numBiSegmentsPerObject, objSize, nObjects, srcBufferPtr->devPtr(), reinterpret_cast<T*>(workSpace.devPtr())); }, srcDesc.varDataPtr); dst.genericCopy(&workSpace, stream); } void ParticleSenderPlugin::beforeForces(hipStream_t stream) { if (!isTimeEvery(getState(), dumpEvery_)) return; positions_ .genericCopy(&pv_->local()->positions() , stream); velocities_.genericCopy(&pv_->local()->velocities(), stream); auto ov = dynamic_cast<ObjectVector*>(pv_); auto rv = dynamic_cast<RodVector*>(pv_); for (size_t i = 0; i < channelNames_.size(); ++i) { auto name = channelNames_[i]; if (pv_->local()->dataPerParticle.checkChannelExists(name)) { copyData(pv_, name, channelData_[i], stream); } else if (ov != nullptr && ov->local()->dataPerObject.checkChannelExists(name)) { copyData(ov, name, channelData_[i], workSpace_, stream); } else if (rv != nullptr && rv->local()->dataPerBisegment.checkChannelExists(name)) { copyData(rv, name, channelData_[i], workSpace_, stream); } else { die("Channel not found: '%s' in particle vector '%s'", getCName(), pv_->getCName()); } } } void ParticleSenderPlugin::serializeAndSend(__UNUSED hipStream_t stream) { if (!isTimeEvery(getState(), dumpEvery_)) return; debug2("Plugin %s is sending now data", getCName()); for (auto& p : positions_) { auto r = 
getState()->domain.local2global(make_real3(p)); p.x = r.x; p.y = r.y; p.z = r.z; } const MirState::StepType timeStamp = getTimeStamp(getState(), dumpEvery_); debug2("Plugin %s is packing now data consisting of %zu particles", getCName(), positions_.size()); _waitPrevSend(); SimpleSerializer::serialize(sendBuffer_, timeStamp, getState()->currentTime, positions_, velocities_, channelData_); _send(sendBuffer_); } void ParticleSenderPlugin::saveSnapshotAndRegister(Saver& saver) { saver.registerObject(this, _saveSnapshot(saver, "ParticleSenderPlugin")); } ConfigObject ParticleSenderPlugin::_saveSnapshot(Saver& saver, const std::string& typeName) { ConfigObject config = SimulationPlugin::_saveSnapshot(saver, typeName); config.emplace("pvName", saver(pvName_)); config.emplace("dumpEvery", saver(dumpEvery_)); config.emplace("channelNames", saver(channelNames_)); return config; } ParticleDumperPlugin::ParticleDumperPlugin(std::string name, std::string path) : PostprocessPlugin(name), path_(path), positions_(std::make_shared<std::vector<real3>>()) {} ParticleDumperPlugin::ParticleDumperPlugin(Loader&, const ConfigObject& config) : ParticleDumperPlugin(config["name"], config["path"]) {} ParticleDumperPlugin::~ParticleDumperPlugin() = default; void ParticleDumperPlugin::handshake() { auto req = waitData(); MPI_Check( MPI_Wait(&req, MPI_STATUS_IGNORE) ); recv(); std::vector<std::string> names; std::vector<XDMF::Channel::DataForm> dataForms; std::vector<XDMF::Channel::NumberType> numberTypes; std::vector<std::string> typeDescriptorsStr; SimpleSerializer::deserialize(data_, names, dataForms, numberTypes, typeDescriptorsStr); auto initChannel = [] (const std::string& name, XDMF::Channel::DataForm dataForm, XDMF::Channel::NumberType numberType, TypeDescriptor datatype, XDMF::Channel::NeedShift needShift = XDMF::Channel::NeedShift::False) { return XDMF::Channel{name, nullptr, dataForm, numberType, datatype, needShift}; }; // Velocity and id are special channels which are always present std::string allNames = "'velocity', 'id'"; channels_.push_back(initChannel("velocity", XDMF::Channel::DataForm::Vector, XDMF::getNumberType<real>(), DataTypeWrapper<real>())); channels_.push_back(initChannel("id", XDMF::Channel::DataForm::Scalar, XDMF::Channel::NumberType::Int64, DataTypeWrapper<int64_t>())); for (size_t i = 0; i < names.size(); ++i) { const std::string& name = names[i]; const auto dataForm = dataForms[i]; const auto numberType = numberTypes[i]; const auto dataType = stringToTypeDescriptor(typeDescriptorsStr[i]); const auto channel = initChannel(name, dataForm, numberType, dataType); channels_.push_back(channel); allNames += ", '" + name + "'"; } // Create the required folder createFoldersCollective(comm_, getParentPath(path_)); debug2("Plugin '%s' was set up to dump channels %s. 
Path is %s", getCName(), allNames.c_str(), path_.c_str()); } static void unpackParticles(const std::vector<real4> &pos4, const std::vector<real4> &vel4, std::vector<real3> &pos, std::vector<real3> &vel, std::vector<int64_t> &ids) { const size_t n = pos4.size(); pos.resize(n); vel.resize(n); ids.resize(n); for (size_t i = 0; i < n; ++i) { auto p = Particle(pos4[i], vel4[i]); pos[i] = p.r; vel[i] = p.u; ids[i] = p.getId(); } } void ParticleDumperPlugin::_recvAndUnpack(MirState::TimeType &time, MirState::StepType& timeStamp) { int c = 0; SimpleSerializer::deserialize(data_, timeStamp, time, pos4_, vel4_, channelData_); unpackParticles(pos4_, vel4_, *positions_, velocities_, ids_); channels_[c++].data = velocities_.data(); channels_[c++].data = ids_.data(); for (auto& cd : channelData_) channels_[c++].data = cd.data(); } void ParticleDumperPlugin::deserialize() { debug2("Plugin '%s' will dump right now", getCName()); MirState::TimeType time; MirState::StepType timeStamp; _recvAndUnpack(time, timeStamp); std::string fname = path_ + createStrZeroPadded(timeStamp, zeroPadding_); XDMF::VertexGrid grid(positions_, comm_); XDMF::write(fname, &grid, channels_, time, comm_); } void ParticleDumperPlugin::saveSnapshotAndRegister(Saver& saver) { saver.registerObject(this, _saveSnapshot(saver, "ParticleDumperPlugin")); } ConfigObject ParticleDumperPlugin::_saveSnapshot(Saver& saver, const std::string& typeName) { ConfigObject config = PostprocessPlugin::_saveSnapshot(saver, typeName); config.emplace("path", saver(path_)); return config; } } // namespace mirheo
0d401de359ced30ac0b16342667205866a9b5604.cu
#include "dump_particles.h" #include "utils/simple_serializer.h" #include "utils/time_stamp.h" #include <mirheo/core/pvs/rod_vector.h> #include <mirheo/core/simulation.h> #include <mirheo/core/utils/config.h> #include <mirheo/core/utils/cuda_common.h> #include <mirheo/core/utils/folders.h> #include <mirheo/core/utils/kernel_launch.h> #include <mirheo/core/xdmf/type_map.h> namespace mirheo { namespace dump_particles_kernels { template <typename T> __global__ void copyObjectDataToParticles(int objSize, int nObjects, const T *srcObjData, T *dstParticleData) { const int pid = threadIdx.x + blockIdx.x * blockDim.x; const int objId = pid / objSize; if (objId >= nObjects) return; dstParticleData[pid] = srcObjData[objId]; } template <typename T> __global__ void copyRodDataToParticles(int numBiSegmentsPerObject, int objSize, int nObjects, const T *rodData, T *particleData) { constexpr int stride = 5; const int pid = threadIdx.x + blockIdx.x * blockDim.x; const int objId = pid / objSize; const int localPartId = pid % objSize; const int localBisegId = math::min(localPartId / stride, numBiSegmentsPerObject); // min because of last particle const int bid = objId * numBiSegmentsPerObject + localBisegId; if (objId < nObjects) particleData[pid] = rodData[bid]; } } // namespace dump_particles_kernels ParticleSenderPlugin::ParticleSenderPlugin(const MirState *state, std::string name, std::string pvName, int dumpEvery, const std::vector<std::string>& channelNames) : SimulationPlugin(state, name), pvName_(pvName), dumpEvery_(dumpEvery), channelNames_(channelNames) { channelData_.resize(channelNames_.size()); } ParticleSenderPlugin::ParticleSenderPlugin(const MirState *state, Loader& loader, const ConfigObject& config) : ParticleSenderPlugin(state, config["name"], config["pvName"], config["dumpEvery"], loader.load<std::vector<std::string>>(config["channelNames"])) {} ParticleSenderPlugin::~ParticleSenderPlugin() = default; void ParticleSenderPlugin::setup(Simulation *simulation, const MPI_Comm& comm, const MPI_Comm& interComm) { SimulationPlugin::setup(simulation, comm, interComm); pv_ = simulation->getPVbyNameOrDie(pvName_); info("Plugin %s initialized for the following particle vector: %s", getCName(), pvName_.c_str()); } void ParticleSenderPlugin::handshake() { std::vector<XDMF::Channel::DataForm> dataForms; std::vector<XDMF::Channel::NumberType> numberTypes; std::vector<std::string> typeDescriptorsStr; auto pushChannelInfos = [&dataForms, &numberTypes, &typeDescriptorsStr](const DataManager::ChannelDescription& desc) { mpark::visit([&dataForms, &numberTypes, &typeDescriptorsStr](auto pinnedBufferPtr) { using T = typename std::remove_pointer<decltype(pinnedBufferPtr)>::type::value_type; dataForms .push_back(XDMF::getDataForm <T>()); numberTypes .push_back(XDMF::getNumberType<T>()); typeDescriptorsStr.push_back(typeDescriptorToString(DataTypeWrapper<T>{})); }, desc.varDataPtr); }; auto ov = dynamic_cast<ObjectVector*>(pv_); auto rv = dynamic_cast<RodVector*>(pv_); for (const auto& name : channelNames_) { if (pv_->local()->dataPerParticle.checkChannelExists(name)) { const auto& desc = pv_->local()->dataPerParticle.getChannelDescOrDie(name); pushChannelInfos(desc); } else if (ov != nullptr && ov->local()->dataPerObject.checkChannelExists(name)) { const auto& desc = ov->local()->dataPerObject.getChannelDescOrDie(name); pushChannelInfos(desc); } else if (rv != nullptr && rv->local()->dataPerBisegment.checkChannelExists(name)) { const auto& desc = rv->local()->dataPerBisegment.getChannelDescOrDie(name); 
pushChannelInfos(desc); } else { die("Channel not found: '%s' in particle vector '%s'", getCName(), pv_->getCName()); } } _waitPrevSend(); SimpleSerializer::serialize(sendBuffer_, channelNames_, dataForms, numberTypes, typeDescriptorsStr); _send(sendBuffer_); } static inline void copyData(ParticleVector *pv, const std::string& channelName, HostBuffer<char>& dst, cudaStream_t stream) { auto srcContainer = pv->local()->dataPerParticle.getGenericData(channelName); dst.genericCopy(srcContainer, stream); } static inline void copyData(ObjectVector *ov, const std::string& channelName, HostBuffer<char>& dst, DeviceBuffer<char>& workSpace, cudaStream_t stream) { auto lov = ov->local(); const auto& srcDesc = lov->dataPerObject.getChannelDescOrDie(channelName); const int objSize = lov->getObjectSize(); const int nObjects = lov->getNumObjects(); mpark::visit([&](auto srcBufferPtr) { using T = typename std::remove_pointer<decltype(srcBufferPtr)>::type::value_type; constexpr int nthreads = 128; const int nParts = objSize * nObjects; const int nblocks = getNblocks(nParts, nthreads); workSpace.resize_anew(nParts * sizeof(T)); SAFE_KERNEL_LAUNCH( dump_particles_kernels::copyObjectDataToParticles, nblocks, nthreads, 0, stream, objSize, nObjects, srcBufferPtr->devPtr(), reinterpret_cast<T*>(workSpace.devPtr())); }, srcDesc.varDataPtr); dst.genericCopy(&workSpace, stream); } static inline void copyData(RodVector *rv, const std::string& channelName, HostBuffer<char>& dst, DeviceBuffer<char>& workSpace, cudaStream_t stream) { auto lrv = rv->local(); const auto& srcDesc = lrv->dataPerBisegment.getChannelDescOrDie(channelName); const int objSize = lrv->getObjectSize(); const int nObjects = lrv->getNumObjects(); const int numBiSegmentsPerObject = lrv->getNumSegmentsPerRod() - 1; mpark::visit([&](auto srcBufferPtr) { using T = typename std::remove_pointer<decltype(srcBufferPtr)>::type::value_type; constexpr int nthreads = 128; const int nParts = objSize * nObjects; const int nblocks = getNblocks(nParts, nthreads); workSpace.resize_anew(nParts * sizeof(T)); SAFE_KERNEL_LAUNCH( dump_particles_kernels::copyRodDataToParticles, nblocks, nthreads, 0, stream, numBiSegmentsPerObject, objSize, nObjects, srcBufferPtr->devPtr(), reinterpret_cast<T*>(workSpace.devPtr())); }, srcDesc.varDataPtr); dst.genericCopy(&workSpace, stream); } void ParticleSenderPlugin::beforeForces(cudaStream_t stream) { if (!isTimeEvery(getState(), dumpEvery_)) return; positions_ .genericCopy(&pv_->local()->positions() , stream); velocities_.genericCopy(&pv_->local()->velocities(), stream); auto ov = dynamic_cast<ObjectVector*>(pv_); auto rv = dynamic_cast<RodVector*>(pv_); for (size_t i = 0; i < channelNames_.size(); ++i) { auto name = channelNames_[i]; if (pv_->local()->dataPerParticle.checkChannelExists(name)) { copyData(pv_, name, channelData_[i], stream); } else if (ov != nullptr && ov->local()->dataPerObject.checkChannelExists(name)) { copyData(ov, name, channelData_[i], workSpace_, stream); } else if (rv != nullptr && rv->local()->dataPerBisegment.checkChannelExists(name)) { copyData(rv, name, channelData_[i], workSpace_, stream); } else { die("Channel not found: '%s' in particle vector '%s'", getCName(), pv_->getCName()); } } } void ParticleSenderPlugin::serializeAndSend(__UNUSED cudaStream_t stream) { if (!isTimeEvery(getState(), dumpEvery_)) return; debug2("Plugin %s is sending now data", getCName()); for (auto& p : positions_) { auto r = getState()->domain.local2global(make_real3(p)); p.x = r.x; p.y = r.y; p.z = r.z; } const 
MirState::StepType timeStamp = getTimeStamp(getState(), dumpEvery_); debug2("Plugin %s is packing now data consisting of %zu particles", getCName(), positions_.size()); _waitPrevSend(); SimpleSerializer::serialize(sendBuffer_, timeStamp, getState()->currentTime, positions_, velocities_, channelData_); _send(sendBuffer_); } void ParticleSenderPlugin::saveSnapshotAndRegister(Saver& saver) { saver.registerObject(this, _saveSnapshot(saver, "ParticleSenderPlugin")); } ConfigObject ParticleSenderPlugin::_saveSnapshot(Saver& saver, const std::string& typeName) { ConfigObject config = SimulationPlugin::_saveSnapshot(saver, typeName); config.emplace("pvName", saver(pvName_)); config.emplace("dumpEvery", saver(dumpEvery_)); config.emplace("channelNames", saver(channelNames_)); return config; } ParticleDumperPlugin::ParticleDumperPlugin(std::string name, std::string path) : PostprocessPlugin(name), path_(path), positions_(std::make_shared<std::vector<real3>>()) {} ParticleDumperPlugin::ParticleDumperPlugin(Loader&, const ConfigObject& config) : ParticleDumperPlugin(config["name"], config["path"]) {} ParticleDumperPlugin::~ParticleDumperPlugin() = default; void ParticleDumperPlugin::handshake() { auto req = waitData(); MPI_Check( MPI_Wait(&req, MPI_STATUS_IGNORE) ); recv(); std::vector<std::string> names; std::vector<XDMF::Channel::DataForm> dataForms; std::vector<XDMF::Channel::NumberType> numberTypes; std::vector<std::string> typeDescriptorsStr; SimpleSerializer::deserialize(data_, names, dataForms, numberTypes, typeDescriptorsStr); auto initChannel = [] (const std::string& name, XDMF::Channel::DataForm dataForm, XDMF::Channel::NumberType numberType, TypeDescriptor datatype, XDMF::Channel::NeedShift needShift = XDMF::Channel::NeedShift::False) { return XDMF::Channel{name, nullptr, dataForm, numberType, datatype, needShift}; }; // Velocity and id are special channels which are always present std::string allNames = "'velocity', 'id'"; channels_.push_back(initChannel("velocity", XDMF::Channel::DataForm::Vector, XDMF::getNumberType<real>(), DataTypeWrapper<real>())); channels_.push_back(initChannel("id", XDMF::Channel::DataForm::Scalar, XDMF::Channel::NumberType::Int64, DataTypeWrapper<int64_t>())); for (size_t i = 0; i < names.size(); ++i) { const std::string& name = names[i]; const auto dataForm = dataForms[i]; const auto numberType = numberTypes[i]; const auto dataType = stringToTypeDescriptor(typeDescriptorsStr[i]); const auto channel = initChannel(name, dataForm, numberType, dataType); channels_.push_back(channel); allNames += ", '" + name + "'"; } // Create the required folder createFoldersCollective(comm_, getParentPath(path_)); debug2("Plugin '%s' was set up to dump channels %s. 
Path is %s", getCName(), allNames.c_str(), path_.c_str()); } static void unpackParticles(const std::vector<real4> &pos4, const std::vector<real4> &vel4, std::vector<real3> &pos, std::vector<real3> &vel, std::vector<int64_t> &ids) { const size_t n = pos4.size(); pos.resize(n); vel.resize(n); ids.resize(n); for (size_t i = 0; i < n; ++i) { auto p = Particle(pos4[i], vel4[i]); pos[i] = p.r; vel[i] = p.u; ids[i] = p.getId(); } } void ParticleDumperPlugin::_recvAndUnpack(MirState::TimeType &time, MirState::StepType& timeStamp) { int c = 0; SimpleSerializer::deserialize(data_, timeStamp, time, pos4_, vel4_, channelData_); unpackParticles(pos4_, vel4_, *positions_, velocities_, ids_); channels_[c++].data = velocities_.data(); channels_[c++].data = ids_.data(); for (auto& cd : channelData_) channels_[c++].data = cd.data(); } void ParticleDumperPlugin::deserialize() { debug2("Plugin '%s' will dump right now", getCName()); MirState::TimeType time; MirState::StepType timeStamp; _recvAndUnpack(time, timeStamp); std::string fname = path_ + createStrZeroPadded(timeStamp, zeroPadding_); XDMF::VertexGrid grid(positions_, comm_); XDMF::write(fname, &grid, channels_, time, comm_); } void ParticleDumperPlugin::saveSnapshotAndRegister(Saver& saver) { saver.registerObject(this, _saveSnapshot(saver, "ParticleDumperPlugin")); } ConfigObject ParticleDumperPlugin::_saveSnapshot(Saver& saver, const std::string& typeName) { ConfigObject config = PostprocessPlugin::_saveSnapshot(saver, typeName); config.emplace("path", saver(path_)); return config; } } // namespace mirheo
867620b8c7551e5be30c8577ccc75817991cedee.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "../neuralnet/cudahelpers.h" #include <stdexcept> #if __CUDA_ARCH__ >= 530 #define CUDA_SUPPORTS_FP16 #endif //TODO maybe tune this number, it varies by GPU static const int targetNumThreads = 512; //-------------------------------------------------------------------------------------------------------------- template <typename T> __global__ void channelConcatKernel( const T* inA, const T* inB, T* out, int chwA, int chwB, int numBlocksA, int numBlocksB, int n ) { if(blockIdx.x < numBlocksA) { int index = blockIdx.x * blockDim.x + threadIdx.x; if(index < chwA) { int nchwA = n*chwA; int chwOut = (chwA+chwB); int aIdx = index; int outIdx = index; while(aIdx < nchwA) { out[outIdx] = inA[aIdx]; aIdx += chwA; outIdx += chwOut; } } } else { int index = (blockIdx.x - numBlocksA) * blockDim.x + threadIdx.x; if(index < chwB) { int nchwB = n*chwB; int chwOut = (chwA+chwB); int bIdx = index; int outIdx = chwA+index; while(bIdx < nchwB) { out[outIdx] = inB[bIdx]; bIdx += chwB; outIdx += chwOut; } } } } template <typename T> void customCudaChannelConcatTemplate(const T* inA, const T* inB, T* out, int chwA, int chwB, int n) { int blockSize = targetNumThreads; int numBlocksA = (chwA + blockSize-1) / blockSize; int numBlocksB = (chwB + blockSize-1) / blockSize; int numBlocks = numBlocksA + numBlocksB; hipLaunchKernelGGL(( channelConcatKernel), dim3(numBlocks), dim3(blockSize), 0, 0, inA,inB,out,chwA,chwB,numBlocksA,numBlocksB,n); } template void customCudaChannelConcatTemplate<float>(const float* inA, const float* inB, float* out, int chwA, int chwB, int n); template void customCudaChannelConcatTemplate<half>(const half* inA, const half* inB, half* out, int chwA, int chwB, int n); void customCudaChannelConcat(const float* inA, const float* inB, float* out, int chwA, int chwB, int n) { customCudaChannelConcatTemplate<float>(inA,inB,out,chwA,chwB,n); } void customCudaChannelConcat(const half* inA, const half* inB, half* out, int chwA, int chwB, int n) { customCudaChannelConcatTemplate<half>(inA,inB,out,chwA,chwB,n); } //-------------------------------------------------------------------------------------------------------------- template <typename T> __global__ void extractChannel0KernelNHWC(const T *in, T* out, int nhwSize, int cSize) { int nhwIdx = blockIdx.x * blockDim.x + threadIdx.x; if(nhwIdx < nhwSize) { out[nhwIdx] = in[nhwIdx*cSize]; } } template <typename T> void customCudaChannel0ExtractNHWCTemplate(const T *in, T* out, int n, int hw, int c) { int nhw = n*hw; int blockSize = targetNumThreads; int numBlocks = (nhw+blockSize-1)/blockSize; hipLaunchKernelGGL(( extractChannel0KernelNHWC), dim3(numBlocks),dim3(blockSize), 0, 0, in,out,nhw,c); } template <typename T> __global__ void extractChannel0KernelNCHW(const T *in, T* out, int nSize, int cSize, int hwSize) { int hwIdx = blockIdx.x * blockDim.x + threadIdx.x; int nIdx = blockIdx.y * blockDim.y + threadIdx.y; if(hwIdx < hwSize && nIdx < nSize) { out[nIdx * hwSize + hwIdx] = in[nIdx * cSize * hwSize + hwIdx]; } } template <typename T> void customCudaChannel0ExtractNCHWTemplate(const T *in, T* out, int nSize, int cSize, int hwSize) { int hwThreads; int hwBlocks; int nThreads; int nBlocks; if(hwSize > targetNumThreads) { hwThreads = targetNumThreads/2; hwBlocks = (hwSize + hwThreads - 1) / hwThreads; nThreads = 1; nBlocks = nSize; } else if(hwSize > targetNumThreads/2) { hwThreads = hwSize; hwBlocks = 1; nThreads = 1; nBlocks = nSize; } else { hwThreads = 
hwSize; hwBlocks = 1; nThreads = targetNumThreads / hwSize; nBlocks = (nSize + nThreads - 1) / nThreads; } if(nBlocks > 65536) throw std::runtime_error("customCudaChannel0ExtractNCHW: nSize too large given hwSize"); dim3 grid(hwBlocks,nBlocks,1); dim3 threads(hwThreads,nThreads,1); hipLaunchKernelGGL(( extractChannel0KernelNCHW), dim3(grid),dim3(threads), 0, 0, in,out,nSize,cSize,hwSize); } void customCudaChannel0ExtractNCHW(const float* in, float* out, int n, int c, int hw) { customCudaChannel0ExtractNCHWTemplate<float>(in,out,n,c,hw); } void customCudaChannel0ExtractNCHW(const half* in, half* out, int n, int c, int hw) { customCudaChannel0ExtractNCHWTemplate<half>(in,out,n,c,hw); } void customCudaChannel0ExtractNHWC(const float* in, float* out, int n, int hw, int c) { customCudaChannel0ExtractNHWCTemplate<float>(in,out,n,hw,c); } void customCudaChannel0ExtractNHWC(const half* in, half* out, int n, int hw, int c) { customCudaChannel0ExtractNHWCTemplate<half>(in,out,n,hw,c); } //-------------------------------------------------------------------------------------------------------------- // template <typename T> // struct linear_index_to_row_index : public thrust::unary_function<T,T> { // T len; // __host__ __device__ linear_index_to_row_index(T len) : len(len) {} // __host__ __device__ T operator()(T i) { return i / len; } // }; // void customCudaPoolRowsSumNCHW(float* in, float* out, int nc, int xy) { // thrust::device_ptr<float> inThrust = thrust::device_pointer_cast(in); // thrust::device_ptr<float> outThrust = thrust::device_pointer_cast(out); // thrust::reduce_by_key( // thrust::make_transform_iterator(thrust::counting_iterator<int>(0), linear_index_to_row_index<int>(xy)), // thrust::make_transform_iterator(thrust::counting_iterator<int>(0), linear_index_to_row_index<int>(xy)) + (nc*xy), // inThrust, // thrust::make_discard_iterator(), // outThrust // ); // } // void customCudaPoolRowsMaxNCHW(float* in, float* out, int nc, int xy) { // thrust::device_ptr<float> inThrust = thrust::device_pointer_cast(in); // thrust::device_ptr<float> outThrust = thrust::device_pointer_cast(out); // thrust::reduce_by_key( // thrust::make_transform_iterator(thrust::counting_iterator<int>(0), linear_index_to_row_index<int>(xy)), // thrust::make_transform_iterator(thrust::counting_iterator<int>(0), linear_index_to_row_index<int>(xy)) + (nc*xy), // inThrust, // thrust::make_discard_iterator(), // outThrust, // thrust::equal_to<int>(), // thrust::maximum<float>() // ); // } __global__ void sumChannelsNCHWKernel(const float* in, float* out, int cSize, int xySize, float scaleSum) { extern __shared__ float sumPoolNCHWShared[]; int xyId = threadIdx.x; int xyBlockDim = blockDim.x; int cId = threadIdx.y; int cBlockDim = blockDim.y; int cIdx = blockIdx.y * cBlockDim + cId; int nIdx = blockIdx.z; int xycSize = xySize*cSize; int sharedIdx = xyId + cId * xyBlockDim; float acc = 0.0f; if(cIdx < cSize) { int xyIdx = xyId; while(xyIdx < xySize) { acc += in[xyIdx + cIdx * xySize + nIdx * xycSize]; xyIdx += xyBlockDim; } sumPoolNCHWShared[sharedIdx] = acc; } __syncthreads(); for(int s = xyBlockDim>>1; s > 0; s >>= 1) { if(xyId < s) { sumPoolNCHWShared[sharedIdx] += sumPoolNCHWShared[sharedIdx + s]; } __syncthreads(); } if(xyId == 0 && cIdx < cSize) out[cIdx + nIdx * cSize] = sumPoolNCHWShared[sharedIdx] * scaleSum; } __global__ void valueHeadPoolChannelsNCHWKernel(const float* in, float* out, int nSize, int cSize, int xySize, const float* maskSum) { extern __shared__ float sumPoolNCHWShared[]; int xyId = threadIdx.x; int 
xyBlockDim = blockDim.x; int cId = threadIdx.y; int cBlockDim = blockDim.y; int cIdx = blockIdx.y * cBlockDim + cId; int nIdx = blockIdx.z; int xycSize = xySize*cSize; int sharedIdx = xyId + cId * xyBlockDim; float acc = 0.0f; if(cIdx < cSize) { int xyIdx = xyId; while(xyIdx < xySize) { acc += in[xyIdx + cIdx * xySize + nIdx * xycSize]; xyIdx += xyBlockDim; } sumPoolNCHWShared[sharedIdx] = acc; } __syncthreads(); for(int s = xyBlockDim>>1; s > 0; s >>= 1) { if(xyId < s) { sumPoolNCHWShared[sharedIdx] += sumPoolNCHWShared[sharedIdx + s]; } __syncthreads(); } if(xyId == 0 && cIdx < cSize) { float sum = sumPoolNCHWShared[sharedIdx]; float div = maskSum[nIdx]; float sqrtdiv = sqrt(div); float mean = sum/div; out[cIdx + nIdx * cSize*3] = mean; out[cIdx + nIdx * cSize*3 + cSize] = mean * (sqrtdiv - 14.0f) * 0.1f; out[cIdx + nIdx * cSize*3 + cSize*2] = mean * ((sqrtdiv - 14.0f) * (sqrtdiv - 14.0f) * 0.01f - 0.1f); } } __global__ void maxPositiveChannelsNCHWKernel(const float* in, float* out, int cSize, int xySize) { extern __shared__ float maxPoolNCHWShared[]; int xyId = threadIdx.x; int xyBlockDim = blockDim.x; int cId = threadIdx.y; int cBlockDim = blockDim.y; int cIdx = blockIdx.y * cBlockDim + cId; int nIdx = blockIdx.z; int xycSize = xySize*cSize; int sharedIdx = xyId + cId * xyBlockDim; if(cIdx < cSize) { float acc = 0.0f; int xyIdx = xyId; while(xyIdx < xySize) { acc = fmaxf(acc, in[xyIdx + cIdx * xySize + nIdx * xycSize]); xyIdx += xyBlockDim; } maxPoolNCHWShared[sharedIdx] = acc; } __syncthreads(); for(int s = xyBlockDim>>1; s > 0; s >>= 1) { if(xyId < s) { maxPoolNCHWShared[sharedIdx] = fmaxf(maxPoolNCHWShared[sharedIdx], maxPoolNCHWShared[sharedIdx + s]); } __syncthreads(); } if(xyId == 0 && cIdx < cSize) out[cIdx + nIdx * cSize] = maxPoolNCHWShared[sharedIdx]; } __global__ void sumAndMaxPositiveChannelsNCHWKernel(const float* in, float* out, int cSize, int xySize, float scaleSum, int sharedMemElts) { extern __shared__ float poolNCHWShared[]; float* sumShared = (float*)poolNCHWShared; float* maxShared = (float*)poolNCHWShared + sharedMemElts; int xyId = threadIdx.x; int xyBlockDim = blockDim.x; int cId = threadIdx.y; int cBlockDim = blockDim.y; int cIdx = blockIdx.y * cBlockDim + cId; int nIdx = blockIdx.z; int xycSize = xySize*cSize; int sharedIdx = xyId + cId * xyBlockDim; if(cIdx < cSize) { float accSum = 0.0f; float accMax = 0.0f; int xyIdx = xyId; while(xyIdx < xySize) { float a = in[xyIdx + cIdx * xySize + nIdx * xycSize]; accSum += a; accMax = fmaxf(accMax, a); xyIdx += xyBlockDim; } sumShared[sharedIdx] = accSum; maxShared[sharedIdx] = accMax; } __syncthreads(); for(int s = xyBlockDim>>1; s > 0; s >>= 1) { if(xyId < s) { sumShared[sharedIdx] += sumShared[sharedIdx + s]; maxShared[sharedIdx] = fmaxf(maxShared[sharedIdx], maxShared[sharedIdx + s]); } __syncthreads(); } if(xyId == 0 && cIdx < cSize) { out[cIdx + nIdx * (cSize*2)] = sumShared[sharedIdx] * scaleSum; out[cIdx + nIdx * (cSize*2) + cSize] = maxShared[sharedIdx]; } } __global__ void gPoolChannelsNCHWKernel(const float* in, float* out, int cSize, int xySize, const float* maskSum, int sharedMemElts) { extern __shared__ float poolNCHWShared[]; float* sumShared = (float*)poolNCHWShared; float* maxShared = (float*)poolNCHWShared + sharedMemElts; int xyId = threadIdx.x; int xyBlockDim = blockDim.x; int cId = threadIdx.y; int cBlockDim = blockDim.y; int cIdx = blockIdx.y * cBlockDim + cId; int nIdx = blockIdx.z; int xycSize = xySize*cSize; int sharedIdx = xyId + cId * xyBlockDim; if(cIdx < cSize) { float accSum = 0.0f; float 
accMax = 0.0f; int xyIdx = xyId; while(xyIdx < xySize) { float a = in[xyIdx + cIdx * xySize + nIdx * xycSize]; accSum += a; accMax = fmaxf(accMax, a); xyIdx += xyBlockDim; } sumShared[sharedIdx] = accSum; maxShared[sharedIdx] = accMax; } __syncthreads(); for(int s = xyBlockDim>>1; s > 0; s >>= 1) { if(xyId < s) { sumShared[sharedIdx] += sumShared[sharedIdx + s]; maxShared[sharedIdx] = fmaxf(maxShared[sharedIdx], maxShared[sharedIdx + s]); } __syncthreads(); } if(xyId == 0 && cIdx < cSize) { float sum = sumShared[sharedIdx]; float div = maskSum[nIdx]; float sqrtdiv = sqrt(div); float mean = sum/div; out[cIdx + nIdx * (cSize*3)] = mean; out[cIdx + nIdx * (cSize*3) + cSize] = mean * (sqrtdiv - 14.0f) * 0.1f; out[cIdx + nIdx * (cSize*3) + cSize*2] = maxShared[sharedIdx]; } } void customCudaPoolRowsSumNCHW(const float* in, float* out, int nSize, int cSize, int xySize, float scaleSum) { if(nSize > 65536) throw std::runtime_error("customCudaPoolRowsSumNCHW: nSize too large"); if(cSize > 65536) throw std::runtime_error("customCudaPoolRowsSumNCHW: cSize too large"); //Use up as many threads as possible along the xy dimension. int xyThreads = 1; while(xyThreads < targetNumThreads && xyThreads < xySize/2) xyThreads *= 2; //Distribute the extra threads along the c dimension. int cThreads = (targetNumThreads < xyThreads) ? 1 : (targetNumThreads / xyThreads); int cBlocks = (cSize + cThreads - 1) / cThreads; //We need one shared memory spot per thread int sharedMemSize = sizeof(float) * cThreads * xyThreads; dim3 grid(1,cBlocks,nSize); dim3 threads(xyThreads,cThreads,1); hipLaunchKernelGGL(( sumChannelsNCHWKernel), dim3(grid),dim3(threads),sharedMemSize, 0, in,out,cSize,xySize,scaleSum); } void customCudaValueHeadPoolNCHW(const float* in, float* out, int nSize, int cSize, int xySize, const float* maskSum) { if(nSize > 65536) throw std::runtime_error("customCudaValueHeadPoolNCHW: nSize too large"); if(cSize > 65536) throw std::runtime_error("customCudaValueHeadPoolNCHW: cSize too large"); //Use up as many threads as possible along the xy dimension. int xyThreads = 1; while(xyThreads < targetNumThreads && xyThreads < xySize/2) xyThreads *= 2; //Distribute the extra threads along the c dimension. int cThreads = (targetNumThreads < xyThreads) ? 1 : (targetNumThreads / xyThreads); int cBlocks = (cSize + cThreads - 1) / cThreads; //We need one shared memory spot per thread int sharedMemSize = sizeof(float) * cThreads * xyThreads; dim3 grid(1,cBlocks,nSize); dim3 threads(xyThreads,cThreads,1); hipLaunchKernelGGL(( valueHeadPoolChannelsNCHWKernel), dim3(grid),dim3(threads),sharedMemSize, 0, in,out,nSize,cSize,xySize,maskSum); } void customCudaPoolRowsMaxPositiveNCHW(const float* in, float* out, int nSize, int cSize, int xySize) { if(nSize > 65536) throw std::runtime_error("customCudaPoolRowsMaxPositiveNCHW: nSize too large"); if(cSize > 65536) throw std::runtime_error("customCudaPoolRowsMaxPositiveNCHW: cSize too large"); //Use up as many threads as possible along the xy dimension. int xyThreads = 1; while(xyThreads < targetNumThreads && xyThreads < xySize/2) xyThreads *= 2; //Distribute the extra threads along the c dimension. int cThreads = (targetNumThreads < xyThreads) ? 
1 : (targetNumThreads / xyThreads); int cBlocks = (cSize + cThreads - 1) / cThreads; //We need one shared memory spot per thread int sharedMemSize = sizeof(float) * cThreads * xyThreads; dim3 grid(1,cBlocks,nSize); dim3 threads(xyThreads,cThreads,1); hipLaunchKernelGGL(( maxPositiveChannelsNCHWKernel), dim3(grid),dim3(threads),sharedMemSize, 0, in,out,cSize,xySize); } void customCudaPoolRowsSumAndMaxPositiveNCHW(const float* in, float* out, int nSize, int cSize, int xySize, float scaleSum) { if(nSize > 65536) throw std::runtime_error("customCudaPoolRowsSumAndMaxPositiveNCHW: nSize too large"); if(cSize > 65536) throw std::runtime_error("customCudaPoolRowsSumAndMaxPositiveNCHW: cSize too large"); //Use up as many threads as possible along the xy dimension. int xyThreads = 1; while(xyThreads < targetNumThreads && xyThreads < xySize/2) xyThreads *= 2; //Distribute the extra threads along the c dimension. int cThreads = (targetNumThreads < xyThreads) ? 1 : (targetNumThreads / xyThreads); int cBlocks = (cSize + cThreads - 1) / cThreads; //We need one shared memory spot per thread, and then we double it because we need both sum and max. //We also make sure it's a power of two to address any alignment concerns. int sharedMemElts = 128; while(sharedMemElts < cThreads * xyThreads) sharedMemElts *= 2; int sharedMemSize = sizeof(float) * sharedMemElts * 2; dim3 grid(1,cBlocks,nSize); dim3 threads(xyThreads,cThreads,1); hipLaunchKernelGGL(( sumAndMaxPositiveChannelsNCHWKernel), dim3(grid),dim3(threads),sharedMemSize, 0, in,out,cSize,xySize,scaleSum,sharedMemElts); } void customCudaPoolRowsGPoolNCHW(const float* in, float* out, int nSize, int cSize, int xySize, const float* maskSum) { if(nSize > 65536) throw std::runtime_error("customCudaPoolRowsGPoolNCHW: nSize too large"); if(cSize > 65536) throw std::runtime_error("customCudaPoolRowsGPoolNCHW: cSize too large"); //Use up as many threads as possible along the xy dimension. int xyThreads = 1; while(xyThreads < targetNumThreads && xyThreads < xySize/2) xyThreads *= 2; //Distribute the extra threads along the c dimension. int cThreads = (targetNumThreads < xyThreads) ? 1 : (targetNumThreads / xyThreads); int cBlocks = (cSize + cThreads - 1) / cThreads; //We need one shared memory spot per thread, and then we double it because we need both sum and max. //We also make sure it's a power of two to address any alignment concerns. 
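//Illustrative sizing example (thread counts hypothetical): with, say, xyThreads = 8 and cThreads = 16,
//cThreads*xyThreads = 128, so sharedMemElts stays at 128 and sharedMemSize = sizeof(float)*128*2 = 1024 bytes;
//the kernel then treats the first sharedMemElts floats as the running sums and the next sharedMemElts floats
//as the running maxes.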
int sharedMemElts = 128; while(sharedMemElts < cThreads * xyThreads) sharedMemElts *= 2; int sharedMemSize = sizeof(float) * sharedMemElts * 2; dim3 grid(1,cBlocks,nSize); dim3 threads(xyThreads,cThreads,1); hipLaunchKernelGGL(( gPoolChannelsNCHWKernel), dim3(grid),dim3(threads),sharedMemSize, 0, in,out,cSize,xySize,maskSum,sharedMemElts); } //-------------------------------------------------------------------------------------------------------------- #ifdef CUDA_SUPPORTS_FP16 __global__ void sumAndMaxPositiveChannelsNCHWHalfKernel(const half* in, half* out, int cSize, int xySize, float scaleSum, int sharedMemElts) { extern __shared__ float poolNCHWShared[]; float* sumShared = (float*)poolNCHWShared; float* maxShared = (float*)poolNCHWShared + sharedMemElts; int xyId = threadIdx.x; int xyBlockDim = blockDim.x; int cId = threadIdx.y; int cBlockDim = blockDim.y; int cIdx = blockIdx.y * cBlockDim + cId; int nIdx = blockIdx.z; int xycSize = xySize*cSize; int sharedIdx = xyId + cId * xyBlockDim; if(cIdx < cSize) { float accSum = 0.0f; float accMax = 0.0f; int xyIdx = xyId; while(xyIdx < xySize) { float a = __half2float(in[xyIdx + cIdx * xySize + nIdx * xycSize]); accSum += a; accMax = fmaxf(accMax, a); xyIdx += xyBlockDim; } sumShared[sharedIdx] = accSum; maxShared[sharedIdx] = accMax; } __syncthreads(); for(int s = xyBlockDim>>1; s > 0; s >>= 1) { if(xyId < s) { sumShared[sharedIdx] += sumShared[sharedIdx + s]; maxShared[sharedIdx] = fmaxf(maxShared[sharedIdx], maxShared[sharedIdx + s]); } __syncthreads(); } if(xyId == 0 && cIdx < cSize) { out[cIdx + nIdx * (cSize*2)] = __float2half(sumShared[sharedIdx] * scaleSum); out[cIdx + nIdx * (cSize*2) + cSize] = __float2half(maxShared[sharedIdx]); } } __global__ void gPoolChannelsNCHWHalfKernel(const half* in, half* out, int cSize, int xySize, const float* maskSum, int sharedMemElts) { extern __shared__ float poolNCHWShared[]; float* sumShared = (float*)poolNCHWShared; float* maxShared = (float*)poolNCHWShared + sharedMemElts; int xyId = threadIdx.x; int xyBlockDim = blockDim.x; int cId = threadIdx.y; int cBlockDim = blockDim.y; int cIdx = blockIdx.y * cBlockDim + cId; int nIdx = blockIdx.z; int xycSize = xySize*cSize; int sharedIdx = xyId + cId * xyBlockDim; if(cIdx < cSize) { float accSum = 0.0f; float accMax = 0.0f; int xyIdx = xyId; while(xyIdx < xySize) { float a = __half2float(in[xyIdx + cIdx * xySize + nIdx * xycSize]); accSum += a; accMax = fmaxf(accMax, a); xyIdx += xyBlockDim; } sumShared[sharedIdx] = accSum; maxShared[sharedIdx] = accMax; } __syncthreads(); for(int s = xyBlockDim>>1; s > 0; s >>= 1) { if(xyId < s) { sumShared[sharedIdx] += sumShared[sharedIdx + s]; maxShared[sharedIdx] = fmaxf(maxShared[sharedIdx], maxShared[sharedIdx + s]); } __syncthreads(); } if(xyId == 0 && cIdx < cSize) { float sum = sumShared[sharedIdx]; float div = maskSum[nIdx]; float sqrtdiv = sqrt(div); float mean = sum/div; out[cIdx + nIdx * (cSize*3)] = __float2half(mean); out[cIdx + nIdx * (cSize*3) + cSize] = __float2half(mean * (sqrtdiv - 14.0f) * 0.1f); out[cIdx + nIdx * (cSize*3) + cSize*2] = __float2half(maxShared[sharedIdx]); } } #else __global__ void sumAndMaxPositiveChannelsNCHWHalfKernel(const half* in, half* out, int cSize, int xySize, float scaleSum, int sharedMemElts) { //Do nothing, FP16 not supported } __global__ void gPoolChannelsNCHWHalfKernel(const half* in, half* out, int cSize, int xySize, const float* maskSum, int sharedMemElts) { //Do nothing, FP16 not supported } #endif void customCudaPoolRowsSumAndMaxPositiveNCHW(const half* in, half* 
out, int nSize, int cSize, int xySize, float scaleSum) { if(nSize > 65536) throw std::runtime_error("customCudaPoolRowsSumAndMaxPositiveNCHW: nSize too large"); if(cSize > 65536) throw std::runtime_error("customCudaPoolRowsSumAndMaxPositiveNCHW: cSize too large"); //Use up as many threads as possible along the xy dimension. int xyThreads = 1; while(xyThreads < targetNumThreads && xyThreads < xySize/2) xyThreads *= 2; //Distribute the extra threads along the c dimension. int cThreads = (targetNumThreads < xyThreads) ? 1 : (targetNumThreads / xyThreads); int cBlocks = (cSize + cThreads - 1) / cThreads; //We need one shared memory spot per thread, and then we double it because we need both sum and max. //We also make sure it's a power of two to address any alignment concerns. int sharedMemElts = 128; while(sharedMemElts < cThreads * xyThreads) sharedMemElts *= 2; int sharedMemSize = sizeof(float) * sharedMemElts * 2; dim3 grid(1,cBlocks,nSize); dim3 threads(xyThreads,cThreads,1); hipLaunchKernelGGL(( sumAndMaxPositiveChannelsNCHWHalfKernel), dim3(grid),dim3(threads),sharedMemSize, 0, in,out,cSize,xySize,scaleSum,sharedMemElts); } void customCudaPoolRowsGPoolNCHW(const half* in, half* out, int nSize, int cSize, int xySize, const float* maskSum) { if(nSize > 65536) throw std::runtime_error("customCudaPoolRowsGPoolNCHW: nSize too large"); if(cSize > 65536) throw std::runtime_error("customCudaPoolRowsGPoolNCHW: cSize too large"); //Use up as many threads as possible along the xy dimension. int xyThreads = 1; while(xyThreads < targetNumThreads && xyThreads < xySize/2) xyThreads *= 2; //Distribute the extra threads along the c dimension. int cThreads = (targetNumThreads < xyThreads) ? 1 : (targetNumThreads / xyThreads); int cBlocks = (cSize + cThreads - 1) / cThreads; //We need one shared memory spot per thread, and then we double it because we need both sum and max. //We also make sure it's a power of two to address any alignment concerns. 
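//Note that this FP16 entry point still reserves float-sized shared memory: gPoolChannelsNCHWHalfKernel
//converts each input element with __half2float, accumulates the sums and maxes in float shared arrays,
//and only converts back with __float2half when writing the pooled outputs.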
int sharedMemElts = 128; while(sharedMemElts < cThreads * xyThreads) sharedMemElts *= 2; int sharedMemSize = sizeof(float) * sharedMemElts * 2; dim3 grid(1,cBlocks,nSize); dim3 threads(xyThreads,cThreads,1); hipLaunchKernelGGL(( gPoolChannelsNCHWHalfKernel), dim3(grid),dim3(threads),sharedMemSize, 0, in,out,cSize,xySize,maskSum,sharedMemElts); } //-------------------------------------------------------------------------------------------------------------- __global__ void sumChannelsNHWCKernel(const float* in, float* out, int xySize, int cSize, float scaleSum) { extern __shared__ float sumPoolNHWCShared[]; int cId = threadIdx.x; int cBlockDim = blockDim.x; int xyId = threadIdx.y; int xyBlockDim = blockDim.y; int cIdx = blockIdx.x * cBlockDim + cId; int nIdx = blockIdx.z; int sharedIdx = cId + cBlockDim * xyId; int xycSize = xySize*cSize; sumPoolNHWCShared[sharedIdx] = 0; if(cIdx < cSize) { int xyIdx = xyId; while(xyIdx < xySize) { sumPoolNHWCShared[sharedIdx] += in[cIdx + xyIdx * cSize + nIdx * xycSize]; xyIdx += xyBlockDim; } } __syncthreads(); for(int s = xyBlockDim>>1; s > 0; s >>= 1) { if(xyId < s) { sumPoolNHWCShared[sharedIdx] += sumPoolNHWCShared[sharedIdx + cBlockDim * s]; } __syncthreads(); } if(xyId == 0 && cIdx < cSize) out[cIdx + nIdx * cSize] = sumPoolNHWCShared[sharedIdx] * scaleSum; } __global__ void valueHeadPoolChannelsNHWCKernel(const float* in, float* out, int nSize, int xySize, int cSize, const float* maskSum) { extern __shared__ float sumPoolNHWCShared[]; int cId = threadIdx.x; int cBlockDim = blockDim.x; int xyId = threadIdx.y; int xyBlockDim = blockDim.y; int cIdx = blockIdx.x * cBlockDim + cId; int nIdx = blockIdx.z; int sharedIdx = cId + cBlockDim * xyId; int xycSize = xySize*cSize; sumPoolNHWCShared[sharedIdx] = 0; if(cIdx < cSize) { int xyIdx = xyId; while(xyIdx < xySize) { sumPoolNHWCShared[sharedIdx] += in[cIdx + xyIdx * cSize + nIdx * xycSize]; xyIdx += xyBlockDim; } } __syncthreads(); for(int s = xyBlockDim>>1; s > 0; s >>= 1) { if(xyId < s) { sumPoolNHWCShared[sharedIdx] += sumPoolNHWCShared[sharedIdx + cBlockDim * s]; } __syncthreads(); } if(xyId == 0 && cIdx < cSize) { float sum = sumPoolNHWCShared[sharedIdx]; float div = maskSum[nIdx]; float sqrtdiv = sqrt(div); float mean = sum/div; out[cIdx + nIdx * cSize*3] = mean; out[cIdx + nIdx * cSize*3 + cSize] = mean * (sqrtdiv - 14.0f) * 0.1f; out[cIdx + nIdx * cSize*3 + cSize*2] = mean * ((sqrtdiv - 14.0f) * (sqrtdiv - 14.0f) * 0.01f - 0.1f); } } __global__ void maxPositiveChannelsNHWCKernel(const float* in, float* out, int xySize, int cSize) { extern __shared__ float maxPoolNHWCShared[]; int cId = threadIdx.x; int cBlockDim = blockDim.x; int xyId = threadIdx.y; int xyBlockDim = blockDim.y; int cIdx = blockIdx.x * cBlockDim + cId; int nIdx = blockIdx.z; int sharedIdx = cId + cBlockDim * xyId; int xycSize = xySize*cSize; maxPoolNHWCShared[sharedIdx] = 0; if(cIdx < cSize) { int xyIdx = xyId; while(xyIdx < xySize) { maxPoolNHWCShared[sharedIdx] = fmaxf(maxPoolNHWCShared[sharedIdx],in[cIdx + xyIdx * cSize + nIdx * xycSize]); xyIdx += xyBlockDim; } } __syncthreads(); for(int s = xyBlockDim>>1; s > 0; s >>= 1) { if(xyId < s) { maxPoolNHWCShared[sharedIdx] = fmaxf(maxPoolNHWCShared[sharedIdx],maxPoolNHWCShared[sharedIdx + cBlockDim * s]); } __syncthreads(); } if(xyId == 0 && cIdx < cSize) out[cIdx + nIdx * cSize] = maxPoolNHWCShared[sharedIdx]; } __global__ void sumAndMaxPositiveChannelsNHWCKernel(const float* in, float* out, int xySize, int cSize, float scaleSum, int sharedMemElts) { extern __shared__ float 
poolNHWCShared[]; float* sumShared = (float*)poolNHWCShared; float* maxShared = (float*)poolNHWCShared + sharedMemElts; int cId = threadIdx.x; int cBlockDim = blockDim.x; int xyId = threadIdx.y; int xyBlockDim = blockDim.y; int cIdx = blockIdx.x * cBlockDim + cId; int nIdx = blockIdx.z; int sharedIdx = cId + cBlockDim * xyId; int xycSize = xySize*cSize; sumShared[sharedIdx] = 0; maxShared[sharedIdx] = 0; if(cIdx < cSize) { int xyIdx = xyId; while(xyIdx < xySize) { float a = in[cIdx + xyIdx * cSize + nIdx * xycSize]; sumShared[sharedIdx] += a; maxShared[sharedIdx] = fmaxf(maxShared[sharedIdx],a); xyIdx += xyBlockDim; } } __syncthreads(); for(int s = xyBlockDim>>1; s > 0; s >>= 1) { if(xyId < s) { sumShared[sharedIdx] += sumShared[sharedIdx + cBlockDim * s]; maxShared[sharedIdx] = fmaxf(maxShared[sharedIdx],maxShared[sharedIdx + cBlockDim * s]); } __syncthreads(); } if(xyId == 0 && cIdx < cSize) { out[cIdx + nIdx * (cSize*2)] = sumShared[sharedIdx] * scaleSum; out[cIdx + nIdx * (cSize*2) + cSize] = maxShared[sharedIdx]; } } __global__ void gPoolChannelsNHWCKernel(const float* in, float* out, int xySize, int cSize, const float* maskSum, int sharedMemElts) { extern __shared__ float poolNHWCShared[]; float* sumShared = (float*)poolNHWCShared; float* maxShared = (float*)poolNHWCShared + sharedMemElts; int cId = threadIdx.x; int cBlockDim = blockDim.x; int xyId = threadIdx.y; int xyBlockDim = blockDim.y; int cIdx = blockIdx.x * cBlockDim + cId; int nIdx = blockIdx.z; int sharedIdx = cId + cBlockDim * xyId; int xycSize = xySize*cSize; sumShared[sharedIdx] = 0; maxShared[sharedIdx] = 0; if(cIdx < cSize) { int xyIdx = xyId; while(xyIdx < xySize) { float a = in[cIdx + xyIdx * cSize + nIdx * xycSize]; sumShared[sharedIdx] += a; maxShared[sharedIdx] = fmaxf(maxShared[sharedIdx],a); xyIdx += xyBlockDim; } } __syncthreads(); for(int s = xyBlockDim>>1; s > 0; s >>= 1) { if(xyId < s) { sumShared[sharedIdx] += sumShared[sharedIdx + cBlockDim * s]; maxShared[sharedIdx] = fmaxf(maxShared[sharedIdx],maxShared[sharedIdx + cBlockDim * s]); } __syncthreads(); } if(xyId == 0 && cIdx < cSize) { float sum = sumShared[sharedIdx]; float div = maskSum[nIdx]; float sqrtdiv = sqrt(div); float mean = sum/div; out[cIdx + nIdx * (cSize*3)] = mean; out[cIdx + nIdx * (cSize*3) + cSize] = mean * (sqrtdiv - 14.0f) * 0.1f; out[cIdx + nIdx * (cSize*3) + cSize*2] = maxShared[sharedIdx]; } } void customCudaPoolRowsSumNHWC(const float* in, float* out, int nSize, int xySize, int cSize, float scaleSum) { if(nSize > 65536) throw std::runtime_error("customCudaPoolRowsSumNHWC: nSize too large"); if(cSize > 65536) throw std::runtime_error("customCudaPoolRowsSumNHWC: cSize too large"); //Use up to two warps worth of threads along the channel dimension, which is the //most compact int cThreads = 1; while(cThreads < 64 && cThreads < cSize/2) cThreads *= 2; int cBlocks = (cSize + cThreads - 1) / cThreads; //Distribute the extra threads to perform parallel reduction along the xy dimension. int xyThreads = (targetNumThreads < cThreads) ? 
1 : (targetNumThreads / cThreads); //We need one shared memory spot per thread int sharedMemSize = sizeof(float) * cThreads * xyThreads; dim3 grid(cBlocks,1,nSize); dim3 threads(cThreads,xyThreads,1); hipLaunchKernelGGL(( sumChannelsNHWCKernel), dim3(grid),dim3(threads),sharedMemSize, 0, in,out,xySize,cSize,scaleSum); } void customCudaValueHeadPoolNHWC(const float* in, float* out, int nSize, int xySize, int cSize, const float* maskSum) { if(nSize > 65536) throw std::runtime_error("customCudaValueHeadPoolNHWC: nSize too large"); if(cSize > 65536) throw std::runtime_error("customCudaValueHeadPoolNHWC: cSize too large"); //Use up to two warps worth of threads along the channel dimension, which is the //most compact int cThreads = 1; while(cThreads < 64 && cThreads < cSize/2) cThreads *= 2; int cBlocks = (cSize + cThreads - 1) / cThreads; //Distribute the extra threads to perform parallel reduction along the xy dimension. int xyThreads = (targetNumThreads < cThreads) ? 1 : (targetNumThreads / cThreads); //We need one shared memory spot per thread int sharedMemSize = sizeof(float) * cThreads * xyThreads; dim3 grid(cBlocks,1,nSize); dim3 threads(cThreads,xyThreads,1); hipLaunchKernelGGL(( valueHeadPoolChannelsNHWCKernel), dim3(grid),dim3(threads),sharedMemSize, 0, in,out,nSize,xySize,cSize,maskSum); } void customCudaPoolRowsMaxPositiveNHWC(const float* in, float* out, int nSize, int xySize, int cSize) { if(nSize > 65536) throw std::runtime_error("customCudaPoolRowsMaxPositiveNHWC: nSize too large"); if(cSize > 65536) throw std::runtime_error("customCudaPoolRowsMaxPositiveNHWC: cSize too large"); //Use up to two warps worth of threads along the channel dimension, which is the //most compact int cThreads = 1; while(cThreads < 64 && cThreads < cSize/2) cThreads *= 2; int cBlocks = (cSize + cThreads - 1) / cThreads; //Distribute the extra threads to perform parallel reduction along the xy dimension. int xyThreads = (targetNumThreads < cThreads) ? 1 : (targetNumThreads / cThreads); //We need one shared memory spot per thread int sharedMemSize = sizeof(float) * cThreads * xyThreads; dim3 grid(cBlocks,1,nSize); dim3 threads(cThreads,xyThreads,1); hipLaunchKernelGGL(( maxPositiveChannelsNHWCKernel), dim3(grid),dim3(threads),sharedMemSize, 0, in,out,xySize,cSize); } void customCudaPoolRowsSumAndMaxPositiveNHWC(const float* in, float* out, int nSize, int xySize, int cSize, float scaleSum) { if(nSize > 65536) throw std::runtime_error("customCudaPoolRowsSumAndMaxPositiveNHWC: nSize too large"); if(cSize > 65536) throw std::runtime_error("customCudaPoolRowsSumAndMaxPositiveNHWC: cSize too large"); //Use up to two warps worth of threads along the channel dimension, which is the //most compact int cThreads = 1; while(cThreads < 64 && cThreads < cSize/2) cThreads *= 2; int cBlocks = (cSize + cThreads - 1) / cThreads; //Distribute the extra threads to perform parallel reduction along the xy dimension. int xyThreads = (targetNumThreads < cThreads) ? 1 : (targetNumThreads / cThreads); //We need one shared memory spot per thread, and then we double it because we need both sum and max. //We also make sure it's a power of two to address any alignment concerns. 
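//In the NHWC kernels the channel index is carried by threadIdx.x, since channels are the fastest-varying
//dimension in memory; the xy reduction then walks shared memory with a stride of cBlockDim
//(see sharedIdx + cBlockDim * s in the kernel), which keeps the global loads coalesced.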
int sharedMemElts = 128; while(sharedMemElts < cThreads * xyThreads) sharedMemElts *= 2; int sharedMemSize = sizeof(float) * sharedMemElts * 2; dim3 grid(cBlocks,1,nSize); dim3 threads(cThreads,xyThreads,1); hipLaunchKernelGGL(( sumAndMaxPositiveChannelsNHWCKernel), dim3(grid),dim3(threads),sharedMemSize, 0, in,out,xySize,cSize,scaleSum,sharedMemElts); } void customCudaPoolRowsGPoolNHWC(const float* in, float* out, int nSize, int xySize, int cSize, const float* maskSum) { if(nSize > 65536) throw std::runtime_error("customCudaPoolRowsGPoolNHWC: nSize too large"); if(cSize > 65536) throw std::runtime_error("customCudaPoolRowsGPoolNHWC: cSize too large"); //Use up to two warps worth of threads along the channel dimension, which is the //most compact int cThreads = 1; while(cThreads < 64 && cThreads < cSize/2) cThreads *= 2; int cBlocks = (cSize + cThreads - 1) / cThreads; //Distribute the extra threads to perform parallel reduction along the xy dimension. int xyThreads = (targetNumThreads < cThreads) ? 1 : (targetNumThreads / cThreads); //We need one shared memory spot per thread, and then we double it because we need both sum and max. //We also make sure it's a power of two to address any alignment concerns. int sharedMemElts = 128; while(sharedMemElts < cThreads * xyThreads) sharedMemElts *= 2; int sharedMemSize = sizeof(float) * sharedMemElts * 2; dim3 grid(cBlocks,1,nSize); dim3 threads(cThreads,xyThreads,1); hipLaunchKernelGGL(( gPoolChannelsNHWCKernel), dim3(grid),dim3(threads),sharedMemSize, 0, in,out,xySize,cSize,maskSum,sharedMemElts); } //-------------------------------------------------------------------------------------------------------------- #ifdef CUDA_SUPPORTS_FP16 __global__ void sumAndMaxPositiveChannelsNHWCHalfKernel(const half* in, half* out, int xySize, int cSize, float scaleSum, int sharedMemElts) { extern __shared__ float poolNHWCShared[]; float* sumShared = (float*)poolNHWCShared; float* maxShared = (float*)poolNHWCShared + sharedMemElts; int cId = threadIdx.x; int cBlockDim = blockDim.x; int xyId = threadIdx.y; int xyBlockDim = blockDim.y; int cIdx = blockIdx.x * cBlockDim + cId; int nIdx = blockIdx.z; int sharedIdx = cId + cBlockDim * xyId; int xycSize = xySize*cSize; sumShared[sharedIdx] = 0; maxShared[sharedIdx] = 0; if(cIdx < cSize) { int xyIdx = xyId; while(xyIdx < xySize) { float a = __half2float(in[cIdx + xyIdx * cSize + nIdx * xycSize]); sumShared[sharedIdx] += a; maxShared[sharedIdx] = fmaxf(maxShared[sharedIdx],a); xyIdx += xyBlockDim; } } __syncthreads(); for(int s = xyBlockDim>>1; s > 0; s >>= 1) { if(xyId < s) { sumShared[sharedIdx] += sumShared[sharedIdx + cBlockDim * s]; maxShared[sharedIdx] = fmaxf(maxShared[sharedIdx],maxShared[sharedIdx + cBlockDim * s]); } __syncthreads(); } if(xyId == 0 && cIdx < cSize) { out[cIdx + nIdx * (cSize*2)] = __float2half(sumShared[sharedIdx] * scaleSum); out[cIdx + nIdx * (cSize*2) + cSize] = __float2half(maxShared[sharedIdx]); } } __global__ void gPoolChannelsNHWCHalfKernel(const half* in, half* out, int xySize, int cSize, const float* maskSum, int sharedMemElts) { extern __shared__ float poolNHWCShared[]; float* sumShared = (float*)poolNHWCShared; float* maxShared = (float*)poolNHWCShared + sharedMemElts; int cId = threadIdx.x; int cBlockDim = blockDim.x; int xyId = threadIdx.y; int xyBlockDim = blockDim.y; int cIdx = blockIdx.x * cBlockDim + cId; int nIdx = blockIdx.z; int sharedIdx = cId + cBlockDim * xyId; int xycSize = xySize*cSize; sumShared[sharedIdx] = 0; maxShared[sharedIdx] = 0; if(cIdx < cSize) { int xyIdx 
= xyId; while(xyIdx < xySize) { float a = __half2float(in[cIdx + xyIdx * cSize + nIdx * xycSize]); sumShared[sharedIdx] += a; maxShared[sharedIdx] = fmaxf(maxShared[sharedIdx],a); xyIdx += xyBlockDim; } } __syncthreads(); for(int s = xyBlockDim>>1; s > 0; s >>= 1) { if(xyId < s) { sumShared[sharedIdx] += sumShared[sharedIdx + cBlockDim * s]; maxShared[sharedIdx] = fmaxf(maxShared[sharedIdx],maxShared[sharedIdx + cBlockDim * s]); } __syncthreads(); } if(xyId == 0 && cIdx < cSize) { float sum = sumShared[sharedIdx]; float div = maskSum[nIdx]; float sqrtdiv = sqrt(div); float mean = sum/div; out[cIdx + nIdx * (cSize*3)] = __float2half(mean); out[cIdx + nIdx * (cSize*3) + cSize] = __float2half(mean * (sqrtdiv - 14.0f) * 0.1f); out[cIdx + nIdx * (cSize*3) + cSize*2] = __float2half(maxShared[sharedIdx]); } } #else __global__ void sumAndMaxPositiveChannelsNHWCHalfKernel(const half* in, half* out, int xySize, int cSize, float scaleSum, int sharedMemElts) { //Do nothing, FP16 not supported } __global__ void gPoolChannelsNHWCHalfKernel(const half* in, half* out, int xySize, int cSize, const float* maskSum, int sharedMemElts) { //Do nothing, FP16 not supported } #endif void customCudaPoolRowsSumAndMaxPositiveNHWC(const half* in, half* out, int nSize, int xySize, int cSize, float scaleSum) { if(nSize > 65536) throw std::runtime_error("customCudaPoolRowsSumAndMaxPositiveNHWC: nSize too large"); if(cSize > 65536) throw std::runtime_error("customCudaPoolRowsSumAndMaxPositiveNHWC: cSize too large"); //Use up to two warps worth of threads along the channel dimension, which is the //most compact int cThreads = 1; while(cThreads < 64 && cThreads < cSize/2) cThreads *= 2; int cBlocks = (cSize + cThreads - 1) / cThreads; //Distribute the extra threads to perform parallel reduction along the xy dimension. int xyThreads = (targetNumThreads < cThreads) ? 1 : (targetNumThreads / cThreads); //We need one shared memory spot per thread, and then we double it because we need both sum and max. //We also make sure it's a power of two to address any alignment concerns. int sharedMemElts = 128; while(sharedMemElts < cThreads * xyThreads) sharedMemElts *= 2; int sharedMemSize = sizeof(float) * sharedMemElts * 2; dim3 grid(cBlocks,1,nSize); dim3 threads(cThreads,xyThreads,1); hipLaunchKernelGGL(( sumAndMaxPositiveChannelsNHWCHalfKernel), dim3(grid),dim3(threads),sharedMemSize, 0, in,out,xySize,cSize,scaleSum,sharedMemElts); } void customCudaPoolRowsGPoolNHWC(const half* in, half* out, int nSize, int xySize, int cSize, const float* maskSum) { if(nSize > 65536) throw std::runtime_error("customCudaPoolRowsGPoolNHWC: nSize too large"); if(cSize > 65536) throw std::runtime_error("customCudaPoolRowsGPoolNHWC: cSize too large"); //Use up to two warps worth of threads along the channel dimension, which is the //most compact int cThreads = 1; while(cThreads < 64 && cThreads < cSize/2) cThreads *= 2; int cBlocks = (cSize + cThreads - 1) / cThreads; //Distribute the extra threads to perform parallel reduction along the xy dimension. int xyThreads = (targetNumThreads < cThreads) ? 1 : (targetNumThreads / cThreads); //We need one shared memory spot per thread, and then we double it because we need both sum and max. //We also make sure it's a power of two to address any alignment concerns. 
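//(Illustrative usage sketch, not from the original code; buffer names are hypothetical. Given device buffers
// d_in of shape [n][hw][c] in half precision, d_out of shape [n][3*c] in half, and d_maskSum of shape [n] in
// float holding the per-batch mask sums, a caller could do:
//   customCudaPoolRowsGPoolNHWC(d_in, d_out, n, hw, c, d_maskSum);
//   hipError_t err = hipGetLastError();  //surface any launch failure
// followed by a stream or device synchronize before reading d_out back.)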
int sharedMemElts = 128; while(sharedMemElts < cThreads * xyThreads) sharedMemElts *= 2; int sharedMemSize = sizeof(float) * sharedMemElts * 2; dim3 grid(cBlocks,1,nSize); dim3 threads(cThreads,xyThreads,1); hipLaunchKernelGGL(( gPoolChannelsNHWCHalfKernel), dim3(grid),dim3(threads),sharedMemSize, 0, in,out,xySize,cSize,maskSum,sharedMemElts); } //-------------------------------------------------------------------------------------------------------------- __global__ void nchwTransposeKernel(const float *in, float* out, int xSize, int ySize, int tileDim, int tileStride, int xySize) { //+1 avoids bank conflicts extern __shared__ float tileNCHW[]; int tileDimP1 = tileDim+1; int xIdx = blockIdx.x * tileDim + threadIdx.x; int yIdx = blockIdx.y * tileDim + threadIdx.y; int nc = blockIdx.z; if(xIdx < xSize) { for(int j = 0; j < tileDim && yIdx+j < ySize; j += tileStride) { int inIdx = xIdx + xSize * (yIdx+j) + xySize * nc; tileNCHW[(threadIdx.y+j)*tileDimP1 + threadIdx.x] = in[inIdx]; } } __syncthreads(); //Transpose idx int outXIdx = blockIdx.y * tileDim + threadIdx.x; int outYIdx = blockIdx.x * tileDim + threadIdx.y; if(outXIdx < ySize) { for(int j = 0; j < tileDim && outYIdx+j < xSize; j += tileStride) { int outIdx = outXIdx + ySize * (outYIdx+j) + xySize * nc; out[outIdx] = tileNCHW[threadIdx.x*tileDimP1 + threadIdx.y+j]; } } } __global__ void nhwcTransposeKernel(const float *in, float* out, int xSize, int ySize, int cSize, int tileDim, int tileStride, int xycSize) { //+1 reduces bank conflicts extern __shared__ float tileNHWC[]; int tileDimP1 = tileDim+1; int xIdx = blockIdx.x * tileDim + threadIdx.x; int yIdx = blockIdx.y * tileDim + threadIdx.y; int cIdx = threadIdx.z; int n = blockIdx.z; if(xIdx < xSize) { for(int j = 0; j < tileDim && yIdx+j < ySize; j += tileStride) { int inIdx = cIdx + cSize * (xIdx + xSize * (yIdx+j)) + xycSize * n; tileNHWC[cIdx + cSize * ((threadIdx.y+j)*tileDimP1 + threadIdx.x)] = in[inIdx]; } } __syncthreads(); //Transpose idx int outXIdx = blockIdx.y * tileDim + threadIdx.x; int outYIdx = blockIdx.x * tileDim + threadIdx.y; if(outXIdx < ySize) { for(int j = 0; j < tileDim && outYIdx+j < xSize; j += tileStride) { int outIdx = cIdx + cSize * (outXIdx + ySize * (outYIdx+j)) + xycSize * n; out[outIdx] = tileNHWC[cIdx + cSize * (threadIdx.x*tileDimP1 + threadIdx.y+j)]; } } } __global__ void nchwTransposeHalfKernel(const half *in, half* out, int xSize, int ySize, int tileDim, int tileStride, int xySize) { //+1 avoids bank conflicts extern __shared__ half tileNCHWHALF[]; int tileDimP1 = tileDim+1; int xIdx = blockIdx.x * tileDim + threadIdx.x; int yIdx = blockIdx.y * tileDim + threadIdx.y; int nc = blockIdx.z; if(xIdx < xSize) { for(int j = 0; j < tileDim && yIdx+j < ySize; j += tileStride) { int inIdx = xIdx + xSize * (yIdx+j) + xySize * nc; tileNCHWHALF[(threadIdx.y+j)*tileDimP1 + threadIdx.x] = in[inIdx]; } } __syncthreads(); //Transpose idx int outXIdx = blockIdx.y * tileDim + threadIdx.x; int outYIdx = blockIdx.x * tileDim + threadIdx.y; if(outXIdx < ySize) { for(int j = 0; j < tileDim && outYIdx+j < xSize; j += tileStride) { int outIdx = outXIdx + ySize * (outYIdx+j) + xySize * nc; out[outIdx] = tileNCHWHALF[threadIdx.x*tileDimP1 + threadIdx.y+j]; } } } __global__ void nhwcTransposeHalfKernel(const half *in, half* out, int xSize, int ySize, int cSize, int tileDim, int tileStride, int xycSize) { //+1 reduces bank conflicts extern __shared__ half tileNHWCHALF[]; int tileDimP1 = tileDim+1; int xIdx = blockIdx.x * tileDim + threadIdx.x; int yIdx = blockIdx.y * 
tileDim + threadIdx.y; int cIdx = threadIdx.z; int n = blockIdx.z; if(xIdx < xSize) { for(int j = 0; j < tileDim && yIdx+j < ySize; j += tileStride) { int inIdx = cIdx + cSize * (xIdx + xSize * (yIdx+j)) + xycSize * n; tileNHWCHALF[cIdx + cSize * ((threadIdx.y+j)*tileDimP1 + threadIdx.x)] = in[inIdx]; } } __syncthreads(); //Transpose idx int outXIdx = blockIdx.y * tileDim + threadIdx.x; int outYIdx = blockIdx.x * tileDim + threadIdx.y; if(outXIdx < ySize) { for(int j = 0; j < tileDim && outYIdx+j < xSize; j += tileStride) { int outIdx = cIdx + cSize * (outXIdx + ySize * (outYIdx+j)) + xycSize * n; out[outIdx] = tileNHWCHALF[cIdx + cSize * (threadIdx.x*tileDimP1 + threadIdx.y+j)]; } } } static void sharedNCHWTranspose(const void *in, void* out, int xSize, int ySize, int ncSize, bool isHalf) { if(ncSize > 65536) throw std::runtime_error("customCudaNCHWTranspose: ncSize too large"); //TODO maybe tune these numbers, it varies by GPU //The first one should be the warp size, since it's set to what we need to avoid bank conflicts? //Or is it better to just make it xSize, to reduce overhead on top of 19x19? int tileDim = 32; int tileStride = targetNumThreads/tileDim; dim3 grid((xSize+tileDim-1)/tileDim,(ySize+tileDim-1)/tileDim,ncSize); dim3 threads(tileDim,tileStride,1); if(isHalf) { int sharedMemSize = sizeof(half)*tileDim*(tileDim+1); hipLaunchKernelGGL(( nchwTransposeHalfKernel), dim3(grid),dim3(threads),sharedMemSize, 0, (const half*)in,(half*)out,xSize,ySize,tileDim,tileStride,xSize*ySize); } else { int sharedMemSize = sizeof(float)*tileDim*(tileDim+1); hipLaunchKernelGGL(( nchwTransposeKernel), dim3(grid),dim3(threads),sharedMemSize, 0, (const float*)in,(float*)out,xSize,ySize,tileDim,tileStride,xSize*ySize); } } void customCudaNCHWTranspose(const float *in, float* out, int xSize, int ySize, int ncSize) { sharedNCHWTranspose(in,out,xSize,ySize,ncSize,false); } void customCudaNCHWTranspose(const half *in, half* out, int xSize, int ySize, int ncSize) { sharedNCHWTranspose(in,out,xSize,ySize,ncSize,true); } void sharedNHWCTranspose(const void *in, void* out, int xSize, int ySize, int cSize, int nSize, bool isHalf) { if(cSize > 64) throw std::runtime_error("customCudaNHWCTranspose: cSize too large"); int tileDim = 1; while(tileDim * 2 * cSize <= targetNumThreads) tileDim *= 2; int tileStride = 1; if(tileDim > 32) { tileStride = tileDim / 32; tileDim = 32; } dim3 grid((xSize+tileDim-1)/tileDim,(ySize+tileDim-1)/tileDim,nSize); dim3 threads(tileDim,tileStride,cSize); if(isHalf) { int sharedMemSize = sizeof(half)*tileDim*(tileDim+1)*cSize; hipLaunchKernelGGL(( nhwcTransposeHalfKernel), dim3(grid),dim3(threads),sharedMemSize, 0, (const half*)in,(half*)out,xSize,ySize,cSize,tileDim,tileStride,xSize*ySize*cSize); } else { int sharedMemSize = sizeof(float)*tileDim*(tileDim+1)*cSize; hipLaunchKernelGGL(( nhwcTransposeKernel), dim3(grid),dim3(threads),sharedMemSize, 0, (const float*)in,(float*)out,xSize,ySize,cSize,tileDim,tileStride,xSize*ySize*cSize); } } void customCudaNHWCTranspose(const float *in, float* out, int xSize, int ySize, int cSize, int nSize) { sharedNHWCTranspose(in,out,xSize,ySize,cSize,nSize,false); } void customCudaNHWCTranspose(const half *in, half* out, int xSize, int ySize, int cSize, int nSize) { sharedNHWCTranspose(in,out,xSize,ySize,cSize,nSize,true); } //-------------------------------------------------------------------------------------------------------------- template <typename T> __global__ void mirrorKernel(const T *in, T* out, int mSize, int subSize) { int subIdx = 
blockIdx.x * blockDim.x + threadIdx.x; int mIdx = blockIdx.y * blockDim.y + threadIdx.y; int batchIdx = blockIdx.z; if(subIdx < subSize && mIdx < mSize) { int inIdx = subIdx + subSize * (mIdx + mSize * batchIdx); int outIdx = subIdx + subSize * ((mSize-mIdx-1) + mSize * batchIdx); out[outIdx] = in[inIdx]; } } template <typename T> void customCudaMirrorTemplate(const T *in, T* out, int batchSize, int mSize, int subSize) { if(batchSize > 65536) throw std::runtime_error("customCudaMirror: batchSize too large"); if(mSize > 65536) throw std::runtime_error("customCudaMirror: mSize too large"); int subThreads; int subBlocks; int mThreads; int mBlocks; if(subSize > targetNumThreads) { subThreads = targetNumThreads/2; subBlocks = (subSize + subThreads - 1) / subThreads; mThreads = 1; mBlocks = mSize; } else if(subSize > targetNumThreads/2) { subThreads = subSize; subBlocks = 1; mThreads = 1; mBlocks = mSize; } else { subThreads = subSize; subBlocks = 1; mThreads = targetNumThreads / subSize; mBlocks = (mSize + mThreads - 1) / mThreads; } dim3 grid(subBlocks,mBlocks,batchSize); dim3 threads(subThreads,mThreads,1); hipLaunchKernelGGL(( mirrorKernel), dim3(grid),dim3(threads), 0, 0, in,out,mSize,subSize); } template <typename T> void customCudaMirrorNCHWTemplate(const T *in, T* out, int batchSize, int cSize, int ySize, int xSize, bool mirrorY, bool mirrorX) { if(mirrorY && mirrorX) customCudaMirrorTemplate(in,out,batchSize*cSize,ySize*xSize,1); else if(mirrorY) customCudaMirrorTemplate(in,out,batchSize*cSize,ySize,xSize); else if(mirrorX) customCudaMirrorTemplate(in,out,batchSize*cSize*ySize,xSize,1); else hipMemcpyAsync(out,in,sizeof(T)*batchSize*cSize*ySize*xSize,hipMemcpyDeviceToDevice); } template <typename T> void customCudaMirrorNHWCTemplate(const T *in, T* out, int batchSize, int ySize, int xSize, int cSize, bool mirrorY, bool mirrorX) { if(mirrorY && mirrorX) customCudaMirrorTemplate(in,out,batchSize,ySize*xSize,cSize); else if(mirrorY) customCudaMirrorTemplate(in,out,batchSize,ySize,xSize*cSize); else if(mirrorX) customCudaMirrorTemplate(in,out,batchSize*ySize,xSize,cSize); else hipMemcpyAsync(out,in,sizeof(T)*batchSize*ySize*xSize*cSize,hipMemcpyDeviceToDevice); } void customCudaMirror(const float *in, float* out, int batchSize, int mSize, int subSize) { customCudaMirrorTemplate<float>(in,out,batchSize,mSize,subSize); } void customCudaMirrorNCHW(const float *in, float* out, int batchSize, int cSize, int ySize, int xSize, bool mirrorY, bool mirrorX) { customCudaMirrorNCHWTemplate<float>(in,out,batchSize,cSize,ySize,xSize,mirrorY,mirrorX); } void customCudaMirrorNHWC(const float *in, float* out, int batchSize, int ySize, int xSize, int cSize, bool mirrorY, bool mirrorX) { customCudaMirrorNHWCTemplate<float>(in,out,batchSize,ySize,xSize,cSize,mirrorY,mirrorX); } void customCudaMirror(const half *in, half* out, int batchSize, int mSize, int subSize) { customCudaMirrorTemplate<half>(in,out,batchSize,mSize,subSize); } void customCudaMirrorNCHW(const half *in, half* out, int batchSize, int cSize, int ySize, int xSize, bool mirrorY, bool mirrorX) { customCudaMirrorNCHWTemplate<half>(in,out,batchSize,cSize,ySize,xSize,mirrorY,mirrorX); } void customCudaMirrorNHWC(const half *in, half* out, int batchSize, int ySize, int xSize, int cSize, bool mirrorY, bool mirrorX) { customCudaMirrorNHWCTemplate<half>(in,out,batchSize,ySize,xSize,cSize,mirrorY,mirrorX); } //-------------------------------------------------------------------------------------------------------------- __global__ void 
copyToHalfKernel(const float *in, half* out, int n) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if(idx < n) { out[idx] = __float2half(in[idx]); } } __global__ void copyFromHalfKernel(const half *in, float* out, int n) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if(idx < n) { out[idx] = __half2float(in[idx]); } } void customCudaCopyToHalf(const float* in, half* out, int n) { int blockSize = targetNumThreads; int numBlocks = (n+blockSize-1)/blockSize; hipLaunchKernelGGL(( copyToHalfKernel), dim3(numBlocks), dim3(blockSize), 0, 0, in,out,n); } void customCudaCopyFromHalf(const half* in, float* out, int n) { int blockSize = targetNumThreads; int numBlocks = (n+blockSize-1)/blockSize; hipLaunchKernelGGL(( copyFromHalfKernel), dim3(numBlocks), dim3(blockSize), 0, 0, in,out,n); } //-------------------------------------------------------------------------------------------------------------- #ifdef CUDA_SUPPORTS_FP16 __global__ void addTensorInplaceHalfKernel(half *buf, const half* biases, int nSize) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if(idx < nSize) { buf[idx] = __hadd(buf[idx],biases[idx]); } } #else __global__ void addTensorInplaceHalfKernel(half *buf, const half* biases, int nSize) { //Do nothing, FP16 not supported } #endif void customCudaAddTensorInplace(half* buf, const half* biases, int nSize) { int blockSize = targetNumThreads; int numBlocks = (nSize+blockSize-1)/blockSize; hipLaunchKernelGGL(( addTensorInplaceHalfKernel), dim3(numBlocks), dim3(blockSize), 0, 0, buf,biases,nSize); } //-------------------------------------------------------------------------------------------------------------- __global__ void addCBiasInplaceNCKernel(float *buf, const float* biases, int nSize, int cSize) { int cIdx = blockIdx.x * blockDim.x + threadIdx.x; int nIdx = blockIdx.y * blockDim.y + threadIdx.y; if(cIdx < cSize && nIdx < nSize) { int idx = nIdx * cSize + cIdx; buf[idx] = buf[idx] + biases[cIdx]; } } #ifdef CUDA_SUPPORTS_FP16 __global__ void addCBiasInplaceNCHalfKernel(half *buf, const half* biases, int nSize, int cSize) { int cIdx = blockIdx.x * blockDim.x + threadIdx.x; int nIdx = blockIdx.y * blockDim.y + threadIdx.y; if(cIdx < cSize && nIdx < nSize) { int idx = nIdx * cSize + cIdx; buf[idx] = __hadd(buf[idx],biases[cIdx]); } } #else __global__ void addCBiasInplaceNCHalfKernel(half *buf, const half* biases, int nSize, int cSize) { //Do nothing, FP16 not supported } #endif void sharedAddCBiasInplaceNC(void* buf, const void* biases, int nSize, int cSize, bool isHalf) { int cThreads; int cBlocks; int nThreads; int nBlocks; if(cSize > targetNumThreads) { cThreads = targetNumThreads/2; cBlocks = (cSize + cThreads - 1) / cThreads; nThreads = 1; nBlocks = nSize; } else if(cSize > targetNumThreads/2) { cThreads = cSize; cBlocks = 1; nThreads = 1; nBlocks = nSize; } else { cThreads = cSize; cBlocks = 1; nThreads = targetNumThreads / cSize; nBlocks = (nSize + nThreads - 1) / nThreads; } if(nBlocks > 65536) throw std::runtime_error("customCudaAddCBiasInplaceNC: nSize too large given cSize"); dim3 grid(cBlocks,nBlocks,1); dim3 threads(cThreads,nThreads,1); if(isHalf) hipLaunchKernelGGL(( addCBiasInplaceNCHalfKernel), dim3(grid),dim3(threads), 0, 0, (half*)buf,(const half*)biases,nSize,cSize); else hipLaunchKernelGGL(( addCBiasInplaceNCKernel), dim3(grid),dim3(threads), 0, 0, (float*)buf,(const float*)biases,nSize,cSize); } void customCudaAddCBiasInplaceNC(float* buf, const float* biases, int nSize, int cSize) { sharedAddCBiasInplaceNC(buf,biases,nSize,cSize,false); } void 
customCudaAddCBiasInplaceNC(half* buf, const half* biases, int nSize, int cSize) { sharedAddCBiasInplaceNC(buf,biases,nSize,cSize,true); } //-------------------------------------------------------------------------------------------------------------- __global__ void addNCBiasInplaceNCHWKernel(float *buf, const float* biases, int cSize, int sSize) { int sIdx = blockIdx.x * blockDim.x + threadIdx.x; int cIdx = blockIdx.y * blockDim.y + threadIdx.y; int nIdx = blockIdx.z; if(cIdx < cSize && sIdx < sSize) { int ncIdx = nIdx * cSize + cIdx; int idx = ncIdx * sSize + sIdx; buf[idx] = buf[idx] + biases[ncIdx]; } } #ifdef CUDA_SUPPORTS_FP16 __global__ void addNCBiasInplaceNCHWHalfKernel(half *buf, const half* biases, int cSize, int sSize) { int sIdx = blockIdx.x * blockDim.x + threadIdx.x; int cIdx = blockIdx.y * blockDim.y + threadIdx.y; int nIdx = blockIdx.z; if(cIdx < cSize && sIdx < sSize) { int ncIdx = nIdx * cSize + cIdx; int idx = ncIdx * sSize + sIdx; buf[idx] = __hadd(buf[idx],biases[ncIdx]); } } #else __global__ void addNCBiasInplaceNCHWHalfKernel(half *buf, const half* biases, int cSize, int sSize) { //Do nothing, FP16 not supported } #endif void sharedAddNCBiasInplaceNCHW(void *buf, const void* biases, int nSize, int cSize, int xySize, bool isHalf) { if(nSize > 65536) throw std::runtime_error("customCudaAddNCBiasInplaceNCHW: nSize too large"); if(cSize > 65536) throw std::runtime_error("customCudaAddNCBiasInplaceNCHW: cSize too large"); int sSize = xySize; int sThreads; int sBlocks; int cThreads; int cBlocks; if(sSize > targetNumThreads) { sThreads = targetNumThreads/2; sBlocks = (sSize + sThreads - 1) / sThreads; cThreads = 1; cBlocks = cSize; } else if(sSize > targetNumThreads/2) { sThreads = sSize; sBlocks = 1; cThreads = 1; cBlocks = cSize; } else { sThreads = sSize; sBlocks = 1; cThreads = targetNumThreads / sSize; cBlocks = (cSize + cThreads - 1) / cThreads; } dim3 grid(sBlocks,cBlocks,nSize); dim3 threads(sThreads,cThreads,1); if(isHalf) hipLaunchKernelGGL(( addNCBiasInplaceNCHWHalfKernel), dim3(grid),dim3(threads), 0, 0, (half*)buf,(const half*)biases,cSize,sSize); else hipLaunchKernelGGL(( addNCBiasInplaceNCHWKernel), dim3(grid),dim3(threads), 0, 0, (float*)buf,(const float*)biases,cSize,sSize); } void customCudaAddNCBiasInplaceNCHW(float *buf, const float* biases, int nSize, int cSize, int xySize) { sharedAddNCBiasInplaceNCHW(buf,biases,nSize,cSize,xySize,false); } void customCudaAddNCBiasInplaceNCHW(half *buf, const half* biases, int nSize, int cSize, int xySize) { sharedAddNCBiasInplaceNCHW(buf,biases,nSize,cSize,xySize,true); } //-------------------------------------------------------------------------------------------------------------- __global__ void addNCBiasInplaceNHWCKernel(float *buf, const float* biases, int sSize, int cSize) { int cIdx = blockIdx.x * blockDim.x + threadIdx.x; int sIdx = blockIdx.y * blockDim.y + threadIdx.y; int nIdx = blockIdx.z; if(cIdx < cSize && sIdx < sSize) { int ncIdx = nIdx * cSize + cIdx; int idx = (nIdx * sSize + sIdx) * cSize + cIdx; buf[idx] = buf[idx] + biases[ncIdx]; } } #ifdef CUDA_SUPPORTS_FP16 __global__ void addNCBiasInplaceNHWCHalfKernel(half *buf, const half* biases, int sSize, int cSize) { int cIdx = blockIdx.x * blockDim.x + threadIdx.x; int sIdx = blockIdx.y * blockDim.y + threadIdx.y; int nIdx = blockIdx.z; if(cIdx < cSize && sIdx < sSize) { int ncIdx = nIdx * cSize + cIdx; int idx = (nIdx * sSize + sIdx) * cSize + cIdx; buf[idx] = __hadd(buf[idx],biases[ncIdx]); } } #else __global__ void 
addNCBiasInplaceNHWCHalfKernel(half *buf, const half* biases, int sSize, int cSize) { //Do nothing, FP16 not supported } #endif void sharedAddNCBiasInplaceNHWC(void *buf, const void* biases, int nSize, int xySize, int cSize, bool isHalf) { if(nSize > 65536) throw std::runtime_error("customCudaAddNCBiasInplaceNHWC: nSize too large"); if(xySize > 65536) throw std::runtime_error("customCudaAddNCBiasInplaceNHWC: xySize too large"); int sSize = xySize; int cThreads; int cBlocks; int sThreads; int sBlocks; if(cSize > targetNumThreads) { cThreads = targetNumThreads/2; cBlocks = (cSize + cThreads - 1) / cThreads; sThreads = 1; sBlocks = sSize; } else if(cSize > targetNumThreads/2) { cThreads = cSize; cBlocks = 1; sThreads = 1; sBlocks = sSize; } else { cThreads = cSize; cBlocks = 1; sThreads = targetNumThreads / cSize; sBlocks = (sSize + sThreads - 1) / sThreads; } dim3 grid(cBlocks,sBlocks,nSize); dim3 threads(cThreads,sThreads,1); if(isHalf) hipLaunchKernelGGL(( addNCBiasInplaceNHWCHalfKernel), dim3(grid),dim3(threads), 0, 0, (half*)buf,(const half*)biases,sSize,cSize); else hipLaunchKernelGGL(( addNCBiasInplaceNHWCKernel), dim3(grid),dim3(threads), 0, 0, (float*)buf,(const float*)biases,sSize,cSize); } void customCudaAddNCBiasInplaceNHWC(float *buf, const float* biases, int nSize, int xySize, int cSize) { sharedAddNCBiasInplaceNHWC(buf,biases,nSize,xySize,cSize,false); } void customCudaAddNCBiasInplaceNHWC(half *buf, const half* biases, int nSize, int xySize, int cSize) { sharedAddNCBiasInplaceNHWC(buf,biases,nSize,xySize,cSize,true); } //-------------------------------------------------------------------------------------------------------------- __global__ void applyCScaleBiasNCHWKernel(const float *in, float* out, const float* scale, const float* biases, int cSize, int sSize) { int sIdx = blockIdx.x * blockDim.x + threadIdx.x; int cIdx = blockIdx.y * blockDim.y + threadIdx.y; int nIdx = blockIdx.z; if(cIdx < cSize && sIdx < sSize) { int idx = (nIdx * cSize + cIdx) * sSize + sIdx; out[idx] = in[idx] * scale[cIdx] + biases[cIdx]; } } __global__ void applyCScaleBiasNCHWReluKernel(const float *in, float* out, const float* scale, const float* biases, int cSize, int sSize) { int sIdx = blockIdx.x * blockDim.x + threadIdx.x; int cIdx = blockIdx.y * blockDim.y + threadIdx.y; int nIdx = blockIdx.z; if(cIdx < cSize && sIdx < sSize) { int idx = (nIdx * cSize + cIdx) * sSize + sIdx; out[idx] = fmaxf(in[idx] * scale[cIdx] + biases[cIdx],0.0f); } } __global__ void applyCScaleBiasNCHWMaskKernel(const float *in, float* out, const float* scale, const float* biases, const float* mask, int cSize, int sSize) { int sIdx = blockIdx.x * blockDim.x + threadIdx.x; int cIdx = blockIdx.y * blockDim.y + threadIdx.y; int nIdx = blockIdx.z; if(cIdx < cSize && sIdx < sSize) { int idx = (nIdx * cSize + cIdx) * sSize + sIdx; out[idx] = (in[idx] * scale[cIdx] + biases[cIdx]) * mask[nIdx*sSize+sIdx]; } } __global__ void applyCScaleBiasNCHWReluMaskKernel(const float *in, float* out, const float* scale, const float* biases, const float* mask, int cSize, int sSize) { int sIdx = blockIdx.x * blockDim.x + threadIdx.x; int cIdx = blockIdx.y * blockDim.y + threadIdx.y; int nIdx = blockIdx.z; if(cIdx < cSize && sIdx < sSize) { int idx = (nIdx * cSize + cIdx) * sSize + sIdx; out[idx] = fmaxf(in[idx] * scale[cIdx] + biases[cIdx],0.0f) * mask[nIdx*sSize+sIdx]; } } #ifdef CUDA_SUPPORTS_FP16 __global__ void applyCScaleBiasNCHWHalfKernel(const half *in, half* out, const half* scale, const half* biases, int cSize, int sSize) { int sIdx = 
blockIdx.x * blockDim.x + threadIdx.x; int cIdx = blockIdx.y * blockDim.y + threadIdx.y; int nIdx = blockIdx.z; if(cIdx < cSize && sIdx < sSize) { int idx = (nIdx * cSize + cIdx) * sSize + sIdx; out[idx] = __hfma(in[idx],scale[cIdx],biases[cIdx]); } } __global__ void applyCScaleBiasNCHWReluHalfKernel(const half *in, half* out, const half* scale, const half* biases, int cSize, int sSize) { int sIdx = blockIdx.x * blockDim.x + threadIdx.x; int cIdx = blockIdx.y * blockDim.y + threadIdx.y; int nIdx = blockIdx.z; if(cIdx < cSize && sIdx < sSize) { int idx = (nIdx * cSize + cIdx) * sSize + sIdx; half a = __hfma(in[idx],scale[cIdx],biases[cIdx]); const half halfzero = __float2half(0.0f); out[idx] = __hgt(a,halfzero) ? a : halfzero; } } __global__ void applyCScaleBiasNCHWMaskHalfKernel(const half *in, half* out, const half* scale, const half* biases, const half* mask, int cSize, int sSize) { int sIdx = blockIdx.x * blockDim.x + threadIdx.x; int cIdx = blockIdx.y * blockDim.y + threadIdx.y; int nIdx = blockIdx.z; if(cIdx < cSize && sIdx < sSize) { int idx = (nIdx * cSize + cIdx) * sSize + sIdx; out[idx] = __hmul(__hfma(in[idx],scale[cIdx],biases[cIdx]),mask[nIdx*sSize+sIdx]); } } __global__ void applyCScaleBiasNCHWReluMaskHalfKernel(const half *in, half* out, const half* scale, const half* biases, const half* mask, int cSize, int sSize) { int sIdx = blockIdx.x * blockDim.x + threadIdx.x; int cIdx = blockIdx.y * blockDim.y + threadIdx.y; int nIdx = blockIdx.z; if(cIdx < cSize && sIdx < sSize) { int idx = (nIdx * cSize + cIdx) * sSize + sIdx; half a = __hmul(__hfma(in[idx],scale[cIdx],biases[cIdx]),mask[nIdx*sSize+sIdx]); const half halfzero = __float2half(0.0f); out[idx] = __hgt(a,halfzero) ? a : halfzero; } } #else __global__ void applyCScaleBiasNCHWHalfKernel(const half *in, half* out, const half* scale, const half* biases, int cSize, int sSize) { //Do nothing, FP16 not supported } __global__ void applyCScaleBiasNCHWReluHalfKernel(const half *in, half* out, const half* scale, const half* biases, int cSize, int sSize) { //Do nothing, FP16 not supported } __global__ void applyCScaleBiasNCHWMaskHalfKernel(const half *in, half* out, const half* scale, const half* biases, const half* mask, int cSize, int sSize) { //Do nothing, FP16 not supported } __global__ void applyCScaleBiasNCHWReluMaskHalfKernel(const half *in, half* out, const half* scale, const half* biases, const half* mask, int cSize, int sSize) { //Do nothing, FP16 not supported } #endif void sharedApplyCScaleBiasNCHW(const void* in, void* out, const void* scale, const void* biases, const void* mask, int nSize, int cSize, int xySize, bool isHalf, bool applyRelu) { if(nSize > 65536) throw std::runtime_error("customCudaApplyCScaleBiasNCHW: nSize too large"); if(cSize > 65536) throw std::runtime_error("customCudaApplyCScaleBiasNCHW: cSize too large"); int sSize = xySize; int sThreads; int sBlocks; int cThreads; int cBlocks; if(sSize > targetNumThreads) { sThreads = targetNumThreads/2; sBlocks = (sSize + sThreads - 1) / sThreads; cThreads = 1; cBlocks = cSize; } else if(sSize > targetNumThreads/2) { sThreads = sSize; sBlocks = 1; cThreads = 1; cBlocks = cSize; } else { sThreads = sSize; sBlocks = 1; cThreads = targetNumThreads / sSize; cBlocks = (cSize + cThreads - 1) / cThreads; } dim3 grid(sBlocks,cBlocks,nSize); dim3 threads(sThreads,cThreads,1); if(mask == NULL) { if(applyRelu) { if(isHalf) hipLaunchKernelGGL(( applyCScaleBiasNCHWReluHalfKernel), dim3(grid),dim3(threads), 0, 0, (const half*)in,(half*)out,(const half*)scale,(const 
half*)biases,cSize,sSize); else hipLaunchKernelGGL(( applyCScaleBiasNCHWReluKernel), dim3(grid),dim3(threads), 0, 0, (const float*)in,(float*)out,(const float*)scale,(const float*)biases,cSize,sSize); } else { if(isHalf) hipLaunchKernelGGL(( applyCScaleBiasNCHWHalfKernel), dim3(grid),dim3(threads), 0, 0, (const half*)in,(half*)out,(const half*)scale,(const half*)biases,cSize,sSize); else hipLaunchKernelGGL(( applyCScaleBiasNCHWKernel), dim3(grid),dim3(threads), 0, 0, (const float*)in,(float*)out,(const float*)scale,(const float*)biases,cSize,sSize); } } else { if(applyRelu) { if(isHalf) hipLaunchKernelGGL(( applyCScaleBiasNCHWReluMaskHalfKernel), dim3(grid),dim3(threads), 0, 0, (const half*)in,(half*)out,(const half*)scale,(const half*)biases,(const half*)mask,cSize,sSize); else hipLaunchKernelGGL(( applyCScaleBiasNCHWReluMaskKernel), dim3(grid),dim3(threads), 0, 0, (const float*)in,(float*)out,(const float*)scale,(const float*)biases,(const float*)mask,cSize,sSize); } else { if(isHalf) hipLaunchKernelGGL(( applyCScaleBiasNCHWMaskHalfKernel), dim3(grid),dim3(threads), 0, 0, (const half*)in,(half*)out,(const half*)scale,(const half*)biases,(const half*)mask,cSize,sSize); else hipLaunchKernelGGL(( applyCScaleBiasNCHWMaskKernel), dim3(grid),dim3(threads), 0, 0, (const float*)in,(float*)out,(const float*)scale,(const float*)biases,(const float*)mask,cSize,sSize); } } } void customCudaApplyCScaleBiasNCHW(const float* in, float* out, const float* scale, const float* biases, const float* mask, int nSize, int cSize, int xySize, bool applyRelu) { sharedApplyCScaleBiasNCHW(in,out,scale,biases,mask,nSize,cSize,xySize,false,applyRelu); } void customCudaApplyCScaleBiasNCHW(const half* in, half* out, const half* scale, const half* biases, const half* mask, int nSize, int cSize, int xySize, bool applyRelu) { sharedApplyCScaleBiasNCHW(in,out,scale,biases,mask,nSize,cSize,xySize,true,applyRelu); } //-------------------------------------------------------------------------------------------------------------- __global__ void applyCScaleBiasNHWCKernel(const float* in, float* out, const float* scale, const float* biases, int sSize, int cSize) { int cIdx = blockIdx.x * blockDim.x + threadIdx.x; int sIdx = blockIdx.y * blockDim.y + threadIdx.y; int nIdx = blockIdx.z; if(cIdx < cSize && sIdx < sSize) { int idx = (nIdx * sSize + sIdx) * cSize + cIdx; out[idx] = in[idx] * scale[cIdx] + biases[cIdx]; } } __global__ void applyCScaleBiasNHWCReluKernel(const float* in, float* out, const float* scale, const float* biases, int sSize, int cSize) { int cIdx = blockIdx.x * blockDim.x + threadIdx.x; int sIdx = blockIdx.y * blockDim.y + threadIdx.y; int nIdx = blockIdx.z; if(cIdx < cSize && sIdx < sSize) { int idx = (nIdx * sSize + sIdx) * cSize + cIdx; out[idx] = fmaxf(in[idx] * scale[cIdx] + biases[cIdx],0.0f); } } __global__ void applyCScaleBiasNHWCMaskKernel(const float* in, float* out, const float* scale, const float* biases, const float* mask, int sSize, int cSize) { int cIdx = blockIdx.x * blockDim.x + threadIdx.x; int sIdx = blockIdx.y * blockDim.y + threadIdx.y; int nIdx = blockIdx.z; if(cIdx < cSize && sIdx < sSize) { int idx = (nIdx * sSize + sIdx) * cSize + cIdx; out[idx] = (in[idx] * scale[cIdx] + biases[cIdx]) * mask[nIdx*sSize+sIdx]; } } __global__ void applyCScaleBiasNHWCReluMaskKernel(const float* in, float* out, const float* scale, const float* biases, const float* mask, int sSize, int cSize) { int cIdx = blockIdx.x * blockDim.x + threadIdx.x; int sIdx = blockIdx.y * blockDim.y + threadIdx.y; int nIdx = 
blockIdx.z; if(cIdx < cSize && sIdx < sSize) { int idx = (nIdx * sSize + sIdx) * cSize + cIdx; out[idx] = fmaxf(in[idx] * scale[cIdx] + biases[cIdx],0.0f) * mask[nIdx*sSize+sIdx]; } } #ifdef CUDA_SUPPORTS_FP16 __global__ void applyCScaleBiasNHWCHalfKernel(const half* in, half* out, const half* scale, const half* biases, int sSize, int cSize) { int cIdx = blockIdx.x * blockDim.x + threadIdx.x; int sIdx = blockIdx.y * blockDim.y + threadIdx.y; int nIdx = blockIdx.z; if(cIdx < cSize && sIdx < sSize) { int idx = (nIdx * sSize + sIdx) * cSize + cIdx; out[idx] = __hfma(in[idx],scale[cIdx],biases[cIdx]); } } __global__ void applyCScaleBiasNHWCReluHalfKernel(const half* in, half* out, const half* scale, const half* biases, int sSize, int cSize) { int cIdx = blockIdx.x * blockDim.x + threadIdx.x; int sIdx = blockIdx.y * blockDim.y + threadIdx.y; int nIdx = blockIdx.z; if(cIdx < cSize && sIdx < sSize) { int idx = (nIdx * sSize + sIdx) * cSize + cIdx; half a = __hfma(in[idx],scale[cIdx],biases[cIdx]); const half halfzero = __float2half(0.0f); out[idx] = __hgt(a,halfzero) ? a : halfzero; } } __global__ void applyCScaleBiasNHWCMaskHalfKernel(const half* in, half* out, const half* scale, const half* biases, const half* mask, int sSize, int cSize) { int cIdx = blockIdx.x * blockDim.x + threadIdx.x; int sIdx = blockIdx.y * blockDim.y + threadIdx.y; int nIdx = blockIdx.z; if(cIdx < cSize && sIdx < sSize) { int idx = (nIdx * sSize + sIdx) * cSize + cIdx; out[idx] = __hmul(__hfma(in[idx],scale[cIdx],biases[cIdx]),mask[nIdx*sSize+sIdx]); } } __global__ void applyCScaleBiasNHWCReluMaskHalfKernel(const half* in, half* out, const half* scale, const half* biases, const half* mask, int sSize, int cSize) { int cIdx = blockIdx.x * blockDim.x + threadIdx.x; int sIdx = blockIdx.y * blockDim.y + threadIdx.y; int nIdx = blockIdx.z; if(cIdx < cSize && sIdx < sSize) { int idx = (nIdx * sSize + sIdx) * cSize + cIdx; half a = __hmul(__hfma(in[idx],scale[cIdx],biases[cIdx]),mask[nIdx*sSize+sIdx]); const half halfzero = __float2half(0.0f); out[idx] = __hgt(a,halfzero) ? 
a : halfzero; } } #else __global__ void applyCScaleBiasNHWCHalfKernel(const half* in, half* out, const half* scale, const half* biases, int sSize, int cSize) { //Do nothing, FP16 not supported } __global__ void applyCScaleBiasNHWCReluHalfKernel(const half* in, half* out, const half* scale, const half* biases, int sSize, int cSize) { //Do nothing, FP16 not supported } __global__ void applyCScaleBiasNHWCMaskHalfKernel(const half* in, half* out, const half* scale, const half* biases, const half* mask, int sSize, int cSize) { //Do nothing, FP16 not supported } __global__ void applyCScaleBiasNHWCReluMaskHalfKernel(const half* in, half* out, const half* scale, const half* biases, const half* mask, int sSize, int cSize) { //Do nothing, FP16 not supported } #endif void sharedApplyCScaleBiasNHWC(const void* in, void* out, const void* scale, const void* biases, const void* mask, int nSize, int xySize, int cSize, bool isHalf, bool applyRelu) { if(nSize > 65536) throw std::runtime_error("customCudaApplyCScaleBiasNHWC: nSize too large"); if(xySize > 65536) throw std::runtime_error("customCudaApplyCScaleBiasNHWC: xySize too large"); int sSize = xySize; int cThreads; int cBlocks; int sThreads; int sBlocks; if(cSize > targetNumThreads) { cThreads = targetNumThreads/2; cBlocks = (cSize + cThreads - 1) / cThreads; sThreads = 1; sBlocks = sSize; } else if(cSize > targetNumThreads/2) { cThreads = cSize; cBlocks = 1; sThreads = 1; sBlocks = sSize; } else { cThreads = cSize; cBlocks = 1; sThreads = targetNumThreads / cSize; sBlocks = (sSize + sThreads - 1) / sThreads; } dim3 grid(cBlocks,sBlocks,nSize); dim3 threads(cThreads,sThreads,1); if(mask == NULL) { if(applyRelu) { if(isHalf) hipLaunchKernelGGL(( applyCScaleBiasNHWCReluHalfKernel), dim3(grid),dim3(threads), 0, 0, (const half*)in,(half*)out,(const half*)scale,(const half*)biases,sSize,cSize); else hipLaunchKernelGGL(( applyCScaleBiasNHWCReluKernel), dim3(grid),dim3(threads), 0, 0, (const float*)in,(float*)out,(const float*)scale,(const float*)biases,sSize,cSize); } else { if(isHalf) hipLaunchKernelGGL(( applyCScaleBiasNHWCHalfKernel), dim3(grid),dim3(threads), 0, 0, (const half*)in,(half*)out,(const half*)scale,(const half*)biases,sSize,cSize); else hipLaunchKernelGGL(( applyCScaleBiasNHWCKernel), dim3(grid),dim3(threads), 0, 0, (const float*)in,(float*)out,(const float*)scale,(const float*)biases,sSize,cSize); } } else { if(applyRelu) { if(isHalf) hipLaunchKernelGGL(( applyCScaleBiasNHWCReluMaskHalfKernel), dim3(grid),dim3(threads), 0, 0, (const half*)in,(half*)out,(const half*)scale,(const half*)biases,(const half*)mask,sSize,cSize); else hipLaunchKernelGGL(( applyCScaleBiasNHWCReluMaskKernel), dim3(grid),dim3(threads), 0, 0, (const float*)in,(float*)out,(const float*)scale,(const float*)biases,(const float*)mask,sSize,cSize); } else { if(isHalf) hipLaunchKernelGGL(( applyCScaleBiasNHWCMaskHalfKernel), dim3(grid),dim3(threads), 0, 0, (const half*)in,(half*)out,(const half*)scale,(const half*)biases,(const half*)mask,sSize,cSize); else hipLaunchKernelGGL(( applyCScaleBiasNHWCMaskKernel), dim3(grid),dim3(threads), 0, 0, (const float*)in,(float*)out,(const float*)scale,(const float*)biases,(const float*)mask,sSize,cSize); } } } void customCudaApplyCScaleBiasNHWC(const float* in, float* out, const float* scale, const float* biases, const float* mask, int nSize, int xySize, int cSize, bool applyRelu) { sharedApplyCScaleBiasNHWC(in,out,scale,biases,mask,nSize,xySize,cSize,false,applyRelu); } void customCudaApplyCScaleBiasNHWC(const half* in, half* out, const 
half* scale, const half* biases, const half* mask, int nSize, int xySize, int cSize, bool applyRelu) { sharedApplyCScaleBiasNHWC(in,out,scale,biases,mask,nSize,xySize,cSize,true,applyRelu); }
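A minimal host-side usage sketch for the scale/bias helpers defined above, assuming fp32 NCHW data and no mask; the wrapper name exampleApplyScaleBias, the host buffers, and the choice of applyRelu=true are illustrative only and do not appear in the original sources. Error checking and stream handling are omitted.

#include <hip/hip_runtime.h>

// Declared/defined earlier in this file; repeated here so the sketch is self-contained.
void customCudaApplyCScaleBiasNCHW(const float* in, float* out, const float* scale,
                                   const float* biases, const float* mask,
                                   int nSize, int cSize, int xySize, bool applyRelu);

// Hypothetical example: out[n][c][s] = relu(in[n][c][s] * scale[c] + biases[c]).
void exampleApplyScaleBias(const float* hostIn, float* hostOut,
                           const float* hostScale, const float* hostBias,
                           int nSize, int cSize, int xySize) {
  size_t tensorBytes = (size_t)nSize * cSize * xySize * sizeof(float);
  size_t chanBytes = (size_t)cSize * sizeof(float);
  float *in, *out, *scale, *bias;
  hipMalloc((void**)&in, tensorBytes);
  hipMalloc((void**)&out, tensorBytes);
  hipMalloc((void**)&scale, chanBytes);
  hipMalloc((void**)&bias, chanBytes);
  hipMemcpy(in, hostIn, tensorBytes, hipMemcpyHostToDevice);
  hipMemcpy(scale, hostScale, chanBytes, hipMemcpyHostToDevice);
  hipMemcpy(bias, hostBias, chanBytes, hipMemcpyHostToDevice);
  // mask == NULL selects the unmasked kernel path; applyRelu fuses the ReLU.
  customCudaApplyCScaleBiasNCHW(in, out, scale, bias, NULL, nSize, cSize, xySize, true);
  hipMemcpy(hostOut, out, tensorBytes, hipMemcpyDeviceToHost);
  hipFree(in); hipFree(out); hipFree(scale); hipFree(bias);
}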
867620b8c7551e5be30c8577ccc75817991cedee.cu
#include "../neuralnet/cudahelpers.h" #include <stdexcept> #if __CUDA_ARCH__ >= 530 #define CUDA_SUPPORTS_FP16 #endif //TODO maybe tune this number, it varies by GPU static const int targetNumThreads = 512; //-------------------------------------------------------------------------------------------------------------- template <typename T> __global__ void channelConcatKernel( const T* inA, const T* inB, T* out, int chwA, int chwB, int numBlocksA, int numBlocksB, int n ) { if(blockIdx.x < numBlocksA) { int index = blockIdx.x * blockDim.x + threadIdx.x; if(index < chwA) { int nchwA = n*chwA; int chwOut = (chwA+chwB); int aIdx = index; int outIdx = index; while(aIdx < nchwA) { out[outIdx] = inA[aIdx]; aIdx += chwA; outIdx += chwOut; } } } else { int index = (blockIdx.x - numBlocksA) * blockDim.x + threadIdx.x; if(index < chwB) { int nchwB = n*chwB; int chwOut = (chwA+chwB); int bIdx = index; int outIdx = chwA+index; while(bIdx < nchwB) { out[outIdx] = inB[bIdx]; bIdx += chwB; outIdx += chwOut; } } } } template <typename T> void customCudaChannelConcatTemplate(const T* inA, const T* inB, T* out, int chwA, int chwB, int n) { int blockSize = targetNumThreads; int numBlocksA = (chwA + blockSize-1) / blockSize; int numBlocksB = (chwB + blockSize-1) / blockSize; int numBlocks = numBlocksA + numBlocksB; channelConcatKernel<<<numBlocks, blockSize>>>(inA,inB,out,chwA,chwB,numBlocksA,numBlocksB,n); } template void customCudaChannelConcatTemplate<float>(const float* inA, const float* inB, float* out, int chwA, int chwB, int n); template void customCudaChannelConcatTemplate<half>(const half* inA, const half* inB, half* out, int chwA, int chwB, int n); void customCudaChannelConcat(const float* inA, const float* inB, float* out, int chwA, int chwB, int n) { customCudaChannelConcatTemplate<float>(inA,inB,out,chwA,chwB,n); } void customCudaChannelConcat(const half* inA, const half* inB, half* out, int chwA, int chwB, int n) { customCudaChannelConcatTemplate<half>(inA,inB,out,chwA,chwB,n); } //-------------------------------------------------------------------------------------------------------------- template <typename T> __global__ void extractChannel0KernelNHWC(const T *in, T* out, int nhwSize, int cSize) { int nhwIdx = blockIdx.x * blockDim.x + threadIdx.x; if(nhwIdx < nhwSize) { out[nhwIdx] = in[nhwIdx*cSize]; } } template <typename T> void customCudaChannel0ExtractNHWCTemplate(const T *in, T* out, int n, int hw, int c) { int nhw = n*hw; int blockSize = targetNumThreads; int numBlocks = (nhw+blockSize-1)/blockSize; extractChannel0KernelNHWC<<<numBlocks,blockSize>>>(in,out,nhw,c); } template <typename T> __global__ void extractChannel0KernelNCHW(const T *in, T* out, int nSize, int cSize, int hwSize) { int hwIdx = blockIdx.x * blockDim.x + threadIdx.x; int nIdx = blockIdx.y * blockDim.y + threadIdx.y; if(hwIdx < hwSize && nIdx < nSize) { out[nIdx * hwSize + hwIdx] = in[nIdx * cSize * hwSize + hwIdx]; } } template <typename T> void customCudaChannel0ExtractNCHWTemplate(const T *in, T* out, int nSize, int cSize, int hwSize) { int hwThreads; int hwBlocks; int nThreads; int nBlocks; if(hwSize > targetNumThreads) { hwThreads = targetNumThreads/2; hwBlocks = (hwSize + hwThreads - 1) / hwThreads; nThreads = 1; nBlocks = nSize; } else if(hwSize > targetNumThreads/2) { hwThreads = hwSize; hwBlocks = 1; nThreads = 1; nBlocks = nSize; } else { hwThreads = hwSize; hwBlocks = 1; nThreads = targetNumThreads / hwSize; nBlocks = (nSize + nThreads - 1) / nThreads; } if(nBlocks > 65536) throw 
std::runtime_error("customCudaChannel0ExtractNCHW: nSize too large given hwSize"); dim3 grid(hwBlocks,nBlocks,1); dim3 threads(hwThreads,nThreads,1); extractChannel0KernelNCHW<<<grid,threads>>>(in,out,nSize,cSize,hwSize); } void customCudaChannel0ExtractNCHW(const float* in, float* out, int n, int c, int hw) { customCudaChannel0ExtractNCHWTemplate<float>(in,out,n,c,hw); } void customCudaChannel0ExtractNCHW(const half* in, half* out, int n, int c, int hw) { customCudaChannel0ExtractNCHWTemplate<half>(in,out,n,c,hw); } void customCudaChannel0ExtractNHWC(const float* in, float* out, int n, int hw, int c) { customCudaChannel0ExtractNHWCTemplate<float>(in,out,n,hw,c); } void customCudaChannel0ExtractNHWC(const half* in, half* out, int n, int hw, int c) { customCudaChannel0ExtractNHWCTemplate<half>(in,out,n,hw,c); } //-------------------------------------------------------------------------------------------------------------- // template <typename T> // struct linear_index_to_row_index : public thrust::unary_function<T,T> { // T len; // __host__ __device__ linear_index_to_row_index(T len) : len(len) {} // __host__ __device__ T operator()(T i) { return i / len; } // }; // void customCudaPoolRowsSumNCHW(float* in, float* out, int nc, int xy) { // thrust::device_ptr<float> inThrust = thrust::device_pointer_cast(in); // thrust::device_ptr<float> outThrust = thrust::device_pointer_cast(out); // thrust::reduce_by_key( // thrust::make_transform_iterator(thrust::counting_iterator<int>(0), linear_index_to_row_index<int>(xy)), // thrust::make_transform_iterator(thrust::counting_iterator<int>(0), linear_index_to_row_index<int>(xy)) + (nc*xy), // inThrust, // thrust::make_discard_iterator(), // outThrust // ); // } // void customCudaPoolRowsMaxNCHW(float* in, float* out, int nc, int xy) { // thrust::device_ptr<float> inThrust = thrust::device_pointer_cast(in); // thrust::device_ptr<float> outThrust = thrust::device_pointer_cast(out); // thrust::reduce_by_key( // thrust::make_transform_iterator(thrust::counting_iterator<int>(0), linear_index_to_row_index<int>(xy)), // thrust::make_transform_iterator(thrust::counting_iterator<int>(0), linear_index_to_row_index<int>(xy)) + (nc*xy), // inThrust, // thrust::make_discard_iterator(), // outThrust, // thrust::equal_to<int>(), // thrust::maximum<float>() // ); // } __global__ void sumChannelsNCHWKernel(const float* in, float* out, int cSize, int xySize, float scaleSum) { extern __shared__ float sumPoolNCHWShared[]; int xyId = threadIdx.x; int xyBlockDim = blockDim.x; int cId = threadIdx.y; int cBlockDim = blockDim.y; int cIdx = blockIdx.y * cBlockDim + cId; int nIdx = blockIdx.z; int xycSize = xySize*cSize; int sharedIdx = xyId + cId * xyBlockDim; float acc = 0.0f; if(cIdx < cSize) { int xyIdx = xyId; while(xyIdx < xySize) { acc += in[xyIdx + cIdx * xySize + nIdx * xycSize]; xyIdx += xyBlockDim; } sumPoolNCHWShared[sharedIdx] = acc; } __syncthreads(); for(int s = xyBlockDim>>1; s > 0; s >>= 1) { if(xyId < s) { sumPoolNCHWShared[sharedIdx] += sumPoolNCHWShared[sharedIdx + s]; } __syncthreads(); } if(xyId == 0 && cIdx < cSize) out[cIdx + nIdx * cSize] = sumPoolNCHWShared[sharedIdx] * scaleSum; } __global__ void valueHeadPoolChannelsNCHWKernel(const float* in, float* out, int nSize, int cSize, int xySize, const float* maskSum) { extern __shared__ float sumPoolNCHWShared[]; int xyId = threadIdx.x; int xyBlockDim = blockDim.x; int cId = threadIdx.y; int cBlockDim = blockDim.y; int cIdx = blockIdx.y * cBlockDim + cId; int nIdx = blockIdx.z; int xycSize = xySize*cSize; 
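  // Reduction plan: each thread strides over the xy positions of one channel accumulating a
  // partial sum, the partials are tree-reduced across the xy threads in shared memory, and
  // thread xyId==0 writes three values per channel: the mean (the per-channel sum divided by
  // maskSum[n]), the mean scaled by (sqrt(maskSum)-14)*0.1, and the mean scaled by
  // ((sqrt(maskSum)-14)^2*0.01 - 0.1). Illustrative case (assuming a fully-unmasked 19x19
  // board, maskSum=361, sqrt=19): the two extra scale factors come out to 0.5 and 0.15.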
int sharedIdx = xyId + cId * xyBlockDim; float acc = 0.0f; if(cIdx < cSize) { int xyIdx = xyId; while(xyIdx < xySize) { acc += in[xyIdx + cIdx * xySize + nIdx * xycSize]; xyIdx += xyBlockDim; } sumPoolNCHWShared[sharedIdx] = acc; } __syncthreads(); for(int s = xyBlockDim>>1; s > 0; s >>= 1) { if(xyId < s) { sumPoolNCHWShared[sharedIdx] += sumPoolNCHWShared[sharedIdx + s]; } __syncthreads(); } if(xyId == 0 && cIdx < cSize) { float sum = sumPoolNCHWShared[sharedIdx]; float div = maskSum[nIdx]; float sqrtdiv = sqrt(div); float mean = sum/div; out[cIdx + nIdx * cSize*3] = mean; out[cIdx + nIdx * cSize*3 + cSize] = mean * (sqrtdiv - 14.0f) * 0.1f; out[cIdx + nIdx * cSize*3 + cSize*2] = mean * ((sqrtdiv - 14.0f) * (sqrtdiv - 14.0f) * 0.01f - 0.1f); } } __global__ void maxPositiveChannelsNCHWKernel(const float* in, float* out, int cSize, int xySize) { extern __shared__ float maxPoolNCHWShared[]; int xyId = threadIdx.x; int xyBlockDim = blockDim.x; int cId = threadIdx.y; int cBlockDim = blockDim.y; int cIdx = blockIdx.y * cBlockDim + cId; int nIdx = blockIdx.z; int xycSize = xySize*cSize; int sharedIdx = xyId + cId * xyBlockDim; if(cIdx < cSize) { float acc = 0.0f; int xyIdx = xyId; while(xyIdx < xySize) { acc = fmaxf(acc, in[xyIdx + cIdx * xySize + nIdx * xycSize]); xyIdx += xyBlockDim; } maxPoolNCHWShared[sharedIdx] = acc; } __syncthreads(); for(int s = xyBlockDim>>1; s > 0; s >>= 1) { if(xyId < s) { maxPoolNCHWShared[sharedIdx] = fmaxf(maxPoolNCHWShared[sharedIdx], maxPoolNCHWShared[sharedIdx + s]); } __syncthreads(); } if(xyId == 0 && cIdx < cSize) out[cIdx + nIdx * cSize] = maxPoolNCHWShared[sharedIdx]; } __global__ void sumAndMaxPositiveChannelsNCHWKernel(const float* in, float* out, int cSize, int xySize, float scaleSum, int sharedMemElts) { extern __shared__ float poolNCHWShared[]; float* sumShared = (float*)poolNCHWShared; float* maxShared = (float*)poolNCHWShared + sharedMemElts; int xyId = threadIdx.x; int xyBlockDim = blockDim.x; int cId = threadIdx.y; int cBlockDim = blockDim.y; int cIdx = blockIdx.y * cBlockDim + cId; int nIdx = blockIdx.z; int xycSize = xySize*cSize; int sharedIdx = xyId + cId * xyBlockDim; if(cIdx < cSize) { float accSum = 0.0f; float accMax = 0.0f; int xyIdx = xyId; while(xyIdx < xySize) { float a = in[xyIdx + cIdx * xySize + nIdx * xycSize]; accSum += a; accMax = fmaxf(accMax, a); xyIdx += xyBlockDim; } sumShared[sharedIdx] = accSum; maxShared[sharedIdx] = accMax; } __syncthreads(); for(int s = xyBlockDim>>1; s > 0; s >>= 1) { if(xyId < s) { sumShared[sharedIdx] += sumShared[sharedIdx + s]; maxShared[sharedIdx] = fmaxf(maxShared[sharedIdx], maxShared[sharedIdx + s]); } __syncthreads(); } if(xyId == 0 && cIdx < cSize) { out[cIdx + nIdx * (cSize*2)] = sumShared[sharedIdx] * scaleSum; out[cIdx + nIdx * (cSize*2) + cSize] = maxShared[sharedIdx]; } } __global__ void gPoolChannelsNCHWKernel(const float* in, float* out, int cSize, int xySize, const float* maskSum, int sharedMemElts) { extern __shared__ float poolNCHWShared[]; float* sumShared = (float*)poolNCHWShared; float* maxShared = (float*)poolNCHWShared + sharedMemElts; int xyId = threadIdx.x; int xyBlockDim = blockDim.x; int cId = threadIdx.y; int cBlockDim = blockDim.y; int cIdx = blockIdx.y * cBlockDim + cId; int nIdx = blockIdx.z; int xycSize = xySize*cSize; int sharedIdx = xyId + cId * xyBlockDim; if(cIdx < cSize) { float accSum = 0.0f; float accMax = 0.0f; int xyIdx = xyId; while(xyIdx < xySize) { float a = in[xyIdx + cIdx * xySize + nIdx * xycSize]; accSum += a; accMax = fmaxf(accMax, a); xyIdx += 
xyBlockDim; } sumShared[sharedIdx] = accSum; maxShared[sharedIdx] = accMax; } __syncthreads(); for(int s = xyBlockDim>>1; s > 0; s >>= 1) { if(xyId < s) { sumShared[sharedIdx] += sumShared[sharedIdx + s]; maxShared[sharedIdx] = fmaxf(maxShared[sharedIdx], maxShared[sharedIdx + s]); } __syncthreads(); } if(xyId == 0 && cIdx < cSize) { float sum = sumShared[sharedIdx]; float div = maskSum[nIdx]; float sqrtdiv = sqrt(div); float mean = sum/div; out[cIdx + nIdx * (cSize*3)] = mean; out[cIdx + nIdx * (cSize*3) + cSize] = mean * (sqrtdiv - 14.0f) * 0.1f; out[cIdx + nIdx * (cSize*3) + cSize*2] = maxShared[sharedIdx]; } } void customCudaPoolRowsSumNCHW(const float* in, float* out, int nSize, int cSize, int xySize, float scaleSum) { if(nSize > 65536) throw std::runtime_error("customCudaPoolRowsSumNCHW: nSize too large"); if(cSize > 65536) throw std::runtime_error("customCudaPoolRowsSumNCHW: cSize too large"); //Use up as many threads as possible along the xy dimension. int xyThreads = 1; while(xyThreads < targetNumThreads && xyThreads < xySize/2) xyThreads *= 2; //Distribute the extra threads along the c dimension. int cThreads = (targetNumThreads < xyThreads) ? 1 : (targetNumThreads / xyThreads); int cBlocks = (cSize + cThreads - 1) / cThreads; //We need one shared memory spot per thread int sharedMemSize = sizeof(float) * cThreads * xyThreads; dim3 grid(1,cBlocks,nSize); dim3 threads(xyThreads,cThreads,1); sumChannelsNCHWKernel<<<grid,threads,sharedMemSize>>>(in,out,cSize,xySize,scaleSum); } void customCudaValueHeadPoolNCHW(const float* in, float* out, int nSize, int cSize, int xySize, const float* maskSum) { if(nSize > 65536) throw std::runtime_error("customCudaValueHeadPoolNCHW: nSize too large"); if(cSize > 65536) throw std::runtime_error("customCudaValueHeadPoolNCHW: cSize too large"); //Use up as many threads as possible along the xy dimension. int xyThreads = 1; while(xyThreads < targetNumThreads && xyThreads < xySize/2) xyThreads *= 2; //Distribute the extra threads along the c dimension. int cThreads = (targetNumThreads < xyThreads) ? 1 : (targetNumThreads / xyThreads); int cBlocks = (cSize + cThreads - 1) / cThreads; //We need one shared memory spot per thread int sharedMemSize = sizeof(float) * cThreads * xyThreads; dim3 grid(1,cBlocks,nSize); dim3 threads(xyThreads,cThreads,1); valueHeadPoolChannelsNCHWKernel<<<grid,threads,sharedMemSize>>>(in,out,nSize,cSize,xySize,maskSum); } void customCudaPoolRowsMaxPositiveNCHW(const float* in, float* out, int nSize, int cSize, int xySize) { if(nSize > 65536) throw std::runtime_error("customCudaPoolRowsMaxPositiveNCHW: nSize too large"); if(cSize > 65536) throw std::runtime_error("customCudaPoolRowsMaxPositiveNCHW: cSize too large"); //Use up as many threads as possible along the xy dimension. int xyThreads = 1; while(xyThreads < targetNumThreads && xyThreads < xySize/2) xyThreads *= 2; //Distribute the extra threads along the c dimension. int cThreads = (targetNumThreads < xyThreads) ? 
1 : (targetNumThreads / xyThreads); int cBlocks = (cSize + cThreads - 1) / cThreads; //We need one shared memory spot per thread int sharedMemSize = sizeof(float) * cThreads * xyThreads; dim3 grid(1,cBlocks,nSize); dim3 threads(xyThreads,cThreads,1); maxPositiveChannelsNCHWKernel<<<grid,threads,sharedMemSize>>>(in,out,cSize,xySize); } void customCudaPoolRowsSumAndMaxPositiveNCHW(const float* in, float* out, int nSize, int cSize, int xySize, float scaleSum) { if(nSize > 65536) throw std::runtime_error("customCudaPoolRowsSumAndMaxPositiveNCHW: nSize too large"); if(cSize > 65536) throw std::runtime_error("customCudaPoolRowsSumAndMaxPositiveNCHW: cSize too large"); //Use up as many threads as possible along the xy dimension. int xyThreads = 1; while(xyThreads < targetNumThreads && xyThreads < xySize/2) xyThreads *= 2; //Distribute the extra threads along the c dimension. int cThreads = (targetNumThreads < xyThreads) ? 1 : (targetNumThreads / xyThreads); int cBlocks = (cSize + cThreads - 1) / cThreads; //We need one shared memory spot per thread, and then we double it because we need both sum and max. //We also make sure it's a power of two to address any alignment concerns. int sharedMemElts = 128; while(sharedMemElts < cThreads * xyThreads) sharedMemElts *= 2; int sharedMemSize = sizeof(float) * sharedMemElts * 2; dim3 grid(1,cBlocks,nSize); dim3 threads(xyThreads,cThreads,1); sumAndMaxPositiveChannelsNCHWKernel<<<grid,threads,sharedMemSize>>>(in,out,cSize,xySize,scaleSum,sharedMemElts); } void customCudaPoolRowsGPoolNCHW(const float* in, float* out, int nSize, int cSize, int xySize, const float* maskSum) { if(nSize > 65536) throw std::runtime_error("customCudaPoolRowsGPoolNCHW: nSize too large"); if(cSize > 65536) throw std::runtime_error("customCudaPoolRowsGPoolNCHW: cSize too large"); //Use up as many threads as possible along the xy dimension. int xyThreads = 1; while(xyThreads < targetNumThreads && xyThreads < xySize/2) xyThreads *= 2; //Distribute the extra threads along the c dimension. int cThreads = (targetNumThreads < xyThreads) ? 1 : (targetNumThreads / xyThreads); int cBlocks = (cSize + cThreads - 1) / cThreads; //We need one shared memory spot per thread, and then we double it because we need both sum and max. //We also make sure it's a power of two to address any alignment concerns. 
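  // Illustrative sizing (assuming a 19x19 board, xySize=361, with targetNumThreads=512):
  // xyThreads grows to 256 and cThreads becomes 2, so cThreads*xyThreads = 512; sharedMemElts
  // below rounds up to 512 and sharedMemSize = 4 * 512 * 2 = 4096 bytes per block, half for
  // the running sums and half for the running maxes.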
int sharedMemElts = 128; while(sharedMemElts < cThreads * xyThreads) sharedMemElts *= 2; int sharedMemSize = sizeof(float) * sharedMemElts * 2; dim3 grid(1,cBlocks,nSize); dim3 threads(xyThreads,cThreads,1); gPoolChannelsNCHWKernel<<<grid,threads,sharedMemSize>>>(in,out,cSize,xySize,maskSum,sharedMemElts); } //-------------------------------------------------------------------------------------------------------------- #ifdef CUDA_SUPPORTS_FP16 __global__ void sumAndMaxPositiveChannelsNCHWHalfKernel(const half* in, half* out, int cSize, int xySize, float scaleSum, int sharedMemElts) { extern __shared__ float poolNCHWShared[]; float* sumShared = (float*)poolNCHWShared; float* maxShared = (float*)poolNCHWShared + sharedMemElts; int xyId = threadIdx.x; int xyBlockDim = blockDim.x; int cId = threadIdx.y; int cBlockDim = blockDim.y; int cIdx = blockIdx.y * cBlockDim + cId; int nIdx = blockIdx.z; int xycSize = xySize*cSize; int sharedIdx = xyId + cId * xyBlockDim; if(cIdx < cSize) { float accSum = 0.0f; float accMax = 0.0f; int xyIdx = xyId; while(xyIdx < xySize) { float a = __half2float(in[xyIdx + cIdx * xySize + nIdx * xycSize]); accSum += a; accMax = fmaxf(accMax, a); xyIdx += xyBlockDim; } sumShared[sharedIdx] = accSum; maxShared[sharedIdx] = accMax; } __syncthreads(); for(int s = xyBlockDim>>1; s > 0; s >>= 1) { if(xyId < s) { sumShared[sharedIdx] += sumShared[sharedIdx + s]; maxShared[sharedIdx] = fmaxf(maxShared[sharedIdx], maxShared[sharedIdx + s]); } __syncthreads(); } if(xyId == 0 && cIdx < cSize) { out[cIdx + nIdx * (cSize*2)] = __float2half(sumShared[sharedIdx] * scaleSum); out[cIdx + nIdx * (cSize*2) + cSize] = __float2half(maxShared[sharedIdx]); } } __global__ void gPoolChannelsNCHWHalfKernel(const half* in, half* out, int cSize, int xySize, const float* maskSum, int sharedMemElts) { extern __shared__ float poolNCHWShared[]; float* sumShared = (float*)poolNCHWShared; float* maxShared = (float*)poolNCHWShared + sharedMemElts; int xyId = threadIdx.x; int xyBlockDim = blockDim.x; int cId = threadIdx.y; int cBlockDim = blockDim.y; int cIdx = blockIdx.y * cBlockDim + cId; int nIdx = blockIdx.z; int xycSize = xySize*cSize; int sharedIdx = xyId + cId * xyBlockDim; if(cIdx < cSize) { float accSum = 0.0f; float accMax = 0.0f; int xyIdx = xyId; while(xyIdx < xySize) { float a = __half2float(in[xyIdx + cIdx * xySize + nIdx * xycSize]); accSum += a; accMax = fmaxf(accMax, a); xyIdx += xyBlockDim; } sumShared[sharedIdx] = accSum; maxShared[sharedIdx] = accMax; } __syncthreads(); for(int s = xyBlockDim>>1; s > 0; s >>= 1) { if(xyId < s) { sumShared[sharedIdx] += sumShared[sharedIdx + s]; maxShared[sharedIdx] = fmaxf(maxShared[sharedIdx], maxShared[sharedIdx + s]); } __syncthreads(); } if(xyId == 0 && cIdx < cSize) { float sum = sumShared[sharedIdx]; float div = maskSum[nIdx]; float sqrtdiv = sqrt(div); float mean = sum/div; out[cIdx + nIdx * (cSize*3)] = __float2half(mean); out[cIdx + nIdx * (cSize*3) + cSize] = __float2half(mean * (sqrtdiv - 14.0f) * 0.1f); out[cIdx + nIdx * (cSize*3) + cSize*2] = __float2half(maxShared[sharedIdx]); } } #else __global__ void sumAndMaxPositiveChannelsNCHWHalfKernel(const half* in, half* out, int cSize, int xySize, float scaleSum, int sharedMemElts) { //Do nothing, FP16 not supported } __global__ void gPoolChannelsNCHWHalfKernel(const half* in, half* out, int cSize, int xySize, const float* maskSum, int sharedMemElts) { //Do nothing, FP16 not supported } #endif void customCudaPoolRowsSumAndMaxPositiveNCHW(const half* in, half* out, int nSize, int cSize, int 
xySize, float scaleSum) { if(nSize > 65536) throw std::runtime_error("customCudaPoolRowsSumAndMaxPositiveNCHW: nSize too large"); if(cSize > 65536) throw std::runtime_error("customCudaPoolRowsSumAndMaxPositiveNCHW: cSize too large"); //Use up as many threads as possible along the xy dimension. int xyThreads = 1; while(xyThreads < targetNumThreads && xyThreads < xySize/2) xyThreads *= 2; //Distribute the extra threads along the c dimension. int cThreads = (targetNumThreads < xyThreads) ? 1 : (targetNumThreads / xyThreads); int cBlocks = (cSize + cThreads - 1) / cThreads; //We need one shared memory spot per thread, and then we double it because we need both sum and max. //We also make sure it's a power of two to address any alignment concerns. int sharedMemElts = 128; while(sharedMemElts < cThreads * xyThreads) sharedMemElts *= 2; int sharedMemSize = sizeof(float) * sharedMemElts * 2; dim3 grid(1,cBlocks,nSize); dim3 threads(xyThreads,cThreads,1); sumAndMaxPositiveChannelsNCHWHalfKernel<<<grid,threads,sharedMemSize>>>(in,out,cSize,xySize,scaleSum,sharedMemElts); } void customCudaPoolRowsGPoolNCHW(const half* in, half* out, int nSize, int cSize, int xySize, const float* maskSum) { if(nSize > 65536) throw std::runtime_error("customCudaPoolRowsGPoolNCHW: nSize too large"); if(cSize > 65536) throw std::runtime_error("customCudaPoolRowsGPoolNCHW: cSize too large"); //Use up as many threads as possible along the xy dimension. int xyThreads = 1; while(xyThreads < targetNumThreads && xyThreads < xySize/2) xyThreads *= 2; //Distribute the extra threads along the c dimension. int cThreads = (targetNumThreads < xyThreads) ? 1 : (targetNumThreads / xyThreads); int cBlocks = (cSize + cThreads - 1) / cThreads; //We need one shared memory spot per thread, and then we double it because we need both sum and max. //We also make sure it's a power of two to address any alignment concerns. 
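  // Note: the half-precision pooling kernels above still accumulate in fp32 shared memory
  // (sumShared/maxShared are float), converting with __half2float on load and __float2half
  // only when writing the pooled outputs, which is why the sizing below uses sizeof(float).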
int sharedMemElts = 128; while(sharedMemElts < cThreads * xyThreads) sharedMemElts *= 2; int sharedMemSize = sizeof(float) * sharedMemElts * 2; dim3 grid(1,cBlocks,nSize); dim3 threads(xyThreads,cThreads,1); gPoolChannelsNCHWHalfKernel<<<grid,threads,sharedMemSize>>>(in,out,cSize,xySize,maskSum,sharedMemElts); } //-------------------------------------------------------------------------------------------------------------- __global__ void sumChannelsNHWCKernel(const float* in, float* out, int xySize, int cSize, float scaleSum) { extern __shared__ float sumPoolNHWCShared[]; int cId = threadIdx.x; int cBlockDim = blockDim.x; int xyId = threadIdx.y; int xyBlockDim = blockDim.y; int cIdx = blockIdx.x * cBlockDim + cId; int nIdx = blockIdx.z; int sharedIdx = cId + cBlockDim * xyId; int xycSize = xySize*cSize; sumPoolNHWCShared[sharedIdx] = 0; if(cIdx < cSize) { int xyIdx = xyId; while(xyIdx < xySize) { sumPoolNHWCShared[sharedIdx] += in[cIdx + xyIdx * cSize + nIdx * xycSize]; xyIdx += xyBlockDim; } } __syncthreads(); for(int s = xyBlockDim>>1; s > 0; s >>= 1) { if(xyId < s) { sumPoolNHWCShared[sharedIdx] += sumPoolNHWCShared[sharedIdx + cBlockDim * s]; } __syncthreads(); } if(xyId == 0 && cIdx < cSize) out[cIdx + nIdx * cSize] = sumPoolNHWCShared[sharedIdx] * scaleSum; } __global__ void valueHeadPoolChannelsNHWCKernel(const float* in, float* out, int nSize, int xySize, int cSize, const float* maskSum) { extern __shared__ float sumPoolNHWCShared[]; int cId = threadIdx.x; int cBlockDim = blockDim.x; int xyId = threadIdx.y; int xyBlockDim = blockDim.y; int cIdx = blockIdx.x * cBlockDim + cId; int nIdx = blockIdx.z; int sharedIdx = cId + cBlockDim * xyId; int xycSize = xySize*cSize; sumPoolNHWCShared[sharedIdx] = 0; if(cIdx < cSize) { int xyIdx = xyId; while(xyIdx < xySize) { sumPoolNHWCShared[sharedIdx] += in[cIdx + xyIdx * cSize + nIdx * xycSize]; xyIdx += xyBlockDim; } } __syncthreads(); for(int s = xyBlockDim>>1; s > 0; s >>= 1) { if(xyId < s) { sumPoolNHWCShared[sharedIdx] += sumPoolNHWCShared[sharedIdx + cBlockDim * s]; } __syncthreads(); } if(xyId == 0 && cIdx < cSize) { float sum = sumPoolNHWCShared[sharedIdx]; float div = maskSum[nIdx]; float sqrtdiv = sqrt(div); float mean = sum/div; out[cIdx + nIdx * cSize*3] = mean; out[cIdx + nIdx * cSize*3 + cSize] = mean * (sqrtdiv - 14.0f) * 0.1f; out[cIdx + nIdx * cSize*3 + cSize*2] = mean * ((sqrtdiv - 14.0f) * (sqrtdiv - 14.0f) * 0.01f - 0.1f); } } __global__ void maxPositiveChannelsNHWCKernel(const float* in, float* out, int xySize, int cSize) { extern __shared__ float maxPoolNHWCShared[]; int cId = threadIdx.x; int cBlockDim = blockDim.x; int xyId = threadIdx.y; int xyBlockDim = blockDim.y; int cIdx = blockIdx.x * cBlockDim + cId; int nIdx = blockIdx.z; int sharedIdx = cId + cBlockDim * xyId; int xycSize = xySize*cSize; maxPoolNHWCShared[sharedIdx] = 0; if(cIdx < cSize) { int xyIdx = xyId; while(xyIdx < xySize) { maxPoolNHWCShared[sharedIdx] = fmaxf(maxPoolNHWCShared[sharedIdx],in[cIdx + xyIdx * cSize + nIdx * xycSize]); xyIdx += xyBlockDim; } } __syncthreads(); for(int s = xyBlockDim>>1; s > 0; s >>= 1) { if(xyId < s) { maxPoolNHWCShared[sharedIdx] = fmaxf(maxPoolNHWCShared[sharedIdx],maxPoolNHWCShared[sharedIdx + cBlockDim * s]); } __syncthreads(); } if(xyId == 0 && cIdx < cSize) out[cIdx + nIdx * cSize] = maxPoolNHWCShared[sharedIdx]; } __global__ void sumAndMaxPositiveChannelsNHWCKernel(const float* in, float* out, int xySize, int cSize, float scaleSum, int sharedMemElts) { extern __shared__ float poolNHWCShared[]; float* sumShared = 
(float*)poolNHWCShared; float* maxShared = (float*)poolNHWCShared + sharedMemElts; int cId = threadIdx.x; int cBlockDim = blockDim.x; int xyId = threadIdx.y; int xyBlockDim = blockDim.y; int cIdx = blockIdx.x * cBlockDim + cId; int nIdx = blockIdx.z; int sharedIdx = cId + cBlockDim * xyId; int xycSize = xySize*cSize; sumShared[sharedIdx] = 0; maxShared[sharedIdx] = 0; if(cIdx < cSize) { int xyIdx = xyId; while(xyIdx < xySize) { float a = in[cIdx + xyIdx * cSize + nIdx * xycSize]; sumShared[sharedIdx] += a; maxShared[sharedIdx] = fmaxf(maxShared[sharedIdx],a); xyIdx += xyBlockDim; } } __syncthreads(); for(int s = xyBlockDim>>1; s > 0; s >>= 1) { if(xyId < s) { sumShared[sharedIdx] += sumShared[sharedIdx + cBlockDim * s]; maxShared[sharedIdx] = fmaxf(maxShared[sharedIdx],maxShared[sharedIdx + cBlockDim * s]); } __syncthreads(); } if(xyId == 0 && cIdx < cSize) { out[cIdx + nIdx * (cSize*2)] = sumShared[sharedIdx] * scaleSum; out[cIdx + nIdx * (cSize*2) + cSize] = maxShared[sharedIdx]; } } __global__ void gPoolChannelsNHWCKernel(const float* in, float* out, int xySize, int cSize, const float* maskSum, int sharedMemElts) { extern __shared__ float poolNHWCShared[]; float* sumShared = (float*)poolNHWCShared; float* maxShared = (float*)poolNHWCShared + sharedMemElts; int cId = threadIdx.x; int cBlockDim = blockDim.x; int xyId = threadIdx.y; int xyBlockDim = blockDim.y; int cIdx = blockIdx.x * cBlockDim + cId; int nIdx = blockIdx.z; int sharedIdx = cId + cBlockDim * xyId; int xycSize = xySize*cSize; sumShared[sharedIdx] = 0; maxShared[sharedIdx] = 0; if(cIdx < cSize) { int xyIdx = xyId; while(xyIdx < xySize) { float a = in[cIdx + xyIdx * cSize + nIdx * xycSize]; sumShared[sharedIdx] += a; maxShared[sharedIdx] = fmaxf(maxShared[sharedIdx],a); xyIdx += xyBlockDim; } } __syncthreads(); for(int s = xyBlockDim>>1; s > 0; s >>= 1) { if(xyId < s) { sumShared[sharedIdx] += sumShared[sharedIdx + cBlockDim * s]; maxShared[sharedIdx] = fmaxf(maxShared[sharedIdx],maxShared[sharedIdx + cBlockDim * s]); } __syncthreads(); } if(xyId == 0 && cIdx < cSize) { float sum = sumShared[sharedIdx]; float div = maskSum[nIdx]; float sqrtdiv = sqrt(div); float mean = sum/div; out[cIdx + nIdx * (cSize*3)] = mean; out[cIdx + nIdx * (cSize*3) + cSize] = mean * (sqrtdiv - 14.0f) * 0.1f; out[cIdx + nIdx * (cSize*3) + cSize*2] = maxShared[sharedIdx]; } } void customCudaPoolRowsSumNHWC(const float* in, float* out, int nSize, int xySize, int cSize, float scaleSum) { if(nSize > 65536) throw std::runtime_error("customCudaPoolRowsSumNHWC: nSize too large"); if(cSize > 65536) throw std::runtime_error("customCudaPoolRowsSumNHWC: cSize too large"); //Use up to two warps worth of threads along the channel dimension, which is the //most compact int cThreads = 1; while(cThreads < 64 && cThreads < cSize/2) cThreads *= 2; int cBlocks = (cSize + cThreads - 1) / cThreads; //Distribute the extra threads to perform parallel reduction along the xy dimension. int xyThreads = (targetNumThreads < cThreads) ? 
1 : (targetNumThreads / cThreads); //We need one shared memory spot per thread int sharedMemSize = sizeof(float) * cThreads * xyThreads; dim3 grid(cBlocks,1,nSize); dim3 threads(cThreads,xyThreads,1); sumChannelsNHWCKernel<<<grid,threads,sharedMemSize>>>(in,out,xySize,cSize,scaleSum); } void customCudaValueHeadPoolNHWC(const float* in, float* out, int nSize, int xySize, int cSize, const float* maskSum) { if(nSize > 65536) throw std::runtime_error("customCudaValueHeadPoolNHWC: nSize too large"); if(cSize > 65536) throw std::runtime_error("customCudaValueHeadPoolNHWC: cSize too large"); //Use up to two warps worth of threads along the channel dimension, which is the //most compact int cThreads = 1; while(cThreads < 64 && cThreads < cSize/2) cThreads *= 2; int cBlocks = (cSize + cThreads - 1) / cThreads; //Distribute the extra threads to perform parallel reduction along the xy dimension. int xyThreads = (targetNumThreads < cThreads) ? 1 : (targetNumThreads / cThreads); //We need one shared memory spot per thread int sharedMemSize = sizeof(float) * cThreads * xyThreads; dim3 grid(cBlocks,1,nSize); dim3 threads(cThreads,xyThreads,1); valueHeadPoolChannelsNHWCKernel<<<grid,threads,sharedMemSize>>>(in,out,nSize,xySize,cSize,maskSum); } void customCudaPoolRowsMaxPositiveNHWC(const float* in, float* out, int nSize, int xySize, int cSize) { if(nSize > 65536) throw std::runtime_error("customCudaPoolRowsMaxPositiveNHWC: nSize too large"); if(cSize > 65536) throw std::runtime_error("customCudaPoolRowsMaxPositiveNHWC: cSize too large"); //Use up to two warps worth of threads along the channel dimension, which is the //most compact int cThreads = 1; while(cThreads < 64 && cThreads < cSize/2) cThreads *= 2; int cBlocks = (cSize + cThreads - 1) / cThreads; //Distribute the extra threads to perform parallel reduction along the xy dimension. int xyThreads = (targetNumThreads < cThreads) ? 1 : (targetNumThreads / cThreads); //We need one shared memory spot per thread int sharedMemSize = sizeof(float) * cThreads * xyThreads; dim3 grid(cBlocks,1,nSize); dim3 threads(cThreads,xyThreads,1); maxPositiveChannelsNHWCKernel<<<grid,threads,sharedMemSize>>>(in,out,xySize,cSize); } void customCudaPoolRowsSumAndMaxPositiveNHWC(const float* in, float* out, int nSize, int xySize, int cSize, float scaleSum) { if(nSize > 65536) throw std::runtime_error("customCudaPoolRowsSumAndMaxPositiveNHWC: nSize too large"); if(cSize > 65536) throw std::runtime_error("customCudaPoolRowsSumAndMaxPositiveNHWC: cSize too large"); //Use up to two warps worth of threads along the channel dimension, which is the //most compact int cThreads = 1; while(cThreads < 64 && cThreads < cSize/2) cThreads *= 2; int cBlocks = (cSize + cThreads - 1) / cThreads; //Distribute the extra threads to perform parallel reduction along the xy dimension. int xyThreads = (targetNumThreads < cThreads) ? 1 : (targetNumThreads / cThreads); //We need one shared memory spot per thread, and then we double it because we need both sum and max. //We also make sure it's a power of two to address any alignment concerns. 
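  // Note: in the NHWC kernels threadIdx.x runs over channels and threadIdx.y over spatial
  // (xy) positions, so consecutive threads read consecutive channels and global loads stay
  // coalesced; the shared-memory reduction strides by cBlockDim so each channel lane is
  // reduced across its xy threads independently.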
int sharedMemElts = 128; while(sharedMemElts < cThreads * xyThreads) sharedMemElts *= 2; int sharedMemSize = sizeof(float) * sharedMemElts * 2; dim3 grid(cBlocks,1,nSize); dim3 threads(cThreads,xyThreads,1); sumAndMaxPositiveChannelsNHWCKernel<<<grid,threads,sharedMemSize>>>(in,out,xySize,cSize,scaleSum,sharedMemElts); } void customCudaPoolRowsGPoolNHWC(const float* in, float* out, int nSize, int xySize, int cSize, const float* maskSum) { if(nSize > 65536) throw std::runtime_error("customCudaPoolRowsGPoolNHWC: nSize too large"); if(cSize > 65536) throw std::runtime_error("customCudaPoolRowsGPoolNHWC: cSize too large"); //Use up to two warps worth of threads along the channel dimension, which is the //most compact int cThreads = 1; while(cThreads < 64 && cThreads < cSize/2) cThreads *= 2; int cBlocks = (cSize + cThreads - 1) / cThreads; //Distribute the extra threads to perform parallel reduction along the xy dimension. int xyThreads = (targetNumThreads < cThreads) ? 1 : (targetNumThreads / cThreads); //We need one shared memory spot per thread, and then we double it because we need both sum and max. //We also make sure it's a power of two to address any alignment concerns. int sharedMemElts = 128; while(sharedMemElts < cThreads * xyThreads) sharedMemElts *= 2; int sharedMemSize = sizeof(float) * sharedMemElts * 2; dim3 grid(cBlocks,1,nSize); dim3 threads(cThreads,xyThreads,1); gPoolChannelsNHWCKernel<<<grid,threads,sharedMemSize>>>(in,out,xySize,cSize,maskSum,sharedMemElts); } //-------------------------------------------------------------------------------------------------------------- #ifdef CUDA_SUPPORTS_FP16 __global__ void sumAndMaxPositiveChannelsNHWCHalfKernel(const half* in, half* out, int xySize, int cSize, float scaleSum, int sharedMemElts) { extern __shared__ float poolNHWCShared[]; float* sumShared = (float*)poolNHWCShared; float* maxShared = (float*)poolNHWCShared + sharedMemElts; int cId = threadIdx.x; int cBlockDim = blockDim.x; int xyId = threadIdx.y; int xyBlockDim = blockDim.y; int cIdx = blockIdx.x * cBlockDim + cId; int nIdx = blockIdx.z; int sharedIdx = cId + cBlockDim * xyId; int xycSize = xySize*cSize; sumShared[sharedIdx] = 0; maxShared[sharedIdx] = 0; if(cIdx < cSize) { int xyIdx = xyId; while(xyIdx < xySize) { float a = __half2float(in[cIdx + xyIdx * cSize + nIdx * xycSize]); sumShared[sharedIdx] += a; maxShared[sharedIdx] = fmaxf(maxShared[sharedIdx],a); xyIdx += xyBlockDim; } } __syncthreads(); for(int s = xyBlockDim>>1; s > 0; s >>= 1) { if(xyId < s) { sumShared[sharedIdx] += sumShared[sharedIdx + cBlockDim * s]; maxShared[sharedIdx] = fmaxf(maxShared[sharedIdx],maxShared[sharedIdx + cBlockDim * s]); } __syncthreads(); } if(xyId == 0 && cIdx < cSize) { out[cIdx + nIdx * (cSize*2)] = __float2half(sumShared[sharedIdx] * scaleSum); out[cIdx + nIdx * (cSize*2) + cSize] = __float2half(maxShared[sharedIdx]); } } __global__ void gPoolChannelsNHWCHalfKernel(const half* in, half* out, int xySize, int cSize, const float* maskSum, int sharedMemElts) { extern __shared__ float poolNHWCShared[]; float* sumShared = (float*)poolNHWCShared; float* maxShared = (float*)poolNHWCShared + sharedMemElts; int cId = threadIdx.x; int cBlockDim = blockDim.x; int xyId = threadIdx.y; int xyBlockDim = blockDim.y; int cIdx = blockIdx.x * cBlockDim + cId; int nIdx = blockIdx.z; int sharedIdx = cId + cBlockDim * xyId; int xycSize = xySize*cSize; sumShared[sharedIdx] = 0; maxShared[sharedIdx] = 0; if(cIdx < cSize) { int xyIdx = xyId; while(xyIdx < xySize) { float a = __half2float(in[cIdx + 
xyIdx * cSize + nIdx * xycSize]); sumShared[sharedIdx] += a; maxShared[sharedIdx] = fmaxf(maxShared[sharedIdx],a); xyIdx += xyBlockDim; } } __syncthreads(); for(int s = xyBlockDim>>1; s > 0; s >>= 1) { if(xyId < s) { sumShared[sharedIdx] += sumShared[sharedIdx + cBlockDim * s]; maxShared[sharedIdx] = fmaxf(maxShared[sharedIdx],maxShared[sharedIdx + cBlockDim * s]); } __syncthreads(); } if(xyId == 0 && cIdx < cSize) { float sum = sumShared[sharedIdx]; float div = maskSum[nIdx]; float sqrtdiv = sqrt(div); float mean = sum/div; out[cIdx + nIdx * (cSize*3)] = __float2half(mean); out[cIdx + nIdx * (cSize*3) + cSize] = __float2half(mean * (sqrtdiv - 14.0f) * 0.1f); out[cIdx + nIdx * (cSize*3) + cSize*2] = __float2half(maxShared[sharedIdx]); } } #else __global__ void sumAndMaxPositiveChannelsNHWCHalfKernel(const half* in, half* out, int xySize, int cSize, float scaleSum, int sharedMemElts) { //Do nothing, FP16 not supported } __global__ void gPoolChannelsNHWCHalfKernel(const half* in, half* out, int xySize, int cSize, const float* maskSum, int sharedMemElts) { //Do nothing, FP16 not supported } #endif void customCudaPoolRowsSumAndMaxPositiveNHWC(const half* in, half* out, int nSize, int xySize, int cSize, float scaleSum) { if(nSize > 65536) throw std::runtime_error("customCudaPoolRowsSumAndMaxPositiveNHWC: nSize too large"); if(cSize > 65536) throw std::runtime_error("customCudaPoolRowsSumAndMaxPositiveNHWC: cSize too large"); //Use up to two warps worth of threads along the channel dimension, which is the //most compact int cThreads = 1; while(cThreads < 64 && cThreads < cSize/2) cThreads *= 2; int cBlocks = (cSize + cThreads - 1) / cThreads; //Distribute the extra threads to perform parallel reduction along the xy dimension. int xyThreads = (targetNumThreads < cThreads) ? 1 : (targetNumThreads / cThreads); //We need one shared memory spot per thread, and then we double it because we need both sum and max. //We also make sure it's a power of two to address any alignment concerns. int sharedMemElts = 128; while(sharedMemElts < cThreads * xyThreads) sharedMemElts *= 2; int sharedMemSize = sizeof(float) * sharedMemElts * 2; dim3 grid(cBlocks,1,nSize); dim3 threads(cThreads,xyThreads,1); sumAndMaxPositiveChannelsNHWCHalfKernel<<<grid,threads,sharedMemSize>>>(in,out,xySize,cSize,scaleSum,sharedMemElts); } void customCudaPoolRowsGPoolNHWC(const half* in, half* out, int nSize, int xySize, int cSize, const float* maskSum) { if(nSize > 65536) throw std::runtime_error("customCudaPoolRowsGPoolNHWC: nSize too large"); if(cSize > 65536) throw std::runtime_error("customCudaPoolRowsGPoolNHWC: cSize too large"); //Use up to two warps worth of threads along the channel dimension, which is the //most compact int cThreads = 1; while(cThreads < 64 && cThreads < cSize/2) cThreads *= 2; int cBlocks = (cSize + cThreads - 1) / cThreads; //Distribute the extra threads to perform parallel reduction along the xy dimension. int xyThreads = (targetNumThreads < cThreads) ? 1 : (targetNumThreads / cThreads); //We need one shared memory spot per thread, and then we double it because we need both sum and max. //We also make sure it's a power of two to address any alignment concerns. 
int sharedMemElts = 128; while(sharedMemElts < cThreads * xyThreads) sharedMemElts *= 2; int sharedMemSize = sizeof(float) * sharedMemElts * 2; dim3 grid(cBlocks,1,nSize); dim3 threads(cThreads,xyThreads,1); gPoolChannelsNHWCHalfKernel<<<grid,threads,sharedMemSize>>>(in,out,xySize,cSize,maskSum,sharedMemElts); } //-------------------------------------------------------------------------------------------------------------- __global__ void nchwTransposeKernel(const float *in, float* out, int xSize, int ySize, int tileDim, int tileStride, int xySize) { //+1 avoids bank conflicts extern __shared__ float tileNCHW[]; int tileDimP1 = tileDim+1; int xIdx = blockIdx.x * tileDim + threadIdx.x; int yIdx = blockIdx.y * tileDim + threadIdx.y; int nc = blockIdx.z; if(xIdx < xSize) { for(int j = 0; j < tileDim && yIdx+j < ySize; j += tileStride) { int inIdx = xIdx + xSize * (yIdx+j) + xySize * nc; tileNCHW[(threadIdx.y+j)*tileDimP1 + threadIdx.x] = in[inIdx]; } } __syncthreads(); //Transpose idx int outXIdx = blockIdx.y * tileDim + threadIdx.x; int outYIdx = blockIdx.x * tileDim + threadIdx.y; if(outXIdx < ySize) { for(int j = 0; j < tileDim && outYIdx+j < xSize; j += tileStride) { int outIdx = outXIdx + ySize * (outYIdx+j) + xySize * nc; out[outIdx] = tileNCHW[threadIdx.x*tileDimP1 + threadIdx.y+j]; } } } __global__ void nhwcTransposeKernel(const float *in, float* out, int xSize, int ySize, int cSize, int tileDim, int tileStride, int xycSize) { //+1 reduces bank conflicts extern __shared__ float tileNHWC[]; int tileDimP1 = tileDim+1; int xIdx = blockIdx.x * tileDim + threadIdx.x; int yIdx = blockIdx.y * tileDim + threadIdx.y; int cIdx = threadIdx.z; int n = blockIdx.z; if(xIdx < xSize) { for(int j = 0; j < tileDim && yIdx+j < ySize; j += tileStride) { int inIdx = cIdx + cSize * (xIdx + xSize * (yIdx+j)) + xycSize * n; tileNHWC[cIdx + cSize * ((threadIdx.y+j)*tileDimP1 + threadIdx.x)] = in[inIdx]; } } __syncthreads(); //Transpose idx int outXIdx = blockIdx.y * tileDim + threadIdx.x; int outYIdx = blockIdx.x * tileDim + threadIdx.y; if(outXIdx < ySize) { for(int j = 0; j < tileDim && outYIdx+j < xSize; j += tileStride) { int outIdx = cIdx + cSize * (outXIdx + ySize * (outYIdx+j)) + xycSize * n; out[outIdx] = tileNHWC[cIdx + cSize * (threadIdx.x*tileDimP1 + threadIdx.y+j)]; } } } __global__ void nchwTransposeHalfKernel(const half *in, half* out, int xSize, int ySize, int tileDim, int tileStride, int xySize) { //+1 avoids bank conflicts extern __shared__ half tileNCHWHALF[]; int tileDimP1 = tileDim+1; int xIdx = blockIdx.x * tileDim + threadIdx.x; int yIdx = blockIdx.y * tileDim + threadIdx.y; int nc = blockIdx.z; if(xIdx < xSize) { for(int j = 0; j < tileDim && yIdx+j < ySize; j += tileStride) { int inIdx = xIdx + xSize * (yIdx+j) + xySize * nc; tileNCHWHALF[(threadIdx.y+j)*tileDimP1 + threadIdx.x] = in[inIdx]; } } __syncthreads(); //Transpose idx int outXIdx = blockIdx.y * tileDim + threadIdx.x; int outYIdx = blockIdx.x * tileDim + threadIdx.y; if(outXIdx < ySize) { for(int j = 0; j < tileDim && outYIdx+j < xSize; j += tileStride) { int outIdx = outXIdx + ySize * (outYIdx+j) + xySize * nc; out[outIdx] = tileNCHWHALF[threadIdx.x*tileDimP1 + threadIdx.y+j]; } } } __global__ void nhwcTransposeHalfKernel(const half *in, half* out, int xSize, int ySize, int cSize, int tileDim, int tileStride, int xycSize) { //+1 reduces bank conflicts extern __shared__ half tileNHWCHALF[]; int tileDimP1 = tileDim+1; int xIdx = blockIdx.x * tileDim + threadIdx.x; int yIdx = blockIdx.y * tileDim + threadIdx.y; int cIdx = 
threadIdx.z; int n = blockIdx.z; if(xIdx < xSize) { for(int j = 0; j < tileDim && yIdx+j < ySize; j += tileStride) { int inIdx = cIdx + cSize * (xIdx + xSize * (yIdx+j)) + xycSize * n; tileNHWCHALF[cIdx + cSize * ((threadIdx.y+j)*tileDimP1 + threadIdx.x)] = in[inIdx]; } } __syncthreads(); //Transpose idx int outXIdx = blockIdx.y * tileDim + threadIdx.x; int outYIdx = blockIdx.x * tileDim + threadIdx.y; if(outXIdx < ySize) { for(int j = 0; j < tileDim && outYIdx+j < xSize; j += tileStride) { int outIdx = cIdx + cSize * (outXIdx + ySize * (outYIdx+j)) + xycSize * n; out[outIdx] = tileNHWCHALF[cIdx + cSize * (threadIdx.x*tileDimP1 + threadIdx.y+j)]; } } } static void sharedNCHWTranspose(const void *in, void* out, int xSize, int ySize, int ncSize, bool isHalf) { if(ncSize > 65536) throw std::runtime_error("customCudaNCHWTranspose: ncSize too large"); //TODO maybe tune these numbers, it varies by GPU //The first one should be the warp size, since it's set to what we need to avoid bank conflicts? //Or is it better to just make it xSize, to reduce overhead on top of 19x19? int tileDim = 32; int tileStride = targetNumThreads/tileDim; dim3 grid((xSize+tileDim-1)/tileDim,(ySize+tileDim-1)/tileDim,ncSize); dim3 threads(tileDim,tileStride,1); if(isHalf) { int sharedMemSize = sizeof(half)*tileDim*(tileDim+1); nchwTransposeHalfKernel<<<grid,threads,sharedMemSize>>>((const half*)in,(half*)out,xSize,ySize,tileDim,tileStride,xSize*ySize); } else { int sharedMemSize = sizeof(float)*tileDim*(tileDim+1); nchwTransposeKernel<<<grid,threads,sharedMemSize>>>((const float*)in,(float*)out,xSize,ySize,tileDim,tileStride,xSize*ySize); } } void customCudaNCHWTranspose(const float *in, float* out, int xSize, int ySize, int ncSize) { sharedNCHWTranspose(in,out,xSize,ySize,ncSize,false); } void customCudaNCHWTranspose(const half *in, half* out, int xSize, int ySize, int ncSize) { sharedNCHWTranspose(in,out,xSize,ySize,ncSize,true); } void sharedNHWCTranspose(const void *in, void* out, int xSize, int ySize, int cSize, int nSize, bool isHalf) { if(cSize > 64) throw std::runtime_error("customCudaNHWCTranspose: cSize too large"); int tileDim = 1; while(tileDim * 2 * cSize <= targetNumThreads) tileDim *= 2; int tileStride = 1; if(tileDim > 32) { tileStride = tileDim / 32; tileDim = 32; } dim3 grid((xSize+tileDim-1)/tileDim,(ySize+tileDim-1)/tileDim,nSize); dim3 threads(tileDim,tileStride,cSize); if(isHalf) { int sharedMemSize = sizeof(half)*tileDim*(tileDim+1)*cSize; nhwcTransposeHalfKernel<<<grid,threads,sharedMemSize>>>((const half*)in,(half*)out,xSize,ySize,cSize,tileDim,tileStride,xSize*ySize*cSize); } else { int sharedMemSize = sizeof(float)*tileDim*(tileDim+1)*cSize; nhwcTransposeKernel<<<grid,threads,sharedMemSize>>>((const float*)in,(float*)out,xSize,ySize,cSize,tileDim,tileStride,xSize*ySize*cSize); } } void customCudaNHWCTranspose(const float *in, float* out, int xSize, int ySize, int cSize, int nSize) { sharedNHWCTranspose(in,out,xSize,ySize,cSize,nSize,false); } void customCudaNHWCTranspose(const half *in, half* out, int xSize, int ySize, int cSize, int nSize) { sharedNHWCTranspose(in,out,xSize,ySize,cSize,nSize,true); } //-------------------------------------------------------------------------------------------------------------- template <typename T> __global__ void mirrorKernel(const T *in, T* out, int mSize, int subSize) { int subIdx = blockIdx.x * blockDim.x + threadIdx.x; int mIdx = blockIdx.y * blockDim.y + threadIdx.y; int batchIdx = blockIdx.z; if(subIdx < subSize && mIdx < mSize) { int inIdx = subIdx + 
subSize * (mIdx + mSize * batchIdx); int outIdx = subIdx + subSize * ((mSize-mIdx-1) + mSize * batchIdx); out[outIdx] = in[inIdx]; } } template <typename T> void customCudaMirrorTemplate(const T *in, T* out, int batchSize, int mSize, int subSize) { if(batchSize > 65536) throw std::runtime_error("customCudaMirror: batchSize too large"); if(mSize > 65536) throw std::runtime_error("customCudaMirror: mSize too large"); int subThreads; int subBlocks; int mThreads; int mBlocks; if(subSize > targetNumThreads) { subThreads = targetNumThreads/2; subBlocks = (subSize + subThreads - 1) / subThreads; mThreads = 1; mBlocks = mSize; } else if(subSize > targetNumThreads/2) { subThreads = subSize; subBlocks = 1; mThreads = 1; mBlocks = mSize; } else { subThreads = subSize; subBlocks = 1; mThreads = targetNumThreads / subSize; mBlocks = (mSize + mThreads - 1) / mThreads; } dim3 grid(subBlocks,mBlocks,batchSize); dim3 threads(subThreads,mThreads,1); mirrorKernel<<<grid,threads>>>(in,out,mSize,subSize); } template <typename T> void customCudaMirrorNCHWTemplate(const T *in, T* out, int batchSize, int cSize, int ySize, int xSize, bool mirrorY, bool mirrorX) { if(mirrorY && mirrorX) customCudaMirrorTemplate(in,out,batchSize*cSize,ySize*xSize,1); else if(mirrorY) customCudaMirrorTemplate(in,out,batchSize*cSize,ySize,xSize); else if(mirrorX) customCudaMirrorTemplate(in,out,batchSize*cSize*ySize,xSize,1); else cudaMemcpyAsync(out,in,sizeof(T)*batchSize*cSize*ySize*xSize,cudaMemcpyDeviceToDevice); } template <typename T> void customCudaMirrorNHWCTemplate(const T *in, T* out, int batchSize, int ySize, int xSize, int cSize, bool mirrorY, bool mirrorX) { if(mirrorY && mirrorX) customCudaMirrorTemplate(in,out,batchSize,ySize*xSize,cSize); else if(mirrorY) customCudaMirrorTemplate(in,out,batchSize,ySize,xSize*cSize); else if(mirrorX) customCudaMirrorTemplate(in,out,batchSize*ySize,xSize,cSize); else cudaMemcpyAsync(out,in,sizeof(T)*batchSize*ySize*xSize*cSize,cudaMemcpyDeviceToDevice); } void customCudaMirror(const float *in, float* out, int batchSize, int mSize, int subSize) { customCudaMirrorTemplate<float>(in,out,batchSize,mSize,subSize); } void customCudaMirrorNCHW(const float *in, float* out, int batchSize, int cSize, int ySize, int xSize, bool mirrorY, bool mirrorX) { customCudaMirrorNCHWTemplate<float>(in,out,batchSize,cSize,ySize,xSize,mirrorY,mirrorX); } void customCudaMirrorNHWC(const float *in, float* out, int batchSize, int ySize, int xSize, int cSize, bool mirrorY, bool mirrorX) { customCudaMirrorNHWCTemplate<float>(in,out,batchSize,ySize,xSize,cSize,mirrorY,mirrorX); } void customCudaMirror(const half *in, half* out, int batchSize, int mSize, int subSize) { customCudaMirrorTemplate<half>(in,out,batchSize,mSize,subSize); } void customCudaMirrorNCHW(const half *in, half* out, int batchSize, int cSize, int ySize, int xSize, bool mirrorY, bool mirrorX) { customCudaMirrorNCHWTemplate<half>(in,out,batchSize,cSize,ySize,xSize,mirrorY,mirrorX); } void customCudaMirrorNHWC(const half *in, half* out, int batchSize, int ySize, int xSize, int cSize, bool mirrorY, bool mirrorX) { customCudaMirrorNHWCTemplate<half>(in,out,batchSize,ySize,xSize,cSize,mirrorY,mirrorX); } //-------------------------------------------------------------------------------------------------------------- __global__ void copyToHalfKernel(const float *in, half* out, int n) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if(idx < n) { out[idx] = __float2half(in[idx]); } } __global__ void copyFromHalfKernel(const half *in, float* out, int n) { 
int idx = blockIdx.x * blockDim.x + threadIdx.x; if(idx < n) { out[idx] = __half2float(in[idx]); } } void customCudaCopyToHalf(const float* in, half* out, int n) { int blockSize = targetNumThreads; int numBlocks = (n+blockSize-1)/blockSize; copyToHalfKernel<<<numBlocks, blockSize>>>(in,out,n); } void customCudaCopyFromHalf(const half* in, float* out, int n) { int blockSize = targetNumThreads; int numBlocks = (n+blockSize-1)/blockSize; copyFromHalfKernel<<<numBlocks, blockSize>>>(in,out,n); } //-------------------------------------------------------------------------------------------------------------- #ifdef CUDA_SUPPORTS_FP16 __global__ void addTensorInplaceHalfKernel(half *buf, const half* biases, int nSize) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if(idx < nSize) { buf[idx] = __hadd(buf[idx],biases[idx]); } } #else __global__ void addTensorInplaceHalfKernel(half *buf, const half* biases, int nSize) { //Do nothing, FP16 not supported } #endif void customCudaAddTensorInplace(half* buf, const half* biases, int nSize) { int blockSize = targetNumThreads; int numBlocks = (nSize+blockSize-1)/blockSize; addTensorInplaceHalfKernel<<<numBlocks, blockSize>>>(buf,biases,nSize); } //-------------------------------------------------------------------------------------------------------------- __global__ void addCBiasInplaceNCKernel(float *buf, const float* biases, int nSize, int cSize) { int cIdx = blockIdx.x * blockDim.x + threadIdx.x; int nIdx = blockIdx.y * blockDim.y + threadIdx.y; if(cIdx < cSize && nIdx < nSize) { int idx = nIdx * cSize + cIdx; buf[idx] = buf[idx] + biases[cIdx]; } } #ifdef CUDA_SUPPORTS_FP16 __global__ void addCBiasInplaceNCHalfKernel(half *buf, const half* biases, int nSize, int cSize) { int cIdx = blockIdx.x * blockDim.x + threadIdx.x; int nIdx = blockIdx.y * blockDim.y + threadIdx.y; if(cIdx < cSize && nIdx < nSize) { int idx = nIdx * cSize + cIdx; buf[idx] = __hadd(buf[idx],biases[cIdx]); } } #else __global__ void addCBiasInplaceNCHalfKernel(half *buf, const half* biases, int nSize, int cSize) { //Do nothing, FP16 not supported } #endif void sharedAddCBiasInplaceNC(void* buf, const void* biases, int nSize, int cSize, bool isHalf) { int cThreads; int cBlocks; int nThreads; int nBlocks; if(cSize > targetNumThreads) { cThreads = targetNumThreads/2; cBlocks = (cSize + cThreads - 1) / cThreads; nThreads = 1; nBlocks = nSize; } else if(cSize > targetNumThreads/2) { cThreads = cSize; cBlocks = 1; nThreads = 1; nBlocks = nSize; } else { cThreads = cSize; cBlocks = 1; nThreads = targetNumThreads / cSize; nBlocks = (nSize + nThreads - 1) / nThreads; } if(nBlocks > 65536) throw std::runtime_error("customCudaAddCBiasInplaceNC: nSize too large given cSize"); dim3 grid(cBlocks,nBlocks,1); dim3 threads(cThreads,nThreads,1); if(isHalf) addCBiasInplaceNCHalfKernel<<<grid,threads>>>((half*)buf,(const half*)biases,nSize,cSize); else addCBiasInplaceNCKernel<<<grid,threads>>>((float*)buf,(const float*)biases,nSize,cSize); } void customCudaAddCBiasInplaceNC(float* buf, const float* biases, int nSize, int cSize) { sharedAddCBiasInplaceNC(buf,biases,nSize,cSize,false); } void customCudaAddCBiasInplaceNC(half* buf, const half* biases, int nSize, int cSize) { sharedAddCBiasInplaceNC(buf,biases,nSize,cSize,true); } //-------------------------------------------------------------------------------------------------------------- __global__ void addNCBiasInplaceNCHWKernel(float *buf, const float* biases, int cSize, int sSize) { int sIdx = blockIdx.x * blockDim.x + threadIdx.x; int cIdx = 
blockIdx.y * blockDim.y + threadIdx.y; int nIdx = blockIdx.z; if(cIdx < cSize && sIdx < sSize) { int ncIdx = nIdx * cSize + cIdx; int idx = ncIdx * sSize + sIdx; buf[idx] = buf[idx] + biases[ncIdx]; } } #ifdef CUDA_SUPPORTS_FP16 __global__ void addNCBiasInplaceNCHWHalfKernel(half *buf, const half* biases, int cSize, int sSize) { int sIdx = blockIdx.x * blockDim.x + threadIdx.x; int cIdx = blockIdx.y * blockDim.y + threadIdx.y; int nIdx = blockIdx.z; if(cIdx < cSize && sIdx < sSize) { int ncIdx = nIdx * cSize + cIdx; int idx = ncIdx * sSize + sIdx; buf[idx] = __hadd(buf[idx],biases[ncIdx]); } } #else __global__ void addNCBiasInplaceNCHWHalfKernel(half *buf, const half* biases, int cSize, int sSize) { //Do nothing, FP16 not supported } #endif void sharedAddNCBiasInplaceNCHW(void *buf, const void* biases, int nSize, int cSize, int xySize, bool isHalf) { if(nSize > 65536) throw std::runtime_error("customCudaAddNCBiasInplaceNCHW: nSize too large"); if(cSize > 65536) throw std::runtime_error("customCudaAddNCBiasInplaceNCHW: cSize too large"); int sSize = xySize; int sThreads; int sBlocks; int cThreads; int cBlocks; if(sSize > targetNumThreads) { sThreads = targetNumThreads/2; sBlocks = (sSize + sThreads - 1) / sThreads; cThreads = 1; cBlocks = cSize; } else if(sSize > targetNumThreads/2) { sThreads = sSize; sBlocks = 1; cThreads = 1; cBlocks = cSize; } else { sThreads = sSize; sBlocks = 1; cThreads = targetNumThreads / sSize; cBlocks = (cSize + cThreads - 1) / cThreads; } dim3 grid(sBlocks,cBlocks,nSize); dim3 threads(sThreads,cThreads,1); if(isHalf) addNCBiasInplaceNCHWHalfKernel<<<grid,threads>>>((half*)buf,(const half*)biases,cSize,sSize); else addNCBiasInplaceNCHWKernel<<<grid,threads>>>((float*)buf,(const float*)biases,cSize,sSize); } void customCudaAddNCBiasInplaceNCHW(float *buf, const float* biases, int nSize, int cSize, int xySize) { sharedAddNCBiasInplaceNCHW(buf,biases,nSize,cSize,xySize,false); } void customCudaAddNCBiasInplaceNCHW(half *buf, const half* biases, int nSize, int cSize, int xySize) { sharedAddNCBiasInplaceNCHW(buf,biases,nSize,cSize,xySize,true); } //-------------------------------------------------------------------------------------------------------------- __global__ void addNCBiasInplaceNHWCKernel(float *buf, const float* biases, int sSize, int cSize) { int cIdx = blockIdx.x * blockDim.x + threadIdx.x; int sIdx = blockIdx.y * blockDim.y + threadIdx.y; int nIdx = blockIdx.z; if(cIdx < cSize && sIdx < sSize) { int ncIdx = nIdx * cSize + cIdx; int idx = (nIdx * sSize + sIdx) * cSize + cIdx; buf[idx] = buf[idx] + biases[ncIdx]; } } #ifdef CUDA_SUPPORTS_FP16 __global__ void addNCBiasInplaceNHWCHalfKernel(half *buf, const half* biases, int sSize, int cSize) { int cIdx = blockIdx.x * blockDim.x + threadIdx.x; int sIdx = blockIdx.y * blockDim.y + threadIdx.y; int nIdx = blockIdx.z; if(cIdx < cSize && sIdx < sSize) { int ncIdx = nIdx * cSize + cIdx; int idx = (nIdx * sSize + sIdx) * cSize + cIdx; buf[idx] = __hadd(buf[idx],biases[ncIdx]); } } #else __global__ void addNCBiasInplaceNHWCHalfKernel(half *buf, const half* biases, int sSize, int cSize) { //Do nothing, FP16 not supported } #endif void sharedAddNCBiasInplaceNHWC(void *buf, const void* biases, int nSize, int xySize, int cSize, bool isHalf) { if(nSize > 65536) throw std::runtime_error("customCudaAddNCBiasInplaceNHWC: nSize too large"); if(xySize > 65536) throw std::runtime_error("customCudaAddNCBiasInplaceNHWC: xySize too large"); int sSize = xySize; int cThreads; int cBlocks; int sThreads; int sBlocks; if(cSize > 
targetNumThreads) { cThreads = targetNumThreads/2; cBlocks = (cSize + cThreads - 1) / cThreads; sThreads = 1; sBlocks = sSize; } else if(cSize > targetNumThreads/2) { cThreads = cSize; cBlocks = 1; sThreads = 1; sBlocks = sSize; } else { cThreads = cSize; cBlocks = 1; sThreads = targetNumThreads / cSize; sBlocks = (sSize + sThreads - 1) / sThreads; } dim3 grid(cBlocks,sBlocks,nSize); dim3 threads(cThreads,sThreads,1); if(isHalf) addNCBiasInplaceNHWCHalfKernel<<<grid,threads>>>((half*)buf,(const half*)biases,sSize,cSize); else addNCBiasInplaceNHWCKernel<<<grid,threads>>>((float*)buf,(const float*)biases,sSize,cSize); } void customCudaAddNCBiasInplaceNHWC(float *buf, const float* biases, int nSize, int xySize, int cSize) { sharedAddNCBiasInplaceNHWC(buf,biases,nSize,xySize,cSize,false); } void customCudaAddNCBiasInplaceNHWC(half *buf, const half* biases, int nSize, int xySize, int cSize) { sharedAddNCBiasInplaceNHWC(buf,biases,nSize,xySize,cSize,true); } //-------------------------------------------------------------------------------------------------------------- __global__ void applyCScaleBiasNCHWKernel(const float *in, float* out, const float* scale, const float* biases, int cSize, int sSize) { int sIdx = blockIdx.x * blockDim.x + threadIdx.x; int cIdx = blockIdx.y * blockDim.y + threadIdx.y; int nIdx = blockIdx.z; if(cIdx < cSize && sIdx < sSize) { int idx = (nIdx * cSize + cIdx) * sSize + sIdx; out[idx] = in[idx] * scale[cIdx] + biases[cIdx]; } } __global__ void applyCScaleBiasNCHWReluKernel(const float *in, float* out, const float* scale, const float* biases, int cSize, int sSize) { int sIdx = blockIdx.x * blockDim.x + threadIdx.x; int cIdx = blockIdx.y * blockDim.y + threadIdx.y; int nIdx = blockIdx.z; if(cIdx < cSize && sIdx < sSize) { int idx = (nIdx * cSize + cIdx) * sSize + sIdx; out[idx] = fmaxf(in[idx] * scale[cIdx] + biases[cIdx],0.0f); } } __global__ void applyCScaleBiasNCHWMaskKernel(const float *in, float* out, const float* scale, const float* biases, const float* mask, int cSize, int sSize) { int sIdx = blockIdx.x * blockDim.x + threadIdx.x; int cIdx = blockIdx.y * blockDim.y + threadIdx.y; int nIdx = blockIdx.z; if(cIdx < cSize && sIdx < sSize) { int idx = (nIdx * cSize + cIdx) * sSize + sIdx; out[idx] = (in[idx] * scale[cIdx] + biases[cIdx]) * mask[nIdx*sSize+sIdx]; } } __global__ void applyCScaleBiasNCHWReluMaskKernel(const float *in, float* out, const float* scale, const float* biases, const float* mask, int cSize, int sSize) { int sIdx = blockIdx.x * blockDim.x + threadIdx.x; int cIdx = blockIdx.y * blockDim.y + threadIdx.y; int nIdx = blockIdx.z; if(cIdx < cSize && sIdx < sSize) { int idx = (nIdx * cSize + cIdx) * sSize + sIdx; out[idx] = fmaxf(in[idx] * scale[cIdx] + biases[cIdx],0.0f) * mask[nIdx*sSize+sIdx]; } } #ifdef CUDA_SUPPORTS_FP16 __global__ void applyCScaleBiasNCHWHalfKernel(const half *in, half* out, const half* scale, const half* biases, int cSize, int sSize) { int sIdx = blockIdx.x * blockDim.x + threadIdx.x; int cIdx = blockIdx.y * blockDim.y + threadIdx.y; int nIdx = blockIdx.z; if(cIdx < cSize && sIdx < sSize) { int idx = (nIdx * cSize + cIdx) * sSize + sIdx; out[idx] = __hfma(in[idx],scale[cIdx],biases[cIdx]); } } __global__ void applyCScaleBiasNCHWReluHalfKernel(const half *in, half* out, const half* scale, const half* biases, int cSize, int sSize) { int sIdx = blockIdx.x * blockDim.x + threadIdx.x; int cIdx = blockIdx.y * blockDim.y + threadIdx.y; int nIdx = blockIdx.z; if(cIdx < cSize && sIdx < sSize) { int idx = (nIdx * cSize + cIdx) * sSize + 
sIdx; half a = __hfma(in[idx],scale[cIdx],biases[cIdx]); const half halfzero = __float2half(0.0f); out[idx] = __hgt(a,halfzero) ? a : halfzero; } } __global__ void applyCScaleBiasNCHWMaskHalfKernel(const half *in, half* out, const half* scale, const half* biases, const half* mask, int cSize, int sSize) { int sIdx = blockIdx.x * blockDim.x + threadIdx.x; int cIdx = blockIdx.y * blockDim.y + threadIdx.y; int nIdx = blockIdx.z; if(cIdx < cSize && sIdx < sSize) { int idx = (nIdx * cSize + cIdx) * sSize + sIdx; out[idx] = __hmul(__hfma(in[idx],scale[cIdx],biases[cIdx]),mask[nIdx*sSize+sIdx]); } } __global__ void applyCScaleBiasNCHWReluMaskHalfKernel(const half *in, half* out, const half* scale, const half* biases, const half* mask, int cSize, int sSize) { int sIdx = blockIdx.x * blockDim.x + threadIdx.x; int cIdx = blockIdx.y * blockDim.y + threadIdx.y; int nIdx = blockIdx.z; if(cIdx < cSize && sIdx < sSize) { int idx = (nIdx * cSize + cIdx) * sSize + sIdx; half a = __hmul(__hfma(in[idx],scale[cIdx],biases[cIdx]),mask[nIdx*sSize+sIdx]); const half halfzero = __float2half(0.0f); out[idx] = __hgt(a,halfzero) ? a : halfzero; } } #else __global__ void applyCScaleBiasNCHWHalfKernel(const half *in, half* out, const half* scale, const half* biases, int cSize, int sSize) { //Do nothing, FP16 not supported } __global__ void applyCScaleBiasNCHWReluHalfKernel(const half *in, half* out, const half* scale, const half* biases, int cSize, int sSize) { //Do nothing, FP16 not supported } __global__ void applyCScaleBiasNCHWMaskHalfKernel(const half *in, half* out, const half* scale, const half* biases, const half* mask, int cSize, int sSize) { //Do nothing, FP16 not supported } __global__ void applyCScaleBiasNCHWReluMaskHalfKernel(const half *in, half* out, const half* scale, const half* biases, const half* mask, int cSize, int sSize) { //Do nothing, FP16 not supported } #endif void sharedApplyCScaleBiasNCHW(const void* in, void* out, const void* scale, const void* biases, const void* mask, int nSize, int cSize, int xySize, bool isHalf, bool applyRelu) { if(nSize > 65536) throw std::runtime_error("customCudaApplyCScaleBiasNCHW: nSize too large"); if(cSize > 65536) throw std::runtime_error("customCudaApplyCScaleBiasNCHW: cSize too large"); int sSize = xySize; int sThreads; int sBlocks; int cThreads; int cBlocks; if(sSize > targetNumThreads) { sThreads = targetNumThreads/2; sBlocks = (sSize + sThreads - 1) / sThreads; cThreads = 1; cBlocks = cSize; } else if(sSize > targetNumThreads/2) { sThreads = sSize; sBlocks = 1; cThreads = 1; cBlocks = cSize; } else { sThreads = sSize; sBlocks = 1; cThreads = targetNumThreads / sSize; cBlocks = (cSize + cThreads - 1) / cThreads; } dim3 grid(sBlocks,cBlocks,nSize); dim3 threads(sThreads,cThreads,1); if(mask == NULL) { if(applyRelu) { if(isHalf) applyCScaleBiasNCHWReluHalfKernel<<<grid,threads>>>((const half*)in,(half*)out,(const half*)scale,(const half*)biases,cSize,sSize); else applyCScaleBiasNCHWReluKernel<<<grid,threads>>>((const float*)in,(float*)out,(const float*)scale,(const float*)biases,cSize,sSize); } else { if(isHalf) applyCScaleBiasNCHWHalfKernel<<<grid,threads>>>((const half*)in,(half*)out,(const half*)scale,(const half*)biases,cSize,sSize); else applyCScaleBiasNCHWKernel<<<grid,threads>>>((const float*)in,(float*)out,(const float*)scale,(const float*)biases,cSize,sSize); } } else { if(applyRelu) { if(isHalf) applyCScaleBiasNCHWReluMaskHalfKernel<<<grid,threads>>>((const half*)in,(half*)out,(const half*)scale,(const half*)biases,(const half*)mask,cSize,sSize); 
else applyCScaleBiasNCHWReluMaskKernel<<<grid,threads>>>((const float*)in,(float*)out,(const float*)scale,(const float*)biases,(const float*)mask,cSize,sSize); } else { if(isHalf) applyCScaleBiasNCHWMaskHalfKernel<<<grid,threads>>>((const half*)in,(half*)out,(const half*)scale,(const half*)biases,(const half*)mask,cSize,sSize); else applyCScaleBiasNCHWMaskKernel<<<grid,threads>>>((const float*)in,(float*)out,(const float*)scale,(const float*)biases,(const float*)mask,cSize,sSize); } } } void customCudaApplyCScaleBiasNCHW(const float* in, float* out, const float* scale, const float* biases, const float* mask, int nSize, int cSize, int xySize, bool applyRelu) { sharedApplyCScaleBiasNCHW(in,out,scale,biases,mask,nSize,cSize,xySize,false,applyRelu); } void customCudaApplyCScaleBiasNCHW(const half* in, half* out, const half* scale, const half* biases, const half* mask, int nSize, int cSize, int xySize, bool applyRelu) { sharedApplyCScaleBiasNCHW(in,out,scale,biases,mask,nSize,cSize,xySize,true,applyRelu); } //-------------------------------------------------------------------------------------------------------------- __global__ void applyCScaleBiasNHWCKernel(const float* in, float* out, const float* scale, const float* biases, int sSize, int cSize) { int cIdx = blockIdx.x * blockDim.x + threadIdx.x; int sIdx = blockIdx.y * blockDim.y + threadIdx.y; int nIdx = blockIdx.z; if(cIdx < cSize && sIdx < sSize) { int idx = (nIdx * sSize + sIdx) * cSize + cIdx; out[idx] = in[idx] * scale[cIdx] + biases[cIdx]; } } __global__ void applyCScaleBiasNHWCReluKernel(const float* in, float* out, const float* scale, const float* biases, int sSize, int cSize) { int cIdx = blockIdx.x * blockDim.x + threadIdx.x; int sIdx = blockIdx.y * blockDim.y + threadIdx.y; int nIdx = blockIdx.z; if(cIdx < cSize && sIdx < sSize) { int idx = (nIdx * sSize + sIdx) * cSize + cIdx; out[idx] = fmaxf(in[idx] * scale[cIdx] + biases[cIdx],0.0f); } } __global__ void applyCScaleBiasNHWCMaskKernel(const float* in, float* out, const float* scale, const float* biases, const float* mask, int sSize, int cSize) { int cIdx = blockIdx.x * blockDim.x + threadIdx.x; int sIdx = blockIdx.y * blockDim.y + threadIdx.y; int nIdx = blockIdx.z; if(cIdx < cSize && sIdx < sSize) { int idx = (nIdx * sSize + sIdx) * cSize + cIdx; out[idx] = (in[idx] * scale[cIdx] + biases[cIdx]) * mask[nIdx*sSize+sIdx]; } } __global__ void applyCScaleBiasNHWCReluMaskKernel(const float* in, float* out, const float* scale, const float* biases, const float* mask, int sSize, int cSize) { int cIdx = blockIdx.x * blockDim.x + threadIdx.x; int sIdx = blockIdx.y * blockDim.y + threadIdx.y; int nIdx = blockIdx.z; if(cIdx < cSize && sIdx < sSize) { int idx = (nIdx * sSize + sIdx) * cSize + cIdx; out[idx] = fmaxf(in[idx] * scale[cIdx] + biases[cIdx],0.0f) * mask[nIdx*sSize+sIdx]; } } #ifdef CUDA_SUPPORTS_FP16 __global__ void applyCScaleBiasNHWCHalfKernel(const half* in, half* out, const half* scale, const half* biases, int sSize, int cSize) { int cIdx = blockIdx.x * blockDim.x + threadIdx.x; int sIdx = blockIdx.y * blockDim.y + threadIdx.y; int nIdx = blockIdx.z; if(cIdx < cSize && sIdx < sSize) { int idx = (nIdx * sSize + sIdx) * cSize + cIdx; out[idx] = __hfma(in[idx],scale[cIdx],biases[cIdx]); } } __global__ void applyCScaleBiasNHWCReluHalfKernel(const half* in, half* out, const half* scale, const half* biases, int sSize, int cSize) { int cIdx = blockIdx.x * blockDim.x + threadIdx.x; int sIdx = blockIdx.y * blockDim.y + threadIdx.y; int nIdx = blockIdx.z; if(cIdx < cSize && sIdx < 
sSize) { int idx = (nIdx * sSize + sIdx) * cSize + cIdx; half a = __hfma(in[idx],scale[cIdx],biases[cIdx]); const half halfzero = __float2half(0.0f); out[idx] = __hgt(a,halfzero) ? a : halfzero; } } __global__ void applyCScaleBiasNHWCMaskHalfKernel(const half* in, half* out, const half* scale, const half* biases, const half* mask, int sSize, int cSize) { int cIdx = blockIdx.x * blockDim.x + threadIdx.x; int sIdx = blockIdx.y * blockDim.y + threadIdx.y; int nIdx = blockIdx.z; if(cIdx < cSize && sIdx < sSize) { int idx = (nIdx * sSize + sIdx) * cSize + cIdx; out[idx] = __hmul(__hfma(in[idx],scale[cIdx],biases[cIdx]),mask[nIdx*sSize+sIdx]); } } __global__ void applyCScaleBiasNHWCReluMaskHalfKernel(const half* in, half* out, const half* scale, const half* biases, const half* mask, int sSize, int cSize) { int cIdx = blockIdx.x * blockDim.x + threadIdx.x; int sIdx = blockIdx.y * blockDim.y + threadIdx.y; int nIdx = blockIdx.z; if(cIdx < cSize && sIdx < sSize) { int idx = (nIdx * sSize + sIdx) * cSize + cIdx; half a = __hmul(__hfma(in[idx],scale[cIdx],biases[cIdx]),mask[nIdx*sSize+sIdx]); const half halfzero = __float2half(0.0f); out[idx] = __hgt(a,halfzero) ? a : halfzero; } } #else __global__ void applyCScaleBiasNHWCHalfKernel(const half* in, half* out, const half* scale, const half* biases, int sSize, int cSize) { //Do nothing, FP16 not supported } __global__ void applyCScaleBiasNHWCReluHalfKernel(const half* in, half* out, const half* scale, const half* biases, int sSize, int cSize) { //Do nothing, FP16 not supported } __global__ void applyCScaleBiasNHWCMaskHalfKernel(const half* in, half* out, const half* scale, const half* biases, const half* mask, int sSize, int cSize) { //Do nothing, FP16 not supported } __global__ void applyCScaleBiasNHWCReluMaskHalfKernel(const half* in, half* out, const half* scale, const half* biases, const half* mask, int sSize, int cSize) { //Do nothing, FP16 not supported } #endif void sharedApplyCScaleBiasNHWC(const void* in, void* out, const void* scale, const void* biases, const void* mask, int nSize, int xySize, int cSize, bool isHalf, bool applyRelu) { if(nSize > 65536) throw std::runtime_error("customCudaApplyCScaleBiasNHWC: nSize too large"); if(xySize > 65536) throw std::runtime_error("customCudaApplyCScaleBiasNHWC: xySize too large"); int sSize = xySize; int cThreads; int cBlocks; int sThreads; int sBlocks; if(cSize > targetNumThreads) { cThreads = targetNumThreads/2; cBlocks = (cSize + cThreads - 1) / cThreads; sThreads = 1; sBlocks = sSize; } else if(cSize > targetNumThreads/2) { cThreads = cSize; cBlocks = 1; sThreads = 1; sBlocks = sSize; } else { cThreads = cSize; cBlocks = 1; sThreads = targetNumThreads / cSize; sBlocks = (sSize + sThreads - 1) / sThreads; } dim3 grid(cBlocks,sBlocks,nSize); dim3 threads(cThreads,sThreads,1); if(mask == NULL) { if(applyRelu) { if(isHalf) applyCScaleBiasNHWCReluHalfKernel<<<grid,threads>>>((const half*)in,(half*)out,(const half*)scale,(const half*)biases,sSize,cSize); else applyCScaleBiasNHWCReluKernel<<<grid,threads>>>((const float*)in,(float*)out,(const float*)scale,(const float*)biases,sSize,cSize); } else { if(isHalf) applyCScaleBiasNHWCHalfKernel<<<grid,threads>>>((const half*)in,(half*)out,(const half*)scale,(const half*)biases,sSize,cSize); else applyCScaleBiasNHWCKernel<<<grid,threads>>>((const float*)in,(float*)out,(const float*)scale,(const float*)biases,sSize,cSize); } } else { if(applyRelu) { if(isHalf) applyCScaleBiasNHWCReluMaskHalfKernel<<<grid,threads>>>((const half*)in,(half*)out,(const 
half*)scale,(const half*)biases,(const half*)mask,sSize,cSize);
      else
        applyCScaleBiasNHWCReluMaskKernel<<<grid,threads>>>((const float*)in,(float*)out,(const float*)scale,(const float*)biases,(const float*)mask,sSize,cSize);
    }
    else {
      if(isHalf)
        applyCScaleBiasNHWCMaskHalfKernel<<<grid,threads>>>((const half*)in,(half*)out,(const half*)scale,(const half*)biases,(const half*)mask,sSize,cSize);
      else
        applyCScaleBiasNHWCMaskKernel<<<grid,threads>>>((const float*)in,(float*)out,(const float*)scale,(const float*)biases,(const float*)mask,sSize,cSize);
    }
  }
}

void customCudaApplyCScaleBiasNHWC(const float* in, float* out, const float* scale, const float* biases, const float* mask, int nSize, int xySize, int cSize, bool applyRelu) {
  sharedApplyCScaleBiasNHWC(in,out,scale,biases,mask,nSize,xySize,cSize,false,applyRelu);
}
void customCudaApplyCScaleBiasNHWC(const half* in, half* out, const half* scale, const half* biases, const half* mask, int nSize, int xySize, int cSize, bool applyRelu) {
  sharedApplyCScaleBiasNHWC(in,out,scale,biases,mask,nSize,xySize,cSize,true,applyRelu);
}
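//--------------------------------------------------------------------------------------------------------------
// Illustrative usage sketch, not part of the original helper library: it shows how the float NCHW scale/bias
// entry point defined above might be driven from host code to apply a per-channel affine transform followed by
// ReLU (e.g. a folded batch norm). The tensor sizes, the example values, and the function name
// exampleApplyCScaleBiasNCHW are assumptions made for this sketch only.
void exampleApplyCScaleBiasNCHW() {
  const int nSize = 2, cSize = 3, xySize = 4;          // N, C, H*W
  const int total = nSize * cSize * xySize;
  float hostIn[24];
  float hostOut[24];
  float hostScale[3] = {2.0f, -1.0f, 0.5f};            // one scale per channel
  float hostBias[3]  = {0.0f,  0.5f, -3.0f};           // one bias per channel
  for(int i = 0; i < total; i++)
    hostIn[i] = 1.0f;

  float *in; float *out; float *scale; float *biases;
  cudaMalloc((void**)&in, total * sizeof(float));
  cudaMalloc((void**)&out, total * sizeof(float));
  cudaMalloc((void**)&scale, cSize * sizeof(float));
  cudaMalloc((void**)&biases, cSize * sizeof(float));
  cudaMemcpy(in, hostIn, total * sizeof(float), cudaMemcpyHostToDevice);
  cudaMemcpy(scale, hostScale, cSize * sizeof(float), cudaMemcpyHostToDevice);
  cudaMemcpy(biases, hostBias, cSize * sizeof(float), cudaMemcpyHostToDevice);

  // Each element out[(n*cSize + c)*xySize + s] becomes max(in[...] * scale[c] + biases[c], 0),
  // matching applyCScaleBiasNCHWReluKernel above; passing a non-NULL mask would additionally
  // multiply by mask[n*xySize + s].
  customCudaApplyCScaleBiasNCHW(in, out, scale, biases, /*mask=*/NULL, nSize, cSize, xySize, /*applyRelu=*/true);

  cudaMemcpy(hostOut, out, total * sizeof(float), cudaMemcpyDeviceToHost);
  cudaFree(in); cudaFree(out); cudaFree(scale); cudaFree(biases);
}
//--------------------------------------------------------------------------------------------------------------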
985f4fa1793f935171b4cb399a60200a7d24cb40.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 2.0.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date February 2016 @generated from sparse-iter/blas/zmgesellcmmv.cu normal z -> d, Tue Feb 9 16:05:44 2016 */ #include "magmasparse_internal.h" #define PRECISION_d //#define TEXTURE // SELLP SpMV kernel 3D grid // see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP // A UNIFIED SPARSE MATRIX DATA FORMAT // FOR MODERN PROCESSORS WITH WIDE SIMD UNITS // SELLC SpMV kernel modified assigning one thread to each row - 1D kernel template<bool betazero> __global__ void zmgesellptmv_kernel_1_3D( int num_rows, int num_cols, int num_vecs, int blocksize, int T, double alpha, double * dval, magma_index_t * dcolind, magma_index_t * drowptr, double * dx, double beta, double * dy) { // T threads assigned to each row int idx = threadIdx.x; // local row int idy = threadIdx.y; // vector int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index int row = bdx * blocksize + idx; // global row index if (row < num_rows ) { double dot = MAGMA_D_MAKE(0.0, 0.0); int offset = drowptr[ bdx ]; int max_ = (drowptr[ bdx+1 ]-offset)/blocksize; // number of elements each thread handles for ( int k = 0; k < max_; k++ ) { double val = dval[ offset + idx + blocksize*k ]; int col = dcolind[ offset + idx + blocksize*k ]; dot += val * dx[ col*num_vecs+idy ]; } if (betazero) { dy[ row+idy*num_rows ] = dot*alpha; } else { dy[ row+idy*num_rows ] = dot*alpha + beta*dy [ row+idy*num_rows ]; } } } // SELLP SpMV kernel 3D grid // see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP // A UNIFIED SPARSE MATRIX DATA FORMAT // FOR MODERN PROCESSORS WITH WIDE SIMD UNITS // SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel template<bool betazero> __global__ void zmgesellptmv_kernel_4_3D( int num_rows, int num_cols, int num_vecs, int blocksize, int T, double alpha, double * dval, magma_index_t * dcolind, magma_index_t * drowptr, double * dx, double beta, double * dy) { // T threads assigned to each row int idx = threadIdx.y; // thread in row int idy = threadIdx.x; // local row int idz = threadIdx.z; // vector int ldx = idx * blocksize + idy; int ldz = idz * blocksize * T + idx * blocksize + idy; int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index int row = bdx * blocksize + idy; // global row index int vec = idz*num_rows; extern __shared__ double shared[]; if (row < num_rows ) { double dot = MAGMA_D_MAKE(0.0, 0.0); int offset = drowptr[ bdx ]; int block = blocksize * T; // total number of threads int max_ = (drowptr[ bdx+1 ]-offset)/block; // number of elements each thread handles for ( int k = 0; k < max_; k++ ) { double val = dval[ offset + ldx + block*k ]; int col = dcolind[ offset + ldx + block*k ]; dot += val * dx[ col+vec ]; } shared[ldz] = dot; __syncthreads(); if ( idx < 2 ) { shared[ldz]+=shared[ldz+blocksize*2]; __syncthreads(); if ( idx == 0 ) { if (betazero) { dy[row+vec] = (shared[ldz]+shared[ldz+blocksize*1])*alpha; } else { dy[row+vec] = (shared[ldz]+shared[ldz+blocksize*1])*alpha + beta*dy [row+vec]; } } } } } // SELLP SpMV kernel 3D grid // see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. 
BISHOP // A UNIFIED SPARSE MATRIX DATA FORMAT // FOR MODERN PROCESSORS WITH WIDE SIMD UNITS // SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel template<bool betazero> __global__ void zmgesellptmv_kernel_8_3D( int num_rows, int num_cols, int num_vecs, int blocksize, int T, double alpha, double * dval, magma_index_t * dcolind, magma_index_t * drowptr, const double * __restrict__ dx, double beta, double * dy) { // T threads assigned to each row int idx = threadIdx.y; // thread in row int idy = threadIdx.x; // local row int idz = threadIdx.z; // vector int ldx = idx * blocksize + idy; int ldz = idz * blocksize * T + idx * blocksize + idy; int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index int row = bdx * blocksize + idy; // global row index int vec = idz*num_rows; extern __shared__ double shared[]; if (row < num_rows ) { double dot = MAGMA_D_MAKE(0.0, 0.0); int offset = drowptr[ bdx ]; int block = blocksize * T; // total number of threads int max_ = (drowptr[ bdx+1 ]-offset)/block; // number of elements each thread handles for ( int k = 0; k < max_; k++ ) { double val = dval[ offset + ldx + block*k ]; int col = dcolind[ offset + ldx + block*k ]; dot += val * dx[ col+vec ]; } shared[ldz] = dot; __syncthreads(); if ( idx < 4 ) { shared[ldz]+=shared[ldz+blocksize*4]; __syncthreads(); if ( idx < 2 ) shared[ldz]+=shared[ldz+blocksize*2]; __syncthreads(); if ( idx == 0 ) { if (betazero) { dy[row+vec] = (shared[ldz]+shared[ldz+blocksize*1])*alpha; } else { dy[row+vec] = (shared[ldz]+shared[ldz+blocksize*1])*alpha + beta*dy [row+vec]; } } } } } // SELLP SpMV kernel 3D grid // see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP // A UNIFIED SPARSE MATRIX DATA FORMAT // FOR MODERN PROCESSORS WITH WIDE SIMD UNITS // SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel template<bool betazero> __global__ void zmgesellptmv_kernel_16_3D( int num_rows, int num_cols, int num_vecs, int blocksize, int T, double alpha, double * dval, magma_index_t * dcolind, magma_index_t * drowptr, double * dx, double beta, double * dy) { // T threads assigned to each row int idx = threadIdx.y; // thread in row int idy = threadIdx.x; // local row int idz = threadIdx.z; // vector int ldx = idx * blocksize + idy; int ldz = idz * blocksize * T + idx * blocksize + idy; int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index int row = bdx * blocksize + idy; // global row index int vec = idz*num_rows; extern __shared__ double shared[]; if (row < num_rows ) { double dot = MAGMA_D_MAKE(0.0, 0.0); int offset = drowptr[ bdx ]; int block = blocksize * T; // total number of threads int max_ = (drowptr[ bdx+1 ]-offset)/block; // number of elements each thread handles for ( int k = 0; k < max_; k++ ) { double val = dval[ offset + ldx + block*k ]; int col = dcolind[ offset + ldx + block*k ]; dot += val * dx[ col+vec ]; } shared[ldz] = dot; __syncthreads(); if ( idx < 8 ) { shared[ldz]+=shared[ldz+blocksize*8]; __syncthreads(); if ( idx < 4 ) shared[ldz]+=shared[ldz+blocksize*4]; __syncthreads(); if ( idx < 2 ) shared[ldz]+=shared[ldz+blocksize*2]; __syncthreads(); if ( idx == 0 ) { if (betazero) { dy[row+vec] = (shared[ldz]+shared[ldz+blocksize*1])*alpha; } else { dy[row+vec] = (shared[ldz]+shared[ldz+blocksize*1])*alpha + beta*dy [row+vec]; } } } } } // SELLP SpMV kernel 3D grid // see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. 
BISHOP // A UNIFIED SPARSE MATRIX DATA FORMAT // FOR MODERN PROCESSORS WITH WIDE SIMD UNITS // SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel template<bool betazero> __global__ void zmgesellptmv_kernel_32_3D( int num_rows, int num_cols, int num_vecs, int blocksize, int T, double alpha, double * dval, magma_index_t * dcolind, magma_index_t * drowptr, double * dx, double beta, double * dy) { // T threads assigned to each row int idx = threadIdx.y; // thread in row int idy = threadIdx.x; // local row int idz = threadIdx.z; // vector int ldx = idx * blocksize + idy; int ldz = idz * blocksize * T + idx * blocksize + idy; int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index int row = bdx * blocksize + idy; // global row index int vec = idz*num_rows; extern __shared__ double shared[]; if (row < num_rows ) { double dot = MAGMA_D_MAKE(0.0, 0.0); int offset = drowptr[ bdx ]; int block = blocksize * T; // total number of threads int max_ = (drowptr[ bdx+1 ]-offset)/block; // number of elements each thread handles for ( int k = 0; k < max_; k++ ) { double val = dval[ offset + ldx + block*k ]; int col = dcolind[ offset + ldx + block*k ]; dot += val * dx[ col+vec ]; } shared[ldz] = dot; __syncthreads(); if ( idx < 16 ) { shared[ldz]+=shared[ldz+blocksize*16]; __syncthreads(); if ( idx < 8 ) shared[ldz]+=shared[ldz+blocksize*8]; __syncthreads(); if ( idx < 4 ) shared[ldz]+=shared[ldz+blocksize*4]; __syncthreads(); if ( idx < 2 ) shared[ldz]+=shared[ldz+blocksize*2]; __syncthreads(); if ( idx == 0 ) { if (betazero) { dy[row+vec] = (shared[ldz]+shared[ldz+blocksize*1])*alpha; } else { dy[row+vec] = (shared[ldz]+shared[ldz+blocksize*1])*alpha + beta*dy [row+vec]; } } } } } /************************* same but using texture mem *************************/ // SELLP SpMV kernel 2D grid - for large number of vectors // see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP // A UNIFIED SPARSE MATRIX DATA FORMAT // FOR MODERN PROCESSORS WITH WIDE SIMD UNITS // SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel template<bool betazero> __global__ void zmgesellptmv_kernel_1_3D_tex( int num_rows, int num_cols, int num_vecs, int blocksize, int T, double alpha, double * dval, magma_index_t * dcolind, magma_index_t * drowptr, hipTextureObject_t texdx, double beta, double * dy) { #if defined(PRECISION_d) && defined(TEXTURE) && (__CUDA_ARCH__ >= 300) int idx = threadIdx.x; // local row int idy = threadIdx.y; // vector int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index int row = bdx * blocksize + idx; // global row index if (row < num_rows ) { double dot1 = MAGMA_D_MAKE(0.0, 0.0); double dot2 = MAGMA_D_MAKE(0.0, 0.0); int offset = drowptr[ bdx ]; int max_ = (drowptr[ bdx+1 ]-offset)/blocksize; // number of elements each thread handles for ( int k = 0; k < max_; k++ ) { double val = dval[ offset + idx + blocksize*k ]; int col = num_vecs * dcolind[ offset + idx + blocksize*k ]; int4 v = tex1Dfetch<int4>(texdx, col/2 + idy ); dot1 += val * __hiloint2double(v.y, v.x); dot2 += val * __hiloint2double(v.w, v.z); } if (betazero) { dy[row+num_rows*idy*2] = dot1*alpha; dy[row+num_rows*idy*2+num_rows] = dot2*alpha; } else { dy[row+num_rows*idy*2] = dot1*alpha + beta*dy [row*num_vecs+idy*2]; dy[row+num_rows*idy*2+num_rows] = dot2*alpha + beta*dy [row*num_vecs+idy*2+1]; } } #endif } // SELLP SpMV kernel 3D grid // see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. 
BISHOP // A UNIFIED SPARSE MATRIX DATA FORMAT // FOR MODERN PROCESSORS WITH WIDE SIMD UNITS // SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel template<bool betazero> __global__ void zmgesellptmv_kernel_4_3D_tex( int num_rows, int num_cols, int num_vecs, int blocksize, int T, double alpha, double * dval, magma_index_t * dcolind, magma_index_t * drowptr, hipTextureObject_t texdx, double beta, double * dy) { #if defined(PRECISION_d) && defined(TEXTURE) && (__CUDA_ARCH__ >= 300) // T threads assigned to each row int idx = threadIdx.y; // thread in row int idy = threadIdx.x; // local row int idz = threadIdx.z; // vector int ldx = idx * blocksize + idy; int ldz = idz * blocksize * T + idx * blocksize + idy; int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index int row = bdx * blocksize + idy; // global row index int sv = num_vecs/2 * blocksize * T; extern __shared__ double shared[]; if (row < num_rows ) { double dot1 = MAGMA_D_MAKE(0.0, 0.0); double dot2 = MAGMA_D_MAKE(0.0, 0.0); int offset = drowptr[ bdx ]; int block = blocksize * T; // total number of threads int max_ = (drowptr[ bdx+1 ]-offset)/block; // number of elements each thread handles for ( int k = 0; k < max_; k++ ) { double val = dval[ offset + ldx + block*k ]; int col = num_vecs * dcolind[ offset + ldx + block*k ]; int4 v = tex1Dfetch<int4>(texdx, col/2 + idz ); dot1 += val * __hiloint2double(v.y, v.x); dot2 += val * __hiloint2double(v.w, v.z); } shared[ldz] = dot1; shared[ldz+sv] = dot2; __syncthreads(); if ( idx < 2 ) { shared[ldz]+=shared[ldz+blocksize*2]; shared[ldz+sv]+=shared[ldz+sv+blocksize*2]; __syncthreads(); if ( idx == 0 ) { if (betazero) { dy[row+num_rows*idz*2] = (shared[ldz]+shared[ldz+blocksize*1])*alpha; dy[row+num_rows*idz*2+num_rows] = (shared[ldz+sv]+shared[ldz+sv+blocksize*1])*alpha; } else { dy[row+num_rows*idz*2] = (shared[ldz]+shared[ldz+blocksize*1])*alpha + beta*dy [row*num_vecs+idz*2]; dy[row+num_rows*idz*2+num_rows] = (shared[ldz+sv]+shared[ldz+sv+blocksize*1])*alpha + beta*dy [row*num_vecs+idz*2+1]; } } } } #endif } // SELLP SpMV kernel 3D grid // see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. 
BISHOP // A UNIFIED SPARSE MATRIX DATA FORMAT // FOR MODERN PROCESSORS WITH WIDE SIMD UNITS // SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel template<bool betazero> __global__ void zmgesellptmv_kernel_8_3D_tex( int num_rows, int num_cols, int num_vecs, int blocksize, int T, double alpha, double * dval, magma_index_t * dcolind, magma_index_t * drowptr, hipTextureObject_t texdx, double beta, double * dy) { #if defined(PRECISION_d) && defined(TEXTURE) && (__CUDA_ARCH__ >= 300) // T threads assigned to each row int idx = threadIdx.y; // thread in row int idy = threadIdx.x; // local row int idz = threadIdx.z; // vector int ldx = idx * blocksize + idy; int ldz = idz * blocksize * T + idx * blocksize + idy; int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index int row = bdx * blocksize + idy; // global row index int sv = num_vecs/2 * blocksize * T; extern __shared__ double shared[]; if (row < num_rows ) { double dot1 = MAGMA_D_MAKE(0.0, 0.0); double dot2 = MAGMA_D_MAKE(0.0, 0.0); int offset = drowptr[ bdx ]; int block = blocksize * T; // total number of threads int max_ = (drowptr[ bdx+1 ]-offset)/block; // number of elements each thread handles for ( int k = 0; k < max_; k++ ) { double val = dval[ offset + ldx + block*k ]; int col = num_vecs * dcolind[ offset + ldx + block*k ]; int4 v = tex1Dfetch<int4>(texdx, col/2 + idz ); dot1 += val * __hiloint2double(v.y, v.x); dot2 += val * __hiloint2double(v.w, v.z); } shared[ldz] = dot1; shared[ldz+sv] = dot2; __syncthreads(); if ( idx < 4 ) { shared[ldz]+=shared[ldz+blocksize*4]; shared[ldz+sv]+=shared[ldz+sv+blocksize*4]; __syncthreads(); if ( idx < 2 ) { shared[ldz]+=shared[ldz+blocksize*2]; shared[ldz+sv]+=shared[ldz+sv+blocksize*2]; } __syncthreads(); if ( idx == 0 ) { if (betazero) { dy[row+num_rows*idz*2] = (shared[ldz]+shared[ldz+blocksize*1])*alpha; dy[row+num_rows*idz*2+num_rows] = (shared[ldz+sv]+shared[ldz+sv+blocksize*1])*alpha; } else { dy[row+num_rows*idz*2] = (shared[ldz]+shared[ldz+blocksize*1])*alpha + beta*dy [row*num_vecs+idz*2]; dy[row+num_rows*idz*2+num_rows] = (shared[ldz+sv]+shared[ldz+sv+blocksize*1])*alpha + beta*dy [row*num_vecs+idz*2+1]; } } } } #endif } // SELLP SpMV kernel 3D grid // see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. 
BISHOP // A UNIFIED SPARSE MATRIX DATA FORMAT // FOR MODERN PROCESSORS WITH WIDE SIMD UNITS // SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel template<bool betazero> __global__ void zmgesellptmv_kernel_16_3D_tex( int num_rows, int num_cols, int num_vecs, int blocksize, int T, double alpha, double * dval, magma_index_t * dcolind, magma_index_t * drowptr, hipTextureObject_t texdx, double beta, double * dy) { #if defined(PRECISION_d) && defined(TEXTURE) && (__CUDA_ARCH__ >= 300) // T threads assigned to each row int idx = threadIdx.y; // thread in row int idy = threadIdx.x; // local row int idz = threadIdx.z; // vector int ldx = idx * blocksize + idy; int ldz = idz * blocksize * T + idx * blocksize + idy; int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index int row = bdx * blocksize + idy; // global row index int sv = num_vecs/2 * blocksize * T; extern __shared__ double shared[]; if (row < num_rows ) { double dot1 = MAGMA_D_MAKE(0.0, 0.0); double dot2 = MAGMA_D_MAKE(0.0, 0.0); int offset = drowptr[ bdx ]; int block = blocksize * T; // total number of threads int max_ = (drowptr[ bdx+1 ]-offset)/block; // number of elements each thread handles for ( int k = 0; k < max_; k++ ) { double val = dval[ offset + ldx + block*k ]; int col = num_vecs * dcolind[ offset + ldx + block*k ]; int4 v = tex1Dfetch<int4>(texdx, col/2 + idz ); dot1 += val * __hiloint2double(v.y, v.x); dot2 += val * __hiloint2double(v.w, v.z); } shared[ldz] = dot1; shared[ldz+sv] = dot2; __syncthreads(); if ( idx < 8 ) { shared[ldz]+=shared[ldz+blocksize*8]; shared[ldz+sv]+=shared[ldz+sv+blocksize*8]; __syncthreads(); if ( idx < 4 ) { shared[ldz]+=shared[ldz+blocksize*4]; shared[ldz+sv]+=shared[ldz+sv+blocksize*4]; } if ( idx < 2 ) { shared[ldz]+=shared[ldz+blocksize*2]; shared[ldz+sv]+=shared[ldz+sv+blocksize*2]; } __syncthreads(); if ( idx == 0 ) { if (betazero) { dy[row+num_rows*idz*2] = (shared[ldz]+shared[ldz+blocksize*1])*alpha; dy[row+num_rows*idz*2+num_rows] = (shared[ldz+sv]+shared[ldz+sv+blocksize*1])*alpha; } else { dy[row+num_rows*idz*2] = (shared[ldz]+shared[ldz+blocksize*1])*alpha + beta*dy [row*num_vecs+idz*2]; dy[row+num_rows*idz*2+num_rows] = (shared[ldz+sv]+shared[ldz+sv+blocksize*1])*alpha + beta*dy [row*num_vecs+idz*2+1]; } } } } #endif } // SELLP SpMV kernel 3D grid // see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. 
BISHOP // A UNIFIED SPARSE MATRIX DATA FORMAT // FOR MODERN PROCESSORS WITH WIDE SIMD UNITS // SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel template<bool betazero> __global__ void zmgesellptmv_kernel_32_3D_tex( int num_rows, int num_cols, int num_vecs, int blocksize, int T, double alpha, double * dval, magma_index_t * dcolind, magma_index_t * drowptr, hipTextureObject_t texdx, double beta, double * dy) { #if defined(PRECISION_d) && defined(TEXTURE) && (__CUDA_ARCH__ >= 300) // T threads assigned to each row int idx = threadIdx.y; // thread in row int idy = threadIdx.x; // local row int idz = threadIdx.z; // vector int ldx = idx * blocksize + idy; int ldz = idz * blocksize * T + idx * blocksize + idy; int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index int row = bdx * blocksize + idy; // global row index int sv = num_vecs/2 * blocksize * T; extern __shared__ double shared[]; if (row < num_rows ) { double dot1 = MAGMA_D_MAKE(0.0, 0.0); double dot2 = MAGMA_D_MAKE(0.0, 0.0); int offset = drowptr[ bdx ]; int block = blocksize * T; // total number of threads int max_ = (drowptr[ bdx+1 ]-offset)/block; // number of elements each thread handles for ( int k = 0; k < max_; k++ ) { double val = dval[ offset + ldx + block*k ]; int col = num_vecs * dcolind[ offset + ldx + block*k ]; int4 v = tex1Dfetch<int4>(texdx, col/2 + idz ); dot1 += val * __hiloint2double(v.y, v.x); dot2 += val * __hiloint2double(v.w, v.z); } shared[ldz] = dot1; shared[ldz+sv] = dot2; __syncthreads(); if ( idx < 16 ) { shared[ldz]+=shared[ldz+blocksize*16]; shared[ldz+sv]+=shared[ldz+sv+blocksize*16]; __syncthreads(); if ( idx < 8 ) { shared[ldz]+=shared[ldz+blocksize*8]; shared[ldz+sv]+=shared[ldz+sv+blocksize*8]; } if ( idx < 4 ) { shared[ldz]+=shared[ldz+blocksize*4]; shared[ldz+sv]+=shared[ldz+sv+blocksize*4]; } if ( idx < 2 ) { shared[ldz]+=shared[ldz+blocksize*2]; shared[ldz+sv]+=shared[ldz+sv+blocksize*2]; } __syncthreads(); if ( idx == 0 ) { if (betazero) { dy[row+num_rows*idz*2] = (shared[ldz]+shared[ldz+blocksize*1])*alpha; dy[row+num_rows*idz*2+num_rows] = (shared[ldz+sv]+shared[ldz+sv+blocksize*1])*alpha; } else { dy[row+num_rows*idz*2] = (shared[ldz]+shared[ldz+blocksize*1])*alpha + beta*dy [row*num_vecs+idz*2]; dy[row+num_rows*idz*2+num_rows] = (shared[ldz+sv]+shared[ldz+sv+blocksize*1])*alpha + beta*dy [row*num_vecs+idz*2+1]; } } } } #endif } /** Purpose ------- This routine computes Y = alpha * A^t * X + beta * Y on the GPU. Input format is SELLP. Note, that the input format for X is row-major while the output format for Y is column major! Arguments --------- @param[in] transA magma_trans_t transpose A? @param[in] m magma_int_t number of rows in A @param[in] n magma_int_t number of columns in A @param[in] num_vecs magma_int_t number of columns in X and Y @param[in] blocksize magma_int_t number of rows in one ELL-slice @param[in] slices magma_int_t number of slices in matrix @param[in] alignment magma_int_t number of threads assigned to one row @param[in] alpha double scalar multiplier @param[in] dval magmaDouble_ptr array containing values of A in SELLP @param[in] dcolind magmaIndex_ptr columnindices of A in SELLP @param[in] drowptr magmaIndex_ptr rowpointer of SELLP @param[in] dx magmaDouble_ptr input vector x @param[in] beta double scalar multiplier @param[out] dy magmaDouble_ptr input/output vector y @param[in] queue magma_queue_t Queue to execute in. 
@ingroup magmasparse_dblas ********************************************************************/ extern "C" magma_int_t magma_dmgesellpmv( magma_trans_t transA, magma_int_t m, magma_int_t n, magma_int_t num_vecs, magma_int_t blocksize, magma_int_t slices, magma_int_t alignment, double alpha, magmaDouble_ptr dval, magmaIndex_ptr dcolind, magmaIndex_ptr drowptr, magmaDouble_ptr dx, double beta, magmaDouble_ptr dy, magma_queue_t queue ) { // using a 3D thread grid for small num_vecs, a 2D grid otherwise int texture=0, kepler=0, precision=0; magma_int_t arch = magma_getdevice_arch(); if ( arch > 300 ) kepler = 1; #if defined(PRECISION_d) precision = 1; #endif #if defined(TEXTURE) texture = 1; #endif if ( (texture==1) && (precision==1) && (kepler==1) ) { // Create channel. hipChannelFormatDesc channel_desc; channel_desc = hipCreateChannelDesc(32, 32, 32, 32, hipChannelFormatKindSigned); // Create resource descriptor. struct hipResourceDesc resDescdx; memset(&resDescdx, 0, sizeof(resDescdx)); resDescdx.resType = hipResourceTypeLinear; resDescdx.res.linear.devPtr = (void*)dx; resDescdx.res.linear.desc = channel_desc; resDescdx.res.linear.sizeInBytes = m * num_vecs * sizeof(double); // Specify texture object parameters. struct hipTextureDesc texDesc; memset(&texDesc, 0, sizeof(texDesc)); texDesc.addressMode[0] = hipAddressModeClamp; texDesc.filterMode = hipFilterModePoint; texDesc.readMode = hipReadModeElementType; // Create texture object. hipTextureObject_t texdx = 0; hipCreateTextureObject(&texdx, &resDescdx, &texDesc, NULL); hipDeviceSetSharedMemConfig(hipSharedMemBankSizeEightByte); if ( num_vecs%2 ==1 ) { // only multiple of 2 can be processed printf("error: number of vectors has to be multiple of 2.\n"); return MAGMA_ERR_NOT_SUPPORTED; } if ( num_vecs > 8 ) // avoid running into memory problems alignment = 1; int num_threads = (num_vecs/2) * blocksize*alignment; // every thread handles two vectors if ( num_threads > 1024 ) printf("error: too many threads requested.\n"); dim3 block( blocksize, alignment, num_vecs/2 ); int dimgrid1 = int( sqrt( double( slices ))); int dimgrid2 = magma_ceildiv( slices, dimgrid1 ); dim3 grid( dimgrid1, dimgrid2, 1); int Ms = num_vecs * blocksize*alignment * sizeof( double ); if ( alignment == 1) { dim3 block( blocksize, num_vecs/2, 1 ); if ( beta == MAGMA_D_MAKE( 0.0, 0.0 ) ) hipLaunchKernelGGL(( zmgesellptmv_kernel_1_3D_tex<true>), dim3(grid), dim3(block), 0, queue->cuda_stream() , m, n, num_vecs, blocksize, alignment, alpha, dval, dcolind, drowptr, texdx, beta, dy ); else hipLaunchKernelGGL(( zmgesellptmv_kernel_1_3D_tex<false>), dim3(grid), dim3(block), 0, queue->cuda_stream() , m, n, num_vecs, blocksize, alignment, alpha, dval, dcolind, drowptr, texdx, beta, dy ); } else if ( alignment == 4) { dim3 block( blocksize, alignment, num_vecs/2 ); if ( beta == MAGMA_D_MAKE( 0.0, 0.0 ) ) hipLaunchKernelGGL(( zmgesellptmv_kernel_4_3D_tex<true>), dim3(grid), dim3(block), Ms, queue->cuda_stream() , m, n, num_vecs, blocksize, alignment, alpha, dval, dcolind, drowptr, texdx, beta, dy ); else hipLaunchKernelGGL(( zmgesellptmv_kernel_4_3D_tex<false>), dim3(grid), dim3(block), Ms, queue->cuda_stream() , m, n, num_vecs, blocksize, alignment, alpha, dval, dcolind, drowptr, texdx, beta, dy ); } else if ( alignment == 8) { dim3 block( blocksize, alignment, num_vecs/2 ); if ( beta == MAGMA_D_MAKE( 0.0, 0.0 ) ) hipLaunchKernelGGL(( zmgesellptmv_kernel_8_3D_tex<true>), dim3(grid), dim3(block), Ms, queue->cuda_stream() , m, n, num_vecs, blocksize, alignment, alpha, dval, dcolind, 
drowptr, texdx, beta, dy ); else hipLaunchKernelGGL(( zmgesellptmv_kernel_8_3D_tex<false>), dim3(grid), dim3(block), Ms, queue->cuda_stream() , m, n, num_vecs, blocksize, alignment, alpha, dval, dcolind, drowptr, texdx, beta, dy ); } else if ( alignment == 16) { dim3 block( blocksize, alignment, num_vecs/2 ); if ( beta == MAGMA_D_MAKE( 0.0, 0.0 ) ) hipLaunchKernelGGL(( zmgesellptmv_kernel_16_3D_tex<true>), dim3(grid), dim3(block), Ms, queue->cuda_stream() , m, n, num_vecs, blocksize, alignment, alpha, dval, dcolind, drowptr, texdx, beta, dy ); else hipLaunchKernelGGL(( zmgesellptmv_kernel_16_3D_tex<false>), dim3(grid), dim3(block), Ms, queue->cuda_stream() , m, n, num_vecs, blocksize, alignment, alpha, dval, dcolind, drowptr, texdx, beta, dy ); } else if ( alignment == 32) { dim3 block( blocksize, alignment, num_vecs/2 ); if ( beta == MAGMA_D_MAKE( 0.0, 0.0 ) ) hipLaunchKernelGGL(( zmgesellptmv_kernel_32_3D_tex<true>), dim3(grid), dim3(block), Ms, queue->cuda_stream() , m, n, num_vecs, blocksize, alignment, alpha, dval, dcolind, drowptr, texdx, beta, dy ); else hipLaunchKernelGGL(( zmgesellptmv_kernel_32_3D_tex<false>), dim3(grid), dim3(block), Ms, queue->cuda_stream() , m, n, num_vecs, blocksize, alignment, alpha, dval, dcolind, drowptr, texdx, beta, dy ); } else { printf("error: alignment %d not supported.\n", int(alignment) ); return MAGMA_ERR_NOT_SUPPORTED; } } else { if ( num_vecs%2 ==1 ) { // only multiple of 2 can be processed printf("error: number of vectors has to be multiple of 2.\n"); return MAGMA_ERR_NOT_SUPPORTED; } if ( num_vecs > 8 ) // avoid running into memory problems alignment = 1; int num_threads = num_vecs * blocksize*alignment; // every thread handles two vectors if ( num_threads > 1024 ) printf("error: too many threads requested.\n"); int dimgrid1 = int( sqrt( double( slices ))); int dimgrid2 = magma_ceildiv( slices, dimgrid1 ); dim3 grid( dimgrid1, dimgrid2, 1); int Ms = num_threads * sizeof( double ); if ( alignment == 1) { dim3 block( blocksize, num_vecs/2, 1 ); if ( beta == MAGMA_D_MAKE( 0.0, 0.0 ) ) hipLaunchKernelGGL(( zmgesellptmv_kernel_1_3D<true>), dim3(grid), dim3(block), 0, queue->cuda_stream() , m, n, num_vecs, blocksize, alignment, alpha, dval, dcolind, drowptr, dx, beta, dy ); else hipLaunchKernelGGL(( zmgesellptmv_kernel_1_3D<false>), dim3(grid), dim3(block), 0, queue->cuda_stream() , m, n, num_vecs, blocksize, alignment, alpha, dval, dcolind, drowptr, dx, beta, dy ); } else if ( alignment == 4) { dim3 block( blocksize, alignment, num_vecs/2 ); if ( beta == MAGMA_D_MAKE( 0.0, 0.0 ) ) hipLaunchKernelGGL(( zmgesellptmv_kernel_4_3D<true>), dim3(grid), dim3(block), Ms, queue->cuda_stream() , m, n, num_vecs, blocksize, alignment, alpha, dval, dcolind, drowptr, dx, beta, dy ); else hipLaunchKernelGGL(( zmgesellptmv_kernel_4_3D<false>), dim3(grid), dim3(block), Ms, queue->cuda_stream() , m, n, num_vecs, blocksize, alignment, alpha, dval, dcolind, drowptr, dx, beta, dy ); } else if ( alignment == 8) { dim3 block( blocksize, alignment, num_vecs/2 ); if ( beta == MAGMA_D_MAKE( 0.0, 0.0 ) ) hipLaunchKernelGGL(( zmgesellptmv_kernel_8_3D<true>), dim3(grid), dim3(block), Ms, queue->cuda_stream() , m, n, num_vecs, blocksize, alignment, alpha, dval, dcolind, drowptr, dx, beta, dy ); else hipLaunchKernelGGL(( zmgesellptmv_kernel_8_3D<false>), dim3(grid), dim3(block), Ms, queue->cuda_stream() , m, n, num_vecs, blocksize, alignment, alpha, dval, dcolind, drowptr, dx, beta, dy ); } else if ( alignment == 16) { dim3 block( blocksize, alignment, num_vecs/2 ); if ( beta == 
MAGMA_D_MAKE( 0.0, 0.0 ) ) hipLaunchKernelGGL(( zmgesellptmv_kernel_16_3D<true>), dim3(grid), dim3(block), Ms, queue->cuda_stream() , m, n, num_vecs, blocksize, alignment, alpha, dval, dcolind, drowptr, dx, beta, dy ); else hipLaunchKernelGGL(( zmgesellptmv_kernel_16_3D<false>), dim3(grid), dim3(block), Ms, queue->cuda_stream() , m, n, num_vecs, blocksize, alignment, alpha, dval, dcolind, drowptr, dx, beta, dy ); } else if ( alignment == 32) { dim3 block( blocksize, alignment, num_vecs/2 ); if ( beta == MAGMA_D_MAKE( 0.0, 0.0 ) ) hipLaunchKernelGGL(( zmgesellptmv_kernel_32_3D<true>), dim3(grid), dim3(block), Ms, queue->cuda_stream() , m, n, num_vecs, blocksize, alignment, alpha, dval, dcolind, drowptr, dx, beta, dy ); else hipLaunchKernelGGL(( zmgesellptmv_kernel_32_3D<false>), dim3(grid), dim3(block), Ms, queue->cuda_stream() , m, n, num_vecs, blocksize, alignment, alpha, dval, dcolind, drowptr, dx, beta, dy ); } else { printf("error: alignment %d not supported.\n", int(alignment) ); return MAGMA_ERR_NOT_SUPPORTED; } } return MAGMA_SUCCESS; }
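//--------------------------------------------------------------------------------------------------------------
// Illustrative CPU reference, not part of the original MAGMA source: it spells out the SELL-P storage
// convention that the kernels above rely on, for a single right-hand side. Each slice of `blocksize`
// consecutive rows starts at rowptr[slice]; within a slice the (zero-padded) nonzeros are stored column-major,
// so row r of the slice reads val[offset + r + blocksize*k] for k = 0 .. max_-1, exactly as in
// zmgesellptmv_kernel_1_3D. The function name and the use of plain host pointers are assumptions made for this
// sketch only; the GPU routine additionally handles multiple vectors with row-major X and column-major Y.
static void dmgesellpmv_cpu_reference(
    magma_int_t num_rows,
    magma_int_t blocksize,
    const double *val,
    const magma_index_t *colind,
    const magma_index_t *rowptr,
    double alpha,
    const double *x,
    double beta,
    double *y)
{
    magma_int_t num_slices = (num_rows + blocksize - 1) / blocksize;
    for (magma_int_t s = 0; s < num_slices; s++) {
        magma_index_t offset = rowptr[s];
        magma_int_t max_ = (rowptr[s+1] - offset) / blocksize;   // padded width stored for this slice
        for (magma_int_t r = 0; r < blocksize; r++) {
            magma_int_t row = s * blocksize + r;
            if (row >= num_rows)
                break;
            double dot = 0.0;
            for (magma_int_t k = 0; k < max_; k++) {
                double v = val[ offset + r + blocksize*k ];           // zero-padded entries contribute nothing
                magma_index_t col = colind[ offset + r + blocksize*k ];
                dot += v * x[ col ];
            }
            y[ row ] = alpha * dot + beta * y[ row ];
        }
    }
}
//--------------------------------------------------------------------------------------------------------------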
985f4fa1793f935171b4cb399a60200a7d24cb40.cu
/* -- MAGMA (version 2.0.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date February 2016 @generated from sparse-iter/blas/zmgesellcmmv.cu normal z -> d, Tue Feb 9 16:05:44 2016 */ #include "magmasparse_internal.h" #define PRECISION_d //#define TEXTURE // SELLP SpMV kernel 3D grid // see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP // A UNIFIED SPARSE MATRIX DATA FORMAT // FOR MODERN PROCESSORS WITH WIDE SIMD UNITS // SELLC SpMV kernel modified assigning one thread to each row - 1D kernel template<bool betazero> __global__ void zmgesellptmv_kernel_1_3D( int num_rows, int num_cols, int num_vecs, int blocksize, int T, double alpha, double * dval, magma_index_t * dcolind, magma_index_t * drowptr, double * dx, double beta, double * dy) { // T threads assigned to each row int idx = threadIdx.x; // local row int idy = threadIdx.y; // vector int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index int row = bdx * blocksize + idx; // global row index if (row < num_rows ) { double dot = MAGMA_D_MAKE(0.0, 0.0); int offset = drowptr[ bdx ]; int max_ = (drowptr[ bdx+1 ]-offset)/blocksize; // number of elements each thread handles for ( int k = 0; k < max_; k++ ) { double val = dval[ offset + idx + blocksize*k ]; int col = dcolind[ offset + idx + blocksize*k ]; dot += val * dx[ col*num_vecs+idy ]; } if (betazero) { dy[ row+idy*num_rows ] = dot*alpha; } else { dy[ row+idy*num_rows ] = dot*alpha + beta*dy [ row+idy*num_rows ]; } } } // SELLP SpMV kernel 3D grid // see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP // A UNIFIED SPARSE MATRIX DATA FORMAT // FOR MODERN PROCESSORS WITH WIDE SIMD UNITS // SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel template<bool betazero> __global__ void zmgesellptmv_kernel_4_3D( int num_rows, int num_cols, int num_vecs, int blocksize, int T, double alpha, double * dval, magma_index_t * dcolind, magma_index_t * drowptr, double * dx, double beta, double * dy) { // T threads assigned to each row int idx = threadIdx.y; // thread in row int idy = threadIdx.x; // local row int idz = threadIdx.z; // vector int ldx = idx * blocksize + idy; int ldz = idz * blocksize * T + idx * blocksize + idy; int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index int row = bdx * blocksize + idy; // global row index int vec = idz*num_rows; extern __shared__ double shared[]; if (row < num_rows ) { double dot = MAGMA_D_MAKE(0.0, 0.0); int offset = drowptr[ bdx ]; int block = blocksize * T; // total number of threads int max_ = (drowptr[ bdx+1 ]-offset)/block; // number of elements each thread handles for ( int k = 0; k < max_; k++ ) { double val = dval[ offset + ldx + block*k ]; int col = dcolind[ offset + ldx + block*k ]; dot += val * dx[ col+vec ]; } shared[ldz] = dot; __syncthreads(); if ( idx < 2 ) { shared[ldz]+=shared[ldz+blocksize*2]; __syncthreads(); if ( idx == 0 ) { if (betazero) { dy[row+vec] = (shared[ldz]+shared[ldz+blocksize*1])*alpha; } else { dy[row+vec] = (shared[ldz]+shared[ldz+blocksize*1])*alpha + beta*dy [row+vec]; } } } } } // SELLP SpMV kernel 3D grid // see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. 
BISHOP // A UNIFIED SPARSE MATRIX DATA FORMAT // FOR MODERN PROCESSORS WITH WIDE SIMD UNITS // SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel template<bool betazero> __global__ void zmgesellptmv_kernel_8_3D( int num_rows, int num_cols, int num_vecs, int blocksize, int T, double alpha, double * dval, magma_index_t * dcolind, magma_index_t * drowptr, const double * __restrict__ dx, double beta, double * dy) { // T threads assigned to each row int idx = threadIdx.y; // thread in row int idy = threadIdx.x; // local row int idz = threadIdx.z; // vector int ldx = idx * blocksize + idy; int ldz = idz * blocksize * T + idx * blocksize + idy; int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index int row = bdx * blocksize + idy; // global row index int vec = idz*num_rows; extern __shared__ double shared[]; if (row < num_rows ) { double dot = MAGMA_D_MAKE(0.0, 0.0); int offset = drowptr[ bdx ]; int block = blocksize * T; // total number of threads int max_ = (drowptr[ bdx+1 ]-offset)/block; // number of elements each thread handles for ( int k = 0; k < max_; k++ ) { double val = dval[ offset + ldx + block*k ]; int col = dcolind[ offset + ldx + block*k ]; dot += val * dx[ col+vec ]; } shared[ldz] = dot; __syncthreads(); if ( idx < 4 ) { shared[ldz]+=shared[ldz+blocksize*4]; __syncthreads(); if ( idx < 2 ) shared[ldz]+=shared[ldz+blocksize*2]; __syncthreads(); if ( idx == 0 ) { if (betazero) { dy[row+vec] = (shared[ldz]+shared[ldz+blocksize*1])*alpha; } else { dy[row+vec] = (shared[ldz]+shared[ldz+blocksize*1])*alpha + beta*dy [row+vec]; } } } } } // SELLP SpMV kernel 3D grid // see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP // A UNIFIED SPARSE MATRIX DATA FORMAT // FOR MODERN PROCESSORS WITH WIDE SIMD UNITS // SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel template<bool betazero> __global__ void zmgesellptmv_kernel_16_3D( int num_rows, int num_cols, int num_vecs, int blocksize, int T, double alpha, double * dval, magma_index_t * dcolind, magma_index_t * drowptr, double * dx, double beta, double * dy) { // T threads assigned to each row int idx = threadIdx.y; // thread in row int idy = threadIdx.x; // local row int idz = threadIdx.z; // vector int ldx = idx * blocksize + idy; int ldz = idz * blocksize * T + idx * blocksize + idy; int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index int row = bdx * blocksize + idy; // global row index int vec = idz*num_rows; extern __shared__ double shared[]; if (row < num_rows ) { double dot = MAGMA_D_MAKE(0.0, 0.0); int offset = drowptr[ bdx ]; int block = blocksize * T; // total number of threads int max_ = (drowptr[ bdx+1 ]-offset)/block; // number of elements each thread handles for ( int k = 0; k < max_; k++ ) { double val = dval[ offset + ldx + block*k ]; int col = dcolind[ offset + ldx + block*k ]; dot += val * dx[ col+vec ]; } shared[ldz] = dot; __syncthreads(); if ( idx < 8 ) { shared[ldz]+=shared[ldz+blocksize*8]; __syncthreads(); if ( idx < 4 ) shared[ldz]+=shared[ldz+blocksize*4]; __syncthreads(); if ( idx < 2 ) shared[ldz]+=shared[ldz+blocksize*2]; __syncthreads(); if ( idx == 0 ) { if (betazero) { dy[row+vec] = (shared[ldz]+shared[ldz+blocksize*1])*alpha; } else { dy[row+vec] = (shared[ldz]+shared[ldz+blocksize*1])*alpha + beta*dy [row+vec]; } } } } } // SELLP SpMV kernel 3D grid // see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. 
BISHOP // A UNIFIED SPARSE MATRIX DATA FORMAT // FOR MODERN PROCESSORS WITH WIDE SIMD UNITS // SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel template<bool betazero> __global__ void zmgesellptmv_kernel_32_3D( int num_rows, int num_cols, int num_vecs, int blocksize, int T, double alpha, double * dval, magma_index_t * dcolind, magma_index_t * drowptr, double * dx, double beta, double * dy) { // T threads assigned to each row int idx = threadIdx.y; // thread in row int idy = threadIdx.x; // local row int idz = threadIdx.z; // vector int ldx = idx * blocksize + idy; int ldz = idz * blocksize * T + idx * blocksize + idy; int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index int row = bdx * blocksize + idy; // global row index int vec = idz*num_rows; extern __shared__ double shared[]; if (row < num_rows ) { double dot = MAGMA_D_MAKE(0.0, 0.0); int offset = drowptr[ bdx ]; int block = blocksize * T; // total number of threads int max_ = (drowptr[ bdx+1 ]-offset)/block; // number of elements each thread handles for ( int k = 0; k < max_; k++ ) { double val = dval[ offset + ldx + block*k ]; int col = dcolind[ offset + ldx + block*k ]; dot += val * dx[ col+vec ]; } shared[ldz] = dot; __syncthreads(); if ( idx < 16 ) { shared[ldz]+=shared[ldz+blocksize*16]; __syncthreads(); if ( idx < 8 ) shared[ldz]+=shared[ldz+blocksize*8]; __syncthreads(); if ( idx < 4 ) shared[ldz]+=shared[ldz+blocksize*4]; __syncthreads(); if ( idx < 2 ) shared[ldz]+=shared[ldz+blocksize*2]; __syncthreads(); if ( idx == 0 ) { if (betazero) { dy[row+vec] = (shared[ldz]+shared[ldz+blocksize*1])*alpha; } else { dy[row+vec] = (shared[ldz]+shared[ldz+blocksize*1])*alpha + beta*dy [row+vec]; } } } } } /************************* same but using texture mem *************************/ // SELLP SpMV kernel 2D grid - for large number of vectors // see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP // A UNIFIED SPARSE MATRIX DATA FORMAT // FOR MODERN PROCESSORS WITH WIDE SIMD UNITS // SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel template<bool betazero> __global__ void zmgesellptmv_kernel_1_3D_tex( int num_rows, int num_cols, int num_vecs, int blocksize, int T, double alpha, double * dval, magma_index_t * dcolind, magma_index_t * drowptr, cudaTextureObject_t texdx, double beta, double * dy) { #if defined(PRECISION_d) && defined(TEXTURE) && (__CUDA_ARCH__ >= 300) int idx = threadIdx.x; // local row int idy = threadIdx.y; // vector int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index int row = bdx * blocksize + idx; // global row index if (row < num_rows ) { double dot1 = MAGMA_D_MAKE(0.0, 0.0); double dot2 = MAGMA_D_MAKE(0.0, 0.0); int offset = drowptr[ bdx ]; int max_ = (drowptr[ bdx+1 ]-offset)/blocksize; // number of elements each thread handles for ( int k = 0; k < max_; k++ ) { double val = dval[ offset + idx + blocksize*k ]; int col = num_vecs * dcolind[ offset + idx + blocksize*k ]; int4 v = tex1Dfetch<int4>(texdx, col/2 + idy ); dot1 += val * __hiloint2double(v.y, v.x); dot2 += val * __hiloint2double(v.w, v.z); } if (betazero) { dy[row+num_rows*idy*2] = dot1*alpha; dy[row+num_rows*idy*2+num_rows] = dot2*alpha; } else { dy[row+num_rows*idy*2] = dot1*alpha + beta*dy [row*num_vecs+idy*2]; dy[row+num_rows*idy*2+num_rows] = dot2*alpha + beta*dy [row*num_vecs+idy*2+1]; } } #endif } // SELLP SpMV kernel 3D grid // see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. 
BISHOP // A UNIFIED SPARSE MATRIX DATA FORMAT // FOR MODERN PROCESSORS WITH WIDE SIMD UNITS // SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel template<bool betazero> __global__ void zmgesellptmv_kernel_4_3D_tex( int num_rows, int num_cols, int num_vecs, int blocksize, int T, double alpha, double * dval, magma_index_t * dcolind, magma_index_t * drowptr, cudaTextureObject_t texdx, double beta, double * dy) { #if defined(PRECISION_d) && defined(TEXTURE) && (__CUDA_ARCH__ >= 300) // T threads assigned to each row int idx = threadIdx.y; // thread in row int idy = threadIdx.x; // local row int idz = threadIdx.z; // vector int ldx = idx * blocksize + idy; int ldz = idz * blocksize * T + idx * blocksize + idy; int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index int row = bdx * blocksize + idy; // global row index int sv = num_vecs/2 * blocksize * T; extern __shared__ double shared[]; if (row < num_rows ) { double dot1 = MAGMA_D_MAKE(0.0, 0.0); double dot2 = MAGMA_D_MAKE(0.0, 0.0); int offset = drowptr[ bdx ]; int block = blocksize * T; // total number of threads int max_ = (drowptr[ bdx+1 ]-offset)/block; // number of elements each thread handles for ( int k = 0; k < max_; k++ ) { double val = dval[ offset + ldx + block*k ]; int col = num_vecs * dcolind[ offset + ldx + block*k ]; int4 v = tex1Dfetch<int4>(texdx, col/2 + idz ); dot1 += val * __hiloint2double(v.y, v.x); dot2 += val * __hiloint2double(v.w, v.z); } shared[ldz] = dot1; shared[ldz+sv] = dot2; __syncthreads(); if ( idx < 2 ) { shared[ldz]+=shared[ldz+blocksize*2]; shared[ldz+sv]+=shared[ldz+sv+blocksize*2]; __syncthreads(); if ( idx == 0 ) { if (betazero) { dy[row+num_rows*idz*2] = (shared[ldz]+shared[ldz+blocksize*1])*alpha; dy[row+num_rows*idz*2+num_rows] = (shared[ldz+sv]+shared[ldz+sv+blocksize*1])*alpha; } else { dy[row+num_rows*idz*2] = (shared[ldz]+shared[ldz+blocksize*1])*alpha + beta*dy [row*num_vecs+idz*2]; dy[row+num_rows*idz*2+num_rows] = (shared[ldz+sv]+shared[ldz+sv+blocksize*1])*alpha + beta*dy [row*num_vecs+idz*2+1]; } } } } #endif } // SELLP SpMV kernel 3D grid // see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. 
BISHOP // A UNIFIED SPARSE MATRIX DATA FORMAT // FOR MODERN PROCESSORS WITH WIDE SIMD UNITS // SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel template<bool betazero> __global__ void zmgesellptmv_kernel_8_3D_tex( int num_rows, int num_cols, int num_vecs, int blocksize, int T, double alpha, double * dval, magma_index_t * dcolind, magma_index_t * drowptr, cudaTextureObject_t texdx, double beta, double * dy) { #if defined(PRECISION_d) && defined(TEXTURE) && (__CUDA_ARCH__ >= 300) // T threads assigned to each row int idx = threadIdx.y; // thread in row int idy = threadIdx.x; // local row int idz = threadIdx.z; // vector int ldx = idx * blocksize + idy; int ldz = idz * blocksize * T + idx * blocksize + idy; int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index int row = bdx * blocksize + idy; // global row index int sv = num_vecs/2 * blocksize * T; extern __shared__ double shared[]; if (row < num_rows ) { double dot1 = MAGMA_D_MAKE(0.0, 0.0); double dot2 = MAGMA_D_MAKE(0.0, 0.0); int offset = drowptr[ bdx ]; int block = blocksize * T; // total number of threads int max_ = (drowptr[ bdx+1 ]-offset)/block; // number of elements each thread handles for ( int k = 0; k < max_; k++ ) { double val = dval[ offset + ldx + block*k ]; int col = num_vecs * dcolind[ offset + ldx + block*k ]; int4 v = tex1Dfetch<int4>(texdx, col/2 + idz ); dot1 += val * __hiloint2double(v.y, v.x); dot2 += val * __hiloint2double(v.w, v.z); } shared[ldz] = dot1; shared[ldz+sv] = dot2; __syncthreads(); if ( idx < 4 ) { shared[ldz]+=shared[ldz+blocksize*4]; shared[ldz+sv]+=shared[ldz+sv+blocksize*4]; __syncthreads(); if ( idx < 2 ) { shared[ldz]+=shared[ldz+blocksize*2]; shared[ldz+sv]+=shared[ldz+sv+blocksize*2]; } __syncthreads(); if ( idx == 0 ) { if (betazero) { dy[row+num_rows*idz*2] = (shared[ldz]+shared[ldz+blocksize*1])*alpha; dy[row+num_rows*idz*2+num_rows] = (shared[ldz+sv]+shared[ldz+sv+blocksize*1])*alpha; } else { dy[row+num_rows*idz*2] = (shared[ldz]+shared[ldz+blocksize*1])*alpha + beta*dy [row*num_vecs+idz*2]; dy[row+num_rows*idz*2+num_rows] = (shared[ldz+sv]+shared[ldz+sv+blocksize*1])*alpha + beta*dy [row*num_vecs+idz*2+1]; } } } } #endif } // SELLP SpMV kernel 3D grid // see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. 
BISHOP // A UNIFIED SPARSE MATRIX DATA FORMAT // FOR MODERN PROCESSORS WITH WIDE SIMD UNITS // SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel template<bool betazero> __global__ void zmgesellptmv_kernel_16_3D_tex( int num_rows, int num_cols, int num_vecs, int blocksize, int T, double alpha, double * dval, magma_index_t * dcolind, magma_index_t * drowptr, cudaTextureObject_t texdx, double beta, double * dy) { #if defined(PRECISION_d) && defined(TEXTURE) && (__CUDA_ARCH__ >= 300) // T threads assigned to each row int idx = threadIdx.y; // thread in row int idy = threadIdx.x; // local row int idz = threadIdx.z; // vector int ldx = idx * blocksize + idy; int ldz = idz * blocksize * T + idx * blocksize + idy; int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index int row = bdx * blocksize + idy; // global row index int sv = num_vecs/2 * blocksize * T; extern __shared__ double shared[]; if (row < num_rows ) { double dot1 = MAGMA_D_MAKE(0.0, 0.0); double dot2 = MAGMA_D_MAKE(0.0, 0.0); int offset = drowptr[ bdx ]; int block = blocksize * T; // total number of threads int max_ = (drowptr[ bdx+1 ]-offset)/block; // number of elements each thread handles for ( int k = 0; k < max_; k++ ) { double val = dval[ offset + ldx + block*k ]; int col = num_vecs * dcolind[ offset + ldx + block*k ]; int4 v = tex1Dfetch<int4>(texdx, col/2 + idz ); dot1 += val * __hiloint2double(v.y, v.x); dot2 += val * __hiloint2double(v.w, v.z); } shared[ldz] = dot1; shared[ldz+sv] = dot2; __syncthreads(); if ( idx < 8 ) { shared[ldz]+=shared[ldz+blocksize*8]; shared[ldz+sv]+=shared[ldz+sv+blocksize*8]; __syncthreads(); if ( idx < 4 ) { shared[ldz]+=shared[ldz+blocksize*4]; shared[ldz+sv]+=shared[ldz+sv+blocksize*4]; } if ( idx < 2 ) { shared[ldz]+=shared[ldz+blocksize*2]; shared[ldz+sv]+=shared[ldz+sv+blocksize*2]; } __syncthreads(); if ( idx == 0 ) { if (betazero) { dy[row+num_rows*idz*2] = (shared[ldz]+shared[ldz+blocksize*1])*alpha; dy[row+num_rows*idz*2+num_rows] = (shared[ldz+sv]+shared[ldz+sv+blocksize*1])*alpha; } else { dy[row+num_rows*idz*2] = (shared[ldz]+shared[ldz+blocksize*1])*alpha + beta*dy [row*num_vecs+idz*2]; dy[row+num_rows*idz*2+num_rows] = (shared[ldz+sv]+shared[ldz+sv+blocksize*1])*alpha + beta*dy [row*num_vecs+idz*2+1]; } } } } #endif } // SELLP SpMV kernel 3D grid // see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. 
BISHOP // A UNIFIED SPARSE MATRIX DATA FORMAT // FOR MODERN PROCESSORS WITH WIDE SIMD UNITS // SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel template<bool betazero> __global__ void zmgesellptmv_kernel_32_3D_tex( int num_rows, int num_cols, int num_vecs, int blocksize, int T, double alpha, double * dval, magma_index_t * dcolind, magma_index_t * drowptr, cudaTextureObject_t texdx, double beta, double * dy) { #if defined(PRECISION_d) && defined(TEXTURE) && (__CUDA_ARCH__ >= 300) // T threads assigned to each row int idx = threadIdx.y; // thread in row int idy = threadIdx.x; // local row int idz = threadIdx.z; // vector int ldx = idx * blocksize + idy; int ldz = idz * blocksize * T + idx * blocksize + idy; int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index int row = bdx * blocksize + idy; // global row index int sv = num_vecs/2 * blocksize * T; extern __shared__ double shared[]; if (row < num_rows ) { double dot1 = MAGMA_D_MAKE(0.0, 0.0); double dot2 = MAGMA_D_MAKE(0.0, 0.0); int offset = drowptr[ bdx ]; int block = blocksize * T; // total number of threads int max_ = (drowptr[ bdx+1 ]-offset)/block; // number of elements each thread handles for ( int k = 0; k < max_; k++ ) { double val = dval[ offset + ldx + block*k ]; int col = num_vecs * dcolind[ offset + ldx + block*k ]; int4 v = tex1Dfetch<int4>(texdx, col/2 + idz ); dot1 += val * __hiloint2double(v.y, v.x); dot2 += val * __hiloint2double(v.w, v.z); } shared[ldz] = dot1; shared[ldz+sv] = dot2; __syncthreads(); if ( idx < 16 ) { shared[ldz]+=shared[ldz+blocksize*16]; shared[ldz+sv]+=shared[ldz+sv+blocksize*16]; __syncthreads(); if ( idx < 8 ) { shared[ldz]+=shared[ldz+blocksize*8]; shared[ldz+sv]+=shared[ldz+sv+blocksize*8]; } if ( idx < 4 ) { shared[ldz]+=shared[ldz+blocksize*4]; shared[ldz+sv]+=shared[ldz+sv+blocksize*4]; } if ( idx < 2 ) { shared[ldz]+=shared[ldz+blocksize*2]; shared[ldz+sv]+=shared[ldz+sv+blocksize*2]; } __syncthreads(); if ( idx == 0 ) { if (betazero) { dy[row+num_rows*idz*2] = (shared[ldz]+shared[ldz+blocksize*1])*alpha; dy[row+num_rows*idz*2+num_rows] = (shared[ldz+sv]+shared[ldz+sv+blocksize*1])*alpha; } else { dy[row+num_rows*idz*2] = (shared[ldz]+shared[ldz+blocksize*1])*alpha + beta*dy [row*num_vecs+idz*2]; dy[row+num_rows*idz*2+num_rows] = (shared[ldz+sv]+shared[ldz+sv+blocksize*1])*alpha + beta*dy [row*num_vecs+idz*2+1]; } } } } #endif } /** Purpose ------- This routine computes Y = alpha * A^t * X + beta * Y on the GPU. Input format is SELLP. Note, that the input format for X is row-major while the output format for Y is column major! Arguments --------- @param[in] transA magma_trans_t transpose A? @param[in] m magma_int_t number of rows in A @param[in] n magma_int_t number of columns in A @param[in] num_vecs magma_int_t number of columns in X and Y @param[in] blocksize magma_int_t number of rows in one ELL-slice @param[in] slices magma_int_t number of slices in matrix @param[in] alignment magma_int_t number of threads assigned to one row @param[in] alpha double scalar multiplier @param[in] dval magmaDouble_ptr array containing values of A in SELLP @param[in] dcolind magmaIndex_ptr columnindices of A in SELLP @param[in] drowptr magmaIndex_ptr rowpointer of SELLP @param[in] dx magmaDouble_ptr input vector x @param[in] beta double scalar multiplier @param[out] dy magmaDouble_ptr input/output vector y @param[in] queue magma_queue_t Queue to execute in. 
@ingroup magmasparse_dblas ********************************************************************/ extern "C" magma_int_t magma_dmgesellpmv( magma_trans_t transA, magma_int_t m, magma_int_t n, magma_int_t num_vecs, magma_int_t blocksize, magma_int_t slices, magma_int_t alignment, double alpha, magmaDouble_ptr dval, magmaIndex_ptr dcolind, magmaIndex_ptr drowptr, magmaDouble_ptr dx, double beta, magmaDouble_ptr dy, magma_queue_t queue ) { // using a 3D thread grid for small num_vecs, a 2D grid otherwise int texture=0, kepler=0, precision=0; magma_int_t arch = magma_getdevice_arch(); if ( arch > 300 ) kepler = 1; #if defined(PRECISION_d) precision = 1; #endif #if defined(TEXTURE) texture = 1; #endif if ( (texture==1) && (precision==1) && (kepler==1) ) { // Create channel. cudaChannelFormatDesc channel_desc; channel_desc = cudaCreateChannelDesc(32, 32, 32, 32, cudaChannelFormatKindSigned); // Create resource descriptor. struct cudaResourceDesc resDescdx; memset(&resDescdx, 0, sizeof(resDescdx)); resDescdx.resType = cudaResourceTypeLinear; resDescdx.res.linear.devPtr = (void*)dx; resDescdx.res.linear.desc = channel_desc; resDescdx.res.linear.sizeInBytes = m * num_vecs * sizeof(double); // Specify texture object parameters. struct cudaTextureDesc texDesc; memset(&texDesc, 0, sizeof(texDesc)); texDesc.addressMode[0] = cudaAddressModeClamp; texDesc.filterMode = cudaFilterModePoint; texDesc.readMode = cudaReadModeElementType; // Create texture object. cudaTextureObject_t texdx = 0; cudaCreateTextureObject(&texdx, &resDescdx, &texDesc, NULL); cudaDeviceSetSharedMemConfig(cudaSharedMemBankSizeEightByte); if ( num_vecs%2 ==1 ) { // only multiple of 2 can be processed printf("error: number of vectors has to be multiple of 2.\n"); return MAGMA_ERR_NOT_SUPPORTED; } if ( num_vecs > 8 ) // avoid running into memory problems alignment = 1; int num_threads = (num_vecs/2) * blocksize*alignment; // every thread handles two vectors if ( num_threads > 1024 ) printf("error: too many threads requested.\n"); dim3 block( blocksize, alignment, num_vecs/2 ); int dimgrid1 = int( sqrt( double( slices ))); int dimgrid2 = magma_ceildiv( slices, dimgrid1 ); dim3 grid( dimgrid1, dimgrid2, 1); int Ms = num_vecs * blocksize*alignment * sizeof( double ); if ( alignment == 1) { dim3 block( blocksize, num_vecs/2, 1 ); if ( beta == MAGMA_D_MAKE( 0.0, 0.0 ) ) zmgesellptmv_kernel_1_3D_tex<true><<< grid, block, 0, queue->cuda_stream() >>> ( m, n, num_vecs, blocksize, alignment, alpha, dval, dcolind, drowptr, texdx, beta, dy ); else zmgesellptmv_kernel_1_3D_tex<false><<< grid, block, 0, queue->cuda_stream() >>> ( m, n, num_vecs, blocksize, alignment, alpha, dval, dcolind, drowptr, texdx, beta, dy ); } else if ( alignment == 4) { dim3 block( blocksize, alignment, num_vecs/2 ); if ( beta == MAGMA_D_MAKE( 0.0, 0.0 ) ) zmgesellptmv_kernel_4_3D_tex<true><<< grid, block, Ms, queue->cuda_stream() >>> ( m, n, num_vecs, blocksize, alignment, alpha, dval, dcolind, drowptr, texdx, beta, dy ); else zmgesellptmv_kernel_4_3D_tex<false><<< grid, block, Ms, queue->cuda_stream() >>> ( m, n, num_vecs, blocksize, alignment, alpha, dval, dcolind, drowptr, texdx, beta, dy ); } else if ( alignment == 8) { dim3 block( blocksize, alignment, num_vecs/2 ); if ( beta == MAGMA_D_MAKE( 0.0, 0.0 ) ) zmgesellptmv_kernel_8_3D_tex<true><<< grid, block, Ms, queue->cuda_stream() >>> ( m, n, num_vecs, blocksize, alignment, alpha, dval, dcolind, drowptr, texdx, beta, dy ); else zmgesellptmv_kernel_8_3D_tex<false><<< grid, block, Ms, queue->cuda_stream() >>> ( m, n, 
num_vecs, blocksize, alignment, alpha, dval, dcolind, drowptr, texdx, beta, dy ); } else if ( alignment == 16) { dim3 block( blocksize, alignment, num_vecs/2 ); if ( beta == MAGMA_D_MAKE( 0.0, 0.0 ) ) zmgesellptmv_kernel_16_3D_tex<true><<< grid, block, Ms, queue->cuda_stream() >>> ( m, n, num_vecs, blocksize, alignment, alpha, dval, dcolind, drowptr, texdx, beta, dy ); else zmgesellptmv_kernel_16_3D_tex<false><<< grid, block, Ms, queue->cuda_stream() >>> ( m, n, num_vecs, blocksize, alignment, alpha, dval, dcolind, drowptr, texdx, beta, dy ); } else if ( alignment == 32) { dim3 block( blocksize, alignment, num_vecs/2 ); if ( beta == MAGMA_D_MAKE( 0.0, 0.0 ) ) zmgesellptmv_kernel_32_3D_tex<true><<< grid, block, Ms, queue->cuda_stream() >>> ( m, n, num_vecs, blocksize, alignment, alpha, dval, dcolind, drowptr, texdx, beta, dy ); else zmgesellptmv_kernel_32_3D_tex<false><<< grid, block, Ms, queue->cuda_stream() >>> ( m, n, num_vecs, blocksize, alignment, alpha, dval, dcolind, drowptr, texdx, beta, dy ); } else { printf("error: alignment %d not supported.\n", int(alignment) ); return MAGMA_ERR_NOT_SUPPORTED; } } else { if ( num_vecs%2 ==1 ) { // only multiple of 2 can be processed printf("error: number of vectors has to be multiple of 2.\n"); return MAGMA_ERR_NOT_SUPPORTED; } if ( num_vecs > 8 ) // avoid running into memory problems alignment = 1; int num_threads = num_vecs * blocksize*alignment; // every thread handles two vectors if ( num_threads > 1024 ) printf("error: too many threads requested.\n"); int dimgrid1 = int( sqrt( double( slices ))); int dimgrid2 = magma_ceildiv( slices, dimgrid1 ); dim3 grid( dimgrid1, dimgrid2, 1); int Ms = num_threads * sizeof( double ); if ( alignment == 1) { dim3 block( blocksize, num_vecs/2, 1 ); if ( beta == MAGMA_D_MAKE( 0.0, 0.0 ) ) zmgesellptmv_kernel_1_3D<true><<< grid, block, 0, queue->cuda_stream() >>> ( m, n, num_vecs, blocksize, alignment, alpha, dval, dcolind, drowptr, dx, beta, dy ); else zmgesellptmv_kernel_1_3D<false><<< grid, block, 0, queue->cuda_stream() >>> ( m, n, num_vecs, blocksize, alignment, alpha, dval, dcolind, drowptr, dx, beta, dy ); } else if ( alignment == 4) { dim3 block( blocksize, alignment, num_vecs/2 ); if ( beta == MAGMA_D_MAKE( 0.0, 0.0 ) ) zmgesellptmv_kernel_4_3D<true><<< grid, block, Ms, queue->cuda_stream() >>> ( m, n, num_vecs, blocksize, alignment, alpha, dval, dcolind, drowptr, dx, beta, dy ); else zmgesellptmv_kernel_4_3D<false><<< grid, block, Ms, queue->cuda_stream() >>> ( m, n, num_vecs, blocksize, alignment, alpha, dval, dcolind, drowptr, dx, beta, dy ); } else if ( alignment == 8) { dim3 block( blocksize, alignment, num_vecs/2 ); if ( beta == MAGMA_D_MAKE( 0.0, 0.0 ) ) zmgesellptmv_kernel_8_3D<true><<< grid, block, Ms, queue->cuda_stream() >>> ( m, n, num_vecs, blocksize, alignment, alpha, dval, dcolind, drowptr, dx, beta, dy ); else zmgesellptmv_kernel_8_3D<false><<< grid, block, Ms, queue->cuda_stream() >>> ( m, n, num_vecs, blocksize, alignment, alpha, dval, dcolind, drowptr, dx, beta, dy ); } else if ( alignment == 16) { dim3 block( blocksize, alignment, num_vecs/2 ); if ( beta == MAGMA_D_MAKE( 0.0, 0.0 ) ) zmgesellptmv_kernel_16_3D<true><<< grid, block, Ms, queue->cuda_stream() >>> ( m, n, num_vecs, blocksize, alignment, alpha, dval, dcolind, drowptr, dx, beta, dy ); else zmgesellptmv_kernel_16_3D<false><<< grid, block, Ms, queue->cuda_stream() >>> ( m, n, num_vecs, blocksize, alignment, alpha, dval, dcolind, drowptr, dx, beta, dy ); } else if ( alignment == 32) { dim3 block( blocksize, alignment, 
num_vecs/2 ); if ( beta == MAGMA_D_MAKE( 0.0, 0.0 ) ) zmgesellptmv_kernel_32_3D<true><<< grid, block, Ms, queue->cuda_stream() >>> ( m, n, num_vecs, blocksize, alignment, alpha, dval, dcolind, drowptr, dx, beta, dy ); else zmgesellptmv_kernel_32_3D<false><<< grid, block, Ms, queue->cuda_stream() >>> ( m, n, num_vecs, blocksize, alignment, alpha, dval, dcolind, drowptr, dx, beta, dy ); } else { printf("error: alignment %d not supported.\n", int(alignment) ); return MAGMA_ERR_NOT_SUPPORTED; } } return MAGMA_SUCCESS; }
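// ---------------------------------------------------------------------------
// Editorial sketch (not part of the surrounding files): the zmgesellptmv
// kernels above all end with the same shared-memory cascade (idx < 16,
// idx < 8, ..., idx == 0). A minimal stand-alone CUDA kernel with the same
// tree reduction, stripped of the SELL-P indexing and of the dual dot1/dot2
// accumulators, is shown below. The kernel name and launch shape are
// illustrative assumptions, not MAGMA symbols.
__global__ void tree_reduce_demo(const double *in, double *out,
                                 int blocksize, int T)
{
    extern __shared__ double shared[];
    int idx = threadIdx.y;                     // which of the T threads on this row
    int idy = threadIdx.x;                     // local row inside the slice
    int ldx = idx * blocksize + idy;

    // one partial sum per thread, as produced by the k-loop in the kernels above
    shared[ldx] = in[blockIdx.x * blocksize * T + ldx];
    __syncthreads();

    // halve the number of active threads per row each step: T/2, T/4, ..., 1
    for (int stride = T / 2; stride > 0; stride /= 2) {
        if (idx < stride)
            shared[ldx] += shared[ldx + blocksize * stride];
        __syncthreads();
    }
    if (idx == 0)
        out[blockIdx.x * blocksize + idy] = shared[idy];   // reduced value per row
}
// The unrolled branches in the kernels above are this loop written out for
// T = 4, 8, 16 and 32, duplicated because each thread accumulates two vectors.
// ---------------------------------------------------------------------------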
888eeea132052511fc082e5f8f0ceb18df1758af.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <math.h> #include <hiprand/hiprand.h> #include <hiprand/hiprand_kernel.h> #include "cuda_vector_routines.h" #include "isspa_class.h" #include "isspa_force_cuda.h" #include "constants.h" #include "hip/hip_runtime_api.h" using namespace std; // constants __constant__ int nTypes; __constant__ int nMC; __constant__ int nRs; __constant__ int nGRs; __constant__ int nERs; __constant__ int nAtoms; __constant__ int nPairs; __constant__ float2 box; __constant__ float2 forceRparams; __constant__ float2 gRparams; __constant__ float2 eRparams; // device functions // CUDA Kernels // atomic multiply __device__ float atomicMul(float* address, float val) { unsigned int* address_as_u = (unsigned int*)address; unsigned int old = *address_as_u, assumed; do { assumed = old; old = atomicCAS(address_as_u, assumed, __float_as_uint(val * __uint_as_float(assumed))); } while (assumed != old); return __uint_as_float(old); } // warp reduce a float using multiplication __inline__ __device__ float warpReduceMul(float val) { for (int offset = warpSize/2; offset > 0; offset /= 2) val *= __shfl_down(val, offset); return val; } // warp reduce a float4 __inline__ __device__ float4 warpReduceSumQuad(float4 val) { for (int offset = warpSize / 2; offset > 0; offset /= 2) { val.x += __shfl_down(val.x, offset); val.y += __shfl_down(val.y, offset); val.z += __shfl_down(val.z, offset); val.w += __shfl_down(val.w, offset); } return val; } // warp reduce a float4 but only the first three values __inline__ __device__ float4 warpReduceSumTriple(float4 val) { for (int offset = warpSize / 2; offset > 0; offset /= 2) { val.x += __shfl_down(val.x, offset); val.y += __shfl_down(val.y, offset); val.z += __shfl_down(val.z, offset); } return val; } // kernel to generate MC points around each atom __global__ void isspa_MC_points_kernel(float4 *xyz, float4 *mcpos, hiprandState_t *state, const float* __restrict__ rmax, int *isspaTypes) { unsigned int index = threadIdx.x + blockIdx.x*blockDim.x; int atom; int it; float r2; float rmax_l; float4 mcr; float4 mcpos_l; hiprandState_t threadState; atom = int(double(index)/double(nMC)); if (atom < nAtoms) { // load atom paramters it = __ldg(isspaTypes+atom); rmax_l = rmax[it]; mcpos_l = __ldg(xyz+atom); // initialize the random state threadState = state[index]; // generate point in constant density sphere do { mcr.x = fmaf(2.0f,hiprand_uniform(&threadState),-1.0f); mcr.y = fmaf(2.0f,hiprand_uniform(&threadState),-1.0f); mcr.z = fmaf(2.0f,hiprand_uniform(&threadState),-1.0f); r2 = mcr.x*mcr.x + mcr.y*mcr.y + mcr.z*mcr.z; } while (r2 >= 0.99f); // expand sphere and translate by atom position mcr *= rmax_l; mcpos_l += mcr; // initialize density at MC point to 1 mcpos_l.w = 1.0f; // save MC point and random state back to global memory mcpos[index] = mcpos_l; state[index] = threadState; } } // kernel to compute density and mean field at each MC point __global__ void __launch_bounds__(32, 8) isspa_field_kernel(float4 *xyz, const float* __restrict__ rmax, int *isspaTypes, const float* __restrict__ gTable, const float* __restrict__ eTable, float4 *mcpos, float *buf_mcpos, float4 *buf_enow, float4 *buf_e0now) { int atom; int bin; int jt; int mc; int tRow, tCol; int tileIdx; float rmax_l; float dist2, dist; float fracDist; float g1, g2; float e1, e2; float etab; float2 gRparams_l = gRparams; float2 eRparams_l = eRparams; float4 r; float4 mcpos_l; float4 enow_l; float4 e0now_l; // 
Determine the tile index to be calculated tileIdx = blockIdx.x; //obtain the number of rows and columns in tile matrix unsigned int nRows = ceil(double(nAtoms)/double(32.0)); unsigned int nCols = ceil(double(nMC*nAtoms)/double(32.0f)); // Determine the current tiles position in the tile matrix tRow = int(double(tileIdx)/double(nCols)); tCol = tileIdx-nCols*tRow; // Determine the MC point for the thread mc = tCol*32 + threadIdx.x; // Load in atom data in shared memory extern __shared__ float4 xyz_s[]; xyz_s[threadIdx.x] = xyz[32*tRow + threadIdx.x]; __syncthreads(); if (mc < nMC*nAtoms) { // zero the local variables that will be reduced mcpos_l = __ldg(mcpos+mc); mcpos_l.w = 1.0f; enow_l.x = enow_l.y = enow_l.z = enow_l.w = 0.0f; e0now_l.x = e0now_l.y = e0now_l.z = e0now_l.w = 0.0f; // loop over atoms in tile for each MC point in tile for (unsigned int offset = 0; offset < 32; offset++) { atom = 32*tRow + offset; // Load in position, atom type, and rmax of atom jt = __ldg(isspaTypes + atom); rmax_l = rmax[jt]; if (atom < nAtoms) { // Get constants for atom jt = __ldg(isspaTypes+atom); r = min_image(mcpos_l - xyz_s[offset],box.x,box.y); dist2 = r.x*r.x + r.y*r.y + r.z*r.z; dist = sqrtf(dist2); if (dist <= rmax_l) { e0now_l.w += 1.0f; // determine density bin of distance bin = int(__fdividef(dist-gRparams_l.x,gRparams_l.y)); // make sure bin is in limits of density table if (bin < 0) { mcpos_l.w = 0.0f; } else if (bin < nGRs-1) { // Push Density to MC point fracDist = __fdividef((dist - (gRparams_l.x+bin*gRparams_l.y)),gRparams_l.y); g1 = gTable[jt*nGRs+bin]; g2 = gTable[jt*nGRs+bin+1]; mcpos_l.w *= fmaf(g2,fracDist,g1*(1.0f-fracDist)); //mcpos_l.w = gTable[jt*nGRs+bin]; // Push mean field to MC point fracDist = __fdividef((dist - (eRparams_l.x+bin*eRparams_l.y)),eRparams_l.y); e1 = eTable[jt*nERs+bin]; e2 = eTable[jt*nERs+bin+1]; etab = fmaf(e2,fracDist,e1*(1.0f-fracDist)); //etab = eTable[jt*nGRs+bin]; enow_l += r*__fdividef(etab,dist); } } else { e0now_l -= r*__fdividef(e0*xyz_s[offset].w,dist2*dist); } enow_l -= r*__fdividef(e0*xyz_s[offset].w,dist2*dist); } } buf_mcpos[mc*nRows+tRow] = mcpos_l.w; buf_enow[mc*nRows+tRow] = enow_l; buf_e0now[mc*nRows+tRow] = e0now_l; } } __global__ void __launch_bounds__(1024,1) reduce_convert_kernel(float *buf_mcpos, float4 *buf_enow, float4 *buf_e0now, float4 *mcpos, float4 *enow, float4 *e0now) { float buf_mcpos_l; float4 buf_enow_l; float4 buf_e0now_l; int mc; buf_mcpos_l = 1.0f; buf_enow_l.x = buf_enow_l.y = buf_enow_l.z = 0.0f; buf_e0now_l.x = buf_e0now_l.y = buf_e0now_l.z = buf_e0now_l.w = 0.0f; mc = blockIdx.x*blockDim.x + threadIdx.x; if (mc < nMC*nAtoms) { unsigned int nRows = ceil(double(nAtoms)/double(32.0)); for (unsigned int offset = 0; offset < nRows; offset ++) { buf_mcpos_l *= buf_mcpos[mc*nRows+offset]; buf_enow_l.x += buf_enow[mc*nRows+offset].x; buf_enow_l.y += buf_enow[mc*nRows+offset].y; buf_enow_l.z += buf_enow[mc*nRows+offset].z; buf_e0now_l.x += buf_e0now[mc*nRows+offset].x; buf_e0now_l.y += buf_e0now[mc*nRows+offset].y; buf_e0now_l.z += buf_e0now[mc*nRows+offset].z; buf_e0now_l.w += buf_e0now[mc*nRows+offset].w; } mcpos[mc].w = buf_mcpos_l; enow[mc] = buf_enow_l; e0now[mc] = buf_e0now_l; } } __global__ void __launch_bounds__(32, 2) isspa_force_kernel(float4 *xyz, const float* __restrict__ rmax, const float* __restrict__ vtot, int *isspaTypes, const float* __restrict__ forceTable, float4 *f, float4 *enow, float4 *e0now, float4 *mcpos, float4 *isspaf) { int bin; int jt; int mc; int atom; int tRow, tCol; int tileIdx; float igo; float 
fs; float r0; float rmax_l; float vtot_l; float dist2, dist; float pdotr; float cothE; float c1,c2,c3; float dp1,dp2,dp3; float Rz; float f1, f2; float fracDist; float4 xyz_l; float4 r; float4 fi; float4 fj; // Determine the tile index to be calculated tileIdx = blockIdx.x; //obtain the number of rows and columns in tile matrix unsigned int nRows = ceil(double(nAtoms)/double(32.0f)); unsigned int nCols = ceil(double(nMC*nAtoms)/double(32.0f)); // Determine the current tiles position in the tile matrix tRow = int(double(tileIdx)/double(nCols)); tCol = tileIdx-nCols*tRow; // Determine the atom for the thread and lead in the parameters atom = 32*tRow + threadIdx.x; // Determine the atom parameters for this thread xyz_l = __ldg(xyz+atom); jt = __ldg(isspaTypes + atom); rmax_l = rmax[jt]; vtot_l = vtot[jt]; // Load in the density and electric fields for the MC points for this tile into shared memory __shared__ float4 mcpos_s[32]; __shared__ float4 enow_s[32]; __shared__ float4 e0now_s[32]; mc = tCol*32 + threadIdx.x; e0now_s[threadIdx.x] = e0now[mc]; enow_s[threadIdx.x] = enow[mc]; mcpos_s[threadIdx.x] = mcpos[mc]; igo = __fdividef(vtot_l,e0now_s[threadIdx.x].w); mcpos_s[threadIdx.x].w *= igo; r0 = norm3df(enow_s[threadIdx.x].x, enow_s[threadIdx.x].y, enow_s[threadIdx.x].z); enow_s[threadIdx.x].x = __fdividef(enow_s[threadIdx.x].x,r0); enow_s[threadIdx.x].y = __fdividef(enow_s[threadIdx.x].y,r0); enow_s[threadIdx.x].z = __fdividef(enow_s[threadIdx.x].z,r0); enow_s[threadIdx.x].w = r0; e0now_s[threadIdx.x].x = __fdividef(e0now_s[threadIdx.x].x,3.0f); e0now_s[threadIdx.x].y = __fdividef(e0now_s[threadIdx.x].y,3.0f); e0now_s[threadIdx.x].z = __fdividef(e0now_s[threadIdx.x].z,3.0f); e0now_s[threadIdx.x].w = igo; __syncthreads(); if (atom < nAtoms) { // Zero out the forces fi.x = fi.y = fi.z = 0.0f; //fj.x = fj.y = fj.z = 0.0f; // loop over the MC points for each atom for (unsigned int offset = 0; offset < 32; offset += 1) { // Determine the MC points from atom2 mc = tCol*32 + offset; if (mc < nMC*nAtoms) { // Calculate the distance between the MC point and atom1 r = min_image(mcpos_s[offset] - xyz_l,box.x,box.y); dist2 = r.x*r.x + r.y*r.y + r.z*r.z; dist = sqrtf(dist2); // Coulombic Force cothE=__fdividef(1.0f,tanhf(enow_s[offset].w)); c1=cothE-__fdividef(1.0f,enow_s[offset].w); c2=1.0f-2.0f*__fdividef(c1,enow_s[offset].w); c3=cothE-3.0f*__fdividef(c2,enow_s[offset].w); Rz=__fdividef(enow_s[offset].x*r.x+enow_s[offset].y*r.y+enow_s[offset].z*r.z,dist); dp1=3.0f*Rz; dp2=7.5f*Rz*Rz-1.5f; dp3=(17.50f*Rz*Rz-7.50f)*Rz; // Calculate dipole term fs = __fdividef(-xyz_l.w*p0*c1*mcpos_s[offset].w,dist2*dist); fi += fs*(r*__fdividef(dp1,dist)-enow_s[offset]); //fj += fs*(r*__fdividef(dp1,dist)-enow_s[offset]); // Calculate quadrapole term fs = __fdividef(-xyz_l.w*q0*(1.5f*c2-0.5f)*mcpos_s[offset].w,dist2*dist2); fi += fs*(r*__fdividef(dp2,dist)-dp1*enow_s[offset]); //fj += fs*(r*__fdividef(dp2,dist)-dp1*enow_s[offset]); // Calculate octapole term fs = __fdividef(-xyz_l.w*o0*(2.5f*c3-1.5f*c1)*mcpos_s[offset].w,dist2*dist2*dist); fi += fs*(r*__fdividef(dp3,dist)-dp2*enow_s[offset]); //fj += fs*(r*__fdividef(dp3,dist)-dp2*enow_s[offset]); // Lennard-Jones Force if (dist <= rmax_l) { bin = int ( __fdividef(dist-forceRparams.x,forceRparams.y) + 0.5f); if (bin >= (nRs)) { fs = 0.0f; } else { //Lennard-Jones Force fracDist = __fdividef((dist-(forceRparams.x+bin*forceRparams.y)),forceRparams.y); f1 = forceTable[jt*nRs+bin]; f2 = forceTable[jt*nRs+bin+1]; fs = (f1*(1.0-fracDist)+f2*fracDist)*mcpos_s[offset].w; fs = 
fmaf(f2,fracDist,f1*(1.0f-fracDist))*mcpos_s[offset].w; //fs = forceTable[jt*nRs+bin]*mcpos_s[offset].w; } fi += r*__fdividef(-fs,dist); //fj += r*__fdividef(-fs,dist); } else { // Constant Density Dielectric fs=__fdividef(-xyz_l.w*p0,dist2*dist); pdotr=__fdividef(3.0f*(e0now_s[offset].x*r.x+e0now_s[offset].y*r.y+e0now_s[offset].z*r.z),dist2); fi -= fs*(pdotr*r-e0now_s[offset])*e0now_s[offset].w; //fj -= fs*(pdotr*r-e0now_s[offset])*e0now_s[offset].w; } } } atomicAdd(&(f[atom].x), fi.x); atomicAdd(&(f[atom].y), fi.y); atomicAdd(&(f[atom].z), fi.z); //atomicAdd(&(isspaf[atom].x), fj.x); //atomicAdd(&(isspaf[atom].y), fj.y); //atomicAdd(&(isspaf[atom].z), fj.z); } } /* C wrappers for kernels */ float isspa_force_cuda(float4 *xyz_d, float4 *f_d, float4 *isspaf_d, isspa& isspas, int nAtoms_h) { //float isspa_force_cuda(float4 *xyz_d, float4 *f_d, isspa& isspas, int nAtoms_h) { float milliseconds; // timing hipEventRecord(isspas.isspaStart); hipProfilerStart(); // zero IS-SPA arrays on GPU hipMemset(isspas.enow_d, 0.0f, nAtoms_h*isspas.nMC*sizeof(float4)); hipMemset(isspas.e0now_d, 0.0f, nAtoms_h*isspas.nMC*sizeof(float4)); hipMemset(isspaf_d, 0.0f, nAtoms_h*sizeof(float4)); //hipMemset(isspas.buffer_mcpos_d, 1.0f, ceil(nAtoms_h/32.0)*nAtoms_h*isspas.nMC*sizeof(float)); //hipMemset(isspas.buffer_enow_d, 0.0f, ceil(nAtoms_h/32.0)*nAtoms_h*isspas.nMC*sizeof(float4)); //hipMemset(isspas.buffer_e0now_d, 0.0f, ceil(nAtoms_h/32.0)*nAtoms_h*isspas.nMC*sizeof(float4)); // compute position of each MC point hipLaunchKernelGGL(( isspa_MC_points_kernel), dim3(isspas.mcGridSize),dim3(isspas.mcBlockSize) , 0, 0, xyz_d, isspas.mcpos_d, isspas.randStates_d, isspas.rmax_d, isspas.isspaTypes_d); // compute densities and mean electric field value for each MC point hipLaunchKernelGGL(( isspa_field_kernel), dim3(isspas.fieldGridSize), dim3(isspas.fieldBlockSize), 32*sizeof(float4), 0, xyz_d, isspas.rmax_d, isspas.isspaTypes_d, isspas.isspaGTable_d, isspas.isspaETable_d, isspas.mcpos_d, isspas.buffer_mcpos_d, isspas.buffer_enow_d, isspas.buffer_e0now_d); // Reduce the fields and convert to polarizations hipLaunchKernelGGL(( reduce_convert_kernel), dim3(ceil(isspas.nMC*nAtoms_h/1024.0)),dim3(1024), 0, 0, isspas.buffer_mcpos_d, isspas.buffer_enow_d, isspas.buffer_e0now_d, isspas.mcpos_d, isspas.enow_d, isspas.e0now_d); // compute forces for each atom hipLaunchKernelGGL(( isspa_force_kernel), dim3(isspas.forceGridSize), dim3(isspas.forceBlockSize), 0, 0, xyz_d,isspas.rmax_d,isspas.vtot_d,isspas.isspaTypes_d,isspas.isspaForceTable_d,f_d,isspas.enow_d,isspas.e0now_d,isspas.mcpos_d,isspaf_d); hipDeviceSynchronize(); hipProfilerStop(); // finish timing hipEventRecord(isspas.isspaStop); hipEventSynchronize(isspas.isspaStop); hipEventElapsedTime(&milliseconds, isspas.isspaStart, isspas.isspaStop); return milliseconds; } void isspa_grid_block(int nAtoms_h, int nPairs_h, float lbox_h, isspa& isspas) { float2 box_h; int maxThreadsPerBlock = 1024; // determine gridSize and blockSize for MC kernel isspas.mcGridSize = int(ceil(isspas.nMC*nAtoms_h/ (float) maxThreadsPerBlock)); isspas.mcBlockSize = maxThreadsPerBlock; printf("Number of IS-SPA mc kernel blocks: %d \n", isspas.mcGridSize); printf("Number of IS-SPA mc kernel threads per block: %d \n", isspas.mcBlockSize); // determine gridSize and blockSize for field kernel isspas.fieldGridSize = ceil(nAtoms_h/32.0)*ceil(isspas.nMC*nAtoms_h/32.0); isspas.fieldBlockSize = 32; printf("Number of IS-SPA field kernel blocks: %d \n", isspas.fieldGridSize); printf("Number of IS-SPA field 
kernel threads per block: %d \n", isspas.fieldBlockSize); // determine gridSize and blockSize for force kernel //isspas.forceThreads = temp*32; isspas.forceGridSize = ceil(nAtoms_h/32.0)*ceil(isspas.nMC*nAtoms_h/32.0); isspas.forceBlockSize = 32; printf("Number of IS-SPA force kernel blocks: %d \n", isspas.forceGridSize); printf("Number of IS-SPA force kernel threads per block: %d \n", isspas.forceBlockSize); // fill box with box and half box length box_h.x = lbox_h; box_h.y = lbox_h/2.0f; // set constant memory hipMemcpyToSymbol(nMC, &isspas.nMC, sizeof(int)); hipMemcpyToSymbol(nTypes, &isspas.nTypes, sizeof(int)); hipMemcpyToSymbol(nRs, &isspas.nRs, sizeof(int)); hipMemcpyToSymbol(nGRs, &isspas.nGRs, sizeof(int)); hipMemcpyToSymbol(nERs, &isspas.nERs, sizeof(int)); hipMemcpyToSymbol(nAtoms, &nAtoms_h, sizeof(int)); hipMemcpyToSymbol(nPairs, &nPairs_h, sizeof(int)); hipMemcpyToSymbol(box, &box_h, sizeof(float2)); hipMemcpyToSymbol(forceRparams, &isspas.forceRparams, sizeof(float2)); hipMemcpyToSymbol(gRparams, &isspas.gRparams, sizeof(float2)); hipMemcpyToSymbol(eRparams, &isspas.eRparams, sizeof(float2)); }
888eeea132052511fc082e5f8f0ceb18df1758af.cu
#include <stdio.h> #include <stdlib.h> #include <cuda.h> #include <math.h> #include <curand.h> #include <curand_kernel.h> #include "cuda_vector_routines.h" #include "isspa_class.h" #include "isspa_force_cuda.h" #include "constants.h" #include "cuda_profiler_api.h" using namespace std; // constants __constant__ int nTypes; __constant__ int nMC; __constant__ int nRs; __constant__ int nGRs; __constant__ int nERs; __constant__ int nAtoms; __constant__ int nPairs; __constant__ float2 box; __constant__ float2 forceRparams; __constant__ float2 gRparams; __constant__ float2 eRparams; // device functions // CUDA Kernels // atomic multiply __device__ float atomicMul(float* address, float val) { unsigned int* address_as_u = (unsigned int*)address; unsigned int old = *address_as_u, assumed; do { assumed = old; old = atomicCAS(address_as_u, assumed, __float_as_uint(val * __uint_as_float(assumed))); } while (assumed != old); return __uint_as_float(old); } // warp reduce a float using multiplication __inline__ __device__ float warpReduceMul(float val) { for (int offset = warpSize/2; offset > 0; offset /= 2) val *= __shfl_down(val, offset); return val; } // warp reduce a float4 __inline__ __device__ float4 warpReduceSumQuad(float4 val) { for (int offset = warpSize / 2; offset > 0; offset /= 2) { val.x += __shfl_down(val.x, offset); val.y += __shfl_down(val.y, offset); val.z += __shfl_down(val.z, offset); val.w += __shfl_down(val.w, offset); } return val; } // warp reduce a float4 but only the first three values __inline__ __device__ float4 warpReduceSumTriple(float4 val) { for (int offset = warpSize / 2; offset > 0; offset /= 2) { val.x += __shfl_down(val.x, offset); val.y += __shfl_down(val.y, offset); val.z += __shfl_down(val.z, offset); } return val; } // kernel to generate MC points around each atom __global__ void isspa_MC_points_kernel(float4 *xyz, float4 *mcpos, curandState *state, const float* __restrict__ rmax, int *isspaTypes) { unsigned int index = threadIdx.x + blockIdx.x*blockDim.x; int atom; int it; float r2; float rmax_l; float4 mcr; float4 mcpos_l; curandState_t threadState; atom = int(double(index)/double(nMC)); if (atom < nAtoms) { // load atom paramters it = __ldg(isspaTypes+atom); rmax_l = rmax[it]; mcpos_l = __ldg(xyz+atom); // initialize the random state threadState = state[index]; // generate point in constant density sphere do { mcr.x = fmaf(2.0f,curand_uniform(&threadState),-1.0f); mcr.y = fmaf(2.0f,curand_uniform(&threadState),-1.0f); mcr.z = fmaf(2.0f,curand_uniform(&threadState),-1.0f); r2 = mcr.x*mcr.x + mcr.y*mcr.y + mcr.z*mcr.z; } while (r2 >= 0.99f); // expand sphere and translate by atom position mcr *= rmax_l; mcpos_l += mcr; // initialize density at MC point to 1 mcpos_l.w = 1.0f; // save MC point and random state back to global memory mcpos[index] = mcpos_l; state[index] = threadState; } } // kernel to compute density and mean field at each MC point __global__ void __launch_bounds__(32, 8) isspa_field_kernel(float4 *xyz, const float* __restrict__ rmax, int *isspaTypes, const float* __restrict__ gTable, const float* __restrict__ eTable, float4 *mcpos, float *buf_mcpos, float4 *buf_enow, float4 *buf_e0now) { int atom; int bin; int jt; int mc; int tRow, tCol; int tileIdx; float rmax_l; float dist2, dist; float fracDist; float g1, g2; float e1, e2; float etab; float2 gRparams_l = gRparams; float2 eRparams_l = eRparams; float4 r; float4 mcpos_l; float4 enow_l; float4 e0now_l; // Determine the tile index to be calculated tileIdx = blockIdx.x; //obtain the number of rows and 
columns in tile matrix unsigned int nRows = ceil(double(nAtoms)/double(32.0)); unsigned int nCols = ceil(double(nMC*nAtoms)/double(32.0f)); // Determine the current tiles position in the tile matrix tRow = int(double(tileIdx)/double(nCols)); tCol = tileIdx-nCols*tRow; // Determine the MC point for the thread mc = tCol*32 + threadIdx.x; // Load in atom data in shared memory extern __shared__ float4 xyz_s[]; xyz_s[threadIdx.x] = xyz[32*tRow + threadIdx.x]; __syncthreads(); if (mc < nMC*nAtoms) { // zero the local variables that will be reduced mcpos_l = __ldg(mcpos+mc); mcpos_l.w = 1.0f; enow_l.x = enow_l.y = enow_l.z = enow_l.w = 0.0f; e0now_l.x = e0now_l.y = e0now_l.z = e0now_l.w = 0.0f; // loop over atoms in tile for each MC point in tile for (unsigned int offset = 0; offset < 32; offset++) { atom = 32*tRow + offset; // Load in position, atom type, and rmax of atom jt = __ldg(isspaTypes + atom); rmax_l = rmax[jt]; if (atom < nAtoms) { // Get constants for atom jt = __ldg(isspaTypes+atom); r = min_image(mcpos_l - xyz_s[offset],box.x,box.y); dist2 = r.x*r.x + r.y*r.y + r.z*r.z; dist = sqrtf(dist2); if (dist <= rmax_l) { e0now_l.w += 1.0f; // determine density bin of distance bin = int(__fdividef(dist-gRparams_l.x,gRparams_l.y)); // make sure bin is in limits of density table if (bin < 0) { mcpos_l.w = 0.0f; } else if (bin < nGRs-1) { // Push Density to MC point fracDist = __fdividef((dist - (gRparams_l.x+bin*gRparams_l.y)),gRparams_l.y); g1 = gTable[jt*nGRs+bin]; g2 = gTable[jt*nGRs+bin+1]; mcpos_l.w *= fmaf(g2,fracDist,g1*(1.0f-fracDist)); //mcpos_l.w = gTable[jt*nGRs+bin]; // Push mean field to MC point fracDist = __fdividef((dist - (eRparams_l.x+bin*eRparams_l.y)),eRparams_l.y); e1 = eTable[jt*nERs+bin]; e2 = eTable[jt*nERs+bin+1]; etab = fmaf(e2,fracDist,e1*(1.0f-fracDist)); //etab = eTable[jt*nGRs+bin]; enow_l += r*__fdividef(etab,dist); } } else { e0now_l -= r*__fdividef(e0*xyz_s[offset].w,dist2*dist); } enow_l -= r*__fdividef(e0*xyz_s[offset].w,dist2*dist); } } buf_mcpos[mc*nRows+tRow] = mcpos_l.w; buf_enow[mc*nRows+tRow] = enow_l; buf_e0now[mc*nRows+tRow] = e0now_l; } } __global__ void __launch_bounds__(1024,1) reduce_convert_kernel(float *buf_mcpos, float4 *buf_enow, float4 *buf_e0now, float4 *mcpos, float4 *enow, float4 *e0now) { float buf_mcpos_l; float4 buf_enow_l; float4 buf_e0now_l; int mc; buf_mcpos_l = 1.0f; buf_enow_l.x = buf_enow_l.y = buf_enow_l.z = 0.0f; buf_e0now_l.x = buf_e0now_l.y = buf_e0now_l.z = buf_e0now_l.w = 0.0f; mc = blockIdx.x*blockDim.x + threadIdx.x; if (mc < nMC*nAtoms) { unsigned int nRows = ceil(double(nAtoms)/double(32.0)); for (unsigned int offset = 0; offset < nRows; offset ++) { buf_mcpos_l *= buf_mcpos[mc*nRows+offset]; buf_enow_l.x += buf_enow[mc*nRows+offset].x; buf_enow_l.y += buf_enow[mc*nRows+offset].y; buf_enow_l.z += buf_enow[mc*nRows+offset].z; buf_e0now_l.x += buf_e0now[mc*nRows+offset].x; buf_e0now_l.y += buf_e0now[mc*nRows+offset].y; buf_e0now_l.z += buf_e0now[mc*nRows+offset].z; buf_e0now_l.w += buf_e0now[mc*nRows+offset].w; } mcpos[mc].w = buf_mcpos_l; enow[mc] = buf_enow_l; e0now[mc] = buf_e0now_l; } } __global__ void __launch_bounds__(32, 2) isspa_force_kernel(float4 *xyz, const float* __restrict__ rmax, const float* __restrict__ vtot, int *isspaTypes, const float* __restrict__ forceTable, float4 *f, float4 *enow, float4 *e0now, float4 *mcpos, float4 *isspaf) { int bin; int jt; int mc; int atom; int tRow, tCol; int tileIdx; float igo; float fs; float r0; float rmax_l; float vtot_l; float dist2, dist; float pdotr; float cothE; float 
c1,c2,c3; float dp1,dp2,dp3; float Rz; float f1, f2; float fracDist; float4 xyz_l; float4 r; float4 fi; float4 fj; // Determine the tile index to be calculated tileIdx = blockIdx.x; //obtain the number of rows and columns in tile matrix unsigned int nRows = ceil(double(nAtoms)/double(32.0f)); unsigned int nCols = ceil(double(nMC*nAtoms)/double(32.0f)); // Determine the current tiles position in the tile matrix tRow = int(double(tileIdx)/double(nCols)); tCol = tileIdx-nCols*tRow; // Determine the atom for the thread and lead in the parameters atom = 32*tRow + threadIdx.x; // Determine the atom parameters for this thread xyz_l = __ldg(xyz+atom); jt = __ldg(isspaTypes + atom); rmax_l = rmax[jt]; vtot_l = vtot[jt]; // Load in the density and electric fields for the MC points for this tile into shared memory __shared__ float4 mcpos_s[32]; __shared__ float4 enow_s[32]; __shared__ float4 e0now_s[32]; mc = tCol*32 + threadIdx.x; e0now_s[threadIdx.x] = e0now[mc]; enow_s[threadIdx.x] = enow[mc]; mcpos_s[threadIdx.x] = mcpos[mc]; igo = __fdividef(vtot_l,e0now_s[threadIdx.x].w); mcpos_s[threadIdx.x].w *= igo; r0 = norm3df(enow_s[threadIdx.x].x, enow_s[threadIdx.x].y, enow_s[threadIdx.x].z); enow_s[threadIdx.x].x = __fdividef(enow_s[threadIdx.x].x,r0); enow_s[threadIdx.x].y = __fdividef(enow_s[threadIdx.x].y,r0); enow_s[threadIdx.x].z = __fdividef(enow_s[threadIdx.x].z,r0); enow_s[threadIdx.x].w = r0; e0now_s[threadIdx.x].x = __fdividef(e0now_s[threadIdx.x].x,3.0f); e0now_s[threadIdx.x].y = __fdividef(e0now_s[threadIdx.x].y,3.0f); e0now_s[threadIdx.x].z = __fdividef(e0now_s[threadIdx.x].z,3.0f); e0now_s[threadIdx.x].w = igo; __syncthreads(); if (atom < nAtoms) { // Zero out the forces fi.x = fi.y = fi.z = 0.0f; //fj.x = fj.y = fj.z = 0.0f; // loop over the MC points for each atom for (unsigned int offset = 0; offset < 32; offset += 1) { // Determine the MC points from atom2 mc = tCol*32 + offset; if (mc < nMC*nAtoms) { // Calculate the distance between the MC point and atom1 r = min_image(mcpos_s[offset] - xyz_l,box.x,box.y); dist2 = r.x*r.x + r.y*r.y + r.z*r.z; dist = sqrtf(dist2); // Coulombic Force cothE=__fdividef(1.0f,tanhf(enow_s[offset].w)); c1=cothE-__fdividef(1.0f,enow_s[offset].w); c2=1.0f-2.0f*__fdividef(c1,enow_s[offset].w); c3=cothE-3.0f*__fdividef(c2,enow_s[offset].w); Rz=__fdividef(enow_s[offset].x*r.x+enow_s[offset].y*r.y+enow_s[offset].z*r.z,dist); dp1=3.0f*Rz; dp2=7.5f*Rz*Rz-1.5f; dp3=(17.50f*Rz*Rz-7.50f)*Rz; // Calculate dipole term fs = __fdividef(-xyz_l.w*p0*c1*mcpos_s[offset].w,dist2*dist); fi += fs*(r*__fdividef(dp1,dist)-enow_s[offset]); //fj += fs*(r*__fdividef(dp1,dist)-enow_s[offset]); // Calculate quadrapole term fs = __fdividef(-xyz_l.w*q0*(1.5f*c2-0.5f)*mcpos_s[offset].w,dist2*dist2); fi += fs*(r*__fdividef(dp2,dist)-dp1*enow_s[offset]); //fj += fs*(r*__fdividef(dp2,dist)-dp1*enow_s[offset]); // Calculate octapole term fs = __fdividef(-xyz_l.w*o0*(2.5f*c3-1.5f*c1)*mcpos_s[offset].w,dist2*dist2*dist); fi += fs*(r*__fdividef(dp3,dist)-dp2*enow_s[offset]); //fj += fs*(r*__fdividef(dp3,dist)-dp2*enow_s[offset]); // Lennard-Jones Force if (dist <= rmax_l) { bin = int ( __fdividef(dist-forceRparams.x,forceRparams.y) + 0.5f); if (bin >= (nRs)) { fs = 0.0f; } else { //Lennard-Jones Force fracDist = __fdividef((dist-(forceRparams.x+bin*forceRparams.y)),forceRparams.y); f1 = forceTable[jt*nRs+bin]; f2 = forceTable[jt*nRs+bin+1]; fs = (f1*(1.0-fracDist)+f2*fracDist)*mcpos_s[offset].w; fs = fmaf(f2,fracDist,f1*(1.0f-fracDist))*mcpos_s[offset].w; //fs = 
forceTable[jt*nRs+bin]*mcpos_s[offset].w; } fi += r*__fdividef(-fs,dist); //fj += r*__fdividef(-fs,dist); } else { // Constant Density Dielectric fs=__fdividef(-xyz_l.w*p0,dist2*dist); pdotr=__fdividef(3.0f*(e0now_s[offset].x*r.x+e0now_s[offset].y*r.y+e0now_s[offset].z*r.z),dist2); fi -= fs*(pdotr*r-e0now_s[offset])*e0now_s[offset].w; //fj -= fs*(pdotr*r-e0now_s[offset])*e0now_s[offset].w; } } } atomicAdd(&(f[atom].x), fi.x); atomicAdd(&(f[atom].y), fi.y); atomicAdd(&(f[atom].z), fi.z); //atomicAdd(&(isspaf[atom].x), fj.x); //atomicAdd(&(isspaf[atom].y), fj.y); //atomicAdd(&(isspaf[atom].z), fj.z); } } /* C wrappers for kernels */ float isspa_force_cuda(float4 *xyz_d, float4 *f_d, float4 *isspaf_d, isspa& isspas, int nAtoms_h) { //float isspa_force_cuda(float4 *xyz_d, float4 *f_d, isspa& isspas, int nAtoms_h) { float milliseconds; // timing cudaEventRecord(isspas.isspaStart); cudaProfilerStart(); // zero IS-SPA arrays on GPU cudaMemset(isspas.enow_d, 0.0f, nAtoms_h*isspas.nMC*sizeof(float4)); cudaMemset(isspas.e0now_d, 0.0f, nAtoms_h*isspas.nMC*sizeof(float4)); cudaMemset(isspaf_d, 0.0f, nAtoms_h*sizeof(float4)); //cudaMemset(isspas.buffer_mcpos_d, 1.0f, ceil(nAtoms_h/32.0)*nAtoms_h*isspas.nMC*sizeof(float)); //cudaMemset(isspas.buffer_enow_d, 0.0f, ceil(nAtoms_h/32.0)*nAtoms_h*isspas.nMC*sizeof(float4)); //cudaMemset(isspas.buffer_e0now_d, 0.0f, ceil(nAtoms_h/32.0)*nAtoms_h*isspas.nMC*sizeof(float4)); // compute position of each MC point isspa_MC_points_kernel<<<isspas.mcGridSize,isspas.mcBlockSize >>>(xyz_d, isspas.mcpos_d, isspas.randStates_d, isspas.rmax_d, isspas.isspaTypes_d); // compute densities and mean electric field value for each MC point isspa_field_kernel<<<isspas.fieldGridSize, isspas.fieldBlockSize, 32*sizeof(float4)>>>(xyz_d, isspas.rmax_d, isspas.isspaTypes_d, isspas.isspaGTable_d, isspas.isspaETable_d, isspas.mcpos_d, isspas.buffer_mcpos_d, isspas.buffer_enow_d, isspas.buffer_e0now_d); // Reduce the fields and convert to polarizations reduce_convert_kernel<<<ceil(isspas.nMC*nAtoms_h/1024.0),1024>>>(isspas.buffer_mcpos_d, isspas.buffer_enow_d, isspas.buffer_e0now_d, isspas.mcpos_d, isspas.enow_d, isspas.e0now_d); // compute forces for each atom isspa_force_kernel<<<isspas.forceGridSize, isspas.forceBlockSize>>>(xyz_d,isspas.rmax_d,isspas.vtot_d,isspas.isspaTypes_d,isspas.isspaForceTable_d,f_d,isspas.enow_d,isspas.e0now_d,isspas.mcpos_d,isspaf_d); cudaDeviceSynchronize(); cudaProfilerStop(); // finish timing cudaEventRecord(isspas.isspaStop); cudaEventSynchronize(isspas.isspaStop); cudaEventElapsedTime(&milliseconds, isspas.isspaStart, isspas.isspaStop); return milliseconds; } void isspa_grid_block(int nAtoms_h, int nPairs_h, float lbox_h, isspa& isspas) { float2 box_h; int maxThreadsPerBlock = 1024; // determine gridSize and blockSize for MC kernel isspas.mcGridSize = int(ceil(isspas.nMC*nAtoms_h/ (float) maxThreadsPerBlock)); isspas.mcBlockSize = maxThreadsPerBlock; printf("Number of IS-SPA mc kernel blocks: %d \n", isspas.mcGridSize); printf("Number of IS-SPA mc kernel threads per block: %d \n", isspas.mcBlockSize); // determine gridSize and blockSize for field kernel isspas.fieldGridSize = ceil(nAtoms_h/32.0)*ceil(isspas.nMC*nAtoms_h/32.0); isspas.fieldBlockSize = 32; printf("Number of IS-SPA field kernel blocks: %d \n", isspas.fieldGridSize); printf("Number of IS-SPA field kernel threads per block: %d \n", isspas.fieldBlockSize); // determine gridSize and blockSize for force kernel //isspas.forceThreads = temp*32; isspas.forceGridSize = 
ceil(nAtoms_h/32.0)*ceil(isspas.nMC*nAtoms_h/32.0); isspas.forceBlockSize = 32; printf("Number of IS-SPA force kernel blocks: %d \n", isspas.forceGridSize); printf("Number of IS-SPA force kernel threads per block: %d \n", isspas.forceBlockSize); // fill box with box and half box length box_h.x = lbox_h; box_h.y = lbox_h/2.0f; // set constant memory cudaMemcpyToSymbol(nMC, &isspas.nMC, sizeof(int)); cudaMemcpyToSymbol(nTypes, &isspas.nTypes, sizeof(int)); cudaMemcpyToSymbol(nRs, &isspas.nRs, sizeof(int)); cudaMemcpyToSymbol(nGRs, &isspas.nGRs, sizeof(int)); cudaMemcpyToSymbol(nERs, &isspas.nERs, sizeof(int)); cudaMemcpyToSymbol(nAtoms, &nAtoms_h, sizeof(int)); cudaMemcpyToSymbol(nPairs, &nPairs_h, sizeof(int)); cudaMemcpyToSymbol(box, &box_h, sizeof(float2)); cudaMemcpyToSymbol(forceRparams, &isspas.forceRparams, sizeof(float2)); cudaMemcpyToSymbol(gRparams, &isspas.gRparams, sizeof(float2)); cudaMemcpyToSymbol(eRparams, &isspas.eRparams, sizeof(float2)); }
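// ---------------------------------------------------------------------------
// Editorial sketch (not part of the surrounding files): both copies of the
// IS-SPA source define warp reductions built on __shfl_down (the pre-CUDA-9
// intrinsic), but the kernels shown never call them. A minimal example of how
// such a shift-down warp reduction is usually driven follows; the kernel name
// and the one-warp-per-block launch assumption are ours.
__global__ void warp_sum_demo(const float4 *in, float4 *out, int n)
{
    // assumes blockDim.x == warpSize, i.e. one warp per block
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    float4 v = (i < n) ? in[i] : make_float4(0.0f, 0.0f, 0.0f, 0.0f);

    // after log2(warpSize) steps lane 0 holds the sum of x, y and z over the warp
    for (int offset = warpSize / 2; offset > 0; offset /= 2) {
        v.x += __shfl_down(v.x, offset);
        v.y += __shfl_down(v.y, offset);
        v.z += __shfl_down(v.z, offset);
    }
    if (threadIdx.x == 0)
        out[blockIdx.x] = v;          // one partial result per warp
}
// On CUDA 9 and later the synchronous variant __shfl_down_sync(0xffffffff, ...)
// would normally be used instead.
// ---------------------------------------------------------------------------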
07ab9d0b244cac99e8dfb918d8f08d0d27f6e51d.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"

__global__ void cunn_OneVsAllNLLCriterion_updateGradInput_kernel(float *gradInput, float *input, float *target, int nframe, int dim, int sizeaverage, float *positiveWeight)
{
  // __shared__ float buffer[NLL_THREADS];
  int k = blockIdx.x;
  float *input_k = input + k*dim;
  float *gradInput_k = gradInput + k*dim;
  int target_k = ((int)target[k])-1;
  float g = (sizeaverage ? 1./((float)nframe) : 1.);

  int i_start = threadIdx.x;
  int i_end = dim;
  int i_step = blockDim.x;

  // buffer[threadIdx.x] = 0;
  for (int i=i_start; i<i_end; i+=i_step)
  {
    float z = (i==target_k) ? -g*positiveWeight[i]/input_k[i] : g/(1 - input_k[i]);
    gradInput_k[i] = z;
  }
  __syncthreads();

  // reduce
  //if (threadIdx.x == 0)
  //{
  //  float gradInput_target_k = 0;
  //for (int i=0; i<blockDim.x; i++)
  //  gradInput_target_k += buffer[i];
  //gradInput_k[target_k] = gradInput_target_k;
  //}
}
07ab9d0b244cac99e8dfb918d8f08d0d27f6e51d.cu
#include "includes.h" __global__ void cunn_OneVsAllNLLCriterion_updateGradInput_kernel(float *gradInput, float *input, float *target, int nframe, int dim, int sizeaverage, float *positiveWeight) { // __shared__ float buffer[NLL_THREADS]; int k = blockIdx.x; float *input_k = input + k*dim; float *gradInput_k = gradInput + k*dim; int target_k = ((int)target[k])-1; float g = (sizeaverage ? 1./((float)nframe) : 1.); int i_start = threadIdx.x; int i_end = dim; int i_step = blockDim.x; // buffer[threadIdx.x] = 0; for (int i=i_start; i<i_end; i+=i_step) { float z = (i==target_k) ? -g*positiveWeight[i]/input_k[i] : g/(1 - input_k[i]); gradInput_k[i] = z; } __syncthreads(); // reduce //if (threadIdx.x == 0) //{ // float gradInput_target_k = 0; //for (int i=0; i<blockDim.x; i++) // gradInput_target_k += buffer[i]; //gradInput_k[target_k] = gradInput_target_k; //} }
09646132c4331cf841aaa6683b9d75c76120eb97.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <time.h> #include <stdlib.h> #include <stdio.h> #define NUMTHREADS 32 #define N 129 #define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); } inline void gpuAssert(hipError_t code, const char *file, int line, bool abort=true) { if (code != hipSuccess) { fprintf(stderr,"GPUassert: %s %s %d\n", hipGetErrorString(code), file, line); if (abort) exit(code); } } void dyadic(int* res, int* a, int* b, int n) { for(int i=0; i<N; i++) { for(int j=0; j<N; j++) { res[i+N*j] = a[j]*b[i]; } } } __global__ void dyadic_gpu(int* res, int* a, int* b, int n) { int x = threadIdx.x + blockDim.x*blockIdx.x; int y = threadIdx.y + blockDim.y*blockIdx.y; if(x < n && y < n) res[x+n*y] = a[y]*b[x]; } int main() { srand(time(NULL)); int* a = (int*)malloc(N*sizeof(int)); int* b = (int*)malloc(N*sizeof(int)); for(int i=0; i<N; i++) { a[i] = rand()%100; b[i] = rand()%100; } int* res = (int*)malloc(N*N*sizeof(int)); dyadic(res, a, b, N); // CUDA stuff int* d_a, *d_b, *d_res; gpuErrchk(hipMalloc((void**)&d_a, N*sizeof(int))); gpuErrchk(hipMalloc((void**)&d_b, N*sizeof(int))); gpuErrchk(hipMalloc((void**)&d_res, N*N*sizeof(int))); gpuErrchk(hipMemcpy(d_a, a, N*sizeof(int), hipMemcpyHostToDevice)); gpuErrchk(hipMemcpy(d_b, b, N*sizeof(int), hipMemcpyHostToDevice)); dim3 threads(16, 16, 1); dim3 grid(ceil((float)N/(float)16), ceil((float)N/(float)16), 1); // (N+threads.x-1)/threads.x hipMemset(d_res, 0,N*N*sizeof(int)); hipLaunchKernelGGL(( dyadic_gpu), dim3(grid),dim3(threads), 0, 0, d_res, d_a, d_b, N); gpuErrchk(hipGetLastError()); int* resgpu = (int*)malloc(N*N*sizeof(int)); gpuErrchk(hipMemcpy(resgpu, d_res, N*N*sizeof(int), hipMemcpyDeviceToHost)); for(int i=0; i<N; i++) { for(int j=0; j<N; j++) { printf("%d ", res[i+N*j]); } printf("\n"); } printf("-----\n"); for(int i=0; i<N; i++) { for(int j=0; j<N; j++) { printf("%d ", resgpu[i+N*j]); } printf("\n"); } printf("\ngrid size: %d, %d\n", grid.x, grid.y); }
09646132c4331cf841aaa6683b9d75c76120eb97.cu
#include <time.h> #include <stdlib.h> #include <stdio.h> #define NUMTHREADS 32 #define N 129 #define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); } inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true) { if (code != cudaSuccess) { fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line); if (abort) exit(code); } } void dyadic(int* res, int* a, int* b, int n) { for(int i=0; i<N; i++) { for(int j=0; j<N; j++) { res[i+N*j] = a[j]*b[i]; } } } __global__ void dyadic_gpu(int* res, int* a, int* b, int n) { int x = threadIdx.x + blockDim.x*blockIdx.x; int y = threadIdx.y + blockDim.y*blockIdx.y; if(x < n && y < n) res[x+n*y] = a[y]*b[x]; } int main() { srand(time(NULL)); int* a = (int*)malloc(N*sizeof(int)); int* b = (int*)malloc(N*sizeof(int)); for(int i=0; i<N; i++) { a[i] = rand()%100; b[i] = rand()%100; } int* res = (int*)malloc(N*N*sizeof(int)); dyadic(res, a, b, N); // CUDA stuff int* d_a, *d_b, *d_res; gpuErrchk(cudaMalloc((void**)&d_a, N*sizeof(int))); gpuErrchk(cudaMalloc((void**)&d_b, N*sizeof(int))); gpuErrchk(cudaMalloc((void**)&d_res, N*N*sizeof(int))); gpuErrchk(cudaMemcpy(d_a, a, N*sizeof(int), cudaMemcpyHostToDevice)); gpuErrchk(cudaMemcpy(d_b, b, N*sizeof(int), cudaMemcpyHostToDevice)); dim3 threads(16, 16, 1); dim3 grid(ceil((float)N/(float)16), ceil((float)N/(float)16), 1); // (N+threads.x-1)/threads.x cudaMemset(d_res, 0,N*N*sizeof(int)); dyadic_gpu<<<grid,threads>>>(d_res, d_a, d_b, N); gpuErrchk(cudaGetLastError()); int* resgpu = (int*)malloc(N*N*sizeof(int)); gpuErrchk(cudaMemcpy(resgpu, d_res, N*N*sizeof(int), cudaMemcpyDeviceToHost)); for(int i=0; i<N; i++) { for(int j=0; j<N; j++) { printf("%d ", res[i+N*j]); } printf("\n"); } printf("-----\n"); for(int i=0; i<N; i++) { for(int j=0; j<N; j++) { printf("%d ", resgpu[i+N*j]); } printf("\n"); } printf("\ngrid size: %d, %d\n", grid.x, grid.y); }
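// ---------------------------------------------------------------------------
// Editorial sketch (not part of the surrounding files): both versions of the
// dyadic-product program compute res[i + N*j] = a[j]*b[i] on the CPU and on
// the GPU and then print both N x N matrices for visual comparison. A small
// hypothetical helper that counts mismatches instead is shown below; it is not
// part of the original file.
static int count_mismatches(const int *cpu, const int *gpu, int n)
{
    int bad = 0;
    for (int j = 0; j < n; j++)
        for (int i = 0; i < n; i++)
            if (cpu[i + n * j] != gpu[i + n * j])
                bad++;
    return bad;
}
// Possible use after the final cudaMemcpy in main():
//   printf("%d mismatching entries\n", count_mismatches(res, resgpu, N));
// ---------------------------------------------------------------------------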
fde0dec8794f8dc1ed5449974d0cf531b44d605c.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"

__global__ void multiply_by_two(double *y, const double *x, int n){
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n){
    y[i] = 2*x[i];
  }
}

void multiply_by_two_forward(double *y, const double *x, int n){
  hipLaunchKernelGGL(( multiply_by_two), dim3((n-1)/64 + 1), dim3(64) , 0, 0, y, x, n);
}

void multiply_by_two_backward(double *grad_x, const double *grad_y, int n){
  hipLaunchKernelGGL(( multiply_by_two), dim3((n-1)/64 + 1), dim3(64) , 0, 0, grad_x, grad_y, n);
}
fde0dec8794f8dc1ed5449974d0cf531b44d605c.cu
#include "cuda.h" __global__ void multiply_by_two(double *y, const double *x, int n){ int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < n){ y[i] = 2*x[i]; } } void multiply_by_two_forward(double *y, const double *x, int n){ multiply_by_two<<< (n-1)/64 + 1, 64 >>>(y, x, n); } void multiply_by_two_backward(double *grad_x, const double *grad_y, int n){ multiply_by_two<<< (n-1)/64 + 1, 64 >>>(grad_x, grad_y, n); }
6809f4e6d52743b9114a95f192439de43b64e8d7.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Marathon Match - CCL - Label Equivalence #include <iostream> #include <iomanip> #include <fstream> #include <sstream> #include <string> #include <vector> #include <map> #include <queue> #include <list> #include <algorithm> #include <utility> #include <cmath> #include <functional> #include <cstring> #include <cmath> #include <limits> #include <cutil_inline.h> #define NOMINMAX #ifdef _MSC_VER #include <ctime> inline double get_time() { return static_cast<double>(std::clock()) / CLOCKS_PER_SEC; } #else #include <sys/time.h> inline double get_time() { timeval tv; gettimeofday(&tv, 0); return tv.tv_sec + 1e-6 * tv.tv_usec; } #endif using namespace std; //const int BLOCK = 128; const int BLOCK = 256; __global__ void init_CCL(int L[], int R[], int N) { int id = blockIdx.x * blockDim.x + blockIdx.y * blockDim.x * gridDim.x + threadIdx.x; if (id >= N) return; L[id] = R[id] = id; } __device__ int diff(int d1, int d2) { return abs(((d1>>16) & 0xff) - ((d2>>16) & 0xff)) + abs(((d1>>8) & 0xff) - ((d2>>8) & 0xff)) + abs((d1 & 0xff) - (d2 & 0xff)); } __global__ void scanning(int D[], int L[], int R[], bool* m, int N, int W, int th) { int id = blockIdx.x * blockDim.x + blockIdx.y * blockDim.x * gridDim.x + threadIdx.x; if (id >= N) return; int Did = D[id]; int label = N; if (id - W >= 0 && diff(Did, D[id-W]) <= th) label = min(label, L[id-W]); if (id + W < N && diff(Did, D[id+W]) <= th) label = min(label, L[id+W]); int r = id % W; if (r && diff(Did, D[id-1]) <= th) label = min(label, L[id-1]); if (r + 1 != W && diff(Did, D[id+1]) <= th) label = min(label, L[id+1]); if (label < L[id]) { //atomicMin(&R[L[id]], label); R[L[id]] = label; *m = true; } } __global__ void scanning8(int D[], int L[], int R[], bool* m, int N, int W, int th) { int id = blockIdx.x * blockDim.x + blockIdx.y * blockDim.x * gridDim.x + threadIdx.x; if (id >= N) return; int Did = D[id]; int label = N; if (id - W >= 0 && diff(Did, D[id-W]) <= th) label = min(label, L[id-W]); if (id + W < N && diff(Did, D[id+W]) <= th) label = min(label, L[id+W]); int r = id % W; if (r) { if (diff(Did, D[id-1]) <= th) label = min(label, L[id-1]); if (id - W - 1 >= 0 && diff(Did, D[id-W-1]) <= th) label = min(label, L[id-W-1]); if (id + W - 1 < N && diff(Did, D[id+W-1]) <= th) label = min(label, L[id+W-1]); } if (r + 1 != W) { if (diff(Did, D[id+1]) <= th) label = min(label, L[id+1]); if (id - W + 1 >= 0 && diff(Did, D[id-W+1]) <= th) label = min(label, L[id-W+1]); if (id + W + 1 < N && diff(Did, D[id+W+1]) <= th) label = min(label, L[id+W+1]); } if (label < L[id]) { //atomicMin(&R[L[id]], label); R[L[id]] = label; *m = true; } } __global__ void analysis(int D[], int L[], int R[], int N) { int id = blockIdx.x * blockDim.x + blockIdx.y * blockDim.x * gridDim.x + threadIdx.x; if (id >= N) return; int label = L[id]; int ref; if (label == id) { do { label = R[ref = label]; } while (ref ^ label); R[id] = label; } } __global__ void labeling(int D[], int L[], int R[], int N) { int id = blockIdx.x * blockDim.x + blockIdx.y * blockDim.x * gridDim.x + threadIdx.x; if (id >= N) return; L[id] = R[R[L[id]]]; } class CCL { private: int* Dd; int* Ld; int* Rd; public: vector<int> cuda_ccl(vector<int>& image, int W, int degree_of_connectivity, int threshold); }; vector<int> CCL::cuda_ccl(vector<int>& image, int W, int degree_of_connectivity, int threshold) { vector<int> result; int* D = static_cast<int*>(&image[0]); int N = image.size(); hipMalloc((void**)&Ld, sizeof(int) * N); 
hipMalloc((void**)&Rd, sizeof(int) * N); hipMalloc((void**)&Dd, sizeof(int) * N); hipMemcpy(Dd, D, sizeof(int) * N, hipMemcpyHostToDevice); bool* md; hipMalloc((void**)&md, sizeof(bool)); int width = static_cast<int>(sqrt(static_cast<double>(N) / BLOCK)) + 1; dim3 grid(width, width, 1); dim3 threads(BLOCK, 1, 1); hipLaunchKernelGGL(( init_CCL), dim3(grid), dim3(threads), 0, 0, Ld, Rd, N); for (;;) { bool m = false; hipMemcpy(md, &m, sizeof(bool), hipMemcpyHostToDevice); if (degree_of_connectivity == 4)hipLaunchKernelGGL(( scanning), dim3(grid), dim3(threads), 0, 0, Dd, Ld, Rd, md, N, W, threshold); elsehipLaunchKernelGGL(( scanning8), dim3(grid), dim3(threads), 0, 0, Dd, Ld, Rd, md, N, W, threshold); hipMemcpy(&m, md, sizeof(bool), hipMemcpyDeviceToHost); if (m) { hipLaunchKernelGGL(( analysis), dim3(grid), dim3(threads), 0, 0, Dd, Ld, Rd, N); //hipDeviceSynchronize(); hipLaunchKernelGGL(( labeling), dim3(grid), dim3(threads), 0, 0, Dd, Ld, Rd, N); } else break; } hipMemcpy(D, Ld, sizeof(int) * N, hipMemcpyDeviceToHost); hipFree(Dd); hipFree(Ld); hipFree(Rd); result.swap(image); return result; } void read_data(const string filename, vector<int>& image, int& W, int& degree_of_connectivity, int& threshold) { fstream fs(filename.c_str(), ios_base::in); string line; stringstream ss; int data; getline(fs, line); ss.str(line); ss >> W >> degree_of_connectivity >> threshold; getline(fs, line); ss.str(""); ss.clear(); for (ss.str(line); ss >> data; image.push_back(data)); } int main(int argc, char* argv[]) { ios_base::sync_with_stdio(false); if (argc < 2) { cerr << "Usage: " << argv[0] << " input_file" << endl; exit(1); } hipSetDevice(cutGetMaxGflopsDeviceId()); vector<int> image; int W, degree_of_connectivity, threshold; read_data(argv[1], image, W, degree_of_connectivity, threshold); CCL ccl; double start = get_time(); vector<int> result(ccl.cuda_ccl(image, W, degree_of_connectivity, threshold)); double end = get_time(); cerr << "Time: " << end - start << endl; cout << result.size() << endl; /// number of pixels cout << W << endl; /// width for (int i = 0; i < static_cast<int>(result.size()) / W; i++) { for (int j = 0; j < W; j++) cout << result[i*W+j] << " "; cout << endl; } return 0; }
6809f4e6d52743b9114a95f192439de43b64e8d7.cu
// Marathon Match - CCL - Label Equivalence #include <iostream> #include <iomanip> #include <fstream> #include <sstream> #include <string> #include <vector> #include <map> #include <queue> #include <list> #include <algorithm> #include <utility> #include <cmath> #include <functional> #include <cstring> #include <cmath> #include <limits> #include <cutil_inline.h> #define NOMINMAX #ifdef _MSC_VER #include <ctime> inline double get_time() { return static_cast<double>(std::clock()) / CLOCKS_PER_SEC; } #else #include <sys/time.h> inline double get_time() { timeval tv; gettimeofday(&tv, 0); return tv.tv_sec + 1e-6 * tv.tv_usec; } #endif using namespace std; //const int BLOCK = 128; const int BLOCK = 256; __global__ void init_CCL(int L[], int R[], int N) { int id = blockIdx.x * blockDim.x + blockIdx.y * blockDim.x * gridDim.x + threadIdx.x; if (id >= N) return; L[id] = R[id] = id; } __device__ int diff(int d1, int d2) { return abs(((d1>>16) & 0xff) - ((d2>>16) & 0xff)) + abs(((d1>>8) & 0xff) - ((d2>>8) & 0xff)) + abs((d1 & 0xff) - (d2 & 0xff)); } __global__ void scanning(int D[], int L[], int R[], bool* m, int N, int W, int th) { int id = blockIdx.x * blockDim.x + blockIdx.y * blockDim.x * gridDim.x + threadIdx.x; if (id >= N) return; int Did = D[id]; int label = N; if (id - W >= 0 && diff(Did, D[id-W]) <= th) label = min(label, L[id-W]); if (id + W < N && diff(Did, D[id+W]) <= th) label = min(label, L[id+W]); int r = id % W; if (r && diff(Did, D[id-1]) <= th) label = min(label, L[id-1]); if (r + 1 != W && diff(Did, D[id+1]) <= th) label = min(label, L[id+1]); if (label < L[id]) { //atomicMin(&R[L[id]], label); R[L[id]] = label; *m = true; } } __global__ void scanning8(int D[], int L[], int R[], bool* m, int N, int W, int th) { int id = blockIdx.x * blockDim.x + blockIdx.y * blockDim.x * gridDim.x + threadIdx.x; if (id >= N) return; int Did = D[id]; int label = N; if (id - W >= 0 && diff(Did, D[id-W]) <= th) label = min(label, L[id-W]); if (id + W < N && diff(Did, D[id+W]) <= th) label = min(label, L[id+W]); int r = id % W; if (r) { if (diff(Did, D[id-1]) <= th) label = min(label, L[id-1]); if (id - W - 1 >= 0 && diff(Did, D[id-W-1]) <= th) label = min(label, L[id-W-1]); if (id + W - 1 < N && diff(Did, D[id+W-1]) <= th) label = min(label, L[id+W-1]); } if (r + 1 != W) { if (diff(Did, D[id+1]) <= th) label = min(label, L[id+1]); if (id - W + 1 >= 0 && diff(Did, D[id-W+1]) <= th) label = min(label, L[id-W+1]); if (id + W + 1 < N && diff(Did, D[id+W+1]) <= th) label = min(label, L[id+W+1]); } if (label < L[id]) { //atomicMin(&R[L[id]], label); R[L[id]] = label; *m = true; } } __global__ void analysis(int D[], int L[], int R[], int N) { int id = blockIdx.x * blockDim.x + blockIdx.y * blockDim.x * gridDim.x + threadIdx.x; if (id >= N) return; int label = L[id]; int ref; if (label == id) { do { label = R[ref = label]; } while (ref ^ label); R[id] = label; } } __global__ void labeling(int D[], int L[], int R[], int N) { int id = blockIdx.x * blockDim.x + blockIdx.y * blockDim.x * gridDim.x + threadIdx.x; if (id >= N) return; L[id] = R[R[L[id]]]; } class CCL { private: int* Dd; int* Ld; int* Rd; public: vector<int> cuda_ccl(vector<int>& image, int W, int degree_of_connectivity, int threshold); }; vector<int> CCL::cuda_ccl(vector<int>& image, int W, int degree_of_connectivity, int threshold) { vector<int> result; int* D = static_cast<int*>(&image[0]); int N = image.size(); cudaMalloc((void**)&Ld, sizeof(int) * N); cudaMalloc((void**)&Rd, sizeof(int) * N); cudaMalloc((void**)&Dd, sizeof(int) * N); 
cudaMemcpy(Dd, D, sizeof(int) * N, cudaMemcpyHostToDevice); bool* md; cudaMalloc((void**)&md, sizeof(bool)); int width = static_cast<int>(sqrt(static_cast<double>(N) / BLOCK)) + 1; dim3 grid(width, width, 1); dim3 threads(BLOCK, 1, 1); init_CCL<<<grid, threads>>>(Ld, Rd, N); for (;;) { bool m = false; cudaMemcpy(md, &m, sizeof(bool), cudaMemcpyHostToDevice); if (degree_of_connectivity == 4) scanning<<<grid, threads>>>(Dd, Ld, Rd, md, N, W, threshold); else scanning8<<<grid, threads>>>(Dd, Ld, Rd, md, N, W, threshold); cudaMemcpy(&m, md, sizeof(bool), cudaMemcpyDeviceToHost); if (m) { analysis<<<grid, threads>>>(Dd, Ld, Rd, N); //cudaThreadSynchronize(); labeling<<<grid, threads>>>(Dd, Ld, Rd, N); } else break; } cudaMemcpy(D, Ld, sizeof(int) * N, cudaMemcpyDeviceToHost); cudaFree(Dd); cudaFree(Ld); cudaFree(Rd); result.swap(image); return result; } void read_data(const string filename, vector<int>& image, int& W, int& degree_of_connectivity, int& threshold) { fstream fs(filename.c_str(), ios_base::in); string line; stringstream ss; int data; getline(fs, line); ss.str(line); ss >> W >> degree_of_connectivity >> threshold; getline(fs, line); ss.str(""); ss.clear(); for (ss.str(line); ss >> data; image.push_back(data)); } int main(int argc, char* argv[]) { ios_base::sync_with_stdio(false); if (argc < 2) { cerr << "Usage: " << argv[0] << " input_file" << endl; exit(1); } cudaSetDevice(cutGetMaxGflopsDeviceId()); vector<int> image; int W, degree_of_connectivity, threshold; read_data(argv[1], image, W, degree_of_connectivity, threshold); CCL ccl; double start = get_time(); vector<int> result(ccl.cuda_ccl(image, W, degree_of_connectivity, threshold)); double end = get_time(); cerr << "Time: " << end - start << endl; cout << result.size() << endl; /// number of pixels cout << W << endl; /// width for (int i = 0; i < static_cast<int>(result.size()) / W; i++) { for (int j = 0; j < W; j++) cout << result[i*W+j] << " "; cout << endl; } return 0; }
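// Note (added): a minimal usage sketch for the label-equivalence CCL program above.
// The file name "img.txt", its contents and the binary name are hypothetical;
// read_data() expects the first line to hold "W degree_of_connectivity threshold"
// and the second line to hold the pixel values, which cuda_ccl() relabels in place.
//
//   $ cat img.txt
//   4 4 0
//   7 7 0 0 7 7 0 0 3 3 3 0 0 0 0 0
//   $ ./ccl img.txt
//
// With threshold 0 and 4-connectivity, neighbouring pixels only merge when their
// packed RGB values match exactly, so the 7-block, the 3-run and the connected
// 0-background come out as three distinct labels.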
5dc71ba688fbb447bca7072b2a487cd0a2e03ce6.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Suma de vectores secuencial #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #include <helper_cuda.h> #include <helper_timer.h> StopWatchInterface *hTimer = NULL; StopWatchInterface *kTimer = NULL; typedef int *vector; // Function for generating random values for a vector void LoadStartValuesIntoVectorRand(vector V, unsigned int n) { unsigned int i; for (i=0;i<n;i++) V[i] = (int)(random()%9); } // Function for printing a vector void PrintVector(vector V, unsigned int n) { unsigned int i; for (i=0;i<n;i++) printf("%d\n",V[i]); } void LoadP(vector P, unsigned int n) { unsigned int i, r; unsigned int aux; for (i=0;i<n;i++) P[i] = i; for (i=0;i<n;i++){ r = i+(random()%(n-i)); aux = P[i]; P[i] = P[r]; P[r] = aux; } } // Suma vectores cC = cA + cB __global__ void SumVectorCuda(vector cA, vector cB, vector cC, vector cP, unsigned int n, unsigned int v) { unsigned int idx = (blockIdx.x * blockDim.x) + threadIdx.x; int end = ((idx + 1) * v) - 1; int tid /*= idx * v*/; //printf("End: %d\n", end); for (tid = idx * v; tid <= end ; tid++){ //printf("Tid: %d, Thread: %d\n", tid, idx); //printf("Vector cA: %d Thread: %d\n", cA[tid], idx); cC[cP[tid]] = cA[cP[tid]] + cB[cP[tid]]; } } // ------------------------ // MAIN function // ------------------------ int main(int argc, char **argv) { float timerValue; double ops; unsigned int n, v; //Pasar numero de componentes del vector por thread (v). if (argc == 3){ n = atoi(argv[1]); v = atoi(argv[2]); } else { printf ("Sintaxis: <ejecutable> <total number of elements> <elementos del vector por thread>\n"); exit(0); } if(n%v != 0){ printf("El nmero de componentes del vector por thread no es divisor del total de elementos del mismo\n"); exit(0); } srandom(12345); // Define vectors at host vector A; vector B; vector C; vector P; vector cA; vector cB; vector cC; vector cP; sdkCreateTimer(&hTimer); sdkResetTimer(&hTimer); sdkStartTimer(&hTimer); // Load values into A A = (int *) malloc(n*sizeof(int)); hipMalloc((void**)&cA,n*sizeof(int)); LoadStartValuesIntoVectorRand(A,n); hipMemcpy(cA, A, n*sizeof(int), hipMemcpyHostToDevice); //printf("\nPrinting Vector A %d\n",n); //PrintVector(A,n); // Load values B = (int *) malloc(n*sizeof(int)); hipMalloc((void**)&cB,n*sizeof(int)); LoadStartValuesIntoVectorRand(B,n); hipMemcpy(cB, B, n*sizeof(int), hipMemcpyHostToDevice); //printf("\nPrinting Vector B %d\n",n); //PrintVector(B,n); C = (int *) malloc(n*sizeof(int)); hipMalloc(&cC,n*sizeof(int)); // Load values P = (int *) malloc(n*sizeof(int)); hipMalloc((void**)&cP,n*sizeof(int)); LoadP(P,n); hipMemcpy(cP, P, n*sizeof(int), hipMemcpyHostToDevice); sdkCreateTimer(&kTimer); sdkResetTimer(&kTimer); sdkStartTimer(&kTimer); // execute the subprogram hipLaunchKernelGGL(( SumVectorCuda), dim3(n/(1024*v)),dim3(1024), 0, 0, cA,cB,cC,cP,n,v); hipDeviceSynchronize(); sdkStopTimer(&kTimer); //Copiar de dispositivo a host hipMemcpy(C, cC, n*sizeof(int), hipMemcpyDeviceToHost); //printf("\nPrinting vector C %d\n",n); //PrintVector(C,n); // Free vectors free(A); free(B); free(C); free(P); hipFree(cA); hipFree(cB); hipFree(cC); hipFree(cP); sdkStopTimer(&hTimer); //cambiar timers a los de cuda timerValue = sdkGetTimerValue(&kTimer); timerValue = timerValue / 1000; sdkDeleteTimer(&kTimer); printf("Tiempo kernel: %f s", timerValue); ops = n/timerValue; printf(" %f GFLOPS\n",(ops)/1000000000); timerValue = sdkGetTimerValue(&hTimer); timerValue = timerValue / 1000; sdkDeleteTimer(&hTimer); 
printf("Tiempo total: %f s", timerValue); ops = n/timerValue; printf(" %f GFLOPS \n",(ops)/1000000000); return 0; }
5dc71ba688fbb447bca7072b2a487cd0a2e03ce6.cu
// Suma de vectores secuencial #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #include <helper_cuda.h> #include <helper_timer.h> StopWatchInterface *hTimer = NULL; StopWatchInterface *kTimer = NULL; typedef int *vector; // Function for generating random values for a vector void LoadStartValuesIntoVectorRand(vector V, unsigned int n) { unsigned int i; for (i=0;i<n;i++) V[i] = (int)(random()%9); } // Function for printing a vector void PrintVector(vector V, unsigned int n) { unsigned int i; for (i=0;i<n;i++) printf("%d\n",V[i]); } void LoadP(vector P, unsigned int n) { unsigned int i, r; unsigned int aux; for (i=0;i<n;i++) P[i] = i; for (i=0;i<n;i++){ r = i+(random()%(n-i)); aux = P[i]; P[i] = P[r]; P[r] = aux; } } // Suma vectores cC = cA + cB __global__ void SumVectorCuda(vector cA, vector cB, vector cC, vector cP, unsigned int n, unsigned int v) { unsigned int idx = (blockIdx.x * blockDim.x) + threadIdx.x; int end = ((idx + 1) * v) - 1; int tid /*= idx * v*/; //printf("End: %d\n", end); for (tid = idx * v; tid <= end ; tid++){ //printf("Tid: %d, Thread: %d\n", tid, idx); //printf("Vector cA: %d Thread: %d\n", cA[tid], idx); cC[cP[tid]] = cA[cP[tid]] + cB[cP[tid]]; } } // ------------------------ // MAIN function // ------------------------ int main(int argc, char **argv) { float timerValue; double ops; unsigned int n, v; //Pasar numero de componentes del vector por thread (v). if (argc == 3){ n = atoi(argv[1]); v = atoi(argv[2]); } else { printf ("Sintaxis: <ejecutable> <total number of elements> <elementos del vector por thread>\n"); exit(0); } if(n%v != 0){ printf("El número de componentes del vector por thread no es divisor del total de elementos del mismo\n"); exit(0); } srandom(12345); // Define vectors at host vector A; vector B; vector C; vector P; vector cA; vector cB; vector cC; vector cP; sdkCreateTimer(&hTimer); sdkResetTimer(&hTimer); sdkStartTimer(&hTimer); // Load values into A A = (int *) malloc(n*sizeof(int)); cudaMalloc((void**)&cA,n*sizeof(int)); LoadStartValuesIntoVectorRand(A,n); cudaMemcpy(cA, A, n*sizeof(int), cudaMemcpyHostToDevice); //printf("\nPrinting Vector A %d\n",n); //PrintVector(A,n); // Load values B = (int *) malloc(n*sizeof(int)); cudaMalloc((void**)&cB,n*sizeof(int)); LoadStartValuesIntoVectorRand(B,n); cudaMemcpy(cB, B, n*sizeof(int), cudaMemcpyHostToDevice); //printf("\nPrinting Vector B %d\n",n); //PrintVector(B,n); C = (int *) malloc(n*sizeof(int)); cudaMalloc(&cC,n*sizeof(int)); // Load values P = (int *) malloc(n*sizeof(int)); cudaMalloc((void**)&cP,n*sizeof(int)); LoadP(P,n); cudaMemcpy(cP, P, n*sizeof(int), cudaMemcpyHostToDevice); sdkCreateTimer(&kTimer); sdkResetTimer(&kTimer); sdkStartTimer(&kTimer); // execute the subprogram SumVectorCuda<<<n/(1024*v),1024>>>(cA,cB,cC,cP,n,v); cudaDeviceSynchronize(); sdkStopTimer(&kTimer); //Copiar de dispositivo a host cudaMemcpy(C, cC, n*sizeof(int), cudaMemcpyDeviceToHost); //printf("\nPrinting vector C %d\n",n); //PrintVector(C,n); // Free vectors free(A); free(B); free(C); free(P); cudaFree(cA); cudaFree(cB); cudaFree(cC); cudaFree(cP); sdkStopTimer(&hTimer); //cambiar timers a los de cuda timerValue = sdkGetTimerValue(&kTimer); timerValue = timerValue / 1000; sdkDeleteTimer(&kTimer); printf("Tiempo kernel: %f s", timerValue); ops = n/timerValue; printf(" %f GFLOPS\n",(ops)/1000000000); timerValue = sdkGetTimerValue(&hTimer); timerValue = timerValue / 1000; sdkDeleteTimer(&hTimer); printf("Tiempo total: %f s", timerValue); ops = n/timerValue; printf(" %f GFLOPS \n",(ops)/1000000000); 
return 0; }
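// Note (added): a hedged usage sketch for the permuted vector-add benchmark above.
// The binary name, source file name and include path are illustrative. The kernel is
// launched with n/(1024*v) blocks of 1024 threads, so in this sketch n is chosen as a
// multiple of 1024*v; otherwise the integer division drops the trailing elements.
//
//   $ nvcc -I$CUDA_SAMPLES/common/inc suma_vectores.cu -o suma_vectores
//   $ ./suma_vectores 1048576 4     # n = 2^20 elements, v = 4 elements per thread
//
// Each thread then walks v consecutive indices but reads A and B and writes C through
// the random permutation P, which is what makes the memory traffic non-coalesced.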
25556baf11546d8e4d30fe3539188afbd8085ed9.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
 * Copyright 1993-2009 NVIDIA Corporation.  All rights reserved.
 *
 * NVIDIA Corporation and its licensors retain all intellectual property and
 * proprietary rights in and to this software and related documentation and
 * any modifications thereto.  Any use, reproduction, disclosure, or distribution
 * of this software and related documentation without an express license
 * agreement from NVIDIA Corporation is strictly prohibited.
 *
 */

#ifndef _SCAN_WORKEFFICIENT_KERNEL_H_
#define _SCAN_WORKEFFICIENT_KERNEL_H_

///////////////////////////////////////////////////////////////////////////////
//! Work-efficient compute implementation of scan, one thread per 2 elements
//! Work-efficient: O(log(n)) steps, and O(n) adds.
//! Also shared storage efficient: Uses n elements in shared mem -- no ping-ponging
//! Uses a balanced tree type algorithm.  See Blelloch, 1990 "Prefix Sums
//! and Their Applications", or Prins and Chatterjee PRAM course notes:
//! https://www.cs.unc.edu/~prins/Classes/633/Handouts/pram.pdf
//!
//! Pro: Work Efficient
//! Con: Shared memory bank conflicts due to the addressing used.
//
//! @param g_odata  output data in global memory
//! @param g_idata  input data in global memory
//! @param n        input number of elements to scan from input data
///////////////////////////////////////////////////////////////////////////////
__global__ void scan_workefficient(float *g_odata, float *g_idata, int n)
{
    // Dynamically allocated shared memory for scan kernels
    extern __shared__ float temp[];

    int thid = threadIdx.x;
    int offset = 1;

    // Cache the computational window in shared memory
    temp[2*thid]   = g_idata[2*thid];
    temp[2*thid+1] = g_idata[2*thid+1];

    // build the sum in place up the tree
    for (int d = n>>1; d > 0; d >>= 1)
    {
        __syncthreads();

        if (thid < d)
        {
            int ai = offset*(2*thid+1)-1;
            int bi = offset*(2*thid+2)-1;

            temp[bi] += temp[ai];
        }

        offset *= 2;
    }

    // scan back down the tree

    // clear the last element
    if (thid == 0)
    {
        temp[n - 1] = 0;
    }

    // traverse down the tree building the scan in place
    for (int d = 1; d < n; d *= 2)
    {
        offset >>= 1;
        __syncthreads();

        if (thid < d)
        {
            int ai = offset*(2*thid+1)-1;
            int bi = offset*(2*thid+2)-1;

            float t  = temp[ai];
            temp[ai] = temp[bi];
            temp[bi] += t;
        }
    }

    __syncthreads();

    // write results to global memory
    g_odata[2*thid]   = temp[2*thid];
    g_odata[2*thid+1] = temp[2*thid+1];
}

#endif // #ifndef _SCAN_WORKEFFICIENT_KERNEL_H_
25556baf11546d8e4d30fe3539188afbd8085ed9.cu
/*
 * Copyright 1993-2009 NVIDIA Corporation.  All rights reserved.
 *
 * NVIDIA Corporation and its licensors retain all intellectual property and
 * proprietary rights in and to this software and related documentation and
 * any modifications thereto.  Any use, reproduction, disclosure, or distribution
 * of this software and related documentation without an express license
 * agreement from NVIDIA Corporation is strictly prohibited.
 *
 */

#ifndef _SCAN_WORKEFFICIENT_KERNEL_H_
#define _SCAN_WORKEFFICIENT_KERNEL_H_

///////////////////////////////////////////////////////////////////////////////
//! Work-efficient compute implementation of scan, one thread per 2 elements
//! Work-efficient: O(log(n)) steps, and O(n) adds.
//! Also shared storage efficient: Uses n elements in shared mem -- no ping-ponging
//! Uses a balanced tree type algorithm.  See Blelloch, 1990 "Prefix Sums
//! and Their Applications", or Prins and Chatterjee PRAM course notes:
//! https://www.cs.unc.edu/~prins/Classes/633/Handouts/pram.pdf
//!
//! Pro: Work Efficient
//! Con: Shared memory bank conflicts due to the addressing used.
//
//! @param g_odata  output data in global memory
//! @param g_idata  input data in global memory
//! @param n        input number of elements to scan from input data
///////////////////////////////////////////////////////////////////////////////
__global__ void scan_workefficient(float *g_odata, float *g_idata, int n)
{
    // Dynamically allocated shared memory for scan kernels
    extern __shared__ float temp[];

    int thid = threadIdx.x;
    int offset = 1;

    // Cache the computational window in shared memory
    temp[2*thid]   = g_idata[2*thid];
    temp[2*thid+1] = g_idata[2*thid+1];

    // build the sum in place up the tree
    for (int d = n>>1; d > 0; d >>= 1)
    {
        __syncthreads();

        if (thid < d)
        {
            int ai = offset*(2*thid+1)-1;
            int bi = offset*(2*thid+2)-1;

            temp[bi] += temp[ai];
        }

        offset *= 2;
    }

    // scan back down the tree

    // clear the last element
    if (thid == 0)
    {
        temp[n - 1] = 0;
    }

    // traverse down the tree building the scan in place
    for (int d = 1; d < n; d *= 2)
    {
        offset >>= 1;
        __syncthreads();

        if (thid < d)
        {
            int ai = offset*(2*thid+1)-1;
            int bi = offset*(2*thid+2)-1;

            float t  = temp[ai];
            temp[ai] = temp[bi];
            temp[bi] += t;
        }
    }

    __syncthreads();

    // write results to global memory
    g_odata[2*thid]   = temp[2*thid];
    g_odata[2*thid+1] = temp[2*thid+1];
}

#endif // #ifndef _SCAN_WORKEFFICIENT_KERNEL_H_
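// Note (added): a minimal host-side launch sketch for scan_workefficient above.
// The names below (h_in, h_out, d_in, d_out) are illustrative. The kernel as written
// uses one thread per two elements, a single block, and n floats of dynamic shared
// memory, so n should be a power of two no larger than twice the block-size limit.
//
//   int n = 1024;                                   // power of two
//   size_t bytes = n * sizeof(float);
//   float *d_in, *d_out;
//   cudaMalloc(&d_in, bytes);
//   cudaMalloc(&d_out, bytes);
//   cudaMemcpy(d_in, h_in, bytes, cudaMemcpyHostToDevice);
//   scan_workefficient<<<1, n / 2, bytes>>>(d_out, d_in, n);  // exclusive prefix sum
//   cudaMemcpy(h_out, d_out, bytes, cudaMemcpyDeviceToHost);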
493a99fb8ce62b7100d6afe37ea859bd894de79f.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <primitiv/config.h>

#include <primitiv/devices/cuda16/device.h>
#include <primitiv/devices/cuda16/ops/common.h>
#include <primitiv/internal/cuda/utils.h>

namespace {

__global__ void inplace_multiply_const_dev(
    float k, std::uint32_t size, half *px) {
  const std::uint32_t i = IDX;
  if (i < size) px[i] = ::__float2half(::__half2float(px[i]) * k);
}

}  // namespace

namespace primitiv {
namespace devices {

void CUDA16::inplace_multiply_const_impl(float k, Tensor &x) {
  const std::uint32_t size = x.shape().size();
  const std::uint32_t g1 = GRID_SIZE(size, dim1_x_);
  CUDA_CALL(::hipSetDevice(dev_id_));
  hipLaunchKernelGGL(( ::inplace_multiply_const_dev), dim3(g1), dim3(dim1_x_), 0, 0, k, size, MDATA(half, x));
}

}  // namespace devices
}  // namespace primitiv
493a99fb8ce62b7100d6afe37ea859bd894de79f.cu
#include <primitiv/config.h>

#include <primitiv/devices/cuda16/device.h>
#include <primitiv/devices/cuda16/ops/common.h>
#include <primitiv/internal/cuda/utils.h>

namespace {

__global__ void inplace_multiply_const_dev(
    float k, std::uint32_t size, half *px) {
  const std::uint32_t i = IDX;
  if (i < size) px[i] = ::__float2half(::__half2float(px[i]) * k);
}

}  // namespace

namespace primitiv {
namespace devices {

void CUDA16::inplace_multiply_const_impl(float k, Tensor &x) {
  const std::uint32_t size = x.shape().size();
  const std::uint32_t g1 = GRID_SIZE(size, dim1_x_);
  CUDA_CALL(::cudaSetDevice(dev_id_));
  ::inplace_multiply_const_dev<<<g1, dim1_x_>>>(k, size, MDATA(half, x));
}

}  // namespace devices
}  // namespace primitiv
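// Note (added): IDX and GRID_SIZE come from primitiv's cuda16 ops/common.h and are not
// defined in this file. As an assumption only, they behave roughly like the sketch
// below: a flattened 1-D thread index and a ceiling division for the grid size.
//
//   #define IDX (threadIdx.x + blockIdx.x * blockDim.x)
//   #define GRID_SIZE(x, threads) (((x) + (threads) - 1) / (threads))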
1991c1c0a2da1692bf94289b95993c718ee2063b.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <stdlib.h> #include <hip/hip_runtime.h> extern "C" { #include "check.h" #include "timer.h" #include "init.h" #include "template.h" } void reset_host_data(float * arr, const size_t bytes) { memset(arr, 0, bytes); } float recursiveReduce(float * arr, const int n) { if (n == 1) return arr[0]; // if n is odd, sum up the last element with the first element if ((n % 2) != 0) arr[0] += arr[n-1]; int stride = n / 2; for (int k=0; k<stride; k++) { arr[k] += arr[k + stride]; } return recursiveReduce(arr, stride); } // ########### K E R N E L S ############ __global__ void reduceNeighbored_warpDivergence(float * data, float * reduc, const int n) { int thx = threadIdx.x; int idx = thx + blockIdx.x * blockDim.x; if (idx >= n) return; float * ptr = data + blockIdx.x * blockDim.x; for (int stride=1; stride < blockDim.x; stride *= 2) { if ( thx % (2*stride) != 0) continue; ptr[thx] += ptr[thx + stride]; __syncthreads(); } if (thx == 0) reduc[blockIdx.x] = ptr[0]; } __global__ void reduceNeighbored_NoWarpDivergence(float * data, float * reduc, const int n) { int thx = threadIdx.x; int idx = thx + blockIdx.x + blockDim.x; if (idx >= n) return; float * ptr = data + blockIdx.x * blockDim.x; for (int stride=1; stride<blockDim.x; stride *= 2) { int index = 2 * stride * thx; if (index < blockDim.x) ptr[index] += ptr[index + stride]; // see table above __syncthreads(); } if (thx == 0) reduc[blockIdx.x] = ptr[0]; } __global__ void reduceInterleavedPair(float * data, float * reduc, const int n) { size_t thx = threadIdx.x; size_t idx = thx + blockIdx.x * blockDim.x; if (idx >= n) return; float * ptr = data + blockIdx.x * blockDim.x; for (int stride = blockDim.x/2; stride > 0; stride >>= 1) { if (thx < stride) { ptr[thx] += ptr[thx + stride]; } __syncthreads(); } if (thx == 0) reduc[blockIdx.x] = ptr[0]; } __global__ void reduceUnroll2(float * data, float * reduc, const int n) { size_t thx = threadIdx.x; size_t idx = thx + blockIdx.x * blockDim.x * 2; float * ptr = data + blockIdx.x * blockDim.x * 2; // unrolling 2 data blocks if (thx + blockDim.x < n) data[idx] += data[idx + blockDim.x]; __syncthreads(); // in-place reduction in global memory for (int stride=blockDim.x/2; stride>0; stride >>= 1) { if (thx < stride) ptr[thx] += ptr[thx + stride]; __syncthreads(); } if (thx == 0) reduc[blockIdx.x] = ptr[0]; } __global__ void reduceUnroll4(float * data, float * reduc, const int n) { size_t thx = threadIdx.x; size_t idx = thx + blockIdx.x * blockDim.x * 4; float * ptr = data + blockIdx.x * blockDim.x * 4; // unrolling 4 data blocks if (thx + blockDim.x * 3 < n) { data[idx] += data[idx + blockDim.x] + data[idx + blockDim.x * 2] + data[idx + blockDim.x * 3]; __syncthreads(); } // in-place reduction in global memory for (int stride=blockDim.x/2; stride>0; stride>>=1) { if (thx < stride) ptr[thx] += ptr[thx + stride]; __syncthreads(); } if (thx == 0) reduc[blockIdx.x] = ptr[0]; } __global__ void reduceUnroll8(float * data, float * reduc, const int n) { size_t thx = threadIdx.x; size_t idx = thx + blockIdx.x * blockDim.x * 8; float * ptr = data + blockIdx.x * blockDim.x * 8; // unrolling 8 data blocks if (thx + blockDim.x * 7 < n) { data[idx] += data[idx + blockDim.x] + data[idx + blockDim.x * 2] + data[idx + blockDim.x * 3] + data[idx + blockDim.x * 4] + data[idx + blockDim.x * 5] + data[idx + blockDim.x * 6] + data[idx + blockDim.x * 7]; __syncthreads(); } // in-place reduction in global memory for (int stride=blockDim.x/2; stride>0; 
stride>>=1) { if (thx < stride) ptr[thx] += ptr[thx + stride]; __syncthreads(); } if (thx == 0) reduc[blockIdx.x] = ptr[0]; } __global__ void reduceUnroll8_warp(float * data, float * reduc, const int n) { size_t thx = threadIdx.x; size_t idx = thx + blockIdx.x * blockDim.x * 8; float * ptr = data + blockIdx.x * blockDim.x * 8; // unrolling 8 data blocks if (thx + blockDim.x * 7 < n) { data[idx] += data[idx + blockDim.x] + data[idx + blockDim.x * 2] + data[idx + blockDim.x * 3] + data[idx + blockDim.x * 4] + data[idx + blockDim.x * 5] + data[idx + blockDim.x * 6] + data[idx + blockDim.x * 7]; __syncthreads(); } // in-place reduction in global memory for (int stride=blockDim.x/2; stride>32; stride>>=1) { if (thx < stride) ptr[thx] += ptr[thx + stride]; __syncthreads(); } // warp unrolling if (thx < 32) { volatile float * vmem = data; vmem[thx] += vmem[thx + 32]; vmem[thx] += vmem[thx + 16]; vmem[thx] += vmem[thx + 8]; vmem[thx] += vmem[thx + 4]; vmem[thx] += vmem[thx + 2]; vmem[thx] += vmem[thx + 1]; } if (thx == 0) reduc[blockIdx.x] = ptr[0]; } // ########### R U N T I M E W R A P P E R S ############ void cpu_do_sum(const float * h_data, float * h_reduc, const int n, const int m) { size_t nbytes = n * sizeof(float), mbytes = m * sizeof(float); memset(h_reduc, 0, mbytes); double t_cpu = getTime(); // float * _data, * _reduc; float * _data = (float *)malloc(nbytes); float * _reduc = (float *)malloc(mbytes); memcpy(_data, h_data, nbytes); memcpy(_reduc, h_reduc, mbytes); recursiveReduce(_data, n); _reduc[0] = _data[0]; // free(_data); free(_reduc); float dt_cpu = getTime() - t_cpu; printf("Info: cpu_do_sum: sum_cpu = %.8f\n", _reduc[0]); printf("Info: dt_cpu = %.6f sec\n", dt_cpu); } void warmup() { size_t n = 1 << 14, nf = n * sizeof(float); dim3 block(256, 1), grid((n+block.x-1)/block.x, 1); int ng = grid.x * sizeof(float); float * h_data = (float *)malloc(nf); float * h_reduc = (float *)malloc(ng); for (size_t k=0; k<n; k++) h_data[k] = 1.0; memset(h_reduc, 0.0, ng); double t_gpu = getTime(), dt_gpu=0.0; float * d_data, * d_reduc; CHECK(hipMalloc((float **)&d_data, nf)); CHECK(hipMalloc((float **)&d_reduc, ng)); CHECK(hipMemcpy(d_data, h_data, nf, hipMemcpyHostToDevice)); CHECK(hipMemcpy(d_reduc, h_reduc, ng, hipMemcpyHostToDevice)); hipLaunchKernelGGL(( reduceNeighbored_warpDivergence) , dim3(grid), dim3(block) , 0, 0, d_data, d_reduc, n); CHECK(hipDeviceSynchronize()); CHECK(hipMemcpy(h_reduc, d_reduc, ng, hipMemcpyDeviceToHost)); dt_gpu = getTime() - t_gpu; double res_cpu = (double)n, res_gpu = 0.0; for (int k=0; k<grid.x; k++) res_gpu += h_reduc[k]; printf("Info: warm-up: \n"); printf("Info: dt = %.6f sec\n", dt_gpu); printf("Info: res: %.8f ?= %.8f \n", res_cpu, res_gpu); hipFree(d_data); hipFree(d_reduc); free(h_data); free(h_reduc); } /* void gpu_no_divergence(const float * h_data, float * h_reduc, const int ndat, const int nred, const dim3 grid, const dim3 block) { size_t bytes_data = ndat * sizeof(float); size_t bytes_reduc = nred * sizeof(float); float * d_data, * d_reduc; double t_gpu=0.0, dt_gpu=0.0; t_gpu = getTime(); CHECK(hipMalloc((float **)&d_data, bytes_data)); CHECK(hipMalloc((float **)&d_reduc, bytes_reduc)); CHECK(hipMemcpy(d_data, h_data, bytes_data, hipMemcpyHostToDevice)); CHECK(hipMemcpy(d_reduc, h_reduc, bytes_reduc, hipMemcpyHostToDevice)); // kernel launch reduceNeighbored_NoWarpDivergence <<< grid, block >>> (d_data, d_reduc, ndat); CHECK(hipDeviceSynchronize()); // copy d2h CHECK(hipMemcpy(h_reduc, d_reduc, bytes_reduc, hipMemcpyDeviceToHost)); dt_gpu = 
getTime() - t_gpu; double res_gpu = 0.0; for (int k=0; k<grid.x; k++) res_gpu += h_reduc[k]; printf("Info: gpu_no_divergence: \n"); printf("Info: res = %.8f, dt = %.6f sec\n", res_gpu, dt_gpu); hipFree(d_data); hipFree(d_reduc); } void gpu_interleaved_pair(const float * h_data, float * h_reduc, const int ndat, const int nred, const dim3 grid, const dim3 block) { size_t bytes_data = ndat * sizeof(float); size_t bytes_reduc = nred * sizeof(float); float * d_data, * d_reduc; double t_gpu=0.0, dt_gpu=0.0; t_gpu = getTime(); CHECK(hipMalloc((float **)&d_data, bytes_data)); CHECK(hipMalloc((float **)&d_reduc, bytes_reduc)); CHECK(hipMemcpy(d_data, h_data, bytes_data, hipMemcpyHostToDevice)); CHECK(hipMemcpy(d_reduc, h_reduc, bytes_reduc, hipMemcpyHostToDevice)); // kernel launch reduceInterleavedPair <<< grid, block >>> (d_data, d_reduc, ndat); CHECK(hipDeviceSynchronize()); // copy d2h CHECK(hipMemcpy(h_reduc, d_reduc, bytes_reduc, hipMemcpyDeviceToHost)); dt_gpu = getTime() - t_gpu; double res_gpu = 0.0; for (int k=0; k<grid.x; k++) res_gpu += h_reduc[k]; printf("Info: gpu_interleaved_pair: \n"); printf("Info: res = %.8f, dt = %.6f sec\n", res_gpu, dt_gpu); } void gpu_unroll_2(const float * h_data, float * h_reduc, const int ndat, const int nred, const dim3 grid, const dim3 block) { size_t bytes_data = ndat * sizeof(float); size_t bytes_reduc = nred * sizeof(float); float * d_data, * d_reduc; double t_gpu=0.0, dt_gpu=0.0; t_gpu = getTime(); CHECK(hipMalloc((float **)&d_data, bytes_data)); CHECK(hipMalloc((float **)&d_reduc, bytes_reduc)); CHECK(hipMemcpy(d_data, h_data, bytes_data, hipMemcpyHostToDevice)); CHECK(hipMemcpy(d_reduc, h_reduc, bytes_reduc, hipMemcpyHostToDevice)); // kernel launch reduceUnroll2 <<< grid, block >>> (d_data, d_reduc, ndat); CHECK(hipDeviceSynchronize()); // copy d2h CHECK(hipMemcpy(h_reduc, d_reduc, bytes_reduc, hipMemcpyDeviceToHost)); dt_gpu = getTime() - t_gpu; double res_gpu = 0.0; for (int k=0; k<grid.x; k++) res_gpu += h_reduc[k]; printf("Info: gpu_unroll_2: \n"); printf("Info: res = %.8f, dt = %.6f sec\n", res_gpu, dt_gpu); } void gpu_unroll_4(const float * h_data, float * h_reduc, const int ndat, const int nred, const dim3 grid, const dim3 block) { size_t bytes_data = ndat * sizeof(float); size_t bytes_reduc = nred * sizeof(float); float * d_data, * d_reduc; double t_gpu=0.0, dt_gpu=0.0; t_gpu = getTime(); CHECK(hipMalloc((float **)&d_data, bytes_data)); CHECK(hipMalloc((float **)&d_reduc, bytes_reduc)); CHECK(hipMemcpy(d_data, h_data, bytes_data, hipMemcpyHostToDevice)); CHECK(hipMemcpy(d_reduc, h_reduc, bytes_reduc, hipMemcpyHostToDevice)); // kernel launch reduceUnroll4 <<< grid, block >>> (d_data, d_reduc, ndat); CHECK(hipDeviceSynchronize()); // copy d2h CHECK(hipMemcpy(h_reduc, d_reduc, bytes_reduc, hipMemcpyDeviceToHost)); dt_gpu = getTime() - t_gpu; double res_gpu = 0.0; for (int k=0; k<grid.x; k++) res_gpu += h_reduc[k]; printf("Info: gpu_unroll_4: \n"); printf("Info: res = %.8f, dt = %.6f sec\n", res_gpu, dt_gpu); } void gpu_unroll_8(const float * h_data, float * h_reduc, const int ndat, const int nred, const dim3 grid, const dim3 block) { size_t bytes_data = ndat * sizeof(float); size_t bytes_reduc = nred * sizeof(float); float * d_data, * d_reduc; double t_gpu=0.0, dt_gpu=0.0; t_gpu = getTime(); CHECK(hipMalloc((float **)&d_data, bytes_data)); CHECK(hipMalloc((float **)&d_reduc, bytes_reduc)); CHECK(hipMemcpy(d_data, h_data, bytes_data, hipMemcpyHostToDevice)); CHECK(hipMemcpy(d_reduc, h_reduc, bytes_reduc, hipMemcpyHostToDevice)); // kernel 
launch reduceUnroll8 <<< grid, block >>> (d_data, d_reduc, ndat); CHECK(hipDeviceSynchronize()); // copy d2h CHECK(hipMemcpy(h_reduc, d_reduc, bytes_reduc, hipMemcpyDeviceToHost)); dt_gpu = getTime() - t_gpu; double res_gpu = 0.0; for (int k=0; k<grid.x; k++) res_gpu += h_reduc[k]; printf("Info: gpu_unroll_2: \n"); printf("Info: res = %.8f, dt = %.6f sec\n", res_gpu, dt_gpu); } void gpu_unroll_8_warp_unroll(const float * h_data, float * h_reduc, const int ndat, const int nred, const dim3 grid, const dim3 block) { size_t bytes_data = ndat * sizeof(float); size_t bytes_reduc = nred * sizeof(float); float * d_data, * d_reduc; double t_gpu=0.0, dt_gpu=0.0; t_gpu = getTime(); CHECK(hipMalloc((float **)&d_data, bytes_data)); CHECK(hipMalloc((float **)&d_reduc, bytes_reduc)); CHECK(hipMemcpy(d_data, h_data, bytes_data, hipMemcpyHostToDevice)); CHECK(hipMemcpy(d_reduc, h_reduc, bytes_reduc, hipMemcpyHostToDevice)); // kernel launch reduceUnroll8_warp <<< grid, block >>> (d_data, d_reduc, ndat); CHECK(hipDeviceSynchronize()); // copy d2h CHECK(hipMemcpy(h_reduc, d_reduc, bytes_reduc, hipMemcpyDeviceToHost)); dt_gpu = getTime() - t_gpu; double res_gpu = 0.0; for (int k=0; k<grid.x; k++) res_gpu += h_reduc[k]; printf("Info: gpu_unroll_2: \n"); printf("Info: res = %.8f, dt = %.6f sec\n", res_gpu, dt_gpu); } */ // ########### M A I N ########### int main(int argc, char** argv) { // test input and execution if (argc != 2) { printf("Error: Wrong number of arguments. Call e.g. \n"); printf(" $> run.exe <threads_x>\n"); printf("where <threads_x> is the number of threads \n"); printf("per block along x-direction.\n"); exit(EXIT_FAILURE); } const int dev = 0; hipDeviceProp_t dev_prop; CHECK(hipGetDeviceProperties(&dev_prop, dev)); CHECK(hipSetDevice(dev)); printf("Info: Using device: %s. \n", dev_prop.name); // problem setup const unsigned int size = 1 << 24; // kernel launch configs int thrdx = atoi(argv[1]); if (thrdx < 1) thrdx = 128; dim3 block(thrdx, 1); dim3 grid((size-1)/block.x+1, 1); printf("Info: grid=(%d, %d), block=(%d, %d)\n", grid.x, grid.y, block.x, block.y); const unsigned int bytes_data = size * sizeof(float); const unsigned int bytes_reduc = grid.x * sizeof(float); // host arrays float * h_data = (float *) malloc(bytes_data); // length N float * h_reduc = (float *) malloc(bytes_reduc); // length num. 
grid blocks // initialize data to random values init_float(h_data, size); reset_host_data(h_reduc, bytes_reduc); cpu_do_sum(h_data, h_reduc, size, grid.x); // warm up the device warmup(); // warp divergence printf("\nInfo: reduceNeighbored_warpDivergence \n"); TestPlatform(reduceNeighbored_warpDivergence, \ h_data, h_reduc, size, grid.x, grid, block); // no warp divergence printf("\nInfo: reduceNeighbored_NoWarpDivergence \n"); TestPlatform(reduceNeighbored_NoWarpDivergence, \ h_data, h_reduc, size, grid.x, grid, block); // interleaved-pair approach printf("\nInfo: reduceInterleavedPair \n"); TestPlatform(reduceInterleavedPair, \ h_data, h_reduc, size, grid.x, grid, block); // loop unrolling // 2 blocks per thread printf("\nInfo: reduceUnroll2\n"); dim3 grid_2((size-1)/block.x+1, 1); TestPlatform(reduceUnroll2, h_data, h_reduc, size, grid_2.x, grid_2, block); // 4 blocks per thread printf("\nInfo: reduceUnroll4\n"); dim3 grid_4((size-1)/block.x+1, 1); TestPlatform(reduceUnroll4, h_data, h_reduc, size, grid_4.x, grid_4, block); // 8 blocks per thread printf("\nInfo: reduceUnroll8\n"); dim3 grid_8((size-1)/block.x+1, 1); TestPlatform(reduceUnroll8, h_data, h_reduc, size, grid_8.x, grid_8, block); // 8 blocks per thread incuding warp unrolling printf("\nInfo: reduceUnroll8_warp\n"); TestPlatform(reduceUnroll8_warp, \ h_data, h_reduc, size, grid_8.x, grid_8, block); // free up memory free(h_data); free(h_reduc); // thanks and goodbye :-) CHECK(hipDeviceReset()); return EXIT_SUCCESS; }
1991c1c0a2da1692bf94289b95993c718ee2063b.cu
#include <stdio.h> #include <stdlib.h> #include <cuda_runtime.h> extern "C" { #include "check.h" #include "timer.h" #include "init.h" #include "template.h" } void reset_host_data(float * arr, const size_t bytes) { memset(arr, 0, bytes); } float recursiveReduce(float * arr, const int n) { if (n == 1) return arr[0]; // if n is odd, sum up the last element with the first element if ((n % 2) != 0) arr[0] += arr[n-1]; int stride = n / 2; for (int k=0; k<stride; k++) { arr[k] += arr[k + stride]; } return recursiveReduce(arr, stride); } // ########### K E R N E L S ############ __global__ void reduceNeighbored_warpDivergence(float * data, float * reduc, const int n) { int thx = threadIdx.x; int idx = thx + blockIdx.x * blockDim.x; if (idx >= n) return; float * ptr = data + blockIdx.x * blockDim.x; for (int stride=1; stride < blockDim.x; stride *= 2) { if ( thx % (2*stride) != 0) continue; ptr[thx] += ptr[thx + stride]; __syncthreads(); } if (thx == 0) reduc[blockIdx.x] = ptr[0]; } __global__ void reduceNeighbored_NoWarpDivergence(float * data, float * reduc, const int n) { int thx = threadIdx.x; int idx = thx + blockIdx.x + blockDim.x; if (idx >= n) return; float * ptr = data + blockIdx.x * blockDim.x; for (int stride=1; stride<blockDim.x; stride *= 2) { int index = 2 * stride * thx; if (index < blockDim.x) ptr[index] += ptr[index + stride]; // see table above __syncthreads(); } if (thx == 0) reduc[blockIdx.x] = ptr[0]; } __global__ void reduceInterleavedPair(float * data, float * reduc, const int n) { size_t thx = threadIdx.x; size_t idx = thx + blockIdx.x * blockDim.x; if (idx >= n) return; float * ptr = data + blockIdx.x * blockDim.x; for (int stride = blockDim.x/2; stride > 0; stride >>= 1) { if (thx < stride) { ptr[thx] += ptr[thx + stride]; } __syncthreads(); } if (thx == 0) reduc[blockIdx.x] = ptr[0]; } __global__ void reduceUnroll2(float * data, float * reduc, const int n) { size_t thx = threadIdx.x; size_t idx = thx + blockIdx.x * blockDim.x * 2; float * ptr = data + blockIdx.x * blockDim.x * 2; // unrolling 2 data blocks if (thx + blockDim.x < n) data[idx] += data[idx + blockDim.x]; __syncthreads(); // in-place reduction in global memory for (int stride=blockDim.x/2; stride>0; stride >>= 1) { if (thx < stride) ptr[thx] += ptr[thx + stride]; __syncthreads(); } if (thx == 0) reduc[blockIdx.x] = ptr[0]; } __global__ void reduceUnroll4(float * data, float * reduc, const int n) { size_t thx = threadIdx.x; size_t idx = thx + blockIdx.x * blockDim.x * 4; float * ptr = data + blockIdx.x * blockDim.x * 4; // unrolling 4 data blocks if (thx + blockDim.x * 3 < n) { data[idx] += data[idx + blockDim.x] + data[idx + blockDim.x * 2] + data[idx + blockDim.x * 3]; __syncthreads(); } // in-place reduction in global memory for (int stride=blockDim.x/2; stride>0; stride>>=1) { if (thx < stride) ptr[thx] += ptr[thx + stride]; __syncthreads(); } if (thx == 0) reduc[blockIdx.x] = ptr[0]; } __global__ void reduceUnroll8(float * data, float * reduc, const int n) { size_t thx = threadIdx.x; size_t idx = thx + blockIdx.x * blockDim.x * 8; float * ptr = data + blockIdx.x * blockDim.x * 8; // unrolling 8 data blocks if (thx + blockDim.x * 7 < n) { data[idx] += data[idx + blockDim.x] + data[idx + blockDim.x * 2] + data[idx + blockDim.x * 3] + data[idx + blockDim.x * 4] + data[idx + blockDim.x * 5] + data[idx + blockDim.x * 6] + data[idx + blockDim.x * 7]; __syncthreads(); } // in-place reduction in global memory for (int stride=blockDim.x/2; stride>0; stride>>=1) { if (thx < stride) ptr[thx] += ptr[thx + stride]; 
__syncthreads(); } if (thx == 0) reduc[blockIdx.x] = ptr[0]; } __global__ void reduceUnroll8_warp(float * data, float * reduc, const int n) { size_t thx = threadIdx.x; size_t idx = thx + blockIdx.x * blockDim.x * 8; float * ptr = data + blockIdx.x * blockDim.x * 8; // unrolling 8 data blocks if (thx + blockDim.x * 7 < n) { data[idx] += data[idx + blockDim.x] + data[idx + blockDim.x * 2] + data[idx + blockDim.x * 3] + data[idx + blockDim.x * 4] + data[idx + blockDim.x * 5] + data[idx + blockDim.x * 6] + data[idx + blockDim.x * 7]; __syncthreads(); } // in-place reduction in global memory for (int stride=blockDim.x/2; stride>32; stride>>=1) { if (thx < stride) ptr[thx] += ptr[thx + stride]; __syncthreads(); } // warp unrolling if (thx < 32) { volatile float * vmem = data; vmem[thx] += vmem[thx + 32]; vmem[thx] += vmem[thx + 16]; vmem[thx] += vmem[thx + 8]; vmem[thx] += vmem[thx + 4]; vmem[thx] += vmem[thx + 2]; vmem[thx] += vmem[thx + 1]; } if (thx == 0) reduc[blockIdx.x] = ptr[0]; } // ########### R U N T I M E W R A P P E R S ############ void cpu_do_sum(const float * h_data, float * h_reduc, const int n, const int m) { size_t nbytes = n * sizeof(float), mbytes = m * sizeof(float); memset(h_reduc, 0, mbytes); double t_cpu = getTime(); // float * _data, * _reduc; float * _data = (float *)malloc(nbytes); float * _reduc = (float *)malloc(mbytes); memcpy(_data, h_data, nbytes); memcpy(_reduc, h_reduc, mbytes); recursiveReduce(_data, n); _reduc[0] = _data[0]; // free(_data); free(_reduc); float dt_cpu = getTime() - t_cpu; printf("Info: cpu_do_sum: sum_cpu = %.8f\n", _reduc[0]); printf("Info: dt_cpu = %.6f sec\n", dt_cpu); } void warmup() { size_t n = 1 << 14, nf = n * sizeof(float); dim3 block(256, 1), grid((n+block.x-1)/block.x, 1); int ng = grid.x * sizeof(float); float * h_data = (float *)malloc(nf); float * h_reduc = (float *)malloc(ng); for (size_t k=0; k<n; k++) h_data[k] = 1.0; memset(h_reduc, 0.0, ng); double t_gpu = getTime(), dt_gpu=0.0; float * d_data, * d_reduc; CHECK(cudaMalloc((float **)&d_data, nf)); CHECK(cudaMalloc((float **)&d_reduc, ng)); CHECK(cudaMemcpy(d_data, h_data, nf, cudaMemcpyHostToDevice)); CHECK(cudaMemcpy(d_reduc, h_reduc, ng, cudaMemcpyHostToDevice)); reduceNeighbored_warpDivergence <<< grid, block >>> (d_data, d_reduc, n); CHECK(cudaDeviceSynchronize()); CHECK(cudaMemcpy(h_reduc, d_reduc, ng, cudaMemcpyDeviceToHost)); dt_gpu = getTime() - t_gpu; double res_cpu = (double)n, res_gpu = 0.0; for (int k=0; k<grid.x; k++) res_gpu += h_reduc[k]; printf("Info: warm-up: \n"); printf("Info: dt = %.6f sec\n", dt_gpu); printf("Info: res: %.8f ?= %.8f \n", res_cpu, res_gpu); cudaFree(d_data); cudaFree(d_reduc); free(h_data); free(h_reduc); } /* void gpu_no_divergence(const float * h_data, float * h_reduc, const int ndat, const int nred, const dim3 grid, const dim3 block) { size_t bytes_data = ndat * sizeof(float); size_t bytes_reduc = nred * sizeof(float); float * d_data, * d_reduc; double t_gpu=0.0, dt_gpu=0.0; t_gpu = getTime(); CHECK(cudaMalloc((float **)&d_data, bytes_data)); CHECK(cudaMalloc((float **)&d_reduc, bytes_reduc)); CHECK(cudaMemcpy(d_data, h_data, bytes_data, cudaMemcpyHostToDevice)); CHECK(cudaMemcpy(d_reduc, h_reduc, bytes_reduc, cudaMemcpyHostToDevice)); // kernel launch reduceNeighbored_NoWarpDivergence <<< grid, block >>> (d_data, d_reduc, ndat); CHECK(cudaDeviceSynchronize()); // copy d2h CHECK(cudaMemcpy(h_reduc, d_reduc, bytes_reduc, cudaMemcpyDeviceToHost)); dt_gpu = getTime() - t_gpu; double res_gpu = 0.0; for (int k=0; k<grid.x; k++) res_gpu += 
h_reduc[k]; printf("Info: gpu_no_divergence: \n"); printf("Info: res = %.8f, dt = %.6f sec\n", res_gpu, dt_gpu); cudaFree(d_data); cudaFree(d_reduc); } void gpu_interleaved_pair(const float * h_data, float * h_reduc, const int ndat, const int nred, const dim3 grid, const dim3 block) { size_t bytes_data = ndat * sizeof(float); size_t bytes_reduc = nred * sizeof(float); float * d_data, * d_reduc; double t_gpu=0.0, dt_gpu=0.0; t_gpu = getTime(); CHECK(cudaMalloc((float **)&d_data, bytes_data)); CHECK(cudaMalloc((float **)&d_reduc, bytes_reduc)); CHECK(cudaMemcpy(d_data, h_data, bytes_data, cudaMemcpyHostToDevice)); CHECK(cudaMemcpy(d_reduc, h_reduc, bytes_reduc, cudaMemcpyHostToDevice)); // kernel launch reduceInterleavedPair <<< grid, block >>> (d_data, d_reduc, ndat); CHECK(cudaDeviceSynchronize()); // copy d2h CHECK(cudaMemcpy(h_reduc, d_reduc, bytes_reduc, cudaMemcpyDeviceToHost)); dt_gpu = getTime() - t_gpu; double res_gpu = 0.0; for (int k=0; k<grid.x; k++) res_gpu += h_reduc[k]; printf("Info: gpu_interleaved_pair: \n"); printf("Info: res = %.8f, dt = %.6f sec\n", res_gpu, dt_gpu); } void gpu_unroll_2(const float * h_data, float * h_reduc, const int ndat, const int nred, const dim3 grid, const dim3 block) { size_t bytes_data = ndat * sizeof(float); size_t bytes_reduc = nred * sizeof(float); float * d_data, * d_reduc; double t_gpu=0.0, dt_gpu=0.0; t_gpu = getTime(); CHECK(cudaMalloc((float **)&d_data, bytes_data)); CHECK(cudaMalloc((float **)&d_reduc, bytes_reduc)); CHECK(cudaMemcpy(d_data, h_data, bytes_data, cudaMemcpyHostToDevice)); CHECK(cudaMemcpy(d_reduc, h_reduc, bytes_reduc, cudaMemcpyHostToDevice)); // kernel launch reduceUnroll2 <<< grid, block >>> (d_data, d_reduc, ndat); CHECK(cudaDeviceSynchronize()); // copy d2h CHECK(cudaMemcpy(h_reduc, d_reduc, bytes_reduc, cudaMemcpyDeviceToHost)); dt_gpu = getTime() - t_gpu; double res_gpu = 0.0; for (int k=0; k<grid.x; k++) res_gpu += h_reduc[k]; printf("Info: gpu_unroll_2: \n"); printf("Info: res = %.8f, dt = %.6f sec\n", res_gpu, dt_gpu); } void gpu_unroll_4(const float * h_data, float * h_reduc, const int ndat, const int nred, const dim3 grid, const dim3 block) { size_t bytes_data = ndat * sizeof(float); size_t bytes_reduc = nred * sizeof(float); float * d_data, * d_reduc; double t_gpu=0.0, dt_gpu=0.0; t_gpu = getTime(); CHECK(cudaMalloc((float **)&d_data, bytes_data)); CHECK(cudaMalloc((float **)&d_reduc, bytes_reduc)); CHECK(cudaMemcpy(d_data, h_data, bytes_data, cudaMemcpyHostToDevice)); CHECK(cudaMemcpy(d_reduc, h_reduc, bytes_reduc, cudaMemcpyHostToDevice)); // kernel launch reduceUnroll4 <<< grid, block >>> (d_data, d_reduc, ndat); CHECK(cudaDeviceSynchronize()); // copy d2h CHECK(cudaMemcpy(h_reduc, d_reduc, bytes_reduc, cudaMemcpyDeviceToHost)); dt_gpu = getTime() - t_gpu; double res_gpu = 0.0; for (int k=0; k<grid.x; k++) res_gpu += h_reduc[k]; printf("Info: gpu_unroll_4: \n"); printf("Info: res = %.8f, dt = %.6f sec\n", res_gpu, dt_gpu); } void gpu_unroll_8(const float * h_data, float * h_reduc, const int ndat, const int nred, const dim3 grid, const dim3 block) { size_t bytes_data = ndat * sizeof(float); size_t bytes_reduc = nred * sizeof(float); float * d_data, * d_reduc; double t_gpu=0.0, dt_gpu=0.0; t_gpu = getTime(); CHECK(cudaMalloc((float **)&d_data, bytes_data)); CHECK(cudaMalloc((float **)&d_reduc, bytes_reduc)); CHECK(cudaMemcpy(d_data, h_data, bytes_data, cudaMemcpyHostToDevice)); CHECK(cudaMemcpy(d_reduc, h_reduc, bytes_reduc, cudaMemcpyHostToDevice)); // kernel launch reduceUnroll8 <<< grid, block >>> (d_data, 
d_reduc, ndat); CHECK(cudaDeviceSynchronize()); // copy d2h CHECK(cudaMemcpy(h_reduc, d_reduc, bytes_reduc, cudaMemcpyDeviceToHost)); dt_gpu = getTime() - t_gpu; double res_gpu = 0.0; for (int k=0; k<grid.x; k++) res_gpu += h_reduc[k]; printf("Info: gpu_unroll_2: \n"); printf("Info: res = %.8f, dt = %.6f sec\n", res_gpu, dt_gpu); } void gpu_unroll_8_warp_unroll(const float * h_data, float * h_reduc, const int ndat, const int nred, const dim3 grid, const dim3 block) { size_t bytes_data = ndat * sizeof(float); size_t bytes_reduc = nred * sizeof(float); float * d_data, * d_reduc; double t_gpu=0.0, dt_gpu=0.0; t_gpu = getTime(); CHECK(cudaMalloc((float **)&d_data, bytes_data)); CHECK(cudaMalloc((float **)&d_reduc, bytes_reduc)); CHECK(cudaMemcpy(d_data, h_data, bytes_data, cudaMemcpyHostToDevice)); CHECK(cudaMemcpy(d_reduc, h_reduc, bytes_reduc, cudaMemcpyHostToDevice)); // kernel launch reduceUnroll8_warp <<< grid, block >>> (d_data, d_reduc, ndat); CHECK(cudaDeviceSynchronize()); // copy d2h CHECK(cudaMemcpy(h_reduc, d_reduc, bytes_reduc, cudaMemcpyDeviceToHost)); dt_gpu = getTime() - t_gpu; double res_gpu = 0.0; for (int k=0; k<grid.x; k++) res_gpu += h_reduc[k]; printf("Info: gpu_unroll_2: \n"); printf("Info: res = %.8f, dt = %.6f sec\n", res_gpu, dt_gpu); } */ // ########### M A I N ########### int main(int argc, char** argv) { // test input and execution if (argc != 2) { printf("Error: Wrong number of arguments. Call e.g. \n"); printf(" $> run.exe <threads_x>\n"); printf("where <threads_x> is the number of threads \n"); printf("per block along x-direction.\n"); exit(EXIT_FAILURE); } const int dev = 0; cudaDeviceProp dev_prop; CHECK(cudaGetDeviceProperties(&dev_prop, dev)); CHECK(cudaSetDevice(dev)); printf("Info: Using device: %s. \n", dev_prop.name); // problem setup const unsigned int size = 1 << 24; // kernel launch configs int thrdx = atoi(argv[1]); if (thrdx < 1) thrdx = 128; dim3 block(thrdx, 1); dim3 grid((size-1)/block.x+1, 1); printf("Info: grid=(%d, %d), block=(%d, %d)\n", grid.x, grid.y, block.x, block.y); const unsigned int bytes_data = size * sizeof(float); const unsigned int bytes_reduc = grid.x * sizeof(float); // host arrays float * h_data = (float *) malloc(bytes_data); // length N float * h_reduc = (float *) malloc(bytes_reduc); // length num. 
grid blocks // initialize data to random values init_float(h_data, size); reset_host_data(h_reduc, bytes_reduc); cpu_do_sum(h_data, h_reduc, size, grid.x); // warm up the device warmup(); // warp divergence printf("\nInfo: reduceNeighbored_warpDivergence \n"); TestPlatform(reduceNeighbored_warpDivergence, \ h_data, h_reduc, size, grid.x, grid, block); // no warp divergence printf("\nInfo: reduceNeighbored_NoWarpDivergence \n"); TestPlatform(reduceNeighbored_NoWarpDivergence, \ h_data, h_reduc, size, grid.x, grid, block); // interleaved-pair approach printf("\nInfo: reduceInterleavedPair \n"); TestPlatform(reduceInterleavedPair, \ h_data, h_reduc, size, grid.x, grid, block); // loop unrolling // 2 blocks per thread printf("\nInfo: reduceUnroll2\n"); dim3 grid_2((size-1)/block.x+1, 1); TestPlatform(reduceUnroll2, h_data, h_reduc, size, grid_2.x, grid_2, block); // 4 blocks per thread printf("\nInfo: reduceUnroll4\n"); dim3 grid_4((size-1)/block.x+1, 1); TestPlatform(reduceUnroll4, h_data, h_reduc, size, grid_4.x, grid_4, block); // 8 blocks per thread printf("\nInfo: reduceUnroll8\n"); dim3 grid_8((size-1)/block.x+1, 1); TestPlatform(reduceUnroll8, h_data, h_reduc, size, grid_8.x, grid_8, block); // 8 blocks per thread incuding warp unrolling printf("\nInfo: reduceUnroll8_warp\n"); TestPlatform(reduceUnroll8_warp, \ h_data, h_reduc, size, grid_8.x, grid_8, block); // free up memory free(h_data); free(h_reduc); // thanks and goodbye :-) CHECK(cudaDeviceReset()); return EXIT_SUCCESS; }
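// Note (added): a hedged observation on the launch configuration in main() above.
// reduceUnroll2/4/8 let each thread block consume 2, 4 or 8 data blocks, so the grids
// for those kernels are usually shrunk accordingly; grid_2, grid_4 and grid_8 above
// are all computed the same way as grid. A sketch of the commonly used sizing:
//
//   dim3 grid_2(((size / 2) + block.x - 1) / block.x, 1);
//   dim3 grid_4(((size / 4) + block.x - 1) / block.x, 1);
//   dim3 grid_8(((size / 8) + block.x - 1) / block.x, 1);
//
// Likewise, reduceNeighbored_NoWarpDivergence builds idx with
// "blockIdx.x + blockDim.x"; the neighbouring kernels use "blockIdx.x * blockDim.x",
// which is presumably what was intended here as well.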
792b77744283656417e01b20f5216c4a7efd6467.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 2.5.4) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date October 2020 @generated from sparse/blas/zmdotc.cu, normal z -> d, Thu Oct 8 23:05:48 2020 @author Hartwig Anzt */ #include "magmasparse_internal.h" #define BLOCK_SIZE 256 #define REAL // dot product for multiple vectors __global__ void magma_dmdotc1_kernel_1( int Gs, int n, double * v0, double * w0, double * vtmp) { extern __shared__ double temp[]; int Idx = threadIdx.x; int i = blockIdx.x * blockDim.x + Idx; // 1 vectors v(i)/w(i) temp[ Idx ] = ( i < n ) ? v0[ i ] * w0[ i ] : MAGMA_D_ZERO; __syncthreads(); if ( Idx < 128 ){ temp[ Idx ] += temp[ Idx + 128 ]; } __syncthreads(); if ( Idx < 64 ){ temp[ Idx ] += temp[ Idx + 64 ]; } __syncthreads(); #ifdef COMPLEX if( Idx < 32 ){ temp[ Idx ] += temp[ Idx + 32 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 16 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 8 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 4 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 2 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 1 ]; __syncthreads(); } #endif #ifdef REAL if( Idx < 32 ){ volatile double *temp2 = temp; temp2[ Idx ] += temp2[ Idx + 32 ]; temp2[ Idx ] += temp2[ Idx + 16 ]; temp2[ Idx ] += temp2[ Idx + 8 ]; temp2[ Idx ] += temp2[ Idx + 4 ]; temp2[ Idx ] += temp2[ Idx + 2 ]; temp2[ Idx ] += temp2[ Idx + 1 ]; } #endif if ( Idx == 0 ){ vtmp[ blockIdx.x ] = temp[ 0 ]; } } // block reduction for 1 vectors __global__ void magma_dmdotc1_kernel_2( int Gs, int n, double * vtmp, double * vtmp2 ) { extern __shared__ double temp[]; int Idx = threadIdx.x; int blockSize = 128; int gridSize = blockSize * 2 * gridDim.x; int i = blockIdx.x * ( blockSize * 2 ) + Idx; temp[Idx] = MAGMA_D_ZERO; while (i < Gs ) { temp[ Idx ] += vtmp[ i ]; temp[ Idx ] += ( i + (blockSize) < Gs ) ? vtmp[ i + (blockSize) ] : MAGMA_D_ZERO; i += gridSize; } __syncthreads(); if ( Idx < 64 ){ temp[ Idx ] += temp[ Idx + 64 ]; } __syncthreads(); #ifdef COMPLEX if( Idx < 32 ){ temp[ Idx ] += temp[ Idx + 32 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 16 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 8 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 4 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 2 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 1 ]; __syncthreads(); } #endif #ifdef REAL if( Idx < 32 ){ volatile double *temp2 = temp; temp2[ Idx ] += temp2[ Idx + 32 ]; temp2[ Idx ] += temp2[ Idx + 16 ]; temp2[ Idx ] += temp2[ Idx + 8 ]; temp2[ Idx ] += temp2[ Idx + 4 ]; temp2[ Idx ] += temp2[ Idx + 2 ]; temp2[ Idx ] += temp2[ Idx + 1 ]; } #endif if ( Idx == 0 ){ vtmp2[ blockIdx.x ] = temp[ 0 ]; } } /** Purpose ------- Computes the scalar product of a set of 1 vectors such that skp[0] = [ <v_0,w_0> ] Returns the vector skp. In case there are less dot products required, an easy workaround is given by doubling input. Arguments --------- @param[in] n int length of v_i and w_i @param[in] v0 magmaDouble_ptr input vector @param[in] w0 magmaDouble_ptr input vector @param[in] d1 magmaDouble_ptr workspace @param[in] d2 magmaDouble_ptr workspace @param[out] skp magmaDouble_ptr vector[4] of scalar products [<v_i, w_i>] This vector is located on the host @param[in] queue magma_queue_t Queue to execute in. 
@ingroup magmasparse_cblas ********************************************************************/ extern "C" magma_int_t magma_dmdotc1( magma_int_t n, magmaDouble_ptr v0, magmaDouble_ptr w0, magmaDouble_ptr d1, magmaDouble_ptr d2, magmaDouble_ptr skp, magma_queue_t queue ) { int local_block_size=256; dim3 Bs( local_block_size ); dim3 Gs( magma_ceildiv( n, local_block_size ) ); dim3 Gs_next; int Ms = (local_block_size) * sizeof( double ); // 1 skp magmaDouble_ptr aux1 = d1, aux2 = d2; int b = 1; hipLaunchKernelGGL(( magma_dmdotc1_kernel_1), dim3(Gs), dim3(Bs), Ms, queue->cuda_stream() , Gs.x, n, v0, w0, d1 ); while( Gs.x > 1 ) { Gs_next.x = magma_ceildiv( Gs.x, Bs.x ); if ( Gs_next.x == 1 ) Gs_next.x = 2; hipLaunchKernelGGL(( magma_dmdotc1_kernel_2), dim3(Gs_next.x/2), dim3(Bs.x/2), Ms/2, queue->cuda_stream() , Gs.x, n, aux1, aux2 ); Gs_next.x = Gs_next.x /2; Gs.x = Gs_next.x; b = 1 - b; if ( b ) { aux1 = d1; aux2 = d2; } else { aux2 = d1; aux1 = d2; } } // copy vectors to host magma_dgetvector( 1 , aux1, 1, skp, 1, queue ); return MAGMA_SUCCESS; } // 2 dot products // // initialize arrays with zero __global__ void magma_dmdotc2_gpumemzero( double * d, int n ) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < n) { for (int j = 0; j < 2; j++) d[ i+j*n ] = MAGMA_D_MAKE( 0.0, 0.0 ); } } // dot product for multiple vectors __global__ void magma_dmdotc2_kernel_1( int Gs, int n, double * v0, double * w0, double * v1, double * w1, double * vtmp) { extern __shared__ double temp[]; int Idx = threadIdx.x; int i = blockIdx.x * blockDim.x + Idx; int j; // 2 vectors v(i)/w(i) temp[ Idx ] = ( i < n ) ? v0[ i ] * w0[ i ] : MAGMA_D_ZERO; temp[ Idx + blockDim.x ] = ( i < n ) ? v1[ i ] * w1[ i ] : MAGMA_D_ZERO; __syncthreads(); if ( Idx < 128 ){ for( j=0; j<2; j++){ temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 128 ]; } } __syncthreads(); if ( Idx < 64 ){ for( j=0; j<2; j++){ temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 64 ]; } } __syncthreads(); #ifdef COMPLEX if( Idx < 32 ){ for( j=0; j<2; j++) temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 32 ]; __syncthreads(); for( j=0; j<2; j++) temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 16 ]; __syncthreads(); for( j=0; j<2; j++) temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 8 ]; __syncthreads(); for( j=0; j<2; j++) temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 4 ]; __syncthreads(); for( j=0; j<2; j++) temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 2 ]; __syncthreads(); for( j=0; j<2; j++) temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 1 ]; __syncthreads(); } #endif #ifdef REAL if( Idx < 32 ){ volatile double *temp2 = temp; for( j=0; j<2; j++){ temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 32 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 16 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 8 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 4 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 2 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 1 ]; } } #endif if ( Idx == 0 ){ for( j=0; j<2; j++){ vtmp[ blockIdx.x+j*n ] = temp[ j*blockDim.x ]; } } } // block reduction for 2 vectors __global__ void magma_dmdotc2_kernel_2( int Gs, int n, double * vtmp, double * vtmp2 ) { extern __shared__ double temp[]; int Idx = threadIdx.x; int blockSize = 128; int gridSize = blockSize * 2 * gridDim.x; int j; for( j=0; j<2; j++){ int i = blockIdx.x * ( blockSize * 2 ) + Idx; temp[Idx+j*(blockSize)] = MAGMA_D_ZERO; while (i < Gs ) { temp[ Idx+j*(blockSize) ] += vtmp[ i+j*n 
]; temp[ Idx+j*(blockSize) ] += ( i + (blockSize) < Gs ) ? vtmp[ i+j*n + (blockSize) ] : MAGMA_D_ZERO; i += gridSize; } } __syncthreads(); if ( Idx < 64 ){ for( j=0; j<2; j++){ temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 64 ]; } } __syncthreads(); #ifdef COMPLEX if( Idx < 32 ){ for( j=0; j<2; j++) temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 32 ]; __syncthreads(); for( j=0; j<2; j++) temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 16 ]; __syncthreads(); for( j=0; j<2; j++) temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 8 ]; __syncthreads(); for( j=0; j<2; j++) temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 4 ]; __syncthreads(); for( j=0; j<2; j++) temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 2 ]; __syncthreads(); for( j=0; j<2; j++) temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 1 ]; __syncthreads(); } #endif #ifdef REAL if( Idx < 32 ){ volatile double *temp2 = temp; for( j=0; j<2; j++){ temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 32 ]; temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 16 ]; temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 8 ]; temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 4 ]; temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 2 ]; temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 1 ]; } } #endif if ( Idx == 0 ){ for( j=0; j<2; j++){ vtmp2[ blockIdx.x+j*n ] = temp[ j*(blockSize) ]; } } } /** Purpose ------- Computes the scalar product of a set of 2 vectors such that skp[0,1,2,3] = [ <v_0,w_0>, <v_1,w_1> ] Returns the vector skp. In case there are less dot products required, an easy workaround is given by doubling input. Arguments --------- @param[in] n int length of v_i and w_i @param[in] v0 magmaDouble_ptr input vector @param[in] w0 magmaDouble_ptr input vector @param[in] v1 magmaDouble_ptr input vector @param[in] w1 magmaDouble_ptr input vector @param[in] d1 magmaDouble_ptr workspace @param[in] d2 magmaDouble_ptr workspace @param[out] skp magmaDouble_ptr vector[3] of scalar products [<v_i, w_i>] This vector is located on the host @param[in] queue magma_queue_t Queue to execute in. 
@ingroup magmasparse_cblas ********************************************************************/ extern "C" magma_int_t magma_dmdotc2( magma_int_t n, magmaDouble_ptr v0, magmaDouble_ptr w0, magmaDouble_ptr v1, magmaDouble_ptr w1, magmaDouble_ptr d1, magmaDouble_ptr d2, magmaDouble_ptr skp, magma_queue_t queue ) { int local_block_size=256; dim3 Bs( local_block_size ); dim3 Gs( magma_ceildiv( n, local_block_size ) ); dim3 Gs_next; int Ms = 2 * (local_block_size) * sizeof( double ); // 4 skp magmaDouble_ptr aux1 = d1, aux2 = d2; int b = 1; hipLaunchKernelGGL(( magma_dmdotc2_kernel_1), dim3(Gs), dim3(Bs), Ms, queue->cuda_stream() , Gs.x, n, v0, w0, v1, w1, d1 ); while( Gs.x > 1 ) { Gs_next.x = magma_ceildiv( Gs.x, Bs.x ); if ( Gs_next.x == 1 ) Gs_next.x = 2; hipLaunchKernelGGL(( magma_dmdotc2_kernel_2), dim3(Gs_next.x/2), dim3(Bs.x/2), Ms/2, queue->cuda_stream() , Gs.x, n, aux1, aux2 ); Gs_next.x = Gs_next.x /2; Gs.x = Gs_next.x; b = 1 - b; if ( b ) { aux1 = d1; aux2 = d2; } else { aux2 = d1; aux1 = d2; } } // copy vectors to host magma_dgetvector( 2 , aux1, n, skp, 1, queue ); return MAGMA_SUCCESS; } // 3 dot products // // initialize arrays with zero __global__ void magma_dmdotc3_gpumemzero( double * d, int n ) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < n) { for (int j = 0; j < 3; j++) d[ i+j*n ] = MAGMA_D_MAKE( 0.0, 0.0 ); } } // dot product for multiple vectors __global__ void magma_dmdotc3_kernel_1( int Gs, int n, double * v0, double * w0, double * v1, double * w1, double * v2, double * w2, double * vtmp) { extern __shared__ double temp[]; int Idx = threadIdx.x; int i = blockIdx.x * blockDim.x + Idx; int j; // 3 vectors v(i)/w(i) temp[ Idx ] = ( i < n ) ? v0[ i ] * w0[ i ] : MAGMA_D_ZERO; temp[ Idx + blockDim.x ] = ( i < n ) ? v1[ i ] * w1[ i ] : MAGMA_D_ZERO; temp[ Idx + 2*blockDim.x ] = ( i < n ) ? 
v2[ i ] * w2[ i ] : MAGMA_D_ZERO; __syncthreads(); if ( Idx < 128 ){ for( j=0; j<3; j++){ temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 128 ]; } } __syncthreads(); if ( Idx < 64 ){ for( j=0; j<3; j++){ temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 64 ]; } } __syncthreads(); #ifdef COMPLEX if( Idx < 32 ){ for( j=0; j<3; j++) temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 32 ]; __syncthreads(); for( j=0; j<3; j++) temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 16 ]; __syncthreads(); for( j=0; j<3; j++) temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 8 ]; __syncthreads(); for( j=0; j<3; j++) temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 4 ]; __syncthreads(); for( j=0; j<3; j++) temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 2 ]; __syncthreads(); for( j=0; j<3; j++) temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 1 ]; __syncthreads(); } #endif #ifdef REAL if( Idx < 32 ){ volatile double *temp2 = temp; for( j=0; j<3; j++){ temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 32 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 16 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 8 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 4 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 2 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 1 ]; } } #endif if ( Idx == 0 ){ for( j=0; j<3; j++){ vtmp[ blockIdx.x+j*n ] = temp[ j*blockDim.x ]; } } } // block reduction for 3 vectors __global__ void magma_dmdotc3_kernel_2( int Gs, int n, double * vtmp, double * vtmp2 ) { extern __shared__ double temp[]; int Idx = threadIdx.x; int blockSize = 128; int gridSize = blockSize * 2 * gridDim.x; int j; for( j=0; j<3; j++){ int i = blockIdx.x * ( blockSize * 2 ) + Idx; temp[Idx+j*(blockSize)] = MAGMA_D_ZERO; while (i < Gs ) { temp[ Idx+j*(blockSize) ] += vtmp[ i+j*n ]; temp[ Idx+j*(blockSize) ] += ( i + (blockSize) < Gs ) ? vtmp[ i+j*n + (blockSize) ] : MAGMA_D_ZERO; i += gridSize; } } __syncthreads(); if ( Idx < 64 ){ for( j=0; j<3; j++){ temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 64 ]; } } __syncthreads(); #ifdef COMPLEX if( Idx < 32 ){ for( j=0; j<3; j++) temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 32 ]; __syncthreads(); for( j=0; j<3; j++) temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 16 ]; __syncthreads(); for( j=0; j<3; j++) temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 8 ]; __syncthreads(); for( j=0; j<3; j++) temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 4 ]; __syncthreads(); for( j=0; j<3; j++) temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 2 ]; __syncthreads(); for( j=0; j<3; j++) temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 1 ]; __syncthreads(); } #endif #ifdef REAL if( Idx < 32 ){ volatile double *temp2 = temp; for( j=0; j<3; j++){ temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 32 ]; temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 16 ]; temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 8 ]; temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 4 ]; temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 2 ]; temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 1 ]; } } #endif if ( Idx == 0 ){ for( j=0; j<3; j++){ vtmp2[ blockIdx.x+j*n ] = temp[ j*(blockSize) ]; } } } /** Purpose ------- Computes the scalar product of a set of 4 vectors such that skp[0,1,2,3] = [ <v_0,w_0>, <v_1,w_1>, <v_2,w_2>, <v3,w_3> ] Returns the vector skp. 
In case there are less dot products required, an easy workaround is given by doubling input. Arguments --------- @param[in] n int length of v_i and w_i @param[in] v0 magmaDouble_ptr input vector @param[in] w0 magmaDouble_ptr input vector @param[in] v1 magmaDouble_ptr input vector @param[in] w1 magmaDouble_ptr input vector @param[in] v2 magmaDouble_ptr input vector @param[in] w2 magmaDouble_ptr input vector @param[in] d1 magmaDouble_ptr workspace @param[in] d2 magmaDouble_ptr workspace @param[out] skp magmaDouble_ptr vector[3] of scalar products [<v_i, w_i>] This vector is located on the host @param[in] queue magma_queue_t Queue to execute in. @ingroup magmasparse_cblas ********************************************************************/ extern "C" magma_int_t magma_dmdotc3( magma_int_t n, magmaDouble_ptr v0, magmaDouble_ptr w0, magmaDouble_ptr v1, magmaDouble_ptr w1, magmaDouble_ptr v2, magmaDouble_ptr w2, magmaDouble_ptr d1, magmaDouble_ptr d2, magmaDouble_ptr skp, magma_queue_t queue ) { int local_block_size=256; dim3 Bs( local_block_size ); dim3 Gs( magma_ceildiv( n, local_block_size ) ); dim3 Gs_next; int Ms = 3 * (local_block_size) * sizeof( double ); // 4 skp magmaDouble_ptr aux1 = d1, aux2 = d2; int b = 1; // magma_dmdotc3_gpumemzero<<< Gs, Bs, 0, queue->cuda_stream() >>>( d1, n ); hipLaunchKernelGGL(( magma_dmdotc3_kernel_1), dim3(Gs), dim3(Bs), Ms, queue->cuda_stream() , Gs.x, n, v0, w0, v1, w1, v2, w2, d1 ); while( Gs.x > 1 ) { Gs_next.x = magma_ceildiv( Gs.x, Bs.x ); if ( Gs_next.x == 1 ) Gs_next.x = 2; hipLaunchKernelGGL(( magma_dmdotc3_kernel_2), dim3(Gs_next.x/2), dim3(Bs.x/2), Ms/2, queue->cuda_stream() , Gs.x, n, aux1, aux2 ); Gs_next.x = Gs_next.x /2; Gs.x = Gs_next.x; b = 1 - b; if ( b ) { aux1 = d1; aux2 = d2; } else { aux2 = d1; aux1 = d2; } } // copy vectors to host magma_dgetvector( 3 , aux1, n, skp, 1, queue ); return MAGMA_SUCCESS; } // 4 dot products // // initialize arrays with zero __global__ void magma_dmdotc4_gpumemzero( double * d, int n ) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < n) { for (int j = 0; j < 4; j++) d[ i+j*n ] = MAGMA_D_MAKE( 0.0, 0.0 ); } } // dot product for multiple vectors __global__ void magma_dmdotc4_kernel_1( int Gs, int n, double * v0, double * w0, double * v1, double * w1, double * v2, double * w2, double * v3, double * w3, double * vtmp) { extern __shared__ double temp[]; int Idx = threadIdx.x; int i = blockIdx.x * blockDim.x + Idx; int j; // 4 vectors v(i)/w(i) temp[ Idx ] = ( i < n ) ? v0[ i ] * w0[ i ] : MAGMA_D_ZERO; temp[ Idx + blockDim.x ] = ( i < n ) ? v1[ i ] * w1[ i ] : MAGMA_D_ZERO; temp[ Idx + 2*blockDim.x ] = ( i < n ) ? v2[ i ] * w2[ i ] : MAGMA_D_ZERO; temp[ Idx + 3*blockDim.x ] = ( i < n ) ? 
v3[ i ] * w3[ i ] : MAGMA_D_ZERO; __syncthreads(); if ( Idx < 128 ){ for( j=0; j<4; j++){ temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 128 ]; } } __syncthreads(); if ( Idx < 64 ){ for( j=0; j<4; j++){ temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 64 ]; } } __syncthreads(); #ifdef COMPLEX if( Idx < 32 ){ for( j=0; j<4; j++) temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 32 ]; __syncthreads(); for( j=0; j<4; j++) temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 16 ]; __syncthreads(); for( j=0; j<4; j++) temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 8 ]; __syncthreads(); for( j=0; j<4; j++) temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 4 ]; __syncthreads(); for( j=0; j<4; j++) temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 2 ]; __syncthreads(); for( j=0; j<4; j++) temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 1 ]; __syncthreads(); } #endif #ifdef REAL if( Idx < 32 ){ volatile double *temp2 = temp; for( j=0; j<4; j++){ temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 32 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 16 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 8 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 4 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 2 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 1 ]; } } #endif if ( Idx == 0 ){ for( j=0; j<4; j++){ vtmp[ blockIdx.x+j*n ] = temp[ j*blockDim.x ]; } } } // block reduction for 4 vectors __global__ void magma_dmdotc4_kernel_2( int Gs, int n, double * vtmp, double * vtmp2 ) { extern __shared__ double temp[]; int Idx = threadIdx.x; int blockSize = 128; int gridSize = blockSize * 2 * gridDim.x; int j; for( j=0; j<4; j++){ int i = blockIdx.x * ( blockSize * 2 ) + Idx; temp[Idx+j*(blockSize)] = MAGMA_D_ZERO; while (i < Gs ) { temp[ Idx+j*(blockSize) ] += vtmp[ i+j*n ]; temp[ Idx+j*(blockSize) ] += ( i + (blockSize) < Gs ) ? vtmp[ i+j*n + (blockSize) ] : MAGMA_D_ZERO; i += gridSize; } } __syncthreads(); if ( Idx < 64 ){ for( j=0; j<4; j++){ temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 64 ]; } } __syncthreads(); #ifdef COMPLEX if( Idx < 32 ){ for( j=0; j<4; j++) temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 32 ]; __syncthreads(); for( j=0; j<4; j++) temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 16 ]; __syncthreads(); for( j=0; j<4; j++) temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 8 ]; __syncthreads(); for( j=0; j<4; j++) temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 4 ]; __syncthreads(); for( j=0; j<4; j++) temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 2 ]; __syncthreads(); for( j=0; j<4; j++) temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 1 ]; __syncthreads(); } #endif #ifdef REAL if( Idx < 32 ){ volatile double *temp2 = temp; for( j=0; j<4; j++){ temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 32 ]; temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 16 ]; temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 8 ]; temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 4 ]; temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 2 ]; temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 1 ]; } } #endif if ( Idx == 0 ){ for( j=0; j<4; j++){ vtmp2[ blockIdx.x+j*n ] = temp[ j*(blockSize) ]; } } } /** Purpose ------- Computes the scalar product of a set of 4 vectors such that skp[0,1,2,3] = [ <v_0,w_0>, <v_1,w_1>, <v_2,w_2>, <v3,w_3> ] Returns the vector skp. 
In case there are less dot products required, an easy workaround is given by doubling input. Arguments --------- @param[in] n int length of v_i and w_i @param[in] v0 magmaDouble_ptr input vector @param[in] w0 magmaDouble_ptr input vector @param[in] v1 magmaDouble_ptr input vector @param[in] w1 magmaDouble_ptr input vector @param[in] v2 magmaDouble_ptr input vector @param[in] w2 magmaDouble_ptr input vector @param[in] v3 magmaDouble_ptr input vector @param[in] w3 magmaDouble_ptr input vector @param[in] d1 magmaDouble_ptr workspace @param[in] d2 magmaDouble_ptr workspace @param[out] skp magmaDouble_ptr vector[4] of scalar products [<v_i, w_i>] This vector is located on the host @param[in] queue magma_queue_t Queue to execute in. @ingroup magmasparse_dblas ********************************************************************/ extern "C" magma_int_t magma_dmdotc4( magma_int_t n, magmaDouble_ptr v0, magmaDouble_ptr w0, magmaDouble_ptr v1, magmaDouble_ptr w1, magmaDouble_ptr v2, magmaDouble_ptr w2, magmaDouble_ptr v3, magmaDouble_ptr w3, magmaDouble_ptr d1, magmaDouble_ptr d2, magmaDouble_ptr skp, magma_queue_t queue ) { int local_block_size=256; dim3 Bs( local_block_size ); dim3 Gs( magma_ceildiv( n, local_block_size ) ); dim3 Gs_next; int Ms = 4 * (local_block_size) * sizeof( double ); // 4 skp magmaDouble_ptr aux1 = d1, aux2 = d2; int b = 1; hipLaunchKernelGGL(( magma_dmdotc4_kernel_1), dim3(Gs), dim3(Bs), Ms, queue->cuda_stream() , Gs.x, n, v0, w0, v1, w1, v2, w2, v3, w3, d1 ); while( Gs.x > 1 ) { Gs_next.x = magma_ceildiv( Gs.x, Bs.x ); if ( Gs_next.x == 1 ) Gs_next.x = 2; hipLaunchKernelGGL(( magma_dmdotc4_kernel_2), dim3(Gs_next.x/2), dim3(Bs.x/2), Ms/2, queue->cuda_stream() , Gs.x, n, aux1, aux2 ); Gs_next.x = Gs_next.x /2; Gs.x = Gs_next.x; b = 1 - b; if ( b ) { aux1 = d1; aux2 = d2; } else { aux2 = d1; aux1 = d2; } } // copy vectors to host magma_dgetvector( 4 , aux1, n, skp, 1, queue ); return MAGMA_SUCCESS; }
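For context on how the interface above is meant to be driven, here is a hedged host-side sketch for magma_dmdotc4. The 4*n workspace size is inferred from the kernels (partial sums are written at offset blockIdx.x + j*n for j = 0..3); the wrapper name and the error handling are illustrative, not part of the MAGMA API.

/* Hedged usage sketch (not part of the MAGMA source above): drive
   magma_dmdotc4 from the host. Workspace buffers d1/d2 are sized 4*n,
   inferred from the kernel indexing blockIdx.x + j*n. */
extern "C" magma_int_t
compute_four_dots(
    magma_int_t n,
    magmaDouble_ptr v0, magmaDouble_ptr w0,
    magmaDouble_ptr v1, magmaDouble_ptr w1,
    magmaDouble_ptr v2, magmaDouble_ptr w2,
    magmaDouble_ptr v3, magmaDouble_ptr w3,
    double skp[4],              // host array, per the documentation above
    magma_queue_t queue )
{
    magma_int_t info = 0;
    magmaDouble_ptr d1 = NULL, d2 = NULL;
    if ( magma_dmalloc( &d1, 4*n ) != MAGMA_SUCCESS ||
         magma_dmalloc( &d2, 4*n ) != MAGMA_SUCCESS ) {
        info = MAGMA_ERR_DEVICE_ALLOC;
    } else {
        // skp receives [ <v0,w0>, <v1,w1>, <v2,w2>, <v3,w3> ] on the host
        info = magma_dmdotc4( n, v0, w0, v1, w1, v2, w2, v3, w3,
                              d1, d2, skp, queue );
    }
    magma_free( d1 );
    magma_free( d2 );
    return info;
}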
792b77744283656417e01b20f5216c4a7efd6467.cu
/* -- MAGMA (version 2.5.4) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date October 2020 @generated from sparse/blas/zmdotc.cu, normal z -> d, Thu Oct 8 23:05:48 2020 @author Hartwig Anzt */ #include "magmasparse_internal.h" #define BLOCK_SIZE 256 #define REAL // dot product for multiple vectors __global__ void magma_dmdotc1_kernel_1( int Gs, int n, double * v0, double * w0, double * vtmp) { extern __shared__ double temp[]; int Idx = threadIdx.x; int i = blockIdx.x * blockDim.x + Idx; // 1 vectors v(i)/w(i) temp[ Idx ] = ( i < n ) ? v0[ i ] * w0[ i ] : MAGMA_D_ZERO; __syncthreads(); if ( Idx < 128 ){ temp[ Idx ] += temp[ Idx + 128 ]; } __syncthreads(); if ( Idx < 64 ){ temp[ Idx ] += temp[ Idx + 64 ]; } __syncthreads(); #ifdef COMPLEX if( Idx < 32 ){ temp[ Idx ] += temp[ Idx + 32 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 16 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 8 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 4 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 2 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 1 ]; __syncthreads(); } #endif #ifdef REAL if( Idx < 32 ){ volatile double *temp2 = temp; temp2[ Idx ] += temp2[ Idx + 32 ]; temp2[ Idx ] += temp2[ Idx + 16 ]; temp2[ Idx ] += temp2[ Idx + 8 ]; temp2[ Idx ] += temp2[ Idx + 4 ]; temp2[ Idx ] += temp2[ Idx + 2 ]; temp2[ Idx ] += temp2[ Idx + 1 ]; } #endif if ( Idx == 0 ){ vtmp[ blockIdx.x ] = temp[ 0 ]; } } // block reduction for 1 vectors __global__ void magma_dmdotc1_kernel_2( int Gs, int n, double * vtmp, double * vtmp2 ) { extern __shared__ double temp[]; int Idx = threadIdx.x; int blockSize = 128; int gridSize = blockSize * 2 * gridDim.x; int i = blockIdx.x * ( blockSize * 2 ) + Idx; temp[Idx] = MAGMA_D_ZERO; while (i < Gs ) { temp[ Idx ] += vtmp[ i ]; temp[ Idx ] += ( i + (blockSize) < Gs ) ? vtmp[ i + (blockSize) ] : MAGMA_D_ZERO; i += gridSize; } __syncthreads(); if ( Idx < 64 ){ temp[ Idx ] += temp[ Idx + 64 ]; } __syncthreads(); #ifdef COMPLEX if( Idx < 32 ){ temp[ Idx ] += temp[ Idx + 32 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 16 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 8 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 4 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 2 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 1 ]; __syncthreads(); } #endif #ifdef REAL if( Idx < 32 ){ volatile double *temp2 = temp; temp2[ Idx ] += temp2[ Idx + 32 ]; temp2[ Idx ] += temp2[ Idx + 16 ]; temp2[ Idx ] += temp2[ Idx + 8 ]; temp2[ Idx ] += temp2[ Idx + 4 ]; temp2[ Idx ] += temp2[ Idx + 2 ]; temp2[ Idx ] += temp2[ Idx + 1 ]; } #endif if ( Idx == 0 ){ vtmp2[ blockIdx.x ] = temp[ 0 ]; } } /** Purpose ------- Computes the scalar product of a set of 1 vectors such that skp[0] = [ <v_0,w_0> ] Returns the vector skp. In case there are less dot products required, an easy workaround is given by doubling input. Arguments --------- @param[in] n int length of v_i and w_i @param[in] v0 magmaDouble_ptr input vector @param[in] w0 magmaDouble_ptr input vector @param[in] d1 magmaDouble_ptr workspace @param[in] d2 magmaDouble_ptr workspace @param[out] skp magmaDouble_ptr vector[4] of scalar products [<v_i, w_i>] This vector is located on the host @param[in] queue magma_queue_t Queue to execute in. 
@ingroup magmasparse_cblas ********************************************************************/ extern "C" magma_int_t magma_dmdotc1( magma_int_t n, magmaDouble_ptr v0, magmaDouble_ptr w0, magmaDouble_ptr d1, magmaDouble_ptr d2, magmaDouble_ptr skp, magma_queue_t queue ) { int local_block_size=256; dim3 Bs( local_block_size ); dim3 Gs( magma_ceildiv( n, local_block_size ) ); dim3 Gs_next; int Ms = (local_block_size) * sizeof( double ); // 1 skp magmaDouble_ptr aux1 = d1, aux2 = d2; int b = 1; magma_dmdotc1_kernel_1<<< Gs, Bs, Ms, queue->cuda_stream() >>> ( Gs.x, n, v0, w0, d1 ); while( Gs.x > 1 ) { Gs_next.x = magma_ceildiv( Gs.x, Bs.x ); if ( Gs_next.x == 1 ) Gs_next.x = 2; magma_dmdotc1_kernel_2<<< Gs_next.x/2, Bs.x/2, Ms/2, queue->cuda_stream() >>> ( Gs.x, n, aux1, aux2 ); Gs_next.x = Gs_next.x /2; Gs.x = Gs_next.x; b = 1 - b; if ( b ) { aux1 = d1; aux2 = d2; } else { aux2 = d1; aux1 = d2; } } // copy vectors to host magma_dgetvector( 1 , aux1, 1, skp, 1, queue ); return MAGMA_SUCCESS; } // 2 dot products // // initialize arrays with zero __global__ void magma_dmdotc2_gpumemzero( double * d, int n ) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < n) { for (int j = 0; j < 2; j++) d[ i+j*n ] = MAGMA_D_MAKE( 0.0, 0.0 ); } } // dot product for multiple vectors __global__ void magma_dmdotc2_kernel_1( int Gs, int n, double * v0, double * w0, double * v1, double * w1, double * vtmp) { extern __shared__ double temp[]; int Idx = threadIdx.x; int i = blockIdx.x * blockDim.x + Idx; int j; // 2 vectors v(i)/w(i) temp[ Idx ] = ( i < n ) ? v0[ i ] * w0[ i ] : MAGMA_D_ZERO; temp[ Idx + blockDim.x ] = ( i < n ) ? v1[ i ] * w1[ i ] : MAGMA_D_ZERO; __syncthreads(); if ( Idx < 128 ){ for( j=0; j<2; j++){ temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 128 ]; } } __syncthreads(); if ( Idx < 64 ){ for( j=0; j<2; j++){ temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 64 ]; } } __syncthreads(); #ifdef COMPLEX if( Idx < 32 ){ for( j=0; j<2; j++) temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 32 ]; __syncthreads(); for( j=0; j<2; j++) temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 16 ]; __syncthreads(); for( j=0; j<2; j++) temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 8 ]; __syncthreads(); for( j=0; j<2; j++) temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 4 ]; __syncthreads(); for( j=0; j<2; j++) temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 2 ]; __syncthreads(); for( j=0; j<2; j++) temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 1 ]; __syncthreads(); } #endif #ifdef REAL if( Idx < 32 ){ volatile double *temp2 = temp; for( j=0; j<2; j++){ temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 32 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 16 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 8 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 4 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 2 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 1 ]; } } #endif if ( Idx == 0 ){ for( j=0; j<2; j++){ vtmp[ blockIdx.x+j*n ] = temp[ j*blockDim.x ]; } } } // block reduction for 2 vectors __global__ void magma_dmdotc2_kernel_2( int Gs, int n, double * vtmp, double * vtmp2 ) { extern __shared__ double temp[]; int Idx = threadIdx.x; int blockSize = 128; int gridSize = blockSize * 2 * gridDim.x; int j; for( j=0; j<2; j++){ int i = blockIdx.x * ( blockSize * 2 ) + Idx; temp[Idx+j*(blockSize)] = MAGMA_D_ZERO; while (i < Gs ) { temp[ Idx+j*(blockSize) ] += vtmp[ i+j*n ]; temp[ Idx+j*(blockSize) ] += ( i + (blockSize) < Gs ) 
? vtmp[ i+j*n + (blockSize) ] : MAGMA_D_ZERO; i += gridSize; } } __syncthreads(); if ( Idx < 64 ){ for( j=0; j<2; j++){ temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 64 ]; } } __syncthreads(); #ifdef COMPLEX if( Idx < 32 ){ for( j=0; j<2; j++) temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 32 ]; __syncthreads(); for( j=0; j<2; j++) temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 16 ]; __syncthreads(); for( j=0; j<2; j++) temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 8 ]; __syncthreads(); for( j=0; j<2; j++) temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 4 ]; __syncthreads(); for( j=0; j<2; j++) temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 2 ]; __syncthreads(); for( j=0; j<2; j++) temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 1 ]; __syncthreads(); } #endif #ifdef REAL if( Idx < 32 ){ volatile double *temp2 = temp; for( j=0; j<2; j++){ temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 32 ]; temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 16 ]; temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 8 ]; temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 4 ]; temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 2 ]; temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 1 ]; } } #endif if ( Idx == 0 ){ for( j=0; j<2; j++){ vtmp2[ blockIdx.x+j*n ] = temp[ j*(blockSize) ]; } } } /** Purpose ------- Computes the scalar product of a set of 2 vectors such that skp[0,1,2,3] = [ <v_0,w_0>, <v_1,w_1> ] Returns the vector skp. In case there are less dot products required, an easy workaround is given by doubling input. Arguments --------- @param[in] n int length of v_i and w_i @param[in] v0 magmaDouble_ptr input vector @param[in] w0 magmaDouble_ptr input vector @param[in] v1 magmaDouble_ptr input vector @param[in] w1 magmaDouble_ptr input vector @param[in] d1 magmaDouble_ptr workspace @param[in] d2 magmaDouble_ptr workspace @param[out] skp magmaDouble_ptr vector[3] of scalar products [<v_i, w_i>] This vector is located on the host @param[in] queue magma_queue_t Queue to execute in. 
@ingroup magmasparse_cblas ********************************************************************/ extern "C" magma_int_t magma_dmdotc2( magma_int_t n, magmaDouble_ptr v0, magmaDouble_ptr w0, magmaDouble_ptr v1, magmaDouble_ptr w1, magmaDouble_ptr d1, magmaDouble_ptr d2, magmaDouble_ptr skp, magma_queue_t queue ) { int local_block_size=256; dim3 Bs( local_block_size ); dim3 Gs( magma_ceildiv( n, local_block_size ) ); dim3 Gs_next; int Ms = 2 * (local_block_size) * sizeof( double ); // 4 skp magmaDouble_ptr aux1 = d1, aux2 = d2; int b = 1; magma_dmdotc2_kernel_1<<< Gs, Bs, Ms, queue->cuda_stream() >>> ( Gs.x, n, v0, w0, v1, w1, d1 ); while( Gs.x > 1 ) { Gs_next.x = magma_ceildiv( Gs.x, Bs.x ); if ( Gs_next.x == 1 ) Gs_next.x = 2; magma_dmdotc2_kernel_2<<< Gs_next.x/2, Bs.x/2, Ms/2, queue->cuda_stream() >>> ( Gs.x, n, aux1, aux2 ); Gs_next.x = Gs_next.x /2; Gs.x = Gs_next.x; b = 1 - b; if ( b ) { aux1 = d1; aux2 = d2; } else { aux2 = d1; aux1 = d2; } } // copy vectors to host magma_dgetvector( 2 , aux1, n, skp, 1, queue ); return MAGMA_SUCCESS; } // 3 dot products // // initialize arrays with zero __global__ void magma_dmdotc3_gpumemzero( double * d, int n ) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < n) { for (int j = 0; j < 3; j++) d[ i+j*n ] = MAGMA_D_MAKE( 0.0, 0.0 ); } } // dot product for multiple vectors __global__ void magma_dmdotc3_kernel_1( int Gs, int n, double * v0, double * w0, double * v1, double * w1, double * v2, double * w2, double * vtmp) { extern __shared__ double temp[]; int Idx = threadIdx.x; int i = blockIdx.x * blockDim.x + Idx; int j; // 3 vectors v(i)/w(i) temp[ Idx ] = ( i < n ) ? v0[ i ] * w0[ i ] : MAGMA_D_ZERO; temp[ Idx + blockDim.x ] = ( i < n ) ? v1[ i ] * w1[ i ] : MAGMA_D_ZERO; temp[ Idx + 2*blockDim.x ] = ( i < n ) ? 
v2[ i ] * w2[ i ] : MAGMA_D_ZERO; __syncthreads(); if ( Idx < 128 ){ for( j=0; j<3; j++){ temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 128 ]; } } __syncthreads(); if ( Idx < 64 ){ for( j=0; j<3; j++){ temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 64 ]; } } __syncthreads(); #ifdef COMPLEX if( Idx < 32 ){ for( j=0; j<3; j++) temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 32 ]; __syncthreads(); for( j=0; j<3; j++) temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 16 ]; __syncthreads(); for( j=0; j<3; j++) temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 8 ]; __syncthreads(); for( j=0; j<3; j++) temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 4 ]; __syncthreads(); for( j=0; j<3; j++) temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 2 ]; __syncthreads(); for( j=0; j<3; j++) temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 1 ]; __syncthreads(); } #endif #ifdef REAL if( Idx < 32 ){ volatile double *temp2 = temp; for( j=0; j<3; j++){ temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 32 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 16 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 8 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 4 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 2 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 1 ]; } } #endif if ( Idx == 0 ){ for( j=0; j<3; j++){ vtmp[ blockIdx.x+j*n ] = temp[ j*blockDim.x ]; } } } // block reduction for 3 vectors __global__ void magma_dmdotc3_kernel_2( int Gs, int n, double * vtmp, double * vtmp2 ) { extern __shared__ double temp[]; int Idx = threadIdx.x; int blockSize = 128; int gridSize = blockSize * 2 * gridDim.x; int j; for( j=0; j<3; j++){ int i = blockIdx.x * ( blockSize * 2 ) + Idx; temp[Idx+j*(blockSize)] = MAGMA_D_ZERO; while (i < Gs ) { temp[ Idx+j*(blockSize) ] += vtmp[ i+j*n ]; temp[ Idx+j*(blockSize) ] += ( i + (blockSize) < Gs ) ? vtmp[ i+j*n + (blockSize) ] : MAGMA_D_ZERO; i += gridSize; } } __syncthreads(); if ( Idx < 64 ){ for( j=0; j<3; j++){ temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 64 ]; } } __syncthreads(); #ifdef COMPLEX if( Idx < 32 ){ for( j=0; j<3; j++) temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 32 ]; __syncthreads(); for( j=0; j<3; j++) temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 16 ]; __syncthreads(); for( j=0; j<3; j++) temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 8 ]; __syncthreads(); for( j=0; j<3; j++) temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 4 ]; __syncthreads(); for( j=0; j<3; j++) temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 2 ]; __syncthreads(); for( j=0; j<3; j++) temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 1 ]; __syncthreads(); } #endif #ifdef REAL if( Idx < 32 ){ volatile double *temp2 = temp; for( j=0; j<3; j++){ temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 32 ]; temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 16 ]; temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 8 ]; temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 4 ]; temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 2 ]; temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 1 ]; } } #endif if ( Idx == 0 ){ for( j=0; j<3; j++){ vtmp2[ blockIdx.x+j*n ] = temp[ j*(blockSize) ]; } } } /** Purpose ------- Computes the scalar product of a set of 4 vectors such that skp[0,1,2,3] = [ <v_0,w_0>, <v_1,w_1>, <v_2,w_2>, <v3,w_3> ] Returns the vector skp. 
In case there are less dot products required, an easy workaround is given by doubling input. Arguments --------- @param[in] n int length of v_i and w_i @param[in] v0 magmaDouble_ptr input vector @param[in] w0 magmaDouble_ptr input vector @param[in] v1 magmaDouble_ptr input vector @param[in] w1 magmaDouble_ptr input vector @param[in] v2 magmaDouble_ptr input vector @param[in] w2 magmaDouble_ptr input vector @param[in] d1 magmaDouble_ptr workspace @param[in] d2 magmaDouble_ptr workspace @param[out] skp magmaDouble_ptr vector[3] of scalar products [<v_i, w_i>] This vector is located on the host @param[in] queue magma_queue_t Queue to execute in. @ingroup magmasparse_cblas ********************************************************************/ extern "C" magma_int_t magma_dmdotc3( magma_int_t n, magmaDouble_ptr v0, magmaDouble_ptr w0, magmaDouble_ptr v1, magmaDouble_ptr w1, magmaDouble_ptr v2, magmaDouble_ptr w2, magmaDouble_ptr d1, magmaDouble_ptr d2, magmaDouble_ptr skp, magma_queue_t queue ) { int local_block_size=256; dim3 Bs( local_block_size ); dim3 Gs( magma_ceildiv( n, local_block_size ) ); dim3 Gs_next; int Ms = 3 * (local_block_size) * sizeof( double ); // 4 skp magmaDouble_ptr aux1 = d1, aux2 = d2; int b = 1; // magma_dmdotc3_gpumemzero<<< Gs, Bs, 0, queue->cuda_stream() >>>( d1, n ); magma_dmdotc3_kernel_1<<< Gs, Bs, Ms, queue->cuda_stream() >>> ( Gs.x, n, v0, w0, v1, w1, v2, w2, d1 ); while( Gs.x > 1 ) { Gs_next.x = magma_ceildiv( Gs.x, Bs.x ); if ( Gs_next.x == 1 ) Gs_next.x = 2; magma_dmdotc3_kernel_2<<< Gs_next.x/2, Bs.x/2, Ms/2, queue->cuda_stream() >>> ( Gs.x, n, aux1, aux2 ); Gs_next.x = Gs_next.x /2; Gs.x = Gs_next.x; b = 1 - b; if ( b ) { aux1 = d1; aux2 = d2; } else { aux2 = d1; aux1 = d2; } } // copy vectors to host magma_dgetvector( 3 , aux1, n, skp, 1, queue ); return MAGMA_SUCCESS; } // 4 dot products // // initialize arrays with zero __global__ void magma_dmdotc4_gpumemzero( double * d, int n ) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < n) { for (int j = 0; j < 4; j++) d[ i+j*n ] = MAGMA_D_MAKE( 0.0, 0.0 ); } } // dot product for multiple vectors __global__ void magma_dmdotc4_kernel_1( int Gs, int n, double * v0, double * w0, double * v1, double * w1, double * v2, double * w2, double * v3, double * w3, double * vtmp) { extern __shared__ double temp[]; int Idx = threadIdx.x; int i = blockIdx.x * blockDim.x + Idx; int j; // 4 vectors v(i)/w(i) temp[ Idx ] = ( i < n ) ? v0[ i ] * w0[ i ] : MAGMA_D_ZERO; temp[ Idx + blockDim.x ] = ( i < n ) ? v1[ i ] * w1[ i ] : MAGMA_D_ZERO; temp[ Idx + 2*blockDim.x ] = ( i < n ) ? v2[ i ] * w2[ i ] : MAGMA_D_ZERO; temp[ Idx + 3*blockDim.x ] = ( i < n ) ? 
v3[ i ] * w3[ i ] : MAGMA_D_ZERO; __syncthreads(); if ( Idx < 128 ){ for( j=0; j<4; j++){ temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 128 ]; } } __syncthreads(); if ( Idx < 64 ){ for( j=0; j<4; j++){ temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 64 ]; } } __syncthreads(); #ifdef COMPLEX if( Idx < 32 ){ for( j=0; j<4; j++) temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 32 ]; __syncthreads(); for( j=0; j<4; j++) temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 16 ]; __syncthreads(); for( j=0; j<4; j++) temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 8 ]; __syncthreads(); for( j=0; j<4; j++) temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 4 ]; __syncthreads(); for( j=0; j<4; j++) temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 2 ]; __syncthreads(); for( j=0; j<4; j++) temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 1 ]; __syncthreads(); } #endif #ifdef REAL if( Idx < 32 ){ volatile double *temp2 = temp; for( j=0; j<4; j++){ temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 32 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 16 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 8 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 4 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 2 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 1 ]; } } #endif if ( Idx == 0 ){ for( j=0; j<4; j++){ vtmp[ blockIdx.x+j*n ] = temp[ j*blockDim.x ]; } } } // block reduction for 4 vectors __global__ void magma_dmdotc4_kernel_2( int Gs, int n, double * vtmp, double * vtmp2 ) { extern __shared__ double temp[]; int Idx = threadIdx.x; int blockSize = 128; int gridSize = blockSize * 2 * gridDim.x; int j; for( j=0; j<4; j++){ int i = blockIdx.x * ( blockSize * 2 ) + Idx; temp[Idx+j*(blockSize)] = MAGMA_D_ZERO; while (i < Gs ) { temp[ Idx+j*(blockSize) ] += vtmp[ i+j*n ]; temp[ Idx+j*(blockSize) ] += ( i + (blockSize) < Gs ) ? vtmp[ i+j*n + (blockSize) ] : MAGMA_D_ZERO; i += gridSize; } } __syncthreads(); if ( Idx < 64 ){ for( j=0; j<4; j++){ temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 64 ]; } } __syncthreads(); #ifdef COMPLEX if( Idx < 32 ){ for( j=0; j<4; j++) temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 32 ]; __syncthreads(); for( j=0; j<4; j++) temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 16 ]; __syncthreads(); for( j=0; j<4; j++) temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 8 ]; __syncthreads(); for( j=0; j<4; j++) temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 4 ]; __syncthreads(); for( j=0; j<4; j++) temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 2 ]; __syncthreads(); for( j=0; j<4; j++) temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 1 ]; __syncthreads(); } #endif #ifdef REAL if( Idx < 32 ){ volatile double *temp2 = temp; for( j=0; j<4; j++){ temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 32 ]; temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 16 ]; temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 8 ]; temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 4 ]; temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 2 ]; temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 1 ]; } } #endif if ( Idx == 0 ){ for( j=0; j<4; j++){ vtmp2[ blockIdx.x+j*n ] = temp[ j*(blockSize) ]; } } } /** Purpose ------- Computes the scalar product of a set of 4 vectors such that skp[0,1,2,3] = [ <v_0,w_0>, <v_1,w_1>, <v_2,w_2>, <v3,w_3> ] Returns the vector skp. 
In case there are less dot products required, an easy workaround is given by doubling input. Arguments --------- @param[in] n int length of v_i and w_i @param[in] v0 magmaDouble_ptr input vector @param[in] w0 magmaDouble_ptr input vector @param[in] v1 magmaDouble_ptr input vector @param[in] w1 magmaDouble_ptr input vector @param[in] v2 magmaDouble_ptr input vector @param[in] w2 magmaDouble_ptr input vector @param[in] v3 magmaDouble_ptr input vector @param[in] w3 magmaDouble_ptr input vector @param[in] d1 magmaDouble_ptr workspace @param[in] d2 magmaDouble_ptr workspace @param[out] skp magmaDouble_ptr vector[4] of scalar products [<v_i, w_i>] This vector is located on the host @param[in] queue magma_queue_t Queue to execute in. @ingroup magmasparse_dblas ********************************************************************/ extern "C" magma_int_t magma_dmdotc4( magma_int_t n, magmaDouble_ptr v0, magmaDouble_ptr w0, magmaDouble_ptr v1, magmaDouble_ptr w1, magmaDouble_ptr v2, magmaDouble_ptr w2, magmaDouble_ptr v3, magmaDouble_ptr w3, magmaDouble_ptr d1, magmaDouble_ptr d2, magmaDouble_ptr skp, magma_queue_t queue ) { int local_block_size=256; dim3 Bs( local_block_size ); dim3 Gs( magma_ceildiv( n, local_block_size ) ); dim3 Gs_next; int Ms = 4 * (local_block_size) * sizeof( double ); // 4 skp magmaDouble_ptr aux1 = d1, aux2 = d2; int b = 1; magma_dmdotc4_kernel_1<<< Gs, Bs, Ms, queue->cuda_stream() >>> ( Gs.x, n, v0, w0, v1, w1, v2, w2, v3, w3, d1 ); while( Gs.x > 1 ) { Gs_next.x = magma_ceildiv( Gs.x, Bs.x ); if ( Gs_next.x == 1 ) Gs_next.x = 2; magma_dmdotc4_kernel_2<<< Gs_next.x/2, Bs.x/2, Ms/2, queue->cuda_stream() >>> ( Gs.x, n, aux1, aux2 ); Gs_next.x = Gs_next.x /2; Gs.x = Gs_next.x; b = 1 - b; if ( b ) { aux1 = d1; aux2 = d2; } else { aux2 = d1; aux1 = d2; } } // copy vectors to host magma_dgetvector( 4 , aux1, n, skp, 1, queue ); return MAGMA_SUCCESS; }
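All of the kernels above share one idiom: a per-block tree reduction in shared memory, finished by an unrolled warp stage that uses a volatile pointer under the REAL branch. The following minimal single-vector kernel isolates that idiom; it is a sketch that assumes a 256-thread block (matching BLOCK_SIZE) and the legacy pre-Volta warp-synchronous behavior the original relies on.

// Minimal sketch of the reduction idiom used by the kernels above:
// one partial dot product per block; blockDim.x is assumed to be 256.
// Launch as: dot_partial_kernel<<< grid, 256, 256*sizeof(double) >>>( ... );
__global__ void dot_partial_kernel(int n, const double *v, const double *w,
                                   double *partial)
{
    extern __shared__ double temp[];
    int idx = threadIdx.x;
    int i   = blockIdx.x * blockDim.x + idx;

    temp[idx] = (i < n) ? v[i] * w[i] : 0.0;
    __syncthreads();

    // Tree reduction in shared memory down to a single warp.
    for (int s = blockDim.x / 2; s > 32; s >>= 1) {
        if (idx < s) temp[idx] += temp[idx + s];
        __syncthreads();
    }
    // Final warp: the legacy volatile idiom used in this file.
    // (On Volta and newer, add __syncwarp() between steps or use shuffles.)
    if (idx < 32) {
        volatile double *t = temp;
        t[idx] += t[idx + 32]; t[idx] += t[idx + 16];
        t[idx] += t[idx +  8]; t[idx] += t[idx +  4];
        t[idx] += t[idx +  2]; t[idx] += t[idx +  1];
    }
    if (idx == 0) partial[blockIdx.x] = temp[0];
}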
bc742a6bd334e521d5ac16a24aef5062cc81cf40.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Udacity Homework 3 HDR Tone-mapping Background HDR ============== A High Dynamic Range (HDR) image contains a wider variation of intensity and color than is allowed by the RGB format with 1 byte per channel that we have used in the previous assignment. To store this extra information we use single precision floating point for each channel. This allows for an extremely wide range of intensity values. In the image for this assignment, the inside of church with light coming in through stained glass windows, the raw input floating point values for the channels range from 0 to 275. But the mean is .41 and 98% of the values are less than 3! This means that certain areas (the windows) are extremely bright compared to everywhere else. If we linearly map this [0-275] range into the [0-255] range that we have been using then most values will be mapped to zero! The only thing we will be able to see are the very brightest areas - the windows - everything else will appear pitch black. The problem is that although we have cameras capable of recording the wide range of intensity that exists in the real world our monitors are not capable of displaying them. Our eyes are also quite capable of observing a much wider range of intensities than our image formats / monitors are capable of displaying. Tone-mapping is a process that transforms the intensities in the image so that the brightest values aren't nearly so far away from the mean. That way when we transform the values into [0-255] we can actually see the entire image. There are many ways to perform this process and it is as much an art as a science - there is no single "right" answer. In this homework we will implement one possible technique. Background Chrominance-Luminance ================================ The RGB space that we have been using to represent images can be thought of as one possible set of axes spanning a three dimensional space of color. We sometimes choose other axes to represent this space because they make certain operations more convenient. Another possible way of representing a color image is to separate the color information (chromaticity) from the brightness information. There are multiple different methods for doing this - a common one during the analog television days was known as Chrominance-Luminance or YUV. We choose to represent the image in this way so that we can remap only the intensity channel and then recombine the new intensity values with the color information to form the final image. Old TV signals used to be transmitted in this way so that black & white televisions could display the luminance channel while color televisions would display all three of the channels. Tone-mapping ============ In this assignment we are going to transform the luminance channel (actually the log of the luminance, but this is unimportant for the parts of the algorithm that you will be implementing) by compressing its range to [0, 1]. To do this we need the cumulative distribution of the luminance values. Example ------- input : [2 4 3 3 1 7 4 5 7 0 9 4 3 2] min / max / range: 0 / 9 / 9 histo with 3 bins: [4 7 3] cdf : [4 11 14] Your task is to calculate this cumulative distribution by following these steps. 
*/ #include "utils.h" #include <stdio.h> #define min(a,b) (((a)<(b))?(a):(b)) #define max(a,b) (((a)>(b))?(a):(b)) __global__ void shmem_reduce_kernel(float * d_out, const float * d_in, int n, int op) { // sdata is allocated in the kernel call: 3rd arg to <<<b, t, shmem>>> extern __shared__ float sdata[]; int myId = threadIdx.x + blockDim.x * blockIdx.x; int tid = threadIdx.x; if (myId >= n) return; // load shared mem from global mem sdata[tid] = d_in[myId]; __syncthreads(); // make sure entire block is loaded! // do reduction in shared mem for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1) { if (tid < s) { if (op == 0) sdata[tid] = min(sdata[tid], sdata[tid + s]); else if (op == 1) sdata[tid] = max(sdata[tid], sdata[tid + s]); } __syncthreads(); // make sure all adds at one stage are done! } // only thread 0 writes result for this block back to global mem if (tid == 0) { d_out[blockIdx.x] = sdata[0]; } } __global__ void histogram(const float * const d_logLuminance, unsigned int *d_hist, float lumMin, const float range, const int numBins) { int myId = threadIdx.x + blockIdx.x * blockDim.x; float lum = d_logLuminance[myId]; int myBin = (lum - lumMin) / range * numBins; atomicAdd(&(d_hist[myBin]), 1); } __global__ void exclusiveScan(unsigned int * d_hist, unsigned int * const d_cdf, const int numBins) { extern __shared__ unsigned int tmp[]; int tid = threadIdx.x; tmp[tid] = (tid > 0) ? d_hist[tid - 1] : 0; __syncthreads(); for(int s = 1; s < numBins; s *= 2) { unsigned int t = tmp[tid]; __syncthreads(); if(tid + s < numBins) { tmp[tid + s] += t; } __syncthreads(); } d_cdf[tid] = tmp[tid]; } void your_histogram_and_prefixsum(const float* const d_logLuminance, unsigned int* const d_cdf, float &min_logLum, float &max_logLum, const size_t numRows, const size_t numCols, const size_t numBins) { /*1) find the minimum and maximum value in the input logLuminance channel store in min_logLum and max_logLum */ int n = numRows * numCols; // declare GPU memory pointers float * d_intermediate; float * d_temp; // allocate GPU memory checkCudaErrors(hipMalloc((void **) &d_intermediate, n*sizeof(float))); checkCudaErrors(hipMalloc((void **) &d_temp, sizeof(float))); const int maxThreadsPerBlock = 1024; int threads = maxThreadsPerBlock; int blocks = n / maxThreadsPerBlock; int shared = threads * sizeof(float); hipLaunchKernelGGL(( shmem_reduce_kernel), dim3(blocks), dim3(threads), shared, 0, d_intermediate, d_logLuminance, n, 0); hipLaunchKernelGGL(( shmem_reduce_kernel), dim3(1), dim3(blocks), shared, 0, d_temp, d_intermediate, n, 0); checkCudaErrors(hipMemcpy(&min_logLum, d_temp, sizeof(float), hipMemcpyDeviceToHost)); hipLaunchKernelGGL(( shmem_reduce_kernel), dim3(blocks), dim3(threads), shared, 0, d_intermediate, d_logLuminance, n, 1); hipLaunchKernelGGL(( shmem_reduce_kernel), dim3(1), dim3(blocks), shared, 0, d_temp, d_intermediate, n, 1); checkCudaErrors(hipMemcpy(&max_logLum, d_temp, sizeof(float), hipMemcpyDeviceToHost)); /*2) subtract them to find the range */ float range = max_logLum - min_logLum; /*3) generate a histogram of all the values in the logLuminance channel using the formula: bin = (lum[i] - lumMin) / lumRange * numBins */ unsigned int * d_hist; checkCudaErrors(hipMalloc((void**) &d_hist, sizeof(unsigned int) * numBins)); checkCudaErrors(hipMemset(d_hist, 0, sizeof(int)*numBins)); hipLaunchKernelGGL(( histogram), dim3(blocks), dim3(threads), 0, 0, d_logLuminance, d_hist, min_logLum, range, numBins); /*4) Perform an exclusive scan (prefix sum) on the histogram to get the cumulative 
distribution of luminance values (this should go in the incoming d_cdf pointer which already has been allocated for you) */ hipLaunchKernelGGL(( exclusiveScan), dim3(1), dim3(threads), sizeof(unsigned int) * threads, 0, d_hist, d_cdf, numBins); }
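A host-only reference for the worked example in the long comment above: it reproduces the 3-bin histogram [4 7 3] and prints both scans, since the comment's cdf [4 11 14] is an inclusive scan while step 4 asks for an exclusive one. The clamp on the maximum value is an addition needed to keep the bin of 9 in range; everything else follows the stated formula.

/* Host-only reference for the worked example in the header comment.
   Not part of the assignment code; useful for sanity-checking the kernels. */
#include <stdio.h>

int main(void)
{
    const float in[] = {2, 4, 3, 3, 1, 7, 4, 5, 7, 0, 9, 4, 3, 2};
    const int n = sizeof(in) / sizeof(in[0]);
    const int numBins = 3;

    float lo = in[0], hi = in[0];
    for (int i = 1; i < n; i++) {
        if (in[i] < lo) lo = in[i];
        if (in[i] > hi) hi = in[i];
    }
    float range = hi - lo;                        /* min / max / range: 0 / 9 / 9 */

    unsigned int hist[3] = {0}, cdf_excl[3], cdf_incl[3];
    for (int i = 0; i < n; i++) {
        int bin = (in[i] - lo) / range * numBins; /* bin = (lum - lumMin) / lumRange * numBins */
        if (bin == numBins) bin = numBins - 1;    /* clamp the maximum value */
        hist[bin]++;                              /* -> [4 7 3] */
    }
    unsigned int run = 0;
    for (int b = 0; b < numBins; b++) {
        cdf_excl[b] = run;                        /* exclusive scan: [0 4 11] */
        run += hist[b];
        cdf_incl[b] = run;                        /* inclusive scan: [4 11 14] */
    }
    printf("hist:          %u %u %u\n", hist[0], hist[1], hist[2]);
    printf("exclusive cdf: %u %u %u\n", cdf_excl[0], cdf_excl[1], cdf_excl[2]);
    printf("inclusive cdf: %u %u %u\n", cdf_incl[0], cdf_incl[1], cdf_incl[2]);
    return 0;
}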
bc742a6bd334e521d5ac16a24aef5062cc81cf40.cu
/* Udacity Homework 3 HDR Tone-mapping Background HDR ============== A High Dynamic Range (HDR) image contains a wider variation of intensity and color than is allowed by the RGB format with 1 byte per channel that we have used in the previous assignment. To store this extra information we use single precision floating point for each channel. This allows for an extremely wide range of intensity values. In the image for this assignment, the inside of church with light coming in through stained glass windows, the raw input floating point values for the channels range from 0 to 275. But the mean is .41 and 98% of the values are less than 3! This means that certain areas (the windows) are extremely bright compared to everywhere else. If we linearly map this [0-275] range into the [0-255] range that we have been using then most values will be mapped to zero! The only thing we will be able to see are the very brightest areas - the windows - everything else will appear pitch black. The problem is that although we have cameras capable of recording the wide range of intensity that exists in the real world our monitors are not capable of displaying them. Our eyes are also quite capable of observing a much wider range of intensities than our image formats / monitors are capable of displaying. Tone-mapping is a process that transforms the intensities in the image so that the brightest values aren't nearly so far away from the mean. That way when we transform the values into [0-255] we can actually see the entire image. There are many ways to perform this process and it is as much an art as a science - there is no single "right" answer. In this homework we will implement one possible technique. Background Chrominance-Luminance ================================ The RGB space that we have been using to represent images can be thought of as one possible set of axes spanning a three dimensional space of color. We sometimes choose other axes to represent this space because they make certain operations more convenient. Another possible way of representing a color image is to separate the color information (chromaticity) from the brightness information. There are multiple different methods for doing this - a common one during the analog television days was known as Chrominance-Luminance or YUV. We choose to represent the image in this way so that we can remap only the intensity channel and then recombine the new intensity values with the color information to form the final image. Old TV signals used to be transmitted in this way so that black & white televisions could display the luminance channel while color televisions would display all three of the channels. Tone-mapping ============ In this assignment we are going to transform the luminance channel (actually the log of the luminance, but this is unimportant for the parts of the algorithm that you will be implementing) by compressing its range to [0, 1]. To do this we need the cumulative distribution of the luminance values. Example ------- input : [2 4 3 3 1 7 4 5 7 0 9 4 3 2] min / max / range: 0 / 9 / 9 histo with 3 bins: [4 7 3] cdf : [4 11 14] Your task is to calculate this cumulative distribution by following these steps. 
*/ #include "utils.h" #include <stdio.h> #define min(a,b) (((a)<(b))?(a):(b)) #define max(a,b) (((a)>(b))?(a):(b)) __global__ void shmem_reduce_kernel(float * d_out, const float * d_in, int n, int op) { // sdata is allocated in the kernel call: 3rd arg to <<<b, t, shmem>>> extern __shared__ float sdata[]; int myId = threadIdx.x + blockDim.x * blockIdx.x; int tid = threadIdx.x; if (myId >= n) return; // load shared mem from global mem sdata[tid] = d_in[myId]; __syncthreads(); // make sure entire block is loaded! // do reduction in shared mem for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1) { if (tid < s) { if (op == 0) sdata[tid] = min(sdata[tid], sdata[tid + s]); else if (op == 1) sdata[tid] = max(sdata[tid], sdata[tid + s]); } __syncthreads(); // make sure all adds at one stage are done! } // only thread 0 writes result for this block back to global mem if (tid == 0) { d_out[blockIdx.x] = sdata[0]; } } __global__ void histogram(const float * const d_logLuminance, unsigned int *d_hist, float lumMin, const float range, const int numBins) { int myId = threadIdx.x + blockIdx.x * blockDim.x; float lum = d_logLuminance[myId]; int myBin = (lum - lumMin) / range * numBins; atomicAdd(&(d_hist[myBin]), 1); } __global__ void exclusiveScan(unsigned int * d_hist, unsigned int * const d_cdf, const int numBins) { extern __shared__ unsigned int tmp[]; int tid = threadIdx.x; tmp[tid] = (tid > 0) ? d_hist[tid - 1] : 0; __syncthreads(); for(int s = 1; s < numBins; s *= 2) { unsigned int t = tmp[tid]; __syncthreads(); if(tid + s < numBins) { tmp[tid + s] += t; } __syncthreads(); } d_cdf[tid] = tmp[tid]; } void your_histogram_and_prefixsum(const float* const d_logLuminance, unsigned int* const d_cdf, float &min_logLum, float &max_logLum, const size_t numRows, const size_t numCols, const size_t numBins) { /*1) find the minimum and maximum value in the input logLuminance channel store in min_logLum and max_logLum */ int n = numRows * numCols; // declare GPU memory pointers float * d_intermediate; float * d_temp; // allocate GPU memory checkCudaErrors(cudaMalloc((void **) &d_intermediate, n*sizeof(float))); checkCudaErrors(cudaMalloc((void **) &d_temp, sizeof(float))); const int maxThreadsPerBlock = 1024; int threads = maxThreadsPerBlock; int blocks = n / maxThreadsPerBlock; int shared = threads * sizeof(float); shmem_reduce_kernel<<<blocks, threads, shared>>> (d_intermediate, d_logLuminance, n, 0); shmem_reduce_kernel<<<1, blocks, shared>>> (d_temp, d_intermediate, n, 0); checkCudaErrors(cudaMemcpy(&min_logLum, d_temp, sizeof(float), cudaMemcpyDeviceToHost)); shmem_reduce_kernel<<<blocks, threads, shared>>> (d_intermediate, d_logLuminance, n, 1); shmem_reduce_kernel<<<1, blocks, shared>>> (d_temp, d_intermediate, n, 1); checkCudaErrors(cudaMemcpy(&max_logLum, d_temp, sizeof(float), cudaMemcpyDeviceToHost)); /*2) subtract them to find the range */ float range = max_logLum - min_logLum; /*3) generate a histogram of all the values in the logLuminance channel using the formula: bin = (lum[i] - lumMin) / lumRange * numBins */ unsigned int * d_hist; checkCudaErrors(cudaMalloc((void**) &d_hist, sizeof(unsigned int) * numBins)); checkCudaErrors(cudaMemset(d_hist, 0, sizeof(int)*numBins)); histogram<<<blocks, threads>>> (d_logLuminance, d_hist, min_logLum, range, numBins); /*4) Perform an exclusive scan (prefix sum) on the histogram to get the cumulative distribution of luminance values (this should go in the incoming d_cdf pointer which already has been allocated for you) */ exclusiveScan<<<1, threads, 
sizeof(unsigned int) * threads>>> (d_hist, d_cdf, numBins); }
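Two observations about the launch code above, with a hedged sketch of the usual guarded form: blocks = n / maxThreadsPerBlock is integer division, so trailing elements are skipped whenever numRows*numCols is not a multiple of 1024, and the histogram kernel neither checks myId < n nor clamps the bin computed for the maximum luminance (which evaluates to numBins). Kernel and variable names in the sketch are illustrative.

// Sketch only: a guarded histogram for the case where n is not a multiple
// of the block size. Not part of the original assignment code.
__global__ void histogram_guarded(const float * const d_logLuminance,
                                  unsigned int *d_hist,
                                  float lumMin, float lumRange,
                                  int numBins, int n)
{
    int myId = threadIdx.x + blockIdx.x * blockDim.x;
    if (myId >= n) return;                          // guard the tail block

    int myBin = (d_logLuminance[myId] - lumMin) / lumRange * numBins;
    if (myBin > numBins - 1) myBin = numBins - 1;   // clamp lum == max_logLum
    atomicAdd(&d_hist[myBin], 1);
}

// Launch with a rounded-up grid instead of n / maxThreadsPerBlock:
//   int threads = 1024;
//   int blocks  = (n + threads - 1) / threads;
//   histogram_guarded<<<blocks, threads>>>(d_logLuminance, d_hist,
//                                          min_logLum, range, numBins, n);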
31ca0e74c137fcd2745afa523e9cfe88d8ba5877.hip
// !!! This is a file automatically generated by hipify!!!
// Multiplicação de matrizes em CUDA
// Disciplina: OPRP001 - Programação Paralela
// Prof.: Mauricio Pillon
// Aluno: Renato Tanaka
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <math.h>
// Matriz Quadrada (nro_linhas = nro_colunas)
#define N 4 // Número de linhas // Número de colunas
// GPU: Multiplicação das matrizes (a) e (b), resultado em (c)
__global__ void matMult (int *da, int *db, int *dc) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  int j = blockIdx.y * blockDim.y + threadIdx.y;
  dc[i*N+j] = 0;
  for(int k=0; k<N; k++) dc[i*N+j] += da[i*N+k] * db[k*N+j];
}
// GPU: Imprime índices na matriz
__global__ void printIndex (void) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  int j = blockIdx.y * blockDim.y + threadIdx.y;
  printf ("[%d][%d]=%d\t(x)\t%d\t%d\t%d\t(y)\t%d\t%d\t%d\n",i,j,(i*N+j), threadIdx.x, blockIdx.x, blockDim.x,threadIdx.y, blockIdx.y, blockDim.y);
}
// GPU: Inicializa os vetores (a), (b) e (c) na Memória Global
__global__ void dirtyMem (int *da, int *db, int *dc) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  da[i] = -1; db[i] = -2; dc[i] = -3;
}
// CPU: Inicializa os vetores (a) e (b)
__host__ void initvet(int *host_a, int *host_b) {
  for (int i=0; i < N; i++) {
    for (int j=0; j < N; j++) {
      host_b[i*N+j] = (i+j)+((N-1)*i);
      host_a[i*N+j] = (N*N)-host_b[i*N+j];
    }
  }
}
// CPU: Imprime matriz
__host__ void printMat (int *mat){
  for (int j =0; j < N; j++) printf("\t(%d)", j);
  printf("\n");
  for (int i=0; i < N; i++) {
    printf("(%d)", i);
    for (int j=0; j < N; j++){ printf("\t%d", mat[i*N+j]); }
    printf("\n");
  }
}
// CPU: função principal
int main(int argc, char const *argv[]) {
  int *a, *b, *c;
  int *dev_a, *dev_b, *dev_c;
  int size;
  // Alocação de matriz quadrada
  size = N * N * sizeof(int);
  // Alocação de memória no host
  hipHostMalloc((void **) &a, size);
  hipHostMalloc((void **) &b, size);
  hipHostMalloc((void **) &c, size);
  // Alocação de memória na GPU para os vetores (a,b e c)
  hipMalloc ((void **) &dev_a, size);
  hipMalloc ((void **) &dev_b, size);
  hipMalloc ((void **) &dev_c, size);
  // Atribui valores iniciais aos vetores em GPU
  hipLaunchKernelGGL(( dirtyMem), dim3(N), dim3(N), 0, 0, dev_a, dev_b, dev_c);
  // Cópia GPU para CPU
  hipMemcpy (a, dev_a, size, hipMemcpyDeviceToHost);
  hipMemcpy (b, dev_b, size, hipMemcpyDeviceToHost);
  hipMemcpy (c, dev_c, size, hipMemcpyDeviceToHost);
  // Impressão na tela dos valores dos vetores
  printf ("\t ### Valores Inicializados na GPU ###\n");
  printf ("\t ### Matriz (a) ### \n"); printMat(a);
  printf ("\t ### Matriz (b) ### \n"); printMat(b);
  printf ("\t ### Matriz (c) ### \n"); printMat(c);
  // Inicialização dos vetores (a) e (b) no host
  initvet(a,b);
  // Cópia dos vetores gerados em CPU p/ memória da GPU
  hipMemcpy (dev_a, a, size, hipMemcpyHostToDevice);
  hipMemcpy (dev_b, b, size, hipMemcpyHostToDevice);
  // Número de blocos e threads p/ dimensões (x,y)
  dim3 dimBlock (1, 1);
  dim3 dimThreads(N, N);
  // Imprime as posições acessadas pelo dimBlock e dimThreads
  hipLaunchKernelGGL(( printIndex), dim3(dimBlock), dim3(dimThreads), 0, 0, );
  // Execução do kernel matMult em GPU
  hipLaunchKernelGGL(( matMult), dim3(dimBlock), dim3(dimThreads), 0, 0, dev_a, dev_b, dev_c);
  hipDeviceSynchronize();
  // Cópia do vetor (c) da GPU (Memória Global) para CPU
  hipMemcpy (c, dev_c, size, hipMemcpyDeviceToHost);
  // Impressão na tela dos valores dos vetores
  printf ("\t ### Valores após processamento em GPU ###\n");
  printf ("\t ### Matriz (a) ### \n"); printMat(a);
  printf ("\t ### Matriz (b) ### \n"); printMat(b);
  printf ("\t ### Matriz (c) ### \n"); printMat(c);
  // Libera a Memória Global (GPU)
  hipFree(dev_a); hipFree(dev_b); hipFree(dev_c);
  // Libera a Memória Global (CPU)
  hipHostFree(a); hipHostFree(b); hipHostFree(c);
  return 0;
}
31ca0e74c137fcd2745afa523e9cfe88d8ba5877.cu
// Multiplicação de matrizes em CUDA // Disciplina: OPRP001 - Programação Paralela // Prof.: Mauricio Pillon // Aluno: Renato Tanaka #include <cuda.h> #include <stdio.h> #include <math.h> // Matriz Quadrada (nro_linhas = nro_colunas) #define N 4 // Número de linhas // Número de colunas // GPU: Multiplicação das matrizes (a) e (b), resultado em (c) __global__ void matMult (int *da, int *db, int *dc) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; dc[i*N+j] = 0; for(int k=0; k<N; k++) dc[i*N+j] += da[i*N+k] * db[k*N+j]; } // GPU: Imprime índices na matriz __global__ void printIndex (void) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; printf ("[%d][%d]=%d\t(x)\t%d\t%d\t%d\t(y)\t%d\t%d\t%d\n",i,j,(i*N+j), threadIdx.x, blockIdx.x, blockDim.x,threadIdx.y, blockIdx.y, blockDim.y); } // GPU: Inicializa os vetores (a), (b) e (c) na Memória Global __global__ void dirtyMem (int *da, int *db, int *dc) { int i = blockIdx.x * blockDim.x + threadIdx.x; da[i] = -1; db[i] = -2; dc[i] = -3; } // CPU: Inicializa os vetores (a) e (b) __host__ void initvet(int *host_a, int *host_b) { for (int i=0; i < N; i++) { for (int j=0; j < N; j++) { host_b[i*N+j] = (i+j)+((N-1)*i); host_a[i*N+j] = (N*N)-host_b[i*N+j]; } } } // CPU: Imprime matriz __host__ void printMat (int *mat){ for (int j =0; j < N; j++) printf("\t(%d)", j); printf("\n"); for (int i=0; i < N; i++) { printf("(%d)", i); for (int j=0; j < N; j++){ printf("\t%d", mat[i*N+j]); } printf("\n"); } } // CPU: função principal int main(int argc, char const *argv[]) { int *a, *b, *c; int *dev_a, *dev_b, *dev_c; int size; // Alocação de matriz quadrada size = N * N * sizeof(int); // Alocação de memória no host cudaMallocHost((void **) &a, size); cudaMallocHost((void **) &b, size); cudaMallocHost((void **) &c, size); // Alocação de memória na GPU para os vetores (a,b e c) cudaMalloc ((void **) &dev_a, size); cudaMalloc ((void **) &dev_b, size); cudaMalloc ((void **) &dev_c, size); // Atribui valores iniciais aos vetores em GPU dirtyMem<<<N, N>>>(dev_a, dev_b, dev_c); // Cópia GPU para CPU cudaMemcpy (a, dev_a, size, cudaMemcpyDeviceToHost); cudaMemcpy (b, dev_b, size, cudaMemcpyDeviceToHost); cudaMemcpy (c, dev_c, size, cudaMemcpyDeviceToHost); // Impressão na tela dos valores dos vetores printf ("\t ### Valores Inicializados na GPU ###\n"); printf ("\t ### Matriz (a) ### \n"); printMat(a); printf ("\t ### Matriz (b) ### \n"); printMat(b); printf ("\t ### Matriz (c) ### \n"); printMat(c); // Inicialização dos vetores (a) e (b) no host initvet(a,b); // Cópia dos vetores gerados em CPU p/ memória da GPU cudaMemcpy (dev_a, a, size, cudaMemcpyHostToDevice); cudaMemcpy (dev_b, b, size, cudaMemcpyHostToDevice); // Número de blocos e threads p/ dimensões (x,y) dim3 dimBlock (1, 1); dim3 dimThreads(N, N); // Imprime as posições acessadas pelo dimBlock e dimThreads printIndex<<< dimBlock, dimThreads>>>(); // Execução do kernel matMult em GPU matMult<<< dimBlock, dimThreads>>>(dev_a, dev_b, dev_c); cudaDeviceSynchronize(); // Cópia do vetor (c) da GPU (Memória Global) para CPU cudaMemcpy (c, dev_c, size, cudaMemcpyDeviceToHost); // Impressão na tela dos valores dos vetores printf ("\t ### Valores após processamento em GPU ###\n"); printf ("\t ### Matriz (a) ### \n"); printMat(a); printf ("\t ### Matriz (b) ### \n"); printMat(b); printf ("\t ### Matriz (c) ### \n"); printMat(c); // Libera a Memória Global (GPU) cudaFree(dev_a); cudaFree(dev_b); cudaFree(dev_c); // Libera a 
Memória Global (CPU) cudaFreeHost(a); cudaFreeHost(b); cudaFreeHost(c); return 0; }
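Neither version of this program checks API return codes or kernel-launch errors. A common pattern, shown here as a sketch rather than something taken from the original, is a small checking macro wrapped around each runtime call.

// Sketch of the usual error-checking wrapper (not in the original program).
#include <stdio.h>
#include <stdlib.h>

#define CUDA_CHECK(call)                                                \
    do {                                                                \
        cudaError_t err_ = (call);                                      \
        if (err_ != cudaSuccess) {                                      \
            fprintf(stderr, "CUDA error %s at %s:%d\n",                 \
                    cudaGetErrorString(err_), __FILE__, __LINE__);      \
            exit(EXIT_FAILURE);                                         \
        }                                                               \
    } while (0)

// Usage, e.g. around the copies and after the kernel launch:
//   CUDA_CHECK(cudaMemcpy(dev_a, a, size, cudaMemcpyHostToDevice));
//   matMult<<<dimBlock, dimThreads>>>(dev_a, dev_b, dev_c);
//   CUDA_CHECK(cudaGetLastError());
//   CUDA_CHECK(cudaDeviceSynchronize());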
c33f554b604919c7cc7d61a463d0ca012ea229bd.hip
// !!! This is a file automatically generated by hipify!!!
/* Copyright 2019, Aman Gupta, ENG EC 527, Prof. Martin Herbordt */
/******************************************************************************/
/* Matrix library implementation for CUDA in C */
/******************************************************************************/
/* Libraries */
/******************************************************************************/
#include <stdio.h>
#include <stdlib.h>
#include <string.h> /* for memcpy */
#include <assert.h>
#include "matrix_hip.cuh"
#include "cuda_utils.cuh"
/******************************************************************************/
/* Implementations */
/******************************************************************************/
__host__ Matrix * matrix_init(int rows, int cols)
{
  // small check to ensure rows and cols are positive numbers
  if (rows <= 0 || cols <= 0) return NULL;
  Matrix *m = (Matrix *)malloc(sizeof(Matrix));
  m -> rows = rows;
  m -> cols = cols;
  m -> data_d = NULL;
  m -> data_h = NULL;
  m -> device_assigned = false;
  m -> host_assigned = false;
  return m;
}
__host__ void matrix_allocate_host(Matrix *A)
{
  if (!A->host_assigned) {
    data_t *data_h = (data_t *)calloc(A->rows*A->cols,sizeof(data_t));
    if (!data_h) {printf("Unable to allocate matrix\n"); exit(-1);}
    A->data_h = data_h;
    A->host_assigned = true;
  }
}
__host__ Matrix *copy_matrix(Matrix *X)
{
  Matrix *Z = matrix_init(X->rows, X->cols);
  matrix_allocate(Z, X->rows, X->cols);
  memcpy(Z->data_h, X->data_h, X->rows*X->cols*sizeof(data_t));
  copy_matrix_H2D(Z);
  return Z;
}
__host__ int matrix_free_host(Matrix *A)
{
  if (!A) {printf("freeing NULL pointer\n"); return -1;}
  assert(A->data_h);
  free(A->data_h);
  free(A);
  return 0;
}
__host__ void matrix_allocate(Matrix *A, int rows, int cols)
{
  if (!A->device_assigned && !A->host_assigned) {
    A->rows = rows;
    A->cols = cols;
    matrix_allocate_host(A);
    matrix_allocate_cuda(A);
  }
}
__host__ int matrix_free(Matrix *A)
{
  int d = matrix_free_cuda(A);
  int h = matrix_free_host(A);
  if (d || h) {printf("Unable to free matrix"); return -1;}
  return 0;
}
void print_matrix(Matrix *A)
{
  int row, col;
  for (row = 1; row <= A->rows; row++) {
    for (col = 1; col <= A->cols; col++) {
      printf("%lf,", ELEMENT_H(A, row, col));
    }
    printf("\n");
  }
}
/* Prints the device-side copy using a 1-based, column-major index.
   Note: dereferencing data_d from host code only works when the pointer is
   host-accessible (e.g. managed or zero-copy memory). */
void print_matrix_d(Matrix *A)
{
  int row, col;
  for (row = 1; row <= A->rows; row++) {
    for (col = 1; col <= A->cols; col++) {
      printf("%lf,", A->data_d[(col-1)*A->rows + (row-1)]); // ELEMENT_D(A, row, col)
    }
    printf("\n");
  }
}
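ELEMENT_H and the commented-out ELEMENT_D come from matrix_hip.cuh / cuda_utils.cuh, which are not part of this dump. Judging only from the index arithmetic in print_matrix_d above, the storage appears to be 1-based and column-major, so the accessors are presumably something along these lines; this is an assumption, not the library's actual definition.

/* Assumed accessor macros (illustrative; the real ones live in the headers
   that are not included in this dump). */
#define ELEMENT_H(A, row, col)  ((A)->data_h[((col) - 1) * (A)->rows + ((row) - 1)])
#define ELEMENT_D(A, row, col)  ((A)->data_d[((col) - 1) * (A)->rows + ((row) - 1)])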
c33f554b604919c7cc7d61a463d0ca012ea229bd.cu
/* Copyright 2019, Aman Gupta, ENG EC 527, Prof. Martin Herbordt */
/******************************************************************************/
/* Matrix library implementation for CUDA in C */
/******************************************************************************/
/* Libraries */
/******************************************************************************/
#include <stdio.h>
#include <stdlib.h>
#include <string.h> /* for memcpy */
#include <assert.h>
#include "matrix.cuh"
#include "cuda_utils.cuh"
/******************************************************************************/
/* Implementations */
/******************************************************************************/
__host__ Matrix * matrix_init(int rows, int cols)
{
  // small check to ensure rows and cols are positive numbers
  if (rows <= 0 || cols <= 0) return NULL;
  Matrix *m = (Matrix *)malloc(sizeof(Matrix));
  m -> rows = rows;
  m -> cols = cols;
  m -> data_d = NULL;
  m -> data_h = NULL;
  m -> device_assigned = false;
  m -> host_assigned = false;
  return m;
}
__host__ void matrix_allocate_host(Matrix *A)
{
  if (!A->host_assigned) {
    data_t *data_h = (data_t *)calloc(A->rows*A->cols,sizeof(data_t));
    if (!data_h) {printf("Unable to allocate matrix\n"); exit(-1);}
    A->data_h = data_h;
    A->host_assigned = true;
  }
}
__host__ Matrix *copy_matrix(Matrix *X)
{
  Matrix *Z = matrix_init(X->rows, X->cols);
  matrix_allocate(Z, X->rows, X->cols);
  memcpy(Z->data_h, X->data_h, X->rows*X->cols*sizeof(data_t));
  copy_matrix_H2D(Z);
  return Z;
}
__host__ int matrix_free_host(Matrix *A)
{
  if (!A) {printf("freeing NULL pointer\n"); return -1;}
  assert(A->data_h);
  free(A->data_h);
  free(A);
  return 0;
}
__host__ void matrix_allocate(Matrix *A, int rows, int cols)
{
  if (!A->device_assigned && !A->host_assigned) {
    A->rows = rows;
    A->cols = cols;
    matrix_allocate_host(A);
    matrix_allocate_cuda(A);
  }
}
__host__ int matrix_free(Matrix *A)
{
  int d = matrix_free_cuda(A);
  int h = matrix_free_host(A);
  if (d || h) {printf("Unable to free matrix"); return -1;}
  return 0;
}
void print_matrix(Matrix *A)
{
  int row, col;
  for (row = 1; row <= A->rows; row++) {
    for (col = 1; col <= A->cols; col++) {
      printf("%lf,", ELEMENT_H(A, row, col));
    }
    printf("\n");
  }
}
/* Prints the device-side copy using a 1-based, column-major index.
   Note: dereferencing data_d from host code only works when the pointer is
   host-accessible (e.g. managed or zero-copy memory). */
void print_matrix_d(Matrix *A)
{
  int row, col;
  for (row = 1; row <= A->rows; row++) {
    for (col = 1; col <= A->cols; col++) {
      printf("%lf,", A->data_d[(col-1)*A->rows + (row-1)]); // ELEMENT_D(A, row, col)
    }
    printf("\n");
  }
}
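Because data_d is device memory, reading it directly from host code (as print_matrix_d does) only works when the pointer happens to be host-accessible. The conventional alternative is to stage the data through a host buffer first; the sketch below is not part of the library and assumes the same column-major layout discussed above and that data_t is double, as the %lf format suggests.

/* Sketch: print a device matrix by staging it through a host buffer first. */
void print_matrix_d_staged(Matrix *A)
{
  size_t bytes = (size_t)A->rows * A->cols * sizeof(data_t);
  data_t *tmp = (data_t *)malloc(bytes);
  if (!tmp) { printf("Unable to allocate print buffer\n"); return; }

  cudaMemcpy(tmp, A->data_d, bytes, cudaMemcpyDeviceToHost);

  for (int row = 0; row < A->rows; row++) {
    for (int col = 0; col < A->cols; col++)
      printf("%lf,", tmp[col * A->rows + row]);   /* column-major, 0-based here */
    printf("\n");
  }
  free(tmp);
}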
a470baf43c10f150ecc98d5e9d619a4cbed10bac.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <hip/hip_runtime.h> typedef double FLOAT; __global__ void sum(FLOAT *x) { int tid = threadIdx.x; x[tid] += 1; } int main() { int N = 32; int nbytes = N * sizeof(FLOAT); FLOAT *dx = NULL, *hx = NULL; int i; /* allocate GPU mem */ hipMalloc((void **)&dx, nbytes); if (dx == NULL) { printf("couldn't allocate GPU memory\n"); return -1; } /* allocate pinned CPU host mem: host<->device copies are faster than with pageable malloc */ hipHostMalloc((void **)&hx, nbytes); if (hx == NULL) { printf("couldn't allocate CPU memory\n"); return -2; } /* init */ printf("hx original: \n"); for (i = 0; i < N; i++) { hx[i] = i; printf("%g\n", hx[i]); } /* copy data to GPU */ hipMemcpy(dx, hx, nbytes, hipMemcpyHostToDevice); /* call GPU */ hipLaunchKernelGGL(( sum), dim3(1), dim3(N), 0, 0, dx); /* let GPU finish */ hipDeviceSynchronize(); /* copy data from GPU */ hipMemcpy(hx, dx, nbytes, hipMemcpyDeviceToHost); printf("\nhx from GPU: \n"); for (i = 0; i < N; i++) { printf("%g\n", hx[i]); } hipFree(dx); hipHostFree(hx); return 0; }
a470baf43c10f150ecc98d5e9d619a4cbed10bac.cu
#include <stdio.h> #include <cuda.h> typedef double FLOAT; __global__ void sum(FLOAT *x) { int tid = threadIdx.x; x[tid] += 1; } int main() { int N = 32; int nbytes = N * sizeof(FLOAT); FLOAT *dx = NULL, *hx = NULL; int i; /* allocate GPU mem */ cudaMalloc((void **)&dx, nbytes); if (dx == NULL) { printf("couldn't allocate GPU memory\n"); return -1; } /* allocate pinned CPU host mem: host<->device copies are faster than with pageable malloc */ cudaMallocHost((void **)&hx, nbytes); if (hx == NULL) { printf("couldn't allocate CPU memory\n"); return -2; } /* init */ printf("hx original: \n"); for (i = 0; i < N; i++) { hx[i] = i; printf("%g\n", hx[i]); } /* copy data to GPU */ cudaMemcpy(dx, hx, nbytes, cudaMemcpyHostToDevice); /* call GPU */ sum<<<1, N>>>(dx); /* let GPU finish */ cudaThreadSynchronize(); /* copy data from GPU */ cudaMemcpy(hx, dx, nbytes, cudaMemcpyDeviceToHost); printf("\nhx from GPU: \n"); for (i = 0; i < N; i++) { printf("%g\n", hx[i]); } cudaFree(dx); cudaFreeHost(hx); return 0; }
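/* The .cu row above still calls the long-deprecated cudaThreadSynchronize() (hipify maps it to
   hipDeviceSynchronize()), and neither version checks the kernel launch for errors. A short
   sketch of the same host flow with cudaDeviceSynchronize() and an explicit error check; the
   CHECK macro and the add_one kernel name are illustrative, not part of the original file. */
#include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime.h>

#define CHECK(call)                                                     \
  do {                                                                  \
    cudaError_t err_ = (call);                                          \
    if (err_ != cudaSuccess) {                                          \
      printf("CUDA error %s at %s:%d\n", cudaGetErrorString(err_),      \
             __FILE__, __LINE__);                                       \
      exit(1);                                                          \
    }                                                                   \
  } while (0)

__global__ void add_one(double *x) { x[threadIdx.x] += 1; }

int main(void) {
  const int N = 32;
  double *dx = NULL, *hx = NULL;
  CHECK(cudaMalloc((void **)&dx, N * sizeof(double)));
  CHECK(cudaMallocHost((void **)&hx, N * sizeof(double)));  /* pinned host buffer */
  for (int i = 0; i < N; i++) hx[i] = i;
  CHECK(cudaMemcpy(dx, hx, N * sizeof(double), cudaMemcpyHostToDevice));
  add_one<<<1, N>>>(dx);
  CHECK(cudaGetLastError());       /* catches launch-configuration errors */
  CHECK(cudaDeviceSynchronize());  /* modern replacement for cudaThreadSynchronize() */
  CHECK(cudaMemcpy(hx, dx, N * sizeof(double), cudaMemcpyDeviceToHost));
  for (int i = 0; i < N; i++) printf("%g\n", hx[i]);
  cudaFreeHost(hx);
  cudaFree(dx);
  return 0;
}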
b407531d89e36fdaa6c28c75a9e3f2400f676934.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include "stdio.h" #include "stdlib.h" #include <string.h> #include "algorithmCudaNormal.h" #include "algorithmCudaNormalInternal.h" namespace AlgorithmCudaNormal { #if 0 } // indent guard #endif __forceinline__ __device__ void updateCell(int* matDst, int* matSrc, int globalIndex, int cnt) { if (matSrc[globalIndex] == 0) { if (cnt == 3) { // birth matDst[globalIndex] = 1; } else { // keep dead matDst[globalIndex] = 0; } } else { if (cnt <= 2 || cnt >= 5) { // die matDst[globalIndex] = 0; } else { // keep alive (age++) matDst[globalIndex] = matSrc[globalIndex] + 1; } } } __global__ void loop_0_stream(int* matDst, int *matSrc, int width, int height, int offsetY) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; y += offsetY; int cnt = 0; for (int yy = y - 1; yy <= y + 1; yy++) { int roundY = yy; if (roundY >= height) roundY = 0; if (roundY < 0) roundY = height - 1; for (int xx = x - 1; xx <= x + 1; xx++) { int roundX = xx; if (roundX >= width) roundX = 0; if (roundX < 0) roundX = width - 1; if (matSrc[width * roundY + roundX] != 0) { cnt++; } } } updateCell(matDst, matSrc, y * width + x, cnt); } /* The most basic algorithm * with stream (divide area into several landscape area, and each stream processes each area) */ void process_0_stream(ALGORITHM_CUDA_NORMAL_PARAM *param, int width, int height) { dim3 block(BLOCK_SIZE_W, BLOCK_SIZE_H); dim3 grid(width / BLOCK_SIZE_W, height / BLOCK_SIZE_H / NUM_STREAM); int heightStream = ceil((double)height / NUM_STREAM); /* copy border line data at first to simplyfy logic of each stream (no need to consider border line) */ #if !defined(USE_ZEROCOPY_MEMORY) for (int i = 0; i < NUM_STREAM; i++) { int offsetFirstLine = (i * height / NUM_STREAM) * width; CHECK(hipMemcpy(param->devMatSrc + offsetFirstLine, param->hostMatSrc + offsetFirstLine, width * sizeof(int), hipMemcpyHostToDevice)); int offsetLastLine = ((i + 1) * height / NUM_STREAM - 1) * width; CHECK(hipMemcpy(param->devMatSrc + offsetLastLine, param->hostMatSrc + offsetLastLine, width * sizeof(int), hipMemcpyHostToDevice)); } #endif /* create stream(copy(h2d), kernel, copy(d2h)) */ for (int i = 0; i < NUM_STREAM; i++) { hipStream_t* pStream = (hipStream_t*)(param->pStream[i]); int offsetY = i * heightStream; #if !defined(USE_ZEROCOPY_MEMORY) CHECK(hipMemcpyAsync(param->devMatSrc + offsetY * width, param->hostMatSrc + offsetY * width, width * heightStream * sizeof(int), hipMemcpyHostToDevice, *pStream)); #endif loop_0_stream << < grid, block, 0, *pStream >> > (param->devMatDst, param->devMatSrc, width, height, offsetY); #if !defined(USE_ZEROCOPY_MEMORY) CHECK(hipMemcpyAsync(param->hostMatDst + offsetY * width, param->devMatDst + offsetY * width, width * heightStream * sizeof(int), hipMemcpyDeviceToHost, *pStream)); #endif } for (int i = 0; i < NUM_STREAM; i++) { hipStream_t* pStream = (hipStream_t*)(param->pStream[i]); CHECK(hipStreamSynchronize(*pStream)); } swapMat(param); // hostMatSrc is ready to be displayed } }
b407531d89e36fdaa6c28c75a9e3f2400f676934.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include "stdio.h" #include "stdlib.h" #include <string.h> #include "algorithmCudaNormal.h" #include "algorithmCudaNormalInternal.h" namespace AlgorithmCudaNormal { #if 0 } // indent guard #endif __forceinline__ __device__ void updateCell(int* matDst, int* matSrc, int globalIndex, int cnt) { if (matSrc[globalIndex] == 0) { if (cnt == 3) { // birth matDst[globalIndex] = 1; } else { // keep dead matDst[globalIndex] = 0; } } else { if (cnt <= 2 || cnt >= 5) { // die matDst[globalIndex] = 0; } else { // keep alive (age++) matDst[globalIndex] = matSrc[globalIndex] + 1; } } } __global__ void loop_0_stream(int* matDst, int *matSrc, int width, int height, int offsetY) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; y += offsetY; int cnt = 0; for (int yy = y - 1; yy <= y + 1; yy++) { int roundY = yy; if (roundY >= height) roundY = 0; if (roundY < 0) roundY = height - 1; for (int xx = x - 1; xx <= x + 1; xx++) { int roundX = xx; if (roundX >= width) roundX = 0; if (roundX < 0) roundX = width - 1; if (matSrc[width * roundY + roundX] != 0) { cnt++; } } } updateCell(matDst, matSrc, y * width + x, cnt); } /* The most basic algorithm * with stream (divide area into several landscape area, and each stream processes each area) */ void process_0_stream(ALGORITHM_CUDA_NORMAL_PARAM *param, int width, int height) { dim3 block(BLOCK_SIZE_W, BLOCK_SIZE_H); dim3 grid(width / BLOCK_SIZE_W, height / BLOCK_SIZE_H / NUM_STREAM); int heightStream = ceil((double)height / NUM_STREAM); /* copy border line data at first to simplyfy logic of each stream (no need to consider border line) */ #if !defined(USE_ZEROCOPY_MEMORY) for (int i = 0; i < NUM_STREAM; i++) { int offsetFirstLine = (i * height / NUM_STREAM) * width; CHECK(cudaMemcpy(param->devMatSrc + offsetFirstLine, param->hostMatSrc + offsetFirstLine, width * sizeof(int), cudaMemcpyHostToDevice)); int offsetLastLine = ((i + 1) * height / NUM_STREAM - 1) * width; CHECK(cudaMemcpy(param->devMatSrc + offsetLastLine, param->hostMatSrc + offsetLastLine, width * sizeof(int), cudaMemcpyHostToDevice)); } #endif /* create stream(copy(h2d), kernel, copy(d2h)) */ for (int i = 0; i < NUM_STREAM; i++) { cudaStream_t* pStream = (cudaStream_t*)(param->pStream[i]); int offsetY = i * heightStream; #if !defined(USE_ZEROCOPY_MEMORY) CHECK(cudaMemcpyAsync(param->devMatSrc + offsetY * width, param->hostMatSrc + offsetY * width, width * heightStream * sizeof(int), cudaMemcpyHostToDevice, *pStream)); #endif loop_0_stream << < grid, block, 0, *pStream >> > (param->devMatDst, param->devMatSrc, width, height, offsetY); #if !defined(USE_ZEROCOPY_MEMORY) CHECK(cudaMemcpyAsync(param->hostMatDst + offsetY * width, param->devMatDst + offsetY * width, width * heightStream * sizeof(int), cudaMemcpyDeviceToHost, *pStream)); #endif } for (int i = 0; i < NUM_STREAM; i++) { cudaStream_t* pStream = (cudaStream_t*)(param->pStream[i]); CHECK(cudaStreamSynchronize(*pStream)); } swapMat(param); // hostMatSrc is ready to be displayed } }
50c9d0d3c4cfe300c245977af9562a3ff07ac923.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #ifdef _TIMER_ #include "hip/hip_runtime_api.h" #endif #include "stdio.h" #include <rocm_smi/rocm_smi.h> #include <assert.h> #define FORMA_MAX(a,b) ( (a) > (b) ? (a) : (b) ) #define max(a,b) FORMA_MAX(a,b) #define FORMA_MIN(a,b) ( (a) < (b) ? (a) : (b) ) #define min(a,b) FORMA_MIN(a,b) #define FORMA_CEIL(a,b) ( (a) % (b) == 0 ? (a) / (b) : ((a) / (b)) + 1 ) #ifndef FORMA_MAX_BLOCKDIM_0 #define FORMA_MAX_BLOCKDIM_0 1024 #endif #ifndef FORMA_MAX_BLOCKDIM_1 #define FORMA_MAX_BLOCKDIM_1 1024 #endif #ifndef FORMA_MAX_BLOCKDIM_2 #define FORMA_MAX_BLOCKDIM_2 1024 #endif template<typename T> __global__ void __kernel_init__(T* input, T value) { int loc = (int)(blockIdx.x)*(int)(blockDim.x)+(int)(threadIdx.x); input[loc] = value; } template<typename T> void initialize_array(T* d_input, int size, T value) { dim3 init_grid(FORMA_CEIL(size,FORMA_MAX_BLOCKDIM_0)); dim3 init_block(FORMA_MAX_BLOCKDIM_0); hipLaunchKernelGGL(( __kernel_init__), dim3(init_grid),dim3(init_block), 0, 0, d_input,value); } void Check_CUDA_Error(const char* message); /*Texture references */ /* Device code Begin */ __global__ void __kernel___forma_kernel__0__(double * __restrict__ input, int L, int M, int N, double * __restrict__ __var_4__){ int FORMA_BLOCKDIM_Z = (int)(blockDim.z); int FORMA_BLOCKDIM_Y = (int)(blockDim.y); int FORMA_BLOCKDIM_X = (int)(blockDim.x); int __iter_0__; __iter_0__ = (int)(blockIdx.x)*(int)(FORMA_BLOCKDIM_X) + (int)(threadIdx.x) + 1; if(__iter_0__ <= (N-2)){ int __iter_1__; __iter_1__ = (int)(blockIdx.y)*(int)(FORMA_BLOCKDIM_Y) + (int)(threadIdx.y) + 1; if(__iter_1__ <= (M-2)){ int __iter_2__; __iter_2__ = (int)(blockIdx.z)*(int)(FORMA_BLOCKDIM_Z) + (int)(threadIdx.z) + 1; if(__iter_2__ <= (L-2)){ double __temp_0__; __temp_0__ = (0.161000f * input[__iter_0__+(1)+N*(__iter_1__+M*(__iter_2__))]); double __temp_1__; __temp_1__ = (0.162000f * input[__iter_0__+(-1)+N*(__iter_1__+M*(__iter_2__))]); double __temp_2__; __temp_2__ = (__temp_0__ + __temp_1__); double __temp_3__; __temp_3__ = (0.163000f * input[__iter_0__+N*(__iter_1__+(1)+M*(__iter_2__))]); double __temp_4__; __temp_4__ = (__temp_2__ + __temp_3__); double __temp_5__; __temp_5__ = (0.164000f * input[__iter_0__+N*(__iter_1__+(-1)+M*(__iter_2__))]); double __temp_6__; __temp_6__ = (__temp_4__ + __temp_5__); double __temp_7__; __temp_7__ = (0.165000f * input[__iter_0__+N*(__iter_1__+M*(__iter_2__+(1)))]); double __temp_8__; __temp_8__ = (__temp_6__ + __temp_7__); double __temp_9__; __temp_9__ = (0.166000f * input[__iter_0__+N*(__iter_1__+M*(__iter_2__+(-1)))]); double __temp_10__; __temp_10__ = (__temp_8__ + __temp_9__); double __temp_11__; __temp_11__ = (1.670000f * input[__iter_0__+N*(__iter_1__+M*(__iter_2__))]); double __temp_12__; __temp_12__ = (__temp_10__ - __temp_11__); __var_4__[__iter_0__+N*(__iter_1__+M*(__iter_2__))] = __temp_12__; } } } } /*Device code End */ /* Host Code Begin */ extern "C" void j3d7pt(double * h_input, int L, int M, int N, double * __var_0__){ /* Host allocation Begin */ double * input; hipMalloc(&input,sizeof(double)*(L*M*N)); Check_CUDA_Error("Allocation Error!! 
: input\n"); hipPointerAttribute_t ptrAttrib_h_input; hipMemcpyKind memcpy_kind_h_input = hipMemcpyHostToDevice; if (hipPointerGetAttributes(&ptrAttrib_h_input, h_input) == hipSuccess) if (ptrAttrib_h_input.memoryType == hipMemoryTypeDevice) memcpy_kind_h_input = hipMemcpyDeviceToDevice; hipGetLastError(); if( memcpy_kind_h_input != hipMemcpyDeviceToDevice ){ hipMemcpy(input,h_input,sizeof(double)*(L*M*N), memcpy_kind_h_input); } double * __var_1__; hipMalloc(&__var_1__,sizeof(double)*(L*M*N)); Check_CUDA_Error("Allocation Error!! : __var_1__\n"); double * __var_2__; hipMalloc(&__var_2__,sizeof(double)*(L*M*N)); Check_CUDA_Error("Allocation Error!! : __var_2__\n"); /*Host Allocation End */ /* Kernel Launch Begin */ #ifdef _TIMER_ hipEvent_t _forma_timer_start_,_forma_timer_stop_; hipEventCreate(&_forma_timer_start_); hipEventCreate(&_forma_timer_stop_); hipEventRecord(_forma_timer_start_,0); #endif int __size_0___kernel___forma_kernel__0__ = ((N-2) - 1 ) + 1; int __size_1___kernel___forma_kernel__0__ = ((M-2) - 1 ) + 1; int __size_2___kernel___forma_kernel__0__ = ((L-2) - 1 ) + 1; int __block_0___kernel___forma_kernel__0__ = 16; int __block_1___kernel___forma_kernel__0__ = 4; int __block_2___kernel___forma_kernel__0__ = 4; dim3 __blockConfig___kernel___forma_kernel__0__(__block_0___kernel___forma_kernel__0__,__block_1___kernel___forma_kernel__0__,__block_2___kernel___forma_kernel__0__); int __SMemSize___kernel___forma_kernel__0__ = 0; int __grid_0___kernel___forma_kernel__0__ = FORMA_CEIL(__size_0___kernel___forma_kernel__0__,__block_0___kernel___forma_kernel__0__); int __grid_1___kernel___forma_kernel__0__ = FORMA_CEIL(__size_1___kernel___forma_kernel__0__,__block_1___kernel___forma_kernel__0__); int __grid_2___kernel___forma_kernel__0__ = FORMA_CEIL(__size_2___kernel___forma_kernel__0__,__block_2___kernel___forma_kernel__0__); dim3 __gridConfig___kernel___forma_kernel__0__(__grid_0___kernel___forma_kernel__0__,__grid_1___kernel___forma_kernel__0__,__grid_2___kernel___forma_kernel__0__); unsigned int power1, power2; rsmi_status_t result; uint32_t device; nvmlEnableState_t mode; result=nvmlInit(); result = nvmlDeviceGetHandleByIndex(0, &device); assert(RSMI_STATUS_SUCCESS == result); result=nvmlDeviceGetPowerManagementMode(device, &mode); printf("enabled = %d\n", mode); result=nvmlDeviceGetPowerUsage(device,&power1); assert(RSMI_STATUS_SUCCESS == result); hipDeviceSynchronize(); for (int x=0; x<500; x++) { hipLaunchKernelGGL(( __kernel___forma_kernel__0__), dim3(__gridConfig___kernel___forma_kernel__0__), dim3(__blockConfig___kernel___forma_kernel__0__), __SMemSize___kernel___forma_kernel__0__, 0, input, L, M, N, __var_2__); hipLaunchKernelGGL(( __kernel___forma_kernel__0__), dim3(__gridConfig___kernel___forma_kernel__0__), dim3(__blockConfig___kernel___forma_kernel__0__), __SMemSize___kernel___forma_kernel__0__, 0, __var_2__, L, M, N, __var_1__); hipLaunchKernelGGL(( __kernel___forma_kernel__0__), dim3(__gridConfig___kernel___forma_kernel__0__), dim3(__blockConfig___kernel___forma_kernel__0__), __SMemSize___kernel___forma_kernel__0__, 0, __var_1__, L, M, N, __var_2__); hipLaunchKernelGGL(( __kernel___forma_kernel__0__), dim3(__gridConfig___kernel___forma_kernel__0__), dim3(__blockConfig___kernel___forma_kernel__0__), __SMemSize___kernel___forma_kernel__0__, 0, __var_2__, L, M, N, __var_1__); } hipDeviceSynchronize(); result=nvmlDeviceGetPowerUsage(device,&power2); assert(RSMI_STATUS_SUCCESS == result); power2 -= power1; printf("%u\n", power2); nvmlShutdown(); hipPointerAttribute_t 
ptrAttrib___var_0__; hipMemcpyKind memcpy_kind___var_0__ = hipMemcpyDeviceToHost; if (hipPointerGetAttributes(&ptrAttrib___var_0__, __var_0__) == hipSuccess) if (ptrAttrib___var_0__.memoryType == hipMemoryTypeDevice) memcpy_kind___var_0__ = hipMemcpyDeviceToDevice; hipGetLastError(); hipMemcpy(__var_0__,__var_1__, sizeof(double)*(L*M*N), memcpy_kind___var_0__); #ifdef _TIMER_ hipEventRecord(_forma_timer_stop_,0); hipEventSynchronize(_forma_timer_stop_); float elapsedTime; hipEventElapsedTime(&elapsedTime,_forma_timer_start_,_forma_timer_stop_); printf("[FORMA] Computation Time(ms) : %lf\n",elapsedTime); hipEventDestroy(_forma_timer_start_); hipEventDestroy(_forma_timer_stop_); #endif /*Kernel Launch End */ /* Host Free Begin */ hipFree(input); hipFree(__var_1__); hipFree(__var_2__); } /*Host Free End*/
50c9d0d3c4cfe300c245977af9562a3ff07ac923.cu
#include "cuda.h" #ifdef _TIMER_ #include "cuda_profiler_api.h" #endif #include "stdio.h" #include <nvml.h> #include <assert.h> #define FORMA_MAX(a,b) ( (a) > (b) ? (a) : (b) ) #define max(a,b) FORMA_MAX(a,b) #define FORMA_MIN(a,b) ( (a) < (b) ? (a) : (b) ) #define min(a,b) FORMA_MIN(a,b) #define FORMA_CEIL(a,b) ( (a) % (b) == 0 ? (a) / (b) : ((a) / (b)) + 1 ) #ifndef FORMA_MAX_BLOCKDIM_0 #define FORMA_MAX_BLOCKDIM_0 1024 #endif #ifndef FORMA_MAX_BLOCKDIM_1 #define FORMA_MAX_BLOCKDIM_1 1024 #endif #ifndef FORMA_MAX_BLOCKDIM_2 #define FORMA_MAX_BLOCKDIM_2 1024 #endif template<typename T> __global__ void __kernel_init__(T* input, T value) { int loc = (int)(blockIdx.x)*(int)(blockDim.x)+(int)(threadIdx.x); input[loc] = value; } template<typename T> void initialize_array(T* d_input, int size, T value) { dim3 init_grid(FORMA_CEIL(size,FORMA_MAX_BLOCKDIM_0)); dim3 init_block(FORMA_MAX_BLOCKDIM_0); __kernel_init__<<<init_grid,init_block>>>(d_input,value); } void Check_CUDA_Error(const char* message); /*Texture references */ /* Device code Begin */ __global__ void __kernel___forma_kernel__0__(double * __restrict__ input, int L, int M, int N, double * __restrict__ __var_4__){ int FORMA_BLOCKDIM_Z = (int)(blockDim.z); int FORMA_BLOCKDIM_Y = (int)(blockDim.y); int FORMA_BLOCKDIM_X = (int)(blockDim.x); int __iter_0__; __iter_0__ = (int)(blockIdx.x)*(int)(FORMA_BLOCKDIM_X) + (int)(threadIdx.x) + 1; if(__iter_0__ <= (N-2)){ int __iter_1__; __iter_1__ = (int)(blockIdx.y)*(int)(FORMA_BLOCKDIM_Y) + (int)(threadIdx.y) + 1; if(__iter_1__ <= (M-2)){ int __iter_2__; __iter_2__ = (int)(blockIdx.z)*(int)(FORMA_BLOCKDIM_Z) + (int)(threadIdx.z) + 1; if(__iter_2__ <= (L-2)){ double __temp_0__; __temp_0__ = (0.161000f * input[__iter_0__+(1)+N*(__iter_1__+M*(__iter_2__))]); double __temp_1__; __temp_1__ = (0.162000f * input[__iter_0__+(-1)+N*(__iter_1__+M*(__iter_2__))]); double __temp_2__; __temp_2__ = (__temp_0__ + __temp_1__); double __temp_3__; __temp_3__ = (0.163000f * input[__iter_0__+N*(__iter_1__+(1)+M*(__iter_2__))]); double __temp_4__; __temp_4__ = (__temp_2__ + __temp_3__); double __temp_5__; __temp_5__ = (0.164000f * input[__iter_0__+N*(__iter_1__+(-1)+M*(__iter_2__))]); double __temp_6__; __temp_6__ = (__temp_4__ + __temp_5__); double __temp_7__; __temp_7__ = (0.165000f * input[__iter_0__+N*(__iter_1__+M*(__iter_2__+(1)))]); double __temp_8__; __temp_8__ = (__temp_6__ + __temp_7__); double __temp_9__; __temp_9__ = (0.166000f * input[__iter_0__+N*(__iter_1__+M*(__iter_2__+(-1)))]); double __temp_10__; __temp_10__ = (__temp_8__ + __temp_9__); double __temp_11__; __temp_11__ = (1.670000f * input[__iter_0__+N*(__iter_1__+M*(__iter_2__))]); double __temp_12__; __temp_12__ = (__temp_10__ - __temp_11__); __var_4__[__iter_0__+N*(__iter_1__+M*(__iter_2__))] = __temp_12__; } } } } /*Device code End */ /* Host Code Begin */ extern "C" void j3d7pt(double * h_input, int L, int M, int N, double * __var_0__){ /* Host allocation Begin */ double * input; cudaMalloc(&input,sizeof(double)*(L*M*N)); Check_CUDA_Error("Allocation Error!! 
: input\n"); cudaPointerAttributes ptrAttrib_h_input; cudaMemcpyKind memcpy_kind_h_input = cudaMemcpyHostToDevice; if (cudaPointerGetAttributes(&ptrAttrib_h_input, h_input) == cudaSuccess) if (ptrAttrib_h_input.memoryType == cudaMemoryTypeDevice) memcpy_kind_h_input = cudaMemcpyDeviceToDevice; cudaGetLastError(); if( memcpy_kind_h_input != cudaMemcpyDeviceToDevice ){ cudaMemcpy(input,h_input,sizeof(double)*(L*M*N), memcpy_kind_h_input); } double * __var_1__; cudaMalloc(&__var_1__,sizeof(double)*(L*M*N)); Check_CUDA_Error("Allocation Error!! : __var_1__\n"); double * __var_2__; cudaMalloc(&__var_2__,sizeof(double)*(L*M*N)); Check_CUDA_Error("Allocation Error!! : __var_2__\n"); /*Host Allocation End */ /* Kernel Launch Begin */ #ifdef _TIMER_ cudaEvent_t _forma_timer_start_,_forma_timer_stop_; cudaEventCreate(&_forma_timer_start_); cudaEventCreate(&_forma_timer_stop_); cudaEventRecord(_forma_timer_start_,0); #endif int __size_0___kernel___forma_kernel__0__ = ((N-2) - 1 ) + 1; int __size_1___kernel___forma_kernel__0__ = ((M-2) - 1 ) + 1; int __size_2___kernel___forma_kernel__0__ = ((L-2) - 1 ) + 1; int __block_0___kernel___forma_kernel__0__ = 16; int __block_1___kernel___forma_kernel__0__ = 4; int __block_2___kernel___forma_kernel__0__ = 4; dim3 __blockConfig___kernel___forma_kernel__0__(__block_0___kernel___forma_kernel__0__,__block_1___kernel___forma_kernel__0__,__block_2___kernel___forma_kernel__0__); int __SMemSize___kernel___forma_kernel__0__ = 0; int __grid_0___kernel___forma_kernel__0__ = FORMA_CEIL(__size_0___kernel___forma_kernel__0__,__block_0___kernel___forma_kernel__0__); int __grid_1___kernel___forma_kernel__0__ = FORMA_CEIL(__size_1___kernel___forma_kernel__0__,__block_1___kernel___forma_kernel__0__); int __grid_2___kernel___forma_kernel__0__ = FORMA_CEIL(__size_2___kernel___forma_kernel__0__,__block_2___kernel___forma_kernel__0__); dim3 __gridConfig___kernel___forma_kernel__0__(__grid_0___kernel___forma_kernel__0__,__grid_1___kernel___forma_kernel__0__,__grid_2___kernel___forma_kernel__0__); unsigned int power1, power2; nvmlReturn_t result; nvmlDevice_t device; nvmlEnableState_t mode; result=nvmlInit(); result = nvmlDeviceGetHandleByIndex(0, &device); assert(NVML_SUCCESS == result); result=nvmlDeviceGetPowerManagementMode(device, &mode); printf("enabled = %d\n", mode); result=nvmlDeviceGetPowerUsage(device,&power1); assert(NVML_SUCCESS == result); cudaDeviceSynchronize(); for (int x=0; x<500; x++) { __kernel___forma_kernel__0__<<<__gridConfig___kernel___forma_kernel__0__, __blockConfig___kernel___forma_kernel__0__, __SMemSize___kernel___forma_kernel__0__>>> (input, L, M, N, __var_2__); __kernel___forma_kernel__0__<<<__gridConfig___kernel___forma_kernel__0__, __blockConfig___kernel___forma_kernel__0__, __SMemSize___kernel___forma_kernel__0__>>> (__var_2__, L, M, N, __var_1__); __kernel___forma_kernel__0__<<<__gridConfig___kernel___forma_kernel__0__, __blockConfig___kernel___forma_kernel__0__, __SMemSize___kernel___forma_kernel__0__>>> (__var_1__, L, M, N, __var_2__); __kernel___forma_kernel__0__<<<__gridConfig___kernel___forma_kernel__0__, __blockConfig___kernel___forma_kernel__0__, __SMemSize___kernel___forma_kernel__0__>>> (__var_2__, L, M, N, __var_1__); } cudaDeviceSynchronize(); result=nvmlDeviceGetPowerUsage(device,&power2); assert(NVML_SUCCESS == result); power2 -= power1; printf("%u\n", power2); nvmlShutdown(); cudaPointerAttributes ptrAttrib___var_0__; cudaMemcpyKind memcpy_kind___var_0__ = cudaMemcpyDeviceToHost; if (cudaPointerGetAttributes(&ptrAttrib___var_0__, 
__var_0__) == cudaSuccess) if (ptrAttrib___var_0__.memoryType == cudaMemoryTypeDevice) memcpy_kind___var_0__ = cudaMemcpyDeviceToDevice; cudaGetLastError(); cudaMemcpy(__var_0__,__var_1__, sizeof(double)*(L*M*N), memcpy_kind___var_0__); #ifdef _TIMER_ cudaEventRecord(_forma_timer_stop_,0); cudaEventSynchronize(_forma_timer_stop_); float elapsedTime; cudaEventElapsedTime(&elapsedTime,_forma_timer_start_,_forma_timer_stop_); printf("[FORMA] Computation Time(ms) : %lf\n",elapsedTime); cudaEventDestroy(_forma_timer_start_); cudaEventDestroy(_forma_timer_stop_); #endif /*Kernel Launch End */ /* Host Free Begin */ cudaFree(input); cudaFree(__var_1__); cudaFree(__var_2__); } /*Host Free End*/
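/* The forma-generated kernel above flattens (x, y, z) into a 1-D index as x + N*(y + M*z), with
   x the fastest-varying dimension, and the host loop ping-pongs between two buffers for 500
   iterations. A stripped-down 7-point stencil showing the same indexing and the double-buffering
   pattern; the coefficients are the ones from the original kernel, everything else (names,
   launch shape) is an illustrative assumption. */
__global__ void j3d7pt_sketch(const double *in, double *out, int L, int M, int N) {
  int x = blockIdx.x * blockDim.x + threadIdx.x + 1;
  int y = blockIdx.y * blockDim.y + threadIdx.y + 1;
  int z = blockIdx.z * blockDim.z + threadIdx.z + 1;
  if (x > N - 2 || y > M - 2 || z > L - 2) return;
  int idx = x + N * (y + M * z);                 /* +-1 in x, +-N in y, +-N*M in z */
  out[idx] = 0.161 * in[idx + 1]     + 0.162 * in[idx - 1]
           + 0.163 * in[idx + N]     + 0.164 * in[idx - N]
           + 0.165 * in[idx + N * M] + 0.166 * in[idx - N * M]
           - 1.670 * in[idx];
}

/* Ping-pong buffering: each sweep reads the previous sweep's output. */
// for (int t = 0; t < iterations; t++) {
//   j3d7pt_sketch<<<grid, block>>>(bufA, bufB, L, M, N);
//   double *tmp = bufA; bufA = bufB; bufB = tmp;
// }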
7ef73c18f126f26e95df0091d09f302c0b10445c.hip
// !!! This is a file automatically generated by hipify!!! /* Copyright (C) 2011 Abhinav Jauhri ([email protected]), Carnegie Mellon University - Silicon Valley This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see <http://www.gnu.org/licenses/>. */ //#include <stdio.h> #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include "matrix_mul.h" #define TILE_WIDTH 2 namespace cuda { __global__ void matrix_mul_kernel(float *sq_matrix_1, float *sq_matrix_2, float *sq_matrix_result, int sq_dimension) { int tx = threadIdx.x; int ty = threadIdx.y; float sum = 0.0f; // printf("sq_dimension %d\n", (size_t)sq_dimension); for(int k = 0; k < sq_dimension; k++) { sum += sq_matrix_1[ty*sq_dimension + k] * sq_matrix_2[k*sq_dimension + tx]; } sq_matrix_result[ty*sq_dimension + tx] = sum; } void matrix_multiplication(float *sq_matrix_1, float *sq_matrix_2, float *sq_matrix_result, unsigned int sq_dimension) { int size = sq_dimension * sq_dimension * sizeof(float); float *sq_matrix_1_d, *sq_matrix_2_d, *sq_matrix_result_d; /*************************************************** 1st Part: Allocation of memory on device memory ****************************************************/ /* copy sq_matrix_1 and sq_matrix_2 to device memory */ hipMalloc((void**) &sq_matrix_1_d, size); hipMemcpy(sq_matrix_1_d, sq_matrix_1, size, hipMemcpyHostToDevice); hipMalloc((void**) &sq_matrix_2_d, size); hipMemcpy(sq_matrix_2_d, sq_matrix_2, size, hipMemcpyHostToDevice); /*allocate sq_matrix_result on host */ hipMalloc((void**) &sq_matrix_result_d, size); /*************************************************** 2nd Part: Inovke kernel ****************************************************/ dim3 dimBlock(sq_dimension, sq_dimension); dim3 dimGrid(1,1); hipLaunchKernelGGL(( matrix_mul_kernel), dim3(dimGrid), dim3(dimBlock), dimBlock.x * dimBlock.x * sizeof(float), 0, sq_matrix_1_d, sq_matrix_2_d, sq_matrix_result_d, sq_dimension); /*************************************************** 3rd Part: Transfer result from device to host ****************************************************/ hipMemcpy(sq_matrix_result, sq_matrix_result_d, size, hipMemcpyDeviceToHost); hipFree(sq_matrix_1_d); hipFree(sq_matrix_2_d); hipFree(sq_matrix_result_d); } } // namespace cuda
7ef73c18f126f26e95df0091d09f302c0b10445c.cu
/* Copyright (C) 2011 Abhinav Jauhri ([email protected]), Carnegie Mellon University - Silicon Valley This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see <http://www.gnu.org/licenses/>. */ //#include <stdio.h> #include <cuda.h> #include <cuda_runtime.h> #include "matrix_mul.h" #define TILE_WIDTH 2 namespace cuda { __global__ void matrix_mul_kernel(float *sq_matrix_1, float *sq_matrix_2, float *sq_matrix_result, int sq_dimension) { int tx = threadIdx.x; int ty = threadIdx.y; float sum = 0.0f; // printf("sq_dimension %d\n", (size_t)sq_dimension); for(int k = 0; k < sq_dimension; k++) { sum += sq_matrix_1[ty*sq_dimension + k] * sq_matrix_2[k*sq_dimension + tx]; } sq_matrix_result[ty*sq_dimension + tx] = sum; } void matrix_multiplication(float *sq_matrix_1, float *sq_matrix_2, float *sq_matrix_result, unsigned int sq_dimension) { int size = sq_dimension * sq_dimension * sizeof(float); float *sq_matrix_1_d, *sq_matrix_2_d, *sq_matrix_result_d; /*************************************************** 1st Part: Allocation of memory on device memory ****************************************************/ /* copy sq_matrix_1 and sq_matrix_2 to device memory */ cudaMalloc((void**) &sq_matrix_1_d, size); cudaMemcpy(sq_matrix_1_d, sq_matrix_1, size, cudaMemcpyHostToDevice); cudaMalloc((void**) &sq_matrix_2_d, size); cudaMemcpy(sq_matrix_2_d, sq_matrix_2, size, cudaMemcpyHostToDevice); /*allocate sq_matrix_result on host */ cudaMalloc((void**) &sq_matrix_result_d, size); /*************************************************** 2nd Part: Inovke kernel ****************************************************/ dim3 dimBlock(sq_dimension, sq_dimension); dim3 dimGrid(1,1); matrix_mul_kernel<<<dimGrid, dimBlock, dimBlock.x * dimBlock.x * sizeof(float)>>>(sq_matrix_1_d, sq_matrix_2_d, sq_matrix_result_d, sq_dimension); /*************************************************** 3rd Part: Transfer result from device to host ****************************************************/ cudaMemcpy(sq_matrix_result, sq_matrix_result_d, size, cudaMemcpyDeviceToHost); cudaFree(sq_matrix_1_d); cudaFree(sq_matrix_2_d); cudaFree(sq_matrix_result_d); } } // namespace cuda
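/* The kernel above launches a single block of sq_dimension x sq_dimension threads, so it can only
   handle matrices up to 32x32, and TILE_WIDTH is defined but never used. A sketch of the tiled
   shared-memory variant that constant suggests, with an illustrative 16x16 tile; the kernel name,
   tile size, and bounds handling are assumptions, not the interface declared in matrix_mul.h. */
#define TILE 16

__global__ void matrix_mul_tiled(const float *A, const float *B, float *C, int n) {
  __shared__ float As[TILE][TILE];
  __shared__ float Bs[TILE][TILE];
  int row = blockIdx.y * TILE + threadIdx.y;
  int col = blockIdx.x * TILE + threadIdx.x;
  float sum = 0.0f;
  for (int t = 0; t < (n + TILE - 1) / TILE; t++) {
    int aCol = t * TILE + threadIdx.x;
    int bRow = t * TILE + threadIdx.y;
    /* stage one tile of A and one tile of B in shared memory, zero-padding past the edge */
    As[threadIdx.y][threadIdx.x] = (row < n && aCol < n) ? A[row * n + aCol] : 0.0f;
    Bs[threadIdx.y][threadIdx.x] = (bRow < n && col < n) ? B[bRow * n + col] : 0.0f;
    __syncthreads();
    for (int k = 0; k < TILE; k++) sum += As[threadIdx.y][k] * Bs[k][threadIdx.x];
    __syncthreads();
  }
  if (row < n && col < n) C[row * n + col] = sum;
}

/* Launched with dim3 grid((n+TILE-1)/TILE, (n+TILE-1)/TILE) and dim3 block(TILE, TILE). */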
62419e4ffddb5e7f9dae0e6d848a0ebf9aa9b424.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "cudaLib.cuh" #include <stdio.h> #include <vector> #include <memory> #include "integralImage.h" namespace IncrementScan { static const int WARP_SIZE = 32; static const int BLOCK_SIZE = WARP_SIZE; /************************************************************ ScanColumn algorithm ************************************************************/ template<typename TSrc, typename TDst, uint SCAN_TYPE, uint COUNT_PER_THREAD, uint WARP_COUNT> __global__ void ScanY(const TSrc* __restrict dataIn, TDst* dataOut, uint width, uint widthStride, uint height) { __shared__ TDst sMem[WARP_COUNT + 1][WARP_SIZE]; uint warpId = threadIdx.y; uint laneId = threadIdx.x; uint tidx = blockIdx.x*blockDim.x + threadIdx.x; uint tidy = (blockIdx.y*blockDim.y + threadIdx.y)*COUNT_PER_THREAD; uint PROCESS_COUNT_Y = COUNT_PER_THREAD*blockDim.y; if (tidx >= width) return; TDst data[COUNT_PER_THREAD]; for (uint y = tidy; y < height; y += PROCESS_COUNT_Y) { if (y != tidy) { if (warpId == WARP_COUNT - 1) sMem[0][laneId] = data[COUNT_PER_THREAD - 1]; __syncthreads(); } uint index = y*widthStride + tidx; { //1,load data uint yy = y; uint idx = index; #pragma unroll for (int i = 0; i < COUNT_PER_THREAD; i++) { if (yy < height) { data[i] = ldg(&dataIn[idx]); idx += widthStride; yy++; } } } { //2, increament prefix sum #pragma unroll for (int i = 1; i < COUNT_PER_THREAD; i++) { data[i] += data[i - 1]; } sMem[warpId + 1][laneId] = data[COUNT_PER_THREAD - 1]; __syncthreads(); } { #if 1 //can be improved if (warpId == 0) { #if 1 TDst s = 0; if (y != 0) { s = sMem[0][laneId]; } #pragma unroll for (int i = 1; i < WARP_COUNT + 1; i++) { s += sMem[i][laneId]; sMem[i][laneId] = s; } #else if (y != 0) { sMem[1][laneId] += sMem[0][laneId]; } //#pragma unroll for (int i = 2; i < WARP_COUNT + 1; i++) { sMem[i][laneId] += sMem[i - 1][laneId]; } #endif } #else for (uint wid = warpId; wid < WARP_SIZE; wid += WARP_COUNT) { TDst s = 0; if (laneId < WARP_COUNT) s = sMem[laneId + 1][wid]; if (y > tidy && laneId == 0) { s += sMem[0][wid]; } if (SCAN_TYPE == KoggeStone_SCAN) { #pragma unroll for (int i = 1; i <= 32; i <<= 1) { /*the first row of the matrix*/ const TDst val = __shfl_up(s, i); if (laneId >= i) { s += val; } } }else if (SCAN_TYPE == LF_SCAN) { #pragma unroll for (int i = 1; i <= 32; i <<= 1) { const TDst val = __shfl(s, i - 1, i << 1); if ((laneId & ((i << 1) - 1)) >= i) { s += val; } } } if (laneId < WARP_COUNT) sMem[laneId + 1][wid] = s; } #endif __syncthreads(); } if (y != 0) { TDst sum = sMem[warpId][laneId]; #pragma unroll for (int i = 0; i < COUNT_PER_THREAD; i++) { data[i] += sum; } } { //store uint yy = y; #pragma unroll for (int i = 0; i < COUNT_PER_THREAD; i++) { if (yy < height) { dataOut[index] = data[i]; index += widthStride; yy++; } } } __syncthreads(); } } /************************************************************ ScanRow algorithm ************************************************************/ template<typename TSrc, typename TDst, int SCAN_TYPE, int BLOCK_DIM_X, int BLOCK_DIM_Y, int COUNT_PER_THREAD> __global__ void ScanX(const TSrc* __restrict dataIn, TDst* dataOut, uint width, uint widthStride, uint height) { TDst data[COUNT_PER_THREAD]; uint warpIdX = threadIdx.x / WARP_SIZE; uint warpIdY = threadIdx.y; uint laneId = threadIdx.x & (WARP_SIZE - 1); const uint WARP_COUNT_X = BLOCK_DIM_X / WARP_SIZE; const uint WARP_COUNT_Y = BLOCK_DIM_Y; const uint WARP_PROCESS_COUNT_X = WARP_SIZE*COUNT_PER_THREAD; const uint 
BLOCK_PROCWSS_COUNT_X = COUNT_PER_THREAD*BLOCK_DIM_X; uint tidx = WARP_PROCESS_COUNT_X*warpIdX + laneId; uint tidy = (blockIdx.y*blockDim.y + threadIdx.y); __shared__ TDst sMem[WARP_COUNT_Y][WARP_COUNT_X + 1]; for (uint x = tidx; x < width; x += BLOCK_PROCWSS_COUNT_X) { if (x != tidx) { if (threadIdx.x == 0) sMem[warpIdY][0] = sMem[warpIdY][WARP_COUNT_X]; __syncthreads(); } uint index = tidy*widthStride + x; { //1, load data uint xx = x; uint idx = index; #pragma unroll for (int i = 0; i < COUNT_PER_THREAD; i++) { if (xx < width) { data[i] = dataIn[idx]; idx += WARP_SIZE; xx += WARP_SIZE; } } } { //2, scan x #pragma unroll for (int j = 0; j < COUNT_PER_THREAD; j++) { if (j > 0) { const TDst sum = __shfl(data[j - 1], WARP_SIZE - 1); if (laneId == 0) { data[j] += sum; } } if (SCAN_TYPE == KoggeStone_SCAN) { //KoggeStone_SCAN algorithm #pragma unroll for (int i = 1; i <= 32; i <<= 1) { /*the first row of the matrix*/ const TDst val = __shfl_up(data[j], i); if (laneId >= i) { data[j] += val; } } }else if (SCAN_TYPE == LF_SCAN) { //LF_Scan algorithm #pragma unroll for (int i = 1; i <= 32; i <<= 1) { const TDst val = __shfl(data[j], i - 1, i << 1); if ((laneId & ((i << 1) - 1)) >= i) { data[j] += val; } } } if (laneId == WARP_SIZE - 1) { sMem[warpIdY][warpIdX + 1] = data[COUNT_PER_THREAD - 1]; } } __syncthreads(); } { //scan partial sum if (warpIdX == 0) { TDst s = 0; if (laneId < WARP_COUNT_X) s = sMem[warpIdY][laneId + 1]; if (x != tidx && laneId == 0) s += sMem[warpIdY][0]; if (SCAN_TYPE == KoggeStone_SCAN) { #pragma unroll for (int i = 1; i <= 32; i <<= 1) { /*the first row of the matrix*/ const TDst val = __shfl_up(s, i); if (laneId >= i) { s += val; } } }else if (SCAN_TYPE == LF_SCAN) { #pragma unroll for (int i = 1; i <= 32; i <<= 1) { const TDst val = __shfl(s, i - 1, i << 1); if ((laneId & ((i << 1) - 1)) >= i) { s += val; } } } if (laneId < WARP_COUNT_X) sMem[warpIdY][laneId + 1] = s; } __syncthreads(); } { if (x >= WARP_PROCESS_COUNT_X) { TDst sum = sMem[warpIdY][warpIdX]; #pragma unroll for (int i = 0; i < COUNT_PER_THREAD; i++) { data[i] += sum; } } } { //save data uint xx = x; uint idx = index; #pragma unroll for (int i = 0; i < COUNT_PER_THREAD; i++) { if (xx < width) { dataOut[idx] = data[i]; idx += WARP_SIZE; xx += WARP_SIZE; } } } } } // void TestX(int width, int height) { // std::cout << __FUNCTION__ << std::endl; // std::cout << "begin : TestIncrementScan" << std::endl; // float inc = 0; // hipEvent_t start, stop; // hipEventCreate(&start); // hipEventCreate(&stop); // // typedef float DataType; // // const uint THREAD_COUNT_PER_BLOCK = 1024; // const int BLOCK_DIM_X = 512; // const int BLOCK_DIM_Y = THREAD_COUNT_PER_BLOCK / BLOCK_DIM_X; // const int COUNT_PER_THREAD = 4; // // //const uint BLOCK_SIZE = 32; // // //const uint BLOCK_DIM_X = 256 * 4; // //int width = 1024 * 2; // //int height = 1024 * 2; // int size = width*height; // std::vector<DataType> vecA(size), vecB(size); // //for (int i = 0; i < height-16; i += 32) std::fill(vecA.begin()+i*width, vecA.begin() + (i+16)*width, 1); // // std::fill(vecA.begin(), vecA.end(), 1); // // // DevData<DataType> devA(width, height), devB(width, height), devTmp(height, width); // devA.CopyFromHost(&vecA[0], width, width, height); // // DevStream SM; // //const int PROCESS_COUNT_PER_THREAD_Y = 32; // //const int WARP_COUNT = THREAD_COUNT_PER_BLOCK / WARP_SIZE; // const dim3 block_sizeX(BLOCK_DIM_X, BLOCK_DIM_Y); // dim3 grid_sizeX(1, UpDivide(height, block_sizeX.y)); // // //dim3 grid_size1(1, UpDivide(height, BLOCK_SIZE)); // 
//dim3 grid_size2(1, UpDivide(width, BLOCK_SIZE)); // float tm = 0; // //tm = timeGetTime(); // hipEventRecord(start, 0); // IncrementScan::ScanX<DataType, BLOCK_DIM_X, BLOCK_DIM_Y, COUNT_PER_THREAD> << <grid_sizeX, block_sizeX >> > (devA.GetData(), devB.GetData(), width, devA.DataPitch(), height); // //IncrementScan::IncrementScan<DataType, BLOCK_SIZE, 4 * sizeof(uint) / sizeof(DataType), BLOCK_DIM_X> << <grid_size1, block_size, 0, SM.stream >> > (devA.GetData(), devTmp.GetData(), width, width, height, height); // //IncrementScan::IncrementScan<DataType, BLOCK_SIZE, 4 * sizeof(uint) / sizeof(DataType), BLOCK_DIM_X> << <grid_size2, block_size, 0, SM.stream >> > (devTmp.GetData(), devB.GetData(), height, height, width, width); // hipDeviceSynchronize(); // hipEventRecord(stop, 0); // //CUDA_CHECK_ERROR; // // // //tm = timeGetTime() - tm; // // hipEventSynchronize(stop); // hipEventElapsedTime(&inc, start, stop); // // devB.CopyToHost(&vecB[0], width, width, height); // printf("%d, %d, total time = %f, %f\n", width, height, tm, inc); // //cudaSyncDevice(); // std::cout << "end : TestSerielScan" << std::endl; //#if 0 // FILE* fp = fopen("d:/ints.raw", "wb"); // if (fp) { // fwrite(&vecB[0], sizeof(vecB[0]), width*height, fp); // fclose(fp); // } //#endif // FILE* flog = fopen("d:/log.csv", "wt"); // if (flog) { // for (int i = 0; i < vecB.size(); i++) { // DataType* p = &vecB[0]; // fprintf(flog, "%.2f ", p[i]); // if (i % width == (width - 1)) // fprintf(flog, "\n"); // fflush(flog); // } // fclose(flog); // } // // // } // void TestY(int width, int height) { // DISPLAY_FUNCTION; // std::cout << __FUNCTION__ << std::endl; // std::cout << "begin : TestIncrementScan" << std::endl; // float inc = 0; // hipEvent_t start, stop; // hipEventCreate(&start); // hipEventCreate(&stop); // // typedef float DataType; // // //const uint BLOCK_SIZE = 32; // const uint THREAD_COUNT_PER_BLOCK = 1024; // //const uint BLOCK_DIM_X = 256 * 4; // //int width = 1024 * 2; // //int height = 1024 * 2; // int size = width*height; // std::vector<DataType> vecA(size), vecB(size); // //for (int i = 0; i < height-16; i += 32) std::fill(vecA.begin()+i*width, vecA.begin() + (i+16)*width, 1); // // std::fill(vecA.begin(), vecA.end(), 1); // // // DevData<DataType> devA(width, height), devB(width, height), devTmp(height, width); // devA.CopyFromHost(&vecA[0], width, width, height); // // DevStream SM; // const int PROCESS_COUNT_PER_THREAD_Y = 32; // const int WARP_COUNT = THREAD_COUNT_PER_BLOCK / WARP_SIZE; // const dim3 block_sizeY(WARP_SIZE, WARP_COUNT); // dim3 grid_sizeY(UpDivide(width, block_sizeY.x), 1); // // //dim3 grid_size1(1, UpDivide(height, BLOCK_SIZE)); // //dim3 grid_size2(1, UpDivide(width, BLOCK_SIZE)); // float tm = 0; // //tm = timeGetTime(); // hipEventRecord(start, 0); // IncrementScan::ScanY<DataType, PROCESS_COUNT_PER_THREAD_Y, WARP_COUNT> << <grid_sizeY, block_sizeY >> > (devA.GetData(), devB.GetData(), width, devA.DataPitch(), height); // //IncrementScan::IncrementScan<DataType, BLOCK_SIZE, 4 * sizeof(uint) / sizeof(DataType), BLOCK_DIM_X> << <grid_size1, block_size, 0, SM.stream >> > (devA.GetData(), devTmp.GetData(), width, width, height, height); // //IncrementScan::IncrementScan<DataType, BLOCK_SIZE, 4 * sizeof(uint) / sizeof(DataType), BLOCK_DIM_X> << <grid_size2, block_size, 0, SM.stream >> > (devTmp.GetData(), devB.GetData(), height, height, width, width); // hipDeviceSynchronize(); // hipEventRecord(stop, 0); // //CUDA_CHECK_ERROR; // // // //tm = timeGetTime() - tm; // // 
hipEventSynchronize(stop); // hipEventElapsedTime(&inc, start, stop); // // devB.CopyToHost(&vecB[0], width, width, height); // printf("%d, %d, total time = %f, %f\n", width, height, tm, inc); // //cudaSyncDevice(); // std::cout << "end : TestSerielScan" << std::endl; //#if 0 // FILE* fp = fopen("d:/ints.raw", "wb"); // if (fp) { // fwrite(&vecB[0], sizeof(vecB[0]), width*height, fp); // fclose(fp); // } //#endif //#if 1 // FILE* flog = fopen("d:/log.csv", "wt"); // if (flog) { // for (int i = 0; i < vecB.size(); i++) { // DataType* p = &vecB[0]; // fprintf(flog, "%.2f ", p[i]); // if (i % width == (width - 1)) // fprintf(flog, "\n"); // fflush(flog); // } // fclose(flog); // } //#endif // } template<typename TSrc, typename TDst, int SCAN_TYPE> void Test(int width, int height) { DISPLAY_FUNCTION; std::cout << GetDataType<TSrc>::name() << "-->" << GetDataType<TDst>::name() << ", ScanName=" << ScanName(SCAN_TYPE) << std::endl; const int REPEAT_COUNT = 1; float inc = 0; hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); //typedef float DataType; dim3 block_sizeX, grid_sizeX; //X const uint THREAD_COUNT_PER_BLOCK = 1024*sizeof(float)/sizeof(TDst); const int BLOCK_DIM_X = 32; const int BLOCK_DIM_Y = THREAD_COUNT_PER_BLOCK / BLOCK_DIM_X; const int COUNT_PER_THREAD = 32; block_sizeX = dim3(BLOCK_DIM_X, BLOCK_DIM_Y); grid_sizeX = dim3(1, UpDivide(height, block_sizeX.y)); //Y dim3 block_sizeY, grid_sizeY; const int PROCESS_COUNT_PER_THREAD_Y = 32*1; const int WARP_COUNT = THREAD_COUNT_PER_BLOCK / WARP_SIZE; block_sizeY = dim3(WARP_SIZE, WARP_COUNT); grid_sizeY = dim3(UpDivide(width, block_sizeY.x), 1); int size = width*height; std::vector<TSrc> vecA(size); std::vector<TDst> vecB(size); //for (int i = 0; i < height-16; i += 32) std::fill(vecA.begin()+i*width, vecA.begin() + (i+16)*width, 1); #if 0 std::fill(vecA.begin(), vecA.end(), 1); #else for (int i = 0; i < vecA.size(); i++) vecA[i] = (TSrc)(abs(rand()) % 2); #endif DevData<TSrc> devA(width, height); DevData<TDst> devB(width, height), devTmp(height, width); devA.CopyFromHost(&vecA[0], width, width, height); ShowGridBlockDim("ScanX", grid_sizeX, block_sizeX); ShowGridBlockDim("ScanY", grid_sizeY, block_sizeY); float tm = 0; tm = timeGetTime(); hipEventRecord(start, 0); #pragma unroll for (int k=0; k<REPEAT_COUNT; k ++){ IncrementScan::ScanX<TSrc, TDst, SCAN_TYPE, BLOCK_DIM_X, BLOCK_DIM_Y, COUNT_PER_THREAD> << <grid_sizeX, block_sizeX >> > (devA.GetData(), devTmp.GetData(), width, devA.DataPitch(), height); IncrementScan::ScanY<TDst, TDst, SCAN_TYPE, PROCESS_COUNT_PER_THREAD_Y, WARP_COUNT> << <grid_sizeY, block_sizeY >> > (devTmp.GetData(), devB.GetData(), width, devA.DataPitch(), height); } hipDeviceSynchronize(); hipEventRecord(stop, 0); CUDA_CHECK_ERROR; tm = timeGetTime() - tm; hipEventSynchronize(stop); hipEventElapsedTime(&inc, start, stop); devB.CopyToHost(&vecB[0], width, width, height); printf("%d, %d, total time = %f, %f\n", width, height, tm, inc); //cudaSyncDevice(); { std::vector<TDst> vecTmp(size); IntegralImageSerial(&vecA[0], &vecTmp[0], width, height); bool bCmp = Compare(&vecB[0], &vecTmp[0], width, height); printf("compare = %s\n", bCmp ? 
"successed" : "failed"); } //SaveToRaw(StringFormat("./%d-%d.raw", width, height).c_str(), &vecB[0], width, height); //SaveToText("./tmp.txt", &vecTmp[0], devTmp.width, devTmp.height); //SaveToText("./vecB.txt", &vecB[0], width, height); } }; //ScanRowColumn void TestIncreamentScan(int argc, char** argv) { std::cout << "------------------------------------------------------" << std::endl; int nScanType = SCAN::LF_SCAN; int dType0 = DataType::TypeFLOAT32; int dType1 = DataType::TypeFLOAT32; int repeat = 1; #define Scan IncrementScan GetArgs(argc, argv, nScanType, dType0, dType1, repeat); #if 0 //for (int i=0; i<10; i++) Scan::Test<uint, double, SCAN::LF_SCAN>(1024 * 1, 1024 * 1); #else for (int i = 1; i <= repeat; i++) { int size = i * 1024; if (nScanType == SCAN::KoggeStone_SCAN) { if (dType0 == DataType::TypeUINT8) { if (dType1 == DataType::TypeUINT32) { Scan::Test<uchar, uint, SCAN::KoggeStone_SCAN>(size, size); } else if (dType1 == DataType::TypeFLOAT32) { Scan::Test<uchar, float, SCAN::KoggeStone_SCAN>(size, size); } else if (dType1 == DataType::TypeFLOAT64) { Scan::Test<uchar, double, SCAN::KoggeStone_SCAN>(size, size); } }if (dType0 == DataType::TypeINT8) { if (dType1 == DataType::TypeINT32) { Scan::Test<char, int, SCAN::KoggeStone_SCAN>(size, size); } else if (dType1 == DataType::TypeFLOAT32) { Scan::Test<char, float, SCAN::KoggeStone_SCAN>(size, size); } else if (dType1 == DataType::TypeFLOAT64) { Scan::Test<char, double, SCAN::KoggeStone_SCAN>(size, size); } } if (dType0 == DataType::TypeINT32) { if (dType1 == DataType::TypeINT32) { Scan::Test<int, int, SCAN::KoggeStone_SCAN>(size, size); } else if (dType1 == DataType::TypeFLOAT32) { } else if (dType1 == DataType::TypeFLOAT64) { Scan::Test<int, double, SCAN::KoggeStone_SCAN>(size, size); } } else if (dType0 == DataType::TypeFLOAT32) { if (dType1 == DataType::TypeUINT32) { } else if (dType1 == DataType::TypeFLOAT32) { Scan::Test<float, float, SCAN::KoggeStone_SCAN>(size, size); } else if (dType1 == DataType::TypeFLOAT64) { Scan::Test<float, double, SCAN::KoggeStone_SCAN>(size, size); } } else if (dType0 == DataType::TypeFLOAT64) { if (dType1 == DataType::TypeUINT32) { } else if (dType1 == DataType::TypeFLOAT32) { } else if (dType1 == DataType::TypeFLOAT64) { Scan::Test<double, double, SCAN::KoggeStone_SCAN>(size, size); } } } else { if (dType0 == DataType::TypeUINT8) { if (dType1 == DataType::TypeUINT32) { Scan::Test<uchar, uint, SCAN::LF_SCAN>(size, size); } else if (dType1 == DataType::TypeFLOAT32) { Scan::Test<uchar, float, SCAN::LF_SCAN>(size, size); } else if (dType1 == DataType::TypeFLOAT64) { Scan::Test<uchar, double, SCAN::LF_SCAN>(size, size); } }if (dType0 == DataType::TypeINT8) { if (dType1 == DataType::TypeINT32) { Scan::Test<char, int, SCAN::LF_SCAN>(size, size); } else if (dType1 == DataType::TypeFLOAT32) { Scan::Test<char, float, SCAN::LF_SCAN>(size, size); } else if (dType1 == DataType::TypeFLOAT64) { Scan::Test<char, double, SCAN::LF_SCAN>(size, size); } } if (dType0 == DataType::TypeINT32) { if (dType1 == DataType::TypeINT32) { Scan::Test<int, int, SCAN::LF_SCAN>(size, size); } else if (dType1 == DataType::TypeFLOAT32) { } else if (dType1 == DataType::TypeFLOAT64) { Scan::Test<int, double, SCAN::LF_SCAN>(size, size); } } else if (dType0 == DataType::TypeFLOAT32) { if (dType1 == DataType::TypeUINT32) { } else if (dType1 == DataType::TypeFLOAT32) { Scan::Test<float, float, SCAN::LF_SCAN>(size, size); } else if (dType1 == DataType::TypeFLOAT64) { Scan::Test<float, double, SCAN::LF_SCAN>(size, size); } } else if 
(dType0 == DataType::TypeFLOAT64) { if (dType1 == DataType::TypeUINT32) { } else if (dType1 == DataType::TypeFLOAT32) { } else if (dType1 == DataType::TypeFLOAT64) { Scan::Test<double, double, SCAN::LF_SCAN>(size, size); } } } } #undef Scan #endif std::cout << "------------------------------------------------------" << std::endl;; }
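/* The ScanX / ScanY kernels above build their prefix sums from warp shuffles (__shfl_up for the
   Kogge-Stone variant, __shfl for the LF variant). A self-contained sketch of that warp-level
   inclusive scan using the _sync shuffle intrinsics required on CUDA 9 and later; the original
   code uses the older unsynchronized forms, so this spelling is an assumption, and it expects a
   fully active warp (mask 0xffffffff). */
__device__ float warp_inclusive_scan(float v) {
  unsigned lane = threadIdx.x & 31;
  #pragma unroll
  for (int offset = 1; offset < 32; offset <<= 1) {
    float up = __shfl_up_sync(0xffffffff, v, offset);
    if (lane >= offset) v += up;   /* Kogge-Stone step: add the value from 'offset' lanes back */
  }
  return v;                        /* lane i now holds the sum of lanes 0..i */
}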
62419e4ffddb5e7f9dae0e6d848a0ebf9aa9b424.cu
#include "cudaLib.cuh" #include <stdio.h> #include <vector> #include <memory> #include "integralImage.h" namespace IncrementScan { static const int WARP_SIZE = 32; static const int BLOCK_SIZE = WARP_SIZE; /************************************************************ ScanColumn algorithm ************************************************************/ template<typename TSrc, typename TDst, uint SCAN_TYPE, uint COUNT_PER_THREAD, uint WARP_COUNT> __global__ void ScanY(const TSrc* __restrict dataIn, TDst* dataOut, uint width, uint widthStride, uint height) { __shared__ TDst sMem[WARP_COUNT + 1][WARP_SIZE]; uint warpId = threadIdx.y; uint laneId = threadIdx.x; uint tidx = blockIdx.x*blockDim.x + threadIdx.x; uint tidy = (blockIdx.y*blockDim.y + threadIdx.y)*COUNT_PER_THREAD; uint PROCESS_COUNT_Y = COUNT_PER_THREAD*blockDim.y; if (tidx >= width) return; TDst data[COUNT_PER_THREAD]; for (uint y = tidy; y < height; y += PROCESS_COUNT_Y) { if (y != tidy) { if (warpId == WARP_COUNT - 1) sMem[0][laneId] = data[COUNT_PER_THREAD - 1]; __syncthreads(); } uint index = y*widthStride + tidx; { //1,load data uint yy = y; uint idx = index; #pragma unroll for (int i = 0; i < COUNT_PER_THREAD; i++) { if (yy < height) { data[i] = ldg(&dataIn[idx]); idx += widthStride; yy++; } } } { //2, increament prefix sum #pragma unroll for (int i = 1; i < COUNT_PER_THREAD; i++) { data[i] += data[i - 1]; } sMem[warpId + 1][laneId] = data[COUNT_PER_THREAD - 1]; __syncthreads(); } { #if 1 //can be improved if (warpId == 0) { #if 1 TDst s = 0; if (y != 0) { s = sMem[0][laneId]; } #pragma unroll for (int i = 1; i < WARP_COUNT + 1; i++) { s += sMem[i][laneId]; sMem[i][laneId] = s; } #else if (y != 0) { sMem[1][laneId] += sMem[0][laneId]; } //#pragma unroll for (int i = 2; i < WARP_COUNT + 1; i++) { sMem[i][laneId] += sMem[i - 1][laneId]; } #endif } #else for (uint wid = warpId; wid < WARP_SIZE; wid += WARP_COUNT) { TDst s = 0; if (laneId < WARP_COUNT) s = sMem[laneId + 1][wid]; if (y > tidy && laneId == 0) { s += sMem[0][wid]; } if (SCAN_TYPE == KoggeStone_SCAN) { #pragma unroll for (int i = 1; i <= 32; i <<= 1) { /*the first row of the matrix*/ const TDst val = __shfl_up(s, i); if (laneId >= i) { s += val; } } }else if (SCAN_TYPE == LF_SCAN) { #pragma unroll for (int i = 1; i <= 32; i <<= 1) { const TDst val = __shfl(s, i - 1, i << 1); if ((laneId & ((i << 1) - 1)) >= i) { s += val; } } } if (laneId < WARP_COUNT) sMem[laneId + 1][wid] = s; } #endif __syncthreads(); } if (y != 0) { TDst sum = sMem[warpId][laneId]; #pragma unroll for (int i = 0; i < COUNT_PER_THREAD; i++) { data[i] += sum; } } { //store uint yy = y; #pragma unroll for (int i = 0; i < COUNT_PER_THREAD; i++) { if (yy < height) { dataOut[index] = data[i]; index += widthStride; yy++; } } } __syncthreads(); } } /************************************************************ ScanRow algorithm ************************************************************/ template<typename TSrc, typename TDst, int SCAN_TYPE, int BLOCK_DIM_X, int BLOCK_DIM_Y, int COUNT_PER_THREAD> __global__ void ScanX(const TSrc* __restrict dataIn, TDst* dataOut, uint width, uint widthStride, uint height) { TDst data[COUNT_PER_THREAD]; uint warpIdX = threadIdx.x / WARP_SIZE; uint warpIdY = threadIdx.y; uint laneId = threadIdx.x & (WARP_SIZE - 1); const uint WARP_COUNT_X = BLOCK_DIM_X / WARP_SIZE; const uint WARP_COUNT_Y = BLOCK_DIM_Y; const uint WARP_PROCESS_COUNT_X = WARP_SIZE*COUNT_PER_THREAD; const uint BLOCK_PROCWSS_COUNT_X = COUNT_PER_THREAD*BLOCK_DIM_X; uint tidx = WARP_PROCESS_COUNT_X*warpIdX + laneId; 
uint tidy = (blockIdx.y*blockDim.y + threadIdx.y); __shared__ TDst sMem[WARP_COUNT_Y][WARP_COUNT_X + 1]; for (uint x = tidx; x < width; x += BLOCK_PROCWSS_COUNT_X) { if (x != tidx) { if (threadIdx.x == 0) sMem[warpIdY][0] = sMem[warpIdY][WARP_COUNT_X]; __syncthreads(); } uint index = tidy*widthStride + x; { //1, load data uint xx = x; uint idx = index; #pragma unroll for (int i = 0; i < COUNT_PER_THREAD; i++) { if (xx < width) { data[i] = dataIn[idx]; idx += WARP_SIZE; xx += WARP_SIZE; } } } { //2, scan x #pragma unroll for (int j = 0; j < COUNT_PER_THREAD; j++) { if (j > 0) { const TDst sum = __shfl(data[j - 1], WARP_SIZE - 1); if (laneId == 0) { data[j] += sum; } } if (SCAN_TYPE == KoggeStone_SCAN) { //KoggeStone_SCAN algorithm #pragma unroll for (int i = 1; i <= 32; i <<= 1) { /*the first row of the matrix*/ const TDst val = __shfl_up(data[j], i); if (laneId >= i) { data[j] += val; } } }else if (SCAN_TYPE == LF_SCAN) { //LF_Scan algorithm #pragma unroll for (int i = 1; i <= 32; i <<= 1) { const TDst val = __shfl(data[j], i - 1, i << 1); if ((laneId & ((i << 1) - 1)) >= i) { data[j] += val; } } } if (laneId == WARP_SIZE - 1) { sMem[warpIdY][warpIdX + 1] = data[COUNT_PER_THREAD - 1]; } } __syncthreads(); } { //scan partial sum if (warpIdX == 0) { TDst s = 0; if (laneId < WARP_COUNT_X) s = sMem[warpIdY][laneId + 1]; if (x != tidx && laneId == 0) s += sMem[warpIdY][0]; if (SCAN_TYPE == KoggeStone_SCAN) { #pragma unroll for (int i = 1; i <= 32; i <<= 1) { /*the first row of the matrix*/ const TDst val = __shfl_up(s, i); if (laneId >= i) { s += val; } } }else if (SCAN_TYPE == LF_SCAN) { #pragma unroll for (int i = 1; i <= 32; i <<= 1) { const TDst val = __shfl(s, i - 1, i << 1); if ((laneId & ((i << 1) - 1)) >= i) { s += val; } } } if (laneId < WARP_COUNT_X) sMem[warpIdY][laneId + 1] = s; } __syncthreads(); } { if (x >= WARP_PROCESS_COUNT_X) { TDst sum = sMem[warpIdY][warpIdX]; #pragma unroll for (int i = 0; i < COUNT_PER_THREAD; i++) { data[i] += sum; } } } { //save data uint xx = x; uint idx = index; #pragma unroll for (int i = 0; i < COUNT_PER_THREAD; i++) { if (xx < width) { dataOut[idx] = data[i]; idx += WARP_SIZE; xx += WARP_SIZE; } } } } } // void TestX(int width, int height) { // std::cout << __FUNCTION__ << std::endl; // std::cout << "begin : TestIncrementScan" << std::endl; // float inc = 0; // cudaEvent_t start, stop; // cudaEventCreate(&start); // cudaEventCreate(&stop); // // typedef float DataType; // // const uint THREAD_COUNT_PER_BLOCK = 1024; // const int BLOCK_DIM_X = 512; // const int BLOCK_DIM_Y = THREAD_COUNT_PER_BLOCK / BLOCK_DIM_X; // const int COUNT_PER_THREAD = 4; // // //const uint BLOCK_SIZE = 32; // // //const uint BLOCK_DIM_X = 256 * 4; // //int width = 1024 * 2; // //int height = 1024 * 2; // int size = width*height; // std::vector<DataType> vecA(size), vecB(size); // //for (int i = 0; i < height-16; i += 32) std::fill(vecA.begin()+i*width, vecA.begin() + (i+16)*width, 1); // // std::fill(vecA.begin(), vecA.end(), 1); // // // DevData<DataType> devA(width, height), devB(width, height), devTmp(height, width); // devA.CopyFromHost(&vecA[0], width, width, height); // // DevStream SM; // //const int PROCESS_COUNT_PER_THREAD_Y = 32; // //const int WARP_COUNT = THREAD_COUNT_PER_BLOCK / WARP_SIZE; // const dim3 block_sizeX(BLOCK_DIM_X, BLOCK_DIM_Y); // dim3 grid_sizeX(1, UpDivide(height, block_sizeX.y)); // // //dim3 grid_size1(1, UpDivide(height, BLOCK_SIZE)); // //dim3 grid_size2(1, UpDivide(width, BLOCK_SIZE)); // float tm = 0; // //tm = timeGetTime(); // 
cudaEventRecord(start, 0); // IncrementScan::ScanX<DataType, BLOCK_DIM_X, BLOCK_DIM_Y, COUNT_PER_THREAD> << <grid_sizeX, block_sizeX >> > (devA.GetData(), devB.GetData(), width, devA.DataPitch(), height); // //IncrementScan::IncrementScan<DataType, BLOCK_SIZE, 4 * sizeof(uint) / sizeof(DataType), BLOCK_DIM_X> << <grid_size1, block_size, 0, SM.stream >> > (devA.GetData(), devTmp.GetData(), width, width, height, height); // //IncrementScan::IncrementScan<DataType, BLOCK_SIZE, 4 * sizeof(uint) / sizeof(DataType), BLOCK_DIM_X> << <grid_size2, block_size, 0, SM.stream >> > (devTmp.GetData(), devB.GetData(), height, height, width, width); // cudaDeviceSynchronize(); // cudaEventRecord(stop, 0); // //CUDA_CHECK_ERROR; // // // //tm = timeGetTime() - tm; // // cudaEventSynchronize(stop); // cudaEventElapsedTime(&inc, start, stop); // // devB.CopyToHost(&vecB[0], width, width, height); // printf("%d, %d, total time = %f, %f\n", width, height, tm, inc); // //cudaSyncDevice(); // std::cout << "end : TestSerielScan" << std::endl; //#if 0 // FILE* fp = fopen("d:/ints.raw", "wb"); // if (fp) { // fwrite(&vecB[0], sizeof(vecB[0]), width*height, fp); // fclose(fp); // } //#endif // FILE* flog = fopen("d:/log.csv", "wt"); // if (flog) { // for (int i = 0; i < vecB.size(); i++) { // DataType* p = &vecB[0]; // fprintf(flog, "%.2f ", p[i]); // if (i % width == (width - 1)) // fprintf(flog, "\n"); // fflush(flog); // } // fclose(flog); // } // // // } // void TestY(int width, int height) { // DISPLAY_FUNCTION; // std::cout << __FUNCTION__ << std::endl; // std::cout << "begin : TestIncrementScan" << std::endl; // float inc = 0; // cudaEvent_t start, stop; // cudaEventCreate(&start); // cudaEventCreate(&stop); // // typedef float DataType; // // //const uint BLOCK_SIZE = 32; // const uint THREAD_COUNT_PER_BLOCK = 1024; // //const uint BLOCK_DIM_X = 256 * 4; // //int width = 1024 * 2; // //int height = 1024 * 2; // int size = width*height; // std::vector<DataType> vecA(size), vecB(size); // //for (int i = 0; i < height-16; i += 32) std::fill(vecA.begin()+i*width, vecA.begin() + (i+16)*width, 1); // // std::fill(vecA.begin(), vecA.end(), 1); // // // DevData<DataType> devA(width, height), devB(width, height), devTmp(height, width); // devA.CopyFromHost(&vecA[0], width, width, height); // // DevStream SM; // const int PROCESS_COUNT_PER_THREAD_Y = 32; // const int WARP_COUNT = THREAD_COUNT_PER_BLOCK / WARP_SIZE; // const dim3 block_sizeY(WARP_SIZE, WARP_COUNT); // dim3 grid_sizeY(UpDivide(width, block_sizeY.x), 1); // // //dim3 grid_size1(1, UpDivide(height, BLOCK_SIZE)); // //dim3 grid_size2(1, UpDivide(width, BLOCK_SIZE)); // float tm = 0; // //tm = timeGetTime(); // cudaEventRecord(start, 0); // IncrementScan::ScanY<DataType, PROCESS_COUNT_PER_THREAD_Y, WARP_COUNT> << <grid_sizeY, block_sizeY >> > (devA.GetData(), devB.GetData(), width, devA.DataPitch(), height); // //IncrementScan::IncrementScan<DataType, BLOCK_SIZE, 4 * sizeof(uint) / sizeof(DataType), BLOCK_DIM_X> << <grid_size1, block_size, 0, SM.stream >> > (devA.GetData(), devTmp.GetData(), width, width, height, height); // //IncrementScan::IncrementScan<DataType, BLOCK_SIZE, 4 * sizeof(uint) / sizeof(DataType), BLOCK_DIM_X> << <grid_size2, block_size, 0, SM.stream >> > (devTmp.GetData(), devB.GetData(), height, height, width, width); // cudaDeviceSynchronize(); // cudaEventRecord(stop, 0); // //CUDA_CHECK_ERROR; // // // //tm = timeGetTime() - tm; // // cudaEventSynchronize(stop); // cudaEventElapsedTime(&inc, start, stop); // // devB.CopyToHost(&vecB[0], 
width, width, height); // printf("%d, %d, total time = %f, %f\n", width, height, tm, inc); // //cudaSyncDevice(); // std::cout << "end : TestSerielScan" << std::endl; //#if 0 // FILE* fp = fopen("d:/ints.raw", "wb"); // if (fp) { // fwrite(&vecB[0], sizeof(vecB[0]), width*height, fp); // fclose(fp); // } //#endif //#if 1 // FILE* flog = fopen("d:/log.csv", "wt"); // if (flog) { // for (int i = 0; i < vecB.size(); i++) { // DataType* p = &vecB[0]; // fprintf(flog, "%.2f ", p[i]); // if (i % width == (width - 1)) // fprintf(flog, "\n"); // fflush(flog); // } // fclose(flog); // } //#endif // } template<typename TSrc, typename TDst, int SCAN_TYPE> void Test(int width, int height) { DISPLAY_FUNCTION; std::cout << GetDataType<TSrc>::name() << "-->" << GetDataType<TDst>::name() << ", ScanName=" << ScanName(SCAN_TYPE) << std::endl; const int REPEAT_COUNT = 1; float inc = 0; cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); //typedef float DataType; dim3 block_sizeX, grid_sizeX; //X const uint THREAD_COUNT_PER_BLOCK = 1024*sizeof(float)/sizeof(TDst); const int BLOCK_DIM_X = 32; const int BLOCK_DIM_Y = THREAD_COUNT_PER_BLOCK / BLOCK_DIM_X; const int COUNT_PER_THREAD = 32; block_sizeX = dim3(BLOCK_DIM_X, BLOCK_DIM_Y); grid_sizeX = dim3(1, UpDivide(height, block_sizeX.y)); //Y dim3 block_sizeY, grid_sizeY; const int PROCESS_COUNT_PER_THREAD_Y = 32*1; const int WARP_COUNT = THREAD_COUNT_PER_BLOCK / WARP_SIZE; block_sizeY = dim3(WARP_SIZE, WARP_COUNT); grid_sizeY = dim3(UpDivide(width, block_sizeY.x), 1); int size = width*height; std::vector<TSrc> vecA(size); std::vector<TDst> vecB(size); //for (int i = 0; i < height-16; i += 32) std::fill(vecA.begin()+i*width, vecA.begin() + (i+16)*width, 1); #if 0 std::fill(vecA.begin(), vecA.end(), 1); #else for (int i = 0; i < vecA.size(); i++) vecA[i] = (TSrc)(abs(rand()) % 2); #endif DevData<TSrc> devA(width, height); DevData<TDst> devB(width, height), devTmp(height, width); devA.CopyFromHost(&vecA[0], width, width, height); ShowGridBlockDim("ScanX", grid_sizeX, block_sizeX); ShowGridBlockDim("ScanY", grid_sizeY, block_sizeY); float tm = 0; tm = timeGetTime(); cudaEventRecord(start, 0); #pragma unroll for (int k=0; k<REPEAT_COUNT; k ++){ IncrementScan::ScanX<TSrc, TDst, SCAN_TYPE, BLOCK_DIM_X, BLOCK_DIM_Y, COUNT_PER_THREAD> << <grid_sizeX, block_sizeX >> > (devA.GetData(), devTmp.GetData(), width, devA.DataPitch(), height); IncrementScan::ScanY<TDst, TDst, SCAN_TYPE, PROCESS_COUNT_PER_THREAD_Y, WARP_COUNT> << <grid_sizeY, block_sizeY >> > (devTmp.GetData(), devB.GetData(), width, devA.DataPitch(), height); } cudaDeviceSynchronize(); cudaEventRecord(stop, 0); CUDA_CHECK_ERROR; tm = timeGetTime() - tm; cudaEventSynchronize(stop); cudaEventElapsedTime(&inc, start, stop); devB.CopyToHost(&vecB[0], width, width, height); printf("%d, %d, total time = %f, %f\n", width, height, tm, inc); //cudaSyncDevice(); { std::vector<TDst> vecTmp(size); IntegralImageSerial(&vecA[0], &vecTmp[0], width, height); bool bCmp = Compare(&vecB[0], &vecTmp[0], width, height); printf("compare = %s\n", bCmp ? 
"successed" : "failed"); } //SaveToRaw(StringFormat("./%d-%d.raw", width, height).c_str(), &vecB[0], width, height); //SaveToText("./tmp.txt", &vecTmp[0], devTmp.width, devTmp.height); //SaveToText("./vecB.txt", &vecB[0], width, height); } }; //ScanRowColumn void TestIncreamentScan(int argc, char** argv) { std::cout << "------------------------------------------------------" << std::endl; int nScanType = SCAN::LF_SCAN; int dType0 = DataType::TypeFLOAT32; int dType1 = DataType::TypeFLOAT32; int repeat = 1; #define Scan IncrementScan GetArgs(argc, argv, nScanType, dType0, dType1, repeat); #if 0 //for (int i=0; i<10; i++) Scan::Test<uint, double, SCAN::LF_SCAN>(1024 * 1, 1024 * 1); #else for (int i = 1; i <= repeat; i++) { int size = i * 1024; if (nScanType == SCAN::KoggeStone_SCAN) { if (dType0 == DataType::TypeUINT8) { if (dType1 == DataType::TypeUINT32) { Scan::Test<uchar, uint, SCAN::KoggeStone_SCAN>(size, size); } else if (dType1 == DataType::TypeFLOAT32) { Scan::Test<uchar, float, SCAN::KoggeStone_SCAN>(size, size); } else if (dType1 == DataType::TypeFLOAT64) { Scan::Test<uchar, double, SCAN::KoggeStone_SCAN>(size, size); } }if (dType0 == DataType::TypeINT8) { if (dType1 == DataType::TypeINT32) { Scan::Test<char, int, SCAN::KoggeStone_SCAN>(size, size); } else if (dType1 == DataType::TypeFLOAT32) { Scan::Test<char, float, SCAN::KoggeStone_SCAN>(size, size); } else if (dType1 == DataType::TypeFLOAT64) { Scan::Test<char, double, SCAN::KoggeStone_SCAN>(size, size); } } if (dType0 == DataType::TypeINT32) { if (dType1 == DataType::TypeINT32) { Scan::Test<int, int, SCAN::KoggeStone_SCAN>(size, size); } else if (dType1 == DataType::TypeFLOAT32) { } else if (dType1 == DataType::TypeFLOAT64) { Scan::Test<int, double, SCAN::KoggeStone_SCAN>(size, size); } } else if (dType0 == DataType::TypeFLOAT32) { if (dType1 == DataType::TypeUINT32) { } else if (dType1 == DataType::TypeFLOAT32) { Scan::Test<float, float, SCAN::KoggeStone_SCAN>(size, size); } else if (dType1 == DataType::TypeFLOAT64) { Scan::Test<float, double, SCAN::KoggeStone_SCAN>(size, size); } } else if (dType0 == DataType::TypeFLOAT64) { if (dType1 == DataType::TypeUINT32) { } else if (dType1 == DataType::TypeFLOAT32) { } else if (dType1 == DataType::TypeFLOAT64) { Scan::Test<double, double, SCAN::KoggeStone_SCAN>(size, size); } } } else { if (dType0 == DataType::TypeUINT8) { if (dType1 == DataType::TypeUINT32) { Scan::Test<uchar, uint, SCAN::LF_SCAN>(size, size); } else if (dType1 == DataType::TypeFLOAT32) { Scan::Test<uchar, float, SCAN::LF_SCAN>(size, size); } else if (dType1 == DataType::TypeFLOAT64) { Scan::Test<uchar, double, SCAN::LF_SCAN>(size, size); } }if (dType0 == DataType::TypeINT8) { if (dType1 == DataType::TypeINT32) { Scan::Test<char, int, SCAN::LF_SCAN>(size, size); } else if (dType1 == DataType::TypeFLOAT32) { Scan::Test<char, float, SCAN::LF_SCAN>(size, size); } else if (dType1 == DataType::TypeFLOAT64) { Scan::Test<char, double, SCAN::LF_SCAN>(size, size); } } if (dType0 == DataType::TypeINT32) { if (dType1 == DataType::TypeINT32) { Scan::Test<int, int, SCAN::LF_SCAN>(size, size); } else if (dType1 == DataType::TypeFLOAT32) { } else if (dType1 == DataType::TypeFLOAT64) { Scan::Test<int, double, SCAN::LF_SCAN>(size, size); } } else if (dType0 == DataType::TypeFLOAT32) { if (dType1 == DataType::TypeUINT32) { } else if (dType1 == DataType::TypeFLOAT32) { Scan::Test<float, float, SCAN::LF_SCAN>(size, size); } else if (dType1 == DataType::TypeFLOAT64) { Scan::Test<float, double, SCAN::LF_SCAN>(size, size); } } else if 
(dType0 == DataType::TypeFLOAT64) { if (dType1 == DataType::TypeUINT32) { } else if (dType1 == DataType::TypeFLOAT32) { } else if (dType1 == DataType::TypeFLOAT64) { Scan::Test<double, double, SCAN::LF_SCAN>(size, size); } } } } #undef Scan #endif std::cout << "------------------------------------------------------" << std::endl; }
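The dispatch above instantiates Scan::Test for SCAN::LF_SCAN and SCAN::KoggeStone_SCAN, but the bodies of IncrementScan::ScanX / ScanY are not part of this listing. Purely for orientation, a minimal warp-level inclusive scan in the Kogge-Stone style is sketched below; the function names, the one-warp-per-block launch shape, the omission of pitch handling, and the restriction to independent 32-element segments are assumptions for illustration, not the kernels invoked above.

#include <cuda_runtime.h>

// Kogge-Stone inclusive scan within one warp: at step d every lane adds the
// value held d lanes below it, so after log2(32) = 5 steps lane i holds the
// sum of lanes 0..i.
template <typename T>
__device__ T warp_inclusive_scan(T val) {
    const unsigned FULL_MASK = 0xffffffffu;
    unsigned lane = threadIdx.x & 31;                 // lane index within the warp
    #pragma unroll
    for (int d = 1; d < 32; d <<= 1) {
        T below = __shfl_up_sync(FULL_MASK, val, d);  // value from lane (lane - d)
        if (lane >= d) val += below;                  // lanes < d keep their value
    }
    return val;
}

// One warp (blockDim.x == 32) scans an independent 32-element row segment.
template <typename TSrc, typename TDst>
__global__ void scan_row_segments(const TSrc* in, TDst* out, int width) {
    int row  = blockIdx.y;
    int lane = threadIdx.x;
    int col  = blockIdx.x * 32 + lane;
    TDst v = (col < width) ? (TDst)in[row * width + col] : (TDst)0;
    v = warp_inclusive_scan(v);                       // all 32 lanes take part in the shuffles
    if (col < width) out[row * width + col] = v;
}

A full row scan such as ScanX additionally has to carry each segment's running total into the next segment (presumably the "increment" in IncrementScan); that chaining is omitted from this sketch.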
666d47a48bdb14d80f8a91df30b70383ac323fe6.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "cudaLib.cuh" void gpuAssert(hipError_t code, const char *file, int line, bool abort=true) { if (code != hipSuccess) { fprintf(stderr,"GPUassert: %s %s %d\n", hipGetErrorString(code), file, line); if (abort) exit(code); } } __global__ void saxpy_gpu (float* x, float* y, float scale, int size) { // Insert GPU SAXPY kernel code here } int runGpuSaxpy(int vectorSize) { std::cout << "Hello GPU Saxpy!\n"; // Insert code here std::cout << "Lazy, you are!\n"; std::cout << "Write code, you must\n"; return 0; } __global__ void generatePoints (uint64_t * pSums, uint64_t pSumSize, uint64_t sampleSize) { // Insert code here } __global__ void reduceCounts (uint64_t * pSums, uint64_t * totals, uint64_t pSumSize, uint64_t reduceSize) { // Insert code here } int runGpuMCPi (uint64_t generateThreadCount, uint64_t sampleSize, uint64_t reduceThreadCount, uint64_t reduceSize) { // Check CUDA device presence int numDev; hipGetDeviceCount(&numDev); if (numDev < 1) { std::cout << "CUDA device missing!\n"; return -1; } auto tStart = std::chrono::high_resolution_clock::now(); float approxPi = estimatePi(generateThreadCount, sampleSize, reduceThreadCount, reduceSize); std::cout << "Estimated Pi = " << approxPi << "\n"; auto tEnd= std::chrono::high_resolution_clock::now(); std::chrono::duration<double> time_span = (tEnd- tStart); std::cout << "It took " << time_span.count() << " seconds."; return 0; } double estimatePi(uint64_t generateThreadCount, uint64_t sampleSize, uint64_t reduceThreadCount, uint64_t reduceSize) { double approxPi = 3.14159f; std::cout << "Sneaky, you are ...\n"; std::cout << "Compute pi, you must!\n"; return approxPi; } int runGpuMedianFilter (std::string imgPath, std::string outPath, MedianFilterArgs args) { std::cout << "Lazy, you are! ... "; std::cout << "Filter pixels, you must! ... "; return 0; } int medianFilter_gpu (uint8_t inPixels, ImageDim imgDim, uint8_t outPixels, MedianFilterArgs args) { return 0; } int runGpuConv (int argc, char ** argv) { TensorShape iShape = AlexL1_InShape; TensorShape fShape = AlexL1_FilterShape; ConvLayerArgs convArgs = AlexL1_ConvArgs; std::cout << "Evaluate convolution : \n"; std::cout << "Input : " << iShape << " \n"; std::cout << "Filter : " << fShape << " \n"; TensorShape oShape; uint64_t errorCount = evaluateGpuConv(iShape, fShape, oShape, convArgs); std::cout << "Found " << errorCount << " / " << tensorSize(oShape) << " errors \n"; return 0; } uint64_t evaluateGpuConv (TensorShape iShape, TensorShape fShape, TensorShape & oShape, ConvLayerArgs args) { uint64_t errorCount = 0; // STUDENT: Add code here #ifndef CONV_CHECK_DISABLE // STUDENT: Verify number of errors in ouput matrix generated by convLayer_gpu // STUDENT: Compare results with CPU output // STUDENT: Return error count #endif return errorCount; } int convLayer_gpu ( float * input, TensorShape iShape, float * filter, TensorShape fShape, float * bias, float * output, TensorShape & oShape, ConvLayerArgs & args, uint32_t batchSize) { return 0; } int runGpuGemm (int argc, char ** argv) { evaluateGpuGemm(); return 0; } int evaluateGpuGemm () { return 0; } // STUDENT: Add functions here
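The saxpy_gpu body in the stub above is intentionally left for the student. As an illustration only, not the assignment's intended solution, a grid-stride kernel matching that signature could look like the sketch below; since a kernel body contains no runtime API calls, the same code compiles unchanged in both the hipified file and the original CUDA file.

#include <cuda_runtime.h>

// y[i] = scale * x[i] + y[i]; the grid-stride loop lets any launch size cover `size`.
__global__ void saxpy_gpu(float* x, float* y, float scale, int size) {
    for (int i = blockIdx.x * blockDim.x + threadIdx.x;
         i < size;
         i += gridDim.x * blockDim.x) {
        y[i] = scale * x[i] + y[i];
    }
}

A host would copy x and y to the device and launch with something like saxpy_gpu<<<(size + 255) / 256, 256>>>(d_x, d_y, 2.0f, size); the device pointer names and launch shape here are made up for the example.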
666d47a48bdb14d80f8a91df30b70383ac323fe6.cu
#include "cudaLib.cuh" void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true) { if (code != cudaSuccess) { fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line); if (abort) exit(code); } } __global__ void saxpy_gpu (float* x, float* y, float scale, int size) { // Insert GPU SAXPY kernel code here } int runGpuSaxpy(int vectorSize) { std::cout << "Hello GPU Saxpy!\n"; // Insert code here std::cout << "Lazy, you are!\n"; std::cout << "Write code, you must\n"; return 0; } __global__ void generatePoints (uint64_t * pSums, uint64_t pSumSize, uint64_t sampleSize) { // Insert code here } __global__ void reduceCounts (uint64_t * pSums, uint64_t * totals, uint64_t pSumSize, uint64_t reduceSize) { // Insert code here } int runGpuMCPi (uint64_t generateThreadCount, uint64_t sampleSize, uint64_t reduceThreadCount, uint64_t reduceSize) { // Check CUDA device presence int numDev; cudaGetDeviceCount(&numDev); if (numDev < 1) { std::cout << "CUDA device missing!\n"; return -1; } auto tStart = std::chrono::high_resolution_clock::now(); float approxPi = estimatePi(generateThreadCount, sampleSize, reduceThreadCount, reduceSize); std::cout << "Estimated Pi = " << approxPi << "\n"; auto tEnd= std::chrono::high_resolution_clock::now(); std::chrono::duration<double> time_span = (tEnd- tStart); std::cout << "It took " << time_span.count() << " seconds."; return 0; } double estimatePi(uint64_t generateThreadCount, uint64_t sampleSize, uint64_t reduceThreadCount, uint64_t reduceSize) { double approxPi = 3.14159f; std::cout << "Sneaky, you are ...\n"; std::cout << "Compute pi, you must!\n"; return approxPi; } int runGpuMedianFilter (std::string imgPath, std::string outPath, MedianFilterArgs args) { std::cout << "Lazy, you are! ... "; std::cout << "Filter pixels, you must! ... "; return 0; } int medianFilter_gpu (uint8_t inPixels, ImageDim imgDim, uint8_t outPixels, MedianFilterArgs args) { return 0; } int runGpuConv (int argc, char ** argv) { TensorShape iShape = AlexL1_InShape; TensorShape fShape = AlexL1_FilterShape; ConvLayerArgs convArgs = AlexL1_ConvArgs; std::cout << "Evaluate convolution : \n"; std::cout << "Input : " << iShape << " \n"; std::cout << "Filter : " << fShape << " \n"; TensorShape oShape; uint64_t errorCount = evaluateGpuConv(iShape, fShape, oShape, convArgs); std::cout << "Found " << errorCount << " / " << tensorSize(oShape) << " errors \n"; return 0; } uint64_t evaluateGpuConv (TensorShape iShape, TensorShape fShape, TensorShape & oShape, ConvLayerArgs args) { uint64_t errorCount = 0; // STUDENT: Add code here #ifndef CONV_CHECK_DISABLE // STUDENT: Verify number of errors in ouput matrix generated by convLayer_gpu // STUDENT: Compare results with CPU output // STUDENT: Return error count #endif return errorCount; } int convLayer_gpu ( float * input, TensorShape iShape, float * filter, TensorShape fShape, float * bias, float * output, TensorShape & oShape, ConvLayerArgs & args, uint32_t batchSize) { return 0; } int runGpuGemm (int argc, char ** argv) { evaluateGpuGemm(); return 0; } int evaluateGpuGemm () { return 0; } // STUDENT: Add functions here
81544a59baf2779f3b9029890003372a3a534a16.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <cmath> #include <chrono> #include <cstring> #include <fstream> #include <iostream> #include <stdexcept> #include "tiffio.h" // saves TIFF file from data in `raster` void save_tiff(const char *fname, uint32 *raster, uint32 w, uint32 h) { TIFF *tif = TIFFOpen(fname, "w"); if (! raster) { throw std::runtime_error("Could not open output file"); } TIFFSetField(tif, TIFFTAG_IMAGEWIDTH, w); TIFFSetField(tif, TIFFTAG_IMAGELENGTH, h); TIFFSetField(tif, TIFFTAG_SAMPLESPERPIXEL, 4); TIFFSetField(tif, TIFFTAG_BITSPERSAMPLE, 8); TIFFSetField(tif, TIFFTAG_COMPRESSION, COMPRESSION_DEFLATE); TIFFSetField(tif, TIFFTAG_ORIENTATION, ORIENTATION_TOPLEFT); TIFFSetField(tif, TIFFTAG_PHOTOMETRIC, PHOTOMETRIC_RGB); TIFFSetField(tif, TIFFTAG_PLANARCONFIG, PLANARCONFIG_CONTIG); TIFFWriteEncodedStrip(tif, 0, raster, w*h*4); TIFFClose(tif); } // loads image data from `fname` (allocating dynamic memory) // *w and *h are updated with the image dimensions // raster is a matrix flattened into an array using row-major order // every uint32 in the array is 4 bytes, enconding 8-bit packed ABGR // A: transparency attribute (can be ignored) // B: blue pixel // G: green pixel // R: red pixel uint32 *load_tiff(const char *fname, uint32 *w, uint32 *h) { TIFF *tif = TIFFOpen(fname, "r"); if (! tif) { throw std::runtime_error("Could not open input file"); } TIFFGetField(tif, TIFFTAG_IMAGEWIDTH, w); TIFFGetField(tif, TIFFTAG_IMAGELENGTH, h); uint32 *raster = (uint32 *) _TIFFmalloc(*w * *h * sizeof (uint32)); if (! raster) { TIFFClose(tif); throw std::runtime_error("Memory allocation error"); } if (! TIFFReadRGBAImageOriented(tif, *w, *h, raster, ORIENTATION_TOPLEFT, 0)) { TIFFClose(tif); throw std::runtime_error("Could not read raster from TIFF image"); } TIFFClose(tif); return raster; } // Clamp function able to be used on the GPU __device__ void cudaClamp(float *val) { if (*val < 0) *val = 0; if (*val > 255) *val = 255; } void clamp(float *val) { if (*val < 0) *val = 0; if (*val > 255) *val = 255; } // Kernel for calculation of one pixel in the raster __global__ void convolve(uint32 *raster, uint32 *copy, int w, int h, const float *filter, int st, int d) { // Calculate the row and column indices of the pixel to handle based on the block and thread int row = st + (blockIdx.y * blockDim.y) + threadIdx.y; int col = st + (blockIdx.x * blockDim.x) + threadIdx.x; // Check to make sure we are in a valid part of the grid (blocks overflow off the grid) if (row < h-st && col < w-st) { int index = row*w + col; int startIndex = index - (st*w) - st; // Accumulate RGB values float sumR, sumG, sumB; uint32 idx, pixel, kd; sumR = sumG = sumB = 0; for (uint32 k = 0 ; k < d ; k ++) { idx = startIndex + k*w; kd = k*d; for (uint32 l = 0 ; l < d ; l++) { pixel = copy[idx++]; sumR += (filter[kd + l] * TIFFGetR(pixel)); sumG += (filter[kd + l] * TIFFGetG(pixel)); sumB += (filter[kd + l] * TIFFGetB(pixel)); } } // Check that RGB channels to write to the raster are not <0 or >255 cudaClamp(&sumR); cudaClamp(&sumG); cudaClamp(&sumB); // Write the ARGB channels to the pixel using bitwise shifts and ORing of bits raster[index] = TIFFGetA(raster[index]) << 24 | ((uint32) sumB << 16) | ((uint32) sumG << 8) | ((uint32) sumR); } } void filter_image_seq(uint32 *raster, uint32 w, uint32 h, const float *filter, int f_len) { // to get RGB values from a pixel, you can either use bitwise masks // or rely on the following macros: // TIFFGetR(raster[i]) red // 
TIFFGetG(raster[i]) green // TIFFGetB(raster[i]) blue // TIFFGetA(raster[i]) this value should be ignored // // to modify RGB values from a pixel, you can use bitwise shifts or masks // each pixel stores values in the order ABGR // // TODO: here you will filter the image in raster // uint32 *copy = new uint32[w*h]; std::memcpy(copy, raster, sizeof(uint32)*w*h); uint32 d = (uint32) std::sqrt(f_len); uint32 idx, pixel; uint32 st = d / 2; uint32 end_w = w - d/2; uint32 end_h = h - d/2; float sumR, sumG, sumB; // applies filter for (uint32 i = st ; i < end_h ; i++) { for (uint32 j = st ; j < end_w ; j++) { sumR = sumG = sumB = 0; for (uint32 k = 0 ; k < d ; k ++) { idx = (i-st+k)*w + (j-st); for (uint32 l = 0 ; l < d ; l++) { pixel = copy[idx++]; sumR += (filter[k*d + l] * TIFFGetR(pixel)); sumG += (filter[k*d + l] * TIFFGetG(pixel)); sumB += (filter[k*d + l] * TIFFGetB(pixel)); } } clamp(&sumR); clamp(&sumG); clamp(&sumB); raster[i*w + j] = TIFFGetA(raster[i*w + j]) << 24 | ((uint32) sumB << 16) | ((uint32) sumG << 8) | ((uint32) sumR); } } delete [] copy; } void filter_image_par(uint32 *raster, uint32 w, uint32 h, const float *filter, int f_len, int blockSize) { // // TODO: here you will filter the image in raster using GPU threads // // to get RGB values from a pixel, you can either use bitwise masks // or rely on the following macros: // TIFFGetR(raster[i]) red // TIFFGetG(raster[i]) green // TIFFGetB(raster[i]) blue // TIFFGetA(raster[i]) this value should be ignored // // to modify RGB values from a pixel, you can use bitwise shifts or masks // each pixel stores values in the order ABGR // // TODO: here you will filter the image in raster // uint32 *copy = new uint32[w*h]; std::memcpy(copy, raster, sizeof(uint32)*w*h); uint32 d = (uint32) std::sqrt(f_len); uint32 st = d / 2; uint32 end_w = w - d/2; uint32 end_h = h - d/2; // Declare the device versions of the arrays uint32 *dev_raster; float *dev_filter; uint32 *dev_copy; // Variable to check for CUDA errors hipError_t status; // Choose GPU to run status = hipSetDevice(0); if (status != hipSuccess) std::cerr << "hipSetDevice failed!" << std::endl; // Allocate space for the arrays in the GPU status = hipMalloc(&dev_raster, sizeof(uint32) * (w*h)); if (status != hipSuccess) std::cerr << "hipMalloc (in) failed!" << std::endl; status = hipMalloc(&dev_copy, sizeof(uint32) * (w*h)); if (status != hipSuccess) std::cerr << "hipMalloc (in) failed!" << std::endl; status = hipMalloc(&dev_filter, sizeof(float) * f_len); if (status != hipSuccess) std::cerr << "hipMalloc (in) failed!" << std::endl; // Transfer image raster and filter data from CPU to GPU status = hipMemcpy(dev_raster, raster, sizeof(float) * (w*h), hipMemcpyHostToDevice); if (status != hipSuccess) std::cerr << "hipMemcpy H2D failed!" << std::endl; status = hipMemcpy(dev_copy, raster, sizeof(float) * (w*h), hipMemcpyHostToDevice); if (status != hipSuccess) std::cerr << "hipMemcpy H2D failed!" << std::endl; status = hipMemcpy(dev_filter, filter, sizeof(float) * (f_len), hipMemcpyHostToDevice); if (status != hipSuccess) std::cerr << "hipMemcpy H2D failed!" 
<< std::endl; // Defines how many threads per block there are in the x and y dimensions dim3 threadsPerBlock(blockSize, blockSize, 1); // Computes how many blocks will fit into the image with one thread per pixel // Overflows past the end of the image rows and columns when the image dimensions (x&y) don't divide evenly by the block dimensions (x&y) dim3 numBlocks((int)::ceil((float)(end_w-st)/(float)threadsPerBlock.x), (int)::ceil((float)(end_h-st)/(float)threadsPerBlock.y), 1); // Do the work in the GPU hipLaunchKernelGGL(( convolve), dim3(numBlocks), dim3(threadsPerBlock), 0, 0, dev_raster, dev_copy, w, h, dev_filter, st, d); // Wait for the kernel to finish, and check for errors status = hipDeviceSynchronize(); if (status != hipSuccess) std::cerr << "error code " << status << " returned after kernel!" << std::endl; // Transfer resulting image raster from GPU to CPU status = hipMemcpy(raster, dev_raster, sizeof(uint32) * (w*h), hipMemcpyDeviceToHost); if (status != hipSuccess) std::cerr << "hipMemcpy D2H failed!" << std::endl; // Free the memory allocated in the GPU hipFree(dev_raster); hipFree(dev_copy); hipFree(dev_filter); // Free the copy we made free(copy); } float *load_filter(const char *fname, int *n) { std::ifstream myfile(fname); if (! myfile) { throw std::runtime_error("Could not open filter file"); } myfile >> *n; float *filter = new float[*n]; for (int i = 0 ; i < *n ; i++) myfile >> filter[i]; myfile.close(); return filter; } int main(int argc, char* argv[]) { if (argc != 6) { std::cout << "Usage:\t./filter <in_fname> <out_fname> <filter_fname> <algo>" << std::endl; std::cout << "<in_fname> path to the input image" << std::endl; std::cout << "<out_fname> path to the output image" << std::endl; std::cout << "<filter_fname> path to the filter file" << std::endl; std::cout << "<algo> whether to use the sequential (seq) or parallel algorithm (par)" << std::endl; return 0; } // Defines the x&y dimension of the thread block used in the parallel solution int blockSize; sscanf(argv[5], "%d", &blockSize); uint32 width, height; // loads the filter int f_len; float *filter = load_filter(argv[3], &f_len); // loads image bytes from file name supplied as a command line argument // this function allocates memory dynamically uint32 *image = load_tiff(argv[1], &width, &height); // Make a malloc in the GPU to load the CUDA library and make sure that it is working properly uint32 *dev_initCuda; hipError_t status; status = hipMalloc(&dev_initCuda, 1); if (status != hipSuccess) std::cerr << "hipMalloc (in) failed!" << std::endl; // measure time of the algorithm auto start = std::chrono::high_resolution_clock::now(); if (! std::strcmp(argv[4], "seq")) { // call the sequential implementation filter_image_seq(image, width, height, filter, f_len); } else if (! std::strcmp(argv[4], "par")) { // TODO: call the parallel implementation filter_image_par(image, width, height, filter, f_len, blockSize); } auto end = std::chrono::high_resolution_clock::now(); std::chrono::duration<double> diff = end - start; std::cout << diff.count(); // save new file with filtered image save_tiff(argv[2], image, width, height); // frees memory allocated by load_filter and load_tiff delete [] filter; _TIFFfree(image); return 0; }
81544a59baf2779f3b9029890003372a3a534a16.cu
#include <cmath> #include <chrono> #include <cstring> #include <fstream> #include <iostream> #include <stdexcept> #include "tiffio.h" // saves TIFF file from data in `raster` void save_tiff(const char *fname, uint32 *raster, uint32 w, uint32 h) { TIFF *tif = TIFFOpen(fname, "w"); if (! raster) { throw std::runtime_error("Could not open output file"); } TIFFSetField(tif, TIFFTAG_IMAGEWIDTH, w); TIFFSetField(tif, TIFFTAG_IMAGELENGTH, h); TIFFSetField(tif, TIFFTAG_SAMPLESPERPIXEL, 4); TIFFSetField(tif, TIFFTAG_BITSPERSAMPLE, 8); TIFFSetField(tif, TIFFTAG_COMPRESSION, COMPRESSION_DEFLATE); TIFFSetField(tif, TIFFTAG_ORIENTATION, ORIENTATION_TOPLEFT); TIFFSetField(tif, TIFFTAG_PHOTOMETRIC, PHOTOMETRIC_RGB); TIFFSetField(tif, TIFFTAG_PLANARCONFIG, PLANARCONFIG_CONTIG); TIFFWriteEncodedStrip(tif, 0, raster, w*h*4); TIFFClose(tif); } // loads image data from `fname` (allocating dynamic memory) // *w and *h are updated with the image dimensions // raster is a matrix flattened into an array using row-major order // every uint32 in the array is 4 bytes, enconding 8-bit packed ABGR // A: transparency attribute (can be ignored) // B: blue pixel // G: green pixel // R: red pixel uint32 *load_tiff(const char *fname, uint32 *w, uint32 *h) { TIFF *tif = TIFFOpen(fname, "r"); if (! tif) { throw std::runtime_error("Could not open input file"); } TIFFGetField(tif, TIFFTAG_IMAGEWIDTH, w); TIFFGetField(tif, TIFFTAG_IMAGELENGTH, h); uint32 *raster = (uint32 *) _TIFFmalloc(*w * *h * sizeof (uint32)); if (! raster) { TIFFClose(tif); throw std::runtime_error("Memory allocation error"); } if (! TIFFReadRGBAImageOriented(tif, *w, *h, raster, ORIENTATION_TOPLEFT, 0)) { TIFFClose(tif); throw std::runtime_error("Could not read raster from TIFF image"); } TIFFClose(tif); return raster; } // Clamp function able to be used on the GPU __device__ void cudaClamp(float *val) { if (*val < 0) *val = 0; if (*val > 255) *val = 255; } void clamp(float *val) { if (*val < 0) *val = 0; if (*val > 255) *val = 255; } // Kernel for calculation of one pixel in the raster __global__ void convolve(uint32 *raster, uint32 *copy, int w, int h, const float *filter, int st, int d) { // Calculate the row and column indices of the pixel to handle based on the block and thread int row = st + (blockIdx.y * blockDim.y) + threadIdx.y; int col = st + (blockIdx.x * blockDim.x) + threadIdx.x; // Check to make sure we are in a valid part of the grid (blocks overflow off the grid) if (row < h-st && col < w-st) { int index = row*w + col; int startIndex = index - (st*w) - st; // Accumulate RGB values float sumR, sumG, sumB; uint32 idx, pixel, kd; sumR = sumG = sumB = 0; for (uint32 k = 0 ; k < d ; k ++) { idx = startIndex + k*w; kd = k*d; for (uint32 l = 0 ; l < d ; l++) { pixel = copy[idx++]; sumR += (filter[kd + l] * TIFFGetR(pixel)); sumG += (filter[kd + l] * TIFFGetG(pixel)); sumB += (filter[kd + l] * TIFFGetB(pixel)); } } // Check that RGB channels to write to the raster are not <0 or >255 cudaClamp(&sumR); cudaClamp(&sumG); cudaClamp(&sumB); // Write the ARGB channels to the pixel using bitwise shifts and ORing of bits raster[index] = TIFFGetA(raster[index]) << 24 | ((uint32) sumB << 16) | ((uint32) sumG << 8) | ((uint32) sumR); } } void filter_image_seq(uint32 *raster, uint32 w, uint32 h, const float *filter, int f_len) { // to get RGB values from a pixel, you can either use bitwise masks // or rely on the following macros: // TIFFGetR(raster[i]) red // TIFFGetG(raster[i]) green // TIFFGetB(raster[i]) blue // TIFFGetA(raster[i]) this value should be 
ignored // // to modify RGB values from a pixel, you can use bitwise shifts or masks // each pixel stores values in the order ABGR // // TODO: here you will filter the image in raster // uint32 *copy = new uint32[w*h]; std::memcpy(copy, raster, sizeof(uint32)*w*h); uint32 d = (uint32) std::sqrt(f_len); uint32 idx, pixel; uint32 st = d / 2; uint32 end_w = w - d/2; uint32 end_h = h - d/2; float sumR, sumG, sumB; // applies filter for (uint32 i = st ; i < end_h ; i++) { for (uint32 j = st ; j < end_w ; j++) { sumR = sumG = sumB = 0; for (uint32 k = 0 ; k < d ; k ++) { idx = (i-st+k)*w + (j-st); for (uint32 l = 0 ; l < d ; l++) { pixel = copy[idx++]; sumR += (filter[k*d + l] * TIFFGetR(pixel)); sumG += (filter[k*d + l] * TIFFGetG(pixel)); sumB += (filter[k*d + l] * TIFFGetB(pixel)); } } clamp(&sumR); clamp(&sumG); clamp(&sumB); raster[i*w + j] = TIFFGetA(raster[i*w + j]) << 24 | ((uint32) sumB << 16) | ((uint32) sumG << 8) | ((uint32) sumR); } } delete [] copy; } void filter_image_par(uint32 *raster, uint32 w, uint32 h, const float *filter, int f_len, int blockSize) { // // TODO: here you will filter the image in raster using GPU threads // // to get RGB values from a pixel, you can either use bitwise masks // or rely on the following macros: // TIFFGetR(raster[i]) red // TIFFGetG(raster[i]) green // TIFFGetB(raster[i]) blue // TIFFGetA(raster[i]) this value should be ignored // // to modify RGB values from a pixel, you can use bitwise shifts or masks // each pixel stores values in the order ABGR // // TODO: here you will filter the image in raster // uint32 *copy = new uint32[w*h]; std::memcpy(copy, raster, sizeof(uint32)*w*h); uint32 d = (uint32) std::sqrt(f_len); uint32 st = d / 2; uint32 end_w = w - d/2; uint32 end_h = h - d/2; // Declare the device versions of the arrays uint32 *dev_raster; float *dev_filter; uint32 *dev_copy; // Variable to check for CUDA errors cudaError_t status; // Choose GPU to run status = cudaSetDevice(0); if (status != cudaSuccess) std::cerr << "cudaSetDevice failed!" << std::endl; // Allocate space for the arrays in the GPU status = cudaMalloc(&dev_raster, sizeof(uint32) * (w*h)); if (status != cudaSuccess) std::cerr << "cudaMalloc (in) failed!" << std::endl; status = cudaMalloc(&dev_copy, sizeof(uint32) * (w*h)); if (status != cudaSuccess) std::cerr << "cudaMalloc (in) failed!" << std::endl; status = cudaMalloc(&dev_filter, sizeof(float) * f_len); if (status != cudaSuccess) std::cerr << "cudaMalloc (in) failed!" << std::endl; // Transfer image raster and filter data from CPU to GPU status = cudaMemcpy(dev_raster, raster, sizeof(float) * (w*h), cudaMemcpyHostToDevice); if (status != cudaSuccess) std::cerr << "cudaMemcpy H2D failed!" << std::endl; status = cudaMemcpy(dev_copy, raster, sizeof(float) * (w*h), cudaMemcpyHostToDevice); if (status != cudaSuccess) std::cerr << "cudaMemcpy H2D failed!" << std::endl; status = cudaMemcpy(dev_filter, filter, sizeof(float) * (f_len), cudaMemcpyHostToDevice); if (status != cudaSuccess) std::cerr << "cudaMemcpy H2D failed!" 
<< std::endl; // Defines how many threads per block there are in the x and y dimensions dim3 threadsPerBlock(blockSize, blockSize, 1); // Computes how many blocks will fit into the image with one thread per pixel // Overflows past the end of the image rows and columns when the image dimensions (x&y) don't divide evenly by the block dimensions (x&y) dim3 numBlocks((int)std::ceil((float)(end_w-st)/(float)threadsPerBlock.x), (int)std::ceil((float)(end_h-st)/(float)threadsPerBlock.y), 1); // Do the work in the GPU convolve<<<numBlocks, threadsPerBlock>>>(dev_raster, dev_copy, w, h, dev_filter, st, d); // Wait for the kernel to finish, and check for errors status = cudaThreadSynchronize(); if (status != cudaSuccess) std::cerr << "error code " << status << " returned after kernel!" << std::endl; // Transfer resulting image raster from GPU to CPU status = cudaMemcpy(raster, dev_raster, sizeof(uint32) * (w*h), cudaMemcpyDeviceToHost); if (status != cudaSuccess) std::cerr << "cudaMemcpy D2H failed!" << std::endl; // Free the memory allocated in the GPU cudaFree(dev_raster); cudaFree(dev_copy); cudaFree(dev_filter); // Free the copy we made free(copy); } float *load_filter(const char *fname, int *n) { std::ifstream myfile(fname); if (! myfile) { throw std::runtime_error("Could not open filter file"); } myfile >> *n; float *filter = new float[*n]; for (int i = 0 ; i < *n ; i++) myfile >> filter[i]; myfile.close(); return filter; } int main(int argc, char* argv[]) { if (argc != 6) { std::cout << "Usage:\t./filter <in_fname> <out_fname> <filter_fname> <algo>" << std::endl; std::cout << "<in_fname> path to the input image" << std::endl; std::cout << "<out_fname> path to the output image" << std::endl; std::cout << "<filter_fname> path to the filter file" << std::endl; std::cout << "<algo> whether to use the sequential (seq) or parallel algorithm (par)" << std::endl; return 0; } // Defines the x&y dimension of the thread block used in the parallel solution int blockSize; sscanf(argv[5], "%d", &blockSize); uint32 width, height; // loads the filter int f_len; float *filter = load_filter(argv[3], &f_len); // loads image bytes from file name supplied as a command line argument // this function allocates memory dynamically uint32 *image = load_tiff(argv[1], &width, &height); // Make a malloc in the GPU to load the CUDA library and make sure that it is working properly uint32 *dev_initCuda; cudaError_t status; status = cudaMalloc(&dev_initCuda, 1); if (status != cudaSuccess) std::cerr << "cudaMalloc (in) failed!" << std::endl; // measure time of the algorithm auto start = std::chrono::high_resolution_clock::now(); if (! std::strcmp(argv[4], "seq")) { // call the sequential implementation filter_image_seq(image, width, height, filter, f_len); } else if (! std::strcmp(argv[4], "par")) { // TODO: call the parallel implementation filter_image_par(image, width, height, filter, f_len, blockSize); } auto end = std::chrono::high_resolution_clock::now(); std::chrono::duration<double> diff = end - start; std::cout << diff.count(); // save new file with filtered image save_tiff(argv[2], image, width, height); // frees memory allocated by load_filter and load_tiff delete [] filter; _TIFFfree(image); return 0; }
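For reference, load_filter in the program above reads a plain-text file whose first number is the element count n, followed by n floats; the filter is treated as a square kernel of width d = sqrt(n). The file contents and command line below are invented for illustration (any 3x3 box-blur file and any paths would do). Note that main requires five arguments (argc == 6), with the thread-block edge length as the fifth, even though the usage message lists only four.

box3.txt (hypothetical 3x3 box-blur filter file):
9
0.1111 0.1111 0.1111
0.1111 0.1111 0.1111
0.1111 0.1111 0.1111

Hypothetical invocations:
./filter input.tif filtered.tif box3.txt par 16    (GPU path, 16x16 thread blocks)
./filter input.tif filtered.tif box3.txt seq 16    (sequential reference; the block size is parsed but unused)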